Index: usr.sbin/bhyveload/bhyveload.8
===================================================================
--- usr.sbin/bhyveload/bhyveload.8	(revision 247523)
+++ usr.sbin/bhyveload/bhyveload.8	(working copy)
@@ -35,8 +35,7 @@
 guest inside a bhyve virtual machine
 .Sh SYNOPSIS
 .Nm
-.Op Fl m Ar lowmem
-.Op Fl M Ar highmem
+.Op Fl m Ar mem-size
 .Op Fl d Ar disk-path
 .Op Fl h Ar host-path
 .Ar vmname
@@ -61,22 +60,13 @@
 .Sh OPTIONS
 The following options are available:
 .Bl -tag -width indent
-.It Fl m Ar lowmem
-.Ar lowmem
-is the amount of memory allocated below 4GB in the guest's physical address
-space.
+.It Fl m Ar mem-size
+.Ar mem-size
+is the amount of memory allocated to the guest in units of megabytes.
 .Pp
 The default value of
-.Ar lowmem
-is 256MB.
-.It Fl M Ar highmem
-.Ar highmem
-is the amount of memory allocated above 4GB in the guest's physical address
-space.
-.Pp
-The default value of
-.Ar highmem
-is 0MB.
+.Ar mem-size
+is 256.
 .It Fl d Ar disk-path
 The
 .Ar disk-path
@@ -93,16 +83,8 @@
 .Pa /freebsd/release.iso
 and has 1GB memory allocated to it:
 .Pp
-.Dl "bhyveload -m 256 -M 768 -d /freebsd/release.iso freebsd-vm"
+.Dl "bhyveload -m 1024 -d /freebsd/release.iso freebsd-vm"
 .Pp
-In the example above the 1GB allocation is split in two segments:
-.Pp
-.Bl -dash -compact
-.It
-256MB below the 4GB boundary (0MB - 256MB)
-.It
-768MB above the 4GB boundary (4096MB - 4864MB)
-.El
 .Sh SEE ALSO
 .Xr bhyve 4 ,
 .Xr bhyve 8 ,
Index: usr.sbin/bhyveload/bhyveload.c
===================================================================
--- usr.sbin/bhyveload/bhyveload.c	(revision 247523)
+++ usr.sbin/bhyveload/bhyveload.c	(working copy)
@@ -88,8 +88,7 @@
 static struct termios term, oldterm;
 static int disk_fd = -1;
-static char *vmname, *progname, *membase;
-static uint64_t lowmem, highmem;
+static char *vmname, *progname;
 static struct vmctx *ctx;
 static uint64_t gdtbase, cr3, rsp;
@@ -323,30 +322,30 @@
 static int
 cb_copyin(void *arg, const void *from, uint64_t to, size_t size)
 {
+	char *ptr;
 	to &= 0x7fffffff;
-	if (to > lowmem)
+
+	ptr = vm_map_gpa(ctx, to, size);
+	if (ptr == NULL)
 		return (EFAULT);
-	if (to + size > lowmem)
-		size = lowmem - to;
-	memcpy(&membase[to], from, size);
-
+	memcpy(ptr, from, size);
 	return (0);
 }
 static int
 cb_copyout(void *arg, uint64_t from, void *to, size_t size)
 {
+	char *ptr;
 	from &= 0x7fffffff;
-	if (from > lowmem)
+
+	ptr = vm_map_gpa(ctx, from, size);
+	if (ptr == NULL)
 		return (EFAULT);
-	if (from + size > lowmem)
-		size = lowmem - from;
-	memcpy(to, &membase[from], size);
-
+	memcpy(to, ptr, size);
 	return (0);
 }
@@ -493,8 +492,8 @@
 cb_getmem(void *arg, uint64_t *ret_lowmem, uint64_t *ret_highmem)
 {
-	*ret_lowmem = lowmem;
-	*ret_highmem = highmem;
+	vm_get_memory_seg(ctx, 0, ret_lowmem);
+	vm_get_memory_seg(ctx, 4 * GB, ret_highmem);
 }
 static const char *
@@ -551,9 +550,9 @@
 usage(void)
 {
-	printf("usage: %s [-d <disk-path>] [-h <host-path>] "
-	    "[-m <lowmem>][-M <highmem>] "
-	    "<vmname>\n", progname);
+	fprintf(stderr,
+	    "usage: %s [-m mem-size][-d <disk-path>] [-h <host-path>] "
+	    "<vmname>\n", progname);
 	exit(1);
 }
@@ -562,16 +561,16 @@
 {
 	void *h;
 	void (*func)(struct loader_callbacks *, void *, int, int);
+	uint64_t mem_size;
 	int opt, error;
 	char *disk_image;
 	progname = argv[0];
-	lowmem = 128 * MB;
-	highmem = 0;
+	mem_size = 256 * MB;
 	disk_image = NULL;
-	while ((opt = getopt(argc, argv, "d:h:m:M:")) != -1) {
+	while ((opt = getopt(argc, argv, "d:h:m:")) != -1) {
 		switch (opt) {
 		case 'd':
 			disk_image = optarg;
@@ -582,13 +581,9 @@
 			break;
 		case 'm':
-			lowmem = strtoul(optarg, NULL, 0) * MB;
+			mem_size = strtoul(optarg, NULL, 0) * MB;
 			break;
-		case 'M':
-			highmem = strtoul(optarg, NULL, 0) * MB;
-			break;
-
 		case '?':
 			usage();
 		}
@@ -615,20 +610,12 @@
 		exit(1);
 	}
-	error = vm_setup_memory(ctx, 0, lowmem, &membase);
+	error = vm_setup_memory(ctx, mem_size, VM_MMAP_ALL);
 	if (error) {
-		perror("vm_setup_memory(lowmem)");
+		perror("vm_setup_memory");
 		exit(1);
 	}
-	if (highmem != 0) {
-		error = vm_setup_memory(ctx, 4 * GB, highmem, NULL);
-		if (error) {
-			perror("vm_setup_memory(highmem)");
-			exit(1);
-		}
-	}
-
 	tcgetattr(0, &term);
 	oldterm = term;
 	term.c_lflag &= ~(ICANON|ECHO);
Index: usr.sbin/bhyvectl/bhyvectl.c
===================================================================
--- usr.sbin/bhyvectl/bhyvectl.c	(revision 247523)
+++ usr.sbin/bhyvectl/bhyvectl.c	(working copy)
@@ -186,9 +186,8 @@
 	"       [--set-x2apic-state=<state>]\n"
 	"       [--get-x2apic-state]\n"
 	"       [--unassign-pptdev=<bus/slot/func>]\n"
-	"       [--set-lowmem=<memory in units of MB>]\n"
+	"       [--set-mem=<memory in units of MB>]\n"
 	"       [--get-lowmem]\n"
-	"       [--set-highmem=<memory in units of MB>]\n"
 	"       [--get-highmem]\n", progname);
 	exit(1);
@@ -197,7 +196,7 @@
 static int get_stats, getcap, setcap, capval;
 static const char *capname;
 static int create, destroy, get_lowmem, get_highmem;
-static uint64_t lowmem, highmem;
+static uint64_t memsize;
 static int set_cr0, get_cr0, set_cr3, get_cr3, set_cr4, get_cr4;
 static int set_efer, get_efer;
 static int set_dr7, get_dr7;
@@ -351,8 +350,7 @@
 enum {
 	VMNAME = 1000,	/* avoid collision with return values from getopt */
 	VCPU,
-	SET_LOWMEM,
-	SET_HIGHMEM,
+	SET_MEM,
 	SET_EFER,
 	SET_CR0,
 	SET_CR3,
@@ -400,8 +398,7 @@
 	struct option opts[] = {
 		{ "vm",		REQ_ARG,	0,	VMNAME },
 		{ "cpu",	REQ_ARG,	0,	VCPU },
-		{ "set-lowmem",	REQ_ARG,	0,	SET_LOWMEM },
-		{ "set-highmem",REQ_ARG,	0,	SET_HIGHMEM },
+		{ "set-mem",	REQ_ARG,	0,	SET_MEM },
		{ "set-efer",	REQ_ARG,	0,	SET_EFER },
		{ "set-cr0",	REQ_ARG,	0,	SET_CR0 },
		{ "set-cr3",	REQ_ARG,	0,	SET_CR3 },
@@ -572,14 +569,10 @@
 		case VCPU:
 			vcpu = atoi(optarg);
 			break;
-		case SET_LOWMEM:
-			lowmem = atoi(optarg) * MB;
-			lowmem = roundup(lowmem, 2 * MB);
+		case SET_MEM:
+			memsize = atoi(optarg) * MB;
+			memsize = roundup(memsize, 2 * MB);
 			break;
-		case SET_HIGHMEM:
-			highmem = atoi(optarg) * MB;
-			highmem = roundup(highmem, 2 * MB);
-			break;
 		case SET_EFER:
 			efer = strtoul(optarg, NULL, 0);
 			set_efer = 1;
@@ -702,12 +695,9 @@
 		error = -1;
 	}
-	if (!error && lowmem)
-		error = vm_setup_memory(ctx, 0, lowmem, NULL);
+	if (!error && memsize)
+		error = vm_setup_memory(ctx, memsize, VM_MMAP_NONE);
-	if (!error && highmem)
-		error = vm_setup_memory(ctx, 4 * GB, highmem, NULL);
-
 	if (!error && set_efer)
 		error = vm_set_register(ctx, vcpu, VM_REG_GUEST_EFER, efer);
Index: usr.sbin/bhyve/pci_virtio_net.c
===================================================================
--- usr.sbin/bhyve/pci_virtio_net.c	(revision 247523)
+++ usr.sbin/bhyve/pci_virtio_net.c	(working copy)
@@ -148,6 +148,7 @@
 	struct vring_hqueue vsc_hq[VTNET_MAXQ];
 	uint16_t vsc_msix_table_idx[VTNET_MAXQ];
 };
+#define vtnet_ctx(sc)	((sc)->vsc_pi->pi_vmctx)
 /*
  * Return the size of IO BAR that maps virtio header and device specific
@@ -326,7 +327,7 @@
 	 * Get a pointer to the rx header, and use the
 	 * data immediately following it for the packet buffer.
 	 */
-	vrx = paddr_guest2host(vd->vd_addr, vd->vd_len);
+	vrx = paddr_guest2host(vtnet_ctx(sc), vd->vd_addr, vd->vd_len);
 	buf = (uint8_t *)(vrx + 1);
 	len = read(sc->vsc_tapfd, buf,
@@ -434,7 +435,8 @@
 	for (i = 0, plen = 0;
 	     i < VTNET_MAXSEGS;
 	     i++, vd = &hq->hq_dtable[vd->vd_next]) {
-		iov[i].iov_base = paddr_guest2host(vd->vd_addr, vd->vd_len);
+		iov[i].iov_base = paddr_guest2host(vtnet_ctx(sc),
+		    vd->vd_addr, vd->vd_len);
 		iov[i].iov_len = vd->vd_len;
 		plen += vd->vd_len;
 		tlen += vd->vd_len;
@@ -517,7 +519,7 @@
 		hq = &sc->vsc_hq[qnum];
 		hq->hq_size = pci_vtnet_qsize(qnum);
-		hq->hq_dtable = paddr_guest2host(pfn << VRING_PFN,
+		hq->hq_dtable = paddr_guest2host(vtnet_ctx(sc), pfn << VRING_PFN,
 		    vring_size(hq->hq_size));
 		hq->hq_avail_flags = (uint16_t *)(hq->hq_dtable + hq->hq_size);
 		hq->hq_avail_idx = hq->hq_avail_flags + 1;
Index: usr.sbin/bhyve/pci_virtio_block.c
===================================================================
--- usr.sbin/bhyve/pci_virtio_block.c	(revision 247523)
+++ usr.sbin/bhyve/pci_virtio_block.c	(working copy)
@@ -141,6 +141,7 @@
 	uint16_t msix_table_idx_req;
 	uint16_t msix_table_idx_cfg;
 };
+#define vtblk_ctx(sc)	((sc)->vbsc_pi->pi_vmctx)
 /*
  * Return the size of IO BAR that maps virtio header and device specific
@@ -222,13 +223,14 @@
 	assert(nsegs >= 3);
 	assert(nsegs < VTBLK_MAXSEGS + 2);
-	vid = paddr_guest2host(vd->vd_addr, vd->vd_len);
+	vid = paddr_guest2host(vtblk_ctx(sc), vd->vd_addr, vd->vd_len);
 	assert((vid->vd_flags & VRING_DESC_F_INDIRECT) == 0);
 	/*
 	 * The first descriptor will be the read-only fixed header
 	 */
-	vbh = paddr_guest2host(vid[0].vd_addr, sizeof(struct virtio_blk_hdr));
+	vbh = paddr_guest2host(vtblk_ctx(sc), vid[0].vd_addr,
+	    sizeof(struct virtio_blk_hdr));
 	assert(vid[0].vd_len == sizeof(struct virtio_blk_hdr));
 	assert(vid[0].vd_flags & VRING_DESC_F_NEXT);
 	assert((vid[0].vd_flags & VRING_DESC_F_WRITE) == 0);
@@ -247,8 +249,8 @@
 	 * Build up the iovec based on the guest's data descriptors
 	 */
 	for (i = 1, iolen = 0; i < nsegs - 1; i++) {
-		iov[i-1].iov_base = paddr_guest2host(vid[i].vd_addr,
-		    vid[i].vd_len);
+		iov[i-1].iov_base = paddr_guest2host(vtblk_ctx(sc),
+		    vid[i].vd_addr, vid[i].vd_len);
 		iov[i-1].iov_len = vid[i].vd_len;
 		iolen += vid[i].vd_len;
@@ -266,7 +268,7 @@
 	}
 	/* Lastly, get the address of the status byte */
-	status = paddr_guest2host(vid[nsegs - 1].vd_addr, 1);
+	status = paddr_guest2host(vtblk_ctx(sc), vid[nsegs - 1].vd_addr, 1);
 	assert(vid[nsegs - 1].vd_len == 1);
 	assert((vid[nsegs - 1].vd_flags & VRING_DESC_F_NEXT) == 0);
 	assert(vid[nsegs - 1].vd_flags & VRING_DESC_F_WRITE);
@@ -342,7 +344,7 @@
 		hq = &sc->vbsc_q;
 		hq->hq_size = VTBLK_RINGSZ;
-		hq->hq_dtable = paddr_guest2host(pfn << VRING_PFN,
+		hq->hq_dtable = paddr_guest2host(vtblk_ctx(sc), pfn << VRING_PFN,
 		    vring_size(VTBLK_RINGSZ));
 		hq->hq_avail_flags = (uint16_t *)(hq->hq_dtable + hq->hq_size);
 		hq->hq_avail_idx = hq->hq_avail_flags + 1;
Index: usr.sbin/bhyve/pci_emul.c
===================================================================
--- usr.sbin/bhyve/pci_emul.c	(revision 247523)
+++ usr.sbin/bhyve/pci_emul.c	(working copy)
@@ -86,6 +86,8 @@
 SET_DECLARE(pci_devemu_set, struct pci_devemu);
+static uint32_t pci_hole_startaddr;
+
 static uint64_t pci_emul_iobase;
 static uint64_t pci_emul_membase32;
 static uint64_t pci_emul_membase64;
@@ -93,7 +95,6 @@
 #define PCI_EMUL_IOBASE		0x2000
 #define PCI_EMUL_IOLIMIT	0x10000
-#define PCI_EMUL_MEMBASE32	(lomem_sz)
 #define PCI_EMUL_MEMLIMIT32	0xE0000000	/* 3.5GB */
 #define PCI_EMUL_MEMBASE64	0xD000000000UL
@@ -870,8 +871,10 @@
 	int slot, func;
 	int error;
+	pci_hole_startaddr = vm_get_lowmem_limit(ctx);
+
 	pci_emul_iobase = PCI_EMUL_IOBASE;
-	pci_emul_membase32 = PCI_EMUL_MEMBASE32;
+	pci_emul_membase32 = pci_hole_startaddr;
 	pci_emul_membase64 = PCI_EMUL_MEMBASE64;
 	for (slot = 0; slot < MAXSLOTS; slot++) {
@@ -904,8 +907,8 @@
 		memset(&memp, 0, sizeof(struct mem_range));
 		memp.name = "PCI hole";
 		memp.flags = MEM_F_RW;
-		memp.base = lomem_sz;
-		memp.size = (4ULL * 1024 * 1024 * 1024) - lomem_sz;
+		memp.base = pci_hole_startaddr;
+		memp.size = (4ULL * 1024 * 1024 * 1024) - pci_hole_startaddr;
 		memp.handler = pci_emul_fallback_handler;
 		error = register_mem_fallback(&memp);
Index: usr.sbin/bhyve/acpi.c
===================================================================
--- usr.sbin/bhyve/acpi.c	(revision 247523)
+++ usr.sbin/bhyve/acpi.c	(working copy)
@@ -680,25 +680,26 @@
 }
 static int
-basl_load(int fd, uint64_t off)
+basl_load(struct vmctx *ctx, int fd, uint64_t off)
 {
 	struct stat sb;
 	void *gaddr;
-	int err;
-	err = 0;
-	gaddr = paddr_guest2host(basl_acpi_base + off, sb.st_size);
-	if (gaddr != NULL) {
-		if (fstat(fd, &sb) < 0 || read(fd, gaddr, sb.st_size) < 0)
-			err = errno;
-	} else
-		err = EFAULT;
+	if (fstat(fd, &sb) < 0)
+		return (errno);
+
+	gaddr = paddr_guest2host(ctx, basl_acpi_base + off, sb.st_size);
+	if (gaddr == NULL)
+		return (EFAULT);
-	return (err);
+	if (read(fd, gaddr, sb.st_size) < 0)
+		return (errno);
+
+	return (0);
 }
 static int
-basl_compile(int (*fwrite_section)(FILE *fp), uint64_t offset)
+basl_compile(struct vmctx *ctx, int (*fwrite_section)(FILE *), uint64_t offset)
 {
 	struct basl_fio io[2];
 	static char iaslbuf[3*MAXPATHLEN + 10];
@@ -732,7 +733,7 @@
 			 * Copy the aml output file into guest
 			 * memory at the specified location
 			 */
-			err = basl_load(io[1].fd, offset);
+			err = basl_load(ctx, io[1].fd, offset);
 		}
 	}
 	basl_end(&io[0], &io[1]);
@@ -838,7 +839,7 @@
 	 * copying them into guest memory
 	 */
 	while (!err && basl_ftables[i].wsect != NULL) {
-		err = basl_compile(basl_ftables[i].wsect,
+		err = basl_compile(ctx, basl_ftables[i].wsect,
 		    basl_ftables[i].offset);
 		i++;
 	}
Index: usr.sbin/bhyve/bhyverun.c
===================================================================
--- usr.sbin/bhyve/bhyverun.c	(revision 247523)
+++ usr.sbin/bhyve/bhyverun.c	(working copy)
@@ -80,9 +80,6 @@
 int guest_hz = DEFAULT_GUEST_HZ;
 char *vmname;
-u_long lomem_sz;
-u_long himem_sz;
-
 int guest_ncpus;
 static int pincpu = -1;
@@ -95,9 +92,6 @@
 static int acpi;
-static char *lomem_addr;
-static char *himem_addr;
-
 static char *progname;
 static const int BSP = 0;
@@ -147,8 +141,7 @@
 	    "       -z: guest hz (default is %d)\n"
 	    "       -s: PCI slot config\n"
 	    "       -S: legacy PCI slot config\n"
-	    "       -m: lowmem in MB\n"
-	    "       -M: highmem in MB\n"
+	    "       -m: memory size in MB\n"
 	    "       -x: mux vcpus to 1 hcpu\n"
 	    "       -t: mux vcpu timeslice hz (default %d)\n",
 	    progname, DEFAULT_GDB_PORT, DEFAULT_GUEST_HZ,
@@ -157,19 +150,10 @@
 }
 void *
-paddr_guest2host(uintptr_t gaddr, size_t len)
+paddr_guest2host(struct vmctx *ctx, uintptr_t gaddr, size_t len)
 {
-	if (gaddr < lomem_sz && gaddr + len <= lomem_sz)
-		return ((void *)(lomem_addr + gaddr));
-
-	if (gaddr >= 4*GB) {
-		gaddr -= 4*GB;
-		if (gaddr < himem_sz && gaddr + len <= himem_sz)
-			return ((void *)(himem_addr + gaddr));
-	}
-
-	return (NULL);
+	return (vm_map_gpa(ctx, gaddr, len));
 }
 int
@@ -604,6 +588,7 @@
 	int max_vcpus;
 	struct vmctx *ctx;
 	uint64_t rip;
+	size_t memsize;
 	bvmcons = 0;
 	inject_bkpt = 0;
@@ -611,8 +596,9 @@
 	gdb_port = DEFAULT_GDB_PORT;
 	guest_ncpus = 1;
 	ioapic = 0;
+	memsize = 256 * MB;
"abehABHIPxp:g:c:z:s:S:n:m:M:")) != -1) { + while ((c = getopt(argc, argv, "abehABHIPxp:g:c:z:s:S:n:m:")) != -1) { switch (c) { case 'a': disable_x2apic = 1; @@ -651,11 +637,8 @@ pci_parse_slot(optarg, 1); break; case 'm': - lomem_sz = strtoul(optarg, NULL, 0) * MB; + memsize = strtoul(optarg, NULL, 0) * MB; break; - case 'M': - himem_sz = strtoul(optarg, NULL, 0) * MB; - break; case 'H': guest_vmexit_on_hlt = 1; break; @@ -739,17 +722,10 @@ exit(1); } - if (lomem_sz != 0) { - lomem_addr = vm_map_memory(ctx, 0, lomem_sz); - if (lomem_addr == (char *) MAP_FAILED) { - lomem_sz = 0; - } else if (himem_sz != 0) { - himem_addr = vm_map_memory(ctx, 4*GB, himem_sz); - if (himem_addr == (char *) MAP_FAILED) { - lomem_sz = 0; - himem_sz = 0; - } - } + err = vm_setup_memory(ctx, memsize, VM_MMAP_ALL); + if (err) { + fprintf(stderr, "Unable to setup memory (%d)\n", err); + exit(1); } init_inout(); Index: usr.sbin/bhyve/bhyverun.h =================================================================== --- usr.sbin/bhyve/bhyverun.h (revision 247523) +++ usr.sbin/bhyve/bhyverun.h (working copy) @@ -41,10 +41,8 @@ extern int guest_ncpus; extern char *vmname; -extern u_long lomem_sz, himem_sz; +void *paddr_guest2host(struct vmctx *ctx, uintptr_t addr, size_t len); -void *paddr_guest2host(uintptr_t addr, size_t len); - void fbsdrun_addcpu(struct vmctx *ctx, int cpu, uint64_t rip); int fbsdrun_muxed(void); int fbsdrun_vmexit_on_hlt(void); Index: usr.sbin/bhyve/mptbl.c =================================================================== --- usr.sbin/bhyve/mptbl.c (revision 247523) +++ usr.sbin/bhyve/mptbl.c (working copy) @@ -349,7 +349,7 @@ char *curraddr; char *startaddr; - startaddr = paddr_guest2host(MPTABLE_BASE, MPTABLE_MAX_LENGTH); + startaddr = paddr_guest2host(ctx, MPTABLE_BASE, MPTABLE_MAX_LENGTH); if (startaddr == NULL) { printf("mptable requires mapped mem\n"); return (ENOMEM); Index: lib/libvmmapi/vmmapi.h =================================================================== --- lib/libvmmapi/vmmapi.h (revision 247523) +++ lib/libvmmapi/vmmapi.h (working copy) @@ -32,24 +32,26 @@ struct vmctx; enum x2apic_state; +/* + * Different styles of mapping the memory assigned to a VM into the address + * space of the controlling process. + */ +enum vm_mmap_style { + VM_MMAP_NONE, /* no mapping */ + VM_MMAP_ALL, /* fully and statically mapped */ + VM_MMAP_SPARSE, /* mappings created on-demand */ +}; + int vm_create(const char *name); struct vmctx *vm_open(const char *name); void vm_destroy(struct vmctx *ctx); size_t vmm_get_mem_total(void); size_t vmm_get_mem_free(void); int vm_get_memory_seg(struct vmctx *ctx, vm_paddr_t gpa, size_t *ret_len); -/* - * Create a memory segment of 'len' bytes in the guest physical address space - * at offset 'gpa'. - * - * If 'mapaddr' is not NULL then this region is mmap'ed into the address - * space of the calling process. If there is an mmap error then *mapaddr - * will be set to MAP_FAILED. 
- */
-
-int	vm_setup_memory(struct vmctx *ctx, vm_paddr_t gpa, size_t len,
-	    char **mapaddr);
-char	*vm_map_memory(struct vmctx *ctx, vm_paddr_t gpa, size_t len);
+int	vm_setup_memory(struct vmctx *ctx, size_t len, enum vm_mmap_style s);
+void	*vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len);
+uint32_t vm_get_lowmem_limit(struct vmctx *ctx);
+void	vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit);
 int	vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
 	    uint64_t base, uint32_t limit, uint32_t access);
 int	vm_get_desc(struct vmctx *ctx, int vcpu, int reg,
Index: lib/libvmmapi/vmmapi.c
===================================================================
--- lib/libvmmapi/vmmapi.c	(revision 247523)
+++ lib/libvmmapi/vmmapi.c	(working copy)
@@ -48,8 +48,16 @@
 #include "vmmapi.h"
+#define	GB	(1024 * 1024 * 1024UL)
+
 struct vmctx {
 	int	fd;
+	uint32_t lowmem_limit;
+	enum vm_mmap_style vms;
+	size_t	lowmem;
+	char	*lowmem_addr;
+	size_t	highmem;
+	char	*highmem_addr;
 	char	*name;
 };
@@ -90,6 +98,7 @@
 	assert(vm != NULL);
 	vm->fd = -1;
+	vm->lowmem_limit = 3 * GB;
 	vm->name = (char *)(vm + 1);
 	strcpy(vm->name, name);
@@ -151,9 +160,23 @@
 	return (error);
 }
-int
-vm_setup_memory(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **mapaddr)
+uint32_t
+vm_get_lowmem_limit(struct vmctx *ctx)
 {
+
+	return (ctx->lowmem_limit);
+}
+
+void
+vm_set_lowmem_limit(struct vmctx *ctx, uint32_t limit)
+{
+
+	ctx->lowmem_limit = limit;
+}
+
+static int
+setup_memory_segment(struct vmctx *ctx, vm_paddr_t gpa, size_t len, char **addr)
+{
 	int error;
 	struct vm_memory_segment seg;
@@ -165,22 +188,71 @@
 	seg.gpa = gpa;
 	seg.len = len;
 	error = ioctl(ctx->fd, VM_MAP_MEMORY, &seg);
-	if (error == 0 && mapaddr != NULL) {
-		*mapaddr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
+	if (error == 0 && addr != NULL) {
+		*addr = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 		    ctx->fd, gpa);
 	}
 	return (error);
 }
-char *
-vm_map_memory(struct vmctx *ctx, vm_paddr_t gpa, size_t len)
+int
+vm_setup_memory(struct vmctx *ctx, size_t memsize, enum vm_mmap_style vms)
 {
+	char **addr;
+	int error;
-	/* Map 'len' bytes of memory at guest physical address 'gpa' */
-	return ((char *)mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
-	    ctx->fd, gpa));
+	/* XXX VM_MMAP_SPARSE not implemented yet */
+	assert(vms == VM_MMAP_NONE || vms == VM_MMAP_ALL);
+	ctx->vms = vms;
+
+	/*
+	 * If 'memsize' cannot fit entirely in the 'lowmem' segment then
+	 * create another 'highmem' segment above 4GB for the remainder.
+	 */
+	if (memsize > ctx->lowmem_limit) {
+		ctx->lowmem = ctx->lowmem_limit;
+		ctx->highmem = memsize - ctx->lowmem;
+	} else {
+		ctx->lowmem = memsize;
+		ctx->highmem = 0;
+	}
+
+	if (ctx->lowmem > 0) {
+		addr = (vms == VM_MMAP_ALL) ? &ctx->lowmem_addr : NULL;
+		error = setup_memory_segment(ctx, 0, ctx->lowmem, addr);
+		if (error)
+			return (error);
+	}
+
+	if (ctx->highmem > 0) {
+		addr = (vms == VM_MMAP_ALL) ? &ctx->highmem_addr : NULL;
+		error = setup_memory_segment(ctx, 4*GB, ctx->highmem, addr);
+		if (error)
+			return (error);
+	}
+
+	return (0);
 }
+void *
+vm_map_gpa(struct vmctx *ctx, vm_paddr_t gaddr, size_t len)
+{
+
+	/* XXX VM_MMAP_SPARSE not implemented yet */
+	assert(ctx->vms == VM_MMAP_ALL);
+
+	if (gaddr < ctx->lowmem && gaddr + len <= ctx->lowmem)
+		return ((void *)(ctx->lowmem_addr + gaddr));
+
+	if (gaddr >= 4*GB) {
+		gaddr -= 4*GB;
+		if (gaddr < ctx->highmem && gaddr + len <= ctx->highmem)
+			return ((void *)(ctx->highmem_addr + gaddr));
+	}
+
+	return (NULL);
+}
+
 int
 vm_set_desc(struct vmctx *ctx, int vcpu, int reg,
 	    uint64_t base, uint32_t limit, uint32_t access)
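
For reviewers, here is a minimal usage sketch (not part of the patch) of the reworked libvmmapi memory interface. It shows the intended calling sequence after this change: a single memory size is handed to vm_setup_memory(), which internally splits it into a segment below the lowmem limit and, if needed, one above 4GB, and vm_map_gpa() replaces the old lomem_addr/himem_addr pointer arithmetic. The VM name "demo-vm", the 2GB size and the error handling are illustrative only.

#include <sys/types.h>

#include <err.h>
#include <stdio.h>

#include <vmmapi.h>

#define	MB	(1024UL * 1024)

int
main(void)
{
	struct vmctx *ctx;
	void *p;
	int error;

	/* Create and open the VM; "demo-vm" is an arbitrary example name. */
	if (vm_create("demo-vm") != 0)
		err(1, "vm_create");
	if ((ctx = vm_open("demo-vm")) == NULL)
		err(1, "vm_open");

	/*
	 * One call replaces the old lowmem/highmem pair: 2GB fits below the
	 * default 3GB lowmem limit, so only a single segment is created.
	 * VM_MMAP_ALL maps the guest memory into this process up front.
	 */
	error = vm_setup_memory(ctx, 2048 * MB, VM_MMAP_ALL);
	if (error != 0)
		errx(1, "vm_setup_memory returned %d", error);

	/* Translate guest-physical address 0 instead of using lomem_addr. */
	p = vm_map_gpa(ctx, 0, 4096);
	printf("GPA 0 -> host %p, lowmem limit 0x%x\n",
	    p, vm_get_lowmem_limit(ctx));

	return (0);
}

Building this requires linking against libvmmapi (-lvmmapi) and running on a host with vmm.ko loaded and sufficient privileges to create /dev/vmm entries.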