Diffstat (limited to 'drivers/xen/privcmd.c')
-rw-r--r-- | drivers/xen/privcmd.c | 48 |
1 files changed, 24 insertions, 24 deletions
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 5a296161d843..5e9adac928e6 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -193,16 +193,16 @@ static int traverse_pages_block(unsigned nelem, size_t size,
 	return ret;
 }
 
-struct mmap_mfn_state {
+struct mmap_gfn_state {
 	unsigned long va;
 	struct vm_area_struct *vma;
 	domid_t domain;
 };
 
-static int mmap_mfn_range(void *data, void *state)
+static int mmap_gfn_range(void *data, void *state)
 {
 	struct privcmd_mmap_entry *msg = data;
-	struct mmap_mfn_state *st = state;
+	struct mmap_gfn_state *st = state;
 	struct vm_area_struct *vma = st->vma;
 	int rc;
 
@@ -216,7 +216,7 @@ static int mmap_mfn_range(void *data, void *state)
 	    ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
 		return -EINVAL;
 
-	rc = xen_remap_domain_mfn_range(vma,
+	rc = xen_remap_domain_gfn_range(vma,
 					msg->va & PAGE_MASK,
 					msg->mfn, msg->npages,
 					vma->vm_page_prot,
@@ -236,7 +236,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
 	struct vm_area_struct *vma;
 	int rc;
 	LIST_HEAD(pagelist);
-	struct mmap_mfn_state state;
+	struct mmap_gfn_state state;
 
 	/* We only support privcmd_ioctl_mmap_batch for auto translated. */
 	if (xen_feature(XENFEAT_auto_translated_physmap))
@@ -273,7 +273,7 @@ static long privcmd_ioctl_mmap(void __user *udata)
 
 	rc = traverse_pages(mmapcmd.num, sizeof(struct privcmd_mmap_entry),
 			    &pagelist,
-			    mmap_mfn_range, &state);
+			    mmap_gfn_range, &state);
 
 
 out_up:
@@ -299,18 +299,18 @@ struct mmap_batch_state {
 	int global_error;
 	int version;
 
-	/* User-space mfn array to store errors in the second pass for V1. */
-	xen_pfn_t __user *user_mfn;
+	/* User-space gfn array to store errors in the second pass for V1. */
+	xen_pfn_t __user *user_gfn;
 	/* User-space int array to store errors in the second pass for V2. */
 	int __user *user_err;
 };
 
-/* auto translated dom0 note: if domU being created is PV, then mfn is
- * mfn(addr on bus). If it's auto xlated, then mfn is pfn (input to HAP).
+/* auto translated dom0 note: if domU being created is PV, then gfn is
+ * mfn(addr on bus). If it's auto xlated, then gfn is pfn (input to HAP).
  */
 static int mmap_batch_fn(void *data, int nr, void *state)
 {
-	xen_pfn_t *mfnp = data;
+	xen_pfn_t *gfnp = data;
 	struct mmap_batch_state *st = state;
 	struct vm_area_struct *vma = st->vma;
 	struct page **pages = vma->vm_private_data;
@@ -321,8 +321,8 @@ static int mmap_batch_fn(void *data, int nr, void *state)
 		cur_pages = &pages[st->index];
 
 	BUG_ON(nr < 0);
-	ret = xen_remap_domain_mfn_array(st->vma, st->va & PAGE_MASK, mfnp, nr,
-					 (int *)mfnp, st->vma->vm_page_prot,
+	ret = xen_remap_domain_gfn_array(st->vma, st->va & PAGE_MASK, gfnp, nr,
+					 (int *)gfnp, st->vma->vm_page_prot,
 					 st->domain, cur_pages);
 
 	/* Adjust the global_error? */
@@ -347,22 +347,22 @@ static int mmap_return_error(int err, struct mmap_batch_state *st)
 
 	if (st->version == 1) {
 		if (err) {
-			xen_pfn_t mfn;
+			xen_pfn_t gfn;
 
-			ret = get_user(mfn, st->user_mfn);
+			ret = get_user(gfn, st->user_gfn);
 			if (ret < 0)
 				return ret;
 			/*
 			 * V1 encodes the error codes in the 32bit top
-			 * nibble of the mfn (with its known
+			 * nibble of the gfn (with its known
 			 * limitations vis-a-vis 64 bit callers).
 			 */
-			mfn |= (err == -ENOENT) ?
+			gfn |= (err == -ENOENT) ?
 				PRIVCMD_MMAPBATCH_PAGED_ERROR :
 				PRIVCMD_MMAPBATCH_MFN_ERROR;
-			return __put_user(mfn, st->user_mfn++);
+			return __put_user(gfn, st->user_gfn++);
 		} else
-			st->user_mfn++;
+			st->user_gfn++;
 	} else { /* st->version == 2 */
 		if (err)
 			return __put_user(err, st->user_err++);
@@ -388,7 +388,7 @@ static int mmap_return_errors(void *data, int nr, void *state)
 	return 0;
 }
 
-/* Allocate pfns that are then mapped with gmfns from foreign domid. Update
+/* Allocate pfns that are then mapped with gfns from foreign domid. Update
 * the vma with the page info to use later.
 * Returns: 0 if success, otherwise -errno
 */
@@ -414,7 +414,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
 	return 0;
 }
 
-static struct vm_operations_struct privcmd_vm_ops;
+static const struct vm_operations_struct privcmd_vm_ops;
 
 static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 {
@@ -526,7 +526,7 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
 	if (state.global_error) {
 		/* Write back errors in second pass. */
-		state.user_mfn = (xen_pfn_t *)m.arr;
+		state.user_gfn = (xen_pfn_t *)m.arr;
 		state.user_err = m.err;
 		ret = traverse_pages_block(m.num, sizeof(xen_pfn_t),
 					   &pagelist, mmap_return_errors, &state);
@@ -587,7 +587,7 @@ static void privcmd_close(struct vm_area_struct *vma)
 	if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
 		return;
 
-	rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+	rc = xen_unmap_domain_gfn_range(vma, numpgs, pages);
 	if (rc == 0)
 		free_xenballooned_pages(numpgs, pages);
 	else
@@ -605,7 +605,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }
 
-static struct vm_operations_struct privcmd_vm_ops = {
+static const struct vm_operations_struct privcmd_vm_ops = {
 	.close = privcmd_close,
 	.fault = privcmd_fault
 };
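Note on the V1 error-reporting path touched above: as the comment in mmap_return_error() states, the V1 IOCTL_PRIVCMD_MMAPBATCH interface reports per-frame errors by OR-ing an error class into the top nibble of each frame number written back to user space. The sketch below is a minimal, hypothetical user-space decoder for one entry of that array; it is not part of this change, classify_v1_entry() is an invented helper name, and the mask values are copied from include/uapi/xen/privcmd.h (verify against your own headers).

#include <stdint.h>
#include <stdio.h>

/* Values as defined in include/uapi/xen/privcmd.h. */
#define PRIVCMD_MMAPBATCH_MFN_ERROR	0xf0000000U	/* generic failure: whole top nibble set */
#define PRIVCMD_MMAPBATCH_PAGED_ERROR	0x80000000U	/* -ENOENT: frame is currently paged out */

/* Classify one frame returned through the V1 array (hypothetical helper).
 * Subject to the "known limitations vis-a-vis 64 bit callers" noted in the
 * kernel comment: a genuine 64-bit frame number may also carry these bits.
 */
static void classify_v1_entry(uint64_t frame)
{
	uint32_t err = (uint32_t)frame & PRIVCMD_MMAPBATCH_MFN_ERROR;

	if (err == PRIVCMD_MMAPBATCH_PAGED_ERROR)
		printf("entry paged out (-ENOENT), retry later\n");
	else if (err == PRIVCMD_MMAPBATCH_MFN_ERROR)
		printf("entry failed to map\n");
	else
		printf("entry %#llx mapped\n", (unsigned long long)frame);
}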