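/*
 * Backend interface to xenstore for privileged userspace: the
 * "xen/xenbus_backend" misc device registered below lets a
 * CAP_SYS_ADMIN process in the initial domain query the current
 * xenstore event channel, set up a new unbound event channel and
 * grant reference for a given domain's xenstored, and mmap the
 * shared xenstore interface page.
 */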
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/init.h>
#include <linux/capability.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/xenbus.h>
#include <xen/xenbus_dev.h>
#include <xen/grant_table.h>
#include <xen/events.h>
#include <asm/xen/hypervisor.h>

#include "xenbus_comms.h"

static int xenbus_backend_open(struct inode *inode, struct file *filp)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	return nonseekable_open(inode, filp);
}

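/*
 * Set up a xenstore connection for @domid: grant it access to the local
 * xenstore interface page via the reserved xenstore grant reference and
 * allocate an unbound event channel for it to bind to.  Fails with
 * -EEXIST if xenstored is already running; see the comment in the
 * function body for why.
 */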
static long xenbus_alloc(domid_t domid)
{
	struct evtchn_alloc_unbound arg;
	int err = -EEXIST;

	xs_suspend();

	/* If xenstored_ready is nonzero, that means we have already talked to
	 * xenstore and set up watches. These watches will be restored by
	 * xs_resume, but that requires communication over the port established
	 * below that is not visible to anyone until the ioctl returns.
	 *
	 * This can be resolved by splitting the ioctl into two parts
	 * (postponing the resume until xenstored is active) but this is
	 * unnecessarily complex for the intended use where xenstored is only
	 * started once - so return -EEXIST if it's already running.
	 */
	if (xenstored_ready)
		goto out_err;

	gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
					virt_to_gfn(xen_store_interface),
					0 /* writable */);

	arg.dom = DOMID_SELF;
	arg.remote_dom = domid;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &arg);
	if (err)
		goto out_err;

	if (xen_store_evtchn > 0)
		xb_deinit_comms();

	xen_store_evtchn = arg.port;

	xs_resume();

	return arg.port;

out_err:
	xs_suspend_cancel();

	return err;
}

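/*
 * ioctl interface:
 *   IOCTL_XENBUS_BACKEND_EVTCHN - return the current xenstore event
 *                                 channel, or -ENODEV if none is set up.
 *   IOCTL_XENBUS_BACKEND_SETUP  - set up a xenstore connection for the
 *                                 domain id passed in @data.
 */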
static long xenbus_backend_ioctl(struct file *file, unsigned int cmd,
				 unsigned long data)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case IOCTL_XENBUS_BACKEND_EVTCHN:
		if (xen_store_evtchn > 0)
			return xen_store_evtchn;
		return -ENODEV;
	case IOCTL_XENBUS_BACKEND_SETUP:
		return xenbus_alloc(data);
	default:
		return -ENOTTY;
	}
}

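/* Map the (single) shared xenstore interface page into userspace. */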
static int xenbus_backend_mmap(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
		return -EINVAL;

	if (remap_pfn_range(vma, vma->vm_start,
			    virt_to_pfn(xen_store_interface),
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

static const struct file_operations xenbus_backend_fops = {
	.open = xenbus_backend_open,
	.mmap = xenbus_backend_mmap,
	.unlocked_ioctl = xenbus_backend_ioctl,
};

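/*
 * With devtmpfs/udev the "xen/xenbus_backend" name typically shows up
 * as /dev/xen/xenbus_backend.
 */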
static struct miscdevice xenbus_backend_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/xenbus_backend",
	.fops = &xenbus_backend_fops,
};

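/* Register the device only in the initial domain, which hosts the backend. */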
static int __init xenbus_backend_init(void)
{
	int err;

	if (!xen_initial_domain())
		return -ENODEV;

	err = misc_register(&xenbus_backend_dev);
	if (err)
		pr_err("Could not register xenbus backend device\n");

	return err;
}
device_initcall(xenbus_backend_init);

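/*
 * Illustrative userspace sketch (hypothetical, not part of this driver):
 * a CAP_SYS_ADMIN process could query the existing xenstore event channel
 * and map the interface page roughly as follows, assuming the ioctl
 * numbers from the same definitions this file uses (xenbus_dev.h, or the
 * toolstack's copy of them) and a /dev/xen/xenbus_backend device node:
 *
 *	int fd = open("/dev/xen/xenbus_backend", O_RDWR);
 *	int port = ioctl(fd, IOCTL_XENBUS_BACKEND_EVTCHN);
 *	void *intf = mmap(NULL, sysconf(_SC_PAGESIZE),
 *			  PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */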