From 193da6092764ab693da7170c5badbf60d7758c1d Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 26 Nov 2008 12:03:54 +0100 Subject: fuse: move FUSE_MINOR to miscdevice.h Move FUSE_MINOR to miscdevice.h. While at it, de-uglify the file. Signed-off-by: Tejun Heo Signed-off-by: Miklos Szeredi --- include/linux/fuse.h | 3 --- include/linux/miscdevice.h | 42 +++++++++++++++++++++--------------------- 2 files changed, 21 insertions(+), 24 deletions(-) (limited to 'include') diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 350fe9767bbc..7caa473306e4 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -40,9 +40,6 @@ /** The major number of the fuse character device */ #define FUSE_MAJOR MISC_MAJOR -/** The minor number of the fuse character device */ -#define FUSE_MINOR 229 - /* Make sure all structures are padded to 64bit boundary, so 32bit userspace works under 64bit kernels */ diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h index 26433ec520b3..a820f816a49e 100644 --- a/include/linux/miscdevice.h +++ b/include/linux/miscdevice.h @@ -3,33 +3,33 @@ #include #include -#define PSMOUSE_MINOR 1 -#define MS_BUSMOUSE_MINOR 2 -#define ATIXL_BUSMOUSE_MINOR 3 -/*#define AMIGAMOUSE_MINOR 4 FIXME OBSOLETE */ -#define ATARIMOUSE_MINOR 5 -#define SUN_MOUSE_MINOR 6 -#define APOLLO_MOUSE_MINOR 7 -#define PC110PAD_MINOR 9 -/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */ +#define PSMOUSE_MINOR 1 +#define MS_BUSMOUSE_MINOR 2 +#define ATIXL_BUSMOUSE_MINOR 3 +/*#define AMIGAMOUSE_MINOR 4 FIXME OBSOLETE */ +#define ATARIMOUSE_MINOR 5 +#define SUN_MOUSE_MINOR 6 +#define APOLLO_MOUSE_MINOR 7 +#define PC110PAD_MINOR 9 +/*#define ADB_MOUSE_MINOR 10 FIXME OBSOLETE */ #define WATCHDOG_MINOR 130 /* Watchdog timer */ #define TEMP_MINOR 131 /* Temperature Sensor */ -#define RTC_MINOR 135 +#define RTC_MINOR 135 #define EFI_RTC_MINOR 136 /* EFI Time services */ -#define SUN_OPENPROM_MINOR 139 +#define SUN_OPENPROM_MINOR 139 #define DMAPI_MINOR 140 /* DMAPI */ -#define NVRAM_MINOR 144 -#define SGI_MMTIMER 153 +#define NVRAM_MINOR 144 +#define SGI_MMTIMER 153 #define STORE_QUEUE_MINOR 155 -#define I2O_MINOR 166 +#define I2O_MINOR 166 #define MICROCODE_MINOR 184 -#define MWAVE_MINOR 219 /* ACP/Mwave Modem */ -#define MPT_MINOR 220 -#define MISC_DYNAMIC_MINOR 255 - -#define TUN_MINOR 200 -#define HPET_MINOR 228 -#define KVM_MINOR 232 +#define TUN_MINOR 200 +#define MWAVE_MINOR 219 /* ACP/Mwave Modem */ +#define MPT_MINOR 220 +#define HPET_MINOR 228 +#define FUSE_MINOR 229 +#define KVM_MINOR 232 +#define MISC_DYNAMIC_MINOR 255 struct device; -- cgit v1.2.3 From 59efec7b903987dcb60b9ebc85c7acd4443a11a1 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 26 Nov 2008 12:03:55 +0100 Subject: fuse: implement ioctl support Generic ioctl support is tricky to implement because only the ioctl implementation itself knows which memory regions need to be read and/or written. To support this, fuse client can request retry of ioctl specifying memory regions to read and write. Deep copying (nested pointers) can be implemented by retrying multiple times resolving one depth of dereference at a time. For security and cleanliness considerations, ioctl implementation has restricted mode where the kernel determines data transfer directions and sizes using the _IOC_*() macros on the ioctl command. In this mode, retry is not allowed. For all FUSE servers, restricted mode is enforced. Unrestricted ioctl will be used by CUSE. 
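To make the retry mechanism concrete, here is a minimal user-space sketch of the first reply in such an exchange: the server asks the kernel to copy in one extra input region and re-send the request. The helper name, the already-open /dev/fuse descriptor and the raw writev() are assumptions for illustration only, and a reply like this is accepted only when the kernel set FUSE_IOCTL_UNRESTRICTED in the request flags (the planned CUSE case); an ordinary FUSE server requesting retry gets -EIO.

#include <stdint.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/fuse.h>

/*
 * Hypothetical helper, not part of this patch: reply to a FUSE_IOCTL
 * request with FUSE_IOCTL_RETRY, asking the kernel to copy in one
 * region of the caller's memory (arg/arg_size, e.g. the ioctl
 * argument structure) and retry.  Assumes the server and the kernel
 * agree on the struct iovec layout (same word size).
 */
static int reply_ioctl_retry(int fuse_fd, uint64_t unique,
                             uint64_t arg, size_t arg_size)
{
        struct fuse_out_header outh;
        struct fuse_ioctl_out iout;
        struct iovec want = {
                .iov_base = (void *)(uintptr_t)arg,     /* caller's pointer */
                .iov_len  = arg_size,
        };
        struct iovec vec[3];

        memset(&iout, 0, sizeof(iout));
        iout.flags   = FUSE_IOCTL_RETRY;
        iout.in_iovs = 1;                       /* one region to copy in */

        memset(&outh, 0, sizeof(outh));
        outh.unique = unique;                   /* answers the original request */
        outh.len    = sizeof(outh) + sizeof(iout) + sizeof(want);

        vec[0].iov_base = &outh; vec[0].iov_len = sizeof(outh);
        vec[1].iov_base = &iout; vec[1].iov_len = sizeof(iout);
        vec[2].iov_base = &want; vec[2].iov_len = sizeof(want);

        return writev(fuse_fd, vec, 3) < 0 ? -1 : 0;
}

On the retried request the kernel re-sends FUSE_IOCTL with in_size covering the requested region, so the server can inspect the copied-in data and either answer normally or ask for another round of dereferencing.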
Plese read the comment on top of fs/fuse/file.c::fuse_file_do_ioctl() for more information. Signed-off-by: Tejun Heo Signed-off-by: Miklos Szeredi --- fs/fuse/file.c | 280 +++++++++++++++++++++++++++++++++++++++++++++++++++ include/linux/fuse.h | 32 ++++++ 2 files changed, 312 insertions(+) (limited to 'include') diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 617269803913..baed06ea7622 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1469,6 +1469,282 @@ static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin) return retval; } +static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov, + unsigned int nr_segs, size_t bytes, bool to_user) +{ + struct iov_iter ii; + int page_idx = 0; + + if (!bytes) + return 0; + + iov_iter_init(&ii, iov, nr_segs, bytes, 0); + + while (iov_iter_count(&ii)) { + struct page *page = pages[page_idx++]; + size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii)); + void *kaddr, *map; + + kaddr = map = kmap(page); + + while (todo) { + char __user *uaddr = ii.iov->iov_base + ii.iov_offset; + size_t iov_len = ii.iov->iov_len - ii.iov_offset; + size_t copy = min(todo, iov_len); + size_t left; + + if (!to_user) + left = copy_from_user(kaddr, uaddr, copy); + else + left = copy_to_user(uaddr, kaddr, copy); + + if (unlikely(left)) + return -EFAULT; + + iov_iter_advance(&ii, copy); + todo -= copy; + kaddr += copy; + } + + kunmap(map); + } + + return 0; +} + +/* + * For ioctls, there is no generic way to determine how much memory + * needs to be read and/or written. Furthermore, ioctls are allowed + * to dereference the passed pointer, so the parameter requires deep + * copying but FUSE has no idea whatsoever about what to copy in or + * out. + * + * This is solved by allowing FUSE server to retry ioctl with + * necessary in/out iovecs. Let's assume the ioctl implementation + * needs to read in the following structure. + * + * struct a { + * char *buf; + * size_t buflen; + * } + * + * On the first callout to FUSE server, inarg->in_size and + * inarg->out_size will be NULL; then, the server completes the ioctl + * with FUSE_IOCTL_RETRY set in out->flags, out->in_iovs set to 1 and + * the actual iov array to + * + * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) } } + * + * which tells FUSE to copy in the requested area and retry the ioctl. + * On the second round, the server has access to the structure and + * from that it can tell what to look for next, so on the invocation, + * it sets FUSE_IOCTL_RETRY, out->in_iovs to 2 and iov array to + * + * { { .iov_base = inarg.arg, .iov_len = sizeof(struct a) }, + * { .iov_base = a.buf, .iov_len = a.buflen } } + * + * FUSE will copy both struct a and the pointed buffer from the + * process doing the ioctl and retry ioctl with both struct a and the + * buffer. + * + * This time, FUSE server has everything it needs and completes ioctl + * without FUSE_IOCTL_RETRY which finishes the ioctl call. + * + * Copying data out works the same way. + * + * Note that if FUSE_IOCTL_UNRESTRICTED is clear, the kernel + * automatically initializes in and out iovs by decoding @cmd with + * _IOC_* macros and the server is not allowed to request RETRY. This + * limits ioctl data transfers to well-formed ioctls and is the forced + * behavior for all FUSE servers. 
+ */ +static long fuse_file_do_ioctl(struct file *file, unsigned int cmd, + unsigned long arg, unsigned int flags) +{ + struct inode *inode = file->f_dentry->d_inode; + struct fuse_file *ff = file->private_data; + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_ioctl_in inarg = { + .fh = ff->fh, + .cmd = cmd, + .arg = arg, + .flags = flags + }; + struct fuse_ioctl_out outarg; + struct fuse_req *req = NULL; + struct page **pages = NULL; + struct page *iov_page = NULL; + struct iovec *in_iov = NULL, *out_iov = NULL; + unsigned int in_iovs = 0, out_iovs = 0, num_pages = 0, max_pages; + size_t in_size, out_size, transferred; + int err; + + /* assume all the iovs returned by client always fits in a page */ + BUILD_BUG_ON(sizeof(struct iovec) * FUSE_IOCTL_MAX_IOV > PAGE_SIZE); + + if (!fuse_allow_task(fc, current)) + return -EACCES; + + err = -EIO; + if (is_bad_inode(inode)) + goto out; + + err = -ENOMEM; + pages = kzalloc(sizeof(pages[0]) * FUSE_MAX_PAGES_PER_REQ, GFP_KERNEL); + iov_page = alloc_page(GFP_KERNEL); + if (!pages || !iov_page) + goto out; + + /* + * If restricted, initialize IO parameters as encoded in @cmd. + * RETRY from server is not allowed. + */ + if (!(flags & FUSE_IOCTL_UNRESTRICTED)) { + struct iovec *iov = page_address(iov_page); + + iov->iov_base = (void *)arg; + iov->iov_len = _IOC_SIZE(cmd); + + if (_IOC_DIR(cmd) & _IOC_WRITE) { + in_iov = iov; + in_iovs = 1; + } + + if (_IOC_DIR(cmd) & _IOC_READ) { + out_iov = iov; + out_iovs = 1; + } + } + + retry: + inarg.in_size = in_size = iov_length(in_iov, in_iovs); + inarg.out_size = out_size = iov_length(out_iov, out_iovs); + + /* + * Out data can be used either for actual out data or iovs, + * make sure there always is at least one page. + */ + out_size = max_t(size_t, out_size, PAGE_SIZE); + max_pages = DIV_ROUND_UP(max(in_size, out_size), PAGE_SIZE); + + /* make sure there are enough buffer pages and init request with them */ + err = -ENOMEM; + if (max_pages > FUSE_MAX_PAGES_PER_REQ) + goto out; + while (num_pages < max_pages) { + pages[num_pages] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); + if (!pages[num_pages]) + goto out; + num_pages++; + } + + req = fuse_get_req(fc); + if (IS_ERR(req)) { + err = PTR_ERR(req); + req = NULL; + goto out; + } + memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages); + req->num_pages = num_pages; + + /* okay, let's send it to the client */ + req->in.h.opcode = FUSE_IOCTL; + req->in.h.nodeid = get_node_id(inode); + req->in.numargs = 1; + req->in.args[0].size = sizeof(inarg); + req->in.args[0].value = &inarg; + if (in_size) { + req->in.numargs++; + req->in.args[1].size = in_size; + req->in.argpages = 1; + + err = fuse_ioctl_copy_user(pages, in_iov, in_iovs, in_size, + false); + if (err) + goto out; + } + + req->out.numargs = 2; + req->out.args[0].size = sizeof(outarg); + req->out.args[0].value = &outarg; + req->out.args[1].size = out_size; + req->out.argpages = 1; + req->out.argvar = 1; + + request_send(fc, req); + err = req->out.h.error; + transferred = req->out.args[1].size; + fuse_put_request(fc, req); + req = NULL; + if (err) + goto out; + + /* did it ask for retry? */ + if (outarg.flags & FUSE_IOCTL_RETRY) { + char *vaddr; + + /* no retry if in restricted mode */ + err = -EIO; + if (!(flags & FUSE_IOCTL_UNRESTRICTED)) + goto out; + + in_iovs = outarg.in_iovs; + out_iovs = outarg.out_iovs; + + /* + * Make sure things are in boundary, separate checks + * are to protect against overflow. 
+ */ + err = -ENOMEM; + if (in_iovs > FUSE_IOCTL_MAX_IOV || + out_iovs > FUSE_IOCTL_MAX_IOV || + in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV) + goto out; + + err = -EIO; + if ((in_iovs + out_iovs) * sizeof(struct iovec) != transferred) + goto out; + + /* okay, copy in iovs and retry */ + vaddr = kmap_atomic(pages[0], KM_USER0); + memcpy(page_address(iov_page), vaddr, transferred); + kunmap_atomic(vaddr, KM_USER0); + + in_iov = page_address(iov_page); + out_iov = in_iov + in_iovs; + + goto retry; + } + + err = -EIO; + if (transferred > inarg.out_size) + goto out; + + err = fuse_ioctl_copy_user(pages, out_iov, out_iovs, transferred, true); + out: + if (req) + fuse_put_request(fc, req); + if (iov_page) + __free_page(iov_page); + while (num_pages) + __free_page(pages[--num_pages]); + kfree(pages); + + return err ? err : outarg.result; +} + +static long fuse_file_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return fuse_file_do_ioctl(file, cmd, arg, 0); +} + +static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + return fuse_file_do_ioctl(file, cmd, arg, FUSE_IOCTL_COMPAT); +} + static const struct file_operations fuse_file_operations = { .llseek = fuse_file_llseek, .read = do_sync_read, @@ -1483,6 +1759,8 @@ static const struct file_operations fuse_file_operations = { .lock = fuse_file_lock, .flock = fuse_file_flock, .splice_read = generic_file_splice_read, + .unlocked_ioctl = fuse_file_ioctl, + .compat_ioctl = fuse_file_compat_ioctl, }; static const struct file_operations fuse_direct_io_file_operations = { @@ -1495,6 +1773,8 @@ static const struct file_operations fuse_direct_io_file_operations = { .fsync = fuse_fsync, .lock = fuse_file_lock, .flock = fuse_file_flock, + .unlocked_ioctl = fuse_file_ioctl, + .compat_ioctl = fuse_file_compat_ioctl, /* no mmap and splice_read */ }; diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 7caa473306e4..608e300ae883 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -148,6 +148,21 @@ struct fuse_file_lock { */ #define FUSE_READ_LOCKOWNER (1 << 1) +/** + * Ioctl flags + * + * FUSE_IOCTL_COMPAT: 32bit compat ioctl on 64bit machine + * FUSE_IOCTL_UNRESTRICTED: not restricted to well-formed ioctls, retry allowed + * FUSE_IOCTL_RETRY: retry with new iovecs + * + * FUSE_IOCTL_MAX_IOV: maximum of in_iovecs + out_iovecs + */ +#define FUSE_IOCTL_COMPAT (1 << 0) +#define FUSE_IOCTL_UNRESTRICTED (1 << 1) +#define FUSE_IOCTL_RETRY (1 << 2) + +#define FUSE_IOCTL_MAX_IOV 256 + enum fuse_opcode { FUSE_LOOKUP = 1, FUSE_FORGET = 2, /* no reply */ @@ -185,6 +200,7 @@ enum fuse_opcode { FUSE_INTERRUPT = 36, FUSE_BMAP = 37, FUSE_DESTROY = 38, + FUSE_IOCTL = 39, }; /* The read buffer is required to be at least 8k, but may be much larger */ @@ -385,6 +401,22 @@ struct fuse_bmap_out { __u64 block; }; +struct fuse_ioctl_in { + __u64 fh; + __u32 flags; + __u32 cmd; + __u64 arg; + __u32 in_size; + __u32 out_size; +}; + +struct fuse_ioctl_out { + __s32 result; + __u32 flags; + __u32 in_iovs; + __u32 out_iovs; +}; + struct fuse_in_header { __u32 len; __u32 opcode; -- cgit v1.2.3 From 8599396b5062bf6bd2a0b433503849e2322df1c2 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 26 Nov 2008 12:03:55 +0100 Subject: fuse: implement unsolicited notification Clients always used to write only in response to read requests. To implement poll efficiently, clients should be able to issue unsolicited notifications. This patch implements basic notification support. 
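As a concrete illustration of the mechanism described in the next sentence, an unsolicited notification is simply a write to /dev/fuse whose header carries unique == 0 and the notification code in the error field. A minimal user-space sketch follows; the helper name and the already-open descriptor are assumptions, and since this patch defines no notification codes yet, any code sent at this point in the series is still answered with -EINVAL.

#include <stdint.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>
#include <linux/fuse.h>

/*
 * Hypothetical helper, not part of this patch: emit an unsolicited
 * notification.  'code' must be a fuse_notify_code value the kernel
 * understands (none exist yet at this point in the series).
 */
static int send_notify(int fuse_fd, int32_t code,
                       const void *payload, size_t payload_size)
{
        struct fuse_out_header outh;
        struct iovec vec[2];

        memset(&outh, 0, sizeof(outh));
        outh.unique = 0;                        /* zero unique == notification */
        outh.error  = code;                     /* notification code */
        outh.len    = sizeof(outh) + payload_size;

        vec[0].iov_base = &outh;
        vec[0].iov_len  = sizeof(outh);
        vec[1].iov_base = (void *)payload;
        vec[1].iov_len  = payload_size;

        return writev(fuse_fd, vec, payload_size ? 2 : 1) < 0 ? -1 : 0;
}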
Zero fuse_out_header.unique is now accepted and considered unsolicited notification and the error field contains notification code. This patch doesn't implement any actual notification. Signed-off-by: Tejun Heo Signed-off-by: Miklos Szeredi --- fs/fuse/dev.c | 27 +++++++++++++++++++++++++-- include/linux/fuse.h | 4 ++++ 2 files changed, 29 insertions(+), 2 deletions(-) (limited to 'include') diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index 225388f54ae7..ffd670bb8c8c 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -816,6 +816,15 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, return err; } +static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, + unsigned int size, struct fuse_copy_state *cs) +{ + switch (code) { + default: + return -EINVAL; + } +} + /* Look up request on processing list by unique ID */ static struct fuse_req *request_find(struct fuse_conn *fc, u64 unique) { @@ -879,9 +888,23 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, err = fuse_copy_one(&cs, &oh, sizeof(oh)); if (err) goto err_finish; + + err = -EINVAL; + if (oh.len != nbytes) + goto err_finish; + + /* + * Zero oh.unique indicates unsolicited notification message + * and error contains notification code. + */ + if (!oh.unique) { + err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs); + fuse_copy_finish(&cs); + return err ? err : nbytes; + } + err = -EINVAL; - if (!oh.unique || oh.error <= -1000 || oh.error > 0 || - oh.len != nbytes) + if (oh.error <= -1000 || oh.error > 0) goto err_finish; spin_lock(&fc->lock); diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 608e300ae883..abde9949e2c0 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -203,6 +203,10 @@ enum fuse_opcode { FUSE_IOCTL = 39, }; +enum fuse_notify_code { + FUSE_NOTIFY_CODE_MAX, +}; + /* The read buffer is required to be at least 8k, but may be much larger */ #define FUSE_MIN_READ_BUFFER 8192 -- cgit v1.2.3 From 95668a69a4bb862063c4d28a746e55107dee7b98 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Wed, 26 Nov 2008 12:03:55 +0100 Subject: fuse: implement poll support Implement poll support. Polled files are indexed using kh in a RB tree rooted at fuse_conn->polled_files. Client should send FUSE_NOTIFY_POLL notification once after processing FUSE_POLL which has FUSE_POLL_SCHEDULE_NOTIFY set. Sending notification unconditionally after the latest poll or everytime file content might have changed is inefficient but won't cause malfunction. fuse_file_poll() can sleep and requires patches from the following thread which allows f_op->poll() to sleep. 
http://thread.gmane.org/gmane.linux.kernel/726176 Signed-off-by: Tejun Heo Signed-off-by: Miklos Szeredi --- fs/fuse/dev.c | 19 ++++++++ fs/fuse/file.c | 132 +++++++++++++++++++++++++++++++++++++++++++++++++++ fs/fuse/fuse_i.h | 20 ++++++++ fs/fuse/inode.c | 1 + include/linux/fuse.h | 25 ++++++++++ 5 files changed, 197 insertions(+) (limited to 'include') diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ffd670bb8c8c..6176e444c76e 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -816,10 +816,29 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, return err; } +static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size, + struct fuse_copy_state *cs) +{ + struct fuse_notify_poll_wakeup_out outarg; + int err; + + if (size != sizeof(outarg)) + return -EINVAL; + + err = fuse_copy_one(cs, &outarg, sizeof(outarg)); + if (err) + return err; + + return fuse_notify_poll_wakeup(fc, &outarg); +} + static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code, unsigned int size, struct fuse_copy_state *cs) { switch (code) { + case FUSE_NOTIFY_POLL: + return fuse_notify_poll(fc, size, cs); + default: return -EINVAL; } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index a28ced678d38..b3a944e4bb9c 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -62,6 +62,8 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc) ff->kh = ++fc->khctr; spin_unlock(&fc->lock); } + RB_CLEAR_NODE(&ff->polled_node); + init_waitqueue_head(&ff->poll_wait); } return ff; } @@ -170,7 +172,11 @@ int fuse_release_common(struct inode *inode, struct file *file, int isdir) spin_lock(&fc->lock); list_del(&ff->write_entry); + if (!RB_EMPTY_NODE(&ff->polled_node)) + rb_erase(&ff->polled_node, &fc->polled_files); spin_unlock(&fc->lock); + + wake_up_interruptible_sync(&ff->poll_wait); /* * Normally this will send the RELEASE request, * however if some asynchronous READ or WRITE requests @@ -1749,6 +1755,130 @@ static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd, return fuse_file_do_ioctl(file, cmd, arg, FUSE_IOCTL_COMPAT); } +/* + * All files which have been polled are linked to RB tree + * fuse_conn->polled_files which is indexed by kh. Walk the tree and + * find the matching one. + */ +static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh, + struct rb_node **parent_out) +{ + struct rb_node **link = &fc->polled_files.rb_node; + struct rb_node *last = NULL; + + while (*link) { + struct fuse_file *ff; + + last = *link; + ff = rb_entry(last, struct fuse_file, polled_node); + + if (kh < ff->kh) + link = &last->rb_left; + else if (kh > ff->kh) + link = &last->rb_right; + else + return link; + } + + if (parent_out) + *parent_out = last; + return link; +} + +/* + * The file is about to be polled. Make sure it's on the polled_files + * RB tree. Note that files once added to the polled_files tree are + * not removed before the file is released. This is because a file + * polled once is likely to be polled again. 
+ */ +static void fuse_register_polled_file(struct fuse_conn *fc, + struct fuse_file *ff) +{ + spin_lock(&fc->lock); + if (RB_EMPTY_NODE(&ff->polled_node)) { + struct rb_node **link, *parent; + + link = fuse_find_polled_node(fc, ff->kh, &parent); + BUG_ON(*link); + rb_link_node(&ff->polled_node, parent, link); + rb_insert_color(&ff->polled_node, &fc->polled_files); + } + spin_unlock(&fc->lock); +} + +static unsigned fuse_file_poll(struct file *file, poll_table *wait) +{ + struct inode *inode = file->f_dentry->d_inode; + struct fuse_file *ff = file->private_data; + struct fuse_conn *fc = get_fuse_conn(inode); + struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh }; + struct fuse_poll_out outarg; + struct fuse_req *req; + int err; + + if (fc->no_poll) + return DEFAULT_POLLMASK; + + poll_wait(file, &ff->poll_wait, wait); + + /* + * Ask for notification iff there's someone waiting for it. + * The client may ignore the flag and always notify. + */ + if (waitqueue_active(&ff->poll_wait)) { + inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY; + fuse_register_polled_file(fc, ff); + } + + req = fuse_get_req(fc); + if (IS_ERR(req)) + return PTR_ERR(req); + + req->in.h.opcode = FUSE_POLL; + req->in.h.nodeid = get_node_id(inode); + req->in.numargs = 1; + req->in.args[0].size = sizeof(inarg); + req->in.args[0].value = &inarg; + req->out.numargs = 1; + req->out.args[0].size = sizeof(outarg); + req->out.args[0].value = &outarg; + request_send(fc, req); + err = req->out.h.error; + fuse_put_request(fc, req); + + if (!err) + return outarg.revents; + if (err == -ENOSYS) { + fc->no_poll = 1; + return DEFAULT_POLLMASK; + } + return POLLERR; +} + +/* + * This is called from fuse_handle_notify() on FUSE_NOTIFY_POLL and + * wakes up the poll waiters. + */ +int fuse_notify_poll_wakeup(struct fuse_conn *fc, + struct fuse_notify_poll_wakeup_out *outarg) +{ + u64 kh = outarg->kh; + struct rb_node **link; + + spin_lock(&fc->lock); + + link = fuse_find_polled_node(fc, kh, NULL); + if (*link) { + struct fuse_file *ff; + + ff = rb_entry(*link, struct fuse_file, polled_node); + wake_up_interruptible_sync(&ff->poll_wait); + } + + spin_unlock(&fc->lock); + return 0; +} + static const struct file_operations fuse_file_operations = { .llseek = fuse_file_llseek, .read = do_sync_read, @@ -1765,6 +1895,7 @@ static const struct file_operations fuse_file_operations = { .splice_read = generic_file_splice_read, .unlocked_ioctl = fuse_file_ioctl, .compat_ioctl = fuse_file_compat_ioctl, + .poll = fuse_file_poll, }; static const struct file_operations fuse_direct_io_file_operations = { @@ -1779,6 +1910,7 @@ static const struct file_operations fuse_direct_io_file_operations = { .flock = fuse_file_flock, .unlocked_ioctl = fuse_file_ioctl, .compat_ioctl = fuse_file_compat_ioctl, + .poll = fuse_file_poll, /* no mmap and splice_read */ }; diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index 86f013303828..986fbd4c1ff5 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -19,6 +19,8 @@ #include #include #include +#include +#include /** Max number of pages that can be used in a single read request */ #define FUSE_MAX_PAGES_PER_REQ 32 @@ -111,6 +113,12 @@ struct fuse_file { /** Entry on inode's write_files list */ struct list_head write_entry; + + /** RB node to be linked on fuse_conn->polled_files */ + struct rb_node polled_node; + + /** Wait queue head for poll */ + wait_queue_head_t poll_wait; }; /** One input argument of a request */ @@ -328,6 +336,9 @@ struct fuse_conn { /** The next unique kernel file handle */ u64 khctr; + /** rbtree of 
fuse_files waiting for poll events indexed by ph */ + struct rb_root polled_files; + /** Number of requests currently in the background */ unsigned num_background; @@ -416,6 +427,9 @@ struct fuse_conn { /** Is bmap not implemented by fs? */ unsigned no_bmap:1; + /** Is poll not implemented by fs? */ + unsigned no_poll:1; + /** Do multi-page cached writes */ unsigned big_writes:1; @@ -524,6 +538,12 @@ int fuse_release_common(struct inode *inode, struct file *file, int isdir); int fuse_fsync_common(struct file *file, struct dentry *de, int datasync, int isdir); +/** + * Notify poll wakeup + */ +int fuse_notify_poll_wakeup(struct fuse_conn *fc, + struct fuse_notify_poll_wakeup_out *outarg); + /** * Initialize file operations on a regular file */ diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 0e15bc221d23..ba7256128084 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -486,6 +486,7 @@ static struct fuse_conn *new_conn(struct super_block *sb) /* fuse does it's own writeback accounting */ fc->bdi.capabilities = BDI_CAP_NO_ACCT_WB; fc->khctr = 0; + fc->polled_files = RB_ROOT; fc->dev = sb->s_dev; err = bdi_init(&fc->bdi); if (err) diff --git a/include/linux/fuse.h b/include/linux/fuse.h index abde9949e2c0..5650cf033e73 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -163,6 +163,13 @@ struct fuse_file_lock { #define FUSE_IOCTL_MAX_IOV 256 +/** + * Poll flags + * + * FUSE_POLL_SCHEDULE_NOTIFY: request poll notify + */ +#define FUSE_POLL_SCHEDULE_NOTIFY (1 << 0) + enum fuse_opcode { FUSE_LOOKUP = 1, FUSE_FORGET = 2, /* no reply */ @@ -201,9 +208,11 @@ enum fuse_opcode { FUSE_BMAP = 37, FUSE_DESTROY = 38, FUSE_IOCTL = 39, + FUSE_POLL = 40, }; enum fuse_notify_code { + FUSE_NOTIFY_POLL = 1, FUSE_NOTIFY_CODE_MAX, }; @@ -421,6 +430,22 @@ struct fuse_ioctl_out { __u32 out_iovs; }; +struct fuse_poll_in { + __u64 fh; + __u64 kh; + __u32 flags; + __u32 padding; +}; + +struct fuse_poll_out { + __u32 revents; + __u32 padding; +}; + +struct fuse_notify_poll_wakeup_out { + __u64 kh; +}; + struct fuse_in_header { __u32 len; __u32 opcode; -- cgit v1.2.3 From 1f55ed06cf0c361b293b32e5947d35d173eff2aa Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Mon, 1 Dec 2008 19:14:02 +0100 Subject: fuse: update interface version Change interface version to 7.11 after adding the IOCTL and POLL messages. Also clean up the header a bit: - update copyright date to 2008 - fix checkpatch warning: WARNING: Use #include instead of - remove FUSE_MAJOR define, which is not being used any more Signed-off-by: Miklos Szeredi --- include/linux/fuse.h | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'include') diff --git a/include/linux/fuse.h b/include/linux/fuse.h index 5650cf033e73..162e5defe683 100644 --- a/include/linux/fuse.h +++ b/include/linux/fuse.h @@ -1,6 +1,6 @@ /* FUSE: Filesystem in Userspace - Copyright (C) 2001-2006 Miklos Szeredi + Copyright (C) 2001-2008 Miklos Szeredi This program can be distributed under the terms of the GNU GPL. See the file COPYING. 
@@ -20,26 +20,27 @@ * * 7.10 * - add nonseekable open flag + * + * 7.11 + * - add IOCTL message + * - add unsolicited notification support + * - add POLL message and NOTIFY_POLL notification */ #ifndef _LINUX_FUSE_H #define _LINUX_FUSE_H -#include <asm/types.h> -#include <linux/major.h> +#include <linux/types.h> /** Version number of this interface */ #define FUSE_KERNEL_VERSION 7 /** Minor version number of this interface */ -#define FUSE_KERNEL_MINOR_VERSION 10 +#define FUSE_KERNEL_MINOR_VERSION 11 /** The node ID of the root inode */ #define FUSE_ROOT_ID 1 -/** The major number of the fuse character device */ -#define FUSE_MAJOR MISC_MAJOR /* Make sure all structures are padded to 64bit boundary, so 32bit userspace works under 64bit kernels */ -- cgit v1.2.3
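Taken together, the 7.11 additions let a server answer poll requests and wake up waiters later. The sketch below ties the pieces from the poll patch above together on the server side: it answers FUSE_POLL, remembers the kernel handle (kh) when FUSE_POLL_SCHEDULE_NOTIFY was set, and later writes a FUSE_NOTIFY_POLL notification. The helper names, the single remembered handle and the raw access to an already-open /dev/fuse descriptor are assumptions for illustration, not part of the patches.

#include <stdint.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>
#include <poll.h>
#include <linux/fuse.h>

static uint64_t pending_kh;     /* handle to wake up later; only one for brevity */

/* Answer a FUSE_POLL request with the currently ready events. */
static void handle_poll(int fuse_fd, const struct fuse_in_header *inh,
                        const struct fuse_poll_in *arg, uint32_t ready)
{
        struct fuse_out_header outh;
        struct fuse_poll_out pout;
        struct iovec vec[2];

        if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY)
                pending_kh = arg->kh;           /* kernel wants a wakeup later */

        memset(&pout, 0, sizeof(pout));
        pout.revents = ready & (POLLIN | POLLOUT | POLLERR | POLLHUP);

        memset(&outh, 0, sizeof(outh));
        outh.unique = inh->unique;
        outh.len    = sizeof(outh) + sizeof(pout);

        vec[0].iov_base = &outh; vec[0].iov_len = sizeof(outh);
        vec[1].iov_base = &pout; vec[1].iov_len = sizeof(pout);
        writev(fuse_fd, vec, 2);
}

/* Called when the file becomes ready: wake the kernel's poll waiters. */
static void notify_poll_wakeup(int fuse_fd)
{
        struct fuse_out_header outh;
        struct fuse_notify_poll_wakeup_out wake = { .kh = pending_kh };
        struct iovec vec[2];

        memset(&outh, 0, sizeof(outh));
        outh.unique = 0;                        /* unsolicited notification */
        outh.error  = FUSE_NOTIFY_POLL;         /* notification code */
        outh.len    = sizeof(outh) + sizeof(wake);

        vec[0].iov_base = &outh; vec[0].iov_len = sizeof(outh);
        vec[1].iov_base = &wake; vec[1].iov_len = sizeof(wake);
        writev(fuse_fd, vec, 2);
}

An older kernel would reject such a notification write with -EINVAL, so a server that wants to send them can look at the minor version the kernel reports in the FUSE_INIT request. A small sketch, again only illustrative:

#include <linux/fuse.h>

static int kernel_has_7_11_features(const struct fuse_init_in *init)
{
        return init->major == FUSE_KERNEL_VERSION && init->minor >= 11;
}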