Diffstat (limited to 'fs/file.c')
-rw-r--r--  fs/file.c  123
1 file changed, 48 insertions(+), 75 deletions(-)
diff --git a/fs/file.c b/fs/file.c
index 655338effe9c..eb093e736972 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -272,59 +272,45 @@ static inline bool fd_is_open(unsigned int fd, const struct fdtable *fdt)
return test_bit(fd, fdt->open_fds);
}
-static unsigned int count_open_files(struct fdtable *fdt)
-{
- unsigned int size = fdt->max_fds;
- unsigned int i;
-
- /* Find the last open fd */
- for (i = size / BITS_PER_LONG; i > 0; ) {
- if (fdt->open_fds[--i])
- break;
- }
- i = (i + 1) * BITS_PER_LONG;
- return i;
-}
-
/*
* Note that a sane fdtable size always has to be a multiple of
* BITS_PER_LONG, since we have bitmaps that are sized by this.
*
- * 'max_fds' will normally already be properly aligned, but it
- * turns out that in the close_range() -> __close_range() ->
- * unshare_fd() -> dup_fd() -> sane_fdtable_size() we can end
- * up having a 'max_fds' value that isn't already aligned.
- *
- * Rather than make close_range() have to worry about this,
- * just make that BITS_PER_LONG alignment be part of a sane
- * fdtable size. Becuase that's really what it is.
+ * punch_hole is optional - when close_range() is asked to unshare
+ * and close, we don't need to copy descriptors in that range, so
+ * a smaller cloned descriptor table might suffice if the last
+ * currently opened descriptor falls into that range.
*/
-static unsigned int sane_fdtable_size(struct fdtable *fdt, unsigned int max_fds)
+static unsigned int sane_fdtable_size(struct fdtable *fdt, struct fd_range *punch_hole)
{
- unsigned int count;
-
- count = count_open_files(fdt);
- if (max_fds < NR_OPEN_DEFAULT)
- max_fds = NR_OPEN_DEFAULT;
- return ALIGN(min(count, max_fds), BITS_PER_LONG);
+ unsigned int last = find_last_bit(fdt->open_fds, fdt->max_fds);
+
+ if (last == fdt->max_fds)
+ return NR_OPEN_DEFAULT;
+ if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) {
+ last = find_last_bit(fdt->open_fds, punch_hole->from);
+ if (last == punch_hole->from)
+ return NR_OPEN_DEFAULT;
+ }
+ return ALIGN(last + 1, BITS_PER_LONG);
}
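
The hole-punching logic deserves a worked example. Below is a minimal userspace model of the new sizing computation; the find_last_bit() semantics (returns `size` when no bit below `size` is set), the ALIGN() macro and the NR_OPEN_DEFAULT value are simplified stand-ins for the kernel definitions, not the real ones.

#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(long))
#define NR_OPEN_DEFAULT	BITS_PER_LONG
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

struct fd_range { unsigned int from, to; };

/* Kernel-style find_last_bit(): highest set bit below size, or size if none. */
static size_t find_last_bit(const unsigned long *addr, size_t size)
{
	size_t i = size;

	while (i--)
		if (addr[i / BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG)))
			return i;
	return size;
}

static size_t model_size(const unsigned long *open_fds, size_t max_fds,
			 const struct fd_range *punch_hole)
{
	size_t last = find_last_bit(open_fds, max_fds);

	if (last == max_fds)
		return NR_OPEN_DEFAULT;
	if (punch_hole && punch_hole->to >= last && punch_hole->from <= last) {
		last = find_last_bit(open_fds, punch_hole->from);
		if (last == punch_hole->from)
			return NR_OPEN_DEFAULT;
	}
	return ALIGN(last + 1, BITS_PER_LONG);
}

int main(void)
{
	unsigned long open_fds[4] = { 0 };	/* 256-slot table */
	struct fd_range hole = { 2, UINT_MAX };	/* close_range(2, ~0U, ...) */

	open_fds[0] |= 3UL;			/* fds 0 and 1 open */
	open_fds[200 / BITS_PER_LONG] |= 1UL << (200 % BITS_PER_LONG); /* fd 200 */

	printf("%zu\n", model_size(open_fds, 256, NULL));	/* 256: must cover fd 200 */
	printf("%zu\n", model_size(open_fds, 256, &hole));	/* 64: fd 200 falls in the hole */
	return 0;
}

With the hole covering the last open descriptor, the clone shrinks from 256 to 64 slots; without it, fd 200 alone forces a full-size copy.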
/*
- * Allocate a new files structure and copy contents from the
- * passed in files structure.
- * errorp will be valid only when the returned files_struct is NULL.
+ * Allocate a new descriptor table and copy contents from the passed in
+ * instance. Returns a pointer to cloned table on success, ERR_PTR()
+ * on failure. For 'punch_hole' see sane_fdtable_size().
*/
-struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int *errorp)
+struct files_struct *dup_fd(struct files_struct *oldf, struct fd_range *punch_hole)
{
struct files_struct *newf;
struct file **old_fds, **new_fds;
unsigned int open_files, i;
struct fdtable *old_fdt, *new_fdt;
+ int error;
- *errorp = -ENOMEM;
newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
if (!newf)
- goto out;
+ return ERR_PTR(-ENOMEM);
atomic_set(&newf->count, 1);
@@ -341,7 +327,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
spin_lock(&oldf->file_lock);
old_fdt = files_fdtable(oldf);
- open_files = sane_fdtable_size(old_fdt, max_fds);
+ open_files = sane_fdtable_size(old_fdt, punch_hole);
/*
* Check whether we need to allocate a larger fd array and fd set.
@@ -354,14 +340,14 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
new_fdt = alloc_fdtable(open_files - 1);
if (!new_fdt) {
- *errorp = -ENOMEM;
+ error = -ENOMEM;
goto out_release;
}
/* beyond sysctl_nr_open; nothing to do */
if (unlikely(new_fdt->max_fds < open_files)) {
__free_fdtable(new_fdt);
- *errorp = -EMFILE;
+ error = -EMFILE;
goto out_release;
}
@@ -372,7 +358,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
*/
spin_lock(&oldf->file_lock);
old_fdt = files_fdtable(oldf);
- open_files = sane_fdtable_size(old_fdt, max_fds);
+ open_files = sane_fdtable_size(old_fdt, punch_hole);
}
copy_fd_bitmaps(new_fdt, old_fdt, open_files / BITS_PER_LONG);
@@ -406,8 +392,7 @@ struct files_struct *dup_fd(struct files_struct *oldf, unsigned int max_fds, int
out_release:
kmem_cache_free(files_cachep, newf);
-out:
- return NULL;
+ return ERR_PTR(error);
}
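
With dup_fd() now returning ERR_PTR() instead of NULL plus an out-parameter, callers shrink accordingly. A sketch of the updated calling convention, as it might look in a caller such as the fork path (the surrounding names are assumed for illustration, not part of this diff):

	struct files_struct *newf;

	newf = dup_fd(oldf, NULL);	/* plain fork(): no range to punch out */
	if (IS_ERR(newf))
		return PTR_ERR(newf);
	tsk->files = newf;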
static struct fdtable *close_files(struct files_struct * files)
@@ -672,7 +657,7 @@ int close_fd(unsigned fd)
return filp_close(file, files);
}
-EXPORT_SYMBOL(close_fd); /* for ksys_close() */
+EXPORT_SYMBOL(close_fd);
/**
* last_fd - return last valid index into fd table
@@ -748,37 +733,25 @@ int __close_range(unsigned fd, unsigned max_fd, unsigned int flags)
if (fd > max_fd)
return -EINVAL;
- if (flags & CLOSE_RANGE_UNSHARE) {
- int ret;
- unsigned int max_unshare_fds = NR_OPEN_MAX;
+ if ((flags & CLOSE_RANGE_UNSHARE) && atomic_read(&cur_fds->count) > 1) {
+ struct fd_range range = {fd, max_fd}, *punch_hole = &range;
/*
* If the caller requested all fds to be made cloexec we always
* copy all of the file descriptors since they still want to
* use them.
*/
- if (!(flags & CLOSE_RANGE_CLOEXEC)) {
- /*
- * If the requested range is greater than the current
- * maximum, we're closing everything so only copy all
- * file descriptors beneath the lowest file descriptor.
- */
- rcu_read_lock();
- if (max_fd >= last_fd(files_fdtable(cur_fds)))
- max_unshare_fds = fd;
- rcu_read_unlock();
- }
-
- ret = unshare_fd(CLONE_FILES, max_unshare_fds, &fds);
- if (ret)
- return ret;
+ if (flags & CLOSE_RANGE_CLOEXEC)
+ punch_hole = NULL;
+ fds = dup_fd(cur_fds, punch_hole);
+ if (IS_ERR(fds))
+ return PTR_ERR(fds);
/*
* We used to share our file descriptor table, and have now
* created a private one, make sure we're using it below.
*/
- if (fds)
- swap(cur_fds, fds);
+ swap(cur_fds, fds);
}
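
The punch_hole path above is reached from userspace via close_range() with CLOSE_RANGE_UNSHARE while the descriptor table is shared, e.g. after clone(CLONE_FILES). A minimal usage sketch, going through syscall(2) in case the libc lacks a wrapper:

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/close_range.h>

/*
 * Unshare the table and close every fd from 'first' upward; the kernel
 * can now size the private copy by the last fd surviving below 'first'.
 */
static int close_from(unsigned int first)
{
	return syscall(SYS_close_range, first, ~0U, CLOSE_RANGE_UNSHARE);
}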
if (flags & CLOSE_RANGE_CLOEXEC)
@@ -1124,7 +1097,7 @@ EXPORT_SYMBOL(task_lookup_next_fdget_rcu);
* The fput_needed flag returned by fget_light should be passed to the
* corresponding fput_light.
*/
-static unsigned long __fget_light(unsigned int fd, fmode_t mask)
+static inline struct fd __fget_light(unsigned int fd, fmode_t mask)
{
struct files_struct *files = current->files;
struct file *file;
@@ -1141,22 +1114,22 @@ static unsigned long __fget_light(unsigned int fd, fmode_t mask)
if (likely(atomic_read_acquire(&files->count) == 1)) {
file = files_lookup_fd_raw(files, fd);
if (!file || unlikely(file->f_mode & mask))
- return 0;
- return (unsigned long)file;
+ return EMPTY_FD;
+ return BORROWED_FD(file);
} else {
file = __fget_files(files, fd, mask);
if (!file)
- return 0;
- return FDPUT_FPUT | (unsigned long)file;
+ return EMPTY_FD;
+ return CLONED_FD(file);
}
}
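
EMPTY_FD, BORROWED_FD() and CLONED_FD() rely on struct fd now packing the struct file pointer and the cleanup flags into a single word, with the two low bits free because the pointer is at least word-aligned. A sketch of that encoding, consistent with this diff (the authoritative definitions live in include/linux/file.h):

struct fd {
	unsigned long word;	/* struct file * with flags in bits 0-1 */
};

#define FDPUT_FPUT		1	/* we hold a reference: fdput() must fput() */
#define FDPUT_POS_UNLOCK	2	/* f_pos_lock taken: fdput_pos() must unlock */

#define fd_file(f)	((struct file *)((f).word & ~3UL))
#define EMPTY_FD	((struct fd){ 0 })
#define BORROWED_FD(fp)	((struct fd){ (unsigned long)(fp) })		/* no extra ref */
#define CLONED_FD(fp)	((struct fd){ (unsigned long)(fp) | FDPUT_FPUT })	/* ref taken */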
-unsigned long __fdget(unsigned int fd)
+struct fd fdget(unsigned int fd)
{
return __fget_light(fd, FMODE_PATH);
}
-EXPORT_SYMBOL(__fdget);
+EXPORT_SYMBOL(fdget);
-unsigned long __fdget_raw(unsigned int fd)
+struct fd fdget_raw(unsigned int fd)
{
return __fget_light(fd, 0);
}
@@ -1177,16 +1150,16 @@ static inline bool file_needs_f_pos_lock(struct file *file)
(file_count(file) > 1 || file->f_op->iterate_shared);
}
-unsigned long __fdget_pos(unsigned int fd)
+struct fd fdget_pos(unsigned int fd)
{
- unsigned long v = __fdget(fd);
- struct file *file = (struct file *)(v & ~3);
+ struct fd f = fdget(fd);
+ struct file *file = fd_file(f);
if (file && file_needs_f_pos_lock(file)) {
- v |= FDPUT_POS_UNLOCK;
+ f.word |= FDPUT_POS_UNLOCK;
mutex_lock(&file->f_pos_lock);
}
- return v;
+ return f;
}
void __f_unlock_pos(struct file *f)
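
A typical consumer of the new fdget_pos() then looks like the sketch below; the body is an assumed reconstruction of the common read-syscall shape, shown only to illustrate the struct fd flow:

	struct fd f = fdget_pos(fd);
	ssize_t ret = -EBADF;

	if (fd_file(f)) {
		ret = vfs_read(fd_file(f), buf, count, &fd_file(f)->f_pos);
		fdput_pos(f);	/* drops f_pos_lock iff FDPUT_POS_UNLOCK is set */
	}
	return ret;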