author | Eric Paris <eparis@redhat.com> | 2009-12-18 05:24:24 +0300
committer | Eric Paris <eparis@redhat.com> | 2010-07-28 17:58:53 +0400
commit | 841bdc10f573aa010dd5818d35a5690b7d9f73ce (patch)
tree | 58ef7a15e24ac07d3af7c6db7306199c9392f7dd /fs/notify/inode_mark.c
parent | d07754412f9cdc2f4a99318d5ee81ace6715ea99 (diff)
download | linux-841bdc10f573aa010dd5818d35a5690b7d9f73ce.tar.xz
fsnotify: rename mark_entry to just mark
Previously I used "mark_entry" when talking about marks on inodes. The "_entry"
suffix is pretty useless, so just use "mark" instead.
Signed-off-by: Eric Paris <eparis@redhat.com>
Diffstat (limited to 'fs/notify/inode_mark.c')
-rw-r--r-- | fs/notify/inode_mark.c | 148
1 file changed, 74 insertions, 74 deletions
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 01c42632eb2a..27c1b43ad739 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -30,21 +30,21 @@
  * There are 3 spinlocks involved with fsnotify inode marks and they MUST
  * be taken in order as follows:
  *
- * entry->lock
+ * mark->lock
  * group->mark_lock
  * inode->i_lock
  *
- * entry->lock protects 2 things, entry->group and entry->inode. You must hold
+ * mark->lock protects 2 things, mark->group and mark->inode. You must hold
  * that lock to dereference either of these things (they could be NULL even with
  * the lock)
  *
  * group->mark_lock protects the marks_list anchored inside a given group
- * and each entry is hooked via the g_list. It also sorta protects the
+ * and each mark is hooked via the g_list. It also sorta protects the
  * free_g_list, which when used is anchored by a private list on the stack of the
  * task which held the group->mark_lock.
  *
  * inode->i_lock protects the i_fsnotify_marks list anchored inside a
- * given inode and each entry is hooked via the i_list. (and sorta the
+ * given inode and each mark is hooked via the i_list. (and sorta the
  * free_i_list)
  *
  *
@@ -95,15 +95,15 @@
 #include <linux/fsnotify_backend.h>
 #include "fsnotify.h"
 
-void fsnotify_get_mark(struct fsnotify_mark *entry)
+void fsnotify_get_mark(struct fsnotify_mark *mark)
 {
-	atomic_inc(&entry->refcnt);
+	atomic_inc(&mark->refcnt);
 }
 
-void fsnotify_put_mark(struct fsnotify_mark *entry)
+void fsnotify_put_mark(struct fsnotify_mark *mark)
 {
-	if (atomic_dec_and_test(&entry->refcnt))
-		entry->free_mark(entry);
+	if (atomic_dec_and_test(&mark->refcnt))
+		mark->free_mark(mark);
 }
 
 /*
@@ -111,14 +111,14 @@ void fsnotify_put_mark(struct fsnotify_mark *entry)
  */
 static void fsnotify_recalc_inode_mask_locked(struct inode *inode)
 {
-	struct fsnotify_mark *entry;
+	struct fsnotify_mark *mark;
 	struct hlist_node *pos;
 	__u32 new_mask = 0;
 
 	assert_spin_locked(&inode->i_lock);
 
-	hlist_for_each_entry(entry, pos, &inode->i_fsnotify_marks, i.i_list)
-		new_mask |= entry->mask;
+	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list)
+		new_mask |= mark->mask;
 	inode->i_fsnotify_mask = new_mask;
 }
 
@@ -138,40 +138,40 @@ void fsnotify_recalc_inode_mask(struct inode *inode)
 /*
  * Any time a mark is getting freed we end up here.
  * The caller had better be holding a reference to this mark so we don't actually
- * do the final put under the entry->lock
+ * do the final put under the mark->lock
  */
-void fsnotify_destroy_mark(struct fsnotify_mark *entry)
+void fsnotify_destroy_mark(struct fsnotify_mark *mark)
 {
 	struct fsnotify_group *group;
 	struct inode *inode;
 
-	spin_lock(&entry->lock);
+	spin_lock(&mark->lock);
 
-	group = entry->group;
-	inode = entry->i.inode;
+	group = mark->group;
+	inode = mark->i.inode;
 
 	BUG_ON(group && !inode);
 	BUG_ON(!group && inode);
 
 	/* if !group something else already marked this to die */
 	if (!group) {
-		spin_unlock(&entry->lock);
+		spin_unlock(&mark->lock);
 		return;
 	}
 
 	/* 1 from caller and 1 for being on i_list/g_list */
-	BUG_ON(atomic_read(&entry->refcnt) < 2);
+	BUG_ON(atomic_read(&mark->refcnt) < 2);
 
 	spin_lock(&group->mark_lock);
 	spin_lock(&inode->i_lock);
 
-	hlist_del_init(&entry->i.i_list);
-	entry->i.inode = NULL;
+	hlist_del_init(&mark->i.i_list);
+	mark->i.inode = NULL;
 
-	list_del_init(&entry->g_list);
-	entry->group = NULL;
+	list_del_init(&mark->g_list);
+	mark->group = NULL;
 
-	fsnotify_put_mark(entry); /* for i_list and g_list */
+	fsnotify_put_mark(mark); /* for i_list and g_list */
 
 	/*
 	 * this mark is now off the inode->i_fsnotify_marks list and we
@@ -182,21 +182,21 @@ void fsnotify_destroy_mark(struct fsnotify_mark *entry)
 
 	spin_unlock(&inode->i_lock);
 	spin_unlock(&group->mark_lock);
-	spin_unlock(&entry->lock);
+	spin_unlock(&mark->lock);
 
 	/*
 	 * Some groups like to know that marks are being freed. This is a
-	 * callback to the group function to let it know that this entry
+	 * callback to the group function to let it know that this mark
 	 * is being freed.
 	 */
 	if (group->ops->freeing_mark)
-		group->ops->freeing_mark(entry, group);
+		group->ops->freeing_mark(mark, group);
 
 	/*
 	 * __fsnotify_update_child_dentry_flags(inode);
 	 *
 	 * I really want to call that, but we can't, we have no idea if the inode
-	 * still exists the second we drop the entry->lock.
+	 * still exists the second we drop the mark->lock.
 	 *
 	 * The next time an event arrive to this inode from one of it's children
 	 * __fsnotify_parent will see that the inode doesn't care about it's
@@ -221,20 +221,20 @@ void fsnotify_destroy_mark(struct fsnotify_mark *entry)
  */
 void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
 {
-	struct fsnotify_mark *lentry, *entry;
+	struct fsnotify_mark *lmark, *mark;
 	LIST_HEAD(free_list);
 
 	spin_lock(&group->mark_lock);
-	list_for_each_entry_safe(entry, lentry, &group->marks_list, g_list) {
-		list_add(&entry->free_g_list, &free_list);
-		list_del_init(&entry->g_list);
-		fsnotify_get_mark(entry);
+	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
+		list_add(&mark->free_g_list, &free_list);
+		list_del_init(&mark->g_list);
+		fsnotify_get_mark(mark);
 	}
 	spin_unlock(&group->mark_lock);
 
-	list_for_each_entry_safe(entry, lentry, &free_list, free_g_list) {
-		fsnotify_destroy_mark(entry);
-		fsnotify_put_mark(entry);
+	list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) {
+		fsnotify_destroy_mark(mark);
+		fsnotify_put_mark(mark);
 	}
 }
 
@@ -243,21 +243,21 @@ void fsnotify_clear_marks_by_group(struct fsnotify_group *group)
  */
 void fsnotify_clear_marks_by_inode(struct inode *inode)
 {
-	struct fsnotify_mark *entry, *lentry;
+	struct fsnotify_mark *mark, *lmark;
 	struct hlist_node *pos, *n;
 	LIST_HEAD(free_list);
 
 	spin_lock(&inode->i_lock);
-	hlist_for_each_entry_safe(entry, pos, n, &inode->i_fsnotify_marks, i.i_list) {
-		list_add(&entry->i.free_i_list, &free_list);
-		hlist_del_init(&entry->i.i_list);
-		fsnotify_get_mark(entry);
+	hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) {
+		list_add(&mark->i.free_i_list, &free_list);
+		hlist_del_init(&mark->i.i_list);
+		fsnotify_get_mark(mark);
 	}
 	spin_unlock(&inode->i_lock);
 
-	list_for_each_entry_safe(entry, lentry, &free_list, i.free_i_list) {
-		fsnotify_destroy_mark(entry);
-		fsnotify_put_mark(entry);
+	list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) {
+		fsnotify_destroy_mark(mark);
+		fsnotify_put_mark(mark);
 	}
 }
 
@@ -268,15 +268,15 @@ void fsnotify_clear_marks_by_inode(struct inode *inode)
 struct fsnotify_mark *fsnotify_find_mark(struct fsnotify_group *group,
					 struct inode *inode)
 {
-	struct fsnotify_mark *entry;
+	struct fsnotify_mark *mark;
 	struct hlist_node *pos;
 
 	assert_spin_locked(&inode->i_lock);
 
-	hlist_for_each_entry(entry, pos, &inode->i_fsnotify_marks, i.i_list) {
-		if (entry->group == group) {
-			fsnotify_get_mark(entry);
-			return entry;
+	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) {
+		if (mark->group == group) {
+			fsnotify_get_mark(mark);
+			return mark;
 		}
 	}
 	return NULL;
@@ -294,35 +294,35 @@ void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *ol
 /*
  * Nothing fancy, just initialize lists and locks and counters.
  */
-void fsnotify_init_mark(struct fsnotify_mark *entry,
-			void (*free_mark)(struct fsnotify_mark *entry))
+void fsnotify_init_mark(struct fsnotify_mark *mark,
+			void (*free_mark)(struct fsnotify_mark *mark))
 {
-	spin_lock_init(&entry->lock);
-	atomic_set(&entry->refcnt, 1);
-	INIT_HLIST_NODE(&entry->i.i_list);
-	entry->group = NULL;
-	entry->mask = 0;
-	entry->i.inode = NULL;
-	entry->free_mark = free_mark;
+	spin_lock_init(&mark->lock);
+	atomic_set(&mark->refcnt, 1);
+	INIT_HLIST_NODE(&mark->i.i_list);
+	mark->group = NULL;
+	mark->mask = 0;
+	mark->i.inode = NULL;
+	mark->free_mark = free_mark;
 }
 
 /*
- * Attach an initialized mark entry to a given group and inode.
+ * Attach an initialized mark mark to a given group and inode.
 * These marks may be used for the fsnotify backend to determine which
 * event types should be delivered to which group and for which inodes.
  */
-int fsnotify_add_mark(struct fsnotify_mark *entry,
+int fsnotify_add_mark(struct fsnotify_mark *mark,
		      struct fsnotify_group *group, struct inode *inode,
		      int allow_dups)
 {
-	struct fsnotify_mark *lentry = NULL;
+	struct fsnotify_mark *lmark = NULL;
 	int ret = 0;
 
 	inode = igrab(inode);
 	if (unlikely(!inode))
 		return -EINVAL;
 
-	entry->flags = FSNOTIFY_MARK_FLAG_INODE;
+	mark->flags = FSNOTIFY_MARK_FLAG_INODE;
 
 	/*
 	 * if this group isn't being testing for inode type events we need
@@ -340,24 +340,24 @@ int fsnotify_add_mark(struct fsnotify_mark *entry,
 
 	/*
 	 * LOCKING ORDER!!!!
-	 * entry->lock
+	 * mark->lock
 	 * group->mark_lock
 	 * inode->i_lock
 	 */
-	spin_lock(&entry->lock);
+	spin_lock(&mark->lock);
 	spin_lock(&group->mark_lock);
 	spin_lock(&inode->i_lock);
 
 	if (!allow_dups)
-		lentry = fsnotify_find_mark(group, inode);
-	if (!lentry) {
-		entry->group = group;
-		entry->i.inode = inode;
+		lmark = fsnotify_find_mark(group, inode);
+	if (!lmark) {
+		mark->group = group;
+		mark->i.inode = inode;
 
-		hlist_add_head(&entry->i.i_list, &inode->i_fsnotify_marks);
-		list_add(&entry->g_list, &group->marks_list);
+		hlist_add_head(&mark->i.i_list, &inode->i_fsnotify_marks);
+		list_add(&mark->g_list, &group->marks_list);
 
-		fsnotify_get_mark(entry); /* for i_list and g_list */
+		fsnotify_get_mark(mark); /* for i_list and g_list */
 
 		atomic_inc(&group->num_marks);
 
@@ -366,12 +366,12 @@ int fsnotify_add_mark(struct fsnotify_mark *entry,
 
 	spin_unlock(&inode->i_lock);
 	spin_unlock(&group->mark_lock);
-	spin_unlock(&entry->lock);
+	spin_unlock(&mark->lock);
 
-	if (lentry) {
+	if (lmark) {
 		ret = -EEXIST;
 		iput(inode);
-		fsnotify_put_mark(lentry);
+		fsnotify_put_mark(lmark);
 	} else {
 		__fsnotify_update_child_dentry_flags(inode);
 	}
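The call pattern of the renamed API is unchanged: a backend initializes a mark with a destructor callback, attaches it to an inode on behalf of its group, and drops its own reference when it no longer needs the pointer. Below is a minimal sketch of that usage based only on the signatures visible in this patch; example_free_mark and example_watch_inode are hypothetical names (not functions from the tree), and assigning mark->mask directly is shown purely for illustration.

#include <linux/slab.h>
#include <linux/fsnotify_backend.h>

/* Hypothetical destructor handed to fsnotify_init_mark(); it runs once the
 * last reference is dropped via fsnotify_put_mark(). */
static void example_free_mark(struct fsnotify_mark *mark)
{
	kfree(mark);
}

/* Hypothetical helper: watch @inode for FS_MODIFY events on behalf of @group. */
static int example_watch_inode(struct fsnotify_group *group, struct inode *inode)
{
	struct fsnotify_mark *mark;
	int ret;

	mark = kzalloc(sizeof(*mark), GFP_KERNEL);
	if (!mark)
		return -ENOMEM;

	fsnotify_init_mark(mark, example_free_mark);	/* refcnt starts at 1 */
	mark->mask = FS_MODIFY;

	ret = fsnotify_add_mark(mark, group, inode, 0);	/* 0 == no duplicate marks */
	if (ret)
		fsnotify_put_mark(mark);	/* -EEXIST or other failure: drop our reference */
	return ret;
}

On success, fsnotify_add_mark() takes an extra reference for the i_list/g_list hooks, so the mark stays alive while it is attached; fsnotify_destroy_mark() is what later detaches it from the inode and group.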