path: root/fs/dcache.c
author     Linus Torvalds <torvalds@linux-foundation.org>   2012-05-03 21:16:43 +0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-05-04 01:01:40 +0400
commit     e419b4cc585680940bc42f8ca8a071d6023fb1bb (patch)
tree       8fce0f12b7b2a0fdca7a937af137910011efa783 /fs/dcache.c
parent     ac001e76546523ec2ef05b2f7001d8fdc588d069 (diff)
download   linux-e419b4cc585680940bc42f8ca8a071d6023fb1bb.tar.xz
vfs: make word-at-a-time accesses handle a non-existing page
It turns out that there are more cases than CONFIG_DEBUG_PAGEALLOC that can
have holes in the kernel address space: it seems to happen easily with Xen,
and it looks like the AMD gart64 code will also punch holes dynamically.

Actually hitting that case is still very unlikely, so just do the access,
and take an exception and fix it up for the very unlikely case of it being
a page-crosser with no next page.

And hey, this abstraction might even help other architectures that have
other issues with unaligned word accesses than the possible missing next
page. IOW, this could do the byte order magic too.

Peter Anvin fixed a thinko in the shifting for the exception case.

Reported-and-tested-by: Jana Saout <jana@saout.de>
Cc: Peter Anvin <hpa@zytor.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
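The property the patch relies on can be modeled outside the kernel. The sketch below is a hypothetical userspace reference model, not the kernel implementation: the function name zeropad_model and the valid_bytes parameter are illustrative only. It shows what a word load that crosses into a missing page should return after the exception fixup on a little-endian machine, namely the bytes that do exist, zero-padded.

```c
#include <stdio.h>
#include <string.h>

/*
 * Reference model (not the kernel code): pretend that only 'valid_bytes'
 * of the word starting at 'p' are backed by a mapped page.  The real
 * load_unaligned_zeropad() does a plain word load and, if that load
 * faults because it crossed into a missing page, the exception handler
 * re-reads the aligned word that is mapped and shifts it so the missing
 * tail reads as zeroes.  Here we simply memcpy the valid part into a
 * zeroed word, which gives the same little-endian result.
 */
static unsigned long zeropad_model(const unsigned char *p, size_t valid_bytes)
{
	unsigned long word = 0;

	if (valid_bytes > sizeof(word))
		valid_bytes = sizeof(word);
	memcpy(&word, p, valid_bytes);	/* missing bytes stay 0 */
	return word;
}

int main(void)
{
	/* A short name component sitting right at the end of a "page". */
	unsigned char name[] = { 'u', 's', 'r', 0 };

	/* Only 3 bytes are mapped; the rest of the word reads as zero. */
	printf("%#lx\n", zeropad_model(name, 3));
	return 0;
}
```

Because dentry_cmp() masks off the bytes beyond the string length anyway, the zero padding never changes the comparison result; the fixup only has to keep the stray load from faulting, not reproduce the missing data.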
Diffstat (limited to 'fs/dcache.c')
-rw-r--r--   fs/dcache.c   26
1 files changed, 22 insertions, 4 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index b60ddc41d783..b80531c91779 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -141,18 +141,29 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
* Compare 2 name strings, return 0 if they match, otherwise non-zero.
* The strings are both count bytes long, and count is non-zero.
*/
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+#include <asm/word-at-a-time.h>
+/*
+ * NOTE! 'cs' and 'scount' come from a dentry, so it has an
+ * aligned allocation for this particular component. We don't
+ * strictly need the load_unaligned_zeropad() safety, but it
+ * doesn't hurt either.
+ *
+ * In contrast, 'ct' and 'tcount' can be from a pathname, and do
+ * need the careful unaligned handling.
+ */
static inline int dentry_cmp(const unsigned char *cs, size_t scount,
const unsigned char *ct, size_t tcount)
{
-#ifdef CONFIG_DCACHE_WORD_ACCESS
unsigned long a,b,mask;
if (unlikely(scount != tcount))
return 1;
for (;;) {
- a = *(unsigned long *)cs;
- b = *(unsigned long *)ct;
+ a = load_unaligned_zeropad(cs);
+ b = load_unaligned_zeropad(ct);
if (tcount < sizeof(unsigned long))
break;
if (unlikely(a != b))
@@ -165,7 +176,13 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount,
}
mask = ~(~0ul << tcount*8);
return unlikely(!!((a ^ b) & mask));
+}
+
#else
+
+static inline int dentry_cmp(const unsigned char *cs, size_t scount,
+ const unsigned char *ct, size_t tcount)
+{
if (scount != tcount)
return 1;
@@ -177,9 +194,10 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount,
tcount--;
} while (tcount);
return 0;
-#endif
}
+#endif
+
static void __d_free(struct rcu_head *head)
{
struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
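The tail of the word-at-a-time path relies on the mask trick visible in the second hunk above: once fewer than sizeof(unsigned long) bytes remain, ~(~0ul << tcount*8) keeps only the low-order bytes of the loaded words, which on a little-endian machine are exactly the first tcount bytes in memory. A minimal standalone sketch of that comparison step follows; the helper name tail_cmp and the test strings are illustrative, little-endian is assumed, and it is only valid for 0 < tcount < sizeof(unsigned long), which is the only range the kernel loop reaches it with.

```c
#include <assert.h>
#include <stdio.h>
#include <string.h>

/*
 * Sketch of the final comparison step: 'a' and 'b' are whole words loaded
 * from the two names, but only the first 'tcount' bytes are still
 * meaningful.  On little-endian those are the low-order bytes, so the
 * mask ~(~0ul << tcount*8) discards whatever garbage (or zero padding)
 * sits beyond the end of the strings.
 */
static int tail_cmp(unsigned long a, unsigned long b, size_t tcount)
{
	unsigned long mask;

	assert(tcount > 0 && tcount < sizeof(unsigned long));
	mask = ~(~0ul << (tcount * 8));
	return ((a ^ b) & mask) ? 1 : 0;	/* 0 means the tails match */
}

int main(void)
{
	unsigned long a = 0, b = 0;

	/* Same first two bytes, difference starting at the third. */
	memcpy(&a, "usrAAAA", sizeof(a));
	memcpy(&b, "useBBBB", sizeof(b));

	printf("%d\n", tail_cmp(a, b, 2));	/* prints 0: "us" == "us" */
	printf("%d\n", tail_cmp(a, b, 3));	/* prints 1: "usr" != "use" */
	return 0;
}
```

The same masking is what makes the zero padding from load_unaligned_zeropad() harmless: any padded bytes fall outside the mask, so only the real name bytes ever influence the result.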