Diffstat (limited to 'fs/jbd2')
 fs/jbd2/commit.c      |  16
 fs/jbd2/journal.c     | 128
 fs/jbd2/recovery.c    |   2
 fs/jbd2/revoke.c      |   4
 fs/jbd2/transaction.c |  19
 5 files changed, 35 insertions(+), 134 deletions(-)
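This commit replaces jbd2's private per-blocksize slab allocator
(jbd2_slab_alloc()/jbd2_slab_free()) with plain jbd2_alloc()/jbd2_free()
helpers, renames the remaining JBD_* constants to JBD2_*, and converts the
commit path's __jbd2_journal_abort_hard() calls to jbd2_journal_abort() so
an errno is recorded along with the abort. The new allocation helpers are
defined outside this diff; as a rough sketch only, assuming they are thin
kmalloc()/kfree() wrappers (hypothetical, for orientation -- the real
definitions live in the jbd2 headers, not shown here):

	/* Sketch, not the actual jbd2 code: minimal allocation helpers
	 * of the shape the renamed call sites below could be targeting. */
	static inline void *jbd2_alloc(size_t size, gfp_t flags)
	{
		return kmalloc(size, flags);
	}

	static inline void jbd2_free(void *ptr, size_t size)
	{
		kfree(ptr);
	}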
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index c0f59d1b13dc..6986f334c643 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -278,7 +278,7 @@ static inline void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
unsigned long long block)
{
tag->t_blocknr = cpu_to_be32(block & (u32)~0);
- if (tag_bytes > JBD_TAG_SIZE32)
+ if (tag_bytes > JBD2_TAG_SIZE32)
tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
@@ -384,7 +384,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
struct buffer_head *bh = jh2bh(jh);
jbd_lock_bh_state(bh);
- jbd2_slab_free(jh->b_committed_data, bh->b_size);
+ jbd2_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
jbd_unlock_bh_state(bh);
}
@@ -475,7 +475,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
spin_unlock(&journal->j_list_lock);
if (err)
- __jbd2_journal_abort_hard(journal);
+ jbd2_journal_abort(journal, err);
jbd2_journal_write_revoke_records(journal, commit_transaction);
@@ -533,7 +533,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
descriptor = jbd2_journal_get_descriptor_buffer(journal);
if (!descriptor) {
- __jbd2_journal_abort_hard(journal);
+ jbd2_journal_abort(journal, -EIO);
continue;
}
@@ -566,7 +566,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
and repeat this loop: we'll fall into the
refile-on-abort condition above. */
if (err) {
- __jbd2_journal_abort_hard(journal);
+ jbd2_journal_abort(journal, err);
continue;
}
@@ -757,7 +757,7 @@ wait_for_iobuf:
err = -EIO;
if (err)
- __jbd2_journal_abort_hard(journal);
+ jbd2_journal_abort(journal, err);
/* End of a transaction! Finally, we can do checkpoint
processing: any buffers committed as a result of this
@@ -801,14 +801,14 @@ restart_loop:
* Otherwise, we can just throw away the frozen data now.
*/
if (jh->b_committed_data) {
- jbd2_slab_free(jh->b_committed_data, bh->b_size);
+ jbd2_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
if (jh->b_frozen_data) {
jh->b_committed_data = jh->b_frozen_data;
jh->b_frozen_data = NULL;
}
} else if (jh->b_frozen_data) {
- jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+ jbd2_free(jh->b_frozen_data, bh->b_size);
jh->b_frozen_data = NULL;
}
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index f37324aee817..6ddc5531587c 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -84,7 +84,6 @@ EXPORT_SYMBOL(jbd2_journal_force_commit);
static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
static void __journal_abort_soft (journal_t *journal, int errno);
-static int jbd2_journal_create_jbd_slab(size_t slab_size);
/*
* Helper function used to manage commit timeouts
@@ -335,10 +334,10 @@ repeat:
char *tmp;
jbd_unlock_bh_state(bh_in);
- tmp = jbd2_slab_alloc(bh_in->b_size, GFP_NOFS);
+ tmp = jbd2_alloc(bh_in->b_size, GFP_NOFS);
jbd_lock_bh_state(bh_in);
if (jh_in->b_frozen_data) {
- jbd2_slab_free(tmp, bh_in->b_size);
+ jbd2_free(tmp, bh_in->b_size);
goto repeat;
}
@@ -655,10 +654,9 @@ static journal_t * journal_init_common (void)
journal_t *journal;
int err;
- journal = jbd_kmalloc(sizeof(*journal), GFP_KERNEL);
+ journal = kzalloc(sizeof(*journal), GFP_KERNEL|__GFP_NOFAIL);
if (!journal)
goto fail;
- memset(journal, 0, sizeof(*journal));
init_waitqueue_head(&journal->j_wait_transaction_locked);
init_waitqueue_head(&journal->j_wait_logspace);
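The jbd_kmalloc() + memset() pair above collapses into a single kzalloc()
call; __GFP_NOFAIL preserves the retry-until-success behaviour that the
__jbd2_kmalloc() helper (removed further down) implemented via the same
flag. The equivalence, spelled out (sketch only):

	/* Before: allocate with retry, then zero by hand. */
	journal = jbd_kmalloc(sizeof(*journal), GFP_KERNEL);
	memset(journal, 0, sizeof(*journal));

	/* After: kzalloc() zeroes the allocation itself, and
	 * __GFP_NOFAIL tells the allocator to retry indefinitely. */
	journal = kzalloc(sizeof(*journal), GFP_KERNEL | __GFP_NOFAIL);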
@@ -672,7 +670,7 @@ static journal_t * journal_init_common (void)
spin_lock_init(&journal->j_list_lock);
spin_lock_init(&journal->j_state_lock);
- journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE);
+ journal->j_commit_interval = (HZ * JBD2_DEFAULT_MAX_COMMIT_AGE);
/* The journal is marked for error until we succeed with recovery! */
journal->j_flags = JBD2_ABORT;
@@ -1096,13 +1094,6 @@ int jbd2_journal_load(journal_t *journal)
}
}
- /*
- * Create a slab for this blocksize
- */
- err = jbd2_journal_create_jbd_slab(be32_to_cpu(sb->s_blocksize));
- if (err)
- return err;
-
/* Let the recovery code check whether it needs to recover any
* data from the journal. */
if (jbd2_journal_recover(journal))
@@ -1621,89 +1612,9 @@ int jbd2_journal_blocks_per_page(struct inode *inode)
size_t journal_tag_bytes(journal_t *journal)
{
if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
- return JBD_TAG_SIZE64;
+ return JBD2_TAG_SIZE64;
else
- return JBD_TAG_SIZE32;
-}
-
-/*
- * Simple support for retrying memory allocations. Introduced to help to
- * debug different VM deadlock avoidance strategies.
- */
-void * __jbd2_kmalloc (const char *where, size_t size, gfp_t flags, int retry)
-{
- return kmalloc(size, flags | (retry ? __GFP_NOFAIL : 0));
-}
-
-/*
- * jbd slab management: create 1k, 2k, 4k, 8k slabs as needed
- * and allocate frozen and commit buffers from these slabs.
- *
- * Reason for doing this is to avoid, SLAB_DEBUG - since it could
- * cause bh to cross page boundary.
- */
-
-#define JBD_MAX_SLABS 5
-#define JBD_SLAB_INDEX(size) (size >> 11)
-
-static struct kmem_cache *jbd_slab[JBD_MAX_SLABS];
-static const char *jbd_slab_names[JBD_MAX_SLABS] = {
- "jbd2_1k", "jbd2_2k", "jbd2_4k", NULL, "jbd2_8k"
-};
-
-static void jbd2_journal_destroy_jbd_slabs(void)
-{
- int i;
-
- for (i = 0; i < JBD_MAX_SLABS; i++) {
- if (jbd_slab[i])
- kmem_cache_destroy(jbd_slab[i]);
- jbd_slab[i] = NULL;
- }
-}
-
-static int jbd2_journal_create_jbd_slab(size_t slab_size)
-{
- int i = JBD_SLAB_INDEX(slab_size);
-
- BUG_ON(i >= JBD_MAX_SLABS);
-
- /*
- * Check if we already have a slab created for this size
- */
- if (jbd_slab[i])
- return 0;
-
- /*
- * Create a slab and force alignment to be same as slabsize -
- * this will make sure that allocations won't cross the page
- * boundary.
- */
- jbd_slab[i] = kmem_cache_create(jbd_slab_names[i],
- slab_size, slab_size, 0, NULL);
- if (!jbd_slab[i]) {
- printk(KERN_EMERG "JBD: no memory for jbd_slab cache\n");
- return -ENOMEM;
- }
- return 0;
-}
-
-void * jbd2_slab_alloc(size_t size, gfp_t flags)
-{
- int idx;
-
- idx = JBD_SLAB_INDEX(size);
- BUG_ON(jbd_slab[idx] == NULL);
- return kmem_cache_alloc(jbd_slab[idx], flags | __GFP_NOFAIL);
-}
-
-void jbd2_slab_free(void *ptr, size_t size)
-{
- int idx;
-
- idx = JBD_SLAB_INDEX(size);
- BUG_ON(jbd_slab[idx] == NULL);
- kmem_cache_free(jbd_slab[idx], ptr);
+ return JBD2_TAG_SIZE32;
}
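For reference, the deleted JBD_SLAB_INDEX(size) macro picked a cache slot
by shifting the blocksize right by 11 bits, which is why jbd_slab_names[]
had a NULL hole at index 3: 1k, 2k, 4k and 8k map to slots 0, 1, 2 and 4.
A standalone illustration (userspace C, for clarity only):

	#include <stdio.h>

	int main(void)
	{
		/* size >> 11: 1024->0, 2048->1, 4096->2, 8192->4 */
		unsigned long sizes[] = { 1024, 2048, 4096, 8192 };
		for (int i = 0; i < 4; i++)
			printf("%lu -> slab index %lu\n",
			       sizes[i], sizes[i] >> 11);
		return 0;
	}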
/*
@@ -1770,7 +1681,7 @@ static void journal_free_journal_head(struct journal_head *jh)
{
#ifdef CONFIG_JBD2_DEBUG
atomic_dec(&nr_journal_heads);
- memset(jh, JBD_POISON_FREE, sizeof(*jh));
+ memset(jh, JBD2_POISON_FREE, sizeof(*jh));
#endif
kmem_cache_free(jbd2_journal_head_cache, jh);
}
@@ -1893,13 +1804,13 @@ static void __journal_remove_journal_head(struct buffer_head *bh)
printk(KERN_WARNING "%s: freeing "
"b_frozen_data\n",
__FUNCTION__);
- jbd2_slab_free(jh->b_frozen_data, bh->b_size);
+ jbd2_free(jh->b_frozen_data, bh->b_size);
}
if (jh->b_committed_data) {
printk(KERN_WARNING "%s: freeing "
"b_committed_data\n",
__FUNCTION__);
- jbd2_slab_free(jh->b_committed_data, bh->b_size);
+ jbd2_free(jh->b_committed_data, bh->b_size);
}
bh->b_private = NULL;
jh->b_bh = NULL; /* debug, really */
@@ -1953,16 +1864,14 @@ void jbd2_journal_put_journal_head(struct journal_head *jh)
/*
* debugfs tunables
*/
-#if defined(CONFIG_JBD2_DEBUG)
-u8 jbd2_journal_enable_debug;
+#ifdef CONFIG_JBD2_DEBUG
+u8 jbd2_journal_enable_debug __read_mostly;
EXPORT_SYMBOL(jbd2_journal_enable_debug);
-#endif
-
-#if defined(CONFIG_JBD2_DEBUG) && defined(CONFIG_DEBUG_FS)
#define JBD2_DEBUG_NAME "jbd2-debug"
-struct dentry *jbd2_debugfs_dir, *jbd2_debug;
+static struct dentry *jbd2_debugfs_dir;
+static struct dentry *jbd2_debug;
static void __init jbd2_create_debugfs_entry(void)
{
@@ -1975,24 +1884,18 @@ static void __init jbd2_create_debugfs_entry(void)
static void __exit jbd2_remove_debugfs_entry(void)
{
- if (jbd2_debug)
- debugfs_remove(jbd2_debug);
- if (jbd2_debugfs_dir)
- debugfs_remove(jbd2_debugfs_dir);
+ debugfs_remove(jbd2_debug);
+ debugfs_remove(jbd2_debugfs_dir);
}
#else
static void __init jbd2_create_debugfs_entry(void)
{
- do {
- } while (0);
}
static void __exit jbd2_remove_debugfs_entry(void)
{
- do {
- } while (0);
}
#endif
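Two cleanups in the hunks above are worth spelling out. debugfs_remove()
is a no-op when handed a NULL dentry, so the removed if-checks were
redundant. And the deleted do { } while (0) bodies are leftover macro
style: that idiom exists so a multi-statement macro expands safely when
its call site is followed by a semicolon inside an if/else, but an empty
function body needs no guard at all. Roughly (hypothetical macro, not
from jbd2):

	#include <stdio.h>

	/* With a plain { ... } block instead of do { } while (0), the
	 * semicolon in "if (x) report_twice(m); else ..." would end the
	 * if statement early and break the else. */
	#define report_twice(msg)		\
		do {				\
			puts(msg);		\
			puts(msg);		\
		} while (0)

	/* An empty function body carries no such hazard: */
	static void noop(void) { }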
@@ -2040,7 +1943,6 @@ static void jbd2_journal_destroy_caches(void)
jbd2_journal_destroy_revoke_caches();
jbd2_journal_destroy_jbd2_journal_head_cache();
jbd2_journal_destroy_handle_cache();
- jbd2_journal_destroy_jbd_slabs();
}
static int __init journal_init(void)
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index b50be8a044eb..d0ce627539ef 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -311,7 +311,7 @@ int jbd2_journal_skip_recovery(journal_t *journal)
static inline unsigned long long read_tag_block(int tag_bytes, journal_block_tag_t *tag)
{
unsigned long long block = be32_to_cpu(tag->t_blocknr);
- if (tag_bytes > JBD_TAG_SIZE32)
+ if (tag_bytes > JBD2_TAG_SIZE32)
block |= (u64)be32_to_cpu(tag->t_blocknr_high) << 32;
return block;
}
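read_tag_block() here mirrors write_tag_block() in commit.c: the low 32
bits of the block number live in t_blocknr and, with the 64-bit feature
enabled, the high bits in t_blocknr_high. Writing the split as
(block >> 31) >> 1 is equivalent to block >> 32 for a 64-bit value, but
stays well-defined even if the operand is ever only 32 bits wide. A
round-trip check (userspace sketch; the be32 conversions are omitted):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		unsigned long long block = 0x123456789abcdef0ULL;

		/* Encode: split into the two on-disk tag fields. */
		uint32_t t_blocknr      = (uint32_t)(block & (uint32_t)~0);
		uint32_t t_blocknr_high = (uint32_t)((block >> 31) >> 1);

		/* Decode: reassemble as read_tag_block() does. */
		unsigned long long out = t_blocknr |
				((unsigned long long)t_blocknr_high << 32);
		assert(out == block);
		return 0;
	}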
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 01d88975e0c5..3595fd432d5b 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -352,7 +352,7 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
if (bh)
BUFFER_TRACE(bh, "found on hash");
}
-#ifdef JBD_EXPENSIVE_CHECKING
+#ifdef JBD2_EXPENSIVE_CHECKING
else {
struct buffer_head *bh2;
@@ -453,7 +453,7 @@ int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
}
}
-#ifdef JBD_EXPENSIVE_CHECKING
+#ifdef JBD2_EXPENSIVE_CHECKING
/* There better not be one left behind by now! */
record = find_revoke_record(journal, bh->b_blocknr);
J_ASSERT_JH(jh, record == NULL);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 7946ff43fc40..b1fcf2b3dca3 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -96,13 +96,12 @@ static int start_this_handle(journal_t *journal, handle_t *handle)
alloc_transaction:
if (!journal->j_running_transaction) {
- new_transaction = jbd_kmalloc(sizeof(*new_transaction),
- GFP_NOFS);
+ new_transaction = kzalloc(sizeof(*new_transaction),
+ GFP_NOFS|__GFP_NOFAIL);
if (!new_transaction) {
ret = -ENOMEM;
goto out;
}
- memset(new_transaction, 0, sizeof(*new_transaction));
}
jbd_debug(3, "New handle %p going live.\n", handle);
@@ -236,7 +235,7 @@ out:
/* Allocate a new handle. This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
- handle_t *handle = jbd_alloc_handle(GFP_NOFS);
+ handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
if (!handle)
return NULL;
memset(handle, 0, sizeof(*handle));
@@ -282,7 +281,7 @@ handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
err = start_this_handle(journal, handle);
if (err < 0) {
- jbd_free_handle(handle);
+ jbd2_free_handle(handle);
current->journal_info = NULL;
handle = ERR_PTR(err);
}
@@ -668,7 +667,7 @@ repeat:
JBUFFER_TRACE(jh, "allocate memory for buffer");
jbd_unlock_bh_state(bh);
frozen_buffer =
- jbd2_slab_alloc(jh2bh(jh)->b_size,
+ jbd2_alloc(jh2bh(jh)->b_size,
GFP_NOFS);
if (!frozen_buffer) {
printk(KERN_EMERG
@@ -728,7 +727,7 @@ done:
out:
if (unlikely(frozen_buffer)) /* It's usually NULL */
- jbd2_slab_free(frozen_buffer, bh->b_size);
+ jbd2_free(frozen_buffer, bh->b_size);
JBUFFER_TRACE(jh, "exit");
return error;
@@ -881,7 +880,7 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
repeat:
if (!jh->b_committed_data) {
- committed_data = jbd2_slab_alloc(jh2bh(jh)->b_size, GFP_NOFS);
+ committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
if (!committed_data) {
printk(KERN_EMERG "%s: No memory for committed data\n",
__FUNCTION__);
@@ -908,7 +907,7 @@ repeat:
out:
jbd2_journal_put_journal_head(jh);
if (unlikely(committed_data))
- jbd2_slab_free(committed_data, bh->b_size);
+ jbd2_free(committed_data, bh->b_size);
return err;
}
@@ -1411,7 +1410,7 @@ int jbd2_journal_stop(handle_t *handle)
spin_unlock(&journal->j_state_lock);
}
- jbd_free_handle(handle);
+ jbd2_free_handle(handle);
return err;
}