path: root/drivers/lightnvm/pblk-init.c
Diffstat (limited to 'drivers/lightnvm/pblk-init.c')
 drivers/lightnvm/pblk-init.c | 103
 1 file changed, 79 insertions(+), 24 deletions(-)
diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
index 13822594647c..f9a3e47b6a93 100644
--- a/drivers/lightnvm/pblk-init.c
+++ b/drivers/lightnvm/pblk-init.c
@@ -207,9 +207,6 @@ static int pblk_rwb_init(struct pblk *pblk)
return pblk_rb_init(&pblk->rwb, buffer_size, threshold, geo->csecs);
}
-/* Minimum pages needed within a lun */
-#define ADDR_POOL_SIZE 64
-
static int pblk_set_addrf_12(struct pblk *pblk, struct nvm_geo *geo,
struct nvm_addrf_12 *dst)
{
@@ -350,23 +347,19 @@ fail_destroy_ws:
static int pblk_get_global_caches(void)
{
- int ret;
+ int ret = 0;
mutex_lock(&pblk_caches.mutex);
- if (kref_read(&pblk_caches.kref) > 0) {
- kref_get(&pblk_caches.kref);
- mutex_unlock(&pblk_caches.mutex);
- return 0;
- }
+ if (kref_get_unless_zero(&pblk_caches.kref))
+ goto out;
ret = pblk_create_global_caches();
-
if (!ret)
- kref_get(&pblk_caches.kref);
+ kref_init(&pblk_caches.kref);
+out:
mutex_unlock(&pblk_caches.mutex);
-
return ret;
}
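The hunk above replaces the open-coded kref_read()/kref_get() pair with kref_get_unless_zero(), and the first user of the global caches now initializes the refcount to 1 via kref_init() after pblk_create_global_caches() succeeds. As a rough userspace analogue of that get-or-create idiom (a plain counter and a pthread mutex stand in for struct kref and pblk_caches.mutex; all names here are illustrative, not taken from the driver):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static int cache_refs;                  /* 0 means the caches do not exist yet */

/* Stand-in for pblk_create_global_caches(); pretend it always succeeds. */
static int create_caches(void)
{
	return 0;
}

/* Mirrors the control flow of the patched pblk_get_global_caches(). */
static int get_caches(void)
{
	int ret = 0;

	pthread_mutex_lock(&cache_lock);
	if (cache_refs > 0) {           /* kref_get_unless_zero() succeeded */
		cache_refs++;
		goto out;
	}
	ret = create_caches();
	if (!ret)
		cache_refs = 1;         /* kref_init(): the first user holds one reference */
out:
	pthread_mutex_unlock(&cache_lock);
	return ret;
}

int main(void)
{
	int ret;

	ret = get_caches();
	printf("first get:  ret=%d refs=%d\n", ret, cache_refs);
	ret = get_caches();
	printf("second get: ret=%d refs=%d\n", ret, cache_refs);
	return 0;
}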
@@ -406,12 +399,45 @@ static int pblk_core_init(struct pblk *pblk)
pblk->nr_flush_rst = 0;
pblk->min_write_pgs = geo->ws_opt;
+ pblk->min_write_pgs_data = pblk->min_write_pgs;
max_write_ppas = pblk->min_write_pgs * geo->all_luns;
pblk->max_write_pgs = min_t(int, max_write_ppas, NVM_MAX_VLBA);
pblk->max_write_pgs = min_t(int, pblk->max_write_pgs,
queue_max_hw_sectors(dev->q) / (geo->csecs >> SECTOR_SHIFT));
pblk_set_sec_per_write(pblk, pblk->min_write_pgs);
+ pblk->oob_meta_size = geo->sos;
+ if (!pblk_is_oob_meta_supported(pblk)) {
+ /* For drives which do not have the OOB metadata feature,
+ * in order to support the recovery feature we need to use
+ * so-called packed metadata. Packed metadata stores
+ * the same information as OOB metadata (the l2p table mapping),
+ * but in the form of a single page at the end of
+ * every write request.
+ */
+ if (pblk->min_write_pgs
+ * sizeof(struct pblk_sec_meta) > PAGE_SIZE) {
+ /* We want to keep all the packed metadata on a single
+ * page per write request, so we need to ensure that
+ * it will fit.
+ *
+ * This is more of a sanity check, since there is
+ * no device with such a big minimal write size
+ * (above 1 megabyte).
+ */
+ pblk_err(pblk, "Not supported min write size\n");
+ return -EINVAL;
+ }
+ /* For the packed metadata approach we make a simplification.
+ * On the read path we always issue requests whose size is
+ * equal to max_write_pgs, with all pages filled with
+ * user payload except for the last page, which is
+ * filled with packed metadata.
+ */
+ pblk->max_write_pgs = pblk->min_write_pgs;
+ pblk->min_write_pgs_data = pblk->min_write_pgs - 1;
+ }
+
pblk->pad_dist = kcalloc(pblk->min_write_pgs - 1, sizeof(atomic64_t),
GFP_KERNEL);
if (!pblk->pad_dist)
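To see why the packed-metadata branch above caps the write sizes the way it does, the standalone sketch below walks through the arithmetic (PAGE_SIZE, sizeof(struct pblk_sec_meta) and geo->ws_opt values are illustrative assumptions, not taken from a real device):

#include <stdio.h>

int main(void)
{
	const int page_size = 4096;   /* assumed PAGE_SIZE */
	const int sec_meta_size = 8;  /* assumed sizeof(struct pblk_sec_meta) */
	int min_write_pgs = 8;        /* example geo->ws_opt */

	/* The driver rejects the device if one page cannot hold the packed
	 * metadata for a whole minimal write unit. */
	if (min_write_pgs * sec_meta_size > page_size) {
		printf("unsupported min write size\n");
		return 1;
	}

	/* With packed metadata, requests are issued in min_write_pgs-sized
	 * units and the last sector of each unit carries metadata, so only
	 * min_write_pgs - 1 sectors per unit hold user data. */
	int max_write_pgs = min_write_pgs;
	int min_write_pgs_data = min_write_pgs - 1;

	printf("max_write_pgs=%d data_sectors_per_write=%d\n",
	       max_write_pgs, min_write_pgs_data);
	return 0;
}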
@@ -635,40 +661,61 @@ static unsigned int calc_emeta_len(struct pblk *pblk)
return (lm->emeta_len[1] + lm->emeta_len[2] + lm->emeta_len[3]);
}
-static void pblk_set_provision(struct pblk *pblk, long nr_free_blks)
+static int pblk_set_provision(struct pblk *pblk, int nr_free_chks)
{
struct nvm_tgt_dev *dev = pblk->dev;
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line_meta *lm = &pblk->lm;
struct nvm_geo *geo = &dev->geo;
sector_t provisioned;
- int sec_meta, blk_meta;
+ int sec_meta, blk_meta, clba;
+ int minimum;
if (geo->op == NVM_TARGET_DEFAULT_OP)
pblk->op = PBLK_DEFAULT_OP;
else
pblk->op = geo->op;
- provisioned = nr_free_blks;
+ minimum = pblk_get_min_chks(pblk);
+ provisioned = nr_free_chks;
provisioned *= (100 - pblk->op);
sector_div(provisioned, 100);
- pblk->op_blks = nr_free_blks - provisioned;
+ if ((nr_free_chks - provisioned) < minimum) {
+ if (geo->op != NVM_TARGET_DEFAULT_OP) {
+ pblk_err(pblk, "OP too small to create a sane instance\n");
+ return -EINTR;
+ }
+
+ /* If the user did not specify an OP value and PBLK_DEFAULT_OP
+ * is not enough, calculate and set a sane value.
+ */
+
+ provisioned = nr_free_chks - minimum;
+ pblk->op = (100 * minimum) / nr_free_chks;
+ pblk_info(pblk, "Default OP insufficient, adjusting OP to %d\n",
+ pblk->op);
+ }
+
+ pblk->op_blks = nr_free_chks - provisioned;
/* Internally pblk manages all free blocks, but all calculations based
* on user capacity consider only provisioned blocks
*/
- pblk->rl.total_blocks = nr_free_blks;
- pblk->rl.nr_secs = nr_free_blks * geo->clba;
+ pblk->rl.total_blocks = nr_free_chks;
+ pblk->rl.nr_secs = nr_free_chks * geo->clba;
/* Consider sectors used for metadata */
sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);
- pblk->capacity = (provisioned - blk_meta) * geo->clba;
+ clba = (geo->clba / pblk->min_write_pgs) * pblk->min_write_pgs_data;
+ pblk->capacity = (provisioned - blk_meta) * clba;
- atomic_set(&pblk->rl.free_blocks, nr_free_blks);
- atomic_set(&pblk->rl.free_user_blocks, nr_free_blks);
+ atomic_set(&pblk->rl.free_blocks, nr_free_chks);
+ atomic_set(&pblk->rl.free_user_blocks, nr_free_chks);
+
+ return 0;
}
static int pblk_setup_line_meta_chk(struct pblk *pblk, struct pblk_line *line,
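pblk_set_provision() now returns an error when a user-supplied OP leaves fewer spare chunks than pblk_get_min_chks() demands, and recomputes OP when the default is insufficient. The standalone sketch below walks through that arithmetic with made-up numbers (the chunk counts, the minimum, the 11% default OP and the chunk geometry are all illustrative assumptions):

#include <stdio.h>

int main(void)
{
	int nr_free_chks = 1000;  /* example number of free chunks */
	int minimum = 120;        /* example pblk_get_min_chks() result */
	int op = 11;              /* assumed PBLK_DEFAULT_OP of 11% */

	long provisioned = (long)nr_free_chks * (100 - op) / 100;    /* 890 */

	/* If the default OP leaves fewer spare chunks than the minimum,
	 * reserve exactly the minimum and derive the matching OP. */
	if (nr_free_chks - provisioned < minimum) {
		provisioned = nr_free_chks - minimum;                /* 880 */
		op = (100 * minimum) / nr_free_chks;                 /* 12% */
	}

	/* User-visible sectors per chunk when packed metadata steals one
	 * sector out of every min_write_pgs-sized write unit. */
	int clba = 4096;          /* example sectors per chunk (geo->clba) */
	int min_write_pgs = 8, min_write_pgs_data = 7;
	int data_clba = (clba / min_write_pgs) * min_write_pgs_data; /* 3584 */

	printf("op=%d%% provisioned=%ld data_sectors_per_chunk=%d\n",
	       op, provisioned, data_clba);
	return 0;
}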
@@ -984,7 +1031,7 @@ static int pblk_lines_init(struct pblk *pblk)
struct pblk_line_mgmt *l_mg = &pblk->l_mg;
struct pblk_line *line;
void *chunk_meta;
- long nr_free_chks = 0;
+ int nr_free_chks = 0;
int i, ret;
ret = pblk_line_meta_init(pblk);
@@ -1031,7 +1078,9 @@ static int pblk_lines_init(struct pblk *pblk)
goto fail_free_lines;
}
- pblk_set_provision(pblk, nr_free_chks);
+ ret = pblk_set_provision(pblk, nr_free_chks);
+ if (ret)
+ goto fail_free_lines;
vfree(chunk_meta);
return 0;
@@ -1041,7 +1090,7 @@ fail_free_lines:
pblk_line_meta_free(l_mg, &pblk->lines[i]);
kfree(pblk->lines);
fail_free_chunk_meta:
- kfree(chunk_meta);
+ vfree(chunk_meta);
fail_free_luns:
kfree(pblk->luns);
fail_free_meta:
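The error-path fix above makes the failure path match the success path: chunk_meta is released with vfree() when initialization succeeds (see the earlier hunk), so it must also be vfree()'d on failure; freeing a vmalloc-backed buffer with kfree() is a bug. A kernel-style sketch of the pairing rule (not a standalone program; names and sizes are illustrative):

#include <linux/vmalloc.h>
#include <linux/slab.h>

static int example_init(void)
{
	void *chunk_meta = vmalloc(2 * 1024 * 1024);   /* large buffer: vmalloc() */
	void *luns = kmalloc(4096, GFP_KERNEL);        /* small buffer: kmalloc() */

	if (!chunk_meta || !luns)
		goto fail;

	/* ... use the buffers ... */

	vfree(chunk_meta);      /* vmalloc() must pair with vfree() */
	kfree(luns);            /* kmalloc() must pair with kfree() */
	return 0;

fail:
	vfree(chunk_meta);      /* both free helpers accept NULL */
	kfree(luns);
	return -ENOMEM;
}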
@@ -1154,6 +1203,12 @@ static void *pblk_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk,
return ERR_PTR(-EINVAL);
}
+ if (geo->ext) {
+ pblk_err(pblk, "extended metadata not supported\n");
+ kfree(pblk);
+ return ERR_PTR(-EINVAL);
+ }
+
spin_lock_init(&pblk->resubmit_lock);
spin_lock_init(&pblk->trans_lock);
spin_lock_init(&pblk->lock);