author		Dan Williams <dan.j.williams@intel.com>	2016-05-18 19:59:34 +0300
committer	Dan Williams <dan.j.williams@intel.com>	2016-05-18 19:59:34 +0300
commit		594d6d96ea042366878aa7dc7f5711b8c245db5a (patch)
tree		1b7333bf5b5c1147e136f050d686ab4f888ab85f /drivers/nvdimm/pfn_devs.c
parent		1b8d2afde54fade94339f573c4e05644f9ae9866 (diff)
parent		45a0dac0451136fa7ae34a6fea53ef6a136287ce (diff)
download	linux-594d6d96ea042366878aa7dc7f5711b8c245db5a.tar.xz
Merge branch 'for-4.7/dax' into libnvdimm-for-next
Diffstat (limited to 'drivers/nvdimm/pfn_devs.c')
-rw-r--r--	drivers/nvdimm/pfn_devs.c	319
1 file changed, 272 insertions(+), 47 deletions(-)
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index e071e214feba..2248056d29e7 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013-2016 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -10,6 +10,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  * General Public License for more details.
  */
+#include <linux/memremap.h>
 #include <linux/blkdev.h>
 #include <linux/device.h>
 #include <linux/genhd.h>
@@ -53,10 +54,29 @@ struct nd_pfn *to_nd_pfn(struct device *dev)
 }
 EXPORT_SYMBOL(to_nd_pfn);
 
+static struct nd_pfn *to_nd_pfn_safe(struct device *dev)
+{
+	/*
+	 * pfn device attributes are re-used by dax device instances, so we
+	 * need to be careful to correct device-to-nd_pfn conversion.
+	 */
+	if (is_nd_pfn(dev))
+		return to_nd_pfn(dev);
+
+	if (is_nd_dax(dev)) {
+		struct nd_dax *nd_dax = to_nd_dax(dev);
+
+		return &nd_dax->nd_pfn;
+	}
+
+	WARN_ON(1);
+	return NULL;
+}
+
 static ssize_t mode_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 
 	switch (nd_pfn->mode) {
 	case PFN_MODE_RAM:
@@ -71,7 +91,7 @@ static ssize_t mode_show(struct device *dev,
 static ssize_t mode_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc = 0;
 
 	device_lock(dev);
@@ -105,7 +125,7 @@ static DEVICE_ATTR_RW(mode);
 static ssize_t align_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 
 	return sprintf(buf, "%lx\n", nd_pfn->align);
 }
@@ -133,7 +153,7 @@ static ssize_t __align_store(struct nd_pfn *nd_pfn, const char *buf)
 static ssize_t align_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc;
 
 	device_lock(dev);
@@ -151,7 +171,7 @@ static DEVICE_ATTR_RW(align);
 static ssize_t uuid_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 
 	if (nd_pfn->uuid)
 		return sprintf(buf, "%pUb\n", nd_pfn->uuid);
@@ -161,7 +181,7 @@ static ssize_t uuid_show(struct device *dev,
 static ssize_t uuid_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc;
 
 	device_lock(dev);
@@ -177,7 +197,7 @@ static DEVICE_ATTR_RW(uuid);
 static ssize_t namespace_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc;
 
 	nvdimm_bus_lock(dev);
@@ -190,7 +210,7 @@ static ssize_t namespace_show(struct device *dev,
 static ssize_t namespace_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc;
 
 	device_lock(dev);
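The new to_nd_pfn_safe() helper above exists because the same attribute group is now shared between pfn and dax device instances, so an attribute callback can no longer assume its struct device is embedded directly in an nd_pfn. Below is a minimal, standalone sketch of the container_of pattern this relies on; the struct layouts here are stand-ins for the kernel types, not the real definitions.

```c
/* Userspace sketch of the container_of conversion behind to_nd_pfn();
 * 'device', 'nd_pfn' and 'nd_dax' are simplified stand-ins. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { const char *name; };
struct nd_pfn { struct device dev; unsigned long align; };
struct nd_dax { struct nd_pfn nd_pfn; };	/* dax embeds a pfn */

/* mirrors to_nd_pfn(): recover the containing nd_pfn from its dev */
static struct nd_pfn *to_nd_pfn(struct device *dev)
{
	return container_of(dev, struct nd_pfn, dev);
}

int main(void)
{
	struct nd_dax dax = {
		.nd_pfn = { .dev = { "dax0.0" }, .align = 2097152 },
	};
	/* the dev handed to an attribute callback may belong to either
	 * device type; to_nd_pfn_safe() checks which before converting */
	struct nd_pfn *pfn = to_nd_pfn(&dax.nd_pfn.dev);

	printf("%s align=%lu\n", pfn->dev.name, pfn->align);
	return 0;
}
```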
@@ -208,7 +228,7 @@ static DEVICE_ATTR_RW(namespace);
 static ssize_t resource_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc;
 
 	device_lock(dev);
@@ -234,7 +254,7 @@ static DEVICE_ATTR_RO(resource);
 static ssize_t size_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
-	struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+	struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
 	ssize_t rc;
 
 	device_lock(dev);
@@ -269,7 +289,7 @@ static struct attribute *nd_pfn_attributes[] = {
 	NULL,
 };
 
-static struct attribute_group nd_pfn_attribute_group = {
+struct attribute_group nd_pfn_attribute_group = {
 	.attrs = nd_pfn_attributes,
 };
 
@@ -280,16 +300,32 @@ static const struct attribute_group *nd_pfn_attribute_groups[] = {
 	NULL,
 };
 
-static struct device *__nd_pfn_create(struct nd_region *nd_region,
+struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
 		struct nd_namespace_common *ndns)
 {
-	struct nd_pfn *nd_pfn;
-	struct device *dev;
+	struct device *dev = &nd_pfn->dev;
 
-	/* we can only create pages for contiguous ranged of pmem */
-	if (!is_nd_pmem(&nd_region->dev))
+	if (!nd_pfn)
 		return NULL;
 
+	nd_pfn->mode = PFN_MODE_NONE;
+	nd_pfn->align = HPAGE_SIZE;
+	dev = &nd_pfn->dev;
+	device_initialize(&nd_pfn->dev);
+	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
+		dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
+				__func__, dev_name(ndns->claim));
+		put_device(dev);
+		return NULL;
+	}
+	return dev;
+}
+
+static struct nd_pfn *nd_pfn_alloc(struct nd_region *nd_region)
+{
+	struct nd_pfn *nd_pfn;
+	struct device *dev;
+
 	nd_pfn = kzalloc(sizeof(*nd_pfn), GFP_KERNEL);
 	if (!nd_pfn)
 		return NULL;
@@ -300,29 +336,27 @@ static struct device *__nd_pfn_create(struct nd_region *nd_region,
 		return NULL;
 	}
 
-	nd_pfn->mode = PFN_MODE_NONE;
-	nd_pfn->align = HPAGE_SIZE;
 	dev = &nd_pfn->dev;
 	dev_set_name(dev, "pfn%d.%d", nd_region->id, nd_pfn->id);
-	dev->parent = &nd_region->dev;
-	dev->type = &nd_pfn_device_type;
 	dev->groups = nd_pfn_attribute_groups;
-	device_initialize(&nd_pfn->dev);
-	if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
-		dev_dbg(&ndns->dev, "%s failed, already claimed by %s\n",
-				__func__, dev_name(ndns->claim));
-		put_device(dev);
-		return NULL;
-	}
-	return dev;
+	dev->type = &nd_pfn_device_type;
+	dev->parent = &nd_region->dev;
+
+	return nd_pfn;
 }
 
 struct device *nd_pfn_create(struct nd_region *nd_region)
 {
-	struct device *dev = __nd_pfn_create(nd_region, NULL);
+	struct nd_pfn *nd_pfn;
+	struct device *dev;
+
+	if (!is_nd_pmem(&nd_region->dev))
+		return NULL;
 
-	if (dev)
-		__nd_device_register(dev);
+	nd_pfn = nd_pfn_alloc(nd_region);
+	dev = nd_pfn_devinit(nd_pfn, NULL);
+
+	__nd_device_register(dev);
 	return dev;
 }
 
@@ -360,6 +394,9 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
 		pfn_sb->end_trunc = 0;
 	}
 
+	if (__le16_to_cpu(pfn_sb->version_minor) < 2)
+		pfn_sb->align = 0;
+
 	switch (le32_to_cpu(pfn_sb->mode)) {
 	case PFN_MODE_RAM:
 	case PFN_MODE_PMEM:
@@ -399,7 +436,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
 		return -EBUSY;
 	}
 
-	nd_pfn->align = 1UL << ilog2(offset);
+	nd_pfn->align = le32_to_cpu(pfn_sb->align);
 	if (!is_power_of_2(offset) || offset < PAGE_SIZE) {
 		dev_err(&nd_pfn->dev, "bad offset: %#llx dax disabled\n",
 				offset);
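The two nd_pfn_validate() hunks above replace the old guess of alignment from the data offset with an explicit on-media 'align' field, gated on the info block's minor version: blocks older than v1.2 never wrote that field, so it is forced to 0 rather than trusted. A small standalone sketch of this read-side versioning pattern follows; the pfn_sb layout is a trimmed-down stand-in, not the kernel's struct nd_pfn_sb.

```c
/* Sketch of a version-gated superblock field read, after the pattern
 * in nd_pfn_validate(); simplified, host-endian stand-in struct. */
#include <stdint.h>
#include <stdio.h>

struct pfn_sb {
	uint16_t version_minor;	/* little-endian on media */
	uint32_t align;		/* only written by minor >= 2 */
};

static uint32_t effective_align(const struct pfn_sb *sb)
{
	/* pre-v1.2 info blocks never wrote 'align': treat it as zero
	 * instead of trusting stale bytes in the reserved area */
	if (sb->version_minor < 2)
		return 0;
	return sb->align;
}

int main(void)
{
	struct pfn_sb old = { .version_minor = 1, .align = 0xdeadbeef };
	struct pfn_sb cur = { .version_minor = 2, .align = 0x200000 };

	printf("v1.1 align: %#x\n", (unsigned)effective_align(&old)); /* 0 */
	printf("v1.2 align: %#x\n", (unsigned)effective_align(&cur)); /* 2M */
	return 0;
}
```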
@@ -410,11 +447,11 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn)
 }
 EXPORT_SYMBOL(nd_pfn_validate);
 
-int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
+int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
 {
 	int rc;
-	struct device *dev;
 	struct nd_pfn *nd_pfn;
+	struct device *pfn_dev;
 	struct nd_pfn_sb *pfn_sb;
 	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
 
@@ -422,25 +459,213 @@ int nd_pfn_probe(struct nd_namespace_common *ndns, void *drvdata)
 		return -ENODEV;
 
 	nvdimm_bus_lock(&ndns->dev);
-	dev = __nd_pfn_create(nd_region, ndns);
+	nd_pfn = nd_pfn_alloc(nd_region);
+	pfn_dev = nd_pfn_devinit(nd_pfn, ndns);
 	nvdimm_bus_unlock(&ndns->dev);
-	if (!dev)
+	if (!pfn_dev)
 		return -ENOMEM;
-	dev_set_drvdata(dev, drvdata);
-	pfn_sb = kzalloc(sizeof(*pfn_sb), GFP_KERNEL);
-	nd_pfn = to_nd_pfn(dev);
+	pfn_sb = devm_kzalloc(dev, sizeof(*pfn_sb), GFP_KERNEL);
+	nd_pfn = to_nd_pfn(pfn_dev);
 	nd_pfn->pfn_sb = pfn_sb;
 	rc = nd_pfn_validate(nd_pfn);
-	nd_pfn->pfn_sb = NULL;
-	kfree(pfn_sb);
-	dev_dbg(&ndns->dev, "%s: pfn: %s\n", __func__,
-			rc == 0 ? dev_name(dev) : "<none>");
+	dev_dbg(dev, "%s: pfn: %s\n", __func__,
+			rc == 0 ? dev_name(pfn_dev) : "<none>");
 	if (rc < 0) {
-		__nd_detach_ndns(dev, &nd_pfn->ndns);
-		put_device(dev);
+		__nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
+		put_device(pfn_dev);
 	} else
-		__nd_device_register(&nd_pfn->dev);
+		__nd_device_register(pfn_dev);
 
 	return rc;
 }
 EXPORT_SYMBOL(nd_pfn_probe);
+
+/*
+ * We hotplug memory at section granularity, pad the reserved area from
+ * the previous section base to the namespace base address.
+ */
+static unsigned long init_altmap_base(resource_size_t base)
+{
+	unsigned long base_pfn = PHYS_PFN(base);
+
+	return PFN_SECTION_ALIGN_DOWN(base_pfn);
+}
+
+static unsigned long init_altmap_reserve(resource_size_t base)
+{
+	unsigned long reserve = PHYS_PFN(SZ_8K);
+	unsigned long base_pfn = PHYS_PFN(base);
+
+	reserve += base_pfn - PFN_SECTION_ALIGN_DOWN(base_pfn);
+	return reserve;
+}
+
+static struct vmem_altmap *__nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap)
+{
+	struct nd_pfn_sb *pfn_sb = nd_pfn->pfn_sb;
+	u64 offset = le64_to_cpu(pfn_sb->dataoff);
+	u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
+	u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
+	resource_size_t base = nsio->res.start + start_pad;
+	struct vmem_altmap __altmap = {
+		.base_pfn = init_altmap_base(base),
+		.reserve = init_altmap_reserve(base),
+	};
+
+	memcpy(res, &nsio->res, sizeof(*res));
+	res->start += start_pad;
+	res->end -= end_trunc;
+
+	nd_pfn->mode = le32_to_cpu(nd_pfn->pfn_sb->mode);
+	if (nd_pfn->mode == PFN_MODE_RAM) {
+		if (offset < SZ_8K)
+			return ERR_PTR(-EINVAL);
+		nd_pfn->npfns = le64_to_cpu(pfn_sb->npfns);
+		altmap = NULL;
+	} else if (nd_pfn->mode == PFN_MODE_PMEM) {
+		nd_pfn->npfns = (resource_size(res) - offset) / PAGE_SIZE;
+		if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
+			dev_info(&nd_pfn->dev,
+					"number of pfns truncated from %lld to %ld\n",
+					le64_to_cpu(nd_pfn->pfn_sb->npfns),
+					nd_pfn->npfns);
+		memcpy(altmap, &__altmap, sizeof(*altmap));
+		altmap->free = PHYS_PFN(offset - SZ_8K);
+		altmap->alloc = 0;
+	} else
+		return ERR_PTR(-ENXIO);
+
+	return altmap;
+}
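init_altmap_reserve() above sizes the head of the pmem range that must stay off-limits to struct page allocation: 8K for the info block, plus however many pfns separate the namespace base from the memory section boundary below it (memory is hotplugged at section granularity). A standalone worked example of that arithmetic follows, assuming 4K pages and 128M sections (typical x86_64 values; the diff itself does not fix these constants) and a hypothetical namespace base.

```c
/* Worked example of init_altmap_base()/init_altmap_reserve(),
 * assuming 4K pages and 128M memory sections. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define SECTION_SHIFT	27			/* 128M sections */
#define SZ_8K		0x2000ULL

#define PHYS_PFN(x)	((x) >> PAGE_SHIFT)
#define SECTION_ALIGN_DOWN(pfn) \
	((pfn) & ~((1ULL << (SECTION_SHIFT - PAGE_SHIFT)) - 1))

int main(void)
{
	/* hypothetical namespace base, 16M past a section boundary */
	uint64_t base = 0x240000000ULL + 0x1000000ULL;
	uint64_t base_pfn = PHYS_PFN(base);

	/* the altmap starts at the section base below the namespace... */
	uint64_t altmap_base = SECTION_ALIGN_DOWN(base_pfn);
	/* ...and reserves the 8K info block plus the pad back to it */
	uint64_t reserve = PHYS_PFN(SZ_8K) + (base_pfn - altmap_base);

	printf("base_pfn=%#llx altmap_base=%#llx reserve=%llu pfns\n",
			(unsigned long long)base_pfn,
			(unsigned long long)altmap_base,
			(unsigned long long)reserve);	/* 4098 pfns */
	return 0;
}
```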
+
+static int nd_pfn_init(struct nd_pfn *nd_pfn)
+{
+	u32 dax_label_reserve = is_nd_dax(&nd_pfn->dev) ? SZ_128K : 0;
+	struct nd_namespace_common *ndns = nd_pfn->ndns;
+	u32 start_pad = 0, end_trunc = 0;
+	resource_size_t start, size;
+	struct nd_namespace_io *nsio;
+	struct nd_region *nd_region;
+	struct nd_pfn_sb *pfn_sb;
+	unsigned long npfns;
+	phys_addr_t offset;
+	u64 checksum;
+	int rc;
+
+	pfn_sb = devm_kzalloc(&nd_pfn->dev, sizeof(*pfn_sb), GFP_KERNEL);
+	if (!pfn_sb)
+		return -ENOMEM;
+
+	nd_pfn->pfn_sb = pfn_sb;
+	rc = nd_pfn_validate(nd_pfn);
+	if (rc != -ENODEV)
+		return rc;
+
+	/* no info block, do init */;
+	nd_region = to_nd_region(nd_pfn->dev.parent);
+	if (nd_region->ro) {
+		dev_info(&nd_pfn->dev,
+				"%s is read-only, unable to init metadata\n",
+				dev_name(&nd_region->dev));
+		return -ENXIO;
+	}
+
+	memset(pfn_sb, 0, sizeof(*pfn_sb));
+
+	/*
+	 * Check if pmem collides with 'System RAM' when section aligned and
+	 * trim it accordingly
+	 */
+	nsio = to_nd_namespace_io(&ndns->dev);
+	start = PHYS_SECTION_ALIGN_DOWN(nsio->res.start);
+	size = resource_size(&nsio->res);
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+		start = nsio->res.start;
+		start_pad = PHYS_SECTION_ALIGN_UP(start) - start;
+	}
+
+	start = nsio->res.start;
+	size = PHYS_SECTION_ALIGN_UP(start + size) - start;
+	if (region_intersects(start, size, IORESOURCE_SYSTEM_RAM,
+				IORES_DESC_NONE) == REGION_MIXED) {
+		size = resource_size(&nsio->res);
+		end_trunc = start + size - PHYS_SECTION_ALIGN_DOWN(start + size);
+	}
+
+	if (start_pad + end_trunc)
+		dev_info(&nd_pfn->dev, "%s section collision, truncate %d bytes\n",
+				dev_name(&ndns->dev), start_pad + end_trunc);
+
+	/*
+	 * Note, we use 64 here for the standard size of struct page,
+	 * debugging options may cause it to be larger in which case the
+	 * implementation will limit the pfns advertised through
+	 * ->direct_access() to those that are included in the memmap.
+	 */
+	start += start_pad;
+	size = resource_size(&nsio->res);
+	npfns = (size - start_pad - end_trunc - SZ_8K) / SZ_4K;
+	if (nd_pfn->mode == PFN_MODE_PMEM) {
+		unsigned long memmap_size;
+
+		/*
+		 * vmemmap_populate_hugepages() allocates the memmap array in
+		 * HPAGE_SIZE chunks.
+		 */
+		memmap_size = ALIGN(64 * npfns, HPAGE_SIZE);
+		offset = ALIGN(start + SZ_8K + memmap_size + dax_label_reserve,
+				nd_pfn->align) - start;
+	} else if (nd_pfn->mode == PFN_MODE_RAM)
+		offset = ALIGN(start + SZ_8K + dax_label_reserve,
+				nd_pfn->align) - start;
+	else
+		return -ENXIO;
+
+	if (offset + start_pad + end_trunc >= size) {
+		dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
+				dev_name(&ndns->dev));
+		return -ENXIO;
+	}
+
+	npfns = (size - offset - start_pad - end_trunc) / SZ_4K;
+	pfn_sb->mode = cpu_to_le32(nd_pfn->mode);
+	pfn_sb->dataoff = cpu_to_le64(offset);
+	pfn_sb->npfns = cpu_to_le64(npfns);
+	memcpy(pfn_sb->signature, PFN_SIG, PFN_SIG_LEN);
+	memcpy(pfn_sb->uuid, nd_pfn->uuid, 16);
+	memcpy(pfn_sb->parent_uuid, nd_dev_to_uuid(&ndns->dev), 16);
+	pfn_sb->version_major = cpu_to_le16(1);
+	pfn_sb->version_minor = cpu_to_le16(2);
+	pfn_sb->start_pad = cpu_to_le32(start_pad);
+	pfn_sb->end_trunc = cpu_to_le32(end_trunc);
+	pfn_sb->align = cpu_to_le32(nd_pfn->align);
+	checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
+	pfn_sb->checksum = cpu_to_le64(checksum);
+
+	return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb));
+}
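In PFN_MODE_PMEM the data offset computed by nd_pfn_init() above must cover the 8K info block plus one 64-byte struct page per 4K pfn, all rounded up to the requested alignment. The standalone example below works those numbers through for a hypothetical 16G namespace with 2M (HPAGE_SIZE) alignment, a 2M-aligned start, and no section padding; none of these concrete values come from the diff.

```c
/* Worked example of the nd_pfn_init() offset math for PFN_MODE_PMEM,
 * assuming a 16G namespace, 2M alignment, 64-byte struct page. */
#include <stdint.h>
#include <stdio.h>

#define SZ_4K	0x1000ULL
#define SZ_8K	0x2000ULL
#define SZ_2M	0x200000ULL

static uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);	/* a must be a power of two */
}

int main(void)
{
	uint64_t size = 16ULL << 30;	/* 16G namespace */
	uint64_t npfns = (size - SZ_8K) / SZ_4K;

	/* memmap array, allocated in HPAGE_SIZE chunks */
	uint64_t memmap_size = align_up(64 * npfns, SZ_2M);

	/* info block + memmap, rounded up to the data alignment
	 * (start assumed 2M aligned, start_pad/end_trunc assumed 0) */
	uint64_t offset = align_up(SZ_8K + memmap_size, SZ_2M);

	/* pfns actually advertised once the reservation is carved out */
	uint64_t final_npfns = (size - offset) / SZ_4K;

	printf("memmap=%lluM offset=%lluM npfns=%llu\n",
			(unsigned long long)(memmap_size >> 20),
			(unsigned long long)(offset >> 20),
			(unsigned long long)final_npfns);
	/* -> memmap=256M offset=258M npfns=4128256 */
	return 0;
}
```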
+
+/*
+ * Determine the effective resource range and vmem_altmap from an nd_pfn
+ * instance.
+ */
+struct vmem_altmap *nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
+		struct resource *res, struct vmem_altmap *altmap)
+{
+	int rc;
+
+	if (!nd_pfn->uuid || !nd_pfn->ndns)
+		return ERR_PTR(-ENODEV);
+
+	rc = nd_pfn_init(nd_pfn);
+	if (rc)
+		return ERR_PTR(rc);
+
+	/* we need a valid pfn_sb before we can init a vmem_altmap */
+	return __nvdimm_setup_pfn(nd_pfn, res, altmap);
+}
+EXPORT_SYMBOL_GPL(nvdimm_setup_pfn);
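nvdimm_setup_pfn() is the entry point a pmem-style driver calls to obtain the trimmed resource range and altmap before mapping the namespace. A rough sketch of the expected call sequence follows, loosely modeled on the pmem driver of this era; pmem_attach_pfn() and its reduced error handling are invented for illustration.

```c
/* Hedged sketch of consuming nvdimm_setup_pfn(); not the actual
 * pmem.c code, just the shape of the call sequence. */
static int pmem_attach_pfn(struct device *dev, struct nd_pfn *nd_pfn,
		void **virt_addr)
{
	struct vmem_altmap __altmap, *altmap;
	struct resource pfn_res;

	/* validate/initialize the info block, derive range + altmap */
	altmap = nvdimm_setup_pfn(nd_pfn, &pfn_res, &__altmap);
	if (IS_ERR(altmap))
		return PTR_ERR(altmap);

	/* map the namespace; in PFN_MODE_PMEM the struct page array
	 * itself lands in the reserved head of the pmem range */
	*virt_addr = devm_memremap_pages(dev, &pfn_res, NULL, altmap);
	if (IS_ERR(*virt_addr))
		return PTR_ERR(*virt_addr);

	return 0;
}
```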