author    | Dan Williams <dan.j.williams@intel.com> | 2019-11-07 06:56:46 +0300
committer | Dan Williams <dan.j.williams@intel.com> | 2019-11-17 20:17:38 +0300
commit    | cb719d5fefc508bc80c25ac6c875a4fe6240ba73 (patch)
tree      | 7f718dfba8b2fc1c1dfa6362169ae390f75ffb2b /drivers/nvdimm
parent    | 78c81cc89a40114d09a5ec0693cfd97831ffbe79 (diff)
download  | linux-cb719d5fefc508bc80c25ac6c875a4fe6240ba73.tar.xz
libnvdimm: Move region attribute group definition
In preparation for moving region attributes from device attribute groups
to the region device-type, reorder the declaration so that it can be
referenced by the device-type definition without forward declarations.
No functional changes are intended to result from this change.
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Link: https://lore.kernel.org/r/157309900624.1582359.6929998072035982264.stgit@dwillia2-desk3.amr.corp.intel.com
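As the changelog above notes, the reordering matters because a C static initializer can only reference an object whose definition (or at least a declaration) appears earlier in the translation unit. The sketch below is illustrative only and not part of this patch: it assumes a follow-up change that wires the region attributes into the device-type via the existing struct device_type .groups pointer, and the array name nd_region_attribute_groups is hypothetical.

/*
 * Hypothetical follow-up sketch: with nd_region_attribute_group now defined
 * earlier in the file, a device_type can reference it directly.
 */
static const struct attribute_group *nd_region_attribute_groups[] = {
	&nd_region_attribute_group,
	NULL,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.groups = nd_region_attribute_groups,	/* no forward declaration needed */
	.release = nd_region_release,
};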
Diffstat (limited to 'drivers/nvdimm')
-rw-r--r-- | drivers/nvdimm/region_devs.c | 208
1 file changed, 104 insertions(+), 104 deletions(-)
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index ef423ba1a711..e89f2eb3678c 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -140,36 +140,6 @@ static void nd_region_release(struct device *dev)
 	kfree(nd_region);
 }
 
-static struct device_type nd_blk_device_type = {
-	.name = "nd_blk",
-	.release = nd_region_release,
-};
-
-static struct device_type nd_pmem_device_type = {
-	.name = "nd_pmem",
-	.release = nd_region_release,
-};
-
-static struct device_type nd_volatile_device_type = {
-	.name = "nd_volatile",
-	.release = nd_region_release,
-};
-
-bool is_nd_pmem(struct device *dev)
-{
-	return dev ? dev->type == &nd_pmem_device_type : false;
-}
-
-bool is_nd_blk(struct device *dev)
-{
-	return dev ? dev->type == &nd_blk_device_type : false;
-}
-
-bool is_nd_volatile(struct device *dev)
-{
-	return dev ? dev->type == &nd_volatile_device_type : false;
-}
-
 struct nd_region *to_nd_region(struct device *dev)
 {
 	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);
@@ -674,80 +644,6 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
 	return 0;
 }
 
-struct attribute_group nd_region_attribute_group = {
-	.attrs = nd_region_attributes,
-	.is_visible = region_visible,
-};
-EXPORT_SYMBOL_GPL(nd_region_attribute_group);
-
-u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
-		struct nd_namespace_index *nsindex)
-{
-	struct nd_interleave_set *nd_set = nd_region->nd_set;
-
-	if (!nd_set)
-		return 0;
-
-	if (nsindex && __le16_to_cpu(nsindex->major) == 1
-			&& __le16_to_cpu(nsindex->minor) == 1)
-		return nd_set->cookie1;
-	return nd_set->cookie2;
-}
-
-u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
-{
-	struct nd_interleave_set *nd_set = nd_region->nd_set;
-
-	if (nd_set)
-		return nd_set->altcookie;
-	return 0;
-}
-
-void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
-{
-	struct nd_label_ent *label_ent, *e;
-
-	lockdep_assert_held(&nd_mapping->lock);
-	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
-		list_del(&label_ent->list);
-		kfree(label_ent);
-	}
-}
-
-/*
- * When a namespace is activated create new seeds for the next
- * namespace, or namespace-personality to be configured.
- */
-void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
-{
-	nvdimm_bus_lock(dev);
-	if (nd_region->ns_seed == dev) {
-		nd_region_create_ns_seed(nd_region);
-	} else if (is_nd_btt(dev)) {
-		struct nd_btt *nd_btt = to_nd_btt(dev);
-
-		if (nd_region->btt_seed == dev)
-			nd_region_create_btt_seed(nd_region);
-		if (nd_region->ns_seed == &nd_btt->ndns->dev)
-			nd_region_create_ns_seed(nd_region);
-	} else if (is_nd_pfn(dev)) {
-		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
-
-		if (nd_region->pfn_seed == dev)
-			nd_region_create_pfn_seed(nd_region);
-		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
-			nd_region_create_ns_seed(nd_region);
-	} else if (is_nd_dax(dev)) {
-		struct nd_dax *nd_dax = to_nd_dax(dev);
-
-		if (nd_region->dax_seed == dev)
-			nd_region_create_dax_seed(nd_region);
-		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
-			nd_region_create_ns_seed(nd_region);
-	}
-	nvdimm_bus_unlock(dev);
-}
-
 static ssize_t mappingN(struct device *dev, char *buf, int n)
 {
 	struct nd_region *nd_region = to_nd_region(dev);
@@ -861,6 +757,110 @@ struct attribute_group nd_mapping_attribute_group = {
 };
 EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);
 
+struct attribute_group nd_region_attribute_group = {
+	.attrs = nd_region_attributes,
+	.is_visible = region_visible,
+};
+EXPORT_SYMBOL_GPL(nd_region_attribute_group);
+
+static struct device_type nd_blk_device_type = {
+	.name = "nd_blk",
+	.release = nd_region_release,
+};
+
+static struct device_type nd_pmem_device_type = {
+	.name = "nd_pmem",
+	.release = nd_region_release,
+};
+
+static struct device_type nd_volatile_device_type = {
+	.name = "nd_volatile",
+	.release = nd_region_release,
+};
+
+bool is_nd_pmem(struct device *dev)
+{
+	return dev ? dev->type == &nd_pmem_device_type : false;
+}
+
+bool is_nd_blk(struct device *dev)
+{
+	return dev ? dev->type == &nd_blk_device_type : false;
+}
+
+bool is_nd_volatile(struct device *dev)
+{
+	return dev ? dev->type == &nd_volatile_device_type : false;
+}
+
+u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
+		struct nd_namespace_index *nsindex)
+{
+	struct nd_interleave_set *nd_set = nd_region->nd_set;
+
+	if (!nd_set)
+		return 0;
+
+	if (nsindex && __le16_to_cpu(nsindex->major) == 1
+			&& __le16_to_cpu(nsindex->minor) == 1)
+		return nd_set->cookie1;
+	return nd_set->cookie2;
+}
+
+u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
+{
+	struct nd_interleave_set *nd_set = nd_region->nd_set;
+
+	if (nd_set)
+		return nd_set->altcookie;
+	return 0;
+}
+
+void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
+{
+	struct nd_label_ent *label_ent, *e;
+
+	lockdep_assert_held(&nd_mapping->lock);
+	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
+		list_del(&label_ent->list);
+		kfree(label_ent);
+	}
+}
+
+/*
+ * When a namespace is activated create new seeds for the next
+ * namespace, or namespace-personality to be configured.
+ */
+void nd_region_advance_seeds(struct nd_region *nd_region, struct device *dev)
+{
+	nvdimm_bus_lock(dev);
+	if (nd_region->ns_seed == dev) {
+		nd_region_create_ns_seed(nd_region);
+	} else if (is_nd_btt(dev)) {
+		struct nd_btt *nd_btt = to_nd_btt(dev);
+
+		if (nd_region->btt_seed == dev)
+			nd_region_create_btt_seed(nd_region);
+		if (nd_region->ns_seed == &nd_btt->ndns->dev)
+			nd_region_create_ns_seed(nd_region);
+	} else if (is_nd_pfn(dev)) {
+		struct nd_pfn *nd_pfn = to_nd_pfn(dev);
+
+		if (nd_region->pfn_seed == dev)
+			nd_region_create_pfn_seed(nd_region);
+		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
+			nd_region_create_ns_seed(nd_region);
+	} else if (is_nd_dax(dev)) {
+		struct nd_dax *nd_dax = to_nd_dax(dev);
+
+		if (nd_region->dax_seed == dev)
+			nd_region_create_dax_seed(nd_region);
+		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
+			nd_region_create_ns_seed(nd_region);
+	}
+	nvdimm_bus_unlock(dev);
+}
+
 int nd_blk_region_init(struct nd_region *nd_region)
 {
 	struct device *dev = &nd_region->dev;