author | Jiri Kosina <jkosina@suse.cz> | 2017-09-05 12:07:05 +0300
---|---|---
committer | Jiri Kosina <jkosina@suse.cz> | 2017-09-05 12:07:05 +0300
commit | b11918bdbe79bd002d00a9f1d78958167ccfad99 (patch) |
tree | 931c5680a242041f49b07b20823012ae9bdb98c2 /drivers |
parent | d3c7ad2432115b0b53fb838c14b8ad9ad72f7254 (diff) |
parent | 0152b29c89650654abf4f0e96bbf2566b85ae55d (diff) |
download | linux-b11918bdbe79bd002d00a9f1d78958167ccfad99.tar.xz |
Merge branch 'for-4.14/battery' into for-linus
- support for batteries driven by HID input reports, from Dmitry Torokhov
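The series being merged teaches HID to expose battery strength carried in input reports through the power_supply subsystem. As rough orientation only — this is a hedged sketch of the general power_supply pattern, not the code from this merge, and every `demo_battery_*` name is invented:

```c
/*
 * Hedged sketch: a battery whose state-of-charge arrives in driver
 * events (e.g. parsed from input reports) exposed as a power_supply.
 * All demo_battery_* identifiers are illustrative, not mainline code.
 */
#include <linux/kernel.h>
#include <linux/power_supply.h>

struct demo_battery {
	struct power_supply *ps;
	int capacity;	/* last strength value parsed from a report */
};

static enum power_supply_property demo_battery_props[] = {
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_CAPACITY,
};

static int demo_battery_get_property(struct power_supply *psy,
				     enum power_supply_property prop,
				     union power_supply_propval *val)
{
	struct demo_battery *bat = power_supply_get_drvdata(psy);

	switch (prop) {
	case POWER_SUPPLY_PROP_PRESENT:
		val->intval = 1;
		break;
	case POWER_SUPPLY_PROP_CAPACITY:
		/* Report the cached value; no device I/O in the getter. */
		val->intval = bat->capacity;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static const struct power_supply_desc demo_battery_desc = {
	.name		= "demo_battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= demo_battery_props,
	.num_properties	= ARRAY_SIZE(demo_battery_props),
	.get_property	= demo_battery_get_property,
};

/* Called from the driver's report handler with the parsed value. */
static void demo_battery_update(struct demo_battery *bat, int strength)
{
	bat->capacity = strength;
	power_supply_changed(bat->ps);	/* wake up userspace pollers */
}
```

Registration would go through power_supply_register() with a struct power_supply_config whose .drv_data points at the demo_battery instance; that is what makes power_supply_get_drvdata() work inside the getter.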
Diffstat (limited to 'drivers')
655 files changed, 30242 insertions, 10689 deletions
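A recurring theme in the drivers/acpi and drivers/base hunks below is converting firmware-node property lookups from per-backend `if (is_of_node(...)) ... else if (is_acpi_node(...))` chains into a `fwnode_operations` vtable dispatched through `fwnode_call_*_op()` helpers. A minimal, self-contained sketch of that dispatch shape, with invented `demo_*` names rather than the kernel's real types:

```c
/*
 * Hedged sketch of the ops-vtable dispatch the property.c hunks below
 * adopt; demo_* names are invented for illustration only.
 */
#include <stdbool.h>
#include <stddef.h>

struct demo_fwnode;

struct demo_fwnode_operations {
	bool (*property_present)(const struct demo_fwnode *node,
				 const char *propname);
	int (*property_read_int_array)(const struct demo_fwnode *node,
				       const char *propname,
				       unsigned int elem_size,
				       void *val, size_t nval);
};

/* Every firmware node carries a pointer to its backend's ops table. */
struct demo_fwnode {
	const struct demo_fwnode_operations *ops;
};

/* One backend fills in its table once... */
static bool acpi_like_present(const struct demo_fwnode *node,
			      const char *propname)
{
	(void)node;
	(void)propname;
	return true;	/* stand-in for a real ACPI _DSD lookup */
}

static const struct demo_fwnode_operations acpi_like_ops = {
	.property_present = acpi_like_present,
};

/* ...and the generic entry point shrinks to a NULL-checked call. */
static bool demo_property_present(const struct demo_fwnode *node,
				  const char *propname)
{
	if (!node || !node->ops || !node->ops->property_present)
		return false;
	return node->ops->property_present(node, propname);
}
```

Each backend (OF, ACPI, the pset-backed built-in properties) supplies one such table, which is what the kernel's `fwnode_call_bool_op()`/`fwnode_call_int_op()`/`fwnode_call_ptr_op()` helpers in the hunks below encapsulate.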
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 10347e3d73ad..e51a1e98e62f 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -576,7 +576,7 @@ static struct attribute *lpss_attrs[] = { NULL, }; -static struct attribute_group lpss_attr_group = { +static const struct attribute_group lpss_attr_group = { .attrs = lpss_attrs, .name = "lpss_ltr", }; diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index d048f72c23f8..a3215ee671c1 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -31,6 +31,11 @@ #define IORT_IOMMU_TYPE ((1 << ACPI_IORT_NODE_SMMU) | \ (1 << ACPI_IORT_NODE_SMMU_V3)) +/* Until ACPICA headers cover IORT rev. C */ +#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX +#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x2 +#endif + struct iort_its_msi_chip { struct list_head list; struct fwnode_handle *fw_node; @@ -819,6 +824,36 @@ static int __init arm_smmu_v3_count_resources(struct acpi_iort_node *node) return num_res; } +static bool arm_smmu_v3_is_combined_irq(struct acpi_iort_smmu_v3 *smmu) +{ + /* + * Cavium ThunderX2 implementation doesn't support a unique + * irq line. Use a single irq line for all the SMMUv3 interrupts. + */ + if (smmu->model != ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) + return false; + + /* + * ThunderX2 doesn't support MSIs from the SMMU, so we're checking + * SPI numbers here. + */ + return smmu->event_gsiv == smmu->pri_gsiv && + smmu->event_gsiv == smmu->gerr_gsiv && + smmu->event_gsiv == smmu->sync_gsiv; +} + +static unsigned long arm_smmu_v3_resource_size(struct acpi_iort_smmu_v3 *smmu) +{ + /* + * Override the size, for Cavium ThunderX2 implementation + * which doesn't support the page 1 SMMU register space. + */ + if (smmu->model == ACPI_IORT_SMMU_V3_CAVIUM_CN99XX) + return SZ_64K; + + return SZ_128K; +} + static void __init arm_smmu_v3_init_resources(struct resource *res, struct acpi_iort_node *node) { @@ -829,30 +864,38 @@ static void __init arm_smmu_v3_init_resources(struct resource *res, smmu = (struct acpi_iort_smmu_v3 *)node->node_data; res[num_res].start = smmu->base_address; - res[num_res].end = smmu->base_address + SZ_128K - 1; + res[num_res].end = smmu->base_address + + arm_smmu_v3_resource_size(smmu) - 1; res[num_res].flags = IORESOURCE_MEM; num_res++; + if (arm_smmu_v3_is_combined_irq(smmu)) { + if (smmu->event_gsiv) + acpi_iort_register_irq(smmu->event_gsiv, "combined", + ACPI_EDGE_SENSITIVE, + &res[num_res++]); + } else { - if (smmu->event_gsiv) - acpi_iort_register_irq(smmu->event_gsiv, "eventq", - ACPI_EDGE_SENSITIVE, - &res[num_res++]); - - if (smmu->pri_gsiv) - acpi_iort_register_irq(smmu->pri_gsiv, "priq", - ACPI_EDGE_SENSITIVE, - &res[num_res++]); - - if (smmu->gerr_gsiv) - acpi_iort_register_irq(smmu->gerr_gsiv, "gerror", - ACPI_EDGE_SENSITIVE, - &res[num_res++]); - - if (smmu->sync_gsiv) - acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync", - ACPI_EDGE_SENSITIVE, - &res[num_res++]); + if (smmu->event_gsiv) + acpi_iort_register_irq(smmu->event_gsiv, "eventq", + ACPI_EDGE_SENSITIVE, + &res[num_res++]); + + if (smmu->pri_gsiv) + acpi_iort_register_irq(smmu->pri_gsiv, "priq", + ACPI_EDGE_SENSITIVE, + &res[num_res++]); + + if (smmu->gerr_gsiv) + acpi_iort_register_irq(smmu->gerr_gsiv, "gerror", + ACPI_EDGE_SENSITIVE, + &res[num_res++]); + + if (smmu->sync_gsiv) + acpi_iort_register_irq(smmu->sync_gsiv, "cmdq-sync", + ACPI_EDGE_SENSITIVE, + &res[num_res++]); + } } static bool __init arm_smmu_v3_is_coherent(struct acpi_iort_node *node) diff --git a/drivers/acpi/bgrt.c
b/drivers/acpi/bgrt.c index df1c629205e7..75af78361ce5 100644 --- a/drivers/acpi/bgrt.c +++ b/drivers/acpi/bgrt.c @@ -76,7 +76,7 @@ static struct bin_attribute *bgrt_bin_attributes[] = { NULL, }; -static struct attribute_group bgrt_attribute_group = { +static const struct attribute_group bgrt_attribute_group = { .attrs = bgrt_attributes, .bin_attrs = bgrt_bin_attributes, }; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 5a6fbe0fcaf2..af74b420ec83 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -409,11 +409,15 @@ static void acpi_bus_notify(acpi_handle handle, u32 type, void *data) (driver->flags & ACPI_DRIVER_ALL_NOTIFY_EVENTS)) driver->ops.notify(adev, type); - if (hotplug_event && ACPI_SUCCESS(acpi_hotplug_schedule(adev, type))) + if (!hotplug_event) { + acpi_bus_put_acpi_device(adev); + return; + } + + if (ACPI_SUCCESS(acpi_hotplug_schedule(adev, type))) return; acpi_bus_put_acpi_device(adev); - return; err: acpi_evaluate_ost(handle, type, ost_code, NULL); diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index 28938b5a334e..2ed6935d4483 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -262,8 +262,10 @@ int acpi_bus_init_power(struct acpi_device *device) return -EINVAL; device->power.state = ACPI_STATE_UNKNOWN; - if (!acpi_device_is_present(device)) + if (!acpi_device_is_present(device)) { + device->flags.initialized = false; return -ENXIO; + } result = acpi_device_get_power(device, &state); if (result) diff --git a/drivers/acpi/dptf/dptf_power.c b/drivers/acpi/dptf/dptf_power.c index 734642dc5008..e1c242568341 100644 --- a/drivers/acpi/dptf/dptf_power.c +++ b/drivers/acpi/dptf/dptf_power.c @@ -65,7 +65,7 @@ static struct attribute *dptf_power_attrs[] = { NULL }; -static struct attribute_group dptf_power_attribute_group = { +static const struct attribute_group dptf_power_attribute_group = { .attrs = dptf_power_attrs, .name = "dptf_power" }; diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 854d428e2a2d..ddb01e9fa5b2 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -147,7 +147,7 @@ static unsigned int ec_storm_threshold __read_mostly = 8; module_param(ec_storm_threshold, uint, 0644); MODULE_PARM_DESC(ec_storm_threshold, "Maximum false GPE numbers not considered as GPE storm"); -static bool ec_freeze_events __read_mostly = true; +static bool ec_freeze_events __read_mostly = false; module_param(ec_freeze_events, bool, 0644); MODULE_PARM_DESC(ec_freeze_events, "Disabling event handling during suspend/resume"); @@ -1870,24 +1870,6 @@ error: } #ifdef CONFIG_PM_SLEEP -static int acpi_ec_suspend_noirq(struct device *dev) -{ - struct acpi_ec *ec = - acpi_driver_data(to_acpi_device(dev)); - - acpi_ec_enter_noirq(ec); - return 0; -} - -static int acpi_ec_resume_noirq(struct device *dev) -{ - struct acpi_ec *ec = - acpi_driver_data(to_acpi_device(dev)); - - acpi_ec_leave_noirq(ec); - return 0; -} - static int acpi_ec_suspend(struct device *dev) { struct acpi_ec *ec = @@ -1909,7 +1891,6 @@ static int acpi_ec_resume(struct device *dev) #endif static const struct dev_pm_ops acpi_ec_pm = { - SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend_noirq, acpi_ec_resume_noirq) SET_SYSTEM_SLEEP_PM_OPS(acpi_ec_suspend, acpi_ec_resume) }; diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h index be79f7db1850..9531d3276f65 100644 --- a/drivers/acpi/internal.h +++ b/drivers/acpi/internal.h @@ -111,7 +111,7 @@ int acpi_device_setup_files(struct acpi_device *dev); void acpi_device_remove_files(struct acpi_device *dev); void
acpi_device_add_finalize(struct acpi_device *device); void acpi_free_pnp_ids(struct acpi_device_pnp *pnp); -bool acpi_device_is_present(struct acpi_device *adev); +bool acpi_device_is_present(const struct acpi_device *adev); bool acpi_device_is_battery(struct acpi_device *adev); bool acpi_device_is_first_physical_node(struct acpi_device *adev, const struct device *dev); diff --git a/drivers/acpi/irq.c b/drivers/acpi/irq.c index 830299a74b84..7c352cba0528 100644 --- a/drivers/acpi/irq.c +++ b/drivers/acpi/irq.c @@ -24,7 +24,7 @@ static struct fwnode_handle *acpi_gsi_domain_id; * * irq location updated with irq value [>0 on success, 0 on failure] * - * Returns: linux IRQ number on success (>0) + * Returns: 0 on success * -EINVAL on failure */ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) @@ -37,7 +37,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq) * *irq == 0 means no mapping, that should * be reported as a failure */ - return (*irq > 0) ? *irq : -EINVAL; + return (*irq > 0) ? 0 : -EINVAL; } EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index b75b734ee73a..19182d091587 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -3160,6 +3160,8 @@ static struct acpi_driver acpi_nfit_driver = { static __init int nfit_init(void) { + int ret; + BUILD_BUG_ON(sizeof(struct acpi_table_nfit) != 40); BUILD_BUG_ON(sizeof(struct acpi_nfit_system_address) != 56); BUILD_BUG_ON(sizeof(struct acpi_nfit_memory_map) != 48); @@ -3187,8 +3189,14 @@ static __init int nfit_init(void) return -ENOMEM; nfit_mce_register(); + ret = acpi_bus_register_driver(&acpi_nfit_driver); + if (ret) { + nfit_mce_unregister(); + destroy_workqueue(nfit_wq); + } + + return ret; - return acpi_bus_register_driver(&acpi_nfit_driver); } static __exit void nfit_exit(void) diff --git a/drivers/acpi/osi.c b/drivers/acpi/osi.c index 849f9d2245ca..723bee58bbcf 100644 --- a/drivers/acpi/osi.c +++ b/drivers/acpi/osi.c @@ -265,7 +265,8 @@ static void __init acpi_osi_dmi_darwin(bool enable, __acpi_osi_setup_darwin(enable); } -void __init acpi_osi_dmi_linux(bool enable, const struct dmi_system_id *d) +static void __init acpi_osi_dmi_linux(bool enable, + const struct dmi_system_id *d) { pr_notice("DMI detected to setup _OSI(\"Linux\"): %s\n", d->ident); osi_config.linux_dmi = 1; diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c index 3a6c9b741b23..1b475bc1ae16 100644 --- a/drivers/acpi/power.c +++ b/drivers/acpi/power.c @@ -352,7 +352,7 @@ static struct attribute *attrs[] = { NULL, }; -static struct attribute_group attr_groups[] = { +static const struct attribute_group attr_groups[] = { [ACPI_STATE_D0] = { .name = "power_resources_D0", .attrs = attrs, @@ -371,14 +371,14 @@ static struct attribute_group attr_groups[] = { }, }; -static struct attribute_group wakeup_attr_group = { +static const struct attribute_group wakeup_attr_group = { .name = "power_resources_wakeup", .attrs = attrs, }; static void acpi_power_hide_list(struct acpi_device *adev, struct list_head *resources, - struct attribute_group *attr_group) + const struct attribute_group *attr_group) { struct acpi_power_resource_entry *entry; @@ -397,7 +397,7 @@ static void acpi_power_hide_list(struct acpi_device *adev, static void acpi_power_expose_list(struct acpi_device *adev, struct list_head *resources, - struct attribute_group *attr_group) + const struct attribute_group *attr_group) { struct acpi_power_resource_entry *entry; int ret; @@ -425,7 +425,7 @@ static void acpi_power_expose_list(struct acpi_device 
*adev, static void acpi_power_expose_hide(struct acpi_device *adev, struct list_head *resources, - struct attribute_group *attr_group, + const struct attribute_group *attr_group, bool expose) { if (expose) diff --git a/drivers/acpi/property.c b/drivers/acpi/property.c index 9364398204e9..917c789f953d 100644 --- a/drivers/acpi/property.c +++ b/drivers/acpi/property.c @@ -57,6 +57,7 @@ static bool acpi_nondev_subnode_extract(const union acpi_object *desc, dn->name = link->package.elements[0].string.pointer; dn->fwnode.type = FWNODE_ACPI_DATA; + dn->fwnode.ops = &acpi_fwnode_ops; dn->parent = parent; INIT_LIST_HEAD(&dn->data.subnodes); @@ -1119,3 +1120,119 @@ int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode, return 0; } + +static bool acpi_fwnode_device_is_available(struct fwnode_handle *fwnode) +{ + if (!is_acpi_device_node(fwnode)) + return false; + + return acpi_device_is_present(to_acpi_device_node(fwnode)); +} + +static bool acpi_fwnode_property_present(struct fwnode_handle *fwnode, + const char *propname) +{ + return !acpi_node_prop_get(fwnode, propname, NULL); +} + +static int acpi_fwnode_property_read_int_array(struct fwnode_handle *fwnode, + const char *propname, + unsigned int elem_size, + void *val, size_t nval) +{ + enum dev_prop_type type; + + switch (elem_size) { + case sizeof(u8): + type = DEV_PROP_U8; + break; + case sizeof(u16): + type = DEV_PROP_U16; + break; + case sizeof(u32): + type = DEV_PROP_U32; + break; + case sizeof(u64): + type = DEV_PROP_U64; + break; + default: + return -ENXIO; + } + + return acpi_node_prop_read(fwnode, propname, type, val, nval); +} + +static int acpi_fwnode_property_read_string_array(struct fwnode_handle *fwnode, + const char *propname, + const char **val, size_t nval) +{ + return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING, + val, nval); +} + +static struct fwnode_handle * +acpi_fwnode_get_named_child_node(struct fwnode_handle *fwnode, + const char *childname) +{ + struct fwnode_handle *child; + + /* + * Find first matching named child node of this fwnode. + * For ACPI this will be a data only sub-node. 
+ */ + fwnode_for_each_child_node(fwnode, child) + if (acpi_data_node_match(child, childname)) + return child; + + return NULL; +} + +static struct fwnode_handle * +acpi_fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode, + struct fwnode_handle *prev) +{ + struct fwnode_handle *endpoint; + + endpoint = acpi_graph_get_next_endpoint(fwnode, prev); + if (IS_ERR(endpoint)) + return NULL; + + return endpoint; +} + +static struct fwnode_handle * +acpi_fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode) +{ + struct fwnode_handle *endpoint = NULL; + + acpi_graph_get_remote_endpoint(fwnode, NULL, NULL, &endpoint); + + return endpoint; +} + +static int acpi_fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode, + struct fwnode_endpoint *endpoint) +{ + struct fwnode_handle *port_fwnode = fwnode_get_parent(fwnode); + + endpoint->local_fwnode = fwnode; + + fwnode_property_read_u32(port_fwnode, "port", &endpoint->port); + fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id); + + return 0; +} + +const struct fwnode_operations acpi_fwnode_ops = { + .device_is_available = acpi_fwnode_device_is_available, + .property_present = acpi_fwnode_property_present, + .property_read_int_array = acpi_fwnode_property_read_int_array, + .property_read_string_array = acpi_fwnode_property_read_string_array, + .get_parent = acpi_node_get_parent, + .get_next_child_node = acpi_get_next_subnode, + .get_named_child_node = acpi_fwnode_get_named_child_node, + .graph_get_next_endpoint = acpi_fwnode_graph_get_next_endpoint, + .graph_get_remote_endpoint = acpi_fwnode_graph_get_remote_endpoint, + .graph_get_port_parent = acpi_node_get_parent, + .graph_parse_endpoint = acpi_fwnode_graph_parse_endpoint, +}; diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 09f65f57bebe..33897298f03e 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -404,10 +404,6 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src) error = dock_notify(adev, src); } else if (adev->flags.hotplug_notify) { error = acpi_generic_hotplug_event(adev, src); - if (error == -EPERM) { - ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED; - goto err_out; - } } else { int (*notify)(struct acpi_device *, u32); @@ -423,8 +419,20 @@ void acpi_device_hotplug(struct acpi_device *adev, u32 src) else goto out; } - if (!error) + switch (error) { + case 0: ost_code = ACPI_OST_SC_SUCCESS; + break; + case -EPERM: + ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED; + break; + case -EBUSY: + ost_code = ACPI_OST_SC_DEVICE_BUSY; + break; + default: + ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE; + break; + } err_out: acpi_evaluate_ost(adev->handle, src, ost_code, NULL); @@ -1460,6 +1468,7 @@ void acpi_init_device_object(struct acpi_device *device, acpi_handle handle, device->handle = handle; device->parent = acpi_bus_get_parent(handle); device->fwnode.type = FWNODE_ACPI; + device->fwnode.ops = &acpi_fwnode_ops; acpi_set_device_status(device, sta); acpi_device_get_busid(device); acpi_set_pnp_ids(handle, &device->pnp, type); @@ -1592,13 +1601,9 @@ static int acpi_bus_type_and_status(acpi_handle handle, int *type, return 0; } -bool acpi_device_is_present(struct acpi_device *adev) +bool acpi_device_is_present(const struct acpi_device *adev) { - if (adev->status.present || adev->status.functional) - return true; - - adev->flags.initialized = false; - return false; + return adev->status.present || adev->status.functional; } static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler, @@ -1831,6 +1836,7 @@ static void acpi_bus_attach(struct 
acpi_device *device) acpi_bus_get_status(device); /* Skip devices that are not present. */ if (!acpi_device_is_present(device)) { + device->flags.initialized = false; acpi_device_clear_enumerated(device); device->flags.power_manageable = 0; return; diff --git a/drivers/acpi/spcr.c b/drivers/acpi/spcr.c index 3afa8c1fa127..4ac3e06b41d8 100644 --- a/drivers/acpi/spcr.c +++ b/drivers/acpi/spcr.c @@ -36,6 +36,26 @@ static bool qdf2400_erratum_44_present(struct acpi_table_header *h) return false; } +/* + * APM X-Gene v1 and v2 UART hardware is a 16550-like device but has its + * registers aligned to 32-bit. In addition, the BIOS also encoded the + * access width to be 8 bits. This function detects this erratum. + */ +static bool xgene_8250_erratum_present(struct acpi_table_spcr *tb) +{ + if (tb->interface_type != ACPI_DBG2_16550_COMPATIBLE) + return false; + + if (memcmp(tb->header.oem_id, "APMC0D", ACPI_OEM_ID_SIZE)) + return false; + + if (!memcmp(tb->header.oem_table_id, "XGENESPC", + ACPI_OEM_TABLE_ID_SIZE) && tb->header.oem_revision == 0) + return true; + + return false; +} + /** * parse_spcr() - parse ACPI SPCR table and add preferred console * @@ -74,8 +94,22 @@ int __init parse_spcr(bool earlycon) goto done; } - iotype = table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY ? "mmio" : "io"; + if (table->serial_port.space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) { + switch (table->serial_port.access_width) { + default: + pr_err("Unexpected SPCR Access Width. Defaulting to byte size\n"); + case ACPI_ACCESS_SIZE_BYTE: + iotype = "mmio"; + break; + case ACPI_ACCESS_SIZE_WORD: + iotype = "mmio16"; + break; + case ACPI_ACCESS_SIZE_DWORD: + iotype = "mmio32"; + break; + } + } else + iotype = "io"; switch (table->interface_type) { case ACPI_DBG2_ARM_SBSA_32BIT: @@ -115,6 +149,8 @@ int __init parse_spcr(bool earlycon) if (qdf2400_erratum_44_present(&table->header)) uart = "qdf2400_e44"; + if (xgene_8250_erratum_present(table)) + iotype = "mmio32"; snprintf(opts, sizeof(opts), "%s,%s,0x%llx,%d", uart, iotype, table->serial_port.address, baud_rate); diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index bd86b809c848..b4fbb9929482 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -12,6 +12,7 @@ */ #include <linux/acpi.h> +#include <linux/dmi.h> #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include "../internal.h" @@ -20,6 +21,10 @@ * Some ACPI devices are hidden (status == 0x0) in recent BIOS-es because * some recent Windows drivers bind to one device but poke at multiple * devices at the same time, so the others get hidden. + * + * Some BIOS-es (temporarily) hide specific ACPI devices to work around Windows + * driver bugs. We use DMI matching to match known cases of this. + * * We work around this by always reporting ACPI_STA_DEFAULT for these * devices. Note this MUST only be done for devices where this is safe. * @@ -31,14 +36,16 @@ struct always_present_id { struct acpi_device_id hid[2]; struct x86_cpu_id cpu_ids[2]; + struct dmi_system_id dmi_ids[2]; /* Optional */ const char *uid; }; #define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } -#define ENTRY(hid, uid, cpu_models) { \ +#define ENTRY(hid, uid, cpu_models, dmi...) { \ { { hid, }, {} }, \ { cpu_models, {} }, \ + { { .matches = dmi }, {} }, \ uid, \ } @@ -47,13 +54,35 @@ static const struct always_present_id always_present_ids[] = { * Bay / Cherry Trail PWM directly poked by GPU driver in win10, * but Linux uses a separate PWM driver, harmless if not used.
*/ - ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1)), - ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT)), + ENTRY("80860F09", "1", ICPU(INTEL_FAM6_ATOM_SILVERMONT1), {}), + ENTRY("80862288", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}), /* * The INT0002 device is necessary to clear wakeup interrupt sources * on Cherry Trail devices, without it we get nobody cared IRQ msgs. */ - ENTRY("INT0002", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT)), + ENTRY("INT0002", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), {}), + /* + * On the Dell Venue 11 Pro 7130 the DSDT hides the touchscreen ACPI + * device until a certain time after _SB.PCI0.GFX0.LCD.LCD1._ON gets + * called has passed *and* _STA has been called at least 3 times since. + */ + ENTRY("SYNA7500", "1", ICPU(INTEL_FAM6_HASWELL_ULT), { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Venue 11 Pro 7130"), + }), + /* + * The GPD win BIOS dated 20170320 has disabled the accelerometer, the + * drivers sometimes cause crashes under Windows and this is how the + * manufacturer has solved this :| Note that the DMI data is less + * generic than it seems, a board_vendor of "AMI Corporation" is quite + * rare and a board_name of "Default String" also is rare. + */ + ENTRY("KIOX000A", "1", ICPU(INTEL_FAM6_ATOM_AIRMONT), { + DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Default string"), + DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), + DMI_MATCH(DMI_BIOS_DATE, "03/20/2017") + }), }; bool acpi_device_always_present(struct acpi_device *adev) @@ -76,6 +105,10 @@ bool acpi_device_always_present(struct acpi_device *adev) if (!x86_match_cpu(always_present_ids[i].cpu_ids)) continue; + if (always_present_ids[i].dmi_ids[0].matches[0].slot && + !dmi_check_system(always_present_ids[i].dmi_ids)) + continue; + if (old_status != ACPI_STA_DEFAULT) /* Log only once */ dev_info(&adev->dev, "Device [%s] is in always present list\n", diff --git a/drivers/android/binder.c b/drivers/android/binder.c index aae4d8d4be36..f7665c31feca 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2200,8 +2200,12 @@ static void binder_transaction(struct binder_proc *proc, list_add_tail(&t->work.entry, target_list); tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; list_add_tail(&tcomplete->entry, &thread->todo); - if (target_wait) - wake_up_interruptible(target_wait); + if (target_wait) { + if (reply || !(t->flags & TF_ONE_WAY)) + wake_up_interruptible_sync(target_wait); + else + wake_up_interruptible(target_wait); + } return; err_translate_failed: @@ -3247,10 +3251,6 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ - if (unlikely(current->mm != proc->vma_vm_mm)) { - pr_err("current mm mismatch proc mm\n"); - return -EINVAL; - } trace_binder_ioctl(cmd, arg); ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); @@ -3464,9 +3464,8 @@ static int binder_open(struct inode *nodp, struct file *filp) proc = kzalloc(sizeof(*proc), GFP_KERNEL); if (proc == NULL) return -ENOMEM; - get_task_struct(current); - proc->tsk = current; - proc->vma_vm_mm = current->mm; + get_task_struct(current->group_leader); + proc->tsk = current->group_leader; INIT_LIST_HEAD(&proc->todo); init_waitqueue_head(&proc->wait); proc->default_priority = task_nice(current); diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c index 292dec18ffb8..07bdd51b3b9a 100644 --- a/drivers/atm/zatm.c +++
b/drivers/atm/zatm.c @@ -1613,7 +1613,7 @@ static int zatm_init_one(struct pci_dev *pci_dev, ret = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)); if (ret < 0) - goto out_disable; + goto out_release; zatm_dev->pci_dev = pci_dev; dev->dev_data = zatm_dev; diff --git a/drivers/base/node.c b/drivers/base/node.c index 73d39bc58c42..d8dc83017d8d 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -288,7 +288,7 @@ static void node_device_release(struct device *dev) * * Initialize and register the node device. */ -static int register_node(struct node *node, int num, struct node *parent) +static int register_node(struct node *node, int num) { int error; @@ -567,19 +567,14 @@ static void init_node_hugetlb_work(int nid) { } int __register_one_node(int nid) { - int p_node = parent_node(nid); - struct node *parent = NULL; int error; int cpu; - if (p_node != nid) - parent = node_devices[p_node]; - node_devices[nid] = kzalloc(sizeof(struct node), GFP_KERNEL); if (!node_devices[nid]) return -ENOMEM; - error = register_node(node_devices[nid], nid, parent); + error = register_node(node_devices[nid], nid); /* link cpu under this node */ for_each_present_cpu(cpu) { diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 9649dce63e19..60303aa28587 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1148,8 +1148,8 @@ static void genpd_syscore_switch(struct device *dev, bool suspend) { struct generic_pm_domain *genpd; - genpd = genpd_lookup_dev(dev); - if (!genpd) + genpd = dev_to_genpd(dev); + if (!pm_genpd_present(genpd)) return; if (suspend) { @@ -1180,6 +1180,7 @@ EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron); #define pm_genpd_resume_noirq NULL #define pm_genpd_freeze_noirq NULL #define pm_genpd_thaw_noirq NULL +#define pm_genpd_poweroff_noirq NULL #define pm_genpd_restore_noirq NULL #define pm_genpd_complete NULL @@ -1221,8 +1222,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, spin_unlock_irq(&dev->power.lock); - dev_pm_domain_set(dev, &genpd->domain); - return gpd_data; err_free: @@ -1236,8 +1235,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, static void genpd_free_dev_data(struct device *dev, struct generic_pm_domain_data *gpd_data) { - dev_pm_domain_set(dev, NULL); - spin_lock_irq(&dev->power.lock); dev->power.subsys_data->domain_data = NULL; @@ -1274,6 +1271,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (ret) goto out; + dev_pm_domain_set(dev, &genpd->domain); + genpd->device_count++; genpd->max_off_time_changed = true; @@ -1335,6 +1334,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, if (genpd->detach_dev) genpd->detach_dev(genpd, dev); + dev_pm_domain_set(dev, NULL); + list_del_init(&pdd->list_node); genpd_unlock(genpd); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 185a52581cfa..156ab57bca77 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -272,6 +272,8 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev, value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) value = PM_QOS_LATENCY_ANY; + else + return -EINVAL; } ret = dev_pm_qos_update_user_latency_tolerance(dev, value); return ret < 0 ? 
ret : n; diff --git a/drivers/base/property.c b/drivers/base/property.c index 149de311a10e..edf02c1b5845 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -187,6 +187,50 @@ struct fwnode_handle *dev_fwnode(struct device *dev) } EXPORT_SYMBOL_GPL(dev_fwnode); +static bool pset_fwnode_property_present(struct fwnode_handle *fwnode, + const char *propname) +{ + return !!pset_prop_get(to_pset_node(fwnode), propname); +} + +static int pset_fwnode_read_int_array(struct fwnode_handle *fwnode, + const char *propname, + unsigned int elem_size, void *val, + size_t nval) +{ + struct property_set *node = to_pset_node(fwnode); + + if (!val) + return pset_prop_count_elems_of_size(node, propname, elem_size); + + switch (elem_size) { + case sizeof(u8): + return pset_prop_read_u8_array(node, propname, val, nval); + case sizeof(u16): + return pset_prop_read_u16_array(node, propname, val, nval); + case sizeof(u32): + return pset_prop_read_u32_array(node, propname, val, nval); + case sizeof(u64): + return pset_prop_read_u64_array(node, propname, val, nval); + } + + return -ENXIO; +} + +static int pset_fwnode_property_read_string_array(struct fwnode_handle *fwnode, + const char *propname, + const char **val, size_t nval) +{ + return pset_prop_read_string_array(to_pset_node(fwnode), propname, + val, nval); +} + +static const struct fwnode_operations pset_fwnode_ops = { + .property_present = pset_fwnode_property_present, + .property_read_int_array = pset_fwnode_read_int_array, + .property_read_string_array = pset_fwnode_property_read_string_array, +}; + /** * device_property_present - check if a property of a device is present * @dev: Device whose property is being checked @@ -200,18 +244,6 @@ bool device_property_present(struct device *dev, const char *propname) } EXPORT_SYMBOL_GPL(device_property_present); -static bool __fwnode_property_present(struct fwnode_handle *fwnode, - const char *propname) -{ - if (is_of_node(fwnode)) - return of_property_read_bool(to_of_node(fwnode), propname); - else if (is_acpi_node(fwnode)) - return !acpi_node_prop_get(fwnode, propname, NULL); - else if (is_pset_node(fwnode)) - return !!pset_prop_get(to_pset_node(fwnode), propname); - return false; -} - /** * fwnode_property_present - check if a property of a firmware node is present * @fwnode: Firmware node whose property to check @@ -221,10 +253,11 @@ bool fwnode_property_present(struct fwnode_handle *fwnode, const char *propname) { bool ret; - ret = __fwnode_property_present(fwnode, propname); + ret = fwnode_call_bool_op(fwnode, property_present, propname); if (ret == false && !IS_ERR_OR_NULL(fwnode) && !IS_ERR_OR_NULL(fwnode->secondary)) - ret = __fwnode_property_present(fwnode->secondary, propname); + ret = fwnode_call_bool_op(fwnode->secondary, property_present, + propname); return ret; } EXPORT_SYMBOL_GPL(fwnode_property_present); @@ -398,42 +431,23 @@ int device_property_match_string(struct device *dev, const char *propname, } EXPORT_SYMBOL_GPL(device_property_match_string); -#define OF_DEV_PROP_READ_ARRAY(node, propname, type, val, nval) \ - (val) ? of_property_read_##type##_array((node), (propname), (val), (nval)) \ - : of_property_count_elems_of_size((node), (propname), sizeof(type)) - -#define PSET_PROP_READ_ARRAY(node, propname, type, val, nval) \ - (val) ? 
pset_prop_read_##type##_array((node), (propname), (val), (nval)) \ - : pset_prop_count_elems_of_size((node), (propname), sizeof(type)) - -#define FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \ -({ \ - int _ret_; \ - if (is_of_node(_fwnode_)) \ - _ret_ = OF_DEV_PROP_READ_ARRAY(to_of_node(_fwnode_), _propname_, \ - _type_, _val_, _nval_); \ - else if (is_acpi_node(_fwnode_)) \ - _ret_ = acpi_node_prop_read(_fwnode_, _propname_, _proptype_, \ - _val_, _nval_); \ - else if (is_pset_node(_fwnode_)) \ - _ret_ = PSET_PROP_READ_ARRAY(to_pset_node(_fwnode_), _propname_, \ - _type_, _val_, _nval_); \ - else \ - _ret_ = -ENXIO; \ - _ret_; \ -}) - -#define FWNODE_PROP_READ_ARRAY(_fwnode_, _propname_, _type_, _proptype_, _val_, _nval_) \ -({ \ - int _ret_; \ - _ret_ = FWNODE_PROP_READ(_fwnode_, _propname_, _type_, _proptype_, \ - _val_, _nval_); \ - if (_ret_ == -EINVAL && !IS_ERR_OR_NULL(_fwnode_) && \ - !IS_ERR_OR_NULL(_fwnode_->secondary)) \ - _ret_ = FWNODE_PROP_READ(_fwnode_->secondary, _propname_, _type_, \ - _proptype_, _val_, _nval_); \ - _ret_; \ -}) +static int fwnode_property_read_int_array(struct fwnode_handle *fwnode, + const char *propname, + unsigned int elem_size, void *val, + size_t nval) +{ + int ret; + + ret = fwnode_call_int_op(fwnode, property_read_int_array, propname, + elem_size, val, nval); + if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) && + !IS_ERR_OR_NULL(fwnode->secondary)) + ret = fwnode_call_int_op( + fwnode->secondary, property_read_int_array, propname, + elem_size, val, nval); + + return ret; +} /** * fwnode_property_read_u8_array - return a u8 array property of firmware node @@ -456,8 +470,8 @@ EXPORT_SYMBOL_GPL(device_property_match_string); int fwnode_property_read_u8_array(struct fwnode_handle *fwnode, const char *propname, u8 *val, size_t nval) { - return FWNODE_PROP_READ_ARRAY(fwnode, propname, u8, DEV_PROP_U8, - val, nval); + return fwnode_property_read_int_array(fwnode, propname, sizeof(u8), + val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array); @@ -482,8 +496,8 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u8_array); int fwnode_property_read_u16_array(struct fwnode_handle *fwnode, const char *propname, u16 *val, size_t nval) { - return FWNODE_PROP_READ_ARRAY(fwnode, propname, u16, DEV_PROP_U16, - val, nval); + return fwnode_property_read_int_array(fwnode, propname, sizeof(u16), + val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array); @@ -508,8 +522,8 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u16_array); int fwnode_property_read_u32_array(struct fwnode_handle *fwnode, const char *propname, u32 *val, size_t nval) { - return FWNODE_PROP_READ_ARRAY(fwnode, propname, u32, DEV_PROP_U32, - val, nval); + return fwnode_property_read_int_array(fwnode, propname, sizeof(u32), + val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array); @@ -534,29 +548,11 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_u32_array); int fwnode_property_read_u64_array(struct fwnode_handle *fwnode, const char *propname, u64 *val, size_t nval) { - return FWNODE_PROP_READ_ARRAY(fwnode, propname, u64, DEV_PROP_U64, - val, nval); + return fwnode_property_read_int_array(fwnode, propname, sizeof(u64), + val, nval); } EXPORT_SYMBOL_GPL(fwnode_property_read_u64_array); -static int __fwnode_property_read_string_array(struct fwnode_handle *fwnode, - const char *propname, - const char **val, size_t nval) -{ - if (is_of_node(fwnode)) - return val ? 
- of_property_read_string_array(to_of_node(fwnode), - propname, val, nval) : - of_property_count_strings(to_of_node(fwnode), propname); - else if (is_acpi_node(fwnode)) - return acpi_node_prop_read(fwnode, propname, DEV_PROP_STRING, - val, nval); - else if (is_pset_node(fwnode)) - return pset_prop_read_string_array(to_pset_node(fwnode), - propname, val, nval); - return -ENXIO; -} - /** * fwnode_property_read_string_array - return string array property of a node * @fwnode: Firmware node to get the property of @@ -581,11 +577,13 @@ int fwnode_property_read_string_array(struct fwnode_handle *fwnode, { int ret; - ret = __fwnode_property_read_string_array(fwnode, propname, val, nval); + ret = fwnode_call_int_op(fwnode, property_read_string_array, propname, + val, nval); if (ret == -EINVAL && !IS_ERR_OR_NULL(fwnode) && !IS_ERR_OR_NULL(fwnode->secondary)) - ret = __fwnode_property_read_string_array(fwnode->secondary, - propname, val, nval); + ret = fwnode_call_int_op(fwnode->secondary, + property_read_string_array, propname, + val, nval); return ret; } EXPORT_SYMBOL_GPL(fwnode_property_read_string_array); @@ -903,6 +901,7 @@ int device_add_properties(struct device *dev, return PTR_ERR(p); p->fwnode.type = FWNODE_PDATA; + p->fwnode.ops = &pset_fwnode_ops; set_secondary_fwnode(dev, &p->fwnode); return 0; } @@ -938,19 +937,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_next_parent); */ struct fwnode_handle *fwnode_get_parent(struct fwnode_handle *fwnode) { - struct fwnode_handle *parent = NULL; - - if (is_of_node(fwnode)) { - struct device_node *node; - - node = of_get_parent(to_of_node(fwnode)); - if (node) - parent = &node->fwnode; - } else if (is_acpi_node(fwnode)) { - parent = acpi_node_get_parent(fwnode); - } - - return parent; + return fwnode_call_ptr_op(fwnode, get_parent); } EXPORT_SYMBOL_GPL(fwnode_get_parent); @@ -962,18 +949,7 @@ EXPORT_SYMBOL_GPL(fwnode_get_parent); struct fwnode_handle *fwnode_get_next_child_node(struct fwnode_handle *fwnode, struct fwnode_handle *child) { - if (is_of_node(fwnode)) { - struct device_node *node; - - node = of_get_next_available_child(to_of_node(fwnode), - to_of_node(child)); - if (node) - return &node->fwnode; - } else if (is_acpi_node(fwnode)) { - return acpi_get_next_subnode(fwnode, child); - } - - return NULL; + return fwnode_call_ptr_op(fwnode, get_next_child_node, child); } EXPORT_SYMBOL_GPL(fwnode_get_next_child_node); @@ -1005,23 +981,7 @@ EXPORT_SYMBOL_GPL(device_get_next_child_node); struct fwnode_handle *fwnode_get_named_child_node(struct fwnode_handle *fwnode, const char *childname) { - struct fwnode_handle *child; - - /* - * Find first matching named child node of this fwnode. - * For ACPI this will be a data only sub-node. 
- */ - fwnode_for_each_child_node(fwnode, child) { - if (is_of_node(child)) { - if (!of_node_cmp(to_of_node(child)->name, childname)) - return child; - } else if (is_acpi_data_node(child)) { - if (acpi_data_node_match(child, childname)) - return child; - } - } - - return NULL; + return fwnode_call_ptr_op(fwnode, get_named_child_node, childname); } EXPORT_SYMBOL_GPL(fwnode_get_named_child_node); @@ -1043,8 +1003,7 @@ EXPORT_SYMBOL_GPL(device_get_named_child_node); */ void fwnode_handle_get(struct fwnode_handle *fwnode) { - if (is_of_node(fwnode)) - of_node_get(to_of_node(fwnode)); + fwnode_call_void_op(fwnode, get); } EXPORT_SYMBOL_GPL(fwnode_handle_get); @@ -1058,12 +1017,21 @@ EXPORT_SYMBOL_GPL(fwnode_handle_get); */ void fwnode_handle_put(struct fwnode_handle *fwnode) { - if (is_of_node(fwnode)) - of_node_put(to_of_node(fwnode)); + fwnode_call_void_op(fwnode, put); } EXPORT_SYMBOL_GPL(fwnode_handle_put); /** + * fwnode_device_is_available - check if a device is available for use + * @fwnode: Pointer to the fwnode of the device. + */ +bool fwnode_device_is_available(struct fwnode_handle *fwnode) +{ + return fwnode_call_bool_op(fwnode, device_is_available); +} +EXPORT_SYMBOL_GPL(fwnode_device_is_available); + +/** * device_get_child_node_count - return the number of child nodes for device * @dev: Device to count the child nodes for */ @@ -1198,26 +1166,29 @@ struct fwnode_handle * fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode, struct fwnode_handle *prev) { - struct fwnode_handle *endpoint = NULL; - - if (is_of_node(fwnode)) { - struct device_node *node; + return fwnode_call_ptr_op(fwnode, graph_get_next_endpoint, prev); +} +EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint); - node = of_graph_get_next_endpoint(to_of_node(fwnode), - to_of_node(prev)); +/** + * fwnode_graph_get_port_parent - Return the device fwnode of a port endpoint + * @endpoint: Endpoint firmware node of the port + * + * Return: the firmware node of the device the @endpoint belongs to.
+ */ +struct fwnode_handle * +fwnode_graph_get_port_parent(struct fwnode_handle *endpoint) +{ + struct fwnode_handle *port, *parent; - if (node) - endpoint = &node->fwnode; - } else if (is_acpi_node(fwnode)) { - endpoint = acpi_graph_get_next_endpoint(fwnode, prev); - if (IS_ERR(endpoint)) - endpoint = NULL; - } + port = fwnode_get_parent(endpoint); + parent = fwnode_call_ptr_op(port, graph_get_port_parent); - return endpoint; + fwnode_handle_put(port); + return parent; } -EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint); +EXPORT_SYMBOL_GPL(fwnode_graph_get_port_parent); /** * fwnode_graph_get_remote_port_parent - Return fwnode of a remote device * @@ -1228,22 +1199,12 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_next_endpoint); struct fwnode_handle * fwnode_graph_get_remote_port_parent(struct fwnode_handle *fwnode) { - struct fwnode_handle *parent = NULL; - - if (is_of_node(fwnode)) { - struct device_node *node; + struct fwnode_handle *endpoint, *parent; - node = of_graph_get_remote_port_parent(to_of_node(fwnode)); - if (node) - parent = &node->fwnode; - } else if (is_acpi_node(fwnode)) { - int ret; + endpoint = fwnode_graph_get_remote_endpoint(fwnode); + parent = fwnode_graph_get_port_parent(endpoint); - ret = acpi_graph_get_remote_endpoint(fwnode, &parent, NULL, - NULL); - if (ret) - return NULL; - } + fwnode_handle_put(endpoint); return parent; } @@ -1257,23 +1218,7 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port_parent); */ struct fwnode_handle *fwnode_graph_get_remote_port(struct fwnode_handle *fwnode) { - struct fwnode_handle *port = NULL; - - if (is_of_node(fwnode)) { - struct device_node *node; - - node = of_graph_get_remote_port(to_of_node(fwnode)); - if (node) - port = &node->fwnode; - } else if (is_acpi_node(fwnode)) { - int ret; - - ret = acpi_graph_get_remote_endpoint(fwnode, NULL, &port, NULL); - if (ret) - return NULL; - } - - return port; + return fwnode_get_next_parent(fwnode_graph_get_remote_endpoint(fwnode)); } EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port); @@ -1286,27 +1231,46 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_port); struct fwnode_handle * fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode) { - struct fwnode_handle *endpoint = NULL; + return fwnode_call_ptr_op(fwnode, graph_get_remote_endpoint); +} +EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint); - if (is_of_node(fwnode)) { - struct device_node *node; +/** + * fwnode_graph_get_remote_node - get remote parent node for given port/endpoint + * @fwnode: pointer to parent fwnode_handle containing graph port/endpoint + * @port_id: identifier of the parent port node + * @endpoint_id: identifier of the endpoint node + * + * Return: Remote fwnode handle associated with remote endpoint node linked + * to @node. Use fwnode_handle_put() on it when done.
+ */ +struct fwnode_handle *fwnode_graph_get_remote_node(struct fwnode_handle *fwnode, + u32 port_id, u32 endpoint_id) +{ + struct fwnode_handle *endpoint = NULL; - node = of_parse_phandle(to_of_node(fwnode), "remote-endpoint", - 0); - if (node) - endpoint = &node->fwnode; - } else if (is_acpi_node(fwnode)) { + while ((endpoint = fwnode_graph_get_next_endpoint(fwnode, endpoint))) { + struct fwnode_endpoint fwnode_ep; + struct fwnode_handle *remote; int ret; - ret = acpi_graph_get_remote_endpoint(fwnode, NULL, NULL, - &endpoint); - if (ret) + ret = fwnode_graph_parse_endpoint(endpoint, &fwnode_ep); + if (ret < 0) + continue; + + if (fwnode_ep.port != port_id || fwnode_ep.id != endpoint_id) + continue; + + remote = fwnode_graph_get_remote_port_parent(endpoint); + if (!remote) return NULL; + + return fwnode_device_is_available(remote) ? remote : NULL; } - return endpoint; + return NULL; } -EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint); +EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_node); /** * fwnode_graph_parse_endpoint - parse common endpoint node properties @@ -1320,22 +1284,8 @@ EXPORT_SYMBOL_GPL(fwnode_graph_get_remote_endpoint); int fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode, struct fwnode_endpoint *endpoint) { - struct fwnode_handle *port_fwnode = fwnode_get_parent(fwnode); - memset(endpoint, 0, sizeof(*endpoint)); - endpoint->local_fwnode = fwnode; - - if (is_acpi_node(port_fwnode)) { - fwnode_property_read_u32(port_fwnode, "port", &endpoint->port); - fwnode_property_read_u32(fwnode, "endpoint", &endpoint->id); - } else { - fwnode_property_read_u32(port_fwnode, "reg", &endpoint->port); - fwnode_property_read_u32(fwnode, "reg", &endpoint->id); - } - - fwnode_handle_put(port_fwnode); - - return 0; + return fwnode_call_int_op(fwnode, graph_parse_endpoint, endpoint); } EXPORT_SYMBOL(fwnode_graph_parse_endpoint); diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c index 5f04e7bf063e..e6c64b0be5b2 100644 --- a/drivers/base/regmap/regmap-w1.c +++ b/drivers/base/regmap/regmap-w1.c @@ -1,7 +1,7 @@ /* * Register map access API - W1 (1-Wire) support * - * Copyright (C) 2017 OAO Radioavionica + * Copyright (c) 2017 Radioavionica Corporation * Author: Alex A. 
Mihaylov <minimumlaw@rambler.ru> * * This program is free software; you can redistribute it and/or modify @@ -11,7 +11,7 @@ #include <linux/regmap.h> #include <linux/module.h> -#include "../../w1/w1.h" +#include <linux/w1.h> #include "internal.h" diff --git a/drivers/block/brd.c b/drivers/block/brd.c index 17723fd50a53..104b71c0490d 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -9,6 +9,7 @@ */ #include <linux/init.h> +#include <linux/initrd.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/major.h> diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 02a611993bb4..678af946be30 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -1944,6 +1944,13 @@ static void cciss_get_serial_no(ctlr_info_t *h, int logvol, return; } +static void cciss_initialize_rq(struct request *rq) +{ + struct scsi_request *sreq = blk_mq_rq_to_pdu(rq); + + scsi_req_init(sreq); +} + /* * cciss_add_disk sets up the block device queue for a logical drive */ @@ -1956,6 +1963,7 @@ static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, disk->queue->cmd_size = sizeof(struct scsi_request); disk->queue->request_fn = do_cciss_request; + disk->queue->initialize_rq_fn = cciss_initialize_rq; disk->queue->queue_lock = &h->lock; queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, disk->queue); if (blk_init_allocated_queue(disk->queue) < 0) diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 61b046f256ca..4a3cfc7940de 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -174,7 +174,6 @@ static void mtip_init_cmd_header(struct request *rq) { struct driver_data *dd = rq->q->queuedata; struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); - u32 host_cap_64 = readl(dd->mmio + HOST_CAP) & HOST_CAP_64; /* Point the command headers at the command tables. */ cmd->command_header = dd->port->command_list + @@ -182,7 +181,7 @@ static void mtip_init_cmd_header(struct request *rq) cmd->command_header_dma = dd->port->command_list_dma + (sizeof(struct mtip_cmd_hdr) * rq->tag); - if (host_cap_64) + if (test_bit(MTIP_PF_HOST_CAP_64, &dd->port->flags)) cmd->command_header->ctbau = __force_bit2int cpu_to_le32((cmd->command_dma >> 16) >> 16); cmd->command_header->ctba = __force_bit2int cpu_to_le32(cmd->command_dma & 0xFFFFFFFF); @@ -386,6 +385,7 @@ static void mtip_init_port(struct mtip_port *port) port->mmio + PORT_LST_ADDR_HI); writel((port->rxfis_dma >> 16) >> 16, port->mmio + PORT_FIS_ADDR_HI); + set_bit(MTIP_PF_HOST_CAP_64, &port->flags); } writel(port->command_list_dma & 0xFFFFFFFF, @@ -950,7 +950,7 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout) unsigned long to; bool active = true; - blk_mq_stop_hw_queues(port->dd->queue); + blk_mq_quiesce_queue(port->dd->queue); to = jiffies + msecs_to_jiffies(timeout); do { @@ -970,10 +970,10 @@ static int mtip_quiesce_io(struct mtip_port *port, unsigned long timeout) break; } while (time_before(jiffies, to)); - blk_mq_start_stopped_hw_queues(port->dd->queue, true); + blk_mq_unquiesce_queue(port->dd->queue); return active ? 
-EBUSY : 0; err_fault: - blk_mq_start_stopped_hw_queues(port->dd->queue, true); + blk_mq_unquiesce_queue(port->dd->queue); return -EFAULT; } @@ -2737,6 +2737,9 @@ static void mtip_abort_cmd(struct request *req, void *data, struct mtip_cmd *cmd = blk_mq_rq_to_pdu(req); struct driver_data *dd = data; + if (!blk_mq_request_started(req)) + return; + dbg_printk(MTIP_DRV_NAME " Aborting request, tag = %d\n", req->tag); clear_bit(req->tag, dd->port->cmds_to_issue); @@ -2749,6 +2752,9 @@ static void mtip_queue_cmd(struct request *req, void *data, { struct driver_data *dd = data; + if (!blk_mq_request_started(req)) + return; + set_bit(req->tag, dd->port->cmds_to_issue); blk_abort_request(req); } @@ -2814,6 +2820,8 @@ restart_eh: dev_warn(&dd->pdev->dev, "Completion workers still active!"); + blk_mq_quiesce_queue(dd->queue); + spin_lock(dd->queue->queue_lock); blk_mq_tagset_busy_iter(&dd->tags, mtip_queue_cmd, dd); @@ -2826,6 +2834,8 @@ restart_eh: mtip_abort_cmd, dd); clear_bit(MTIP_PF_TO_ACTIVE_BIT, &dd->port->flags); + + blk_mq_unquiesce_queue(dd->queue); } if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) { @@ -3995,8 +4005,9 @@ static int mtip_block_remove(struct driver_data *dd) dd->disk->disk_name); blk_freeze_queue_start(dd->queue); - blk_mq_stop_hw_queues(dd->queue); + blk_mq_quiesce_queue(dd->queue); blk_mq_tagset_busy_iter(&dd->tags, mtip_no_dev_cleanup, dd); + blk_mq_unquiesce_queue(dd->queue); /* * Delete our gendisk structure. This also removes the device diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index e8286af50e16..e20e55dab443 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -140,6 +140,7 @@ enum { (1 << MTIP_PF_SE_ACTIVE_BIT) | (1 << MTIP_PF_DM_ACTIVE_BIT) | (1 << MTIP_PF_TO_ACTIVE_BIT)), + MTIP_PF_HOST_CAP_64 = 10, /* cache HOST_CAP_64 */ MTIP_PF_SVC_THD_ACTIVE_BIT = 4, MTIP_PF_ISSUE_CMDS_BIT = 5, diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 977ec960dd2f..87a0a29f6e7e 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -626,7 +626,6 @@ static void recv_work(struct work_struct *work) struct nbd_device *nbd = args->nbd; struct nbd_config *config = nbd->config; struct nbd_cmd *cmd; - int ret = 0; while (1) { cmd = nbd_read_stat(nbd, args->index); @@ -636,7 +635,6 @@ static void recv_work(struct work_struct *work) mutex_lock(&nsock->tx_lock); nbd_mark_nsock_dead(nbd, nsock, 1); mutex_unlock(&nsock->tx_lock); - ret = PTR_ERR(cmd); break; } @@ -661,9 +659,9 @@ static void nbd_clear_req(struct request *req, void *data, bool reserved) static void nbd_clear_que(struct nbd_device *nbd) { - blk_mq_stop_hw_queues(nbd->disk->queue); + blk_mq_quiesce_queue(nbd->disk->queue); blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); - blk_mq_start_hw_queues(nbd->disk->queue); + blk_mq_unquiesce_queue(nbd->disk->queue); dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n"); } diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 71f4422eba81..85c24cace973 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -844,9 +844,6 @@ static int __init null_init(void) queue_mode = NULL_Q_MQ; } - if (queue_mode == NULL_Q_MQ && shared_tags) - null_init_tag_set(&tag_set); - if (queue_mode == NULL_Q_MQ && use_per_node_hctx) { if (submit_queues < nr_online_nodes) { pr_warn("null_blk: submit_queues param is set to %u.", @@ -858,11 +855,19 @@ static int __init null_init(void) else if (!submit_queues) submit_queues = 1; + if (queue_mode == NULL_Q_MQ && shared_tags) { + ret 
= null_init_tag_set(&tag_set); + if (ret) + return ret; + } + mutex_init(&lock); null_major = register_blkdev(0, "nullb"); - if (null_major < 0) - return null_major; + if (null_major < 0) { + ret = null_major; + goto err_tagset; + } if (use_lightnvm) { ppa_cache = kmem_cache_create("ppa_cache", 64 * sizeof(u64), @@ -891,6 +896,9 @@ err_dev: kmem_cache_destroy(ppa_cache); err_ppa: unregister_blkdev(null_major, "nullb"); +err_tagset: + if (queue_mode == NULL_Q_MQ && shared_tags) + blk_mq_free_tag_set(&tag_set); return ret; } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 0297ad7c1452..4e02aa5fdac0 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -840,7 +840,7 @@ static int virtblk_freeze(struct virtio_device *vdev) /* Make sure no work handler is accessing the device. */ flush_work(&vblk->config_work); - blk_mq_stop_hw_queues(vblk->disk->queue); + blk_mq_quiesce_queue(vblk->disk->queue); vdev->config->del_vqs(vdev); return 0; @@ -857,7 +857,7 @@ static int virtblk_restore(struct virtio_device *vdev) virtio_device_ready(vdev); - blk_mq_start_stopped_hw_queues(vblk->disk->queue, true); + blk_mq_unquiesce_queue(vblk->disk->queue); return 0; } #endif diff --git a/drivers/block/zram/zcomp.c b/drivers/block/zram/zcomp.c index 12046f4f00e4..5b8992beffec 100644 --- a/drivers/block/zram/zcomp.c +++ b/drivers/block/zram/zcomp.c @@ -68,13 +68,11 @@ static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp) bool zcomp_available_algorithm(const char *comp) { - int i = 0; + int i; - while (backends[i]) { - if (sysfs_streq(comp, backends[i])) - return true; - i++; - } + i = __sysfs_match_string(backends, -1, comp); + if (i >= 0) + return true; /* * Crypto does not ignore a trailing new line symbol, diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c index d3e3af22a088..856d5dc02451 100644 --- a/drivers/block/zram/zram_drv.c +++ b/drivers/block/zram/zram_drv.c @@ -1124,7 +1124,7 @@ static struct attribute *zram_disk_attrs[] = { NULL, }; -static struct attribute_group zram_disk_attr_group = { +static const struct attribute_group zram_disk_attr_group = { .attrs = zram_disk_attrs, }; diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig index 90f3edffb067..f6fa056a52fc 100644 --- a/drivers/char/ipmi/Kconfig +++ b/drivers/char/ipmi/Kconfig @@ -5,6 +5,7 @@ menuconfig IPMI_HANDLER tristate 'IPMI top-level message handler' depends on HAS_IOMEM + select IPMI_DMI_DECODE if DMI help This enables the central IPMI message handler, required for IPMI to work. @@ -16,6 +17,9 @@ menuconfig IPMI_HANDLER If unsure, say N. +config IPMI_DMI_DECODE + bool + if IPMI_HANDLER config IPMI_PANIC_EVENT diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile index 0d98cd91def1..eefb0b301e83 100644 --- a/drivers/char/ipmi/Makefile +++ b/drivers/char/ipmi/Makefile @@ -7,6 +7,7 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o obj-$(CONFIG_IPMI_SI) += ipmi_si.o +obj-$(CONFIG_IPMI_DMI_DECODE) += ipmi_dmi.o obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o diff --git a/drivers/char/ipmi/ipmi_dmi.c b/drivers/char/ipmi/ipmi_dmi.c new file mode 100644 index 000000000000..2a84401dea05 --- /dev/null +++ b/drivers/char/ipmi/ipmi_dmi.c @@ -0,0 +1,273 @@ +/* + * A hack to create a platform device from a DMI entry. 
This will + * allow autoloading of the IPMI driver based on SMBIOS entries. + */ + +#include <linux/ipmi.h> +#include <linux/init.h> +#include <linux/dmi.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include "ipmi_dmi.h" + +struct ipmi_dmi_info { + int type; + u32 flags; + unsigned long addr; + u8 slave_addr; + struct ipmi_dmi_info *next; +}; + +static struct ipmi_dmi_info *ipmi_dmi_infos; + +static int ipmi_dmi_nr __initdata; + +static void __init dmi_add_platform_ipmi(unsigned long base_addr, + u32 flags, + u8 slave_addr, + int irq, + int offset, + int type) +{ + struct platform_device *pdev; + struct resource r[4]; + unsigned int num_r = 1, size; + struct property_entry p[4] = { + PROPERTY_ENTRY_U8("slave-addr", slave_addr), + PROPERTY_ENTRY_U8("ipmi-type", type), + PROPERTY_ENTRY_U16("i2c-addr", base_addr), + { } + }; + char *name, *override; + int rv; + struct ipmi_dmi_info *info; + + info = kmalloc(sizeof(*info), GFP_KERNEL); + if (!info) { + pr_warn("ipmi:dmi: Could not allocate dmi info\n"); + } else { + info->type = type; + info->flags = flags; + info->addr = base_addr; + info->slave_addr = slave_addr; + info->next = ipmi_dmi_infos; + ipmi_dmi_infos = info; + } + + name = "dmi-ipmi-si"; + override = "ipmi_si"; + switch (type) { + case IPMI_DMI_TYPE_SSIF: + name = "dmi-ipmi-ssif"; + override = "ipmi_ssif"; + offset = 1; + size = 1; + break; + case IPMI_DMI_TYPE_BT: + size = 3; + break; + case IPMI_DMI_TYPE_KCS: + case IPMI_DMI_TYPE_SMIC: + size = 2; + break; + default: + pr_err("ipmi:dmi: Invalid IPMI type: %d", type); + return; + } + + pdev = platform_device_alloc(name, ipmi_dmi_nr); + if (!pdev) { + pr_err("ipmi:dmi: Error allocating IPMI platform device"); + return; + } + pdev->driver_override = override; + + if (type == IPMI_DMI_TYPE_SSIF) + goto add_properties; + + memset(r, 0, sizeof(r)); + + r[0].start = base_addr; + r[0].end = r[0].start + offset - 1; + r[0].name = "IPMI Address 1"; + r[0].flags = flags; + + if (size > 1) { + r[1].start = r[0].start + offset; + r[1].end = r[1].start + offset - 1; + r[1].name = "IPMI Address 2"; + r[1].flags = flags; + num_r++; + } + + if (size > 2) { + r[2].start = r[1].start + offset; + r[2].end = r[2].start + offset - 1; + r[2].name = "IPMI Address 3"; + r[2].flags = flags; + num_r++; + } + + if (irq) { + r[num_r].start = irq; + r[num_r].end = irq; + r[num_r].name = "IPMI IRQ"; + r[num_r].flags = IORESOURCE_IRQ; + num_r++; + } + + rv = platform_device_add_resources(pdev, r, num_r); + if (rv) { + dev_err(&pdev->dev, + "ipmi:dmi: Unable to add resources: %d\n", rv); + goto err; + } + +add_properties: + rv = platform_device_add_properties(pdev, p); + if (rv) { + dev_err(&pdev->dev, + "ipmi:dmi: Unable to add properties: %d\n", rv); + goto err; + } + + rv = platform_device_add(pdev); + if (rv) { + dev_err(&pdev->dev, "ipmi:dmi: Unable to add device: %d\n", rv); + goto err; + } + + ipmi_dmi_nr++; + return; + +err: + platform_device_put(pdev); +} + +/* + * Look up the slave address for a given interface. This is here + * because ACPI doesn't have a slave address while SMBIOS does, but we + * prefer using ACPI so the ACPI code can use the IPMI namespace. + * This function allows an ACPI-specified IPMI device to look up the + * slave address from the DMI table.
+ */ +int ipmi_dmi_get_slave_addr(int type, u32 flags, unsigned long base_addr) +{ + struct ipmi_dmi_info *info = ipmi_dmi_infos; + + while (info) { + if (info->type == type && + info->flags == flags && + info->addr == base_addr) + return info->slave_addr; + info = info->next; + } + + return 0; +} +EXPORT_SYMBOL(ipmi_dmi_get_slave_addr); + +#define DMI_IPMI_MIN_LENGTH 0x10 +#define DMI_IPMI_VER2_LENGTH 0x12 +#define DMI_IPMI_TYPE 4 +#define DMI_IPMI_SLAVEADDR 6 +#define DMI_IPMI_ADDR 8 +#define DMI_IPMI_ACCESS 0x10 +#define DMI_IPMI_IRQ 0x11 +#define DMI_IPMI_IO_MASK 0xfffe + +static void __init dmi_decode_ipmi(const struct dmi_header *dm) +{ + const u8 *data = (const u8 *) dm; + u32 flags = IORESOURCE_IO; + unsigned long base_addr; + u8 len = dm->length; + u8 slave_addr; + int irq = 0, offset; + int type; + + if (len < DMI_IPMI_MIN_LENGTH) + return; + + type = data[DMI_IPMI_TYPE]; + slave_addr = data[DMI_IPMI_SLAVEADDR]; + + memcpy(&base_addr, data + DMI_IPMI_ADDR, sizeof(unsigned long)); + if (len >= DMI_IPMI_VER2_LENGTH) { + if (type == IPMI_DMI_TYPE_SSIF) { + offset = 0; + flags = 0; + base_addr = data[DMI_IPMI_ADDR] >> 1; + if (base_addr == 0) { + /* + * Some broken systems put the I2C address in + * the slave address field. We try to + * accommodate them here. + */ + base_addr = data[DMI_IPMI_SLAVEADDR] >> 1; + slave_addr = 0; + } + } else { + if (base_addr & 1) { + /* I/O */ + base_addr &= DMI_IPMI_IO_MASK; + } else { + /* Memory */ + flags = IORESOURCE_MEM; + } + + /* + * If bit 4 of byte 0x10 is set, then the lsb + * for the address is odd. + */ + base_addr |= (data[DMI_IPMI_ACCESS] >> 4) & 1; + + irq = data[DMI_IPMI_IRQ]; + + /* + * The top two bits of byte 0x10 hold the + * register spacing. + */ + switch ((data[DMI_IPMI_ACCESS] >> 6) & 3) { + case 0: /* Byte boundaries */ + offset = 1; + break; + case 1: /* 32-bit boundaries */ + offset = 4; + break; + case 2: /* 16-byte boundaries */ + offset = 16; + break; + default: + pr_err("ipmi:dmi: Invalid offset: 0"); + return; + } + } + } else { + /* Old DMI spec. */ + /* + * Note that technically, the lower bit of the base + * address should be 1 if the address is I/O and 0 if + * the address is in memory. So many systems get that + * wrong (and all that I have seen are I/O) so we just + * ignore that bit and assume I/O. Systems that use + * memory should use the newer spec, anyway. 
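+ * As a hypothetical example, a legacy base address field of
+ * 0x0ca3 is masked with DMI_IPMI_IO_MASK below and treated as
+ * I/O port 0x0ca2.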
+ */ + base_addr = base_addr & DMI_IPMI_IO_MASK; + offset = 1; + } + + dmi_add_platform_ipmi(base_addr, flags, slave_addr, irq, + offset, type); +} + +static int __init scan_for_dmi_ipmi(void) +{ + const struct dmi_device *dev = NULL; + + while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) + dmi_decode_ipmi((const struct dmi_header *) dev->device_data); + + return 0; +} +subsys_initcall(scan_for_dmi_ipmi); diff --git a/drivers/char/ipmi/ipmi_dmi.h b/drivers/char/ipmi/ipmi_dmi.h new file mode 100644 index 000000000000..0a1afe5ceb1e --- /dev/null +++ b/drivers/char/ipmi/ipmi_dmi.h @@ -0,0 +1,12 @@ +/* + * DMI defines for use by IPMI + */ + +#define IPMI_DMI_TYPE_KCS 0x01 +#define IPMI_DMI_TYPE_SMIC 0x02 +#define IPMI_DMI_TYPE_BT 0x03 +#define IPMI_DMI_TYPE_SSIF 0x04 + +#ifdef CONFIG_IPMI_DMI_DECODE +int ipmi_dmi_get_slave_addr(int type, u32 flags, unsigned long base_addr); +#endif diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 9f699951b75a..810b138f5897 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -2397,7 +2397,7 @@ static umode_t bmc_dev_attr_is_visible(struct kobject *kobj, return mode; } -static struct attribute_group bmc_dev_attr_group = { +static const struct attribute_group bmc_dev_attr_group = { .attrs = bmc_dev_attrs, .is_visible = bmc_dev_attr_is_visible, }; @@ -2407,7 +2407,7 @@ static const struct attribute_group *bmc_dev_attr_groups[] = { NULL }; -static struct device_type bmc_device_type = { +static const struct device_type bmc_device_type = { .groups = bmc_dev_attr_groups, }; @@ -3878,6 +3878,9 @@ static void smi_recv_tasklet(unsigned long val) * because the lower layer is allowed to hold locks while calling * message delivery. */ + + rcu_read_lock(); + if (!run_to_completion) spin_lock_irqsave(&intf->xmit_msgs_lock, flags); if (intf->curr_msg == NULL && !intf->in_shutdown) { @@ -3900,6 +3903,8 @@ static void smi_recv_tasklet(unsigned long val) if (newmsg) intf->handlers->sender(intf->send_info, newmsg); + rcu_read_unlock(); + handle_new_recv_msgs(intf); } diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 59ee93ea84eb..985973855005 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c @@ -61,6 +61,7 @@ #include <linux/ipmi_smi.h> #include <asm/io.h> #include "ipmi_si_sm.h" +#include "ipmi_dmi.h" #include <linux/dmi.h> #include <linux/string.h> #include <linux/ctype.h> @@ -1942,7 +1943,7 @@ static int hotmod_handler(const char *val, struct kernel_param *kp) info->io.regspacing = DEFAULT_REGSPACING; info->io.regsize = regsize; if (!info->io.regsize) - info->io.regsize = DEFAULT_REGSPACING; + info->io.regsize = DEFAULT_REGSIZE; info->io.regshift = regshift; info->irq = irq; if (info->irq) @@ -2036,7 +2037,7 @@ static int hardcode_find_bmc(void) info->io.regspacing = DEFAULT_REGSPACING; info->io.regsize = regsizes[i]; if (!info->io.regsize) - info->io.regsize = DEFAULT_REGSPACING; + info->io.regsize = DEFAULT_REGSIZE; info->io.regshift = regshifts[i]; info->irq = irqs[i]; if (info->irq) @@ -2273,136 +2274,105 @@ static void spmi_find_bmc(void) } #endif -#ifdef CONFIG_DMI -struct dmi_ipmi_data { - u8 type; - u8 addr_space; - unsigned long base_addr; - u8 irq; - u8 offset; - u8 slave_addr; -}; - -static int decode_dmi(const struct dmi_header *dm, - struct dmi_ipmi_data *dmi) +#if defined(CONFIG_DMI) || defined(CONFIG_ACPI) +struct resource *ipmi_get_info_from_resources(struct platform_device *pdev, + struct 
smi_info *info) { - const u8 *data = (const u8 *)dm; - unsigned long base_addr; - u8 reg_spacing; - u8 len = dm->length; - - dmi->type = data[4]; + struct resource *res, *res_second; - memcpy(&base_addr, data+8, sizeof(unsigned long)); - if (len >= 0x11) { - if (base_addr & 1) { - /* I/O */ - base_addr &= 0xFFFE; - dmi->addr_space = IPMI_IO_ADDR_SPACE; - } else - /* Memory */ - dmi->addr_space = IPMI_MEM_ADDR_SPACE; - - /* If bit 4 of byte 0x10 is set, then the lsb for the address - is odd. */ - dmi->base_addr = base_addr | ((data[0x10] & 0x10) >> 4); - - dmi->irq = data[0x11]; - - /* The top two bits of byte 0x10 hold the register spacing. */ - reg_spacing = (data[0x10] & 0xC0) >> 6; - switch (reg_spacing) { - case 0x00: /* Byte boundaries */ - dmi->offset = 1; - break; - case 0x01: /* 32-bit boundaries */ - dmi->offset = 4; - break; - case 0x02: /* 16-byte boundaries */ - dmi->offset = 16; - break; - default: - /* Some other interface, just ignore it. */ - return -EIO; - } + res = platform_get_resource(pdev, IORESOURCE_IO, 0); + if (res) { + info->io_setup = port_setup; + info->io.addr_type = IPMI_IO_ADDR_SPACE; } else { - /* Old DMI spec. */ - /* - * Note that technically, the lower bit of the base - * address should be 1 if the address is I/O and 0 if - * the address is in memory. So many systems get that - * wrong (and all that I have seen are I/O) so we just - * ignore that bit and assume I/O. Systems that use - * memory should use the newer spec, anyway. - */ - dmi->base_addr = base_addr & 0xfffe; - dmi->addr_space = IPMI_IO_ADDR_SPACE; - dmi->offset = 1; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (res) { + info->io_setup = mem_setup; + info->io.addr_type = IPMI_MEM_ADDR_SPACE; + } } + if (!res) { + dev_err(&pdev->dev, "no I/O or memory address\n"); + return NULL; + } + info->io.addr_data = res->start; - dmi->slave_addr = data[6]; + info->io.regspacing = DEFAULT_REGSPACING; + res_second = platform_get_resource(pdev, + (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? 
+ IORESOURCE_IO : IORESOURCE_MEM, + 1); + if (res_second) { + if (res_second->start > info->io.addr_data) + info->io.regspacing = + res_second->start - info->io.addr_data; + } + info->io.regsize = DEFAULT_REGSIZE; + info->io.regshift = 0; - return 0; + return res; } -static void try_init_dmi(struct dmi_ipmi_data *ipmi_data) +#endif + +#ifdef CONFIG_DMI +static int dmi_ipmi_probe(struct platform_device *pdev) { struct smi_info *info; + u8 type, slave_addr; + int rv; + + if (!si_trydmi) + return -ENODEV; + + rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type); + if (rv) + return -ENODEV; info = smi_info_alloc(); if (!info) { pr_err(PFX "Could not allocate SI data\n"); - return; + return -ENOMEM; } info->addr_source = SI_SMBIOS; pr_info(PFX "probing via SMBIOS\n"); - switch (ipmi_data->type) { - case 0x01: /* KCS */ + switch (type) { + case IPMI_DMI_TYPE_KCS: info->si_type = SI_KCS; break; - case 0x02: /* SMIC */ + case IPMI_DMI_TYPE_SMIC: info->si_type = SI_SMIC; break; - case 0x03: /* BT */ + case IPMI_DMI_TYPE_BT: info->si_type = SI_BT; break; default: kfree(info); - return; + return -EINVAL; } - switch (ipmi_data->addr_space) { - case IPMI_MEM_ADDR_SPACE: - info->io_setup = mem_setup; - info->io.addr_type = IPMI_MEM_ADDR_SPACE; - break; - - case IPMI_IO_ADDR_SPACE: - info->io_setup = port_setup; - info->io.addr_type = IPMI_IO_ADDR_SPACE; - break; - - default: - kfree(info); - pr_warn(PFX "Unknown SMBIOS I/O Address type: %d\n", - ipmi_data->addr_space); - return; + if (!ipmi_get_info_from_resources(pdev, info)) { + rv = -EINVAL; + goto err_free; } - info->io.addr_data = ipmi_data->base_addr; - info->io.regspacing = ipmi_data->offset; - if (!info->io.regspacing) - info->io.regspacing = DEFAULT_REGSPACING; - info->io.regsize = DEFAULT_REGSPACING; - info->io.regshift = 0; - - info->slave_addr = ipmi_data->slave_addr; + rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr); + if (rv) { + dev_warn(&pdev->dev, "device has no slave-addr property"); + info->slave_addr = 0x20; + } else { + info->slave_addr = slave_addr; + } - info->irq = ipmi_data->irq; - if (info->irq) + info->irq = platform_get_irq(pdev, 0); + if (info->irq > 0) info->irq_setup = std_irq_setup; + else + info->irq = 0; + + info->dev = &pdev->dev; pr_info("ipmi_si: SMBIOS: %s %#lx regsize %d spacing %d irq %d\n", (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? 
"io" : "mem", @@ -2411,21 +2381,17 @@ static void try_init_dmi(struct dmi_ipmi_data *ipmi_data) if (add_smi(info)) kfree(info); -} -static void dmi_find_bmc(void) -{ - const struct dmi_device *dev = NULL; - struct dmi_ipmi_data data; - int rv; + return 0; - while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) { - memset(&data, 0, sizeof(data)); - rv = decode_dmi((const struct dmi_header *) dev->device_data, - &data); - if (!rv) - try_init_dmi(&data); - } +err_free: + kfree(info); + return rv; +} +#else +static int dmi_ipmi_probe(struct platform_device *pdev) +{ + return -ENODEV; } #endif /* CONFIG_DMI */ @@ -2684,17 +2650,47 @@ static int of_ipmi_probe(struct platform_device *dev) #endif #ifdef CONFIG_ACPI +static int find_slave_address(struct smi_info *info, int slave_addr) +{ +#ifdef CONFIG_IPMI_DMI_DECODE + if (!slave_addr) { + int type = -1; + u32 flags = IORESOURCE_IO; + + switch (info->si_type) { + case SI_KCS: + type = IPMI_DMI_TYPE_KCS; + break; + case SI_BT: + type = IPMI_DMI_TYPE_BT; + break; + case SI_SMIC: + type = IPMI_DMI_TYPE_SMIC; + break; + } + + if (info->io.addr_type == IPMI_MEM_ADDR_SPACE) + flags = IORESOURCE_MEM; + + slave_addr = ipmi_dmi_get_slave_addr(type, flags, + info->io.addr_data); + } +#endif + + return slave_addr; +} + static int acpi_ipmi_probe(struct platform_device *dev) { struct smi_info *info; - struct resource *res, *res_second; acpi_handle handle; acpi_status status; unsigned long long tmp; + struct resource *res; int rv = -EINVAL; if (!si_tryacpi) - return 0; + return -ENODEV; handle = ACPI_HANDLE(&dev->dev); if (!handle) @@ -2734,35 +2730,11 @@ static int acpi_ipmi_probe(struct platform_device *dev) goto err_free; } - res = platform_get_resource(dev, IORESOURCE_IO, 0); - if (res) { - info->io_setup = port_setup; - info->io.addr_type = IPMI_IO_ADDR_SPACE; - } else { - res = platform_get_resource(dev, IORESOURCE_MEM, 0); - if (res) { - info->io_setup = mem_setup; - info->io.addr_type = IPMI_MEM_ADDR_SPACE; - } - } + res = ipmi_get_info_from_resources(dev, info); if (!res) { - dev_err(&dev->dev, "no I/O or memory address\n"); + rv = -EINVAL; goto err_free; } - info->io.addr_data = res->start; - - info->io.regspacing = DEFAULT_REGSPACING; - res_second = platform_get_resource(dev, - (info->io.addr_type == IPMI_IO_ADDR_SPACE) ? 
- IORESOURCE_IO : IORESOURCE_MEM, - 1); - if (res_second) { - if (res_second->start > info->io.addr_data) - info->io.regspacing = - res_second->start - info->io.addr_data; - } - info->io.regsize = DEFAULT_REGSPACING; - info->io.regshift = 0; /* If _GPE exists, use it; otherwise use standard interrupts */ status = acpi_evaluate_integer(handle, "_GPE", NULL, &tmp); @@ -2778,6 +2750,8 @@ static int acpi_ipmi_probe(struct platform_device *dev) } } + info->slave_addr = find_slave_address(info, info->slave_addr); + info->dev = &dev->dev; platform_set_drvdata(dev, info); @@ -2813,7 +2787,10 @@ static int ipmi_probe(struct platform_device *dev) if (of_ipmi_probe(dev) == 0) return 0; - return acpi_ipmi_probe(dev); + if (acpi_ipmi_probe(dev) == 0) + return 0; + + return dmi_ipmi_probe(dev); } static int ipmi_remove(struct platform_device *dev) @@ -3786,11 +3763,6 @@ static int init_ipmi_si(void) } #endif -#ifdef CONFIG_DMI - if (si_trydmi) - dmi_find_bmc(); -#endif - #ifdef CONFIG_ACPI if (si_tryacpi) spmi_find_bmc(); @@ -3938,6 +3910,7 @@ static void cleanup_ipmi_si(void) } module_exit(cleanup_ipmi_si); +MODULE_ALIAS("platform:dmi-ipmi-si"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>"); MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT" diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index 0b22a9be5029..0aea3bcb6158 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -53,6 +53,7 @@ #include <linux/acpi.h> #include <linux/ctype.h> #include <linux/time64.h> +#include "ipmi_dmi.h" #define PFX "ipmi_ssif: " #define DEVICE_NAME "ipmi_ssif" @@ -180,6 +181,8 @@ struct ssif_addr_info { int slave_addr; enum ipmi_addr_src addr_src; union ipmi_smi_info_union addr_info; + struct device *dev; + struct i2c_client *client; struct mutex clients_mutex; struct list_head clients; @@ -408,6 +411,7 @@ static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags) msg = ipmi_alloc_smi_msg(); if (!msg) { ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -430,6 +434,7 @@ static void start_recv_msg_fetch(struct ssif_info *ssif_info, msg = ipmi_alloc_smi_msg(); if (!msg) { ssif_info->ssif_state = SSIF_NORMAL; + ipmi_ssif_unlock_cond(ssif_info, flags); return; } @@ -761,6 +766,11 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result, result, len, data[2]); } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 || data[1] != IPMI_GET_MSG_FLAGS_CMD) { + /* + * Don't abort here, maybe it was a queued + * response to a previous command. 
+ */ + ipmi_ssif_unlock_cond(ssif_info, flags); pr_warn(PFX "Invalid response getting flags: %x %x\n", data[0], data[1]); } else { @@ -1094,7 +1104,7 @@ static int inc_usecount(void *send_info) { struct ssif_info *ssif_info = send_info; - if (!i2c_get_adapter(ssif_info->client->adapter->nr)) + if (!i2c_get_adapter(i2c_adapter_id(ssif_info->client->adapter))) return -ENODEV; i2c_use_client(ssif_info->client); @@ -1169,6 +1179,7 @@ static LIST_HEAD(ssif_infos); static int ssif_remove(struct i2c_client *client) { struct ssif_info *ssif_info = i2c_get_clientdata(client); + struct ssif_addr_info *addr_info; int rv; if (!ssif_info) @@ -1196,6 +1207,13 @@ static int ssif_remove(struct i2c_client *client) kthread_stop(ssif_info->thread); } + list_for_each_entry(addr_info, &ssif_infos, link) { + if (addr_info->client == client) { + addr_info->client = NULL; + break; + } + } + /* * No message can be outstanding now, we have removed the * upper layer and it permitted us to do so. @@ -1404,28 +1422,13 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev) static int find_slave_address(struct i2c_client *client, int slave_addr) { - struct ssif_addr_info *info; - - if (slave_addr) - return slave_addr; - - /* - * Came in without a slave address, search around to see if - * the other sources have a slave address. This lets us pick - * up an SMBIOS slave address when using ACPI. - */ - list_for_each_entry(info, &ssif_infos, link) { - if (info->binfo.addr != client->addr) - continue; - if (info->adapter_name && client->adapter->name && - strcmp_nospace(info->adapter_name, - client->adapter->name)) - continue; - if (info->slave_addr) { - slave_addr = info->slave_addr; - break; - } - } +#ifdef CONFIG_IPMI_DMI_DECODE + if (!slave_addr) + slave_addr = ipmi_dmi_get_slave_addr( + IPMI_DMI_TYPE_SSIF, + i2c_adapter_id(client->adapter), + client->addr); +#endif return slave_addr; } @@ -1447,7 +1450,6 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) u8 slave_addr = 0; struct ssif_addr_info *addr_info = NULL; - resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL); if (!resp) return -ENOMEM; @@ -1468,6 +1470,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) ssif_info->addr_source = addr_info->addr_src; ssif_info->ssif_debug = addr_info->debug; ssif_info->addr_info = addr_info->addr_info; + addr_info->client = client; slave_addr = addr_info->slave_addr; } } @@ -1664,7 +1667,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) { unsigned int thread_num; - thread_num = ((ssif_info->client->adapter->nr << 8) | + thread_num = ((i2c_adapter_id(ssif_info->client->adapter) + << 8) | ssif_info->client->addr); init_completion(&ssif_info->wake_thread); ssif_info->thread = kthread_run(ipmi_ssif_thread, ssif_info, @@ -1705,8 +1709,19 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id) } out: - if (rv) + if (rv) { + /* + * Note that if addr_info->client is assigned, we + * leave it. The i2c client hangs around even if we + * return a failure here, and the failure here is not + * propagated back to the i2c code. This seems to be + * design intent, strange as it may be. But if we + * don't leave it, ssif_platform_remove will not remove + * the client like it should. 
+ */ + dev_err(&client->dev, "Unable to start IPMI SSIF: %d\n", rv); kfree(ssif_info); + } kfree(resp); return rv; @@ -1731,7 +1746,8 @@ static int ssif_adapter_handler(struct device *adev, void *opaque) static int new_ssif_client(int addr, char *adapter_name, int debug, int slave_addr, - enum ipmi_addr_src addr_src) + enum ipmi_addr_src addr_src, + struct device *dev) { struct ssif_addr_info *addr_info; int rv = 0; @@ -1764,6 +1780,10 @@ static int new_ssif_client(int addr, char *adapter_name, addr_info->debug = debug; addr_info->slave_addr = slave_addr; addr_info->addr_src = addr_src; + addr_info->dev = dev; + + if (dev) + dev_set_drvdata(dev, addr_info); list_add_tail(&addr_info->link, &ssif_infos); @@ -1902,7 +1922,7 @@ static int try_init_spmi(struct SPMITable *spmi) myaddr = spmi->addr.address & 0x7f; - return new_ssif_client(myaddr, NULL, 0, 0, SI_SPMI); + return new_ssif_client(myaddr, NULL, 0, 0, SI_SPMI, NULL); } static void spmi_find_bmc(void) @@ -1931,48 +1951,40 @@ static void spmi_find_bmc(void) { } #endif #ifdef CONFIG_DMI -static int decode_dmi(const struct dmi_device *dmi_dev) +static int dmi_ipmi_probe(struct platform_device *pdev) { - struct dmi_header *dm = dmi_dev->device_data; - u8 *data = (u8 *) dm; - u8 len = dm->length; - unsigned short myaddr; - int slave_addr; + u8 type, slave_addr = 0; + u16 i2c_addr; + int rv; - if (num_addrs >= MAX_SSIF_BMCS) - return -1; + if (!ssif_trydmi) + return -ENODEV; - if (len < 9) - return -1; + rv = device_property_read_u8(&pdev->dev, "ipmi-type", &type); + if (rv) + return -ENODEV; - if (data[0x04] != 4) /* Not SSIF */ - return -1; + if (type != IPMI_DMI_TYPE_SSIF) + return -ENODEV; - if ((data[8] >> 1) == 0) { - /* - * Some broken systems put the I2C address in - * the slave address field. We try to - * accommodate them here. 
- */ - myaddr = data[6] >> 1; - slave_addr = 0; - } else { - myaddr = data[8] >> 1; - slave_addr = data[6]; + rv = device_property_read_u16(&pdev->dev, "i2c-addr", &i2c_addr); + if (rv) { + dev_warn(&pdev->dev, PFX "No i2c-addr property\n"); + return -ENODEV; } - return new_ssif_client(myaddr, NULL, 0, slave_addr, SI_SMBIOS); -} - -static void dmi_iterator(void) -{ - const struct dmi_device *dev = NULL; + rv = device_property_read_u8(&pdev->dev, "slave-addr", &slave_addr); + if (rv) + dev_warn(&pdev->dev, "device has no slave-addr property"); - while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev))) - decode_dmi(dev); + return new_ssif_client(i2c_addr, NULL, 0, + slave_addr, SI_SMBIOS, &pdev->dev); } #else -static void dmi_iterator(void) { } +static int dmi_ipmi_probe(struct platform_device *pdev) +{ + return -ENODEV; +} #endif static const struct i2c_device_id ssif_id[] = { @@ -1993,6 +2005,36 @@ static struct i2c_driver ssif_i2c_driver = { .detect = ssif_detect }; +static int ssif_platform_probe(struct platform_device *dev) +{ + return dmi_ipmi_probe(dev); +} + +static int ssif_platform_remove(struct platform_device *dev) +{ + struct ssif_addr_info *addr_info = dev_get_drvdata(&dev->dev); + + if (!addr_info) + return 0; + + mutex_lock(&ssif_infos_mutex); + if (addr_info->client) + i2c_unregister_device(addr_info->client); + + list_del(&addr_info->link); + kfree(addr_info); + mutex_unlock(&ssif_infos_mutex); + return 0; +} + +static struct platform_driver ipmi_driver = { + .driver = { + .name = DEVICE_NAME, + }, + .probe = ssif_platform_probe, + .remove = ssif_platform_remove, +}; + static int init_ipmi_ssif(void) { int i; @@ -2007,7 +2049,7 @@ static int init_ipmi_ssif(void) for (i = 0; i < num_addrs; i++) { rv = new_ssif_client(addr[i], adapter_name[i], dbg[i], slave_addrs[i], - SI_HARDCODED); + SI_HARDCODED, NULL); if (rv) pr_err(PFX "Couldn't add hardcoded device at addr 0x%x\n", @@ -2017,11 +2059,16 @@ static int init_ipmi_ssif(void) if (ssif_tryacpi) ssif_i2c_driver.driver.acpi_match_table = ACPI_PTR(ssif_acpi_match); - if (ssif_trydmi) - dmi_iterator(); + if (ssif_tryacpi) spmi_find_bmc(); + if (ssif_trydmi) { + rv = platform_driver_register(&ipmi_driver); + if (rv) + pr_err(PFX "Unable to register driver: %d\n", rv); + } + ssif_i2c_driver.address_list = ssif_address_list(); rv = i2c_add_driver(&ssif_i2c_driver); @@ -2041,10 +2088,13 @@ static void cleanup_ipmi_ssif(void) i2c_del_driver(&ssif_i2c_driver); + platform_driver_unregister(&ipmi_driver); + free_ssif_clients(); } module_exit(cleanup_ipmi_ssif); +MODULE_ALIAS("platform:dmi-ipmi-ssif"); MODULE_AUTHOR("Todd C Davis <todd.c.davis@intel.com>, Corey Minyard <minyard@acm.org>"); MODULE_DESCRIPTION("IPMI driver for management controllers on a SMBus"); MODULE_LICENSE("GPL"); diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c index a5c6cfe71a8e..3d832d0362a4 100644 --- a/drivers/char/ipmi/ipmi_watchdog.c +++ b/drivers/char/ipmi/ipmi_watchdog.c @@ -1163,10 +1163,11 @@ static int wdog_reboot_handler(struct notifier_block *this, ipmi_watchdog_state = WDOG_TIMEOUT_NONE; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); } else if (ipmi_watchdog_state != WDOG_TIMEOUT_NONE) { - /* Set a long timer to let the reboot happens, but - reboot if it hangs, but only if the watchdog + /* Set a long timer to let the reboot happen or + reset if it hangs, but only if the watchdog timer was already running. 
*/ - timeout = 120; + if (timeout < 120) + timeout = 120; pretimeout = 0; ipmi_watchdog_state = WDOG_TIMEOUT_RESET; ipmi_set_timeout(IPMI_SET_TIMEOUT_NO_HB); diff --git a/drivers/char/random.c b/drivers/char/random.c index 01a260f67437..afa3ce7d3e72 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -288,7 +288,6 @@ #define SEC_XFER_SIZE 512 #define EXTRACT_SIZE 10 -#define DEBUG_RANDOM_BOOT 0 #define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long)) @@ -437,6 +436,7 @@ static void _extract_crng(struct crng_state *crng, static void _crng_backtrack_protect(struct crng_state *crng, __u8 tmp[CHACHA20_BLOCK_SIZE], int used); static void process_random_ready_list(void); +static void _get_random_bytes(void *buf, int nbytes); /********************************************************************** * @@ -777,7 +777,7 @@ static void crng_initialize(struct crng_state *crng) _extract_entropy(&input_pool, &crng->state[4], sizeof(__u32) * 12, 0); else - get_random_bytes(&crng->state[4], sizeof(__u32) * 12); + _get_random_bytes(&crng->state[4], sizeof(__u32) * 12); for (i = 4; i < 16; i++) { if (!arch_get_random_seed_long(&rv) && !arch_get_random_long(&rv)) @@ -851,11 +851,6 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) } } -static inline void crng_wait_ready(void) -{ - wait_event_interruptible(crng_init_wait, crng_ready()); -} - static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA20_BLOCK_SIZE]) { @@ -987,6 +982,11 @@ void add_device_randomness(const void *buf, unsigned int size) unsigned long time = random_get_entropy() ^ jiffies; unsigned long flags; + if (!crng_ready()) { + crng_fast_load(buf, size); + return; + } + trace_add_device_randomness(size, _RET_IP_); spin_lock_irqsave(&input_pool.lock, flags); _mix_pool_bytes(&input_pool, buf, size); @@ -1472,22 +1472,44 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf, return ret; } +#define warn_unseeded_randomness(previous) \ + _warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous)) + +static void _warn_unseeded_randomness(const char *func_name, void *caller, + void **previous) +{ +#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM + const bool print_once = false; +#else + static bool print_once __read_mostly; +#endif + + if (print_once || + crng_ready() || + (previous && (caller == READ_ONCE(*previous)))) + return; + WRITE_ONCE(*previous, caller); +#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM + print_once = true; +#endif + pr_notice("random: %s called from %pF with crng_init=%d\n", + func_name, caller, crng_init); +} + /* * This function is the exported kernel interface. It returns some * number of good random numbers, suitable for key generation, seeding * TCP sequence numbers, etc. It does not rely on the hardware random * number generator. For random bytes direct from the hardware RNG - * (when available), use get_random_bytes_arch(). + * (when available), use get_random_bytes_arch(). In order to ensure + * that the randomness provided by this function is okay, the function + * wait_for_random_bytes() should be called and return 0 at least once + * at any point prior. 
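+ *
+ * A minimal illustrative pattern (buf and nbytes are hypothetical)
+ * for a caller that needs seeded output:
+ *
+ *	if (wait_for_random_bytes() == 0)
+ *		get_random_bytes(buf, nbytes);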
*/ -void get_random_bytes(void *buf, int nbytes) +static void _get_random_bytes(void *buf, int nbytes) { __u8 tmp[CHACHA20_BLOCK_SIZE]; -#if DEBUG_RANDOM_BOOT > 0 - if (!crng_ready()) - printk(KERN_NOTICE "random: %pF get_random_bytes called " - "with crng_init = %d\n", (void *) _RET_IP_, crng_init); -#endif trace_get_random_bytes(nbytes, _RET_IP_); while (nbytes >= CHACHA20_BLOCK_SIZE) { @@ -1504,9 +1526,35 @@ void get_random_bytes(void *buf, int nbytes) crng_backtrack_protect(tmp, CHACHA20_BLOCK_SIZE); memzero_explicit(tmp, sizeof(tmp)); } + +void get_random_bytes(void *buf, int nbytes) +{ + static void *previous; + + warn_unseeded_randomness(&previous); + _get_random_bytes(buf, nbytes); +} EXPORT_SYMBOL(get_random_bytes); /* + * Wait for the urandom pool to be seeded and thus guaranteed to supply + * cryptographically secure random numbers. This applies to: the /dev/urandom + * device, the get_random_bytes function, and the get_random_{u32,u64,int,long} + * family of functions. Using any of these functions without first calling + * this function forfeits the guarantee of security. + * + * Returns: 0 if the urandom pool has been seeded. + * -ERESTARTSYS if the function was interrupted by a signal. + */ +int wait_for_random_bytes(void) +{ + if (likely(crng_ready())) + return 0; + return wait_event_interruptible(crng_init_wait, crng_ready()); +} +EXPORT_SYMBOL(wait_for_random_bytes); + +/* * Add a callback function that will be invoked when the nonblocking * pool is initialised. * @@ -1860,6 +1908,8 @@ const struct file_operations urandom_fops = { SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, unsigned int, flags) { + int ret; + if (flags & ~(GRND_NONBLOCK|GRND_RANDOM)) return -EINVAL; @@ -1872,9 +1922,9 @@ SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count, if (!crng_ready()) { if (flags & GRND_NONBLOCK) return -EAGAIN; - crng_wait_ready(); - if (signal_pending(current)) - return -ERESTARTSYS; + ret = wait_for_random_bytes(); + if (unlikely(ret)) + return ret; } return urandom_read(NULL, buf, count, NULL); } @@ -2035,15 +2085,19 @@ static rwlock_t batched_entropy_reset_lock = __RW_LOCK_UNLOCKED(batched_entropy_ /* * Get a random word for internal kernel use only. The quality of the random * number is either as good as RDRAND or as good as /dev/urandom, with the - * goal of being quite fast and not depleting entropy. + * goal of being quite fast and not depleting entropy. In order to ensure + * that the randomness provided by this function is okay, the function + * wait_for_random_bytes() should be called and return 0 at least once + * at any point prior. 
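+ *
+ * (The illustrative pattern shown above get_random_bytes() applies
+ * here as well: check that wait_for_random_bytes() returned 0 before
+ * using the result for anything security-sensitive.)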
*/ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64); u64 get_random_u64(void) { u64 ret; - bool use_lock = READ_ONCE(crng_init) < 2; + bool use_lock; unsigned long flags = 0; struct batched_entropy *batch; + static void *previous; #if BITS_PER_LONG == 64 if (arch_get_random_long((unsigned long *)&ret)) @@ -2054,6 +2108,9 @@ u64 get_random_u64(void) return ret; #endif + warn_unseeded_randomness(&previous); + + use_lock = READ_ONCE(crng_init) < 2; batch = &get_cpu_var(batched_entropy_u64); if (use_lock) read_lock_irqsave(&batched_entropy_reset_lock, flags); @@ -2073,13 +2130,17 @@ static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32); u32 get_random_u32(void) { u32 ret; - bool use_lock = READ_ONCE(crng_init) < 2; + bool use_lock; unsigned long flags = 0; struct batched_entropy *batch; + static void *previous; if (arch_get_random_int(&ret)) return ret; + warn_unseeded_randomness(&previous); + + use_lock = READ_ONCE(crng_init) < 2; batch = &get_cpu_var(batched_entropy_u32); if (use_lock) read_lock_irqsave(&batched_entropy_reset_lock, flags); diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index d406b087553f..68ca2d9fcd73 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -221,6 +221,7 @@ config COMMON_CLK_VC5 source "drivers/clk/bcm/Kconfig" source "drivers/clk/hisilicon/Kconfig" +source "drivers/clk/imgtec/Kconfig" source "drivers/clk/keystone/Kconfig" source "drivers/clk/mediatek/Kconfig" source "drivers/clk/meson/Kconfig" diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile index 4f6a812342ed..cd376b3fb47a 100644 --- a/drivers/clk/Makefile +++ b/drivers/clk/Makefile @@ -60,6 +60,7 @@ obj-y += bcm/ obj-$(CONFIG_ARCH_BERLIN) += berlin/ obj-$(CONFIG_H8300) += h8300/ obj-$(CONFIG_ARCH_HISI) += hisilicon/ +obj-y += imgtec/ obj-$(CONFIG_ARCH_MXC) += imx/ obj-$(CONFIG_MACH_INGENIC) += ingenic/ obj-$(CONFIG_ARCH_KEYSTONE) += keystone/ diff --git a/drivers/clk/imgtec/Kconfig b/drivers/clk/imgtec/Kconfig new file mode 100644 index 000000000000..f6dcb748e9c4 --- /dev/null +++ b/drivers/clk/imgtec/Kconfig @@ -0,0 +1,9 @@ +config COMMON_CLK_BOSTON + bool "Clock driver for MIPS Boston boards" + depends on MIPS || COMPILE_TEST + select MFD_SYSCON + ---help--- + Enable this to support the system & CPU clocks on the MIPS Boston + development board from Imagination Technologies. These are simple + fixed rate clocks whose rate is determined by reading a platform + provided register. diff --git a/drivers/clk/imgtec/Makefile b/drivers/clk/imgtec/Makefile new file mode 100644 index 000000000000..ac779b8c22f2 --- /dev/null +++ b/drivers/clk/imgtec/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_COMMON_CLK_BOSTON) += clk-boston.o diff --git a/drivers/clk/imgtec/clk-boston.c b/drivers/clk/imgtec/clk-boston.c new file mode 100644 index 000000000000..f18f10351785 --- /dev/null +++ b/drivers/clk/imgtec/clk-boston.c @@ -0,0 +1,103 @@ +/* + * Copyright (C) 2016-2017 Imagination Technologies + * Author: Paul Burton <paul.burton@imgtec.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */ + +#define pr_fmt(fmt) "clk-boston: " fmt + +#include <linux/clk-provider.h> +#include <linux/kernel.h> +#include <linux/of.h> +#include <linux/regmap.h> +#include <linux/slab.h> +#include <linux/mfd/syscon.h> + +#include <dt-bindings/clock/boston-clock.h> + +#define BOSTON_PLAT_MMCMDIV 0x30 +# define BOSTON_PLAT_MMCMDIV_CLK0DIV (0xff << 0) +# define BOSTON_PLAT_MMCMDIV_INPUT (0xff << 8) +# define BOSTON_PLAT_MMCMDIV_MUL (0xff << 16) +# define BOSTON_PLAT_MMCMDIV_CLK1DIV (0xff << 24) + +#define BOSTON_CLK_COUNT 3 + +static u32 ext_field(u32 val, u32 mask) +{ + return (val & mask) >> (ffs(mask) - 1); +} + +static void __init clk_boston_setup(struct device_node *np) +{ + unsigned long in_freq, cpu_freq, sys_freq; + uint mmcmdiv, mul, cpu_div, sys_div; + struct clk_hw_onecell_data *onecell; + struct regmap *regmap; + struct clk_hw *hw; + int err; + + regmap = syscon_node_to_regmap(np->parent); + if (IS_ERR(regmap)) { + pr_err("failed to find regmap\n"); + return; + } + + err = regmap_read(regmap, BOSTON_PLAT_MMCMDIV, &mmcmdiv); + if (err) { + pr_err("failed to read mmcm_div register: %d\n", err); + return; + } + + in_freq = ext_field(mmcmdiv, BOSTON_PLAT_MMCMDIV_INPUT) * 1000000; + mul = ext_field(mmcmdiv, BOSTON_PLAT_MMCMDIV_MUL); + + sys_div = ext_field(mmcmdiv, BOSTON_PLAT_MMCMDIV_CLK0DIV); + sys_freq = mult_frac(in_freq, mul, sys_div); + + cpu_div = ext_field(mmcmdiv, BOSTON_PLAT_MMCMDIV_CLK1DIV); + cpu_freq = mult_frac(in_freq, mul, cpu_div); + + onecell = kzalloc(sizeof(*onecell) + + (BOSTON_CLK_COUNT * sizeof(struct clk_hw *)), + GFP_KERNEL); + if (!onecell) + return; + + onecell->num = BOSTON_CLK_COUNT; + + hw = clk_hw_register_fixed_rate(NULL, "input", NULL, 0, in_freq); + if (IS_ERR(hw)) { + pr_err("failed to register input clock: %ld\n", PTR_ERR(hw)); + return; + } + onecell->hws[BOSTON_CLK_INPUT] = hw; + + hw = clk_hw_register_fixed_rate(NULL, "sys", "input", 0, sys_freq); + if (IS_ERR(hw)) { + pr_err("failed to register sys clock: %ld\n", PTR_ERR(hw)); + return; + } + onecell->hws[BOSTON_CLK_SYS] = hw; + + hw = clk_hw_register_fixed_rate(NULL, "cpu", "input", 0, cpu_freq); + if (IS_ERR(hw)) { + pr_err("failed to register cpu clock: %ld\n", PTR_ERR(hw)); + return; + } + onecell->hws[BOSTON_CLK_CPU] = hw; + + err = of_clk_add_hw_provider(np, of_clk_hw_onecell_get, onecell); + if (err) + pr_err("failed to add DT provider: %d\n", err); +} + +/* + * Use CLK_OF_DECLARE so that this driver is probed early enough to provide the + * CPU frequency for use with the GIC or cop0 counters/timers. + */ +CLK_OF_DECLARE(clk_boston, "img,boston-clock", clk_boston_setup); diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c index f6e7491c873c..d509b500a7b5 100644 --- a/drivers/clocksource/timer-of.c +++ b/drivers/clocksource/timer-of.c @@ -41,8 +41,16 @@ static __init int timer_irq_init(struct device_node *np, struct timer_of *to = container_of(of_irq, struct timer_of, of_irq); struct clock_event_device *clkevt = &to->clkevt; - of_irq->irq = of_irq->name ? 
of_irq_get_byname(np, of_irq->name): - irq_of_parse_and_map(np, of_irq->index); + if (of_irq->name) { + of_irq->irq = ret = of_irq_get_byname(np, of_irq->name); + if (ret < 0) { + pr_err("Failed to get interrupt %s for %s\n", + of_irq->name, np->full_name); + return ret; + } + } else { + of_irq->irq = irq_of_parse_and_map(np, of_irq->index); + } if (!of_irq->irq) { pr_err("Failed to map interrupt for %s\n", np->full_name); return -EINVAL; diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index 418042201e6d..ea6d62547b10 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -540,7 +540,7 @@ static void bL_cpufreq_ready(struct cpufreq_policy *policy) &power_coefficient); cdev[cur_cluster] = of_cpufreq_power_cooling_register(np, - policy->related_cpus, power_coefficient, NULL); + policy, power_coefficient, NULL); if (IS_ERR(cdev[cur_cluster])) { dev_err(cpu_dev, "running cpufreq without cooling device: %ld\n", diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index c943787d761e..fef3c2160691 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -326,7 +326,7 @@ static void cpufreq_ready(struct cpufreq_policy *policy) &power_coefficient); priv->cdev = of_cpufreq_power_cooling_register(np, - policy->related_cpus, power_coefficient, NULL); + policy, power_coefficient, NULL); if (IS_ERR(priv->cdev)) { dev_err(priv->cpu_dev, "running cpufreq without cooling device: %ld\n", diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index f570ead62454..e75880eb037d 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -135,7 +135,7 @@ static struct attribute *default_attrs[] = { &trans_table.attr, NULL }; -static struct attribute_group stats_attr_group = { +static const struct attribute_group stats_attr_group = { .attrs = default_attrs, .name = "stats" }; @@ -170,11 +170,10 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy) unsigned int i = 0, count = 0, ret = -ENOMEM; struct cpufreq_stats *stats; unsigned int alloc_size; - struct cpufreq_frequency_table *pos, *table; + struct cpufreq_frequency_table *pos; - /* We need cpufreq table for creating stats table */ - table = policy->freq_table; - if (unlikely(!table)) + count = cpufreq_table_count_valid_entries(policy); + if (!count) return; /* stats already initialized */ @@ -185,10 +184,6 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy) if (!stats) return; - /* Find total allocation size */ - cpufreq_for_each_valid_entry(pos, table) - count++; - alloc_size = count * sizeof(int) + count * sizeof(u64); alloc_size += count * count * sizeof(int); @@ -205,7 +200,7 @@ void cpufreq_stats_create_table(struct cpufreq_policy *policy) stats->max_state = count; /* Find valid-unique entries */ - cpufreq_for_each_valid_entry(pos, table) + cpufreq_for_each_valid_entry(pos, policy->freq_table) if (freq_table_get_index(stats, pos->frequency) == -1) stats->freq_table[i++] = pos->frequency; diff --git a/drivers/cpufreq/dbx500-cpufreq.c b/drivers/cpufreq/dbx500-cpufreq.c index 3575b82210ba..4ee0431579c1 100644 --- a/drivers/cpufreq/dbx500-cpufreq.c +++ b/drivers/cpufreq/dbx500-cpufreq.c @@ -43,7 +43,7 @@ static int dbx500_cpufreq_exit(struct cpufreq_policy *policy) static void dbx500_cpufreq_ready(struct cpufreq_policy *policy) { - cdev = cpufreq_cooling_register(policy->cpus); + cdev = cpufreq_cooling_register(policy); if (IS_ERR(cdev)) pr_err("Failed to register cooling 
device %ld\n", PTR_ERR(cdev));
 	else
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 48a98f11a84e..6cd503525638 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -225,6 +225,9 @@ struct global_params {
  * @vid:		Stores VID limits for this CPU
  * @pid:		Stores PID parameters for this CPU
  * @last_sample_time:	Last Sample time
+ * @aperf_mperf_shift:	Number of clock cycles after aperf, mperf is
+ *			incremented. This shift is a multiplier to the
+ *			mperf delta to calculate CPU busy.
  * @prev_aperf:		Last APERF value read from APERF MSR
  * @prev_mperf:		Last MPERF value read from MPERF MSR
  * @prev_tsc:		Last timestamp counter (TSC) value
@@ -259,6 +262,7 @@ struct cpudata {

 	u64	last_update;
 	u64	last_sample_time;
+	u64	aperf_mperf_shift;
 	u64	prev_aperf;
 	u64	prev_mperf;
 	u64	prev_tsc;
@@ -321,6 +325,7 @@ struct pstate_funcs {
 	int (*get_min)(void);
 	int (*get_turbo)(void);
 	int (*get_scaling)(void);
+	int (*get_aperf_mperf_shift)(void);
 	u64 (*get_val)(struct cpudata*, int pstate);
 	void (*get_vid)(struct cpudata *);
 	void (*update_util)(struct update_util_data *data, u64 time,
@@ -572,7 +577,7 @@ static int min_perf_pct_min(void)
 	int turbo_pstate = cpu->pstate.turbo_pstate;

 	return turbo_pstate ?
-		DIV_ROUND_UP(cpu->pstate.min_pstate * 100, turbo_pstate) : 0;
+		(cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
 }

 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
@@ -1214,7 +1219,7 @@ static struct attribute *intel_pstate_attributes[] = {
 	NULL
 };

-static struct attribute_group intel_pstate_attr_group = {
+static const struct attribute_group intel_pstate_attr_group = {
 	.attrs = intel_pstate_attributes,
 };

@@ -1486,6 +1491,11 @@ static u64 core_get_val(struct cpudata *cpudata, int pstate)
 	return val;
 }

+static int knl_get_aperf_mperf_shift(void)
+{
+	return 10;
+}
+
 static int knl_get_turbo_pstate(void)
 {
 	u64 value;
@@ -1543,6 +1553,9 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
 	cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
 	cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;

+	if (pstate_funcs.get_aperf_mperf_shift)
+		cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
+
 	if (pstate_funcs.get_vid)
 		pstate_funcs.get_vid(cpu);

@@ -1616,7 +1629,8 @@ static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
 	int32_t busy_frac, boost;
 	int target, avg_pstate;

-	busy_frac = div_fp(sample->mperf, sample->tsc);
+	busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
+			   sample->tsc);

 	boost = cpu->iowait_boost;
 	cpu->iowait_boost >>= 1;
@@ -1675,7 +1689,8 @@ static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
 		sample_ratio = div_fp(pid_params.sample_rate_ns, duration_ns);
 		perf_scaled = mul_fp(perf_scaled, sample_ratio);
 	} else {
-		sample_ratio = div_fp(100 * cpu->sample.mperf, cpu->sample.tsc);
+		sample_ratio = div_fp(100 * (cpu->sample.mperf << cpu->aperf_mperf_shift),
+				      cpu->sample.tsc);
 		if (sample_ratio < int_tofp(1))
 			perf_scaled = 0;
 	}
@@ -1807,6 +1822,7 @@ static const struct pstate_funcs knl_funcs = {
 	.get_max_physical = core_get_max_pstate_physical,
 	.get_min = core_get_min_pstate,
 	.get_turbo = knl_get_turbo_pstate,
+	.get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
 	.get_scaling = core_get_scaling,
 	.get_val = core_get_val,
 	.update_util = intel_pstate_update_util_pid,
@@ -2403,6 +2419,7 @@ static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
 	pstate_funcs.get_val   = funcs->get_val;
 	pstate_funcs.get_vid   = funcs->get_vid;
pstate_funcs.update_util = funcs->update_util; + pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift; intel_pstate_use_acpi_profile(); } diff --git a/drivers/cpufreq/mt8173-cpufreq.c b/drivers/cpufreq/mt8173-cpufreq.c index fd1886faf33a..f9f00fb4bc3a 100644 --- a/drivers/cpufreq/mt8173-cpufreq.c +++ b/drivers/cpufreq/mt8173-cpufreq.c @@ -320,9 +320,7 @@ static void mtk_cpufreq_ready(struct cpufreq_policy *policy) of_property_read_u32(np, DYNAMIC_POWER, &capacitance); info->cdev = of_cpufreq_power_cooling_register(np, - policy->related_cpus, - capacitance, - NULL); + policy, capacitance, NULL); if (IS_ERR(info->cdev)) { dev_err(info->cpu_dev, diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index e2ea433a5f9c..4ada55b8856e 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -278,8 +278,7 @@ static void qoriq_cpufreq_ready(struct cpufreq_policy *policy) struct device_node *np = of_get_cpu_node(policy->cpu, NULL); if (of_find_property(np, "#cooling-cells", NULL)) { - cpud->cdev = of_cpufreq_cooling_register(np, - policy->related_cpus); + cpud->cdev = of_cpufreq_cooling_register(np, policy); if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) { pr_err("cpu%d is not running as cooling device: %ld\n", diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c index a9482023d7d3..dad4e5bad827 100644 --- a/drivers/crypto/atmel-sha.c +++ b/drivers/crypto/atmel-sha.c @@ -1204,7 +1204,9 @@ static int atmel_sha_finup(struct ahash_request *req) ctx->flags |= SHA_FLAGS_FINUP; err1 = atmel_sha_update(req); - if (err1 == -EINPROGRESS || err1 == -EBUSY) + if (err1 == -EINPROGRESS || + (err1 == -EBUSY && (ahash_request_flags(req) & + CRYPTO_TFM_REQ_MAY_BACKLOG))) return err1; /* diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c index fde399c88779..0488b7f81dcf 100644 --- a/drivers/crypto/caam/caamalg.c +++ b/drivers/crypto/caam/caamalg.c @@ -882,10 +882,10 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct ablkcipher_request *req = context; struct ablkcipher_edesc *edesc; -#ifdef DEBUG struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); +#ifdef DEBUG dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -904,6 +904,14 @@ static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, #endif ablkcipher_unmap(jrdev, edesc, req); + + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. This is used e.g. by the CTS mode. + */ + scatterwalk_map_and_copy(req->info, req->dst, req->nbytes - ivsize, + ivsize, 0); + kfree(edesc); ablkcipher_request_complete(req, err); @@ -914,10 +922,10 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, { struct ablkcipher_request *req = context; struct ablkcipher_edesc *edesc; -#ifdef DEBUG struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); int ivsize = crypto_ablkcipher_ivsize(ablkcipher); +#ifdef DEBUG dev_err(jrdev, "%s %d: err 0x%x\n", __func__, __LINE__, err); #endif @@ -935,6 +943,14 @@ static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, #endif ablkcipher_unmap(jrdev, edesc, req); + + /* + * The crypto API expects us to set the IV (req->info) to the last + * ciphertext block. 
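+	 * This is used e.g. by the CTS mode.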
+ */ + scatterwalk_map_and_copy(req->info, req->src, req->nbytes - ivsize, + ivsize, 0); + kfree(edesc); ablkcipher_request_complete(req, err); diff --git a/drivers/crypto/caam/caamhash.c b/drivers/crypto/caam/caamhash.c index 7c44c90ad593..910ec61cae09 100644 --- a/drivers/crypto/caam/caamhash.c +++ b/drivers/crypto/caam/caamhash.c @@ -396,7 +396,7 @@ static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); if (!ret) { /* in progress */ - wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); ret = result.err; #ifdef DEBUG print_hex_dump(KERN_ERR, diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c index 1bb2816a9b4d..c425d4adaf2a 100644 --- a/drivers/crypto/caam/key_gen.c +++ b/drivers/crypto/caam/key_gen.c @@ -149,7 +149,7 @@ int gen_split_key(struct device *jrdev, u8 *key_out, ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); if (!ret) { /* in progress */ - wait_for_completion_interruptible(&result.completion); + wait_for_completion(&result.completion); ret = result.err; #ifdef DEBUG print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", diff --git a/drivers/crypto/cavium/cpt/cptvf_algs.c b/drivers/crypto/cavium/cpt/cptvf_algs.c index 1b220f3ed017..df21d996db7e 100644 --- a/drivers/crypto/cavium/cpt/cptvf_algs.c +++ b/drivers/crypto/cavium/cpt/cptvf_algs.c @@ -222,17 +222,17 @@ static inline int cvm_enc_dec(struct ablkcipher_request *req, u32 enc) return -EINPROGRESS; } -int cvm_encrypt(struct ablkcipher_request *req) +static int cvm_encrypt(struct ablkcipher_request *req) { return cvm_enc_dec(req, true); } -int cvm_decrypt(struct ablkcipher_request *req) +static int cvm_decrypt(struct ablkcipher_request *req) { return cvm_enc_dec(req, false); } -int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, +static int cvm_xts_setkey(struct crypto_ablkcipher *cipher, const u8 *key, u32 keylen) { struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); @@ -336,7 +336,7 @@ static int cvm_ecb_des3_setkey(struct crypto_ablkcipher *cipher, const u8 *key, return cvm_setkey(cipher, key, keylen, DES3_ECB); } -int cvm_enc_dec_init(struct crypto_tfm *tfm) +static int cvm_enc_dec_init(struct crypto_tfm *tfm) { struct cvm_enc_ctx *ctx = crypto_tfm_ctx(tfm); diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c index cfc723a10610..0e8160701833 100644 --- a/drivers/crypto/chelsio/chcr_algo.c +++ b/drivers/crypto/chelsio/chcr_algo.c @@ -898,26 +898,20 @@ static int chcr_update_tweak(struct ablkcipher_request *req, u8 *iv) u8 *key; unsigned int keylen; - cipher = crypto_alloc_cipher("aes-generic", 0, 0); + cipher = ablkctx->aes_generic; memcpy(iv, req->info, AES_BLOCK_SIZE); - if (IS_ERR(cipher)) { - ret = -ENOMEM; - goto out; - } keylen = ablkctx->enckey_len / 2; key = ablkctx->key + keylen; ret = crypto_cipher_setkey(cipher, key, keylen); if (ret) - goto out1; + goto out; crypto_cipher_encrypt_one(cipher, iv, iv); for (i = 0; i < (reqctx->processed / AES_BLOCK_SIZE); i++) gf128mul_x_ble((le128 *)iv, (le128 *)iv); crypto_cipher_decrypt_one(cipher, iv, iv); -out1: - crypto_free_cipher(cipher); out: return ret; } @@ -1261,6 +1255,17 @@ static int chcr_cra_init(struct crypto_tfm *tfm) pr_err("failed to allocate fallback for %s\n", alg->cra_name); return PTR_ERR(ablkctx->sw_cipher); } + + if (get_cryptoalg_subtype(tfm) == CRYPTO_ALG_SUB_TYPE_XTS) { + /* To update tweak*/ + ablkctx->aes_generic = 
crypto_alloc_cipher("aes-generic", 0, 0); + if (IS_ERR(ablkctx->aes_generic)) { + pr_err("failed to allocate aes cipher for tweak\n"); + return PTR_ERR(ablkctx->aes_generic); + } + } else + ablkctx->aes_generic = NULL; + tfm->crt_ablkcipher.reqsize = sizeof(struct chcr_blkcipher_req_ctx); return chcr_device_init(crypto_tfm_ctx(tfm)); } @@ -1291,6 +1296,8 @@ static void chcr_cra_exit(struct crypto_tfm *tfm) struct ablk_ctx *ablkctx = ABLK_CTX(ctx); crypto_free_skcipher(ablkctx->sw_cipher); + if (ablkctx->aes_generic) + crypto_free_cipher(ablkctx->aes_generic); } static int get_alg_config(struct algo_param *params, diff --git a/drivers/crypto/chelsio/chcr_crypto.h b/drivers/crypto/chelsio/chcr_crypto.h index a4f95b014b46..30af1ee17b87 100644 --- a/drivers/crypto/chelsio/chcr_crypto.h +++ b/drivers/crypto/chelsio/chcr_crypto.h @@ -155,6 +155,7 @@ struct ablk_ctx { struct crypto_skcipher *sw_cipher; + struct crypto_cipher *aes_generic; __be32 key_ctx_hdr; unsigned int enckey_len; unsigned char ciph_mode; diff --git a/drivers/dax/device-dax.h b/drivers/dax/device-dax.h index fdcd9769ffde..688b051750bd 100644 --- a/drivers/dax/device-dax.h +++ b/drivers/dax/device-dax.h @@ -21,5 +21,5 @@ struct dax_region *alloc_dax_region(struct device *parent, int region_id, struct resource *res, unsigned int align, void *addr, unsigned long flags); struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, - struct resource *res, int count); + int id, struct resource *res, int count); #endif /* __DEVICE_DAX_H__ */ diff --git a/drivers/dax/device.c b/drivers/dax/device.c index 12943d19bfc4..e9f3b3e4bbf4 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -529,7 +529,8 @@ static void dev_dax_release(struct device *dev) struct dax_region *dax_region = dev_dax->region; struct dax_device *dax_dev = dev_dax->dax_dev; - ida_simple_remove(&dax_region->ida, dev_dax->id); + if (dev_dax->id >= 0) + ida_simple_remove(&dax_region->ida, dev_dax->id); dax_region_put(dax_region); put_dax(dax_dev); kfree(dev_dax); @@ -559,7 +560,7 @@ static void unregister_dev_dax(void *dev) } struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, - struct resource *res, int count) + int id, struct resource *res, int count) { struct device *parent = dax_region->dev; struct dax_device *dax_dev; @@ -567,7 +568,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, struct inode *inode; struct device *dev; struct cdev *cdev; - int rc = 0, i; + int rc, i; + + if (!count) + return ERR_PTR(-EINVAL); dev_dax = kzalloc(sizeof(*dev_dax) + sizeof(*res) * count, GFP_KERNEL); if (!dev_dax) @@ -587,10 +591,16 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, if (i < count) goto err_id; - dev_dax->id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL); - if (dev_dax->id < 0) { - rc = dev_dax->id; - goto err_id; + if (id < 0) { + id = ida_simple_get(&dax_region->ida, 0, 0, GFP_KERNEL); + dev_dax->id = id; + if (id < 0) { + rc = id; + goto err_id; + } + } else { + /* region provider owns @id lifetime */ + dev_dax->id = -1; } /* @@ -598,8 +608,10 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, * device outside of mmap of the resulting character device. 
*/ dax_dev = alloc_dax(dev_dax, NULL, NULL); - if (!dax_dev) + if (!dax_dev) { + rc = -ENOMEM; goto err_dax; + } /* from here on we're committed to teardown via dax_dev_release() */ dev = &dev_dax->dev; @@ -620,7 +632,7 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, dev->parent = parent; dev->groups = dax_attribute_groups; dev->release = dev_dax_release; - dev_set_name(dev, "dax%d.%d", dax_region->id, dev_dax->id); + dev_set_name(dev, "dax%d.%d", dax_region->id, id); rc = cdev_device_add(cdev, dev); if (rc) { @@ -636,7 +648,8 @@ struct dev_dax *devm_create_dev_dax(struct dax_region *dax_region, return dev_dax; err_dax: - ida_simple_remove(&dax_region->ida, dev_dax->id); + if (dev_dax->id >= 0) + ida_simple_remove(&dax_region->ida, dev_dax->id); err_id: kfree(dev_dax); diff --git a/drivers/dax/pmem.c b/drivers/dax/pmem.c index 9f2a0b4fd801..8d8c852ba8f2 100644 --- a/drivers/dax/pmem.c +++ b/drivers/dax/pmem.c @@ -58,13 +58,12 @@ static void dax_pmem_percpu_kill(void *data) static int dax_pmem_probe(struct device *dev) { - int rc; void *addr; struct resource res; + int rc, id, region_id; struct nd_pfn_sb *pfn_sb; struct dev_dax *dev_dax; struct dax_pmem *dax_pmem; - struct nd_region *nd_region; struct nd_namespace_io *nsio; struct dax_region *dax_region; struct nd_namespace_common *ndns; @@ -123,14 +122,17 @@ static int dax_pmem_probe(struct device *dev) /* adjust the dax_region resource to the start of data */ res.start += le64_to_cpu(pfn_sb->dataoff); - nd_region = to_nd_region(dev->parent); - dax_region = alloc_dax_region(dev, nd_region->id, &res, + rc = sscanf(dev_name(&ndns->dev), "namespace%d.%d", ®ion_id, &id); + if (rc != 2) + return -EINVAL; + + dax_region = alloc_dax_region(dev, region_id, &res, le32_to_cpu(pfn_sb->align), addr, PFN_DEV|PFN_MAP); if (!dax_region) return -ENOMEM; /* TODO: support for subdividing a dax region... 
*/ - dev_dax = devm_create_dev_dax(dax_region, &res, 1); + dev_dax = devm_create_dev_dax(dax_region, id, &res, 1); /* child dev_dax instances now own the lifetime of the dax_region */ dax_region_put(dax_region); diff --git a/drivers/devfreq/governor_userspace.c b/drivers/devfreq/governor_userspace.c index 176976068bcd..77028c27593c 100644 --- a/drivers/devfreq/governor_userspace.c +++ b/drivers/devfreq/governor_userspace.c @@ -86,7 +86,7 @@ static struct attribute *dev_entries[] = { &dev_attr_set_freq.attr, NULL, }; -static struct attribute_group dev_attr_group = { +static const struct attribute_group dev_attr_group = { .name = "userspace", .attrs = dev_entries, }; diff --git a/drivers/devfreq/rk3399_dmc.c b/drivers/devfreq/rk3399_dmc.c index 40a2499730fc..1b89ebbad02c 100644 --- a/drivers/devfreq/rk3399_dmc.c +++ b/drivers/devfreq/rk3399_dmc.c @@ -336,8 +336,9 @@ static int rk3399_dmcfreq_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - dev_err(&pdev->dev, "Cannot get the dmc interrupt resource\n"); - return -EINVAL; + dev_err(&pdev->dev, + "Cannot get the dmc interrupt resource: %d\n", irq); + return irq; } data = devm_kzalloc(dev, sizeof(struct rk3399_dmcfreq), GFP_KERNEL); if (!data) diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c index 214fff96fa4a..ae712159246f 100644 --- a/drivers/devfreq/tegra-devfreq.c +++ b/drivers/devfreq/tegra-devfreq.c @@ -688,9 +688,9 @@ static int tegra_devfreq_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq <= 0) { - dev_err(&pdev->dev, "Failed to get IRQ\n"); - return -ENODEV; + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq); + return irq; } platform_set_drvdata(pdev, tegra); diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c index 57da14c15987..56e0a0e1b600 100644 --- a/drivers/dma-buf/dma-fence.c +++ b/drivers/dma-buf/dma-fence.c @@ -75,11 +75,6 @@ int dma_fence_signal_locked(struct dma_fence *fence) if (WARN_ON(!fence)) return -EINVAL; - if (!ktime_to_ns(fence->timestamp)) { - fence->timestamp = ktime_get(); - smp_mb__before_atomic(); - } - if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) { ret = -EINVAL; @@ -87,8 +82,11 @@ int dma_fence_signal_locked(struct dma_fence *fence) * we might have raced with the unlocked dma_fence_signal, * still run through all callbacks */ - } else + } else { + fence->timestamp = ktime_get(); + set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); trace_dma_fence_signaled(fence); + } list_for_each_entry_safe(cur, tmp, &fence->cb_list, node) { list_del_init(&cur->node); @@ -115,14 +113,11 @@ int dma_fence_signal(struct dma_fence *fence) if (!fence) return -EINVAL; - if (!ktime_to_ns(fence->timestamp)) { - fence->timestamp = ktime_get(); - smp_mb__before_atomic(); - } - if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) return -EINVAL; + fence->timestamp = ktime_get(); + set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags); trace_dma_fence_signaled(fence); if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags)) { diff --git a/drivers/dma-buf/sync_debug.c b/drivers/dma-buf/sync_debug.c index 82a6e7f6d37f..59a3b2f8ee91 100644 --- a/drivers/dma-buf/sync_debug.c +++ b/drivers/dma-buf/sync_debug.c @@ -84,7 +84,7 @@ static void sync_print_fence(struct seq_file *s, show ? 
"_" : "", sync_status_str(status)); - if (status) { + if (test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) { struct timespec64 ts64 = ktime_to_timespec64(fence->timestamp); diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 545e2c5c4815..d7e219d2669d 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -391,7 +391,13 @@ static void sync_fill_fence_info(struct dma_fence *fence, sizeof(info->driver_name)); info->status = dma_fence_get_status(fence); - info->timestamp_ns = ktime_to_ns(fence->timestamp); + while (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) && + !test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags)) + cpu_relax(); + info->timestamp_ns = + test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags) ? + ktime_to_ns(fence->timestamp) : + ktime_set(0, 0); } static long sync_file_ioctl_fence_info(struct sync_file *sync_file, diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index f7425960f6a5..37e24f525162 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -17,6 +17,7 @@ cflags-$(CONFIG_ARM) := $(subst -pg,,$(KBUILD_CFLAGS)) \ cflags-$(CONFIG_EFI_ARMSTUB) += -I$(srctree)/scripts/dtc/libfdt KBUILD_CFLAGS := $(cflags-y) -DDISABLE_BRANCH_PROFILING \ + -D__NO_FORTIFY \ $(call cc-option,-ffreestanding) \ $(call cc-option,-fno-stack-protector) diff --git a/drivers/fsi/fsi-core.c b/drivers/fsi/fsi-core.c index a485864cb512..06432d84cbf8 100644 --- a/drivers/fsi/fsi-core.c +++ b/drivers/fsi/fsi-core.c @@ -532,7 +532,7 @@ static inline uint32_t fsi_smode_sid(int x) return (x & FSI_SMODE_SID_MASK) << FSI_SMODE_SID_SHIFT; } -static const uint32_t fsi_slave_smode(int id) +static uint32_t fsi_slave_smode(int id) { return FSI_SMODE_WSC | FSI_SMODE_ECRC | fsi_smode_sid(id) @@ -883,17 +883,16 @@ struct bus_type fsi_bus_type = { }; EXPORT_SYMBOL_GPL(fsi_bus_type); -static int fsi_init(void) +static int __init fsi_init(void) { return bus_register(&fsi_bus_type); } +postcore_initcall(fsi_init); static void fsi_exit(void) { bus_unregister(&fsi_bus_type); } - -module_init(fsi_init); module_exit(fsi_exit); module_param(discard_errors, int, 0664); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 5f8ada1d872b..37971d9402e3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -101,7 +101,6 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) if (adev->kfd) { struct kgd2kfd_shared_resources gpu_resources = { .compute_vmid_bitmap = 0xFF00, - .num_mec = adev->gfx.mec.num_mec, .num_pipe_per_mec = adev->gfx.mec.num_pipe_per_mec, .num_queue_per_pipe = adev->gfx.mec.num_queue_per_pipe }; @@ -122,7 +121,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev) /* According to linux/bitmap.h we shouldn't use bitmap_clear if * nbits is not compile time constant */ - last_valid_bit = adev->gfx.mec.num_mec + last_valid_bit = 1 /* only first MEC can have compute queues */ * adev->gfx.mec.num_pipe_per_mec * adev->gfx.mec.num_queue_per_pipe; for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 88187bfc5ea3..3f95f7cb4019 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -226,10 +226,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->shared_resources = *gpu_resources; - /* We 
only use the first MEC */ - if (kfd->shared_resources.num_mec > 1) - kfd->shared_resources.num_mec = 1; - /* calculate max size of mqds needed for queues */ size = max_num_of_queues_per_device * kfd->device_info->mqd_size_aligned; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 955aa304ff48..602769ced3bd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -77,13 +77,6 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe) return false; } -unsigned int get_mec_num(struct device_queue_manager *dqm) -{ - BUG_ON(!dqm || !dqm->dev); - - return dqm->dev->shared_resources.num_mec; -} - unsigned int get_queues_num(struct device_queue_manager *dqm) { BUG_ON(!dqm || !dqm->dev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 66b9615bc3c1..faf820a06400 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -180,7 +180,6 @@ void device_queue_manager_init_cik(struct device_queue_manager_asic_ops *ops); void device_queue_manager_init_vi(struct device_queue_manager_asic_ops *ops); void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd); -unsigned int get_mec_num(struct device_queue_manager *dqm); unsigned int get_queues_num(struct device_queue_manager *dqm); unsigned int get_queues_per_pipe(struct device_queue_manager *dqm); unsigned int get_pipes_per_mec(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 91ef1484b3bb..36f376677a53 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -63,9 +63,6 @@ struct kgd2kfd_shared_resources { /* Bit n == 1 means VMID n is available for KFD. 
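The 0xFF00 value used by the amdgpu and radeon hunks above therefore hands VMIDs 8-15 to KFD and keeps 0-7 for graphics. Walking such a mask is a one-liner with the generic bitmap helpers; a small sketch:

#include <linux/bitops.h>
#include <linux/printk.h>

static void demo_list_kfd_vmids(void)
{
	unsigned long vmid_bitmap = 0xFF00;	/* bit n set => VMID n is KFD's */
	unsigned int vmid;

	for_each_set_bit(vmid, &vmid_bitmap, BITS_PER_LONG)
		pr_info("VMID %u reserved for KFD\n", vmid);	/* prints 8..15 */
}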
*/ unsigned int compute_vmid_bitmap; - /* number of mec available from the hardware */ - uint32_t num_mec; - /* number of pipes per mec */ uint32_t num_pipe_per_mec; diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c index 99f9a4beb859..67fe19e5a9c6 100644 --- a/drivers/gpu/drm/bridge/panel.c +++ b/drivers/gpu/drm/bridge/panel.c @@ -161,7 +161,7 @@ struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel, int ret; if (!panel) - return ERR_PTR(EINVAL); + return ERR_PTR(-EINVAL); panel_bridge = devm_kzalloc(panel->dev, sizeof(*panel_bridge), GFP_KERNEL); diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c index ec1ed94b2390..d34e5096887a 100644 --- a/drivers/gpu/drm/drm_dp_aux_dev.c +++ b/drivers/gpu/drm/drm_dp_aux_dev.c @@ -32,6 +32,7 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/uaccess.h> +#include <linux/uio.h> #include <drm/drm_dp_helper.h> #include <drm/drm_crtc.h> #include <drm/drmP.h> @@ -140,101 +141,83 @@ static loff_t auxdev_llseek(struct file *file, loff_t offset, int whence) return fixed_size_llseek(file, offset, whence, AUX_MAX_OFFSET); } -static ssize_t auxdev_read(struct file *file, char __user *buf, size_t count, - loff_t *offset) +static ssize_t auxdev_read_iter(struct kiocb *iocb, struct iov_iter *to) { - size_t bytes_pending, num_bytes_processed = 0; - struct drm_dp_aux_dev *aux_dev = file->private_data; + struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data; + loff_t pos = iocb->ki_pos; ssize_t res = 0; if (!atomic_inc_not_zero(&aux_dev->usecount)) return -ENODEV; - bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - (*offset)); - - if (!access_ok(VERIFY_WRITE, buf, bytes_pending)) { - res = -EFAULT; - goto out; - } + iov_iter_truncate(to, AUX_MAX_OFFSET - pos); - while (bytes_pending > 0) { - uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES]; - ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf)); + while (iov_iter_count(to)) { + uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES]; + ssize_t todo = min(iov_iter_count(to), sizeof(buf)); if (signal_pending(current)) { - res = num_bytes_processed ? - num_bytes_processed : -ERESTARTSYS; - goto out; + res = -ERESTARTSYS; + break; } - res = drm_dp_dpcd_read(aux_dev->aux, *offset, localbuf, todo); - if (res <= 0) { - res = num_bytes_processed ? num_bytes_processed : res; - goto out; - } - if (__copy_to_user(buf + num_bytes_processed, localbuf, res)) { - res = num_bytes_processed ? 
- num_bytes_processed : -EFAULT; - goto out; + res = drm_dp_dpcd_read(aux_dev->aux, pos, buf, todo); + if (res <= 0) + break; + + if (copy_to_iter(buf, res, to) != res) { + res = -EFAULT; + break; } - bytes_pending -= res; - *offset += res; - num_bytes_processed += res; - res = num_bytes_processed; + + pos += res; } -out: + if (pos != iocb->ki_pos) + res = pos - iocb->ki_pos; + iocb->ki_pos = pos; + atomic_dec(&aux_dev->usecount); wake_up_atomic_t(&aux_dev->usecount); return res; } -static ssize_t auxdev_write(struct file *file, const char __user *buf, - size_t count, loff_t *offset) +static ssize_t auxdev_write_iter(struct kiocb *iocb, struct iov_iter *from) { - size_t bytes_pending, num_bytes_processed = 0; - struct drm_dp_aux_dev *aux_dev = file->private_data; + struct drm_dp_aux_dev *aux_dev = iocb->ki_filp->private_data; + loff_t pos = iocb->ki_pos; ssize_t res = 0; if (!atomic_inc_not_zero(&aux_dev->usecount)) return -ENODEV; - bytes_pending = min((loff_t)count, AUX_MAX_OFFSET - *offset); - - if (!access_ok(VERIFY_READ, buf, bytes_pending)) { - res = -EFAULT; - goto out; - } + iov_iter_truncate(from, AUX_MAX_OFFSET - pos); - while (bytes_pending > 0) { - uint8_t localbuf[DP_AUX_MAX_PAYLOAD_BYTES]; - ssize_t todo = min_t(size_t, bytes_pending, sizeof(localbuf)); + while (iov_iter_count(from)) { + uint8_t buf[DP_AUX_MAX_PAYLOAD_BYTES]; + ssize_t todo = min(iov_iter_count(from), sizeof(buf)); if (signal_pending(current)) { - res = num_bytes_processed ? - num_bytes_processed : -ERESTARTSYS; - goto out; + res = -ERESTARTSYS; + break; } - if (__copy_from_user(localbuf, - buf + num_bytes_processed, todo)) { - res = num_bytes_processed ? - num_bytes_processed : -EFAULT; - goto out; + if (!copy_from_iter_full(buf, todo, from)) { + res = -EFAULT; + break; } - res = drm_dp_dpcd_write(aux_dev->aux, *offset, localbuf, todo); - if (res <= 0) { - res = num_bytes_processed ? 
num_bytes_processed : res; - goto out; - } - bytes_pending -= res; - *offset += res; - num_bytes_processed += res; - res = num_bytes_processed; + res = drm_dp_dpcd_write(aux_dev->aux, pos, buf, todo); + if (res <= 0) + break; + + pos += res; } -out: + if (pos != iocb->ki_pos) + res = pos - iocb->ki_pos; + iocb->ki_pos = pos; + atomic_dec(&aux_dev->usecount); wake_up_atomic_t(&aux_dev->usecount); return res; @@ -251,8 +234,8 @@ static int auxdev_release(struct inode *inode, struct file *file) static const struct file_operations auxdev_fops = { .owner = THIS_MODULE, .llseek = auxdev_llseek, - .read = auxdev_read, - .write = auxdev_write, + .read_iter = auxdev_read_iter, + .write_iter = auxdev_write_iter, .open = auxdev_open, .release = auxdev_release, }; diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index bfd237c15e76..ae5f06895562 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -330,6 +330,13 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg, return false; } + /* + * ignore out-of-order messages or messages that are part of a + * failed transaction + */ + if (!recv_hdr.somt && !msg->have_somt) + return false; + /* get length contained in this portion */ msg->curchunk_len = recv_hdr.msg_len; msg->curchunk_hdrlen = hdrlen; @@ -2164,7 +2171,7 @@ out_unlock: } EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume); -static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) +static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) { int len; u8 replyblock[32]; @@ -2179,12 +2186,12 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) replyblock, len); if (ret != len) { DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret); - return; + return false; } ret = drm_dp_sideband_msg_build(msg, replyblock, len, true); if (!ret) { DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]); - return; + return false; } replylen = msg->curchunk_len + msg->curchunk_hdrlen; @@ -2196,21 +2203,32 @@ static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, replyblock, len); if (ret != len) { - DRM_DEBUG_KMS("failed to read a chunk\n"); + DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n", + len, ret); + return false; } + ret = drm_dp_sideband_msg_build(msg, replyblock, len, false); - if (ret == false) + if (!ret) { DRM_DEBUG_KMS("failed to build sideband msg\n"); + return false; + } + curreply += len; replylen -= len; } + return true; } static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) { int ret = 0; - drm_dp_get_one_sb_msg(mgr, false); + if (!drm_dp_get_one_sb_msg(mgr, false)) { + memset(&mgr->down_rep_recv, 0, + sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } if (mgr->down_rep_recv.have_eomt) { struct drm_dp_sideband_msg_tx *txmsg; @@ -2266,7 +2284,12 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) { int ret = 0; - drm_dp_get_one_sb_msg(mgr, true); + + if (!drm_dp_get_one_sb_msg(mgr, true)) { + memset(&mgr->up_req_recv, 0, + sizeof(struct drm_dp_sideband_msg_rx)); + return 0; + } if (mgr->up_req_recv.have_eomt) { struct drm_dp_sideband_msg_req_body msg; @@ -2318,7 +2341,9 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n", 
msg.u.resource_stat.port_number, msg.u.resource_stat.available_pbn); } - drm_dp_put_mst_branch_device(mstb); + if (mstb) + drm_dp_put_mst_branch_device(mstb); + memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); } return ret; diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index fc8ef42203ec..b3ef4f1c2630 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -832,6 +832,7 @@ unlock: drm_atomic_clean_old_fb(dev, plane_mask, ret); if (ret == -EDEADLK) { + drm_atomic_state_clear(state); drm_modeset_backoff(&ctx); goto retry; } diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c index 0b2d8c4a2fa5..d1f202852028 100644 --- a/drivers/gpu/drm/drm_ioc32.c +++ b/drivers/gpu/drm/drm_ioc32.c @@ -112,6 +112,9 @@ static int compat_drm_version(struct file *file, unsigned int cmd, v32.version_major = v.version_major; v32.version_minor = v.version_minor; v32.version_patchlevel = v.version_patchlevel; + v32.name_len = v.name_len; + v32.date_len = v.date_len; + v32.desc_len = v.desc_len; if (copy_to_user((void __user *)arg, &v32, sizeof(v32))) return -EFAULT; return 0; diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c index 463e4d81fb0d..e9f33cd805dd 100644 --- a/drivers/gpu/drm/drm_vblank.c +++ b/drivers/gpu/drm/drm_vblank.c @@ -242,7 +242,7 @@ static void drm_update_vblank_count(struct drm_device *dev, unsigned int pipe, * Otherwise reinitialize delayed at next vblank interrupt and assign 0 * for now, to mark the vblanktimestamp as invalid. */ - if (!rc && in_vblank_irq) + if (!rc && !in_vblank_irq) t_vblank = (struct timeval) {0, 0}; store_vblank(dev, pipe, diff, &t_vblank, cur_vblank); diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index 51241de5e7a7..713848c36349 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -2536,6 +2536,11 @@ static int scan_workload(struct intel_vgpu_workload *workload) gma_head == gma_tail) return 0; + if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { + ret = -EINVAL; + goto out; + } + ret = ip_gma_set(&s, gma_head); if (ret) goto out; @@ -2579,6 +2584,11 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx) s.rb_va = wa_ctx->indirect_ctx.shadow_va; s.workload = workload; + if (!intel_gvt_ggtt_validate_range(s.vgpu, s.ring_start, s.ring_size)) { + ret = -EINVAL; + goto out; + } + ret = ip_gma_set(&s, gma_head); if (ret) goto out; diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index e0261fcc5b50..2deb05f618fb 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -197,6 +197,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_B << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); + if (IS_BROADWELL(dev_priv)) { + vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &= + ~PORT_CLK_SEL_MASK; + vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |= + PORT_CLK_SEL_LCPLL_810; + } vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE; vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT; @@ -211,6 +217,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_C << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); + if (IS_BROADWELL(dev_priv)) { + vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &= + 
~PORT_CLK_SEL_MASK; + vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |= + PORT_CLK_SEL_LCPLL_810; + } vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE; vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED; @@ -225,6 +237,12 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST | (PORT_D << TRANS_DDI_PORT_SHIFT) | TRANS_DDI_FUNC_ENABLE); + if (IS_BROADWELL(dev_priv)) { + vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &= + ~PORT_CLK_SEL_MASK; + vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |= + PORT_CLK_SEL_LCPLL_810; + } vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE; vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE; vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED; @@ -244,6 +262,10 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED; } + + /* Clear host CRT status, so guest couldn't detect this host CRT. */ + if (IS_BROADWELL(dev_priv)) + vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; } static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 66374dba3b1a..6166e34d892b 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2259,6 +2259,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt) ret = setup_spt_oos(gvt); if (ret) { gvt_err("fail to initialize SPT oos\n"); + dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); + __free_page(gvt->gtt.scratch_ggtt_page); return ret; } } diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 1414d7e6148d..17febe830ff6 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -367,21 +367,24 @@ static int lcpll_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, static int dpy_reg_mmio_read(struct intel_vgpu *vgpu, unsigned int offset, void *p_data, unsigned int bytes) { - *(u32 *)p_data = (1 << 17); - return 0; -} - -static int dpy_reg_mmio_read_2(struct intel_vgpu *vgpu, unsigned int offset, - void *p_data, unsigned int bytes) -{ - *(u32 *)p_data = 3; - return 0; -} + switch (offset) { + case 0xe651c: + case 0xe661c: + case 0xe671c: + case 0xe681c: + vgpu_vreg(vgpu, offset) = 1 << 17; + break; + case 0xe6c04: + vgpu_vreg(vgpu, offset) = 0x3; + break; + case 0xe6e1c: + vgpu_vreg(vgpu, offset) = 0x2f << 16; + break; + default: + return -EINVAL; + } -static int dpy_reg_mmio_read_3(struct intel_vgpu *vgpu, unsigned int offset, - void *p_data, unsigned int bytes) -{ - *(u32 *)p_data = (0x2f << 16); + read_vreg(vgpu, offset, p_data, bytes); return 0; } @@ -1925,7 +1928,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_F(_PCH_DPD_AUX_CH_CTL, 6 * 4, 0, 0, 0, D_PRE_SKL, NULL, dp_aux_ch_ctl_mmio_write); - MMIO_RO(PCH_ADPA, D_ALL, 0, ADPA_CRT_HOTPLUG_MONITOR_MASK, NULL, pch_adpa_mmio_write); + MMIO_DH(PCH_ADPA, D_PRE_SKL, NULL, pch_adpa_mmio_write); MMIO_DH(_PCH_TRANSACONF, D_ALL, NULL, transconf_mmio_write); MMIO_DH(_PCH_TRANSBCONF, D_ALL, NULL, transconf_mmio_write); @@ -2011,8 +2014,8 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_DH(0xe661c, D_ALL, dpy_reg_mmio_read, NULL); MMIO_DH(0xe671c, D_ALL, dpy_reg_mmio_read, NULL); MMIO_DH(0xe681c, D_ALL, dpy_reg_mmio_read, NULL); - MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read_2, NULL); - MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read_3, 
NULL); + MMIO_DH(0xe6c04, D_ALL, dpy_reg_mmio_read, NULL); + MMIO_DH(0xe6e1c, D_ALL, dpy_reg_mmio_read, NULL); MMIO_RO(PCH_PORT_HOTPLUG, D_ALL, 0, PORTA_HOTPLUG_STATUS_MASK diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 1ae0b4083ce1..fd0c85f9ef3c 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -232,16 +232,20 @@ static void gvt_cache_destroy(struct intel_vgpu *vgpu) struct device *dev = mdev_dev(vgpu->vdev.mdev); unsigned long gfn; - mutex_lock(&vgpu->vdev.cache_lock); - while ((node = rb_first(&vgpu->vdev.cache))) { + for (;;) { + mutex_lock(&vgpu->vdev.cache_lock); + node = rb_first(&vgpu->vdev.cache); + if (!node) { + mutex_unlock(&vgpu->vdev.cache_lock); + break; + } dma = rb_entry(node, struct gvt_dma, node); gvt_dma_unmap_iova(vgpu, dma->iova); gfn = dma->gfn; - - vfio_unpin_pages(dev, &gfn, 1); __gvt_cache_remove_entry(vgpu, dma); + mutex_unlock(&vgpu->vdev.cache_lock); + vfio_unpin_pages(dev, &gfn, 1); } - mutex_unlock(&vgpu->vdev.cache_lock); } static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt, diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c index 488fdea348a9..4f7057d62d88 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.c +++ b/drivers/gpu/drm/i915/gvt/scheduler.c @@ -174,15 +174,6 @@ static int shadow_context_status_change(struct notifier_block *nb, atomic_set(&workload->shadow_ctx_active, 1); break; case INTEL_CONTEXT_SCHEDULE_OUT: - /* If the status is -EINPROGRESS means this workload - * doesn't meet any issue during dispatching so when - * get the SCHEDULE_OUT set the status to be zero for - * good. If the status is NOT -EINPROGRESS means there - * is something wrong happened during dispatching and - * the status should not be set to zero - */ - if (workload->status == -EINPROGRESS) - workload->status = 0; atomic_set(&workload->shadow_ctx_active, 0); break; default: @@ -427,6 +418,18 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id) wait_event(workload->shadow_ctx_status_wq, !atomic_read(&workload->shadow_ctx_active)); + /* If this request caused GPU hang, req->fence.error will + * be set to -EIO. Use -EIO to set workload status so + * that when this request caused GPU hang, didn't trigger + * context switch interrupt to guest. 
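The rule this comment describes boils down to: a status still at -EINPROGRESS means dispatch itself went fine, so the final verdict comes from the request's fence. Reduced to a sketch (the demo_* types are stand-ins, not the driver's structures):

#include <linux/errno.h>

struct demo_workload {
	int status;		/* -EINPROGRESS while dispatch saw no error */
	int fence_error;	/* mirrors req->fence.error; -EIO after a hang */
};

static void demo_finalize_status(struct demo_workload *w)
{
	if (w->status != -EINPROGRESS)
		return;		/* dispatch already recorded a real error */

	/* Clean dispatch: inherit -EIO from a hung request, else success. */
	w->status = (w->fence_error == -EIO) ? -EIO : 0;
}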
+ */ + if (likely(workload->status == -EINPROGRESS)) { + if (workload->req->fence.error == -EIO) + workload->status = -EIO; + else + workload->status = 0; + } + i915_gem_request_put(fetch_and_zero(&workload->req)); if (!workload->status && !vgpu->resetting) { @@ -464,8 +467,6 @@ struct workload_thread_param { int ring_id; }; -static DEFINE_MUTEX(scheduler_mutex); - static int workload_thread(void *priv) { struct workload_thread_param *p = (struct workload_thread_param *)priv; @@ -497,8 +498,6 @@ static int workload_thread(void *priv) if (!workload) break; - mutex_lock(&scheduler_mutex); - gvt_dbg_sched("ring id %d next workload %p vgpu %d\n", workload->ring_id, workload, workload->vgpu->id); @@ -537,9 +536,6 @@ complete: FORCEWAKE_ALL); intel_runtime_pm_put(gvt->dev_priv); - - mutex_unlock(&scheduler_mutex); - } return 0; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 3f44076ec8a0..00d8967c8512 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -3087,7 +3087,7 @@ static void intel_connector_info(struct seq_file *m, connector->display_info.cea_rev); } - if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST) + if (!intel_encoder) return; switch (connector->connector_type) { diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index ee2325b180e7..fc307e03943c 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1132,10 +1132,12 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) * and the registers being closely associated. * * According to chipset errata, on the 965GM, MSI interrupts may - * be lost or delayed, but we use them anyways to avoid - * stuck interrupts on some machines. + * be lost or delayed, and was defeatured. MSI interrupts seem to + * get lost on g4x as well, and interrupt delivery seems to stay + * properly dead afterwards. So we'll just disable them for all + * pre-gen5 chipsets. */ - if (!IS_I945G(dev_priv) && !IS_I945GM(dev_priv)) { + if (INTEL_GEN(dev_priv) >= 5) { if (pci_enable_msi(pdev) < 0) DRM_DEBUG_DRIVER("can't enable MSI"); } diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 7dcac3bfb771..969bac8404f1 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -2434,8 +2434,9 @@ rebuild_st: * again with !__GFP_NORETRY. However, we still * want to fail this allocation rather than * trigger the out-of-memory killer and for - * this we want the future __GFP_MAYFAIL. + * this we want __GFP_RETRY_MAYFAIL. */ + gfp |= __GFP_RETRY_MAYFAIL; } } while (1); diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 9337446f1068..054b2e54cdaf 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -288,20 +288,26 @@ static int eb_create(struct i915_execbuffer *eb) * direct lookup. */ do { + unsigned int flags; + + /* While we can still reduce the allocation size, don't + * raise a warning and allow the allocation to fail. + * On the last pass though, we want to try as hard + * as possible to perform the allocation and warn + * if it fails. 
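The same back-off strategy in isolation: shrink the hash table until an allocation succeeds, keeping the cheap attempts silent and fail-fast, and only letting the final single-bucket attempt retry hard and warn. A condensed sketch (GFP_KERNEL stands in for the GFP_TEMPORARY used above):

#include <linux/slab.h>
#include <linux/types.h>

static struct hlist_head *demo_alloc_buckets(unsigned int *size)
{
	struct hlist_head *buckets;

	do {
		gfp_t flags = GFP_KERNEL;

		if (*size > 1)	/* can still shrink: fail fast, no warning */
			flags |= __GFP_NORETRY | __GFP_NOWARN;

		buckets = kzalloc(sizeof(struct hlist_head) << *size, flags);
		if (buckets)
			return buckets;	/* caller reads the final *size */
	} while (--*size);

	return NULL;	/* even the single-bucket table failed */
}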
+ */ + flags = GFP_TEMPORARY; + if (size > 1) + flags |= __GFP_NORETRY | __GFP_NOWARN; + eb->buckets = kzalloc(sizeof(struct hlist_head) << size, - GFP_TEMPORARY | - __GFP_NORETRY | - __GFP_NOWARN); + flags); if (eb->buckets) break; } while (--size); - if (unlikely(!eb->buckets)) { - eb->buckets = kzalloc(sizeof(struct hlist_head), - GFP_TEMPORARY); - if (unlikely(!eb->buckets)) - return -ENOMEM; - } + if (unlikely(!size)) + return -ENOMEM; eb->lut_size = size; } else { @@ -452,7 +458,7 @@ eb_add_vma(struct i915_execbuffer *eb, return err; } - if (eb->lut_size >= 0) { + if (eb->lut_size > 0) { vma->exec_handle = entry->handle; hlist_add_head(&vma->exec_node, &eb->buckets[hash_32(entry->handle, @@ -894,7 +900,7 @@ static void eb_release_vmas(const struct i915_execbuffer *eb) static void eb_reset_vmas(const struct i915_execbuffer *eb) { eb_release_vmas(eb); - if (eb->lut_size >= 0) + if (eb->lut_size > 0) memset(eb->buckets, 0, sizeof(struct hlist_head) << eb->lut_size); } @@ -903,7 +909,7 @@ static void eb_destroy(const struct i915_execbuffer *eb) { GEM_BUG_ON(eb->reloc_cache.rq); - if (eb->lut_size >= 0) + if (eb->lut_size > 0) kfree(eb->buckets); } @@ -2180,8 +2186,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, } } - if (eb_create(&eb)) - return -ENOMEM; + err = eb_create(&eb); + if (err) + goto err_out_fence; + + GEM_BUG_ON(!eb.lut_size); /* * Take a local wakeref for preparing to dispatch the execbuf as @@ -2340,6 +2349,7 @@ err_unlock: err_rpm: intel_runtime_pm_put(eb.i915); eb_destroy(&eb); +err_out_fence: if (out_fence_fd != -1) put_unused_fd(out_fence_fd); err_in_fence: diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 38c44407bafc..9cd22f83b0cf 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -2067,10 +2067,6 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return ret; } - ret = alloc_oa_buffer(dev_priv); - if (ret) - goto err_oa_buf_alloc; - /* PRM - observability performance counters: * * OACONTROL, performance counter enable, note: @@ -2086,6 +2082,10 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, intel_runtime_pm_get(dev_priv); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + ret = alloc_oa_buffer(dev_priv); + if (ret) + goto err_oa_buf_alloc; + ret = dev_priv->perf.oa.ops.enable_metric_set(dev_priv); if (ret) goto err_enable; @@ -2097,11 +2097,11 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream, return 0; err_enable: - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); - intel_runtime_pm_put(dev_priv); free_oa_buffer(dev_priv); err_oa_buf_alloc: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); + intel_runtime_pm_put(dev_priv); if (stream->ctx) oa_put_render_ctx_id(stream); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c8647cfa81ba..64cc674b652a 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1802,7 +1802,7 @@ enum skl_disp_power_wells { #define POST_CURSOR_2(x) ((x) << 6) #define POST_CURSOR_2_MASK (0x3F << 6) #define CURSOR_COEFF(x) ((x) << 0) -#define CURSOR_COEFF_MASK (0x3F << 6) +#define CURSOR_COEFF_MASK (0x3F << 0) #define _CNL_PORT_TX_DW5_GRP_AE 0x162354 #define _CNL_PORT_TX_DW5_GRP_B 0x1623D4 diff --git a/drivers/gpu/drm/i915/intel_cdclk.c b/drivers/gpu/drm/i915/intel_cdclk.c index b8914db7d2e1..1241e5891b29 100644 --- a/drivers/gpu/drm/i915/intel_cdclk.c +++ b/drivers/gpu/drm/i915/intel_cdclk.c @@ -491,6 +491,14 @@ static void 
vlv_set_cdclk(struct drm_i915_private *dev_priv, int cdclk = cdclk_state->cdclk; u32 val, cmd; + /* There are cases where we can end up here with power domains + * off and a CDCLK frequency other than the minimum, like when + * issuing a modeset without actually changing any display after + * a system suspend. So grab the PIPE-A domain, which covers + * the HW blocks needed for the following programming. + */ + intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */ cmd = 2; else if (cdclk == 266667) @@ -549,6 +557,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv, intel_update_cdclk(dev_priv); vlv_program_pfi_credits(dev_priv); + + intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); } static void chv_set_cdclk(struct drm_i915_private *dev_priv, @@ -568,6 +578,14 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, return; } + /* There are cases where we can end up here with power domains + * off and a CDCLK frequency other than the minimum, like when + * issuing a modeset without actually changing any display after + * a system suspend. So grab the PIPE-A domain, which covers + * the HW blocks needed for the following programming. + */ + intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A); + /* * Specs are full of misinformation, but testing on actual * hardware has shown that we just need to write the desired @@ -590,6 +608,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv, intel_update_cdclk(dev_priv); vlv_program_pfi_credits(dev_priv); + + intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); } static int bdw_calc_cdclk(int max_pixclk) diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c index a4487c5b7e37..5b4de719bec3 100644 --- a/drivers/gpu/drm/i915/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/intel_engine_cs.c @@ -821,9 +821,10 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) | GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE); - /* WaDisableKillLogic:bxt,skl,kbl,cfl */ - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | - ECOCHK_DIS_TLB); + /* WaDisableKillLogic:bxt,skl,kbl */ + if (!IS_COFFEELAKE(dev_priv)) + I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | + ECOCHK_DIS_TLB); /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */ /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */ @@ -894,10 +895,9 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine) WA_SET_BIT_MASKED(HDC_CHICKEN0, HDC_FORCE_NON_COHERENT); - /* WaDisableHDCInvalidation:skl,bxt,kbl */ - if (!IS_COFFEELAKE(dev_priv)) - I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | - BDW_DISABLE_HDC_INVALIDATION); + /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */ + I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) | + BDW_DISABLE_HDC_INVALIDATION); /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */ if (IS_SKYLAKE(dev_priv) || diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 03347c6ae599..0c4cde6b2e6f 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -535,13 +535,14 @@ static void intel_fbdev_destroy(struct intel_fbdev *ifbdev) drm_fb_helper_fini(&ifbdev->helper); - if (ifbdev->fb) { + if (ifbdev->vma) { mutex_lock(&ifbdev->helper.dev->struct_mutex); intel_unpin_fb_vma(ifbdev->vma); mutex_unlock(&ifbdev->helper.dev->struct_mutex); + } + if (ifbdev->fb) drm_framebuffer_remove(&ifbdev->fb->base); - } 
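The intel_fbdev hunks here switch teardown and suspend from checking ifbdev->fb to checking ifbdev->vma: the framebuffer object can exist while the pin (the vma) never happened, so each resource is released behind its own marker. The shape of that fix, with invented demo_* names:

#include <linux/slab.h>

struct demo_vma { int unused; };
struct demo_fb { int unused; };

struct demo_fbdev {
	struct demo_vma *vma;	/* set only once the fb is pinned */
	struct demo_fb *fb;	/* set as soon as the fb object exists */
};

static void demo_unpin(struct demo_vma *vma) { }
static void demo_fb_remove(struct demo_fb *fb) { }

static void demo_fbdev_destroy(struct demo_fbdev *ifbdev)
{
	if (ifbdev->vma)	/* unpin only what was actually pinned */
		demo_unpin(ifbdev->vma);

	if (ifbdev->fb)		/* the fb may exist without ever being pinned */
		demo_fb_remove(ifbdev->fb);

	kfree(ifbdev);
}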
kfree(ifbdev); } @@ -765,7 +766,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous struct intel_fbdev *ifbdev = dev_priv->fbdev; struct fb_info *info; - if (!ifbdev || !ifbdev->fb) + if (!ifbdev || !ifbdev->vma) return; info = ifbdev->helper.fbdev; @@ -812,7 +813,7 @@ void intel_fbdev_output_poll_changed(struct drm_device *dev) { struct intel_fbdev *ifbdev = to_i915(dev)->fbdev; - if (ifbdev && ifbdev->fb) + if (ifbdev && ifbdev->vma) drm_fb_helper_hotplug_event(&ifbdev->helper); } @@ -824,7 +825,7 @@ void intel_fbdev_restore_mode(struct drm_device *dev) return; intel_fbdev_sync(ifbdev); - if (!ifbdev->fb) + if (!ifbdev->vma) return; if (drm_fb_helper_restore_fbdev_mode_unlocked(&ifbdev->helper) == 0) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c index d15cc9d3a5cd..89dc25a5a53b 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_dmabuf.c @@ -246,9 +246,9 @@ static int igt_dmabuf_export_vmap(void *arg) i915_gem_object_put(obj); ptr = dma_buf_vmap(dmabuf); - if (IS_ERR(ptr)) { - err = PTR_ERR(ptr); - pr_err("dma_buf_vmap failed with err=%d\n", err); + if (!ptr) { + pr_err("dma_buf_vmap failed\n"); + err = -ENOMEM; goto out; } diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c index 49546222c6d3..6276bb834b4f 100644 --- a/drivers/gpu/drm/imx/ipuv3-plane.c +++ b/drivers/gpu/drm/imx/ipuv3-plane.c @@ -54,7 +54,7 @@ static const uint32_t ipu_plane_formats[] = { DRM_FORMAT_RGBA8888, DRM_FORMAT_RGBX8888, DRM_FORMAT_BGRA8888, - DRM_FORMAT_BGRA8888, + DRM_FORMAT_BGRX8888, DRM_FORMAT_UYVY, DRM_FORMAT_VYUY, DRM_FORMAT_YUYV, diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c index 636031a30e17..8aca20209cb8 100644 --- a/drivers/gpu/drm/imx/parallel-display.c +++ b/drivers/gpu/drm/imx/parallel-display.c @@ -237,7 +237,7 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data) /* port@1 is the output port */ ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge); - if (ret) + if (ret && ret != -ENODEV) return ret; imxpd->dev = dev; diff --git a/drivers/gpu/drm/mediatek/Makefile b/drivers/gpu/drm/mediatek/Makefile index bf2e5be1ab30..e37b55a23a65 100644 --- a/drivers/gpu/drm/mediatek/Makefile +++ b/drivers/gpu/drm/mediatek/Makefile @@ -1,4 +1,5 @@ -mediatek-drm-y := mtk_disp_ovl.o \ +mediatek-drm-y := mtk_disp_color.o \ + mtk_disp_ovl.o \ mtk_disp_rdma.o \ mtk_drm_crtc.o \ mtk_drm_ddp.o \ diff --git a/drivers/gpu/drm/mediatek/mtk_disp_color.c b/drivers/gpu/drm/mediatek/mtk_disp_color.c new file mode 100644 index 000000000000..ef79a6d55646 --- /dev/null +++ b/drivers/gpu/drm/mediatek/mtk_disp_color.c @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2017 MediaTek Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
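The new file that follows differs between SoCs in exactly one register offset (DISP_COLOR_START: 0x0f00 on MT2701, 0x0c00 on MT8173) and selects it through the OF match data attached to each compatible. That lookup pattern, isolated with invented demo_* names:

#include <linux/errno.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct demo_color_data {
	unsigned int color_offset;	/* DISP_COLOR_START for this SoC */
};

static const struct demo_color_data demo_mt2701_data = {
	.color_offset = 0x0f00,
};

static const struct demo_color_data demo_mt8173_data = {
	.color_offset = 0x0c00,
};

static const struct of_device_id demo_color_dt_match[] = {
	{ .compatible = "mediatek,mt2701-disp-color",
	  .data = &demo_mt2701_data },
	{ .compatible = "mediatek,mt8173-disp-color",
	  .data = &demo_mt8173_data },
	{ /* sentinel */ },
};

static int demo_color_probe(struct platform_device *pdev)
{
	/* Resolves to the .data of whichever compatible matched. */
	const struct demo_color_data *data =
		of_device_get_match_data(&pdev->dev);

	return data ? 0 : -ENODEV;
}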
+ */ + +#include <drm/drmP.h> +#include <linux/clk.h> +#include <linux/component.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> + +#include "mtk_drm_crtc.h" +#include "mtk_drm_ddp_comp.h" + +#define DISP_COLOR_CFG_MAIN 0x0400 +#define DISP_COLOR_START_MT2701 0x0f00 +#define DISP_COLOR_START_MT8173 0x0c00 +#define DISP_COLOR_START(comp) ((comp)->data->color_offset) +#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50) +#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54) + +#define COLOR_BYPASS_ALL BIT(7) +#define COLOR_SEQ_SEL BIT(13) + +struct mtk_disp_color_data { + unsigned int color_offset; +}; + +/** + * struct mtk_disp_color - DISP_COLOR driver structure + * @ddp_comp - structure containing type enum and hardware resources + * @crtc - associated crtc to report irq events to + */ +struct mtk_disp_color { + struct mtk_ddp_comp ddp_comp; + struct drm_crtc *crtc; + const struct mtk_disp_color_data *data; +}; + +static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp) +{ + return container_of(comp, struct mtk_disp_color, ddp_comp); +} + +static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w, + unsigned int h, unsigned int vrefresh, + unsigned int bpc) +{ + struct mtk_disp_color *color = comp_to_color(comp); + + writel(w, comp->regs + DISP_COLOR_WIDTH(color)); + writel(h, comp->regs + DISP_COLOR_HEIGHT(color)); +} + +static void mtk_color_start(struct mtk_ddp_comp *comp) +{ + struct mtk_disp_color *color = comp_to_color(comp); + + writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL, + comp->regs + DISP_COLOR_CFG_MAIN); + writel(0x1, comp->regs + DISP_COLOR_START(color)); +} + +static const struct mtk_ddp_comp_funcs mtk_disp_color_funcs = { + .config = mtk_color_config, + .start = mtk_color_start, +}; + +static int mtk_disp_color_bind(struct device *dev, struct device *master, + void *data) +{ + struct mtk_disp_color *priv = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + int ret; + + ret = mtk_ddp_comp_register(drm_dev, &priv->ddp_comp); + if (ret < 0) { + dev_err(dev, "Failed to register component %s: %d\n", + dev->of_node->full_name, ret); + return ret; + } + + return 0; +} + +static void mtk_disp_color_unbind(struct device *dev, struct device *master, + void *data) +{ + struct mtk_disp_color *priv = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + + mtk_ddp_comp_unregister(drm_dev, &priv->ddp_comp); +} + +static const struct component_ops mtk_disp_color_component_ops = { + .bind = mtk_disp_color_bind, + .unbind = mtk_disp_color_unbind, +}; + +static int mtk_disp_color_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mtk_disp_color *priv; + int comp_id; + int ret; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + comp_id = mtk_ddp_comp_get_id(dev->of_node, MTK_DISP_COLOR); + if (comp_id < 0) { + dev_err(dev, "Failed to identify by alias: %d\n", comp_id); + return comp_id; + } + + ret = mtk_ddp_comp_init(dev, dev->of_node, &priv->ddp_comp, comp_id, + &mtk_disp_color_funcs); + if (ret) { + dev_err(dev, "Failed to initialize component: %d\n", ret); + return ret; + } + + priv->data = of_device_get_match_data(dev); + + platform_set_drvdata(pdev, priv); + + ret = component_add(dev, &mtk_disp_color_component_ops); + if (ret) + dev_err(dev, "Failed to add component: %d\n", ret); + + return ret; +} + +static int mtk_disp_color_remove(struct platform_device *pdev) +{ + component_del(&pdev->dev, 
&mtk_disp_color_component_ops); + + return 0; +} + +static const struct mtk_disp_color_data mt2701_color_driver_data = { + .color_offset = DISP_COLOR_START_MT2701, +}; + +static const struct mtk_disp_color_data mt8173_color_driver_data = { + .color_offset = DISP_COLOR_START_MT8173, +}; + +static const struct of_device_id mtk_disp_color_driver_dt_match[] = { + { .compatible = "mediatek,mt2701-disp-color", + .data = &mt2701_color_driver_data}, + { .compatible = "mediatek,mt8173-disp-color", + .data = &mt8173_color_driver_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, mtk_disp_color_driver_dt_match); + +struct platform_driver mtk_disp_color_driver = { + .probe = mtk_disp_color_probe, + .remove = mtk_disp_color_remove, + .driver = { + .name = "mediatek-disp-color", + .owner = THIS_MODULE, + .of_match_table = mtk_disp_color_driver_dt_match, + }, +}; diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index a14d7d64d7b1..35bc5babdbf7 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -42,9 +42,12 @@ #define OVL_RDMA_MEM_GMC 0x40402020 #define OVL_CON_BYTE_SWAP BIT(24) +#define OVL_CON_MTX_YUV_TO_RGB (6 << 16) #define OVL_CON_CLRFMT_RGB (1 << 12) #define OVL_CON_CLRFMT_RGBA8888 (2 << 12) #define OVL_CON_CLRFMT_ARGB8888 (3 << 12) +#define OVL_CON_CLRFMT_UYVY (4 << 12) +#define OVL_CON_CLRFMT_YUYV (5 << 12) #define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \ 0 : OVL_CON_CLRFMT_RGB) #define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \ @@ -176,6 +179,10 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt) case DRM_FORMAT_XBGR8888: case DRM_FORMAT_ABGR8888: return OVL_CON_CLRFMT_RGBA8888 | OVL_CON_BYTE_SWAP; + case DRM_FORMAT_UYVY: + return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB; + case DRM_FORMAT_YUYV: + return OVL_CON_CLRFMT_YUYV | OVL_CON_MTX_YUV_TO_RGB; } } diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c index 6582e1f56d37..cb32c9369f3a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c @@ -559,6 +559,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev, mtk_crtc->ddp_comp = devm_kmalloc_array(dev, mtk_crtc->ddp_comp_nr, sizeof(*mtk_crtc->ddp_comp), GFP_KERNEL); + if (!mtk_crtc->ddp_comp) + return -ENOMEM; mtk_crtc->mutex = mtk_disp_mutex_get(priv->mutex_dev, pipe); if (IS_ERR(mtk_crtc->mutex)) { diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c index 8b52416b6e41..07d7ea2268ef 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c @@ -38,13 +38,6 @@ #define DISP_REG_UFO_START 0x0000 -#define DISP_COLOR_CFG_MAIN 0x0400 -#define DISP_COLOR_START_MT2701 0x0f00 -#define DISP_COLOR_START_MT8173 0x0c00 -#define DISP_COLOR_START(comp) ((comp)->data->color_offset) -#define DISP_COLOR_WIDTH(comp) (DISP_COLOR_START(comp) + 0x50) -#define DISP_COLOR_HEIGHT(comp) (DISP_COLOR_START(comp) + 0x54) - #define DISP_AAL_EN 0x0000 #define DISP_AAL_SIZE 0x0030 @@ -55,9 +48,6 @@ #define LUT_10BIT_MASK 0x03ff -#define COLOR_BYPASS_ALL BIT(7) -#define COLOR_SEQ_SEL BIT(13) - #define OD_RELAYMODE BIT(0) #define UFO_BYPASS BIT(2) @@ -82,20 +72,6 @@ #define DITHER_ADD_LSHIFT_G(x) (((x) & 0x7) << 4) #define DITHER_ADD_RSHIFT_G(x) (((x) & 0x7) << 0) -struct mtk_disp_color_data { - unsigned int color_offset; -}; - -struct mtk_disp_color { - struct mtk_ddp_comp ddp_comp; - 
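comp_to_color(), added in the new driver and deleted just below from the common code, is the standard container_of() idiom: given a pointer to an embedded member, recover the enclosing structure. In isolation:

#include <linux/kernel.h>	/* container_of() */

struct demo_comp {
	int id;
};

struct demo_color {
	struct demo_comp comp;		/* embedded, not pointed to */
	unsigned int color_offset;
};

static inline struct demo_color *demo_to_color(struct demo_comp *comp)
{
	/* Valid only because demo_color embeds demo_comp by value. */
	return container_of(comp, struct demo_color, comp);
}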
const struct mtk_disp_color_data *data; -}; - -static inline struct mtk_disp_color *comp_to_color(struct mtk_ddp_comp *comp) -{ - return container_of(comp, struct mtk_disp_color, ddp_comp); -} - void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, unsigned int CFG) { @@ -119,25 +95,6 @@ void mtk_dither_set(struct mtk_ddp_comp *comp, unsigned int bpc, } } -static void mtk_color_config(struct mtk_ddp_comp *comp, unsigned int w, - unsigned int h, unsigned int vrefresh, - unsigned int bpc) -{ - struct mtk_disp_color *color = comp_to_color(comp); - - writel(w, comp->regs + DISP_COLOR_WIDTH(color)); - writel(h, comp->regs + DISP_COLOR_HEIGHT(color)); -} - -static void mtk_color_start(struct mtk_ddp_comp *comp) -{ - struct mtk_disp_color *color = comp_to_color(comp); - - writel(COLOR_BYPASS_ALL | COLOR_SEQ_SEL, - comp->regs + DISP_COLOR_CFG_MAIN); - writel(0x1, comp->regs + DISP_COLOR_START(color)); -} - static void mtk_od_config(struct mtk_ddp_comp *comp, unsigned int w, unsigned int h, unsigned int vrefresh, unsigned int bpc) @@ -229,11 +186,6 @@ static const struct mtk_ddp_comp_funcs ddp_gamma = { .stop = mtk_gamma_stop, }; -static const struct mtk_ddp_comp_funcs ddp_color = { - .config = mtk_color_config, - .start = mtk_color_start, -}; - static const struct mtk_ddp_comp_funcs ddp_od = { .config = mtk_od_config, .start = mtk_od_start, @@ -268,8 +220,8 @@ struct mtk_ddp_comp_match { static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL] = { MTK_DISP_AAL, 0, &ddp_aal }, [DDP_COMPONENT_BLS] = { MTK_DISP_BLS, 0, NULL }, - [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, &ddp_color }, - [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, &ddp_color }, + [DDP_COMPONENT_COLOR0] = { MTK_DISP_COLOR, 0, NULL }, + [DDP_COMPONENT_COLOR1] = { MTK_DISP_COLOR, 1, NULL }, [DDP_COMPONENT_DPI0] = { MTK_DPI, 0, NULL }, [DDP_COMPONENT_DSI0] = { MTK_DSI, 0, NULL }, [DDP_COMPONENT_DSI1] = { MTK_DSI, 1, NULL }, @@ -286,22 +238,6 @@ static const struct mtk_ddp_comp_match mtk_ddp_matches[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA1] = { MTK_DISP_WDMA, 1, NULL }, }; -static const struct mtk_disp_color_data mt2701_color_driver_data = { - .color_offset = DISP_COLOR_START_MT2701, -}; - -static const struct mtk_disp_color_data mt8173_color_driver_data = { - .color_offset = DISP_COLOR_START_MT8173, -}; - -static const struct of_device_id mtk_disp_color_driver_dt_match[] = { - { .compatible = "mediatek,mt2701-disp-color", - .data = &mt2701_color_driver_data}, - { .compatible = "mediatek,mt8173-disp-color", - .data = &mt8173_color_driver_data}, - {}, -}; - int mtk_ddp_comp_get_id(struct device_node *node, enum mtk_ddp_comp_type comp_type) { @@ -324,23 +260,11 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node, enum mtk_ddp_comp_type type; struct device_node *larb_node; struct platform_device *larb_pdev; - const struct of_device_id *match; - struct mtk_disp_color *color; if (comp_id < 0 || comp_id >= DDP_COMPONENT_ID_MAX) return -EINVAL; type = mtk_ddp_matches[comp_id].type; - if (type == MTK_DISP_COLOR) { - devm_kfree(dev, comp); - color = devm_kzalloc(dev, sizeof(*color), GFP_KERNEL); - if (!color) - return -ENOMEM; - - match = of_match_node(mtk_disp_color_driver_dt_match, node); - color->data = match->data; - comp = &color->ddp_comp; - } comp->id = comp_id; comp->funcs = funcs ?: mtk_ddp_matches[comp_id].funcs; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index f6c8ec4c7dbc..41d2cffe953e 100644 --- 
a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -439,11 +439,12 @@ static int mtk_drm_probe(struct platform_device *pdev) private->comp_node[comp_id] = of_node_get(node); /* - * Currently only the OVL, RDMA, DSI, and DPI blocks have + * Currently only the COLOR, OVL, RDMA, DSI, and DPI blocks have * separate component platform drivers and initialize their own * DDP component structure. The others are initialized here. */ - if (comp_type == MTK_DISP_OVL || + if (comp_type == MTK_DISP_COLOR || + comp_type == MTK_DISP_OVL || comp_type == MTK_DISP_RDMA || comp_type == MTK_DSI || comp_type == MTK_DPI) { @@ -566,6 +567,7 @@ static struct platform_driver mtk_drm_platform_driver = { static struct platform_driver * const mtk_drm_drivers[] = { &mtk_ddp_driver, + &mtk_disp_color_driver, &mtk_disp_ovl_driver, &mtk_disp_rdma_driver, &mtk_dpi_driver, @@ -576,33 +578,14 @@ static struct platform_driver * const mtk_drm_drivers[] = { static int __init mtk_drm_init(void) { - int ret; - int i; - - for (i = 0; i < ARRAY_SIZE(mtk_drm_drivers); i++) { - ret = platform_driver_register(mtk_drm_drivers[i]); - if (ret < 0) { - pr_err("Failed to register %s driver: %d\n", - mtk_drm_drivers[i]->driver.name, ret); - goto err; - } - } - - return 0; - -err: - while (--i >= 0) - platform_driver_unregister(mtk_drm_drivers[i]); - - return ret; + return platform_register_drivers(mtk_drm_drivers, + ARRAY_SIZE(mtk_drm_drivers)); } static void __exit mtk_drm_exit(void) { - int i; - - for (i = ARRAY_SIZE(mtk_drm_drivers) - 1; i >= 0; i--) - platform_driver_unregister(mtk_drm_drivers[i]); + platform_unregister_drivers(mtk_drm_drivers, + ARRAY_SIZE(mtk_drm_drivers)); } module_init(mtk_drm_init); diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.h b/drivers/gpu/drm/mediatek/mtk_drm_drv.h index aef8747d810b..c3378c452c0a 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.h @@ -59,6 +59,7 @@ struct mtk_drm_private { }; extern struct platform_driver mtk_ddp_driver; +extern struct platform_driver mtk_disp_color_driver; extern struct platform_driver mtk_disp_ovl_driver; extern struct platform_driver mtk_disp_rdma_driver; extern struct platform_driver mtk_dpi_driver; diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c index e405e89ed5e5..1a59b9ab4aa8 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c @@ -28,6 +28,8 @@ static const u32 formats[] = { DRM_FORMAT_XRGB8888, DRM_FORMAT_ARGB8888, DRM_FORMAT_RGB565, + DRM_FORMAT_UYVY, + DRM_FORMAT_YUYV, }; static void mtk_plane_reset(struct drm_plane *plane) diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c index b5cc6e12334c..97253c8f813b 100644 --- a/drivers/gpu/drm/mediatek/mtk_dsi.c +++ b/drivers/gpu/drm/mediatek/mtk_dsi.c @@ -930,7 +930,7 @@ static u32 mtk_dsi_recv_cnt(u8 type, u8 *read_data) DRM_INFO("type is 0x02, try again\n"); break; default: - DRM_INFO("type(0x%x) cannot be non-recognite\n", type); + DRM_INFO("type(0x%x) not recognized\n", type); break; } diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c index 0a4ffd724146..71eb4fbbfc85 100644 --- a/drivers/gpu/drm/mediatek/mtk_hdmi.c +++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c @@ -1778,33 +1778,14 @@ static struct platform_driver * const mtk_hdmi_drivers[] = { static int __init mtk_hdmitx_init(void) { - int ret; - int i; - - for (i = 0; i < ARRAY_SIZE(mtk_hdmi_drivers); i++) { - ret = 
platform_driver_register(mtk_hdmi_drivers[i]); - if (ret < 0) { - pr_err("Failed to register %s driver: %d\n", - mtk_hdmi_drivers[i]->driver.name, ret); - goto err; - } - } - - return 0; - -err: - while (--i >= 0) - platform_driver_unregister(mtk_hdmi_drivers[i]); - - return ret; + return platform_register_drivers(mtk_hdmi_drivers, + ARRAY_SIZE(mtk_hdmi_drivers)); } static void __exit mtk_hdmitx_exit(void) { - int i; - - for (i = ARRAY_SIZE(mtk_hdmi_drivers) - 1; i >= 0; i--) - platform_driver_unregister(mtk_hdmi_drivers[i]); + platform_unregister_drivers(mtk_hdmi_drivers, + ARRAY_SIZE(mtk_hdmi_drivers)); } module_init(mtk_hdmitx_init); diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index fa4f8f008e4d..e67ed383e11b 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -31,6 +31,7 @@ #include "radeon_asic.h" #include "atom.h" #include <linux/backlight.h> +#include <linux/dmi.h> extern int atom_debug; @@ -2184,9 +2185,17 @@ int radeon_atom_pick_dig_encoder(struct drm_encoder *encoder, int fe_idx) goto assigned; } - /* on DCE32 and encoder can driver any block so just crtc id */ + /* + * On DCE32 any encoder can drive any block so usually just use crtc id, + * but Apple thinks different at least on iMac10,1, so there use linkb, + * otherwise the internal eDP panel will stay dark. + */ if (ASIC_IS_DCE32(rdev)) { - enc_idx = radeon_crtc->crtc_id; + if (dmi_match(DMI_PRODUCT_NAME, "iMac10,1")) + enc_idx = (dig->linkb) ? 1 : 0; + else + enc_idx = radeon_crtc->crtc_id; + goto assigned; } diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index 699fe7f9b8bf..a2ab6dcdf4a2 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c @@ -184,7 +184,6 @@ void radeon_kfd_device_init(struct radeon_device *rdev) if (rdev->kfd) { struct kgd2kfd_shared_resources gpu_resources = { .compute_vmid_bitmap = 0xFF00, - .num_mec = 1, .num_pipe_per_mec = 4, .num_queue_per_pipe = 8 }; diff --git a/drivers/gpu/drm/rockchip/cdn-dp-core.c b/drivers/gpu/drm/rockchip/cdn-dp-core.c index 14fa1f8351e8..9b0b0588bbed 100644 --- a/drivers/gpu/drm/rockchip/cdn-dp-core.c +++ b/drivers/gpu/drm/rockchip/cdn-dp-core.c @@ -1195,7 +1195,7 @@ static int cdn_dp_probe(struct platform_device *pdev) continue; port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL); - if (!dp) + if (!port) return -ENOMEM; port->extcon = extcon; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index 47905faf5586..c7e96b82cf63 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -45,13 +45,13 @@ struct rockchip_crtc_state { * * @crtc: array of enabled CRTCs, used to map from "pipe" to drm_crtc. * @num_pipe: number of pipes for this device. + * @mm_lock: protect drm_mm on multi-threads. 
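drm_mm does no locking of its own, so every insert and remove on one drm_mm must run under the same lock, error paths included; the rockchip hunk below re-takes mm_lock before drm_mm_remove_node() in its unwind. The invariant as a sketch (demo_map_pages() is an invented stand-in for the step that can fail):

#include <linux/mm.h>
#include <linux/mutex.h>
#include <drm/drm_mm.h>

static int demo_map_pages(struct drm_mm_node *node)
{
	return 0;	/* stand-in for mapping the reserved range */
}

static int demo_iommu_map(struct drm_mm *mm, struct mutex *lock,
			  struct drm_mm_node *node, u64 size)
{
	int ret;

	mutex_lock(lock);
	ret = drm_mm_insert_node_generic(mm, node, size, PAGE_SIZE, 0, 0);
	mutex_unlock(lock);
	if (ret < 0)
		return ret;

	ret = demo_map_pages(node);
	if (ret) {
		mutex_lock(lock);	/* the unwind needs the lock too */
		drm_mm_remove_node(node);
		mutex_unlock(lock);
	}
	return ret;
}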
*/ struct rockchip_drm_private { struct drm_fb_helper fbdev_helper; struct drm_gem_object *fbdev_bo; struct drm_atomic_state *state; struct iommu_domain *domain; - /* protect drm_mm on multi-threads */ struct mutex mm_lock; struct drm_mm mm; struct list_head psr_list; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index df9e57064f19..b74ac717e56a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -29,12 +29,11 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj) ssize_t ret; mutex_lock(&private->mm_lock); - ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm, rk_obj->base.size, PAGE_SIZE, 0, 0); - mutex_unlock(&private->mm_lock); + if (ret < 0) { DRM_ERROR("out of I/O virtual memory: %zd\n", ret); return ret; @@ -56,7 +55,9 @@ static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj) return 0; err_remove_node: + mutex_lock(&private->mm_lock); drm_mm_remove_node(&rk_obj->mm); + mutex_unlock(&private->mm_lock); return ret; } diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 403bbd5f99a9..a12cc7ea99b6 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -520,6 +520,34 @@ static void vc4_crtc_disable(struct drm_crtc *crtc) SCALER_DISPSTATX_EMPTY); } +static void vc4_crtc_update_dlist(struct drm_crtc *crtc) +{ + struct drm_device *dev = crtc->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); + struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); + + if (crtc->state->event) { + unsigned long flags; + + crtc->state->event->pipe = drm_crtc_index(crtc); + + WARN_ON(drm_crtc_vblank_get(crtc) != 0); + + spin_lock_irqsave(&dev->event_lock, flags); + vc4_crtc->event = crtc->state->event; + crtc->state->event = NULL; + + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + vc4_state->mm.start); + + spin_unlock_irqrestore(&dev->event_lock, flags); + } else { + HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), + vc4_state->mm.start); + } +} + static void vc4_crtc_enable(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -530,6 +558,12 @@ static void vc4_crtc_enable(struct drm_crtc *crtc) require_hvs_enabled(dev); + /* Enable vblank irq handling before crtc is started otherwise + * drm_crtc_get_vblank() fails in vc4_crtc_update_dlist(). + */ + drm_crtc_vblank_on(crtc); + vc4_crtc_update_dlist(crtc); + /* Turn on the scaler, which will wait for vstart to start * compositing. */ @@ -541,9 +575,6 @@ static void vc4_crtc_enable(struct drm_crtc *crtc) /* Turn on the pixel valve, which will emit the vstart signal. */ CRTC_WRITE(PV_V_CONTROL, CRTC_READ(PV_V_CONTROL) | PV_VCONTROL_VIDEN); - - /* Enable vblank irq handling after crtc is started. 
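This hunk exists because drm_crtc_vblank_get() errors out while vblank interrupts are disabled, and the display-list update queues a completion event that needs such a reference; so vblank handling must be armed before the dlist write rather than after scanout starts. The required ordering, sketched with invented demo_* names:

struct demo_crtc { int unused; };

static void demo_vblank_on(struct demo_crtc *crtc) { }
static void demo_update_dlist(struct demo_crtc *crtc) { }
static void demo_start_scanout(struct demo_crtc *crtc) { }

static void demo_crtc_enable(struct demo_crtc *crtc)
{
	/* 1. Arm vblank handling first, or any drm_crtc_vblank_get()
	 *    inside the dlist update would fail.
	 */
	demo_vblank_on(crtc);

	/* 2. Program the display list; this may queue a vblank event. */
	demo_update_dlist(crtc);

	/* 3. Only now start scanout. */
	demo_start_scanout(crtc);
}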
*/ - drm_crtc_vblank_on(crtc); } static bool vc4_crtc_mode_fixup(struct drm_crtc *crtc, @@ -598,7 +629,6 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, { struct drm_device *dev = crtc->dev; struct vc4_dev *vc4 = to_vc4_dev(dev); - struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_state = to_vc4_crtc_state(crtc->state); struct drm_plane *plane; bool debug_dump_regs = false; @@ -620,25 +650,15 @@ static void vc4_crtc_atomic_flush(struct drm_crtc *crtc, WARN_ON_ONCE(dlist_next - dlist_start != vc4_state->mm.size); - if (crtc->state->event) { - unsigned long flags; - - crtc->state->event->pipe = drm_crtc_index(crtc); - - WARN_ON(drm_crtc_vblank_get(crtc) != 0); - - spin_lock_irqsave(&dev->event_lock, flags); - vc4_crtc->event = crtc->state->event; - crtc->state->event = NULL; - - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), - vc4_state->mm.start); - - spin_unlock_irqrestore(&dev->event_lock, flags); - } else { - HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel), - vc4_state->mm.start); - } + /* Only update DISPLIST if the CRTC was already running and is not + * being disabled. + * vc4_crtc_enable() takes care of updating the dlist just after + * re-enabling VBLANK interrupts and before enabling the engine. + * If the CRTC is being disabled, there's no point in updating this + * information. + */ + if (crtc->state->active && old_state->active) + vc4_crtc_update_dlist(crtc); if (debug_dump_regs) { DRM_INFO("CRTC %d HVS after:\n", drm_crtc_index(crtc)); diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index ccdff1ee1f0c..199f6a01fc62 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -340,13 +340,45 @@ static unsigned find_battery_quirk(struct hid_device *hdev) return quirks; } +static int hidinput_scale_battery_capacity(struct hid_device *dev, + int value) +{ + if (dev->battery_min < dev->battery_max && + value >= dev->battery_min && value <= dev->battery_max) + value = ((value - dev->battery_min) * 100) / + (dev->battery_max - dev->battery_min); + + return value; +} + +static int hidinput_query_battery_capacity(struct hid_device *dev) +{ + u8 *buf; + int ret; + + buf = kmalloc(2, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2, + dev->battery_report_type, HID_REQ_GET_REPORT); + if (ret != 2) { + kfree(buf); + return -ENODATA; + } + + ret = hidinput_scale_battery_capacity(dev, buf[1]); + kfree(buf); + return ret; +} + static int hidinput_get_battery_property(struct power_supply *psy, enum power_supply_property prop, union power_supply_propval *val) { struct hid_device *dev = power_supply_get_drvdata(psy); + int value; int ret = 0; - __u8 *buf; switch (prop) { case POWER_SUPPLY_PROP_PRESENT: @@ -355,29 +387,15 @@ static int hidinput_get_battery_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CAPACITY: - - buf = kmalloc(2 * sizeof(__u8), GFP_KERNEL); - if (!buf) { - ret = -ENOMEM; - break; - } - ret = hid_hw_raw_request(dev, dev->battery_report_id, buf, 2, - dev->battery_report_type, - HID_REQ_GET_REPORT); - - if (ret != 2) { - ret = -ENODATA; - kfree(buf); - break; + if (dev->battery_report_type == HID_FEATURE_REPORT) { + value = hidinput_query_battery_capacity(dev); + if (value < 0) + return value; + } else { + value = dev->battery_capacity; } - ret = 0; - if (dev->battery_min < dev->battery_max && - buf[1] >= dev->battery_min && - buf[1] <= dev->battery_max) - val->intval = (100 * (buf[1] - dev->battery_min)) / - (dev->battery_max - dev->battery_min); 
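Both the inline expression above and the hidinput_scale_battery_capacity() helper that replaces it do the same linear rescale from the device's reported [battery_min, battery_max] interval onto 0..100. For instance, min = 0, max = 255 and a raw reading of 128 give (128 - 0) * 100 / (255 - 0) = 50 after integer division. As a standalone helper:

/* Rescale a raw reading to a 0..100 percentage, as in the hunk above. */
static int demo_scale_capacity(int value, int min, int max)
{
	if (min >= max || value < min || value > max)
		return value;	/* out-of-range readings pass through unscaled */

	return (value - min) * 100 / (max - min);
}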
- kfree(buf); + val->intval = value; break; case POWER_SUPPLY_PROP_MODEL_NAME: @@ -385,7 +403,22 @@ static int hidinput_get_battery_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_STATUS: - val->intval = POWER_SUPPLY_STATUS_DISCHARGING; + if (!dev->battery_reported && + dev->battery_report_type == HID_FEATURE_REPORT) { + value = hidinput_query_battery_capacity(dev); + if (value < 0) + return value; + + dev->battery_capacity = value; + dev->battery_reported = true; + } + + if (!dev->battery_reported) + val->intval = POWER_SUPPLY_STATUS_UNKNOWN; + else if (dev->battery_capacity == 100) + val->intval = POWER_SUPPLY_STATUS_FULL; + else + val->intval = POWER_SUPPLY_STATUS_DISCHARGING; break; case POWER_SUPPLY_PROP_SCOPE: @@ -400,18 +433,16 @@ static int hidinput_get_battery_property(struct power_supply *psy, return ret; } -static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field) +static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, struct hid_field *field) { - struct power_supply_desc *psy_desc = NULL; + struct power_supply_desc *psy_desc; struct power_supply_config psy_cfg = { .drv_data = dev, }; unsigned quirks; s32 min, max; + int error; - if (field->usage->hid != HID_DC_BATTERYSTRENGTH) - return false; /* no match */ - - if (dev->battery != NULL) - goto out; /* already initialized? */ + if (dev->battery) + return 0; /* already initialized? */ quirks = find_battery_quirk(dev); @@ -419,16 +450,18 @@ static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type, dev->bus, dev->vendor, dev->product, dev->version, quirks); if (quirks & HID_BATTERY_QUIRK_IGNORE) - goto out; + return 0; psy_desc = kzalloc(sizeof(*psy_desc), GFP_KERNEL); - if (psy_desc == NULL) - goto out; - - psy_desc->name = kasprintf(GFP_KERNEL, "hid-%s-battery", dev->uniq); - if (psy_desc->name == NULL) { - kfree(psy_desc); - goto out; + if (!psy_desc) + return -ENOMEM; + + psy_desc->name = kasprintf(GFP_KERNEL, "hid-%s-battery", + strlen(dev->uniq) ? 
+ dev->uniq : dev_name(&dev->dev)); + if (!psy_desc->name) { + error = -ENOMEM; + goto err_free_mem; } psy_desc->type = POWER_SUPPLY_TYPE_BATTERY; @@ -455,17 +488,20 @@ static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type, dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg); if (IS_ERR(dev->battery)) { - hid_warn(dev, "can't register power supply: %ld\n", - PTR_ERR(dev->battery)); - kfree(psy_desc->name); - kfree(psy_desc); - dev->battery = NULL; - } else { - power_supply_powers(dev->battery, &dev->dev); + error = PTR_ERR(dev->battery); + hid_warn(dev, "can't register power supply: %d\n", error); + goto err_free_name; } -out: - return true; + power_supply_powers(dev->battery, &dev->dev); + return 0; + +err_free_name: + kfree(psy_desc->name); +err_free_mem: + kfree(psy_desc); + dev->battery = NULL; + return error; } static void hidinput_cleanup_battery(struct hid_device *dev) @@ -481,16 +517,39 @@ static void hidinput_cleanup_battery(struct hid_device *dev) kfree(psy_desc); dev->battery = NULL; } + +static void hidinput_update_battery(struct hid_device *dev, int value) +{ + int capacity; + + if (!dev->battery) + return; + + if (value == 0 || value < dev->battery_min || value > dev->battery_max) + return; + + capacity = hidinput_scale_battery_capacity(dev, value); + + if (!dev->battery_reported || capacity != dev->battery_capacity) { + dev->battery_capacity = capacity; + dev->battery_reported = true; + power_supply_changed(dev->battery); + } +} #else /* !CONFIG_HID_BATTERY_STRENGTH */ -static bool hidinput_setup_battery(struct hid_device *dev, unsigned report_type, - struct hid_field *field) +static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type, + struct hid_field *field) { - return false; + return 0; } static void hidinput_cleanup_battery(struct hid_device *dev) { } + +static void hidinput_update_battery(struct hid_device *dev, int value) +{ +} #endif /* CONFIG_HID_BATTERY_STRENGTH */ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_field *field, @@ -710,6 +769,11 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel } break; + case 0x3b: /* Battery Strength */ + hidinput_setup_battery(device, HID_INPUT_REPORT, field); + usage->type = EV_PWR; + goto ignore; + case 0x3c: /* Invert */ map_key_clear(BTN_TOOL_RUBBER); break; @@ -944,11 +1008,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel break; case HID_UP_GENDEVCTRLS: - if (hidinput_setup_battery(device, HID_INPUT_REPORT, field)) + switch (usage->hid) { + case HID_DC_BATTERYSTRENGTH: + hidinput_setup_battery(device, HID_INPUT_REPORT, field); + usage->type = EV_PWR; goto ignore; - else - goto unknown; - break; + } + goto unknown; case HID_UP_HPVENDOR: /* Reported on a Dutch layout HP5308 */ set_bit(EV_REP, input->evbit); @@ -1031,7 +1097,6 @@ mapped: if (usage->code > max) goto ignore; - if (usage->type == EV_ABS) { int a = field->logical_minimum; @@ -1090,14 +1155,19 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct struct input_dev *input; unsigned *quirks = &hid->quirks; - if (!field->hidinput) + if (!usage->type) return; - input = field->hidinput->input; + if (usage->type == EV_PWR) { + hidinput_update_battery(hid, value); + return; + } - if (!usage->type) + if (!field->hidinput) return; + input = field->hidinput->input; + if (usage->hat_min < usage->hat_max || usage->hat_dir) { int hat_dir = usage->hat_dir; if (!hat_dir) @@ -1373,6 +1443,7 @@ 
static void report_features(struct hid_device *hid) struct hid_driver *drv = hid->driver; struct hid_report_enum *rep_enum; struct hid_report *rep; + struct hid_usage *usage; int i, j; rep_enum = &hid->report_enum[HID_FEATURE_REPORT]; @@ -1383,12 +1454,15 @@ static void report_features(struct hid_device *hid) continue; for (j = 0; j < rep->field[i]->maxusage; j++) { + usage = &rep->field[i]->usage[j]; + /* Verify if Battery Strength feature is available */ - hidinput_setup_battery(hid, HID_FEATURE_REPORT, rep->field[i]); + if (usage->hid == HID_DC_BATTERYSTRENGTH) + hidinput_setup_battery(hid, HID_FEATURE_REPORT, + rep->field[i]); if (drv->feature_mapping) - drv->feature_mapping(hid, rep->field[i], - rep->field[i]->usage + j); + drv->feature_mapping(hid, rep->field[i], usage); } } } diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c index e9bf0bb87ac4..e57cc40cb768 100644 --- a/drivers/hv/channel.c +++ b/drivers/hv/channel.c @@ -606,6 +606,8 @@ static int vmbus_close_internal(struct vmbus_channel *channel) get_order(channel->ringbuffer_pagecount * PAGE_SIZE)); out: + /* re-enable tasklet for use on re-open */ + tasklet_enable(&channel->callback_event); return ret; } diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index 0af7fd311979..76c34f4fde13 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -566,6 +566,8 @@ static int applesmc_init_smcreg_try(void) if (ret) return ret; s->fan_count = tmp[0]; + if (s->fan_count > 10) + s->fan_count = 10; ret = applesmc_get_lower_bound(&s->temp_begin, "T"); if (ret) @@ -811,7 +813,8 @@ static ssize_t applesmc_show_fan_speed(struct device *dev, char newkey[5]; u8 buffer[2]; - sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr)); + scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)], + to_index(attr)); ret = applesmc_read_key(newkey, buffer, 2); speed = ((buffer[0] << 8 | buffer[1]) >> 2); @@ -834,7 +837,8 @@ static ssize_t applesmc_store_fan_speed(struct device *dev, if (kstrtoul(sysfsbuf, 10, &speed) < 0 || speed >= 0x4000) return -EINVAL; /* Bigger than a 14-bit value */ - sprintf(newkey, fan_speed_fmt[to_option(attr)], to_index(attr)); + scnprintf(newkey, sizeof(newkey), fan_speed_fmt[to_option(attr)], + to_index(attr)); buffer[0] = (speed >> 6) & 0xff; buffer[1] = (speed << 2) & 0xff; @@ -903,7 +907,7 @@ static ssize_t applesmc_show_fan_position(struct device *dev, char newkey[5]; u8 buffer[17]; - sprintf(newkey, FAN_ID_FMT, to_index(attr)); + scnprintf(newkey, sizeof(newkey), FAN_ID_FMT, to_index(attr)); ret = applesmc_read_key(newkey, buffer, 16); buffer[16] = 0; @@ -1116,7 +1120,8 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num) } for (i = 0; i < num; i++) { node = &grp->nodes[i]; - sprintf(node->name, grp->format, i + 1); + scnprintf(node->name, sizeof(node->name), grp->format, + i + 1); node->sda.index = (grp->option << 16) | (i & 0xffff); node->sda.dev_attr.show = grp->show; node->sda.dev_attr.store = grp->store; diff --git a/drivers/i2c/Makefile b/drivers/i2c/Makefile index 45095b3d16a9..7bb65a4369e1 100644 --- a/drivers/i2c/Makefile +++ b/drivers/i2c/Makefile @@ -4,6 +4,11 @@ obj-$(CONFIG_I2C_BOARDINFO) += i2c-boardinfo.o obj-$(CONFIG_I2C) += i2c-core.o +i2c-core-objs := i2c-core-base.o i2c-core-smbus.o +i2c-core-$(CONFIG_ACPI) += i2c-core-acpi.o +i2c-core-$(CONFIG_I2C_SLAVE) += i2c-core-slave.o +i2c-core-$(CONFIG_OF) += i2c-core-of.o + obj-$(CONFIG_I2C_SMBUS) += i2c-smbus.o obj-$(CONFIG_I2C_CHARDEV) += i2c-dev.o obj-$(CONFIG_I2C_MUX) += 
i2c-mux.o @@ -12,4 +17,4 @@ obj-$(CONFIG_I2C_STUB) += i2c-stub.o obj-$(CONFIG_I2C_SLAVE_EEPROM) += i2c-slave-eeprom.o ccflags-$(CONFIG_I2C_DEBUG_CORE) := -DDEBUG -CFLAGS_i2c-core.o := -Wno-deprecated-declarations +CFLAGS_i2c-core-base.o := -Wno-deprecated-declarations diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c index a8e89df665b9..1147bddb8b2c 100644 --- a/drivers/i2c/algos/i2c-algo-bit.c +++ b/drivers/i2c/algos/i2c-algo-bit.c @@ -553,9 +553,16 @@ static int bit_xfer(struct i2c_adapter *i2c_adap, nak_ok = pmsg->flags & I2C_M_IGNORE_NAK; if (!(pmsg->flags & I2C_M_NOSTART)) { if (i) { - bit_dbg(3, &i2c_adap->dev, "emitting " "repeated start condition\n"); - i2c_repstart(adap); + if (msgs[i - 1].flags & I2C_M_STOP) { + bit_dbg(3, &i2c_adap->dev, + "emitting enforced stop/start condition\n"); + i2c_stop(adap); + i2c_start(adap); + } else { + bit_dbg(3, &i2c_adap->dev, + "emitting repeated start condition\n"); + i2c_repstart(adap); + } } ret = bit_doAddress(i2c_adap, pmsg); if ((ret != 0) && !nak_ok) { diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 144cbadc7c72..1006b230b236 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -129,6 +129,8 @@ config I2C_I801 Broxton (SOC) Lewisburg (PCH) Gemini Lake (SOC) + Cannon Lake-H (PCH) + Cannon Lake-LP (PCH) This driver can also be built as a module. If so, the module will be called i2c-i801. @@ -326,6 +328,16 @@ config I2C_POWERMAC comment "I2C system bus drivers (mostly embedded / system-on-chip)" +config I2C_ASPEED + tristate "Aspeed I2C Controller" + depends on ARCH_ASPEED || COMPILE_TEST + help + If you say yes to this option, support will be included for the + Aspeed I2C controller. + + This driver can also be built as a module. If so, the module + will be called i2c-aspeed. + config I2C_AT91 tristate "Atmel AT91 I2C Two-Wire interface (TWI)" depends on ARCH_AT91 @@ -474,11 +486,22 @@ config I2C_DESIGNWARE_PLATFORM depends on (ACPI && COMMON_CLK) || !ACPI help If you say yes to this option, support will be included for the - Synopsys DesignWare I2C adapter. Only master mode is supported. + Synopsys DesignWare I2C adapter. This driver can also be built as a module. If so, the module will be called i2c-designware-platform. +config I2C_DESIGNWARE_SLAVE + bool "Synopsys DesignWare Slave" + select I2C_SLAVE + depends on I2C_DESIGNWARE_PLATFORM + help + If you say yes to this option, support will be included for the + Synopsys DesignWare I2C slave adapter. + + This is not a standalone module; it is compiled together with + i2c-designware-core. + config I2C_DESIGNWARE_PCI tristate "Synopsys DesignWare PCI" depends on PCI @@ -1258,4 +1281,13 @@ config I2C_OPAL This driver can also be built as a module. If so, the module will be called as i2c-opal. +config I2C_ZX2967 + tristate "ZTE ZX2967 I2C support" + depends on ARCH_ZX + default y + help + Selecting this option will add the ZX2967 I2C driver. + This driver can also be built as a module. If so, the module will be + called i2c-zx2967.
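The i2c-algo-bit change earlier in this hunk honors I2C_M_STOP on the previous message by emitting a real STOP followed by a fresh START instead of a repeated START. A minimal caller-side sketch under stated assumptions (the helper and device are hypothetical, and the flag is only legal on adapters advertising I2C_FUNC_PROTOCOL_MANGLING):

#include <linux/i2c.h>

/* Hypothetical example: write a register address, then read one byte back
 * with an enforced STOP (rather than a repeated START) between the two
 * messages. */
static int demo_read_with_stop(struct i2c_client *client, u8 reg, u8 *val)
{
	struct i2c_msg msgs[2] = {
		{
			.addr = client->addr,
			.flags = I2C_M_STOP, /* force a STOP after this message */
			.len = 1,
			.buf = &reg,
		},
		{
			.addr = client->addr,
			.flags = I2C_M_RD,
			.len = 1,
			.buf = val,
		},
	};
	int ret = i2c_transfer(client->adapter, msgs, 2);

	return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
}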
+ endmenu diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile index 30b60855fbcd..1b2fc815a4d8 100644 --- a/drivers/i2c/busses/Makefile +++ b/drivers/i2c/busses/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_I2C_HYDRA) += i2c-hydra.o obj-$(CONFIG_I2C_POWERMAC) += i2c-powermac.o # Embedded system I2C/SMBus host controller drivers +obj-$(CONFIG_I2C_ASPEED) += i2c-aspeed.o obj-$(CONFIG_I2C_AT91) += i2c-at91.o obj-$(CONFIG_I2C_AU1550) += i2c-au1550.o obj-$(CONFIG_I2C_AXXIA) += i2c-axxia.o @@ -40,6 +41,10 @@ obj-$(CONFIG_I2C_CBUS_GPIO) += i2c-cbus-gpio.o obj-$(CONFIG_I2C_CPM) += i2c-cpm.o obj-$(CONFIG_I2C_DAVINCI) += i2c-davinci.o obj-$(CONFIG_I2C_DESIGNWARE_CORE) += i2c-designware-core.o +i2c-designware-core-objs := i2c-designware-common.o i2c-designware-master.o +ifeq ($(CONFIG_I2C_DESIGNWARE_SLAVE),y) +i2c-designware-core-objs += i2c-designware-slave.o +endif obj-$(CONFIG_I2C_DESIGNWARE_PLATFORM) += i2c-designware-platform.o i2c-designware-platform-objs := i2c-designware-platdrv.o i2c-designware-platform-$(CONFIG_I2C_DESIGNWARE_BAYTRAIL) += i2c-designware-baytrail.o @@ -102,6 +107,7 @@ obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o obj-$(CONFIG_I2C_XLR) += i2c-xlr.o obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o obj-$(CONFIG_I2C_RCAR) += i2c-rcar.o +obj-$(CONFIG_I2C_ZX2967) += i2c-zx2967.o # External I2C/SMBus adapter drivers obj-$(CONFIG_I2C_DIOLAN_U2C) += i2c-diolan-u2c.o diff --git a/drivers/i2c/busses/i2c-aspeed.c b/drivers/i2c/busses/i2c-aspeed.c new file mode 100644 index 000000000000..f19348328a71 --- /dev/null +++ b/drivers/i2c/busses/i2c-aspeed.c @@ -0,0 +1,891 @@ +/* + * Aspeed 24XX/25XX I2C Controller. + * + * Copyright (C) 2012-2017 ASPEED Technology Inc. + * Copyright 2017 IBM Corporation + * Copyright 2017 Google, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqchip/chained_irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/slab.h> + +/* I2C Register */ +#define ASPEED_I2C_FUN_CTRL_REG 0x00 +#define ASPEED_I2C_AC_TIMING_REG1 0x04 +#define ASPEED_I2C_AC_TIMING_REG2 0x08 +#define ASPEED_I2C_INTR_CTRL_REG 0x0c +#define ASPEED_I2C_INTR_STS_REG 0x10 +#define ASPEED_I2C_CMD_REG 0x14 +#define ASPEED_I2C_DEV_ADDR_REG 0x18 +#define ASPEED_I2C_BYTE_BUF_REG 0x20 + +/* Global Register Definition */ +/* 0x00 : I2C Interrupt Status Register */ +/* 0x08 : I2C Interrupt Target Assignment */ + +/* Device Register Definition */ +/* 0x00 : I2CD Function Control Register */ +#define ASPEED_I2CD_MULTI_MASTER_DIS BIT(15) +#define ASPEED_I2CD_SDA_DRIVE_1T_EN BIT(8) +#define ASPEED_I2CD_M_SDA_DRIVE_1T_EN BIT(7) +#define ASPEED_I2CD_M_HIGH_SPEED_EN BIT(6) +#define ASPEED_I2CD_SLAVE_EN BIT(1) +#define ASPEED_I2CD_MASTER_EN BIT(0) + +/* 0x04 : I2CD Clock and AC Timing Control Register #1 */ +#define ASPEED_I2CD_TIME_SCL_HIGH_SHIFT 16 +#define ASPEED_I2CD_TIME_SCL_HIGH_MASK GENMASK(19, 16) +#define ASPEED_I2CD_TIME_SCL_LOW_SHIFT 12 +#define ASPEED_I2CD_TIME_SCL_LOW_MASK GENMASK(15, 12) +#define ASPEED_I2CD_TIME_BASE_DIVISOR_MASK GENMASK(3, 0) +#define ASPEED_I2CD_TIME_SCL_REG_MAX GENMASK(3, 0) +/* 0x08 : I2CD Clock and AC Timing Control Register #2 */ +#define ASPEED_NO_TIMEOUT_CTRL 0 + +/* 0x0c : I2CD Interrupt Control Register & + * 0x10 : I2CD Interrupt Status Register + * + * These share bit definitions, so use the same values for the enable & + * status bits. 
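* For example (a usage sketch, not patch text): aspeed_i2c_init() below
* enables sources with
*   writel(ASPEED_I2CD_INTR_ALL, bus->base + ASPEED_I2C_INTR_CTRL_REG);
* and the interrupt handlers acknowledge the very same bit values with
*   writel(irq_status, bus->base + ASPEED_I2C_INTR_STS_REG);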
+ */ +#define ASPEED_I2CD_INTR_SDA_DL_TIMEOUT BIT(14) +#define ASPEED_I2CD_INTR_BUS_RECOVER_DONE BIT(13) +#define ASPEED_I2CD_INTR_SLAVE_MATCH BIT(7) +#define ASPEED_I2CD_INTR_SCL_TIMEOUT BIT(6) +#define ASPEED_I2CD_INTR_ABNORMAL BIT(5) +#define ASPEED_I2CD_INTR_NORMAL_STOP BIT(4) +#define ASPEED_I2CD_INTR_ARBIT_LOSS BIT(3) +#define ASPEED_I2CD_INTR_RX_DONE BIT(2) +#define ASPEED_I2CD_INTR_TX_NAK BIT(1) +#define ASPEED_I2CD_INTR_TX_ACK BIT(0) +#define ASPEED_I2CD_INTR_ALL \ + (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | \ + ASPEED_I2CD_INTR_BUS_RECOVER_DONE | \ + ASPEED_I2CD_INTR_SCL_TIMEOUT | \ + ASPEED_I2CD_INTR_ABNORMAL | \ + ASPEED_I2CD_INTR_NORMAL_STOP | \ + ASPEED_I2CD_INTR_ARBIT_LOSS | \ + ASPEED_I2CD_INTR_RX_DONE | \ + ASPEED_I2CD_INTR_TX_NAK | \ + ASPEED_I2CD_INTR_TX_ACK) + +/* 0x14 : I2CD Command/Status Register */ +#define ASPEED_I2CD_SCL_LINE_STS BIT(18) +#define ASPEED_I2CD_SDA_LINE_STS BIT(17) +#define ASPEED_I2CD_BUS_BUSY_STS BIT(16) +#define ASPEED_I2CD_BUS_RECOVER_CMD BIT(11) + +/* Command Bit */ +#define ASPEED_I2CD_M_STOP_CMD BIT(5) +#define ASPEED_I2CD_M_S_RX_CMD_LAST BIT(4) +#define ASPEED_I2CD_M_RX_CMD BIT(3) +#define ASPEED_I2CD_S_TX_CMD BIT(2) +#define ASPEED_I2CD_M_TX_CMD BIT(1) +#define ASPEED_I2CD_M_START_CMD BIT(0) + +/* 0x18 : I2CD Slave Device Address Register */ +#define ASPEED_I2CD_DEV_ADDR_MASK GENMASK(6, 0) + +enum aspeed_i2c_master_state { + ASPEED_I2C_MASTER_START, + ASPEED_I2C_MASTER_TX_FIRST, + ASPEED_I2C_MASTER_TX, + ASPEED_I2C_MASTER_RX_FIRST, + ASPEED_I2C_MASTER_RX, + ASPEED_I2C_MASTER_STOP, + ASPEED_I2C_MASTER_INACTIVE, +}; + +enum aspeed_i2c_slave_state { + ASPEED_I2C_SLAVE_START, + ASPEED_I2C_SLAVE_READ_REQUESTED, + ASPEED_I2C_SLAVE_READ_PROCESSED, + ASPEED_I2C_SLAVE_WRITE_REQUESTED, + ASPEED_I2C_SLAVE_WRITE_RECEIVED, + ASPEED_I2C_SLAVE_STOP, +}; + +struct aspeed_i2c_bus { + struct i2c_adapter adap; + struct device *dev; + void __iomem *base; + /* Synchronizes I/O mem access to base. */ + spinlock_t lock; + struct completion cmd_complete; + unsigned long parent_clk_frequency; + u32 bus_frequency; + /* Transaction state. */ + enum aspeed_i2c_master_state master_state; + struct i2c_msg *msgs; + size_t buf_index; + size_t msgs_index; + size_t msgs_count; + bool send_stop; + int cmd_err; + /* Protected only by i2c_lock_bus */ + int master_xfer_result; +#if IS_ENABLED(CONFIG_I2C_SLAVE) + struct i2c_client *slave; + enum aspeed_i2c_slave_state slave_state; +#endif /* CONFIG_I2C_SLAVE */ +}; + +static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus); + +static int aspeed_i2c_recover_bus(struct aspeed_i2c_bus *bus) +{ + unsigned long time_left, flags; + int ret = 0; + u32 command; + + spin_lock_irqsave(&bus->lock, flags); + command = readl(bus->base + ASPEED_I2C_CMD_REG); + + if (command & ASPEED_I2CD_SDA_LINE_STS) { + /* Bus is idle: no recovery needed. */ + if (command & ASPEED_I2CD_SCL_LINE_STS) + goto out; + dev_dbg(bus->dev, "SCL hung (state %x), attempting recovery\n", + command); + + reinit_completion(&bus->cmd_complete); + writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); + spin_unlock_irqrestore(&bus->lock, flags); + + time_left = wait_for_completion_timeout( + &bus->cmd_complete, bus->adap.timeout); + + spin_lock_irqsave(&bus->lock, flags); + if (time_left == 0) + goto reset_out; + else if (bus->cmd_err) + goto reset_out; + /* Recovery failed. */ + else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) & + ASPEED_I2CD_SCL_LINE_STS)) + goto reset_out; + /* Bus error. 
*/ + } else { + dev_dbg(bus->dev, "SDA hung (state %x), attempting recovery\n", + command); + + reinit_completion(&bus->cmd_complete); + /* Writes 1 to 8 SCL clock cycles until SDA is released. */ + writel(ASPEED_I2CD_BUS_RECOVER_CMD, + bus->base + ASPEED_I2C_CMD_REG); + spin_unlock_irqrestore(&bus->lock, flags); + + time_left = wait_for_completion_timeout( + &bus->cmd_complete, bus->adap.timeout); + + spin_lock_irqsave(&bus->lock, flags); + if (time_left == 0) + goto reset_out; + else if (bus->cmd_err) + goto reset_out; + /* Recovery failed. */ + else if (!(readl(bus->base + ASPEED_I2C_CMD_REG) & + ASPEED_I2CD_SDA_LINE_STS)) + goto reset_out; + } + +out: + spin_unlock_irqrestore(&bus->lock, flags); + + return ret; + +reset_out: + spin_unlock_irqrestore(&bus->lock, flags); + + return aspeed_i2c_reset(bus); +} + +#if IS_ENABLED(CONFIG_I2C_SLAVE) +static bool aspeed_i2c_slave_irq(struct aspeed_i2c_bus *bus) +{ + u32 command, irq_status, status_ack = 0; + struct i2c_client *slave = bus->slave; + bool irq_handled = true; + u8 value; + + spin_lock(&bus->lock); + if (!slave) { + irq_handled = false; + goto out; + } + + command = readl(bus->base + ASPEED_I2C_CMD_REG); + irq_status = readl(bus->base + ASPEED_I2C_INTR_STS_REG); + + /* Slave was requested, restart state machine. */ + if (irq_status & ASPEED_I2CD_INTR_SLAVE_MATCH) { + status_ack |= ASPEED_I2CD_INTR_SLAVE_MATCH; + bus->slave_state = ASPEED_I2C_SLAVE_START; + } + + /* Slave is not currently active, irq was for someone else. */ + if (bus->slave_state == ASPEED_I2C_SLAVE_STOP) { + irq_handled = false; + goto out; + } + + dev_dbg(bus->dev, "slave irq status 0x%08x, cmd 0x%08x\n", + irq_status, command); + + /* Slave was sent something. */ + if (irq_status & ASPEED_I2CD_INTR_RX_DONE) { + value = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8; + /* Handle address frame. */ + if (bus->slave_state == ASPEED_I2C_SLAVE_START) { + if (value & 0x1) + bus->slave_state = + ASPEED_I2C_SLAVE_READ_REQUESTED; + else + bus->slave_state = + ASPEED_I2C_SLAVE_WRITE_REQUESTED; + } + status_ack |= ASPEED_I2CD_INTR_RX_DONE; + } + + /* Slave was asked to stop. 
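* A TX_NAK from the remote master ends a slave read the same way (see the
* next branch). As a sketch, a typical master-read exchange walks:
*   SLAVE_MATCH -> START -> READ_REQUESTED -> READ_PROCESSED (per byte)
*   -> TX_NAK or NORMAL_STOP -> STOP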
*/ + if (irq_status & ASPEED_I2CD_INTR_NORMAL_STOP) { + status_ack |= ASPEED_I2CD_INTR_NORMAL_STOP; + bus->slave_state = ASPEED_I2C_SLAVE_STOP; + } + if (irq_status & ASPEED_I2CD_INTR_TX_NAK) { + status_ack |= ASPEED_I2CD_INTR_TX_NAK; + bus->slave_state = ASPEED_I2C_SLAVE_STOP; + } + + switch (bus->slave_state) { + case ASPEED_I2C_SLAVE_READ_REQUESTED: + if (irq_status & ASPEED_I2CD_INTR_TX_ACK) + dev_err(bus->dev, "Unexpected ACK on read request.\n"); + bus->slave_state = ASPEED_I2C_SLAVE_READ_PROCESSED; + + i2c_slave_event(slave, I2C_SLAVE_READ_REQUESTED, &value); + writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG); + writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG); + break; + case ASPEED_I2C_SLAVE_READ_PROCESSED: + status_ack |= ASPEED_I2CD_INTR_TX_ACK; + if (!(irq_status & ASPEED_I2CD_INTR_TX_ACK)) + dev_err(bus->dev, + "Expected ACK after processed read.\n"); + i2c_slave_event(slave, I2C_SLAVE_READ_PROCESSED, &value); + writel(value, bus->base + ASPEED_I2C_BYTE_BUF_REG); + writel(ASPEED_I2CD_S_TX_CMD, bus->base + ASPEED_I2C_CMD_REG); + break; + case ASPEED_I2C_SLAVE_WRITE_REQUESTED: + bus->slave_state = ASPEED_I2C_SLAVE_WRITE_RECEIVED; + i2c_slave_event(slave, I2C_SLAVE_WRITE_REQUESTED, &value); + break; + case ASPEED_I2C_SLAVE_WRITE_RECEIVED: + i2c_slave_event(slave, I2C_SLAVE_WRITE_RECEIVED, &value); + break; + case ASPEED_I2C_SLAVE_STOP: + i2c_slave_event(slave, I2C_SLAVE_STOP, &value); + break; + default: + dev_err(bus->dev, "unhandled slave_state: %d\n", + bus->slave_state); + break; + } + + if (status_ack != irq_status) + dev_err(bus->dev, + "irq handled != irq. expected %x, but was %x\n", + irq_status, status_ack); + writel(status_ack, bus->base + ASPEED_I2C_INTR_STS_REG); + +out: + spin_unlock(&bus->lock); + return irq_handled; +} +#endif /* CONFIG_I2C_SLAVE */ + +/* precondition: bus.lock has been acquired. */ +static void aspeed_i2c_do_start(struct aspeed_i2c_bus *bus) +{ + u32 command = ASPEED_I2CD_M_START_CMD | ASPEED_I2CD_M_TX_CMD; + struct i2c_msg *msg = &bus->msgs[bus->msgs_index]; + u8 slave_addr = msg->addr << 1; + + bus->master_state = ASPEED_I2C_MASTER_START; + bus->buf_index = 0; + + if (msg->flags & I2C_M_RD) { + slave_addr |= 1; + command |= ASPEED_I2CD_M_RX_CMD; + /* Need to let the hardware know to NACK after RX. */ + if (msg->len == 1 && !(msg->flags & I2C_M_RECV_LEN)) + command |= ASPEED_I2CD_M_S_RX_CMD_LAST; + } + + writel(slave_addr, bus->base + ASPEED_I2C_BYTE_BUF_REG); + writel(command, bus->base + ASPEED_I2C_CMD_REG); +} + +/* precondition: bus.lock has been acquired. */ +static void aspeed_i2c_do_stop(struct aspeed_i2c_bus *bus) +{ + bus->master_state = ASPEED_I2C_MASTER_STOP; + writel(ASPEED_I2CD_M_STOP_CMD, bus->base + ASPEED_I2C_CMD_REG); +} + +/* precondition: bus.lock has been acquired. 
*/ +static void aspeed_i2c_next_msg_or_stop(struct aspeed_i2c_bus *bus) +{ + if (bus->msgs_index + 1 < bus->msgs_count) { + bus->msgs_index++; + aspeed_i2c_do_start(bus); + } else { + aspeed_i2c_do_stop(bus); + } +} + +static int aspeed_i2c_is_irq_error(u32 irq_status) +{ + if (irq_status & ASPEED_I2CD_INTR_ARBIT_LOSS) + return -EAGAIN; + if (irq_status & (ASPEED_I2CD_INTR_SDA_DL_TIMEOUT | + ASPEED_I2CD_INTR_SCL_TIMEOUT)) + return -EBUSY; + if (irq_status & (ASPEED_I2CD_INTR_ABNORMAL)) + return -EPROTO; + + return 0; +} + +static bool aspeed_i2c_master_irq(struct aspeed_i2c_bus *bus) +{ + u32 irq_status, status_ack = 0, command = 0; + struct i2c_msg *msg; + u8 recv_byte; + int ret; + + spin_lock(&bus->lock); + irq_status = readl(bus->base + ASPEED_I2C_INTR_STS_REG); + /* Ack all interrupt bits. */ + writel(irq_status, bus->base + ASPEED_I2C_INTR_STS_REG); + + if (irq_status & ASPEED_I2CD_INTR_BUS_RECOVER_DONE) { + bus->master_state = ASPEED_I2C_MASTER_INACTIVE; + status_ack |= ASPEED_I2CD_INTR_BUS_RECOVER_DONE; + goto out_complete; + } + + /* + * We encountered an interrupt that reports an error: the hardware + * should clear the command queue effectively taking us back to the + * INACTIVE state. + */ + ret = aspeed_i2c_is_irq_error(irq_status); + if (ret < 0) { + dev_dbg(bus->dev, "received error interrupt: 0x%08x", + irq_status); + bus->cmd_err = ret; + bus->master_state = ASPEED_I2C_MASTER_INACTIVE; + goto out_complete; + } + + /* We are in an invalid state; reset bus to a known state. */ + if (!bus->msgs && bus->master_state != ASPEED_I2C_MASTER_STOP) { + dev_err(bus->dev, "bus in unknown state"); + bus->cmd_err = -EIO; + aspeed_i2c_do_stop(bus); + goto out_no_complete; + } + msg = &bus->msgs[bus->msgs_index]; + + /* + * START is a special case because we still have to handle a subsequent + * TX or RX immediately after we handle it, so we handle it here and + * then update the state and handle the new state below. 
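* As an illustration (not patch text): a two-byte read walks the states
*   START -> RX_FIRST -> RX -> STOP -> INACTIVE
* and a two-byte write walks
*   START -> TX_FIRST -> TX -> STOP -> INACTIVE.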
+ */ + if (bus->master_state == ASPEED_I2C_MASTER_START) { + if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) { + pr_devel("no slave present at %02x", msg->addr); + status_ack |= ASPEED_I2CD_INTR_TX_NAK; + bus->cmd_err = -ENXIO; + aspeed_i2c_do_stop(bus); + goto out_no_complete; + } + status_ack |= ASPEED_I2CD_INTR_TX_ACK; + if (msg->len == 0) { /* SMBUS_QUICK */ + aspeed_i2c_do_stop(bus); + goto out_no_complete; + } + if (msg->flags & I2C_M_RD) + bus->master_state = ASPEED_I2C_MASTER_RX_FIRST; + else + bus->master_state = ASPEED_I2C_MASTER_TX_FIRST; + } + + switch (bus->master_state) { + case ASPEED_I2C_MASTER_TX: + if (unlikely(irq_status & ASPEED_I2CD_INTR_TX_NAK)) { + dev_dbg(bus->dev, "slave NACKed TX"); + status_ack |= ASPEED_I2CD_INTR_TX_NAK; + goto error_and_stop; + } else if (unlikely(!(irq_status & ASPEED_I2CD_INTR_TX_ACK))) { + dev_err(bus->dev, "slave failed to ACK TX"); + goto error_and_stop; + } + status_ack |= ASPEED_I2CD_INTR_TX_ACK; + /* fallthrough intended */ + case ASPEED_I2C_MASTER_TX_FIRST: + if (bus->buf_index < msg->len) { + bus->master_state = ASPEED_I2C_MASTER_TX; + writel(msg->buf[bus->buf_index++], + bus->base + ASPEED_I2C_BYTE_BUF_REG); + writel(ASPEED_I2CD_M_TX_CMD, + bus->base + ASPEED_I2C_CMD_REG); + } else { + aspeed_i2c_next_msg_or_stop(bus); + } + goto out_no_complete; + case ASPEED_I2C_MASTER_RX_FIRST: + /* RX may not have completed yet (only address cycle) */ + if (!(irq_status & ASPEED_I2CD_INTR_RX_DONE)) + goto out_no_complete; + /* fallthrough intended */ + case ASPEED_I2C_MASTER_RX: + if (unlikely(!(irq_status & ASPEED_I2CD_INTR_RX_DONE))) { + dev_err(bus->dev, "master failed to RX"); + goto error_and_stop; + } + status_ack |= ASPEED_I2CD_INTR_RX_DONE; + + recv_byte = readl(bus->base + ASPEED_I2C_BYTE_BUF_REG) >> 8; + msg->buf[bus->buf_index++] = recv_byte; + + if (msg->flags & I2C_M_RECV_LEN) { + if (unlikely(recv_byte > I2C_SMBUS_BLOCK_MAX)) { + bus->cmd_err = -EPROTO; + aspeed_i2c_do_stop(bus); + goto out_no_complete; + } + msg->len = recv_byte + + ((msg->flags & I2C_CLIENT_PEC) ? 2 : 1); + msg->flags &= ~I2C_M_RECV_LEN; + } + + if (bus->buf_index < msg->len) { + bus->master_state = ASPEED_I2C_MASTER_RX; + command = ASPEED_I2CD_M_RX_CMD; + if (bus->buf_index + 1 == msg->len) + command |= ASPEED_I2CD_M_S_RX_CMD_LAST; + writel(command, bus->base + ASPEED_I2C_CMD_REG); + } else { + aspeed_i2c_next_msg_or_stop(bus); + } + goto out_no_complete; + case ASPEED_I2C_MASTER_STOP: + if (unlikely(!(irq_status & ASPEED_I2CD_INTR_NORMAL_STOP))) { + dev_err(bus->dev, "master failed to STOP"); + bus->cmd_err = -EIO; + /* Do not STOP as we have already tried. */ + } else { + status_ack |= ASPEED_I2CD_INTR_NORMAL_STOP; + } + + bus->master_state = ASPEED_I2C_MASTER_INACTIVE; + goto out_complete; + case ASPEED_I2C_MASTER_INACTIVE: + dev_err(bus->dev, + "master received interrupt 0x%08x, but is inactive", + irq_status); + bus->cmd_err = -EIO; + /* Do not STOP as we should be inactive. */ + goto out_complete; + default: + WARN(1, "unknown master state\n"); + bus->master_state = ASPEED_I2C_MASTER_INACTIVE; + bus->cmd_err = -EINVAL; + goto out_complete; + } +error_and_stop: + bus->cmd_err = -EIO; + aspeed_i2c_do_stop(bus); + goto out_no_complete; +out_complete: + bus->msgs = NULL; + if (bus->cmd_err) + bus->master_xfer_result = bus->cmd_err; + else + bus->master_xfer_result = bus->msgs_index + 1; + complete(&bus->cmd_complete); +out_no_complete: + if (irq_status != status_ack) + dev_err(bus->dev, + "irq handled != irq. 
expected 0x%08x, but was 0x%08x\n", + irq_status, status_ack); + spin_unlock(&bus->lock); + return !!irq_status; +} + +static irqreturn_t aspeed_i2c_bus_irq(int irq, void *dev_id) +{ + struct aspeed_i2c_bus *bus = dev_id; + +#if IS_ENABLED(CONFIG_I2C_SLAVE) + if (aspeed_i2c_slave_irq(bus)) { + dev_dbg(bus->dev, "irq handled by slave.\n"); + return IRQ_HANDLED; + } +#endif /* CONFIG_I2C_SLAVE */ + + return aspeed_i2c_master_irq(bus) ? IRQ_HANDLED : IRQ_NONE; +} + +static int aspeed_i2c_master_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct aspeed_i2c_bus *bus = i2c_get_adapdata(adap); + unsigned long time_left, flags; + int ret = 0; + + spin_lock_irqsave(&bus->lock, flags); + bus->cmd_err = 0; + + /* If bus is busy, attempt recovery. We assume a single master + * environment. + */ + if (readl(bus->base + ASPEED_I2C_CMD_REG) & ASPEED_I2CD_BUS_BUSY_STS) { + spin_unlock_irqrestore(&bus->lock, flags); + ret = aspeed_i2c_recover_bus(bus); + if (ret) + return ret; + spin_lock_irqsave(&bus->lock, flags); + } + + bus->cmd_err = 0; + bus->msgs = msgs; + bus->msgs_index = 0; + bus->msgs_count = num; + + reinit_completion(&bus->cmd_complete); + aspeed_i2c_do_start(bus); + spin_unlock_irqrestore(&bus->lock, flags); + + time_left = wait_for_completion_timeout(&bus->cmd_complete, + bus->adap.timeout); + + if (time_left == 0) + return -ETIMEDOUT; + else + return bus->master_xfer_result; +} + +static u32 aspeed_i2c_functionality(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_SMBUS_BLOCK_DATA; +} + +#if IS_ENABLED(CONFIG_I2C_SLAVE) +/* precondition: bus.lock has been acquired. */ +static void __aspeed_i2c_reg_slave(struct aspeed_i2c_bus *bus, u16 slave_addr) +{ + u32 addr_reg_val, func_ctrl_reg_val; + + /* Set slave addr. */ + addr_reg_val = readl(bus->base + ASPEED_I2C_DEV_ADDR_REG); + addr_reg_val &= ~ASPEED_I2CD_DEV_ADDR_MASK; + addr_reg_val |= slave_addr & ASPEED_I2CD_DEV_ADDR_MASK; + writel(addr_reg_val, bus->base + ASPEED_I2C_DEV_ADDR_REG); + + /* Turn on slave mode. */ + func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG); + func_ctrl_reg_val |= ASPEED_I2CD_SLAVE_EN; + writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG); +} + +static int aspeed_i2c_reg_slave(struct i2c_client *client) +{ + struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter); + unsigned long flags; + + spin_lock_irqsave(&bus->lock, flags); + if (bus->slave) { + spin_unlock_irqrestore(&bus->lock, flags); + return -EINVAL; + } + + __aspeed_i2c_reg_slave(bus, client->addr); + + bus->slave = client; + bus->slave_state = ASPEED_I2C_SLAVE_STOP; + spin_unlock_irqrestore(&bus->lock, flags); + + return 0; +} + +static int aspeed_i2c_unreg_slave(struct i2c_client *client) +{ + struct aspeed_i2c_bus *bus = i2c_get_adapdata(client->adapter); + u32 func_ctrl_reg_val; + unsigned long flags; + + spin_lock_irqsave(&bus->lock, flags); + if (!bus->slave) { + spin_unlock_irqrestore(&bus->lock, flags); + return -EINVAL; + } + + /* Turn off slave mode. 
*/ + func_ctrl_reg_val = readl(bus->base + ASPEED_I2C_FUN_CTRL_REG); + func_ctrl_reg_val &= ~ASPEED_I2CD_SLAVE_EN; + writel(func_ctrl_reg_val, bus->base + ASPEED_I2C_FUN_CTRL_REG); + + bus->slave = NULL; + spin_unlock_irqrestore(&bus->lock, flags); + + return 0; +} +#endif /* CONFIG_I2C_SLAVE */ + +static const struct i2c_algorithm aspeed_i2c_algo = { + .master_xfer = aspeed_i2c_master_xfer, + .functionality = aspeed_i2c_functionality, +#if IS_ENABLED(CONFIG_I2C_SLAVE) + .reg_slave = aspeed_i2c_reg_slave, + .unreg_slave = aspeed_i2c_unreg_slave, +#endif /* CONFIG_I2C_SLAVE */ +}; + +static u32 aspeed_i2c_get_clk_reg_val(u32 divisor) +{ + u32 base_clk, clk_high, clk_low, tmp; + + /* + * The actual clock frequency of SCL is: + * SCL_freq = APB_freq / (base_freq * (SCL_high + SCL_low)) + * = APB_freq / divisor + * where base_freq is a programmable clock divider; its value is + * base_freq = 1 << base_clk + * SCL_high is the number of base_freq clock cycles that SCL stays high + * and SCL_low is the number of base_freq clock cycles that SCL stays + * low for a period of SCL. + * The actual register has a minimum of 1 for both SCL_high and SCL_low; + * thus, they start counting at zero. So + * SCL_high = clk_high + 1 + * SCL_low = clk_low + 1 + * Thus, + * SCL_freq = APB_freq / + * ((1 << base_clk) * (clk_high + 1 + clk_low + 1)) + * The documentation recommends clk_high >= 8 and clk_low >= 7 when + * possible; this last constraint gives us the following solution: + */ + base_clk = divisor > 33 ? ilog2((divisor - 1) / 32) + 1 : 0; + tmp = divisor / (1 << base_clk); + clk_high = tmp / 2 + tmp % 2; + clk_low = tmp - clk_high; + + clk_high -= 1; + clk_low -= 1; + + return ((clk_high << ASPEED_I2CD_TIME_SCL_HIGH_SHIFT) + & ASPEED_I2CD_TIME_SCL_HIGH_MASK) + | ((clk_low << ASPEED_I2CD_TIME_SCL_LOW_SHIFT) + & ASPEED_I2CD_TIME_SCL_LOW_MASK) + | (base_clk & ASPEED_I2CD_TIME_BASE_DIVISOR_MASK); +} + +/* precondition: bus.lock has been acquired. */ +static int aspeed_i2c_init_clk(struct aspeed_i2c_bus *bus) +{ + u32 divisor, clk_reg_val; + + divisor = bus->parent_clk_frequency / bus->bus_frequency; + clk_reg_val = aspeed_i2c_get_clk_reg_val(divisor); + writel(clk_reg_val, bus->base + ASPEED_I2C_AC_TIMING_REG1); + writel(ASPEED_NO_TIMEOUT_CTRL, bus->base + ASPEED_I2C_AC_TIMING_REG2); + + return 0; +} + +/* precondition: bus.lock has been acquired. */ +static int aspeed_i2c_init(struct aspeed_i2c_bus *bus, + struct platform_device *pdev) +{ + u32 fun_ctrl_reg = ASPEED_I2CD_MASTER_EN; + int ret; + + /* Disable everything. */ + writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG); + + ret = aspeed_i2c_init_clk(bus); + if (ret < 0) + return ret; + + if (!of_property_read_bool(pdev->dev.of_node, "multi-master")) + fun_ctrl_reg |= ASPEED_I2CD_MULTI_MASTER_DIS; + + /* Enable Master Mode */ + writel(readl(bus->base + ASPEED_I2C_FUN_CTRL_REG) | fun_ctrl_reg, + bus->base + ASPEED_I2C_FUN_CTRL_REG); + +#if IS_ENABLED(CONFIG_I2C_SLAVE) + /* If slave has already been registered, re-enable it. */ + if (bus->slave) + __aspeed_i2c_reg_slave(bus, bus->slave->addr); +#endif /* CONFIG_I2C_SLAVE */ + + /* Set interrupt generation of I2C controller */ + writel(ASPEED_I2CD_INTR_ALL, bus->base + ASPEED_I2C_INTR_CTRL_REG); + + return 0; +} + +static int aspeed_i2c_reset(struct aspeed_i2c_bus *bus) +{ + struct platform_device *pdev = to_platform_device(bus->dev); + unsigned long flags; + int ret; + + spin_lock_irqsave(&bus->lock, flags); + + /* Disable and ack all interrupts.
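* (A sketch of the sequence, not patch text: writing 0 to INTR_CTRL masks
* every source, writing 0xffffffff to INTR_STS clears anything latched,
* and aspeed_i2c_init() then reprograms the bus from a clean slate.)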
*/ + writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG); + writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG); + + ret = aspeed_i2c_init(bus, pdev); + + spin_unlock_irqrestore(&bus->lock, flags); + + return ret; +} + +static int aspeed_i2c_probe_bus(struct platform_device *pdev) +{ + struct aspeed_i2c_bus *bus; + struct clk *parent_clk; + struct resource *res; + int irq, ret; + + bus = devm_kzalloc(&pdev->dev, sizeof(*bus), GFP_KERNEL); + if (!bus) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bus->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(bus->base)) + return PTR_ERR(bus->base); + + parent_clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(parent_clk)) + return PTR_ERR(parent_clk); + bus->parent_clk_frequency = clk_get_rate(parent_clk); + /* We just need the clock rate, we don't actually use the clk object. */ + devm_clk_put(&pdev->dev, parent_clk); + + ret = of_property_read_u32(pdev->dev.of_node, + "bus-frequency", &bus->bus_frequency); + if (ret < 0) { + dev_err(&pdev->dev, + "Could not read bus-frequency property\n"); + bus->bus_frequency = 100000; + } + + /* Initialize the I2C adapter */ + spin_lock_init(&bus->lock); + init_completion(&bus->cmd_complete); + bus->adap.owner = THIS_MODULE; + bus->adap.retries = 0; + bus->adap.timeout = 5 * HZ; + bus->adap.algo = &aspeed_i2c_algo; + bus->adap.dev.parent = &pdev->dev; + bus->adap.dev.of_node = pdev->dev.of_node; + strlcpy(bus->adap.name, pdev->name, sizeof(bus->adap.name)); + i2c_set_adapdata(&bus->adap, bus); + + bus->dev = &pdev->dev; + + /* Clean up any left over interrupt state. */ + writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG); + writel(0xffffffff, bus->base + ASPEED_I2C_INTR_STS_REG); + /* + * bus.lock does not need to be held because the interrupt handler has + * not been enabled yet. + */ + ret = aspeed_i2c_init(bus, pdev); + if (ret < 0) + return ret; + + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + ret = devm_request_irq(&pdev->dev, irq, aspeed_i2c_bus_irq, + 0, dev_name(&pdev->dev), bus); + if (ret < 0) + return ret; + + ret = i2c_add_adapter(&bus->adap); + if (ret < 0) + return ret; + + platform_set_drvdata(pdev, bus); + + dev_info(bus->dev, "i2c bus %d registered, irq %d\n", + bus->adap.nr, irq); + + return 0; +} + +static int aspeed_i2c_remove_bus(struct platform_device *pdev) +{ + struct aspeed_i2c_bus *bus = platform_get_drvdata(pdev); + unsigned long flags; + + spin_lock_irqsave(&bus->lock, flags); + + /* Disable everything. 
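* (Teardown note, not patch text: quiescing FUN_CTRL and INTR_CTRL before
* i2c_del_adapter() keeps a late interrupt from reaching a half-removed
* adapter.)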
*/ + writel(0, bus->base + ASPEED_I2C_FUN_CTRL_REG); + writel(0, bus->base + ASPEED_I2C_INTR_CTRL_REG); + + spin_unlock_irqrestore(&bus->lock, flags); + + i2c_del_adapter(&bus->adap); + + return 0; +} + +static const struct of_device_id aspeed_i2c_bus_of_table[] = { + { .compatible = "aspeed,ast2400-i2c-bus", }, + { .compatible = "aspeed,ast2500-i2c-bus", }, + { }, +}; +MODULE_DEVICE_TABLE(of, aspeed_i2c_bus_of_table); + +static struct platform_driver aspeed_i2c_bus_driver = { + .probe = aspeed_i2c_probe_bus, + .remove = aspeed_i2c_remove_bus, + .driver = { + .name = "aspeed-i2c-bus", + .of_match_table = aspeed_i2c_bus_of_table, + }, +}; +module_platform_driver(aspeed_i2c_bus_driver); + +MODULE_AUTHOR("Brendan Higgins <brendanhiggins@google.com>"); +MODULE_DESCRIPTION("Aspeed I2C Bus Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c index fabbb9e49161..38dd61d621df 100644 --- a/drivers/i2c/busses/i2c-at91.c +++ b/drivers/i2c/busses/i2c-at91.c @@ -274,7 +274,7 @@ static void at91_twi_write_next_byte(struct at91_twi_dev *dev) if (!dev->use_alt_cmd) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); - dev_dbg(dev->dev, "wrote 0x%x, to go %d\n", *dev->buf, dev->buf_len); + dev_dbg(dev->dev, "wrote 0x%x, to go %zu\n", *dev->buf, dev->buf_len); ++dev->buf; } @@ -402,7 +402,7 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev) dev->msg->flags &= ~I2C_M_RECV_LEN; dev->buf_len += *dev->buf; dev->msg->len = dev->buf_len + 1; - dev_dbg(dev->dev, "received block length %d\n", + dev_dbg(dev->dev, "received block length %zu\n", dev->buf_len); } else { /* abort and send the stop by reading one more byte */ @@ -415,7 +415,7 @@ static void at91_twi_read_next_byte(struct at91_twi_dev *dev) if (!dev->use_alt_cmd && dev->buf_len == 1) at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_STOP); - dev_dbg(dev->dev, "read 0x%x, to go %d\n", *dev->buf, dev->buf_len); + dev_dbg(dev->dev, "read 0x%x, to go %zu\n", *dev->buf, dev->buf_len); ++dev->buf; } @@ -622,7 +622,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev) * writing the corresponding bit into the Control Register. */ - dev_dbg(dev->dev, "transfer: %s %d bytes.\n", + dev_dbg(dev->dev, "transfer: %s %zu bytes.\n", (dev->msg->flags & I2C_M_RD) ? 
"read" : "write", dev->buf_len); reinit_completion(&dev->cmd_complete); @@ -1083,12 +1083,16 @@ static int at91_twi_probe(struct platform_device *pdev) dev_err(dev->dev, "no clock defined\n"); return -ENODEV; } - clk_prepare_enable(dev->clk); + rc = clk_prepare_enable(dev->clk); + if (rc) + return rc; if (dev->dev->of_node) { rc = at91_twi_configure_dma(dev, phy_addr); - if (rc == -EPROBE_DEFER) + if (rc == -EPROBE_DEFER) { + clk_disable_unprepare(dev->clk); return rc; + } } if (!of_property_read_u32(pdev->dev.of_node, "atmel,fifo-size", diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c index 45d6771fac8c..75d80161931f 100644 --- a/drivers/i2c/busses/i2c-cadence.c +++ b/drivers/i2c/busses/i2c-cadence.c @@ -405,14 +405,14 @@ static void cdns_i2c_mrecv(struct cdns_i2c *id) cdns_i2c_writereg(id->recv_count, CDNS_I2C_XFER_SIZE_OFFSET); } + /* Set the slave address in address register - triggers operation */ + cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK, + CDNS_I2C_ADDR_OFFSET); /* Clear the bus hold flag if bytes to receive is less than FIFO size */ if (!id->bus_hold_flag && ((id->p_msg->flags & I2C_M_RECV_LEN) != I2C_M_RECV_LEN) && (id->recv_count <= CDNS_I2C_FIFO_DEPTH)) cdns_i2c_clear_bus_hold(id); - /* Set the slave address in address register - triggers operation */ - cdns_i2c_writereg(id->p_msg->addr & CDNS_I2C_ADDR_MASK, - CDNS_I2C_ADDR_OFFSET); cdns_i2c_writereg(CDNS_I2C_ENABLED_INTR_MASK, CDNS_I2C_IER_OFFSET); } diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c new file mode 100644 index 000000000000..d1a69372432f --- /dev/null +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -0,0 +1,281 @@ +/* + * Synopsys DesignWare I2C adapter driver. + * + * Based on the TI DAVINCI I2C adapter driver. + * + * Copyright (C) 2006 Texas Instruments. + * Copyright (C) 2007 MontaVista Software Inc. + * Copyright (C) 2009 Provigent Ltd. + * + * ---------------------------------------------------------------------------- + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * ---------------------------------------------------------------------------- + * + */ +#include <linux/delay.h> +#include <linux/export.h> +#include <linux/errno.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/pm_runtime.h> + +#include "i2c-designware-core.h" + +static char *abort_sources[] = { + [ABRT_7B_ADDR_NOACK] = + "slave address not acknowledged (7bit mode)", + [ABRT_10ADDR1_NOACK] = + "first address byte not acknowledged (10bit mode)", + [ABRT_10ADDR2_NOACK] = + "second address byte not acknowledged (10bit mode)", + [ABRT_TXDATA_NOACK] = + "data not acknowledged", + [ABRT_GCALL_NOACK] = + "no acknowledgement for a general call", + [ABRT_GCALL_READ] = + "read after general call", + [ABRT_SBYTE_ACKDET] = + "start byte acknowledged", + [ABRT_SBYTE_NORSTRT] = + "trying to send start byte when restart is disabled", + [ABRT_10B_RD_NORSTRT] = + "trying to read when restart is disabled (10bit mode)", + [ABRT_MASTER_DIS] = + "trying to use disabled adapter", + [ARB_LOST] = + "lost arbitration", + [ABRT_SLAVE_FLUSH_TXFIFO] = + "read command so flush old data in the TX FIFO", + [ABRT_SLAVE_ARBLOST] = + "slave lost the bus while transmitting data to a remote master", + [ABRT_SLAVE_RD_INTX] = + "incorrect slave-transmitter mode configuration", +}; + +u32 dw_readl(struct dw_i2c_dev *dev, int offset) +{ + u32 value; + + if (dev->flags & ACCESS_16BIT) + value = readw_relaxed(dev->base + offset) | + (readw_relaxed(dev->base + offset + 2) << 16); + else + value = readl_relaxed(dev->base + offset); + + if (dev->flags & ACCESS_SWAP) + return swab32(value); + else + return value; +} + +void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset) +{ + if (dev->flags & ACCESS_SWAP) + b = swab32(b); + + if (dev->flags & ACCESS_16BIT) { + writew_relaxed((u16)b, dev->base + offset); + writew_relaxed((u16)(b >> 16), dev->base + offset + 2); + } else { + writel_relaxed(b, dev->base + offset); + } +} + +u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) +{ + /* + * DesignWare I2C core doesn't seem to have a solid strategy to meet + * the tHD;STA timing spec. Configuring _HCNT based on the tHIGH spec + * will result in violation of the tHD;STA spec. + */ + if (cond) + /* + * Conditional expression: + * + * IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH + * + * This is based on the DW manuals, and represents an ideal + * configuration. The resulting I2C bus speed will be + * faster than any of the others. + * + * If your hardware is free from the tHD;STA issue, try this one. + */ + return (ic_clk * tSYMBOL + 500000) / 1000000 - 8 + offset; + else + /* + * Conditional expression: + * + * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf) + * + * This is just an experimental rule; the tHD;STA period turned + * out to be proportional to (_HCNT + 3). With this setting, + * we could meet both tHIGH and tHD;STA timing specs. + * + * If unsure, you'd better take this alternative. + * + * The reason why we need to take into account "tf" here + * is the same as described in i2c_dw_scl_lcnt(). + */ + return (ic_clk * (tSYMBOL + tf) + 500000) / 1000000 + - 3 + offset; +} + +u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset) +{ + /* + * Conditional expression: + * + * IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (tLOW + tf) + * + * DW I2C core starts counting the SCL CNTs for the LOW period + * of the SCL clock (tLOW) as soon as it pulls the SCL line.
+ * In order to meet the tLOW timing spec, we need to take into + * account the fall time of SCL signal (tf). Default tf value + * should be 0.3 us, for safety. + */ + return ((ic_clk * (tLOW + tf) + 500000) / 1000000) - 1 + offset; +} + +void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable) +{ + dw_writel(dev, enable, DW_IC_ENABLE); +} + +void __i2c_dw_enable_and_wait(struct dw_i2c_dev *dev, bool enable) +{ + int timeout = 100; + + do { + __i2c_dw_enable(dev, enable); + if ((dw_readl(dev, DW_IC_ENABLE_STATUS) & 1) == enable) + return; + + /* + * Wait 10 times the signaling period of the highest I2C + * transfer supported by the driver (for 400KHz this is + * 25us) as described in the DesignWare I2C databook. + */ + usleep_range(25, 250); + } while (timeout--); + + dev_warn(dev->dev, "timeout in %sabling adapter\n", + enable ? "en" : "dis"); +} + +unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev) +{ + /* + * Clock is not necessary if we got LCNT/HCNT values directly from + * the platform code. + */ + if (WARN_ON_ONCE(!dev->get_clk_rate_khz)) + return 0; + return dev->get_clk_rate_khz(dev); +} + +int i2c_dw_acquire_lock(struct dw_i2c_dev *dev) +{ + int ret; + + if (!dev->acquire_lock) + return 0; + + ret = dev->acquire_lock(dev); + if (!ret) + return 0; + + dev_err(dev->dev, "couldn't acquire bus ownership\n"); + + return ret; +} + +void i2c_dw_release_lock(struct dw_i2c_dev *dev) +{ + if (dev->release_lock) + dev->release_lock(dev); +} + +/* + * Waiting for bus not busy + */ +int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev) +{ + int timeout = TIMEOUT; + + while (dw_readl(dev, DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) { + if (timeout <= 0) { + dev_warn(dev->dev, "timeout waiting for bus ready\n"); + return -ETIMEDOUT; + } + timeout--; + usleep_range(1000, 1100); + } + + return 0; +} + +int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev) +{ + unsigned long abort_source = dev->abort_source; + int i; + + if (abort_source & DW_IC_TX_ABRT_NOACK) { + for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) + dev_dbg(dev->dev, + "%s: %s\n", __func__, abort_sources[i]); + return -EREMOTEIO; + } + + for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) + dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]); + + if (abort_source & DW_IC_TX_ARB_LOST) + return -EAGAIN; + else if (abort_source & DW_IC_TX_ABRT_GCALL_READ) + return -EINVAL; /* wrong msgs[] data */ + else + return -EIO; +} + +u32 i2c_dw_func(struct i2c_adapter *adap) +{ + struct dw_i2c_dev *dev = i2c_get_adapdata(adap); + + return dev->functionality; +} + +void i2c_dw_disable(struct dw_i2c_dev *dev) +{ + /* Disable controller */ + __i2c_dw_enable_and_wait(dev, false); + + /* Disable all interrupts */ + dw_writel(dev, 0, DW_IC_INTR_MASK); + dw_readl(dev, DW_IC_CLR_INTR); +} + +void i2c_dw_disable_int(struct dw_i2c_dev *dev) +{ + dw_writel(dev, 0, DW_IC_INTR_MASK); +} + +u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev) +{ + return dw_readl(dev, DW_IC_COMP_PARAM_1); +} +EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param); + +MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core"); +MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index a7cf429daf60..9fee4c054d3d 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -1,5 +1,5 @@ /* - * Synopsys DesignWare I2C adapter driver (master only). + * Synopsys DesignWare I2C adapter driver. * * Based on the TI DAVINCI I2C adapter driver.
* @@ -37,9 +37,152 @@ #define DW_IC_CON_SPEED_FAST 0x4 #define DW_IC_CON_SPEED_HIGH 0x6 #define DW_IC_CON_SPEED_MASK 0x6 +#define DW_IC_CON_10BITADDR_SLAVE 0x8 #define DW_IC_CON_10BITADDR_MASTER 0x10 #define DW_IC_CON_RESTART_EN 0x20 #define DW_IC_CON_SLAVE_DISABLE 0x40 +#define DW_IC_CON_STOP_DET_IFADDRESSED 0x80 +#define DW_IC_CON_TX_EMPTY_CTRL 0x100 +#define DW_IC_CON_RX_FIFO_FULL_HLD_CTRL 0x200 + +/* + * Registers offset + */ +#define DW_IC_CON 0x0 +#define DW_IC_TAR 0x4 +#define DW_IC_SAR 0x8 +#define DW_IC_DATA_CMD 0x10 +#define DW_IC_SS_SCL_HCNT 0x14 +#define DW_IC_SS_SCL_LCNT 0x18 +#define DW_IC_FS_SCL_HCNT 0x1c +#define DW_IC_FS_SCL_LCNT 0x20 +#define DW_IC_HS_SCL_HCNT 0x24 +#define DW_IC_HS_SCL_LCNT 0x28 +#define DW_IC_INTR_STAT 0x2c +#define DW_IC_INTR_MASK 0x30 +#define DW_IC_RAW_INTR_STAT 0x34 +#define DW_IC_RX_TL 0x38 +#define DW_IC_TX_TL 0x3c +#define DW_IC_CLR_INTR 0x40 +#define DW_IC_CLR_RX_UNDER 0x44 +#define DW_IC_CLR_RX_OVER 0x48 +#define DW_IC_CLR_TX_OVER 0x4c +#define DW_IC_CLR_RD_REQ 0x50 +#define DW_IC_CLR_TX_ABRT 0x54 +#define DW_IC_CLR_RX_DONE 0x58 +#define DW_IC_CLR_ACTIVITY 0x5c +#define DW_IC_CLR_STOP_DET 0x60 +#define DW_IC_CLR_START_DET 0x64 +#define DW_IC_CLR_GEN_CALL 0x68 +#define DW_IC_ENABLE 0x6c +#define DW_IC_STATUS 0x70 +#define DW_IC_TXFLR 0x74 +#define DW_IC_RXFLR 0x78 +#define DW_IC_SDA_HOLD 0x7c +#define DW_IC_TX_ABRT_SOURCE 0x80 +#define DW_IC_ENABLE_STATUS 0x9c +#define DW_IC_CLR_RESTART_DET 0xa8 +#define DW_IC_COMP_PARAM_1 0xf4 +#define DW_IC_COMP_VERSION 0xf8 +#define DW_IC_SDA_HOLD_MIN_VERS 0x3131312A +#define DW_IC_COMP_TYPE 0xfc +#define DW_IC_COMP_TYPE_VALUE 0x44570140 + +#define DW_IC_INTR_RX_UNDER 0x001 +#define DW_IC_INTR_RX_OVER 0x002 +#define DW_IC_INTR_RX_FULL 0x004 +#define DW_IC_INTR_TX_OVER 0x008 +#define DW_IC_INTR_TX_EMPTY 0x010 +#define DW_IC_INTR_RD_REQ 0x020 +#define DW_IC_INTR_TX_ABRT 0x040 +#define DW_IC_INTR_RX_DONE 0x080 +#define DW_IC_INTR_ACTIVITY 0x100 +#define DW_IC_INTR_STOP_DET 0x200 +#define DW_IC_INTR_START_DET 0x400 +#define DW_IC_INTR_GEN_CALL 0x800 +#define DW_IC_INTR_RESTART_DET 0x1000 + +#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \ + DW_IC_INTR_TX_ABRT | \ + DW_IC_INTR_STOP_DET) +#define DW_IC_INTR_MASTER_MASK (DW_IC_INTR_DEFAULT_MASK | \ + DW_IC_INTR_TX_EMPTY) +#define DW_IC_INTR_SLAVE_MASK (DW_IC_INTR_DEFAULT_MASK | \ + DW_IC_INTR_RX_DONE | \ + DW_IC_INTR_RX_UNDER | \ + DW_IC_INTR_RD_REQ) + +#define DW_IC_STATUS_ACTIVITY 0x1 +#define DW_IC_STATUS_TFE BIT(2) +#define DW_IC_STATUS_MASTER_ACTIVITY BIT(5) +#define DW_IC_STATUS_SLAVE_ACTIVITY BIT(6) + +#define DW_IC_SDA_HOLD_RX_SHIFT 16 +#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT) + +#define DW_IC_ERR_TX_ABRT 0x1 + +#define DW_IC_TAR_10BITADDR_MASTER BIT(12) + +#define DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH (BIT(2) | BIT(3)) +#define DW_IC_COMP_PARAM_1_SPEED_MODE_MASK GENMASK(3, 2) + +/* + * status codes + */ +#define STATUS_IDLE 0x0 +#define STATUS_WRITE_IN_PROGRESS 0x1 +#define STATUS_READ_IN_PROGRESS 0x2 + +#define TIMEOUT 20 /* ms */ + +/* + * operation modes + */ +#define DW_IC_MASTER 0 +#define DW_IC_SLAVE 1 + +/* + * Hardware abort codes from the DW_IC_TX_ABRT_SOURCE register + * + * Only expected abort codes are listed here + * refer to the datasheet for the full list + */ +#define ABRT_7B_ADDR_NOACK 0 +#define ABRT_10ADDR1_NOACK 1 +#define ABRT_10ADDR2_NOACK 2 +#define ABRT_TXDATA_NOACK 3 +#define ABRT_GCALL_NOACK 4 +#define ABRT_GCALL_READ 5 +#define ABRT_SBYTE_ACKDET 7 +#define ABRT_SBYTE_NORSTRT 9 +#define 
ABRT_10B_RD_NORSTRT 10 +#define ABRT_MASTER_DIS 11 +#define ARB_LOST 12 +#define ABRT_SLAVE_FLUSH_TXFIFO 13 +#define ABRT_SLAVE_ARBLOST 14 +#define ABRT_SLAVE_RD_INTX 15 + +#define DW_IC_TX_ABRT_7B_ADDR_NOACK (1UL << ABRT_7B_ADDR_NOACK) +#define DW_IC_TX_ABRT_10ADDR1_NOACK (1UL << ABRT_10ADDR1_NOACK) +#define DW_IC_TX_ABRT_10ADDR2_NOACK (1UL << ABRT_10ADDR2_NOACK) +#define DW_IC_TX_ABRT_TXDATA_NOACK (1UL << ABRT_TXDATA_NOACK) +#define DW_IC_TX_ABRT_GCALL_NOACK (1UL << ABRT_GCALL_NOACK) +#define DW_IC_TX_ABRT_GCALL_READ (1UL << ABRT_GCALL_READ) +#define DW_IC_TX_ABRT_SBYTE_ACKDET (1UL << ABRT_SBYTE_ACKDET) +#define DW_IC_TX_ABRT_SBYTE_NORSTRT (1UL << ABRT_SBYTE_NORSTRT) +#define DW_IC_TX_ABRT_10B_RD_NORSTRT (1UL << ABRT_10B_RD_NORSTRT) +#define DW_IC_TX_ABRT_MASTER_DIS (1UL << ABRT_MASTER_DIS) +#define DW_IC_TX_ARB_LOST (1UL << ARB_LOST) +#define DW_IC_RX_ABRT_SLAVE_RD_INTX (1UL << ABRT_SLAVE_RD_INTX) +#define DW_IC_RX_ABRT_SLAVE_ARBLOST (1UL << ABRT_SLAVE_ARBLOST) +#define DW_IC_RX_ABRT_SLAVE_FLUSH_TXFIFO (1UL << ABRT_SLAVE_FLUSH_TXFIFO) + +#define DW_IC_TX_ABRT_NOACK (DW_IC_TX_ABRT_7B_ADDR_NOACK | \ + DW_IC_TX_ABRT_10ADDR1_NOACK | \ + DW_IC_TX_ABRT_10ADDR2_NOACK | \ + DW_IC_TX_ABRT_TXDATA_NOACK | \ + DW_IC_TX_ABRT_GCALL_NOACK) /** @@ -48,8 +191,9 @@ * @base: IO registers pointer * @cmd_complete: tx completion indicator * @clk: input reference clock + * @slave: represents an I2C slave device * @cmd_err: run time hardware error code - * @msgs: points to an array of messages currently being transfered + * @msgs: points to an array of messages currently being transferred * @msgs_num: the number of elements in msgs * @msg_write_idx: the element index of the current tx message in the msgs * array @@ -64,6 +208,7 @@ * @abort_source: copy of the TX_ABRT_SOURCE register * @irq: interrupt number for the i2c master * @adapter: i2c subsystem adapter node + * @slave_cfg: configuration for the slave device * @tx_fifo_depth: depth of the hardware tx fifo * @rx_fifo_depth: depth of the hardware rx fifo * @rx_outstanding: current master-rx elements in tx fifo @@ -80,6 +225,10 @@ * @acquire_lock: function to acquire a hardware lock on the bus * @release_lock: function to release a hardware lock on the bus * @pm_disabled: true if power-management should be disabled for this i2c-bus + * @disable: function to disable the controller + * @disable_int: function to disable all interrupts + * @init: function to initialize the I2C hardware + * @mode: operation mode - DW_IC_MASTER or DW_IC_SLAVE * * HCNT and LCNT parameters can be used if the platform knows more accurate * values than the one computed based only on the input clock frequency.
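To make the SCL count formulas from i2c-designware-common.c above concrete, here is a small standalone sketch (assumed example numbers: a 100 MHz input clock, i.e. ic_clk = 100000 kHz, and the I2C standard-mode figures tLOW = 4700 ns and tf = 300 ns; the helper simply mirrors i2c_dw_scl_lcnt()):

#include <stdio.h>

/* Mirrors i2c_dw_scl_lcnt(): ic_clk in kHz, tLOW and tf in ns. */
static unsigned int scl_lcnt(unsigned int ic_clk, unsigned int tlow,
			     unsigned int tf, int offset)
{
	return ((ic_clk * (tlow + tf) + 500000) / 1000000) - 1 + offset;
}

int main(void)
{
	/* 100 MHz clock with I2C standard-mode timings (assumed values). */
	printf("SS_SCL_LCNT = %u\n", scl_lcnt(100000, 4700, 300, 0));
	/* Prints 499: the SCL low period spans 500 input clock cycles, and
	 * the IP adds one cycle on top of the programmed count. */
	return 0;
}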
@@ -91,6 +240,7 @@ struct dw_i2c_dev { struct completion cmd_complete; struct clk *clk; struct reset_control *rst; + struct i2c_client *slave; u32 (*get_clk_rate_khz) (struct dw_i2c_dev *dev); struct dw_pci_controller *controller; int cmd_err; @@ -110,6 +260,7 @@ struct dw_i2c_dev { struct i2c_adapter adapter; u32 functionality; u32 master_cfg; + u32 slave_cfg; unsigned int tx_fifo_depth; unsigned int rx_fifo_depth; int rx_outstanding; @@ -129,6 +280,10 @@ struct dw_i2c_dev { int (*acquire_lock)(struct dw_i2c_dev *dev); void (*release_lock)(struct dw_i2c_dev *dev); bool pm_disabled; + void (*disable)(struct dw_i2c_dev *dev); + void (*disable_int)(struct dw_i2c_dev *dev); + int (*init)(struct dw_i2c_dev *dev); + int mode; }; #define ACCESS_SWAP 0x00000001 @@ -137,11 +292,28 @@ struct dw_i2c_dev { #define MODEL_CHERRYTRAIL 0x00000100 -extern int i2c_dw_init(struct dw_i2c_dev *dev); -extern void i2c_dw_disable(struct dw_i2c_dev *dev); -extern void i2c_dw_disable_int(struct dw_i2c_dev *dev); +u32 dw_readl(struct dw_i2c_dev *dev, int offset); +void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset); +u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset); +u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset); +void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable); +void __i2c_dw_enable_and_wait(struct dw_i2c_dev *dev, bool enable); +unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev); +int i2c_dw_acquire_lock(struct dw_i2c_dev *dev); +void i2c_dw_release_lock(struct dw_i2c_dev *dev); +int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev); +int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev); +u32 i2c_dw_func(struct i2c_adapter *adap); +void i2c_dw_disable(struct dw_i2c_dev *dev); +void i2c_dw_disable_int(struct dw_i2c_dev *dev); + extern u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev); extern int i2c_dw_probe(struct dw_i2c_dev *dev); +#if IS_ENABLED(CONFIG_I2C_DESIGNWARE_SLAVE) +extern int i2c_dw_probe_slave(struct dw_i2c_dev *dev); +#else +static inline int i2c_dw_probe_slave(struct dw_i2c_dev *dev) { return -EINVAL; } +#endif #if IS_ENABLED(CONFIG_I2C_DESIGNWARE_BAYTRAIL) extern int i2c_dw_probe_lock_support(struct dw_i2c_dev *dev); diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-master.c index c453717b753b..418c233075d3 100644 --- a/drivers/i2c/busses/i2c-designware-core.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -21,311 +21,37 @@ * ---------------------------------------------------------------------------- * */ -#include <linux/export.h> -#include <linux/errno.h> +#include <linux/delay.h> #include <linux/err.h> +#include <linux/errno.h> +#include <linux/export.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> -#include <linux/pm_runtime.h> -#include <linux/delay.h> #include <linux/module.h> -#include "i2c-designware-core.h" - -/* - * Registers offset - */ -#define DW_IC_CON 0x0 -#define DW_IC_TAR 0x4 -#define DW_IC_DATA_CMD 0x10 -#define DW_IC_SS_SCL_HCNT 0x14 -#define DW_IC_SS_SCL_LCNT 0x18 -#define DW_IC_FS_SCL_HCNT 0x1c -#define DW_IC_FS_SCL_LCNT 0x20 -#define DW_IC_HS_SCL_HCNT 0x24 -#define DW_IC_HS_SCL_LCNT 0x28 -#define DW_IC_INTR_STAT 0x2c -#define DW_IC_INTR_MASK 0x30 -#define DW_IC_RAW_INTR_STAT 0x34 -#define DW_IC_RX_TL 0x38 -#define DW_IC_TX_TL 0x3c -#define DW_IC_CLR_INTR 0x40 -#define DW_IC_CLR_RX_UNDER 0x44 -#define DW_IC_CLR_RX_OVER 0x48 -#define DW_IC_CLR_TX_OVER 0x4c -#define DW_IC_CLR_RD_REQ 0x50 -#define DW_IC_CLR_TX_ABRT 0x54 -#define DW_IC_CLR_RX_DONE 
0x58 -#define DW_IC_CLR_ACTIVITY 0x5c -#define DW_IC_CLR_STOP_DET 0x60 -#define DW_IC_CLR_START_DET 0x64 -#define DW_IC_CLR_GEN_CALL 0x68 -#define DW_IC_ENABLE 0x6c -#define DW_IC_STATUS 0x70 -#define DW_IC_TXFLR 0x74 -#define DW_IC_RXFLR 0x78 -#define DW_IC_SDA_HOLD 0x7c -#define DW_IC_TX_ABRT_SOURCE 0x80 -#define DW_IC_ENABLE_STATUS 0x9c -#define DW_IC_COMP_PARAM_1 0xf4 -#define DW_IC_COMP_VERSION 0xf8 -#define DW_IC_SDA_HOLD_MIN_VERS 0x3131312A -#define DW_IC_COMP_TYPE 0xfc -#define DW_IC_COMP_TYPE_VALUE 0x44570140 - -#define DW_IC_INTR_RX_UNDER 0x001 -#define DW_IC_INTR_RX_OVER 0x002 -#define DW_IC_INTR_RX_FULL 0x004 -#define DW_IC_INTR_TX_OVER 0x008 -#define DW_IC_INTR_TX_EMPTY 0x010 -#define DW_IC_INTR_RD_REQ 0x020 -#define DW_IC_INTR_TX_ABRT 0x040 -#define DW_IC_INTR_RX_DONE 0x080 -#define DW_IC_INTR_ACTIVITY 0x100 -#define DW_IC_INTR_STOP_DET 0x200 -#define DW_IC_INTR_START_DET 0x400 -#define DW_IC_INTR_GEN_CALL 0x800 - -#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \ - DW_IC_INTR_TX_EMPTY | \ - DW_IC_INTR_TX_ABRT | \ - DW_IC_INTR_STOP_DET) - -#define DW_IC_STATUS_ACTIVITY 0x1 - -#define DW_IC_SDA_HOLD_RX_SHIFT 16 -#define DW_IC_SDA_HOLD_RX_MASK GENMASK(23, DW_IC_SDA_HOLD_RX_SHIFT) - -#define DW_IC_ERR_TX_ABRT 0x1 - -#define DW_IC_TAR_10BITADDR_MASTER BIT(12) - -#define DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH (BIT(2) | BIT(3)) -#define DW_IC_COMP_PARAM_1_SPEED_MODE_MASK GENMASK(3, 2) - -/* - * status codes - */ -#define STATUS_IDLE 0x0 -#define STATUS_WRITE_IN_PROGRESS 0x1 -#define STATUS_READ_IN_PROGRESS 0x2 - -#define TIMEOUT 20 /* ms */ - -/* - * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register - * - * only expected abort codes are listed here - * refer to the datasheet for the full list - */ -#define ABRT_7B_ADDR_NOACK 0 -#define ABRT_10ADDR1_NOACK 1 -#define ABRT_10ADDR2_NOACK 2 -#define ABRT_TXDATA_NOACK 3 -#define ABRT_GCALL_NOACK 4 -#define ABRT_GCALL_READ 5 -#define ABRT_SBYTE_ACKDET 7 -#define ABRT_SBYTE_NORSTRT 9 -#define ABRT_10B_RD_NORSTRT 10 -#define ABRT_MASTER_DIS 11 -#define ARB_LOST 12 - -#define DW_IC_TX_ABRT_7B_ADDR_NOACK (1UL << ABRT_7B_ADDR_NOACK) -#define DW_IC_TX_ABRT_10ADDR1_NOACK (1UL << ABRT_10ADDR1_NOACK) -#define DW_IC_TX_ABRT_10ADDR2_NOACK (1UL << ABRT_10ADDR2_NOACK) -#define DW_IC_TX_ABRT_TXDATA_NOACK (1UL << ABRT_TXDATA_NOACK) -#define DW_IC_TX_ABRT_GCALL_NOACK (1UL << ABRT_GCALL_NOACK) -#define DW_IC_TX_ABRT_GCALL_READ (1UL << ABRT_GCALL_READ) -#define DW_IC_TX_ABRT_SBYTE_ACKDET (1UL << ABRT_SBYTE_ACKDET) -#define DW_IC_TX_ABRT_SBYTE_NORSTRT (1UL << ABRT_SBYTE_NORSTRT) -#define DW_IC_TX_ABRT_10B_RD_NORSTRT (1UL << ABRT_10B_RD_NORSTRT) -#define DW_IC_TX_ABRT_MASTER_DIS (1UL << ABRT_MASTER_DIS) -#define DW_IC_TX_ARB_LOST (1UL << ARB_LOST) - -#define DW_IC_TX_ABRT_NOACK (DW_IC_TX_ABRT_7B_ADDR_NOACK | \ - DW_IC_TX_ABRT_10ADDR1_NOACK | \ - DW_IC_TX_ABRT_10ADDR2_NOACK | \ - DW_IC_TX_ABRT_TXDATA_NOACK | \ - DW_IC_TX_ABRT_GCALL_NOACK) - -static char *abort_sources[] = { - [ABRT_7B_ADDR_NOACK] = - "slave address not acknowledged (7bit mode)", - [ABRT_10ADDR1_NOACK] = - "first address byte not acknowledged (10bit mode)", - [ABRT_10ADDR2_NOACK] = - "second address byte not acknowledged (10bit mode)", - [ABRT_TXDATA_NOACK] = - "data not acknowledged", - [ABRT_GCALL_NOACK] = - "no acknowledgement for a general call", - [ABRT_GCALL_READ] = - "read after general call", - [ABRT_SBYTE_ACKDET] = - "start byte acknowledged", - [ABRT_SBYTE_NORSTRT] = - "trying to send start byte when restart is disabled", - [ABRT_10B_RD_NORSTRT] = - "trying to read when 
restart is disabled (10bit mode)", - [ABRT_MASTER_DIS] = - "trying to use disabled adapter", - [ARB_LOST] = - "lost arbitration", -}; - -static u32 dw_readl(struct dw_i2c_dev *dev, int offset) -{ - u32 value; - - if (dev->flags & ACCESS_16BIT) - value = readw_relaxed(dev->base + offset) | - (readw_relaxed(dev->base + offset + 2) << 16); - else - value = readl_relaxed(dev->base + offset); - - if (dev->flags & ACCESS_SWAP) - return swab32(value); - else - return value; -} - -static void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset) -{ - if (dev->flags & ACCESS_SWAP) - b = swab32(b); - - if (dev->flags & ACCESS_16BIT) { - writew_relaxed((u16)b, dev->base + offset); - writew_relaxed((u16)(b >> 16), dev->base + offset + 2); - } else { - writel_relaxed(b, dev->base + offset); - } -} - -static u32 -i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) -{ - /* - * DesignWare I2C core doesn't seem to have solid strategy to meet - * the tHD;STA timing spec. Configuring _HCNT based on tHIGH spec - * will result in violation of the tHD;STA spec. - */ - if (cond) - /* - * Conditional expression: - * - * IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH - * - * This is based on the DW manuals, and represents an ideal - * configuration. The resulting I2C bus speed will be - * faster than any of the others. - * - * If your hardware is free from tHD;STA issue, try this one. - */ - return (ic_clk * tSYMBOL + 500000) / 1000000 - 8 + offset; - else - /* - * Conditional expression: - * - * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf) - * - * This is just experimental rule; the tHD;STA period turned - * out to be proportinal to (_HCNT + 3). With this setting, - * we could meet both tHIGH and tHD;STA timing specs. - * - * If unsure, you'd better to take this alternative. - * - * The reason why we need to take into account "tf" here, - * is the same as described in i2c_dw_scl_lcnt(). - */ - return (ic_clk * (tSYMBOL + tf) + 500000) / 1000000 - - 3 + offset; -} - -static u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset) -{ - /* - * Conditional expression: - * - * IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (tLOW + tf) - * - * DW I2C core starts counting the SCL CNTs for the LOW period - * of the SCL clock (tLOW) as soon as it pulls the SCL line. - * In order to meet the tLOW timing spec, we need to take into - * account the fall time of SCL signal (tf). Default tf value - * should be 0.3 us, for safety. - */ - return ((ic_clk * (tLOW + tf) + 500000) / 1000000) - 1 + offset; -} - -static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable) -{ - dw_writel(dev, enable, DW_IC_ENABLE); -} - -static void __i2c_dw_enable_and_wait(struct dw_i2c_dev *dev, bool enable) -{ - int timeout = 100; - - do { - __i2c_dw_enable(dev, enable); - if ((dw_readl(dev, DW_IC_ENABLE_STATUS) & 1) == enable) - return; - - /* - * Wait 10 times the signaling period of the highest I2C - * transfer supported by the driver (for 400KHz this is - * 25us) as described in the DesignWare I2C databook. - */ - usleep_range(25, 250); - } while (timeout--); - - dev_warn(dev->dev, "timeout in %sabling adapter\n", - enable ? "en" : "dis"); -} +#include <linux/pm_runtime.h> -static unsigned long i2c_dw_clk_rate(struct dw_i2c_dev *dev) -{ - /* - * Clock is not necessary if we got LCNT/HCNT values directly from - * the platform code. 
- */ - if (WARN_ON_ONCE(!dev->get_clk_rate_khz)) - return 0; - return dev->get_clk_rate_khz(dev); -} +#include "i2c-designware-core.h" -static int i2c_dw_acquire_lock(struct dw_i2c_dev *dev) +static void i2c_dw_configure_fifo_master(struct dw_i2c_dev *dev) { - int ret; - - if (!dev->acquire_lock) - return 0; - - ret = dev->acquire_lock(dev); - if (!ret) - return 0; - - dev_err(dev->dev, "couldn't acquire bus ownership\n"); - - return ret; -} + /* Configure Tx/Rx FIFO threshold levels */ + dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL); + dw_writel(dev, 0, DW_IC_RX_TL); -static void i2c_dw_release_lock(struct dw_i2c_dev *dev) -{ - if (dev->release_lock) - dev->release_lock(dev); + /* Configure the I2C master */ + dw_writel(dev, dev->master_cfg, DW_IC_CON); } /** - * i2c_dw_init() - initialize the designware i2c master hardware + * i2c_dw_init() - Initialize the designware I2C master hardware * @dev: device private data * * This function configures and enables the I2C master. * This function is called during the I2C init function, and in case of a timeout at * run time. */ -int i2c_dw_init(struct dw_i2c_dev *dev) +static int i2c_dw_init_master(struct dw_i2c_dev *dev) { u32 hcnt, lcnt; u32 reg, comp_param1; @@ -344,8 +70,8 @@ int i2c_dw_init(struct dw_i2c_dev *dev) /* Configure register access mode 16bit */ dev->flags |= ACCESS_16BIT; } else if (reg != DW_IC_COMP_TYPE_VALUE) { - dev_err(dev->dev, "Unknown Synopsys component type: " - "0x%08x\n", reg); + dev_err(dev->dev, + "Unknown Synopsys component type: 0x%08x\n", reg); i2c_dw_release_lock(dev); return -ENODEV; } @@ -355,7 +81,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev) /* Disable the adapter */ __i2c_dw_enable_and_wait(dev, false); - /* set standard and fast speed deviders for high/low periods */ + /* Set standard and fast speed dividers for high/low periods */ sda_falling_time = dev->sda_falling_time ?: 300; /* ns */ scl_falling_time = dev->scl_falling_time ?: 300; /* ns */ @@ -440,37 +166,11 @@ int i2c_dw_init(struct dw_i2c_dev *dev) "Hardware too old to adjust SDA hold time.\n"); } - /* Configure Tx/Rx FIFO threshold levels */ - dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL); - dw_writel(dev, 0, DW_IC_RX_TL); - - /* configure the i2c master */ - dw_writel(dev, dev->master_cfg , DW_IC_CON); - + i2c_dw_configure_fifo_master(dev); i2c_dw_release_lock(dev); return 0; } -EXPORT_SYMBOL_GPL(i2c_dw_init); - -/* - * Waiting for bus not busy - */ -static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev) -{ - int timeout = TIMEOUT; - - while (dw_readl(dev, DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) { - if (timeout <= 0) { - dev_warn(dev->dev, "timeout waiting for bus ready\n"); - return -ETIMEDOUT; - } - timeout--; - usleep_range(1000, 1100); - } - - return 0; -} static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) { @@ -480,7 +180,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) /* Disable the adapter */ __i2c_dw_enable_and_wait(dev, false); - /* if the slave address is ten bit address, enable 10BITADDR */ + /* If the slave address is a ten-bit address, enable 10BITADDR */ ic_con = dw_readl(dev, DW_IC_CON); if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) { ic_con |= DW_IC_CON_10BITADDR_MASTER; @@ -503,7 +203,7 @@ static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) */ dw_writel(dev, msgs[dev->msg_write_idx].addr | ic_tar, DW_IC_TAR); - /* enforce disabled interrupts (due to HW issues) */ + /* Enforce disabled interrupts (due to HW issues) */ i2c_dw_disable_int(dev); /* Enable the adapter */ @@ -511,7 +211,7 @@ static void 
i2c_dw_xfer_init(struct dw_i2c_dev *dev) /* Clear and enable interrupts */ dw_readl(dev, DW_IC_CLR_INTR); - dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK); + dw_writel(dev, DW_IC_INTR_MASTER_MASK, DW_IC_INTR_MASK); } /* @@ -531,15 +231,15 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) u8 *buf = dev->tx_buf; bool need_restart = false; - intr_mask = DW_IC_INTR_DEFAULT_MASK; + intr_mask = DW_IC_INTR_MASTER_MASK; for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) { u32 flags = msgs[dev->msg_write_idx].flags; /* - * if target address has changed, we need to - * reprogram the target address in the i2c - * adapter when we are done with this transfer + * If target address has changed, we need to + * reprogram the target address in the I2C + * adapter when we are done with this transfer. */ if (msgs[dev->msg_write_idx].addr != addr) { dev_err(dev->dev, @@ -583,7 +283,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) */ /* - * i2c-core.c always sets the buffer length of + * i2c-core always sets the buffer length of * I2C_FUNC_SMBUS_BLOCK_DATA to 1. The length will * be adjusted when receiving the first byte. * Thus we can't stop the transaction here. @@ -599,7 +299,7 @@ i2c_dw_xfer_msg(struct dw_i2c_dev *dev) if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { - /* avoid rx buffer overrun */ + /* Avoid rx buffer overrun */ if (dev->rx_outstanding >= dev->rx_fifo_depth) break; @@ -704,31 +404,8 @@ i2c_dw_read(struct dw_i2c_dev *dev) } } -static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev) -{ - unsigned long abort_source = dev->abort_source; - int i; - - if (abort_source & DW_IC_TX_ABRT_NOACK) { - for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) - dev_dbg(dev->dev, - "%s: %s\n", __func__, abort_sources[i]); - return -EREMOTEIO; - } - - for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) - dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]); - - if (abort_source & DW_IC_TX_ARB_LOST) - return -EAGAIN; - else if (abort_source & DW_IC_TX_ABRT_GCALL_READ) - return -EINVAL; /* wrong msgs[] data */ - else - return -EIO; -} - /* - * Prepare controller for a transaction and call i2c_dw_xfer_msg + * Prepare controller for a transaction and call i2c_dw_xfer_msg. 
*/ static int i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) @@ -759,14 +436,14 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) if (ret < 0) goto done; - /* start the transfers */ + /* Start the transfers */ i2c_dw_xfer_init(dev); - /* wait for tx to complete */ + /* Wait for tx to complete */ if (!wait_for_completion_timeout(&dev->cmd_complete, adap->timeout)) { dev_err(dev->dev, "controller timed out\n"); /* i2c_dw_init implicitly disables the adapter */ - i2c_dw_init(dev); + i2c_dw_init_master(dev); ret = -ETIMEDOUT; goto done; } @@ -786,7 +463,7 @@ i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) goto done; } - /* no error */ + /* No error */ if (likely(!dev->cmd_err && !dev->status)) { ret = num; goto done; @@ -814,15 +491,9 @@ done_nolock: return ret; } -static u32 i2c_dw_func(struct i2c_adapter *adap) -{ - struct dw_i2c_dev *dev = i2c_get_adapdata(adap); - return dev->functionality; -} - static const struct i2c_algorithm i2c_dw_algo = { - .master_xfer = i2c_dw_xfer, - .functionality = i2c_dw_func, + .master_xfer = i2c_dw_xfer, + .functionality = i2c_dw_func, }; static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev) @@ -881,29 +552,21 @@ static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev) } /* - * Interrupt service routine. This gets called whenever an I2C interrupt + * Interrupt service routine. This gets called whenever an I2C master interrupt * occurs. */ -static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) +static int i2c_dw_irq_handler_master(struct dw_i2c_dev *dev) { - struct dw_i2c_dev *dev = dev_id; - u32 stat, enabled; - - enabled = dw_readl(dev, DW_IC_ENABLE); - stat = dw_readl(dev, DW_IC_RAW_INTR_STAT); - dev_dbg(dev->dev, "%s: enabled=%#x stat=%#x\n", __func__, enabled, stat); - if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY)) - return IRQ_NONE; + u32 stat; stat = i2c_dw_read_clear_intrbits(dev); - if (stat & DW_IC_INTR_TX_ABRT) { dev->cmd_err |= DW_IC_ERR_TX_ABRT; dev->status = STATUS_IDLE; /* * Anytime TX_ABRT is set, the contents of the tx/rx - * buffers are flushed. Make sure to skip them. + * buffers are flushed. Make sure to skip them. 
*/ dw_writel(dev, 0, DW_IC_INTR_MASK); goto tx_aborted; @@ -925,49 +588,46 @@ tx_aborted: if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err) complete(&dev->cmd_complete); else if (unlikely(dev->flags & ACCESS_INTR_MASK)) { - /* workaround to trigger pending interrupt */ + /* Workaround to trigger pending interrupt */ stat = dw_readl(dev, DW_IC_INTR_MASK); i2c_dw_disable_int(dev); dw_writel(dev, stat, DW_IC_INTR_MASK); } - return IRQ_HANDLED; + return 0; } -void i2c_dw_disable(struct dw_i2c_dev *dev) +static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) { - /* Disable controller */ - __i2c_dw_enable_and_wait(dev, false); + struct dw_i2c_dev *dev = dev_id; + u32 stat, enabled; - /* Disable all interupts */ - dw_writel(dev, 0, DW_IC_INTR_MASK); - dw_readl(dev, DW_IC_CLR_INTR); -} -EXPORT_SYMBOL_GPL(i2c_dw_disable); + enabled = dw_readl(dev, DW_IC_ENABLE); + stat = dw_readl(dev, DW_IC_RAW_INTR_STAT); + dev_dbg(dev->dev, "enabled=%#x stat=%#x\n", enabled, stat); + if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY)) + return IRQ_NONE; -void i2c_dw_disable_int(struct dw_i2c_dev *dev) -{ - dw_writel(dev, 0, DW_IC_INTR_MASK); -} -EXPORT_SYMBOL_GPL(i2c_dw_disable_int); + i2c_dw_irq_handler_master(dev); -u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev) -{ - return dw_readl(dev, DW_IC_COMP_PARAM_1); + return IRQ_HANDLED; } -EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param); int i2c_dw_probe(struct dw_i2c_dev *dev) { struct i2c_adapter *adap = &dev->adapter; unsigned long irq_flags; - int r; + int ret; init_completion(&dev->cmd_complete); - r = i2c_dw_init(dev); - if (r) - return r; + dev->init = i2c_dw_init_master; + dev->disable = i2c_dw_disable; + dev->disable_int = i2c_dw_disable_int; + + ret = dev->init(dev); + if (ret) + return ret; snprintf(adap->name, sizeof(adap->name), "Synopsys DesignWare I2C adapter"); @@ -984,12 +644,12 @@ int i2c_dw_probe(struct dw_i2c_dev *dev) } i2c_dw_disable_int(dev); - r = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags, - dev_name(dev->dev), dev); - if (r) { + ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr, irq_flags, + dev_name(dev->dev), dev); + if (ret) { dev_err(dev->dev, "failure requesting irq %i: %d\n", - dev->irq, r); - return r; + dev->irq, ret); + return ret; } /* @@ -999,14 +659,14 @@ int i2c_dw_probe(struct dw_i2c_dev *dev) * registered I2C slaves that do I2C transfers in their probe. 
*/ pm_runtime_get_noresume(dev->dev); - r = i2c_add_numbered_adapter(adap); - if (r) - dev_err(dev->dev, "failure adding adapter: %d\n", r); + ret = i2c_add_numbered_adapter(adap); + if (ret) + dev_err(dev->dev, "failure adding adapter: %d\n", ret); pm_runtime_put_noidle(dev->dev); - return r; + return ret; } EXPORT_SYMBOL_GPL(i2c_dw_probe); -MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core"); +MODULE_DESCRIPTION("Synopsys DesignWare I2C bus master adapter"); MODULE_LICENSE("GPL"); diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c index ed485b69b449..86e1bd0b82e9 100644 --- a/drivers/i2c/busses/i2c-designware-pcidrv.c +++ b/drivers/i2c/busses/i2c-designware-pcidrv.c @@ -187,16 +187,19 @@ static struct dw_pci_controller dw_pci_controllers[] = { static int i2c_dw_pci_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); + struct dw_i2c_dev *i_dev = pci_get_drvdata(pdev); + + i_dev->disable(i_dev); - i2c_dw_disable(pci_get_drvdata(pdev)); return 0; } static int i2c_dw_pci_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); + struct dw_i2c_dev *i_dev = pci_get_drvdata(pdev); - return i2c_dw_init(pci_get_drvdata(pdev)); + return i_dev->init(i_dev); } #endif @@ -296,7 +299,7 @@ static void i2c_dw_pci_remove(struct pci_dev *pdev) { struct dw_i2c_dev *dev = pci_get_drvdata(pdev); - i2c_dw_disable(dev); + dev->disable(dev); pm_runtime_forbid(&pdev->dev); pm_runtime_get_noresume(&pdev->dev); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index d1263b82d646..2ea6d0d25a01 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -1,5 +1,5 @@ /* - * Synopsys DesignWare I2C adapter driver (master only). + * Synopsys DesignWare I2C adapter driver. * * Based on the TI DAVINCI I2C adapter driver. 
* @@ -21,27 +21,28 @@ * ---------------------------------------------------------------------------- * */ -#include <linux/kernel.h> -#include <linux/module.h> +#include <linux/acpi.h> +#include <linux/clk-provider.h> +#include <linux/clk.h> #include <linux/delay.h> #include <linux/dmi.h> -#include <linux/i2c.h> -#include <linux/clk.h> -#include <linux/clk-provider.h> -#include <linux/errno.h> -#include <linux/sched.h> #include <linux/err.h> +#include <linux/errno.h> +#include <linux/i2c.h> #include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> #include <linux/of.h> +#include <linux/platform_data/i2c-designware.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/property.h> -#include <linux/io.h> #include <linux/reset.h> +#include <linux/sched.h> #include <linux/slab.h> -#include <linux/acpi.h> -#include <linux/platform_data/i2c-designware.h> + #include "i2c-designware-core.h" static u32 i2c_dw_get_clk_rate_khz(struct dw_i2c_dev *dev) @@ -171,6 +172,49 @@ static inline int dw_i2c_acpi_configure(struct platform_device *pdev) } #endif +static void i2c_dw_configure_master(struct dw_i2c_dev *dev) +{ + dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY; + + dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE | + DW_IC_CON_RESTART_EN; + + dev->mode = DW_IC_MASTER; + + switch (dev->clk_freq) { + case 100000: + dev->master_cfg |= DW_IC_CON_SPEED_STD; + break; + case 3400000: + dev->master_cfg |= DW_IC_CON_SPEED_HIGH; + break; + default: + dev->master_cfg |= DW_IC_CON_SPEED_FAST; + } +} + +static void i2c_dw_configure_slave(struct dw_i2c_dev *dev) +{ + dev->functionality = I2C_FUNC_SLAVE | DW_IC_DEFAULT_FUNCTIONALITY; + + dev->slave_cfg = DW_IC_CON_RX_FIFO_FULL_HLD_CTRL | + DW_IC_CON_RESTART_EN | DW_IC_CON_STOP_DET_IFADDRESSED | + DW_IC_CON_SPEED_FAST; + + dev->mode = DW_IC_SLAVE; + + switch (dev->clk_freq) { + case 100000: + dev->slave_cfg |= DW_IC_CON_SPEED_STD; + break; + case 3400000: + dev->slave_cfg |= DW_IC_CON_SPEED_HIGH; + break; + default: + dev->slave_cfg |= DW_IC_CON_SPEED_FAST; + } +} + static int i2c_dw_plat_prepare_clk(struct dw_i2c_dev *i_dev, bool prepare) { if (IS_ERR(i_dev->clk)) @@ -209,11 +253,11 @@ static void dw_i2c_set_fifo_size(struct dw_i2c_dev *dev, int id) static int dw_i2c_plat_probe(struct platform_device *pdev) { struct dw_i2c_platform_data *pdata = dev_get_platdata(&pdev->dev); - struct dw_i2c_dev *dev; struct i2c_adapter *adap; - struct resource *mem; - int irq, r; + struct dw_i2c_dev *dev; u32 acpi_speed, ht = 0; + struct resource *mem; + int irq, ret; irq = platform_get_irq(pdev, 0); if (irq < 0) @@ -276,29 +320,18 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) && dev->clk_freq != 1000000 && dev->clk_freq != 3400000) { dev_err(&pdev->dev, "Only 100kHz, 400kHz, 1MHz and 3.4MHz supported"); - r = -EINVAL; + ret = -EINVAL; goto exit_reset; } - r = i2c_dw_probe_lock_support(dev); - if (r) + ret = i2c_dw_probe_lock_support(dev); + if (ret) goto exit_reset; - dev->functionality = I2C_FUNC_10BIT_ADDR | DW_IC_DEFAULT_FUNCTIONALITY; - - dev->master_cfg = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE | - DW_IC_CON_RESTART_EN; - - switch (dev->clk_freq) { - case 100000: - dev->master_cfg |= DW_IC_CON_SPEED_STD; - break; - case 3400000: - dev->master_cfg |= DW_IC_CON_SPEED_HIGH; - break; - default: - dev->master_cfg |= DW_IC_CON_SPEED_FAST; - } + if (i2c_detect_slave_mode(&pdev->dev)) + i2c_dw_configure_slave(dev); + else + 
i2c_dw_configure_master(dev); dev->clk = devm_clk_get(&pdev->dev, NULL); if (!i2c_dw_plat_prepare_clk(dev, true)) { @@ -327,11 +360,15 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); } - r = i2c_dw_probe(dev); - if (r) + if (dev->mode == DW_IC_SLAVE) + ret = i2c_dw_probe_slave(dev); + else + ret = i2c_dw_probe(dev); + + if (ret) goto exit_probe; - return r; + return ret; exit_probe: if (!dev->pm_disabled) @@ -339,7 +376,7 @@ exit_probe: exit_reset: if (!IS_ERR_OR_NULL(dev->rst)) reset_control_assert(dev->rst); - return r; + return ret; } static int dw_i2c_plat_remove(struct platform_device *pdev) @@ -350,7 +387,7 @@ static int dw_i2c_plat_remove(struct platform_device *pdev) i2c_del_adapter(&dev->adapter); - i2c_dw_disable(dev); + dev->disable(dev); pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); @@ -394,7 +431,7 @@ static int dw_i2c_plat_suspend(struct device *dev) struct platform_device *pdev = to_platform_device(dev); struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); - i2c_dw_disable(i_dev); + i_dev->disable(i_dev); i2c_dw_plat_prepare_clk(i_dev, false); return 0; @@ -406,7 +443,7 @@ static int dw_i2c_plat_resume(struct device *dev) struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev); i2c_dw_plat_prepare_clk(i_dev, true); - i2c_dw_init(i_dev); + i_dev->init(i_dev); return 0; } @@ -423,7 +460,7 @@ static const struct dev_pm_ops dw_i2c_dev_pm_ops = { #define DW_I2C_DEV_PMOPS NULL #endif -/* work with hotplug and coldplug */ +/* Work with hotplug and coldplug */ MODULE_ALIAS("platform:i2c_designware"); static struct platform_driver dw_i2c_driver = { diff --git a/drivers/i2c/busses/i2c-designware-slave.c b/drivers/i2c/busses/i2c-designware-slave.c new file mode 100644 index 000000000000..0548c7ea578c --- /dev/null +++ b/drivers/i2c/busses/i2c-designware-slave.c @@ -0,0 +1,393 @@ +/* + * Synopsys DesignWare I2C adapter driver (slave only). + * + * Based on the Synopsys DesignWare I2C adapter driver (master). + * + * Copyright (C) 2016 Synopsys Inc. + * + * ---------------------------------------------------------------------------- + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * ---------------------------------------------------------------------------- + * + */ +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/errno.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/pm_runtime.h> + +#include "i2c-designware-core.h" + +static void i2c_dw_configure_fifo_slave(struct dw_i2c_dev *dev) +{ + /* Configure Tx/Rx FIFO threshold levels. */ + dw_writel(dev, 0, DW_IC_TX_TL); + dw_writel(dev, 0, DW_IC_RX_TL); + + /* Configure the I2C slave. */ + dw_writel(dev, dev->slave_cfg, DW_IC_CON); + dw_writel(dev, DW_IC_INTR_SLAVE_MASK, DW_IC_INTR_MASK); +} + +/** + * i2c_dw_init_slave() - Initialize the designware i2c slave hardware + * @dev: device private data + * + * This function configures and enables the I2C in slave mode. 
+ * This function is called during the I2C init function, and in case of a timeout at + * run time. + */ +static int i2c_dw_init_slave(struct dw_i2c_dev *dev) +{ + u32 sda_falling_time, scl_falling_time; + u32 reg, comp_param1; + u32 hcnt, lcnt; + int ret; + + ret = i2c_dw_acquire_lock(dev); + if (ret) + return ret; + + reg = dw_readl(dev, DW_IC_COMP_TYPE); + if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) { + /* Configure register endianness access. */ + dev->flags |= ACCESS_SWAP; + } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) { + /* Configure register access mode 16bit. */ + dev->flags |= ACCESS_16BIT; + } else if (reg != DW_IC_COMP_TYPE_VALUE) { + dev_err(dev->dev, + "Unknown Synopsys component type: 0x%08x\n", reg); + i2c_dw_release_lock(dev); + return -ENODEV; + } + + comp_param1 = dw_readl(dev, DW_IC_COMP_PARAM_1); + + /* Disable the adapter. */ + __i2c_dw_enable_and_wait(dev, false); + + /* Set standard and fast speed dividers for high/low periods. */ + sda_falling_time = dev->sda_falling_time ?: 300; /* ns */ + scl_falling_time = dev->scl_falling_time ?: 300; /* ns */ + + /* Set SCL timing parameters for standard-mode. */ + if (dev->ss_hcnt && dev->ss_lcnt) { + hcnt = dev->ss_hcnt; + lcnt = dev->ss_lcnt; + } else { + hcnt = i2c_dw_scl_hcnt(i2c_dw_clk_rate(dev), + 4000, /* tHD;STA = tHIGH = 4.0 us */ + sda_falling_time, + 0, /* 0: DW default, 1: Ideal */ + 0); /* No offset */ + lcnt = i2c_dw_scl_lcnt(i2c_dw_clk_rate(dev), + 4700, /* tLOW = 4.7 us */ + scl_falling_time, + 0); /* No offset */ + } + dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT); + dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT); + dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt); + + /* Set SCL timing parameters for fast-mode or fast-mode plus. */ + if ((dev->clk_freq == 1000000) && dev->fp_hcnt && dev->fp_lcnt) { + hcnt = dev->fp_hcnt; + lcnt = dev->fp_lcnt; + } else if (dev->fs_hcnt && dev->fs_lcnt) { + hcnt = dev->fs_hcnt; + lcnt = dev->fs_lcnt; + } else { + hcnt = i2c_dw_scl_hcnt(i2c_dw_clk_rate(dev), + 600, /* tHD;STA = tHIGH = 0.6 us */ + sda_falling_time, + 0, /* 0: DW default, 1: Ideal */ + 0); /* No offset */ + lcnt = i2c_dw_scl_lcnt(i2c_dw_clk_rate(dev), + 1300, /* tLOW = 1.3 us */ + scl_falling_time, + 0); /* No offset */ + } + dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT); + dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT); + dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);
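To make the i2c_dw_scl_hcnt()/i2c_dw_scl_lcnt() calls above concrete, here is a hedged worked example. It assumes a 100 MHz controller clock (i2c_dw_clk_rate() reports kHz, so ic_clk = 100000) and the 300 ns default falling times; the hcnt expression is the cond == 0 branch of the helper as it appears in the removed core-file hunk earlier in this diff:

/* Standard-mode counts for an assumed ic_clk of 100000 kHz (100 MHz): */
u32 ic_clk = 100000;	/* kHz */
u32 tf = 300;		/* SDA/SCL falling time, ns */

/* cond == 0: hcnt = ic_clk * (tSYMBOL + tf) / 1e6 - 3 + offset */
u32 hcnt = (ic_clk * (4000 + tf) + 500000) / 1000000 - 3 + 0;	/* = 427 */

/* lcnt = ic_clk * (tLOW + tf) / 1e6 - 1 + offset */
u32 lcnt = (ic_clk * (4700 + tf) + 500000) / 1000000 - 1 + 0;	/* = 499 */

/*
 * 427 + 499 counts plus the IP's internal overhead comes to roughly
 * 1000 clock cycles per SCL period, i.e. about 100 kHz on the wire.
 */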
+ + if ((dev->slave_cfg & DW_IC_CON_SPEED_MASK) == + DW_IC_CON_SPEED_HIGH) { + if ((comp_param1 & DW_IC_COMP_PARAM_1_SPEED_MODE_MASK) + != DW_IC_COMP_PARAM_1_SPEED_MODE_HIGH) { + dev_err(dev->dev, "High Speed not supported!\n"); + dev->slave_cfg &= ~DW_IC_CON_SPEED_MASK; + dev->slave_cfg |= DW_IC_CON_SPEED_FAST; + } else if (dev->hs_hcnt && dev->hs_lcnt) { + hcnt = dev->hs_hcnt; + lcnt = dev->hs_lcnt; + dw_writel(dev, hcnt, DW_IC_HS_SCL_HCNT); + dw_writel(dev, lcnt, DW_IC_HS_SCL_LCNT); + dev_dbg(dev->dev, "HighSpeed-mode HCNT:LCNT = %d:%d\n", + hcnt, lcnt); + } + } + + /* Configure SDA Hold Time if required. */ + reg = dw_readl(dev, DW_IC_COMP_VERSION); + if (reg >= DW_IC_SDA_HOLD_MIN_VERS) { + if (!dev->sda_hold_time) { + /* Keep previous hold time setting if no one set it. */ + dev->sda_hold_time = dw_readl(dev, DW_IC_SDA_HOLD); + } + /* + * Workaround for avoiding TX arbitration lost in case I2C + * slave pulls SDA down "too quickly" after falling edge of + * SCL by enabling non-zero SDA RX hold. The specification says it + * extends the incoming SDA low to high transition while SCL is + * high, but it appears to help with the above issue as well. + */ + if (!(dev->sda_hold_time & DW_IC_SDA_HOLD_RX_MASK)) + dev->sda_hold_time |= 1 << DW_IC_SDA_HOLD_RX_SHIFT; + dw_writel(dev, dev->sda_hold_time, DW_IC_SDA_HOLD); + } else { + dev_warn(dev->dev, + "Hardware too old to adjust SDA hold time.\n"); + } + + i2c_dw_configure_fifo_slave(dev); + i2c_dw_release_lock(dev); + + return 0; +} + +static int i2c_dw_reg_slave(struct i2c_client *slave) +{ + struct dw_i2c_dev *dev = i2c_get_adapdata(slave->adapter); + + if (dev->slave) + return -EBUSY; + if (slave->flags & I2C_CLIENT_TEN) + return -EAFNOSUPPORT; + /* + * Set slave address in the IC_SAR register, + * the address to which the DW_apb_i2c responds. + */ + __i2c_dw_enable(dev, false); + dw_writel(dev, slave->addr, DW_IC_SAR); + dev->slave = slave; + + __i2c_dw_enable(dev, true); + + dev->cmd_err = 0; + dev->msg_write_idx = 0; + dev->msg_read_idx = 0; + dev->msg_err = 0; + dev->status = STATUS_IDLE; + dev->abort_source = 0; + dev->rx_outstanding = 0; + + return 0; +} + +static int i2c_dw_unreg_slave(struct i2c_client *slave) +{ + struct dw_i2c_dev *dev = i2c_get_adapdata(slave->adapter); + + dev->disable_int(dev); + dev->disable(dev); + dev->slave = NULL; + + return 0; +} + +static u32 i2c_dw_read_clear_intrbits_slave(struct dw_i2c_dev *dev) +{ + u32 stat; + + /* + * The IC_INTR_STAT register just indicates "enabled" interrupts. + * The unmasked raw version of the interrupt status bits is available + * in the IC_RAW_INTR_STAT register. + * + * That is, + * stat = dw_readl(IC_INTR_STAT); + * equals to, + * stat = dw_readl(IC_RAW_INTR_STAT) & dw_readl(IC_INTR_MASK); + * + * The raw version might be useful for debugging purposes. + */ + stat = dw_readl(dev, DW_IC_INTR_STAT); + + /* + * Do not use the IC_CLR_INTR register to clear interrupts, or + * you'll miss some interrupts, triggered during the period from + * dw_readl(IC_INTR_STAT) to dw_readl(IC_CLR_INTR). + * + * Instead, use the separately-prepared IC_CLR_* registers. + */ + if (stat & DW_IC_INTR_TX_ABRT) + dw_readl(dev, DW_IC_CLR_TX_ABRT); + if (stat & DW_IC_INTR_RX_UNDER) + dw_readl(dev, DW_IC_CLR_RX_UNDER); + if (stat & DW_IC_INTR_RX_OVER) + dw_readl(dev, DW_IC_CLR_RX_OVER); + if (stat & DW_IC_INTR_TX_OVER) + dw_readl(dev, DW_IC_CLR_TX_OVER); + if (stat & DW_IC_INTR_RX_DONE) + dw_readl(dev, DW_IC_CLR_RX_DONE); + if (stat & DW_IC_INTR_ACTIVITY) + dw_readl(dev, DW_IC_CLR_ACTIVITY); + if (stat & DW_IC_INTR_STOP_DET) + dw_readl(dev, DW_IC_CLR_STOP_DET); + if (stat & DW_IC_INTR_START_DET) + dw_readl(dev, DW_IC_CLR_START_DET); + if (stat & DW_IC_INTR_GEN_CALL) + dw_readl(dev, DW_IC_CLR_GEN_CALL); + + return stat; +}
+ +/* + * Interrupt service routine. This gets called whenever an I2C slave interrupt + * occurs. + */ + +static int i2c_dw_irq_handler_slave(struct dw_i2c_dev *dev) +{ + u32 raw_stat, stat, enabled; + u8 val, slave_activity; + + stat = dw_readl(dev, DW_IC_INTR_STAT); + enabled = dw_readl(dev, DW_IC_ENABLE); + raw_stat = dw_readl(dev, DW_IC_RAW_INTR_STAT); + slave_activity = ((dw_readl(dev, DW_IC_STATUS) & + DW_IC_STATUS_SLAVE_ACTIVITY) >> 6); + + if (!enabled || !(raw_stat & ~DW_IC_INTR_ACTIVITY)) + return 0; + + dev_dbg(dev->dev, + "%#x STATUS SLAVE_ACTIVITY=%#x : RAW_INTR_STAT=%#x : INTR_STAT=%#x\n", + enabled, slave_activity, raw_stat, stat); + + if ((stat & DW_IC_INTR_RX_FULL) && (stat & DW_IC_INTR_STOP_DET)) + i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_REQUESTED, &val); + + if (stat & DW_IC_INTR_RD_REQ) { + if (slave_activity) { + if (stat & DW_IC_INTR_RX_FULL) { + val = dw_readl(dev, DW_IC_DATA_CMD); + + if (!i2c_slave_event(dev->slave, + I2C_SLAVE_WRITE_RECEIVED, + &val)) { + dev_vdbg(dev->dev, "Byte %X acked!", + val); + } + dw_readl(dev, DW_IC_CLR_RD_REQ); + stat = i2c_dw_read_clear_intrbits_slave(dev); + } else { + dw_readl(dev, DW_IC_CLR_RD_REQ); + dw_readl(dev, DW_IC_CLR_RX_UNDER); + stat = i2c_dw_read_clear_intrbits_slave(dev); + } + if (!i2c_slave_event(dev->slave, + I2C_SLAVE_READ_REQUESTED, + &val)) + dw_writel(dev, val, DW_IC_DATA_CMD); + } + } + + if (stat & DW_IC_INTR_RX_DONE) { + if (!i2c_slave_event(dev->slave, I2C_SLAVE_READ_PROCESSED, + &val)) + dw_readl(dev, DW_IC_CLR_RX_DONE); + + i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); + stat = i2c_dw_read_clear_intrbits_slave(dev); + return 1; + } + + if (stat & DW_IC_INTR_RX_FULL) { + val = dw_readl(dev, DW_IC_DATA_CMD); + if (!i2c_slave_event(dev->slave, I2C_SLAVE_WRITE_RECEIVED, + &val)) + dev_vdbg(dev->dev, "Byte %X acked!", val); + } else { + i2c_slave_event(dev->slave, I2C_SLAVE_STOP, &val); + stat = i2c_dw_read_clear_intrbits_slave(dev); + } + + return 1; +} + +static irqreturn_t i2c_dw_isr_slave(int this_irq, void *dev_id) +{ + struct dw_i2c_dev *dev = dev_id; + int ret; + + i2c_dw_read_clear_intrbits_slave(dev); + ret = i2c_dw_irq_handler_slave(dev); + if (ret > 0) + complete(&dev->cmd_complete); + + return IRQ_RETVAL(ret); +} + +static struct i2c_algorithm i2c_dw_algo = { + .functionality = i2c_dw_func, + .reg_slave = i2c_dw_reg_slave, + .unreg_slave = i2c_dw_unreg_slave, +}; + +int i2c_dw_probe_slave(struct dw_i2c_dev *dev) +{ + struct i2c_adapter *adap = &dev->adapter; + int ret; + + init_completion(&dev->cmd_complete); + + dev->init = i2c_dw_init_slave; + dev->disable = i2c_dw_disable; + dev->disable_int = i2c_dw_disable_int; + + ret = dev->init(dev); + if (ret) + return ret; + + snprintf(adap->name, sizeof(adap->name), + "Synopsys DesignWare I2C Slave adapter"); + adap->retries = 3; + adap->algo = &i2c_dw_algo; + adap->dev.parent = dev->dev; + i2c_set_adapdata(adap, dev); + + ret = devm_request_irq(dev->dev, dev->irq, i2c_dw_isr_slave, + IRQF_SHARED, dev_name(dev->dev), dev); + if (ret) { + dev_err(dev->dev, "failure requesting irq %i: %d\n", + dev->irq, ret); + return ret; + } + + ret = i2c_add_numbered_adapter(adap); + if (ret) + dev_err(dev->dev, "failure adding adapter: %d\n", ret); + pm_runtime_put_noidle(dev->dev); + + return ret; +} +EXPORT_SYMBOL_GPL(i2c_dw_probe_slave); + +MODULE_AUTHOR("Luis Oliveira <lolivei@synopsys.com>"); +MODULE_DESCRIPTION("Synopsys DesignWare I2C bus slave adapter"); +MODULE_LICENSE("GPL v2");
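The handler above drives the generic I2C slave backend API: each hardware event is translated into an i2c_slave_event() call, and the registered backend callback supplies or consumes the data byte. For orientation, here is a hedged sketch of the other side of that contract, loosely modeled on the in-tree i2c-slave-eeprom backend; demo_slave_cb and its one-byte backing store are hypothetical, for illustration only:

static int demo_slave_cb(struct i2c_client *client,
			 enum i2c_slave_event event, u8 *val)
{
	static u8 reg;	/* toy one-byte backing store */

	switch (event) {
	case I2C_SLAVE_WRITE_RECEIVED:
		reg = *val;	/* byte written by the remote master */
		break;
	case I2C_SLAVE_READ_REQUESTED:
	case I2C_SLAVE_READ_PROCESSED:
		*val = reg;	/* byte handed back for the TX FIFO */
		break;
	default:		/* WRITE_REQUESTED / STOP: nothing to do */
		break;
	}
	return 0;
}

A backend registers this via i2c_slave_register(client, demo_slave_cb), which reaches i2c_dw_reg_slave() through the .reg_slave hook added above.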
diff --git a/drivers/i2c/busses/i2c-emev2.c b/drivers/i2c/busses/i2c-emev2.c index 312912708854..d2e84480fbe9 100644 --- a/drivers/i2c/busses/i2c-emev2.c +++ b/drivers/i2c/busses/i2c-emev2.c @@ -375,7 +375,9 @@ static int em_i2c_probe(struct platform_device *pdev) if (IS_ERR(priv->sclk)) return PTR_ERR(priv->sclk); - clk_prepare_enable(priv->sclk); + ret = clk_prepare_enable(priv->sclk); + if (ret) + return ret; priv->adap.timeout = msecs_to_jiffies(100); priv->adap.retries = 5; diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index 6484fa6dbb84..c9536e17d6ff 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -66,6 +66,8 @@ * Lewisburg Supersku (PCH) 0xa223 32 hard yes yes yes * Kaby Lake PCH-H (PCH) 0xa2a3 32 hard yes yes yes * Gemini Lake (SOC) 0x31d4 32 hard yes yes yes + * Cannon Lake-H (PCH) 0xa323 32 hard yes yes yes + * Cannon Lake-LP (PCH) 0x9da3 32 hard yes yes yes * * Features supported by this driver: * Software PEC no @@ -226,10 +228,12 @@ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_SMBUS 0x9c22 #define PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_SMBUS 0x9ca2 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS 0x9d23 +#define PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS 0x9da3 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS 0xa123 #define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS 0xa1a3 #define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS 0xa223 #define PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS 0xa2a3 +#define PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS 0xa323 struct i801_mux_config { char *gpio_chip; @@ -1026,6 +1030,8 @@ static const struct pci_device_id i801_ids[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_KABYLAKE_PCH_H_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS) }, { 0, } }; @@ -1499,6 +1505,8 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) switch (dev->device) { case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS: case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS: + case PCI_DEVICE_ID_INTEL_CANNONLAKE_H_SMBUS: + case PCI_DEVICE_ID_INTEL_CANNONLAKE_LP_SMBUS: case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS: case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS: case PCI_DEVICE_ID_INTEL_DNV_SMBUS: diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c index 5738556b6aac..d4e8f1954f23 100644 --- a/drivers/i2c/busses/i2c-mxs.c +++ b/drivers/i2c/busses/i2c-mxs.c @@ -419,7 +419,7 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap, ret = mxs_i2c_pio_wait_xfer_end(i2c); if (ret) { - dev_err(i2c->dev, + dev_dbg(i2c->dev, "PIO: Failed to send SELECT command!\n"); goto cleanup; } @@ -431,7 +431,7 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap, ret = mxs_i2c_pio_wait_xfer_end(i2c); if (ret) { - dev_err(i2c->dev, + dev_dbg(i2c->dev, "PIO: Failed to send READ command!\n"); goto cleanup; } @@ -528,7 +528,7 @@ static int mxs_i2c_pio_setup_xfer(struct i2c_adapter *adap, /* Wait for the end of the transfer. 
*/ ret = mxs_i2c_pio_wait_xfer_end(i2c); if (ret) { - dev_err(i2c->dev, + dev_dbg(i2c->dev, "PIO: Failed to finish WRITE cmd!\n"); break; } diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c index 3bd2e7d06e4b..853a2abedb05 100644 --- a/drivers/i2c/busses/i2c-pca-platform.c +++ b/drivers/i2c/busses/i2c-pca-platform.c @@ -22,14 +22,17 @@ #include <linux/i2c-algo-pca.h> #include <linux/i2c-pca-platform.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> #include <asm/irq.h> struct i2c_pca_pf_data { void __iomem *reg_base; int irq; /* if 0, use polling */ - int gpio; + struct gpio_desc *gpio; wait_queue_head_t wait; struct i2c_adapter adap; struct i2c_algo_pca_data algo_data; @@ -104,17 +107,17 @@ static int i2c_pca_pf_waitforcompletion(void *pd) static void i2c_pca_pf_dummyreset(void *pd) { struct i2c_pca_pf_data *i2c = pd; - printk(KERN_WARNING "%s: No reset-pin found. Chip may get stuck!\n", - i2c->adap.name); + + dev_warn(&i2c->adap.dev, "No reset-pin found. Chip may get stuck!\n"); } static void i2c_pca_pf_resetchip(void *pd) { struct i2c_pca_pf_data *i2c = pd; - gpio_set_value(i2c->gpio, 0); + gpiod_set_value(i2c->gpio, 1); ndelay(100); - gpio_set_value(i2c->gpio, 1); + gpiod_set_value(i2c->gpio, 0); } static irqreturn_t i2c_pca_pf_handler(int this_irq, void *dev_id) @@ -136,36 +139,27 @@ static int i2c_pca_pf_probe(struct platform_device *pdev) struct resource *res; struct i2c_pca9564_pf_platform_data *platform_data = dev_get_platdata(&pdev->dev); + struct device_node *np = pdev->dev.of_node; int ret = 0; int irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); /* If irq is 0, we do polling. 
*/ + if (irq < 0) + irq = 0; - if (res == NULL) { - ret = -ENODEV; - goto e_print; - } + i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return -ENOMEM; - if (!request_mem_region(res->start, resource_size(res), res->name)) { - ret = -ENOMEM; - goto e_print; - } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + i2c->reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(i2c->reg_base)) + return PTR_ERR(i2c->reg_base); - i2c = kzalloc(sizeof(struct i2c_pca_pf_data), GFP_KERNEL); - if (!i2c) { - ret = -ENOMEM; - goto e_alloc; - } init_waitqueue_head(&i2c->wait); - i2c->reg_base = ioremap(res->start, resource_size(res)); - if (!i2c->reg_base) { - ret = -ENOMEM; - goto e_remap; - } i2c->io_base = res->start; i2c->io_size = resource_size(res); i2c->irq = irq; @@ -177,20 +171,43 @@ static int i2c_pca_pf_probe(struct platform_device *pdev) (unsigned long) res->start); i2c->adap.algo_data = &i2c->algo_data; i2c->adap.dev.parent = &pdev->dev; + i2c->adap.dev.of_node = np; if (platform_data) { i2c->adap.timeout = platform_data->timeout; i2c->algo_data.i2c_clock = platform_data->i2c_clock_speed; - i2c->gpio = platform_data->gpio; + if (gpio_is_valid(platform_data->gpio)) { + ret = devm_gpio_request_one(&pdev->dev, + platform_data->gpio, + GPIOF_ACTIVE_LOW, + i2c->adap.name); + if (ret == 0) { + i2c->gpio = gpio_to_desc(platform_data->gpio); + gpiod_direction_output(i2c->gpio, 0); + } else { + dev_warn(&pdev->dev, "Registering gpio failed!\n"); + i2c->gpio = NULL; + } + } + } else if (np) { + i2c->adap.timeout = HZ; + i2c->gpio = devm_gpiod_get_optional(&pdev->dev, "reset-gpios", GPIOD_OUT_LOW); + if (IS_ERR(i2c->gpio)) + return PTR_ERR(i2c->gpio); + of_property_read_u32_index(np, "clock-frequency", 0, + &i2c->algo_data.i2c_clock); } else { i2c->adap.timeout = HZ; i2c->algo_data.i2c_clock = 59000; - i2c->gpio = -1; + i2c->gpio = NULL; } i2c->algo_data.data = i2c; i2c->algo_data.wait_for_completion = i2c_pca_pf_waitforcompletion; - i2c->algo_data.reset_chip = i2c_pca_pf_dummyreset; + if (i2c->gpio) + i2c->algo_data.reset_chip = i2c_pca_pf_resetchip; + else + i2c->algo_data.reset_chip = i2c_pca_pf_dummyreset; switch (res->flags & IORESOURCE_MEM_TYPE_MASK) { case IORESOURCE_MEM_32BIT: @@ -208,52 +225,22 @@ static int i2c_pca_pf_probe(struct platform_device *pdev) break; } - /* Use gpio_is_valid() when in mainline */ - if (i2c->gpio > -1) { - ret = gpio_request(i2c->gpio, i2c->adap.name); - if (ret == 0) { - gpio_direction_output(i2c->gpio, 1); - i2c->algo_data.reset_chip = i2c_pca_pf_resetchip; - } else { - printk(KERN_WARNING "%s: Registering gpio failed!\n", - i2c->adap.name); - i2c->gpio = ret; - } - } - if (irq) { - ret = request_irq(irq, i2c_pca_pf_handler, + ret = devm_request_irq(&pdev->dev, irq, i2c_pca_pf_handler, IRQF_TRIGGER_FALLING, pdev->name, i2c); if (ret) - goto e_reqirq; + return ret; } - if (i2c_pca_add_numbered_bus(&i2c->adap) < 0) { - ret = -ENODEV; - goto e_adapt; - } + ret = i2c_pca_add_numbered_bus(&i2c->adap); + if (ret) + return ret; platform_set_drvdata(pdev, i2c); - printk(KERN_INFO "%s registered.\n", i2c->adap.name); + dev_info(&pdev->dev, "registered.\n"); return 0; - -e_adapt: - if (irq) - free_irq(irq, i2c); -e_reqirq: - if (i2c->gpio > -1) - gpio_free(i2c->gpio); - - iounmap(i2c->reg_base); -e_remap: - kfree(i2c); -e_alloc: - release_mem_region(res->start, resource_size(res)); -e_print: - printk(KERN_ERR "Registering PCA9564/PCA9665 FAILED! 
(%d)\n", ret); - return ret; } static int i2c_pca_pf_remove(struct platform_device *pdev) @@ -262,24 +249,24 @@ static int i2c_pca_pf_remove(struct platform_device *pdev) i2c_del_adapter(&i2c->adap); - if (i2c->irq) - free_irq(i2c->irq, i2c); - - if (i2c->gpio > -1) - gpio_free(i2c->gpio); - - iounmap(i2c->reg_base); - release_mem_region(i2c->io_base, i2c->io_size); - kfree(i2c); - return 0; } +#ifdef CONFIG_OF +static const struct of_device_id i2c_pca_of_match_table[] = { + { .compatible = "nxp,pca9564" }, + { .compatible = "nxp,pca9665" }, + {}, +}; +MODULE_DEVICE_TABLE(of, i2c_pca_of_match_table); +#endif + static struct platform_driver i2c_pca_pf_driver = { .probe = i2c_pca_pf_probe, .remove = i2c_pca_pf_remove, .driver = { .name = "i2c-pca-platform", + .of_match_table = of_match_ptr(i2c_pca_of_match_table), }, }; diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c index 8be3e6cb8fe6..93c1a54981df 100644 --- a/drivers/i2c/busses/i2c-rcar.c +++ b/drivers/i2c/busses/i2c-rcar.c @@ -1,5 +1,5 @@ /* - * Driver for the Renesas RCar I2C unit + * Driver for the Renesas R-Car I2C unit * * Copyright (C) 2014-15 Wolfram Sang <wsa@sang-engineering.com> * Copyright (C) 2011-2015 Renesas Electronics Corporation @@ -783,7 +783,12 @@ static int rcar_unreg_slave(struct i2c_client *slave) static u32 rcar_i2c_func(struct i2c_adapter *adap) { - /* This HW can't do SMBUS_QUICK and NOSTART */ + /* + * This HW can't do: + * I2C_SMBUS_QUICK (setting FSB during START didn't work) + * I2C_M_NOSTART (automatically sends address after START) + * I2C_M_IGNORE_NAK (automatically sends STOP after NAK) + */ return I2C_FUNC_I2C | I2C_FUNC_SLAVE | (I2C_FUNC_SMBUS_EMUL & ~I2C_FUNC_SMBUS_QUICK); } diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c index 3d7559348745..2e097d97d258 100644 --- a/drivers/i2c/busses/i2c-sh_mobile.c +++ b/drivers/i2c/busses/i2c-sh_mobile.c @@ -24,7 +24,6 @@ #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/i2c.h> -#include <linux/i2c/i2c-sh_mobile.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -879,10 +878,10 @@ static int sh_mobile_i2c_hook_irqs(struct platform_device *dev, struct sh_mobile static int sh_mobile_i2c_probe(struct platform_device *dev) { - struct i2c_sh_mobile_platform_data *pdata = dev_get_platdata(&dev->dev); struct sh_mobile_i2c_data *pd; struct i2c_adapter *adap; struct resource *res; + const struct of_device_id *match; int ret; u32 bus_speed; @@ -910,30 +909,18 @@ static int sh_mobile_i2c_probe(struct platform_device *dev) if (IS_ERR(pd->reg)) return PTR_ERR(pd->reg); - /* Use platform data bus speed or STANDARD_MODE */ ret = of_property_read_u32(dev->dev.of_node, "clock-frequency", &bus_speed); pd->bus_speed = ret ? 
STANDARD_MODE : bus_speed; - pd->clks_per_count = 1; - if (dev->dev.of_node) { - const struct of_device_id *match; - - match = of_match_device(sh_mobile_i2c_dt_ids, &dev->dev); - if (match) { - const struct sh_mobile_dt_config *config; + match = of_match_device(sh_mobile_i2c_dt_ids, &dev->dev); + if (match) { + const struct sh_mobile_dt_config *config = match->data; - config = match->data; - pd->clks_per_count = config->clks_per_count; + pd->clks_per_count = config->clks_per_count; - if (config->setup) - config->setup(pd); - } - } else { - if (pdata && pdata->bus_speed) - pd->bus_speed = pdata->bus_speed; - if (pdata && pdata->clks_per_count) - pd->clks_per_count = pdata->clks_per_count; + if (config->setup) + config->setup(pd); } /* The IIC blocks on SH-Mobile ARM processors diff --git a/drivers/i2c/busses/i2c-xgene-slimpro.c b/drivers/i2c/busses/i2c-xgene-slimpro.c index 6ba6c83ca8f1..7e89ba6fcf6f 100644 --- a/drivers/i2c/busses/i2c-xgene-slimpro.c +++ b/drivers/i2c/busses/i2c-xgene-slimpro.c @@ -22,10 +22,12 @@ * using the APM X-Gene SLIMpro mailbox driver. * */ +#include <acpi/pcc.h> #include <linux/acpi.h> #include <linux/dma-mapping.h> #include <linux/i2c.h> #include <linux/interrupt.h> +#include <linux/io.h> #include <linux/mailbox_client.h> #include <linux/module.h> #include <linux/of.h> @@ -89,6 +91,8 @@ ((addrlen << SLIMPRO_IIC_ADDRLEN_SHIFT) & SLIMPRO_IIC_ADDRLEN_MASK) | \ ((datalen << SLIMPRO_IIC_DATALEN_SHIFT) & SLIMPRO_IIC_DATALEN_MASK)) +#define SLIMPRO_MSG_TYPE(v) (((v) & 0xF0000000) >> 28) + /* * Encode for upper address for block data */ @@ -99,19 +103,47 @@ & 0x3FF00000)) #define SLIMPRO_IIC_ENCODE_ADDR(a) ((a) & 0x000FFFFF) +#define SLIMPRO_IIC_MSG_DWORD_COUNT 3 + +/* PCC related defines */ +#define PCC_SIGNATURE 0x50424300 +#define PCC_STS_CMD_COMPLETE BIT(0) +#define PCC_STS_SCI_DOORBELL BIT(1) +#define PCC_STS_ERR BIT(2) +#define PCC_STS_PLAT_NOTIFY BIT(3) +#define PCC_CMD_GENERATE_DB_INT BIT(15) + struct slimpro_i2c_dev { struct i2c_adapter adapter; struct device *dev; struct mbox_chan *mbox_chan; struct mbox_client mbox_client; + int mbox_idx; struct completion rd_complete; u8 dma_buffer[I2C_SMBUS_BLOCK_MAX + 1]; /* dma_buffer[0] is used for length */ u32 *resp_msg; + phys_addr_t comm_base_addr; + void *pcc_comm_addr; }; #define to_slimpro_i2c_dev(cl) \ container_of(cl, struct slimpro_i2c_dev, mbox_client) +/* + * This function tests and clears a bitmask then returns its old value + */ +static u16 xgene_word_tst_and_clr(u16 *addr, u16 mask) +{ + u16 ret, val; + + val = le16_to_cpu(READ_ONCE(*addr)); + ret = val & mask; + val &= ~mask; + WRITE_ONCE(*addr, cpu_to_le16(val)); + + return ret; +} + static void slimpro_i2c_rx_cb(struct mbox_client *cl, void *mssg) { struct slimpro_i2c_dev *ctx = to_slimpro_i2c_dev(cl); @@ -129,9 +161,53 @@ static void slimpro_i2c_rx_cb(struct mbox_client *cl, void *mssg) complete(&ctx->rd_complete); } +static void slimpro_i2c_pcc_rx_cb(struct mbox_client *cl, void *msg) +{ + struct slimpro_i2c_dev *ctx = to_slimpro_i2c_dev(cl); + struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr; + + /* Check if platform sends interrupt */ + if (!xgene_word_tst_and_clr(&generic_comm_base->status, + PCC_STS_SCI_DOORBELL)) + return; + + if (xgene_word_tst_and_clr(&generic_comm_base->status, + PCC_STS_CMD_COMPLETE)) { + msg = generic_comm_base + 1; + + /* Response message msg[1] contains the return value. 
*/ + if (ctx->resp_msg) + *ctx->resp_msg = ((u32 *)msg)[1]; + + complete(&ctx->rd_complete); + } +} + +static void slimpro_i2c_pcc_tx_prepare(struct slimpro_i2c_dev *ctx, u32 *msg) +{ + struct acpi_pcct_shared_memory *generic_comm_base = ctx->pcc_comm_addr; + u32 *ptr = (void *)(generic_comm_base + 1); + u16 status; + int i; + + WRITE_ONCE(generic_comm_base->signature, + cpu_to_le32(PCC_SIGNATURE | ctx->mbox_idx)); + + WRITE_ONCE(generic_comm_base->command, + cpu_to_le16(SLIMPRO_MSG_TYPE(msg[0]) | PCC_CMD_GENERATE_DB_INT)); + + status = le16_to_cpu(READ_ONCE(generic_comm_base->status)); + status &= ~PCC_STS_CMD_COMPLETE; + WRITE_ONCE(generic_comm_base->status, cpu_to_le16(status)); + + /* Copy the message to the PCC comm space */ + for (i = 0; i < SLIMPRO_IIC_MSG_DWORD_COUNT; i++) + WRITE_ONCE(ptr[i], cpu_to_le32(msg[i])); +} + static int start_i2c_msg_xfer(struct slimpro_i2c_dev *ctx) { - if (ctx->mbox_client.tx_block) { + if (ctx->mbox_client.tx_block || !acpi_disabled) { if (!wait_for_completion_timeout(&ctx->rd_complete, msecs_to_jiffies(MAILBOX_OP_TIMEOUT))) return -ETIMEDOUT; @@ -144,49 +220,60 @@ static int start_i2c_msg_xfer(struct slimpro_i2c_dev *ctx) return 0; } -static int slimpro_i2c_rd(struct slimpro_i2c_dev *ctx, u32 chip, - u32 addr, u32 addrlen, u32 protocol, - u32 readlen, u32 *data) +static int slimpro_i2c_send_msg(struct slimpro_i2c_dev *ctx, + u32 *msg, + u32 *data) { - u32 msg[3]; int rc; - msg[0] = SLIMPRO_IIC_ENCODE_MSG(SLIMPRO_IIC_BUS, chip, - SLIMPRO_IIC_READ, protocol, addrlen, readlen); - msg[1] = SLIMPRO_IIC_ENCODE_ADDR(addr); - msg[2] = 0; ctx->resp_msg = data; - rc = mbox_send_message(ctx->mbox_chan, &msg); + + if (!acpi_disabled) { + reinit_completion(&ctx->rd_complete); + slimpro_i2c_pcc_tx_prepare(ctx, msg); + } + + rc = mbox_send_message(ctx->mbox_chan, msg); if (rc < 0) goto err; rc = start_i2c_msg_xfer(ctx); + err: + if (!acpi_disabled) + mbox_chan_txdone(ctx->mbox_chan, 0); + ctx->resp_msg = NULL; + return rc; } +static int slimpro_i2c_rd(struct slimpro_i2c_dev *ctx, u32 chip, + u32 addr, u32 addrlen, u32 protocol, + u32 readlen, u32 *data) +{ + u32 msg[3]; + + msg[0] = SLIMPRO_IIC_ENCODE_MSG(SLIMPRO_IIC_BUS, chip, + SLIMPRO_IIC_READ, protocol, addrlen, readlen); + msg[1] = SLIMPRO_IIC_ENCODE_ADDR(addr); + msg[2] = 0; + + return slimpro_i2c_send_msg(ctx, msg, data); +} + static int slimpro_i2c_wr(struct slimpro_i2c_dev *ctx, u32 chip, u32 addr, u32 addrlen, u32 protocol, u32 writelen, u32 data) { u32 msg[3]; - int rc; msg[0] = SLIMPRO_IIC_ENCODE_MSG(SLIMPRO_IIC_BUS, chip, SLIMPRO_IIC_WRITE, protocol, addrlen, writelen); msg[1] = SLIMPRO_IIC_ENCODE_ADDR(addr); msg[2] = data; - ctx->resp_msg = msg; - - rc = mbox_send_message(ctx->mbox_chan, &msg); - if (rc < 0) - goto err; - rc = start_i2c_msg_xfer(ctx); -err: - ctx->resp_msg = NULL; - return rc; + return slimpro_i2c_send_msg(ctx, msg, msg); } static int slimpro_i2c_blkrd(struct slimpro_i2c_dev *ctx, u32 chip, u32 addr, @@ -201,8 +288,7 @@ static int slimpro_i2c_blkrd(struct slimpro_i2c_dev *ctx, u32 chip, u32 addr, if (dma_mapping_error(ctx->dev, paddr)) { dev_err(&ctx->adapter.dev, "Error in mapping dma buffer %p\n", ctx->dma_buffer); - rc = -ENOMEM; - goto err; + return -ENOMEM; } msg[0] = SLIMPRO_IIC_ENCODE_MSG(SLIMPRO_IIC_BUS, chip, SLIMPRO_IIC_READ, @@ -212,21 +298,13 @@ static int slimpro_i2c_blkrd(struct slimpro_i2c_dev *ctx, u32 chip, u32 addr, SLIMPRO_IIC_ENCODE_UPPER_BUFADDR(paddr) | SLIMPRO_IIC_ENCODE_ADDR(addr); msg[2] = (u32)paddr; - ctx->resp_msg = msg; - - rc = 
mbox_send_message(ctx->mbox_chan, &msg);
-	if (rc < 0)
-		goto err_unmap;
-	rc = start_i2c_msg_xfer(ctx);
+	rc = slimpro_i2c_send_msg(ctx, msg, msg);
 	/* Copy to destination */
 	memcpy(data, ctx->dma_buffer, readlen);
-err_unmap:
 	dma_unmap_single(ctx->dev, paddr, readlen, DMA_FROM_DEVICE);
-err:
-	ctx->resp_msg = NULL;
 	return rc;
 }
@@ -244,8 +322,7 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip,
 	if (dma_mapping_error(ctx->dev, paddr)) {
 		dev_err(&ctx->adapter.dev, "Error in mapping dma buffer %p\n",
 			ctx->dma_buffer);
-		rc = -ENOMEM;
-		goto err;
+		return -ENOMEM;
 	}
 	msg[0] = SLIMPRO_IIC_ENCODE_MSG(SLIMPRO_IIC_BUS, chip, SLIMPRO_IIC_WRITE,
@@ -254,21 +331,13 @@ static int slimpro_i2c_blkwr(struct slimpro_i2c_dev *ctx, u32 chip,
 	      SLIMPRO_IIC_ENCODE_UPPER_BUFADDR(paddr) |
 	      SLIMPRO_IIC_ENCODE_ADDR(addr);
 	msg[2] = (u32)paddr;
-	ctx->resp_msg = msg;
 	if (ctx->mbox_client.tx_block)
 		reinit_completion(&ctx->rd_complete);
-	rc = mbox_send_message(ctx->mbox_chan, &msg);
-	if (rc < 0)
-		goto err_unmap;
+	rc = slimpro_i2c_send_msg(ctx, msg, msg);
-	rc = start_i2c_msg_xfer(ctx);
-
-err_unmap:
 	dma_unmap_single(ctx->dev, paddr, writelen, DMA_TO_DEVICE);
-err:
-	ctx->resp_msg = NULL;
 	return rc;
 }
@@ -394,17 +463,73 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
 	/* Request mailbox channel */
 	cl->dev = &pdev->dev;
-	cl->rx_callback = slimpro_i2c_rx_cb;
-	cl->tx_block = true;
 	init_completion(&ctx->rd_complete);
 	cl->tx_tout = MAILBOX_OP_TIMEOUT;
 	cl->knows_txdone = false;
-	ctx->mbox_chan = mbox_request_channel(cl, MAILBOX_I2C_INDEX);
-	if (IS_ERR(ctx->mbox_chan)) {
-		dev_err(&pdev->dev, "i2c mailbox channel request failed\n");
-		return PTR_ERR(ctx->mbox_chan);
-	}
+	if (acpi_disabled) {
+		cl->tx_block = true;
+		cl->rx_callback = slimpro_i2c_rx_cb;
+		ctx->mbox_chan = mbox_request_channel(cl, MAILBOX_I2C_INDEX);
+		if (IS_ERR(ctx->mbox_chan)) {
+			dev_err(&pdev->dev, "i2c mailbox channel request failed\n");
+			return PTR_ERR(ctx->mbox_chan);
+		}
+	} else {
+		struct acpi_pcct_hw_reduced *cppc_ss;
+
+		if (device_property_read_u32(&pdev->dev, "pcc-channel",
+					     &ctx->mbox_idx))
+			ctx->mbox_idx = MAILBOX_I2C_INDEX;
+
+		cl->tx_block = false;
+		cl->rx_callback = slimpro_i2c_pcc_rx_cb;
+		ctx->mbox_chan = pcc_mbox_request_channel(cl, ctx->mbox_idx);
+		if (IS_ERR(ctx->mbox_chan)) {
+			dev_err(&pdev->dev, "PCC mailbox channel request failed\n");
+			return PTR_ERR(ctx->mbox_chan);
+		}
+
+		/*
+		 * The PCC mailbox controller driver should
+		 * have parsed the PCCT (global table of all
+		 * PCC channels) and stored pointers to the
+		 * subspace communication region in con_priv.
+		 */
+		cppc_ss = ctx->mbox_chan->con_priv;
+		if (!cppc_ss) {
+			dev_err(&pdev->dev, "PCC subspace not found\n");
+			rc = -ENOENT;
+			goto mbox_err;
+		}
+
+		if (!ctx->mbox_chan->mbox->txdone_irq) {
+			dev_err(&pdev->dev, "PCC IRQ not supported\n");
+			rc = -ENOENT;
+			goto mbox_err;
+		}
+
+		/*
+		 * This is the shared communication region
+		 * for the OS and Platform to communicate over.
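+		 * The region begins with the acpi_pcct_shared_memory
+		 * header; slimpro_i2c_pcc_tx_prepare() places its
+		 * three-dword command payload immediately after that
+		 * header.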
+		 */
+		ctx->comm_base_addr = cppc_ss->base_address;
+		if (ctx->comm_base_addr) {
+			ctx->pcc_comm_addr = memremap(ctx->comm_base_addr,
+						      cppc_ss->length,
+						      MEMREMAP_WB);
+		} else {
+			dev_err(&pdev->dev, "Failed to get PCC comm region\n");
+			rc = -ENOENT;
+			goto mbox_err;
+		}
+		if (!ctx->pcc_comm_addr) {
+			dev_err(&pdev->dev,
+				"Failed to memremap PCC comm region\n");
+			rc = -ENOMEM;
+			goto mbox_err;
+		}
+	}
 	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 	if (rc)
 		dev_warn(&pdev->dev, "Unable to set dma mask\n");
@@ -419,13 +544,19 @@ static int xgene_slimpro_i2c_probe(struct platform_device *pdev)
 	ACPI_COMPANION_SET(&adapter->dev, ACPI_COMPANION(&pdev->dev));
 	i2c_set_adapdata(adapter, ctx);
 	rc = i2c_add_adapter(adapter);
-	if (rc) {
-		mbox_free_channel(ctx->mbox_chan);
-		return rc;
-	}
+	if (rc)
+		goto mbox_err;
 	dev_info(&pdev->dev, "Mailbox I2C Adapter registered\n");
 	return 0;
+
+mbox_err:
+	if (acpi_disabled)
+		mbox_free_channel(ctx->mbox_chan);
+	else
+		pcc_mbox_free_channel(ctx->mbox_chan);
+
+	return rc;
 }
 static int xgene_slimpro_i2c_remove(struct platform_device *pdev)
@@ -434,7 +565,10 @@ static int xgene_slimpro_i2c_remove(struct platform_device *pdev)
 	i2c_del_adapter(&ctx->adapter);
-	mbox_free_channel(ctx->mbox_chan);
+	if (acpi_disabled)
+		mbox_free_channel(ctx->mbox_chan);
+	else
+		pcc_mbox_free_channel(ctx->mbox_chan);
 	return 0;
 }
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index ae80228104e9..6b106e94bc09 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -393,6 +393,7 @@ static int xlp9xx_i2c_probe(struct platform_device *pdev)
 	init_completion(&priv->msg_complete);
 	priv->adapter.dev.parent = &pdev->dev;
 	priv->adapter.algo = &xlp9xx_i2c_algo;
+	priv->adapter.class = I2C_CLASS_HWMON;
 	ACPI_COMPANION_SET(&priv->adapter.dev, ACPI_COMPANION(&pdev->dev));
 	priv->adapter.dev.of_node = pdev->dev.of_node;
 	priv->dev = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-zx2967.c b/drivers/i2c/busses/i2c-zx2967.c
new file mode 100644
index 000000000000..48281c1b30c6
--- /dev/null
+++ b/drivers/i2c/busses/i2c-zx2967.c
@@ -0,0 +1,609 @@
+/*
+ * Copyright (C) 2017 Sanechips Technology Co., Ltd.
+ * Copyright 2017 Linaro Ltd.
+ *
+ * Author: Baoyou Xie <baoyou.xie@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */ + +#include <linux/clk.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/platform_device.h> + +#define REG_CMD 0x04 +#define REG_DEVADDR_H 0x0C +#define REG_DEVADDR_L 0x10 +#define REG_CLK_DIV_FS 0x14 +#define REG_CLK_DIV_HS 0x18 +#define REG_WRCONF 0x1C +#define REG_RDCONF 0x20 +#define REG_DATA 0x24 +#define REG_STAT 0x28 + +#define I2C_STOP 0 +#define I2C_MASTER BIT(0) +#define I2C_ADDR_MODE_TEN BIT(1) +#define I2C_IRQ_MSK_ENABLE BIT(3) +#define I2C_RW_READ BIT(4) +#define I2C_CMB_RW_EN BIT(5) +#define I2C_START BIT(6) + +#define I2C_ADDR_LOW_MASK GENMASK(6, 0) +#define I2C_ADDR_LOW_SHIFT 0 +#define I2C_ADDR_HI_MASK GENMASK(2, 0) +#define I2C_ADDR_HI_SHIFT 7 + +#define I2C_WFIFO_RESET BIT(7) +#define I2C_RFIFO_RESET BIT(7) + +#define I2C_IRQ_ACK_CLEAR BIT(7) +#define I2C_INT_MASK GENMASK(6, 0) + +#define I2C_TRANS_DONE BIT(0) +#define I2C_SR_EDEVICE BIT(1) +#define I2C_SR_EDATA BIT(2) + +#define I2C_FIFO_MAX 16 + +#define I2C_TIMEOUT msecs_to_jiffies(1000) + +#define DEV(i2c) ((i2c)->adap.dev.parent) + +struct zx2967_i2c { + struct i2c_adapter adap; + struct clk *clk; + struct completion complete; + u32 clk_freq; + void __iomem *reg_base; + size_t residue; + int irq; + int msg_rd; + u8 *cur_trans; + u8 access_cnt; + bool is_suspended; + int error; +}; + +static void zx2967_i2c_writel(struct zx2967_i2c *i2c, + u32 val, unsigned long reg) +{ + writel_relaxed(val, i2c->reg_base + reg); +} + +static u32 zx2967_i2c_readl(struct zx2967_i2c *i2c, unsigned long reg) +{ + return readl_relaxed(i2c->reg_base + reg); +} + +static void zx2967_i2c_writesb(struct zx2967_i2c *i2c, + void *data, unsigned long reg, int len) +{ + writesb(i2c->reg_base + reg, data, len); +} + +static void zx2967_i2c_readsb(struct zx2967_i2c *i2c, + void *data, unsigned long reg, int len) +{ + readsb(i2c->reg_base + reg, data, len); +} + +static void zx2967_i2c_start_ctrl(struct zx2967_i2c *i2c) +{ + u32 status; + u32 ctl; + + status = zx2967_i2c_readl(i2c, REG_STAT); + status |= I2C_IRQ_ACK_CLEAR; + zx2967_i2c_writel(i2c, status, REG_STAT); + + ctl = zx2967_i2c_readl(i2c, REG_CMD); + if (i2c->msg_rd) + ctl |= I2C_RW_READ; + else + ctl &= ~I2C_RW_READ; + ctl &= ~I2C_CMB_RW_EN; + ctl |= I2C_START; + zx2967_i2c_writel(i2c, ctl, REG_CMD); +} + +static void zx2967_i2c_flush_fifos(struct zx2967_i2c *i2c) +{ + u32 offset; + u32 val; + + if (i2c->msg_rd) { + offset = REG_RDCONF; + val = I2C_RFIFO_RESET; + } else { + offset = REG_WRCONF; + val = I2C_WFIFO_RESET; + } + + val |= zx2967_i2c_readl(i2c, offset); + zx2967_i2c_writel(i2c, val, offset); +} + +static int zx2967_i2c_empty_rx_fifo(struct zx2967_i2c *i2c, u32 size) +{ + u8 val[I2C_FIFO_MAX] = {0}; + int i; + + if (size > I2C_FIFO_MAX) { + dev_err(DEV(i2c), "fifo size %d over the max value %d\n", + size, I2C_FIFO_MAX); + return -EINVAL; + } + + zx2967_i2c_readsb(i2c, val, REG_DATA, size); + for (i = 0; i < size; i++) { + *i2c->cur_trans++ = val[i]; + i2c->residue--; + } + + barrier(); + + return 0; +} + +static int zx2967_i2c_fill_tx_fifo(struct zx2967_i2c *i2c) +{ + size_t residue = i2c->residue; + u8 *buf = i2c->cur_trans; + + if (residue == 0) { + dev_err(DEV(i2c), "residue is %d\n", (int)residue); + return -EINVAL; + } + + if (residue <= I2C_FIFO_MAX) { + zx2967_i2c_writesb(i2c, buf, REG_DATA, residue); + + /* Again update before writing to FIFO to make sure isr sees. 
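That is, residue and cur_trans are cleared before the caller kicks off the transfer, so the interrupt handler never observes stale bookkeeping.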
*/ + i2c->residue = 0; + i2c->cur_trans = NULL; + } else { + zx2967_i2c_writesb(i2c, buf, REG_DATA, I2C_FIFO_MAX); + i2c->residue -= I2C_FIFO_MAX; + i2c->cur_trans += I2C_FIFO_MAX; + } + + barrier(); + + return 0; +} + +static int zx2967_i2c_reset_hardware(struct zx2967_i2c *i2c) +{ + u32 val; + u32 clk_div; + + val = I2C_MASTER | I2C_IRQ_MSK_ENABLE; + zx2967_i2c_writel(i2c, val, REG_CMD); + + clk_div = clk_get_rate(i2c->clk) / i2c->clk_freq - 1; + zx2967_i2c_writel(i2c, clk_div, REG_CLK_DIV_FS); + zx2967_i2c_writel(i2c, clk_div, REG_CLK_DIV_HS); + + zx2967_i2c_writel(i2c, I2C_FIFO_MAX - 1, REG_WRCONF); + zx2967_i2c_writel(i2c, I2C_FIFO_MAX - 1, REG_RDCONF); + zx2967_i2c_writel(i2c, 1, REG_RDCONF); + + zx2967_i2c_flush_fifos(i2c); + + return 0; +} + +static void zx2967_i2c_isr_clr(struct zx2967_i2c *i2c) +{ + u32 status; + + status = zx2967_i2c_readl(i2c, REG_STAT); + status |= I2C_IRQ_ACK_CLEAR; + zx2967_i2c_writel(i2c, status, REG_STAT); +} + +static irqreturn_t zx2967_i2c_isr(int irq, void *dev_id) +{ + u32 status; + struct zx2967_i2c *i2c = (struct zx2967_i2c *)dev_id; + + status = zx2967_i2c_readl(i2c, REG_STAT) & I2C_INT_MASK; + zx2967_i2c_isr_clr(i2c); + + if (status & I2C_SR_EDEVICE) + i2c->error = -ENXIO; + else if (status & I2C_SR_EDATA) + i2c->error = -EIO; + else if (status & I2C_TRANS_DONE) + i2c->error = 0; + else + goto done; + + complete(&i2c->complete); +done: + return IRQ_HANDLED; +} + +static void zx2967_set_addr(struct zx2967_i2c *i2c, u16 addr) +{ + u16 val; + + val = (addr >> I2C_ADDR_LOW_SHIFT) & I2C_ADDR_LOW_MASK; + zx2967_i2c_writel(i2c, val, REG_DEVADDR_L); + + val = (addr >> I2C_ADDR_HI_SHIFT) & I2C_ADDR_HI_MASK; + zx2967_i2c_writel(i2c, val, REG_DEVADDR_H); + if (val) + val = zx2967_i2c_readl(i2c, REG_CMD) | I2C_ADDR_MODE_TEN; + else + val = zx2967_i2c_readl(i2c, REG_CMD) & ~I2C_ADDR_MODE_TEN; + zx2967_i2c_writel(i2c, val, REG_CMD); +} + +static int zx2967_i2c_xfer_bytes(struct zx2967_i2c *i2c, u32 bytes) +{ + unsigned long time_left; + int rd = i2c->msg_rd; + int ret; + + reinit_completion(&i2c->complete); + + if (rd) { + zx2967_i2c_writel(i2c, bytes - 1, REG_RDCONF); + } else { + ret = zx2967_i2c_fill_tx_fifo(i2c); + if (ret) + return ret; + } + + zx2967_i2c_start_ctrl(i2c); + + time_left = wait_for_completion_timeout(&i2c->complete, + I2C_TIMEOUT); + if (time_left == 0) + return -ETIMEDOUT; + + if (i2c->error) + return i2c->error; + + return rd ? 
zx2967_i2c_empty_rx_fifo(i2c, bytes) : 0; +} + +static int zx2967_i2c_xfer_msg(struct zx2967_i2c *i2c, + struct i2c_msg *msg) +{ + int ret; + int i; + + if (msg->len == 0) + return -EINVAL; + + zx2967_i2c_flush_fifos(i2c); + + i2c->cur_trans = msg->buf; + i2c->residue = msg->len; + i2c->access_cnt = msg->len / I2C_FIFO_MAX; + i2c->msg_rd = msg->flags & I2C_M_RD; + + for (i = 0; i < i2c->access_cnt; i++) { + ret = zx2967_i2c_xfer_bytes(i2c, I2C_FIFO_MAX); + if (ret) + return ret; + } + + if (i2c->residue > 0) { + ret = zx2967_i2c_xfer_bytes(i2c, i2c->residue); + if (ret) + return ret; + } + + i2c->residue = 0; + i2c->access_cnt = 0; + + return 0; +} + +static int zx2967_i2c_xfer(struct i2c_adapter *adap, + struct i2c_msg *msgs, int num) +{ + struct zx2967_i2c *i2c = i2c_get_adapdata(adap); + int ret; + int i; + + if (i2c->is_suspended) + return -EBUSY; + + zx2967_set_addr(i2c, msgs->addr); + + for (i = 0; i < num; i++) { + ret = zx2967_i2c_xfer_msg(i2c, &msgs[i]); + if (ret) + return ret; + } + + return num; +} + +static void +zx2967_smbus_xfer_prepare(struct zx2967_i2c *i2c, u16 addr, + char read_write, u8 command, int size, + union i2c_smbus_data *data) +{ + u32 val; + + val = zx2967_i2c_readl(i2c, REG_RDCONF); + val |= I2C_RFIFO_RESET; + zx2967_i2c_writel(i2c, val, REG_RDCONF); + zx2967_set_addr(i2c, addr); + val = zx2967_i2c_readl(i2c, REG_CMD); + val &= ~I2C_RW_READ; + zx2967_i2c_writel(i2c, val, REG_CMD); + + switch (size) { + case I2C_SMBUS_BYTE: + zx2967_i2c_writel(i2c, command, REG_DATA); + break; + case I2C_SMBUS_BYTE_DATA: + zx2967_i2c_writel(i2c, command, REG_DATA); + if (read_write == I2C_SMBUS_WRITE) + zx2967_i2c_writel(i2c, data->byte, REG_DATA); + break; + case I2C_SMBUS_WORD_DATA: + zx2967_i2c_writel(i2c, command, REG_DATA); + if (read_write == I2C_SMBUS_WRITE) { + zx2967_i2c_writel(i2c, (data->word >> 8), REG_DATA); + zx2967_i2c_writel(i2c, (data->word & 0xff), + REG_DATA); + } + break; + } +} + +static int zx2967_smbus_xfer_read(struct zx2967_i2c *i2c, int size, + union i2c_smbus_data *data) +{ + unsigned long time_left; + u8 buf[2]; + u32 val; + + reinit_completion(&i2c->complete); + + val = zx2967_i2c_readl(i2c, REG_CMD); + val |= I2C_CMB_RW_EN; + zx2967_i2c_writel(i2c, val, REG_CMD); + + val = zx2967_i2c_readl(i2c, REG_CMD); + val |= I2C_START; + zx2967_i2c_writel(i2c, val, REG_CMD); + + time_left = wait_for_completion_timeout(&i2c->complete, + I2C_TIMEOUT); + if (time_left == 0) + return -ETIMEDOUT; + + if (i2c->error) + return i2c->error; + + switch (size) { + case I2C_SMBUS_BYTE: + case I2C_SMBUS_BYTE_DATA: + val = zx2967_i2c_readl(i2c, REG_DATA); + data->byte = val; + break; + case I2C_SMBUS_WORD_DATA: + case I2C_SMBUS_PROC_CALL: + buf[0] = zx2967_i2c_readl(i2c, REG_DATA); + buf[1] = zx2967_i2c_readl(i2c, REG_DATA); + data->word = (buf[0] << 8) | buf[1]; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int zx2967_smbus_xfer_write(struct zx2967_i2c *i2c) +{ + unsigned long time_left; + u32 val; + + reinit_completion(&i2c->complete); + val = zx2967_i2c_readl(i2c, REG_CMD); + val |= I2C_START; + zx2967_i2c_writel(i2c, val, REG_CMD); + + time_left = wait_for_completion_timeout(&i2c->complete, + I2C_TIMEOUT); + if (time_left == 0) + return -ETIMEDOUT; + + if (i2c->error) + return i2c->error; + + return 0; +} + +static int zx2967_smbus_xfer(struct i2c_adapter *adap, u16 addr, + unsigned short flags, char read_write, + u8 command, int size, union i2c_smbus_data *data) +{ + struct zx2967_i2c *i2c = i2c_get_adapdata(adap); + + if (size == 
I2C_SMBUS_QUICK) + read_write = I2C_SMBUS_WRITE; + + switch (size) { + case I2C_SMBUS_QUICK: + case I2C_SMBUS_BYTE: + case I2C_SMBUS_BYTE_DATA: + case I2C_SMBUS_WORD_DATA: + zx2967_smbus_xfer_prepare(i2c, addr, read_write, + command, size, data); + break; + default: + return -EOPNOTSUPP; + } + + if (read_write == I2C_SMBUS_READ) + return zx2967_smbus_xfer_read(i2c, size, data); + + return zx2967_smbus_xfer_write(i2c); +} + +static u32 zx2967_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | + I2C_FUNC_SMBUS_QUICK | + I2C_FUNC_SMBUS_BYTE | + I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA | + I2C_FUNC_SMBUS_PROC_CALL | + I2C_FUNC_SMBUS_I2C_BLOCK; +} + +static int __maybe_unused zx2967_i2c_suspend(struct device *dev) +{ + struct zx2967_i2c *i2c = dev_get_drvdata(dev); + + i2c->is_suspended = true; + clk_disable_unprepare(i2c->clk); + + return 0; +} + +static int __maybe_unused zx2967_i2c_resume(struct device *dev) +{ + struct zx2967_i2c *i2c = dev_get_drvdata(dev); + + i2c->is_suspended = false; + clk_prepare_enable(i2c->clk); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(zx2967_i2c_dev_pm_ops, + zx2967_i2c_suspend, zx2967_i2c_resume); + +static const struct i2c_algorithm zx2967_i2c_algo = { + .master_xfer = zx2967_i2c_xfer, + .smbus_xfer = zx2967_smbus_xfer, + .functionality = zx2967_i2c_func, +}; + +static const struct of_device_id zx2967_i2c_of_match[] = { + { .compatible = "zte,zx296718-i2c", }, + { }, +}; +MODULE_DEVICE_TABLE(of, zx2967_i2c_of_match); + +static int zx2967_i2c_probe(struct platform_device *pdev) +{ + struct zx2967_i2c *i2c; + void __iomem *reg_base; + struct resource *res; + struct clk *clk; + int ret; + + i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL); + if (!i2c) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + reg_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(reg_base)) + return PTR_ERR(reg_base); + + clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "missing controller clock"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + dev_err(&pdev->dev, "failed to enable i2c_clk\n"); + return ret; + } + + ret = device_property_read_u32(&pdev->dev, "clock-frequency", + &i2c->clk_freq); + if (ret) { + dev_err(&pdev->dev, "missing clock-frequency"); + return ret; + } + + ret = platform_get_irq(pdev, 0); + if (ret < 0) + return ret; + + i2c->irq = ret; + i2c->reg_base = reg_base; + i2c->clk = clk; + + init_completion(&i2c->complete); + platform_set_drvdata(pdev, i2c); + + ret = zx2967_i2c_reset_hardware(i2c); + if (ret) { + dev_err(&pdev->dev, "failed to initialize i2c controller\n"); + goto err_clk_unprepare; + } + + ret = devm_request_irq(&pdev->dev, i2c->irq, + zx2967_i2c_isr, 0, dev_name(&pdev->dev), i2c); + if (ret) { + dev_err(&pdev->dev, "failed to request irq %i\n", i2c->irq); + goto err_clk_unprepare; + } + + i2c_set_adapdata(&i2c->adap, i2c); + strlcpy(i2c->adap.name, "zx2967 i2c adapter", + sizeof(i2c->adap.name)); + i2c->adap.algo = &zx2967_i2c_algo; + i2c->adap.nr = pdev->id; + i2c->adap.dev.parent = &pdev->dev; + i2c->adap.dev.of_node = pdev->dev.of_node; + + ret = i2c_add_numbered_adapter(&i2c->adap); + if (ret) + goto err_clk_unprepare; + + return 0; + +err_clk_unprepare: + clk_disable_unprepare(i2c->clk); + return ret; +} + +static int zx2967_i2c_remove(struct platform_device *pdev) +{ + struct zx2967_i2c *i2c = platform_get_drvdata(pdev); + + i2c_del_adapter(&i2c->adap); + clk_disable_unprepare(i2c->clk); 
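+	/*
+	 * Note the ordering: the adapter is deleted before its clock is
+	 * gated, so no transfer can still be in flight once the
+	 * controller loses its clock.
+	 */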
+ + return 0; +} + +static struct platform_driver zx2967_i2c_driver = { + .probe = zx2967_i2c_probe, + .remove = zx2967_i2c_remove, + .driver = { + .name = "zx2967_i2c", + .of_match_table = zx2967_i2c_of_match, + .pm = &zx2967_i2c_dev_pm_ops, + }, +}; +module_platform_driver(zx2967_i2c_driver); + +MODULE_AUTHOR("Baoyou Xie <baoyou.xie@linaro.org>"); +MODULE_DESCRIPTION("ZTE ZX2967 I2C Bus Controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/i2c/i2c-core-acpi.c b/drivers/i2c/i2c-core-acpi.c new file mode 100644 index 000000000000..4842ec3a5451 --- /dev/null +++ b/drivers/i2c/i2c-core-acpi.c @@ -0,0 +1,665 @@ +/* + * Linux I2C core ACPI support code + * + * Copyright (C) 2014 Intel Corp, Author: Lan Tianyu <tianyu.lan@intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + */ + +#include <linux/acpi.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/slab.h> + +#include "i2c-core.h" + +struct i2c_acpi_handler_data { + struct acpi_connection_info info; + struct i2c_adapter *adapter; +}; + +struct gsb_buffer { + u8 status; + u8 len; + union { + u16 wdata; + u8 bdata; + u8 data[0]; + }; +} __packed; + +struct i2c_acpi_lookup { + struct i2c_board_info *info; + acpi_handle adapter_handle; + acpi_handle device_handle; + acpi_handle search_handle; + int n; + int index; + u32 speed; + u32 min_speed; +}; + +static int i2c_acpi_fill_info(struct acpi_resource *ares, void *data) +{ + struct i2c_acpi_lookup *lookup = data; + struct i2c_board_info *info = lookup->info; + struct acpi_resource_i2c_serialbus *sb; + acpi_status status; + + if (info->addr || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) + return 1; + + sb = &ares->data.i2c_serial_bus; + if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) + return 1; + + if (lookup->index != -1 && lookup->n++ != lookup->index) + return 1; + + status = acpi_get_handle(lookup->device_handle, + sb->resource_source.string_ptr, + &lookup->adapter_handle); + if (!ACPI_SUCCESS(status)) + return 1; + + info->addr = sb->slave_address; + lookup->speed = sb->connection_speed; + if (sb->access_mode == ACPI_I2C_10BIT_MODE) + info->flags |= I2C_CLIENT_TEN; + + return 1; +} + +static const struct acpi_device_id i2c_acpi_ignored_device_ids[] = { + /* + * ACPI video acpi_devices, which are handled by the acpi-video driver + * sometimes contain a SERIAL_TYPE_I2C ACPI resource, ignore these. 
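+ * Any device matching an entry in this table is rejected early in
+ * i2c_acpi_do_lookup() via acpi_match_device_ids().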
+ */ + { ACPI_VIDEO_HID, 0 }, + {} +}; + +static int i2c_acpi_do_lookup(struct acpi_device *adev, + struct i2c_acpi_lookup *lookup) +{ + struct i2c_board_info *info = lookup->info; + struct list_head resource_list; + int ret; + + if (acpi_bus_get_status(adev) || !adev->status.present || + acpi_device_enumerated(adev)) + return -EINVAL; + + if (acpi_match_device_ids(adev, i2c_acpi_ignored_device_ids) == 0) + return -ENODEV; + + memset(info, 0, sizeof(*info)); + lookup->device_handle = acpi_device_handle(adev); + + /* Look up for I2cSerialBus resource */ + INIT_LIST_HEAD(&resource_list); + ret = acpi_dev_get_resources(adev, &resource_list, + i2c_acpi_fill_info, lookup); + acpi_dev_free_resource_list(&resource_list); + + if (ret < 0 || !info->addr) + return -EINVAL; + + return 0; +} + +static int i2c_acpi_get_info(struct acpi_device *adev, + struct i2c_board_info *info, + struct i2c_adapter *adapter, + acpi_handle *adapter_handle) +{ + struct list_head resource_list; + struct resource_entry *entry; + struct i2c_acpi_lookup lookup; + int ret; + + memset(&lookup, 0, sizeof(lookup)); + lookup.info = info; + lookup.index = -1; + + ret = i2c_acpi_do_lookup(adev, &lookup); + if (ret) + return ret; + + if (adapter) { + /* The adapter must match the one in I2cSerialBus() connector */ + if (ACPI_HANDLE(&adapter->dev) != lookup.adapter_handle) + return -ENODEV; + } else { + struct acpi_device *adapter_adev; + + /* The adapter must be present */ + if (acpi_bus_get_device(lookup.adapter_handle, &adapter_adev)) + return -ENODEV; + if (acpi_bus_get_status(adapter_adev) || + !adapter_adev->status.present) + return -ENODEV; + } + + info->fwnode = acpi_fwnode_handle(adev); + if (adapter_handle) + *adapter_handle = lookup.adapter_handle; + + /* Then fill IRQ number if any */ + INIT_LIST_HEAD(&resource_list); + ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); + if (ret < 0) + return -EINVAL; + + resource_list_for_each_entry(entry, &resource_list) { + if (resource_type(entry->res) == IORESOURCE_IRQ) { + info->irq = entry->res->start; + break; + } + } + + acpi_dev_free_resource_list(&resource_list); + + acpi_set_modalias(adev, dev_name(&adev->dev), info->type, + sizeof(info->type)); + + return 0; +} + +static void i2c_acpi_register_device(struct i2c_adapter *adapter, + struct acpi_device *adev, + struct i2c_board_info *info) +{ + adev->power.flags.ignore_parent = true; + acpi_device_set_enumerated(adev); + + if (!i2c_new_device(adapter, info)) { + adev->power.flags.ignore_parent = false; + dev_err(&adapter->dev, + "failed to add I2C device %s from ACPI\n", + dev_name(&adev->dev)); + } +} + +static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level, + void *data, void **return_value) +{ + struct i2c_adapter *adapter = data; + struct acpi_device *adev; + struct i2c_board_info info; + + if (acpi_bus_get_device(handle, &adev)) + return AE_OK; + + if (i2c_acpi_get_info(adev, &info, adapter, NULL)) + return AE_OK; + + i2c_acpi_register_device(adapter, adev, &info); + + return AE_OK; +} + +#define I2C_ACPI_MAX_SCAN_DEPTH 32 + +/** + * i2c_acpi_register_devices - enumerate I2C slave devices behind adapter + * @adap: pointer to adapter + * + * Enumerate all I2C slave devices behind this adapter by walking the ACPI + * namespace. When a device is found it will be added to the Linux device + * model and bound to the corresponding ACPI handle. 
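+ * Devices that are absent or already enumerated are skipped by
+ * i2c_acpi_do_lookup(), so nothing is registered twice.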
+ */
+void i2c_acpi_register_devices(struct i2c_adapter *adap)
+{
+	acpi_status status;
+
+	if (!has_acpi_companion(&adap->dev))
+		return;
+
+	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+				     I2C_ACPI_MAX_SCAN_DEPTH,
+				     i2c_acpi_add_device, NULL,
+				     adap, NULL);
+	if (ACPI_FAILURE(status))
+		dev_warn(&adap->dev, "failed to enumerate I2C slaves\n");
+}
+
+static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level,
+					 void *data, void **return_value)
+{
+	struct i2c_acpi_lookup *lookup = data;
+	struct acpi_device *adev;
+
+	if (acpi_bus_get_device(handle, &adev))
+		return AE_OK;
+
+	if (i2c_acpi_do_lookup(adev, lookup))
+		return AE_OK;
+
+	if (lookup->search_handle != lookup->adapter_handle)
+		return AE_OK;
+
+	if (lookup->speed <= lookup->min_speed)
+		lookup->min_speed = lookup->speed;
+
+	return AE_OK;
+}
+
+/**
+ * i2c_acpi_find_bus_speed - find I2C bus speed from ACPI
+ * @dev: The device owning the bus
+ *
+ * Find the I2C bus speed by walking the ACPI namespace for all I2C slave
+ * devices connected to this bus, using the speed of the slowest device.
+ *
+ * Returns the speed in Hz or zero
+ */
+u32 i2c_acpi_find_bus_speed(struct device *dev)
+{
+	struct i2c_acpi_lookup lookup;
+	struct i2c_board_info dummy;
+	acpi_status status;
+
+	if (!has_acpi_companion(dev))
+		return 0;
+
+	memset(&lookup, 0, sizeof(lookup));
+	lookup.search_handle = ACPI_HANDLE(dev);
+	lookup.min_speed = UINT_MAX;
+	lookup.info = &dummy;
+	lookup.index = -1;
+
+	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
+				     I2C_ACPI_MAX_SCAN_DEPTH,
+				     i2c_acpi_lookup_speed, NULL,
+				     &lookup, NULL);
+
+	if (ACPI_FAILURE(status)) {
+		dev_warn(dev, "unable to find I2C bus speed from ACPI\n");
+		return 0;
+	}
+
+	return lookup.min_speed != UINT_MAX ? lookup.min_speed : 0;
+}
+EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed);
+
+static int i2c_acpi_match_adapter(struct device *dev, void *data)
+{
+	struct i2c_adapter *adapter = i2c_verify_adapter(dev);
+
+	if (!adapter)
+		return 0;
+
+	return ACPI_HANDLE(dev) == (acpi_handle)data;
+}
+
+static int i2c_acpi_match_device(struct device *dev, void *data)
+{
+	return ACPI_COMPANION(dev) == data;
+}
+
+static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&i2c_bus_type, NULL, handle,
+			      i2c_acpi_match_adapter);
+	return dev ? i2c_verify_adapter(dev) : NULL;
+}
+
+static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev)
+{
+	struct device *dev;
+
+	dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device);
+	return dev ?
i2c_verify_client(dev) : NULL; +} + +static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value, + void *arg) +{ + struct acpi_device *adev = arg; + struct i2c_board_info info; + acpi_handle adapter_handle; + struct i2c_adapter *adapter; + struct i2c_client *client; + + switch (value) { + case ACPI_RECONFIG_DEVICE_ADD: + if (i2c_acpi_get_info(adev, &info, NULL, &adapter_handle)) + break; + + adapter = i2c_acpi_find_adapter_by_handle(adapter_handle); + if (!adapter) + break; + + i2c_acpi_register_device(adapter, adev, &info); + break; + case ACPI_RECONFIG_DEVICE_REMOVE: + if (!acpi_device_enumerated(adev)) + break; + + client = i2c_acpi_find_client_by_adev(adev); + if (!client) + break; + + i2c_unregister_device(client); + put_device(&client->dev); + break; + } + + return NOTIFY_OK; +} + +struct notifier_block i2c_acpi_notifier = { + .notifier_call = i2c_acpi_notify, +}; + +/** + * i2c_acpi_new_device - Create i2c-client for the Nth I2cSerialBus resource + * @dev: Device owning the ACPI resources to get the client from + * @index: Index of ACPI resource to get + * @info: describes the I2C device; note this is modified (addr gets set) + * Context: can sleep + * + * By default the i2c subsys creates an i2c-client for the first I2cSerialBus + * resource of an acpi_device, but some acpi_devices have multiple I2cSerialBus + * resources, in that case this function can be used to create an i2c-client + * for other I2cSerialBus resources in the Current Resource Settings table. + * + * Also see i2c_new_device, which this function calls to create the i2c-client. + * + * Returns a pointer to the new i2c-client, or NULL if the adapter is not found. + */ +struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, + struct i2c_board_info *info) +{ + struct i2c_acpi_lookup lookup; + struct i2c_adapter *adapter; + struct acpi_device *adev; + LIST_HEAD(resource_list); + int ret; + + adev = ACPI_COMPANION(dev); + if (!adev) + return NULL; + + memset(&lookup, 0, sizeof(lookup)); + lookup.info = info; + lookup.device_handle = acpi_device_handle(adev); + lookup.index = index; + + ret = acpi_dev_get_resources(adev, &resource_list, + i2c_acpi_fill_info, &lookup); + acpi_dev_free_resource_list(&resource_list); + + if (ret < 0 || !info->addr) + return NULL; + + adapter = i2c_acpi_find_adapter_by_handle(lookup.adapter_handle); + if (!adapter) + return NULL; + + return i2c_new_device(adapter, info); +} +EXPORT_SYMBOL_GPL(i2c_acpi_new_device); + +#ifdef CONFIG_ACPI_I2C_OPREGION +static int acpi_gsb_i2c_read_bytes(struct i2c_client *client, + u8 cmd, u8 *data, u8 data_len) +{ + + struct i2c_msg msgs[2]; + int ret; + u8 *buffer; + + buffer = kzalloc(data_len, GFP_KERNEL); + if (!buffer) + return AE_NO_MEMORY; + + msgs[0].addr = client->addr; + msgs[0].flags = client->flags; + msgs[0].len = 1; + msgs[0].buf = &cmd; + + msgs[1].addr = client->addr; + msgs[1].flags = client->flags | I2C_M_RD; + msgs[1].len = data_len; + msgs[1].buf = buffer; + + ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + dev_err(&client->adapter->dev, "i2c read failed\n"); + else + memcpy(data, buffer, data_len); + + kfree(buffer); + return ret; +} + +static int acpi_gsb_i2c_write_bytes(struct i2c_client *client, + u8 cmd, u8 *data, u8 data_len) +{ + + struct i2c_msg msgs[1]; + u8 *buffer; + int ret = AE_OK; + + buffer = kzalloc(data_len + 1, GFP_KERNEL); + if (!buffer) + return AE_NO_MEMORY; + + buffer[0] = cmd; + memcpy(buffer + 1, data, data_len); + + msgs[0].addr = client->addr; + 
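	/*
+	 * A single plain-write message: buffer[0] above carries the
+	 * command byte and the payload is copied right behind it, hence
+	 * the data_len + 1 message length below.
+	 */
+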
msgs[0].flags = client->flags; + msgs[0].len = data_len + 1; + msgs[0].buf = buffer; + + ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + dev_err(&client->adapter->dev, "i2c write failed\n"); + + kfree(buffer); + return ret; +} + +static acpi_status +i2c_acpi_space_handler(u32 function, acpi_physical_address command, + u32 bits, u64 *value64, + void *handler_context, void *region_context) +{ + struct gsb_buffer *gsb = (struct gsb_buffer *)value64; + struct i2c_acpi_handler_data *data = handler_context; + struct acpi_connection_info *info = &data->info; + struct acpi_resource_i2c_serialbus *sb; + struct i2c_adapter *adapter = data->adapter; + struct i2c_client *client; + struct acpi_resource *ares; + u32 accessor_type = function >> 16; + u8 action = function & ACPI_IO_MASK; + acpi_status ret; + int status; + + ret = acpi_buffer_to_resource(info->connection, info->length, &ares); + if (ACPI_FAILURE(ret)) + return ret; + + client = kzalloc(sizeof(*client), GFP_KERNEL); + if (!client) { + ret = AE_NO_MEMORY; + goto err; + } + + if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) { + ret = AE_BAD_PARAMETER; + goto err; + } + + sb = &ares->data.i2c_serial_bus; + if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) { + ret = AE_BAD_PARAMETER; + goto err; + } + + client->adapter = adapter; + client->addr = sb->slave_address; + + if (sb->access_mode == ACPI_I2C_10BIT_MODE) + client->flags |= I2C_CLIENT_TEN; + + switch (accessor_type) { + case ACPI_GSB_ACCESS_ATTRIB_SEND_RCV: + if (action == ACPI_READ) { + status = i2c_smbus_read_byte(client); + if (status >= 0) { + gsb->bdata = status; + status = 0; + } + } else { + status = i2c_smbus_write_byte(client, gsb->bdata); + } + break; + + case ACPI_GSB_ACCESS_ATTRIB_BYTE: + if (action == ACPI_READ) { + status = i2c_smbus_read_byte_data(client, command); + if (status >= 0) { + gsb->bdata = status; + status = 0; + } + } else { + status = i2c_smbus_write_byte_data(client, command, + gsb->bdata); + } + break; + + case ACPI_GSB_ACCESS_ATTRIB_WORD: + if (action == ACPI_READ) { + status = i2c_smbus_read_word_data(client, command); + if (status >= 0) { + gsb->wdata = status; + status = 0; + } + } else { + status = i2c_smbus_write_word_data(client, command, + gsb->wdata); + } + break; + + case ACPI_GSB_ACCESS_ATTRIB_BLOCK: + if (action == ACPI_READ) { + status = i2c_smbus_read_block_data(client, command, + gsb->data); + if (status >= 0) { + gsb->len = status; + status = 0; + } + } else { + status = i2c_smbus_write_block_data(client, command, + gsb->len, gsb->data); + } + break; + + case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE: + if (action == ACPI_READ) { + status = acpi_gsb_i2c_read_bytes(client, command, + gsb->data, info->access_length); + if (status > 0) + status = 0; + } else { + status = acpi_gsb_i2c_write_bytes(client, command, + gsb->data, info->access_length); + } + break; + + default: + dev_warn(&adapter->dev, "protocol 0x%02x not supported for client 0x%02x\n", + accessor_type, client->addr); + ret = AE_BAD_PARAMETER; + goto err; + } + + gsb->status = status; + + err: + kfree(client); + ACPI_FREE(ares); + return ret; +} + + +int i2c_acpi_install_space_handler(struct i2c_adapter *adapter) +{ + acpi_handle handle; + struct i2c_acpi_handler_data *data; + acpi_status status; + + if (!adapter->dev.parent) + return -ENODEV; + + handle = ACPI_HANDLE(adapter->dev.parent); + + if (!handle) + return -ENODEV; + + data = kzalloc(sizeof(struct i2c_acpi_handler_data), + GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->adapter = adapter; 
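+
+	/*
+	 * Attach the handler data to the ACPI handle itself so that
+	 * i2c_acpi_remove_space_handler() can look it up again and free
+	 * it when the adapter goes away.
+	 */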
+ status = acpi_bus_attach_private_data(handle, (void *)data); + if (ACPI_FAILURE(status)) { + kfree(data); + return -ENOMEM; + } + + status = acpi_install_address_space_handler(handle, + ACPI_ADR_SPACE_GSBUS, + &i2c_acpi_space_handler, + NULL, + data); + if (ACPI_FAILURE(status)) { + dev_err(&adapter->dev, "Error installing i2c space handler\n"); + acpi_bus_detach_private_data(handle); + kfree(data); + return -ENOMEM; + } + + acpi_walk_dep_device_list(handle); + return 0; +} + +void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter) +{ + acpi_handle handle; + struct i2c_acpi_handler_data *data; + acpi_status status; + + if (!adapter->dev.parent) + return; + + handle = ACPI_HANDLE(adapter->dev.parent); + + if (!handle) + return; + + acpi_remove_address_space_handler(handle, + ACPI_ADR_SPACE_GSBUS, + &i2c_acpi_space_handler); + + status = acpi_bus_get_private_data(handle, (void **)&data); + if (ACPI_SUCCESS(status)) + kfree(data); + + acpi_bus_detach_private_data(handle); +} +#endif /* CONFIG_ACPI_I2C_OPREGION */ diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core-base.c index 82576aaccc90..c89dac7fd2e7 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core-base.c @@ -1,36 +1,26 @@ -/* i2c-core.c - a device driver for the iic-bus interface */ -/* ------------------------------------------------------------------------- */ -/* Copyright (C) 1995-99 Simon G. Vogl - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. */ -/* ------------------------------------------------------------------------- */ - -/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>. - All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl> - SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and - Jean Delvare <jdelvare@suse.de> - Mux support by Rodolfo Giometti <giometti@enneenne.com> and - Michael Lawnick <michael.lawnick.ext@nsn.com> - OF support is copyright (c) 2008 Jochen Friedrich <jochen@scram.de> - (based on a previous patch from Jon Smirl <jonsmirl@gmail.com>) and - (c) 2013 Wolfram Sang <wsa@the-dreams.de> - I2C ACPI code Copyright (C) 2014 Intel Corp - Author: Lan Tianyu <tianyu.lan@intel.com> - I2C slave support (c) 2014 by Wolfram Sang <wsa@sang-engineering.com> +/* + * Linux I2C core + * + * Copyright (C) 1995-99 Simon G. Vogl + * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> + * Mux support by Rodolfo Giometti <giometti@enneenne.com> and + * Michael Lawnick <michael.lawnick.ext@nsn.com> + * + * Copyright (C) 2013-2017 Wolfram Sang <wsa@the-dreams.de> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS + * FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. 
*/ #define pr_fmt(fmt) "i2c-core: " fmt #include <dt-bindings/i2c/i2c.h> -#include <linux/uaccess.h> #include <linux/acpi.h> #include <linux/clk/clk-conf.h> #include <linux/completion.h> @@ -38,7 +28,6 @@ #include <linux/err.h> #include <linux/errno.h> #include <linux/gpio.h> -#include <linux/hardirq.h> #include <linux/i2c.h> #include <linux/idr.h> #include <linux/init.h> @@ -68,9 +57,10 @@ #define I2C_ADDR_7BITS_MAX 0x77 #define I2C_ADDR_7BITS_COUNT (I2C_ADDR_7BITS_MAX + 1) -/* core_lock protects i2c_adapter_idr, and guarantees - that device detection, deletion of detected devices, and attach_adapter - calls are serialized */ +/* + * core_lock protects i2c_adapter_idr, and guarantees that device detection, + * deletion of detected devices, and attach_adapter calls are serialized + */ static DEFINE_MUTEX(core_lock); static DEFINE_IDR(i2c_adapter_idr); @@ -90,652 +80,6 @@ void i2c_transfer_trace_unreg(void) static_key_slow_dec(&i2c_trace_msg); } -#if defined(CONFIG_ACPI) -struct i2c_acpi_handler_data { - struct acpi_connection_info info; - struct i2c_adapter *adapter; -}; - -struct gsb_buffer { - u8 status; - u8 len; - union { - u16 wdata; - u8 bdata; - u8 data[0]; - }; -} __packed; - -struct i2c_acpi_lookup { - struct i2c_board_info *info; - acpi_handle adapter_handle; - acpi_handle device_handle; - acpi_handle search_handle; - int n; - int index; - u32 speed; - u32 min_speed; -}; - -static int i2c_acpi_fill_info(struct acpi_resource *ares, void *data) -{ - struct i2c_acpi_lookup *lookup = data; - struct i2c_board_info *info = lookup->info; - struct acpi_resource_i2c_serialbus *sb; - acpi_status status; - - if (info->addr || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) - return 1; - - sb = &ares->data.i2c_serial_bus; - if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) - return 1; - - if (lookup->index != -1 && lookup->n++ != lookup->index) - return 1; - - status = acpi_get_handle(lookup->device_handle, - sb->resource_source.string_ptr, - &lookup->adapter_handle); - if (!ACPI_SUCCESS(status)) - return 1; - - info->addr = sb->slave_address; - lookup->speed = sb->connection_speed; - if (sb->access_mode == ACPI_I2C_10BIT_MODE) - info->flags |= I2C_CLIENT_TEN; - - return 1; -} - -static int i2c_acpi_do_lookup(struct acpi_device *adev, - struct i2c_acpi_lookup *lookup) -{ - struct i2c_board_info *info = lookup->info; - struct list_head resource_list; - int ret; - - if (acpi_bus_get_status(adev) || !adev->status.present || - acpi_device_enumerated(adev)) - return -EINVAL; - - memset(info, 0, sizeof(*info)); - lookup->device_handle = acpi_device_handle(adev); - - /* Look up for I2cSerialBus resource */ - INIT_LIST_HEAD(&resource_list); - ret = acpi_dev_get_resources(adev, &resource_list, - i2c_acpi_fill_info, lookup); - acpi_dev_free_resource_list(&resource_list); - - if (ret < 0 || !info->addr) - return -EINVAL; - - return 0; -} - -static int i2c_acpi_get_info(struct acpi_device *adev, - struct i2c_board_info *info, - struct i2c_adapter *adapter, - acpi_handle *adapter_handle) -{ - struct list_head resource_list; - struct resource_entry *entry; - struct i2c_acpi_lookup lookup; - int ret; - - memset(&lookup, 0, sizeof(lookup)); - lookup.info = info; - lookup.index = -1; - - ret = i2c_acpi_do_lookup(adev, &lookup); - if (ret) - return ret; - - if (adapter) { - /* The adapter must match the one in I2cSerialBus() connector */ - if (ACPI_HANDLE(&adapter->dev) != lookup.adapter_handle) - return -ENODEV; - } else { - struct acpi_device *adapter_adev; - - /* The adapter must be present */ - if 
(acpi_bus_get_device(lookup.adapter_handle, &adapter_adev)) - return -ENODEV; - if (acpi_bus_get_status(adapter_adev) || - !adapter_adev->status.present) - return -ENODEV; - } - - info->fwnode = acpi_fwnode_handle(adev); - if (adapter_handle) - *adapter_handle = lookup.adapter_handle; - - /* Then fill IRQ number if any */ - INIT_LIST_HEAD(&resource_list); - ret = acpi_dev_get_resources(adev, &resource_list, NULL, NULL); - if (ret < 0) - return -EINVAL; - - resource_list_for_each_entry(entry, &resource_list) { - if (resource_type(entry->res) == IORESOURCE_IRQ) { - info->irq = entry->res->start; - break; - } - } - - acpi_dev_free_resource_list(&resource_list); - - acpi_set_modalias(adev, dev_name(&adev->dev), info->type, - sizeof(info->type)); - - return 0; -} - -static void i2c_acpi_register_device(struct i2c_adapter *adapter, - struct acpi_device *adev, - struct i2c_board_info *info) -{ - adev->power.flags.ignore_parent = true; - acpi_device_set_enumerated(adev); - - if (!i2c_new_device(adapter, info)) { - adev->power.flags.ignore_parent = false; - dev_err(&adapter->dev, - "failed to add I2C device %s from ACPI\n", - dev_name(&adev->dev)); - } -} - -static acpi_status i2c_acpi_add_device(acpi_handle handle, u32 level, - void *data, void **return_value) -{ - struct i2c_adapter *adapter = data; - struct acpi_device *adev; - struct i2c_board_info info; - - if (acpi_bus_get_device(handle, &adev)) - return AE_OK; - - if (i2c_acpi_get_info(adev, &info, adapter, NULL)) - return AE_OK; - - i2c_acpi_register_device(adapter, adev, &info); - - return AE_OK; -} - -#define I2C_ACPI_MAX_SCAN_DEPTH 32 - -/** - * i2c_acpi_register_devices - enumerate I2C slave devices behind adapter - * @adap: pointer to adapter - * - * Enumerate all I2C slave devices behind this adapter by walking the ACPI - * namespace. When a device is found it will be added to the Linux device - * model and bound to the corresponding ACPI handle. - */ -static void i2c_acpi_register_devices(struct i2c_adapter *adap) -{ - acpi_status status; - - if (!has_acpi_companion(&adap->dev)) - return; - - status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, - I2C_ACPI_MAX_SCAN_DEPTH, - i2c_acpi_add_device, NULL, - adap, NULL); - if (ACPI_FAILURE(status)) - dev_warn(&adap->dev, "failed to enumerate I2C slaves\n"); -} - -static acpi_status i2c_acpi_lookup_speed(acpi_handle handle, u32 level, - void *data, void **return_value) -{ - struct i2c_acpi_lookup *lookup = data; - struct acpi_device *adev; - - if (acpi_bus_get_device(handle, &adev)) - return AE_OK; - - if (i2c_acpi_do_lookup(adev, lookup)) - return AE_OK; - - if (lookup->search_handle != lookup->adapter_handle) - return AE_OK; - - if (lookup->speed <= lookup->min_speed) - lookup->min_speed = lookup->speed; - - return AE_OK; -} - -/** - * i2c_acpi_find_bus_speed - find I2C bus speed from ACPI - * @dev: The device owning the bus - * - * Find the I2C bus speed by walking the ACPI namespace for all I2C slaves - * devices connected to this bus and use the speed of slowest device. 
- * - * Returns the speed in Hz or zero - */ -u32 i2c_acpi_find_bus_speed(struct device *dev) -{ - struct i2c_acpi_lookup lookup; - struct i2c_board_info dummy; - acpi_status status; - - if (!has_acpi_companion(dev)) - return 0; - - memset(&lookup, 0, sizeof(lookup)); - lookup.search_handle = ACPI_HANDLE(dev); - lookup.min_speed = UINT_MAX; - lookup.info = &dummy; - lookup.index = -1; - - status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, - I2C_ACPI_MAX_SCAN_DEPTH, - i2c_acpi_lookup_speed, NULL, - &lookup, NULL); - - if (ACPI_FAILURE(status)) { - dev_warn(dev, "unable to find I2C bus speed from ACPI\n"); - return 0; - } - - return lookup.min_speed != UINT_MAX ? lookup.min_speed : 0; -} -EXPORT_SYMBOL_GPL(i2c_acpi_find_bus_speed); - -static int i2c_acpi_match_adapter(struct device *dev, void *data) -{ - struct i2c_adapter *adapter = i2c_verify_adapter(dev); - - if (!adapter) - return 0; - - return ACPI_HANDLE(dev) == (acpi_handle)data; -} - -static int i2c_acpi_match_device(struct device *dev, void *data) -{ - return ACPI_COMPANION(dev) == data; -} - -static struct i2c_adapter *i2c_acpi_find_adapter_by_handle(acpi_handle handle) -{ - struct device *dev; - - dev = bus_find_device(&i2c_bus_type, NULL, handle, - i2c_acpi_match_adapter); - return dev ? i2c_verify_adapter(dev) : NULL; -} - -static struct i2c_client *i2c_acpi_find_client_by_adev(struct acpi_device *adev) -{ - struct device *dev; - - dev = bus_find_device(&i2c_bus_type, NULL, adev, i2c_acpi_match_device); - return dev ? i2c_verify_client(dev) : NULL; -} - -static int i2c_acpi_notify(struct notifier_block *nb, unsigned long value, - void *arg) -{ - struct acpi_device *adev = arg; - struct i2c_board_info info; - acpi_handle adapter_handle; - struct i2c_adapter *adapter; - struct i2c_client *client; - - switch (value) { - case ACPI_RECONFIG_DEVICE_ADD: - if (i2c_acpi_get_info(adev, &info, NULL, &adapter_handle)) - break; - - adapter = i2c_acpi_find_adapter_by_handle(adapter_handle); - if (!adapter) - break; - - i2c_acpi_register_device(adapter, adev, &info); - break; - case ACPI_RECONFIG_DEVICE_REMOVE: - if (!acpi_device_enumerated(adev)) - break; - - client = i2c_acpi_find_client_by_adev(adev); - if (!client) - break; - - i2c_unregister_device(client); - put_device(&client->dev); - break; - } - - return NOTIFY_OK; -} - -static struct notifier_block i2c_acpi_notifier = { - .notifier_call = i2c_acpi_notify, -}; - -/** - * i2c_acpi_new_device - Create i2c-client for the Nth I2cSerialBus resource - * @dev: Device owning the ACPI resources to get the client from - * @index: Index of ACPI resource to get - * @info: describes the I2C device; note this is modified (addr gets set) - * Context: can sleep - * - * By default the i2c subsys creates an i2c-client for the first I2cSerialBus - * resource of an acpi_device, but some acpi_devices have multiple I2cSerialBus - * resources, in that case this function can be used to create an i2c-client - * for other I2cSerialBus resources in the Current Resource Settings table. - * - * Also see i2c_new_device, which this function calls to create the i2c-client. - * - * Returns a pointer to the new i2c-client, or NULL if the adapter is not found. 
- */ -struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, - struct i2c_board_info *info) -{ - struct i2c_acpi_lookup lookup; - struct i2c_adapter *adapter; - struct acpi_device *adev; - LIST_HEAD(resource_list); - int ret; - - adev = ACPI_COMPANION(dev); - if (!adev) - return NULL; - - memset(&lookup, 0, sizeof(lookup)); - lookup.info = info; - lookup.device_handle = acpi_device_handle(adev); - lookup.index = index; - - ret = acpi_dev_get_resources(adev, &resource_list, - i2c_acpi_fill_info, &lookup); - acpi_dev_free_resource_list(&resource_list); - - if (ret < 0 || !info->addr) - return NULL; - - adapter = i2c_acpi_find_adapter_by_handle(lookup.adapter_handle); - if (!adapter) - return NULL; - - return i2c_new_device(adapter, info); -} -EXPORT_SYMBOL_GPL(i2c_acpi_new_device); -#else /* CONFIG_ACPI */ -static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { } -extern struct notifier_block i2c_acpi_notifier; -#endif /* CONFIG_ACPI */ - -#ifdef CONFIG_ACPI_I2C_OPREGION -static int acpi_gsb_i2c_read_bytes(struct i2c_client *client, - u8 cmd, u8 *data, u8 data_len) -{ - - struct i2c_msg msgs[2]; - int ret; - u8 *buffer; - - buffer = kzalloc(data_len, GFP_KERNEL); - if (!buffer) - return AE_NO_MEMORY; - - msgs[0].addr = client->addr; - msgs[0].flags = client->flags; - msgs[0].len = 1; - msgs[0].buf = &cmd; - - msgs[1].addr = client->addr; - msgs[1].flags = client->flags | I2C_M_RD; - msgs[1].len = data_len; - msgs[1].buf = buffer; - - ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); - if (ret < 0) - dev_err(&client->adapter->dev, "i2c read failed\n"); - else - memcpy(data, buffer, data_len); - - kfree(buffer); - return ret; -} - -static int acpi_gsb_i2c_write_bytes(struct i2c_client *client, - u8 cmd, u8 *data, u8 data_len) -{ - - struct i2c_msg msgs[1]; - u8 *buffer; - int ret = AE_OK; - - buffer = kzalloc(data_len + 1, GFP_KERNEL); - if (!buffer) - return AE_NO_MEMORY; - - buffer[0] = cmd; - memcpy(buffer + 1, data, data_len); - - msgs[0].addr = client->addr; - msgs[0].flags = client->flags; - msgs[0].len = data_len + 1; - msgs[0].buf = buffer; - - ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); - if (ret < 0) - dev_err(&client->adapter->dev, "i2c write failed\n"); - - kfree(buffer); - return ret; -} - -static acpi_status -i2c_acpi_space_handler(u32 function, acpi_physical_address command, - u32 bits, u64 *value64, - void *handler_context, void *region_context) -{ - struct gsb_buffer *gsb = (struct gsb_buffer *)value64; - struct i2c_acpi_handler_data *data = handler_context; - struct acpi_connection_info *info = &data->info; - struct acpi_resource_i2c_serialbus *sb; - struct i2c_adapter *adapter = data->adapter; - struct i2c_client *client; - struct acpi_resource *ares; - u32 accessor_type = function >> 16; - u8 action = function & ACPI_IO_MASK; - acpi_status ret; - int status; - - ret = acpi_buffer_to_resource(info->connection, info->length, &ares); - if (ACPI_FAILURE(ret)) - return ret; - - client = kzalloc(sizeof(*client), GFP_KERNEL); - if (!client) { - ret = AE_NO_MEMORY; - goto err; - } - - if (!value64 || ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS) { - ret = AE_BAD_PARAMETER; - goto err; - } - - sb = &ares->data.i2c_serial_bus; - if (sb->type != ACPI_RESOURCE_SERIAL_TYPE_I2C) { - ret = AE_BAD_PARAMETER; - goto err; - } - - client->adapter = adapter; - client->addr = sb->slave_address; - - if (sb->access_mode == ACPI_I2C_10BIT_MODE) - client->flags |= I2C_CLIENT_TEN; - - switch (accessor_type) { - case 
ACPI_GSB_ACCESS_ATTRIB_SEND_RCV: - if (action == ACPI_READ) { - status = i2c_smbus_read_byte(client); - if (status >= 0) { - gsb->bdata = status; - status = 0; - } - } else { - status = i2c_smbus_write_byte(client, gsb->bdata); - } - break; - - case ACPI_GSB_ACCESS_ATTRIB_BYTE: - if (action == ACPI_READ) { - status = i2c_smbus_read_byte_data(client, command); - if (status >= 0) { - gsb->bdata = status; - status = 0; - } - } else { - status = i2c_smbus_write_byte_data(client, command, - gsb->bdata); - } - break; - - case ACPI_GSB_ACCESS_ATTRIB_WORD: - if (action == ACPI_READ) { - status = i2c_smbus_read_word_data(client, command); - if (status >= 0) { - gsb->wdata = status; - status = 0; - } - } else { - status = i2c_smbus_write_word_data(client, command, - gsb->wdata); - } - break; - - case ACPI_GSB_ACCESS_ATTRIB_BLOCK: - if (action == ACPI_READ) { - status = i2c_smbus_read_block_data(client, command, - gsb->data); - if (status >= 0) { - gsb->len = status; - status = 0; - } - } else { - status = i2c_smbus_write_block_data(client, command, - gsb->len, gsb->data); - } - break; - - case ACPI_GSB_ACCESS_ATTRIB_MULTIBYTE: - if (action == ACPI_READ) { - status = acpi_gsb_i2c_read_bytes(client, command, - gsb->data, info->access_length); - if (status > 0) - status = 0; - } else { - status = acpi_gsb_i2c_write_bytes(client, command, - gsb->data, info->access_length); - } - break; - - default: - dev_warn(&adapter->dev, "protocol 0x%02x not supported for client 0x%02x\n", - accessor_type, client->addr); - ret = AE_BAD_PARAMETER; - goto err; - } - - gsb->status = status; - - err: - kfree(client); - ACPI_FREE(ares); - return ret; -} - - -static int i2c_acpi_install_space_handler(struct i2c_adapter *adapter) -{ - acpi_handle handle; - struct i2c_acpi_handler_data *data; - acpi_status status; - - if (!adapter->dev.parent) - return -ENODEV; - - handle = ACPI_HANDLE(adapter->dev.parent); - - if (!handle) - return -ENODEV; - - data = kzalloc(sizeof(struct i2c_acpi_handler_data), - GFP_KERNEL); - if (!data) - return -ENOMEM; - - data->adapter = adapter; - status = acpi_bus_attach_private_data(handle, (void *)data); - if (ACPI_FAILURE(status)) { - kfree(data); - return -ENOMEM; - } - - status = acpi_install_address_space_handler(handle, - ACPI_ADR_SPACE_GSBUS, - &i2c_acpi_space_handler, - NULL, - data); - if (ACPI_FAILURE(status)) { - dev_err(&adapter->dev, "Error installing i2c space handler\n"); - acpi_bus_detach_private_data(handle); - kfree(data); - return -ENOMEM; - } - - acpi_walk_dep_device_list(handle); - return 0; -} - -static void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter) -{ - acpi_handle handle; - struct i2c_acpi_handler_data *data; - acpi_status status; - - if (!adapter->dev.parent) - return; - - handle = ACPI_HANDLE(adapter->dev.parent); - - if (!handle) - return; - - acpi_remove_address_space_handler(handle, - ACPI_ADR_SPACE_GSBUS, - &i2c_acpi_space_handler); - - status = acpi_bus_get_private_data(handle, (void **)&data); - if (ACPI_SUCCESS(status)) - kfree(data); - - acpi_bus_detach_private_data(handle); -} -#else /* CONFIG_ACPI_I2C_OPREGION */ -static inline void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter) -{ } - -static inline int i2c_acpi_install_space_handler(struct i2c_adapter *adapter) -{ return 0; } -#endif /* CONFIG_ACPI_I2C_OPREGION */ - -/* ------------------------------------------------------------------------- */ - const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id, const struct i2c_client *client) { @@ -1195,7 +539,7 @@ static 
unsigned short i2c_encode_flags_to_addr(struct i2c_client *client) /* This is a permissive address validity check, I2C address map constraints * are purposely not enforced, except for the general call address. */ -static int i2c_check_addr_validity(unsigned addr, unsigned short flags) +int i2c_check_addr_validity(unsigned addr, unsigned short flags) { if (flags & I2C_CLIENT_TEN) { /* 10-bit address, all values are valid */ @@ -1213,7 +557,7 @@ static int i2c_check_addr_validity(unsigned addr, unsigned short flags) * device uses a reserved address, then it shouldn't be probed. 7-bit * addressing is assumed, 10-bit address devices are rare and should be * explicitly enumerated. */ -static int i2c_check_7bit_addr_validity_strict(unsigned short addr) +int i2c_check_7bit_addr_validity_strict(unsigned short addr) { /* * Reserved addresses per I2C specification: @@ -1764,210 +1108,6 @@ static void i2c_scan_static_board_info(struct i2c_adapter *adapter) up_read(&__i2c_board_lock); } -/* OF support code */ - -#if IS_ENABLED(CONFIG_OF) -static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap, - struct device_node *node) -{ - struct i2c_client *result; - struct i2c_board_info info = {}; - struct dev_archdata dev_ad = {}; - const __be32 *addr_be; - u32 addr; - int len; - - dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name); - - if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) { - dev_err(&adap->dev, "of_i2c: modalias failure on %s\n", - node->full_name); - return ERR_PTR(-EINVAL); - } - - addr_be = of_get_property(node, "reg", &len); - if (!addr_be || (len < sizeof(*addr_be))) { - dev_err(&adap->dev, "of_i2c: invalid reg on %s\n", - node->full_name); - return ERR_PTR(-EINVAL); - } - - addr = be32_to_cpup(addr_be); - if (addr & I2C_TEN_BIT_ADDRESS) { - addr &= ~I2C_TEN_BIT_ADDRESS; - info.flags |= I2C_CLIENT_TEN; - } - - if (addr & I2C_OWN_SLAVE_ADDRESS) { - addr &= ~I2C_OWN_SLAVE_ADDRESS; - info.flags |= I2C_CLIENT_SLAVE; - } - - if (i2c_check_addr_validity(addr, info.flags)) { - dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n", - addr, node->full_name); - return ERR_PTR(-EINVAL); - } - - info.addr = addr; - info.of_node = of_node_get(node); - info.archdata = &dev_ad; - - if (of_property_read_bool(node, "host-notify")) - info.flags |= I2C_CLIENT_HOST_NOTIFY; - - if (of_get_property(node, "wakeup-source", NULL)) - info.flags |= I2C_CLIENT_WAKE; - - result = i2c_new_device(adap, &info); - if (result == NULL) { - dev_err(&adap->dev, "of_i2c: Failure registering %s\n", - node->full_name); - of_node_put(node); - return ERR_PTR(-EINVAL); - } - return result; -} - -static void of_i2c_register_devices(struct i2c_adapter *adap) -{ - struct device_node *bus, *node; - struct i2c_client *client; - - /* Only register child devices if the adapter has a node pointer set */ - if (!adap->dev.of_node) - return; - - dev_dbg(&adap->dev, "of_i2c: walking child nodes\n"); - - bus = of_get_child_by_name(adap->dev.of_node, "i2c-bus"); - if (!bus) - bus = of_node_get(adap->dev.of_node); - - for_each_available_child_of_node(bus, node) { - if (of_node_test_and_set_flag(node, OF_POPULATED)) - continue; - - client = of_i2c_register_device(adap, node); - if (IS_ERR(client)) { - dev_warn(&adap->dev, - "Failed to create I2C device for %s\n", - node->full_name); - of_node_clear_flag(node, OF_POPULATED); - } - } - - of_node_put(bus); -} - -static int of_dev_node_match(struct device *dev, void *data) -{ - return dev->of_node == data; -} - -/* must call put_device() when done with returned 
i2c_client device */ -struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) -{ - struct device *dev; - struct i2c_client *client; - - dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); - if (!dev) - return NULL; - - client = i2c_verify_client(dev); - if (!client) - put_device(dev); - - return client; -} -EXPORT_SYMBOL(of_find_i2c_device_by_node); - -/* must call put_device() when done with returned i2c_adapter device */ -struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) -{ - struct device *dev; - struct i2c_adapter *adapter; - - dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match); - if (!dev) - return NULL; - - adapter = i2c_verify_adapter(dev); - if (!adapter) - put_device(dev); - - return adapter; -} -EXPORT_SYMBOL(of_find_i2c_adapter_by_node); - -/* must call i2c_put_adapter() when done with returned i2c_adapter device */ -struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node) -{ - struct i2c_adapter *adapter; - - adapter = of_find_i2c_adapter_by_node(node); - if (!adapter) - return NULL; - - if (!try_module_get(adapter->owner)) { - put_device(&adapter->dev); - adapter = NULL; - } - - return adapter; -} -EXPORT_SYMBOL(of_get_i2c_adapter_by_node); - -static const struct of_device_id* -i2c_of_match_device_sysfs(const struct of_device_id *matches, - struct i2c_client *client) -{ - const char *name; - - for (; matches->compatible[0]; matches++) { - /* - * Adding devices through the i2c sysfs interface provides us - * a string to match which may be compatible with the device - * tree compatible strings, however with no actual of_node the - * of_match_device() will not match - */ - if (sysfs_streq(client->name, matches->compatible)) - return matches; - - name = strchr(matches->compatible, ','); - if (!name) - name = matches->compatible; - else - name++; - - if (sysfs_streq(client->name, name)) - return matches; - } - - return NULL; -} - -const struct of_device_id -*i2c_of_match_device(const struct of_device_id *matches, - struct i2c_client *client) -{ - const struct of_device_id *match; - - if (!(client && matches)) - return NULL; - - match = of_match_device(matches, &client->dev); - if (match) - return match; - - return i2c_of_match_device_sysfs(matches, client); -} -EXPORT_SYMBOL_GPL(i2c_of_match_device); -#else -static void of_i2c_register_devices(struct i2c_adapter *adap) { } -#endif /* CONFIG_OF */ - static int i2c_do_add_adapter(struct i2c_driver *driver, struct i2c_adapter *adap) { @@ -2562,62 +1702,6 @@ void i2c_clients_command(struct i2c_adapter *adap, unsigned int cmd, void *arg) } EXPORT_SYMBOL(i2c_clients_command); -#if IS_ENABLED(CONFIG_OF_DYNAMIC) -static int of_i2c_notify(struct notifier_block *nb, unsigned long action, - void *arg) -{ - struct of_reconfig_data *rd = arg; - struct i2c_adapter *adap; - struct i2c_client *client; - - switch (of_reconfig_get_state_change(action, rd)) { - case OF_RECONFIG_CHANGE_ADD: - adap = of_find_i2c_adapter_by_node(rd->dn->parent); - if (adap == NULL) - return NOTIFY_OK; /* not for us */ - - if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) { - put_device(&adap->dev); - return NOTIFY_OK; - } - - client = of_i2c_register_device(adap, rd->dn); - put_device(&adap->dev); - - if (IS_ERR(client)) { - dev_err(&adap->dev, "failed to create client for '%s'\n", - rd->dn->full_name); - of_node_clear_flag(rd->dn, OF_POPULATED); - return notifier_from_errno(PTR_ERR(client)); - } - break; - case OF_RECONFIG_CHANGE_REMOVE: - /* already depopulated? 
*/ - if (!of_node_check_flag(rd->dn, OF_POPULATED)) - return NOTIFY_OK; - - /* find our device by node */ - client = of_find_i2c_device_by_node(rd->dn); - if (client == NULL) - return NOTIFY_OK; /* no? not meant for us */ - - /* unregister takes one ref away */ - i2c_unregister_device(client); - - /* and put the reference of the find */ - put_device(&client->dev); - break; - } - - return NOTIFY_OK; -} -static struct notifier_block i2c_of_notifier = { - .notifier_call = of_i2c_notify, -}; -#else -extern struct notifier_block i2c_of_notifier; -#endif /* CONFIG_OF_DYNAMIC */ - static int __init i2c_init(void) { int retval; @@ -3156,676 +2240,6 @@ void i2c_put_adapter(struct i2c_adapter *adap) } EXPORT_SYMBOL(i2c_put_adapter); -/* The SMBus parts */ - -#define POLY (0x1070U << 3) -static u8 crc8(u16 data) -{ - int i; - - for (i = 0; i < 8; i++) { - if (data & 0x8000) - data = data ^ POLY; - data = data << 1; - } - return (u8)(data >> 8); -} - -/* Incremental CRC8 over count bytes in the array pointed to by p */ -static u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count) -{ - int i; - - for (i = 0; i < count; i++) - crc = crc8((crc ^ p[i]) << 8); - return crc; -} - -/* Assume a 7-bit address, which is reasonable for SMBus */ -static u8 i2c_smbus_msg_pec(u8 pec, struct i2c_msg *msg) -{ - /* The address will be sent first */ - u8 addr = i2c_8bit_addr_from_msg(msg); - pec = i2c_smbus_pec(pec, &addr, 1); - - /* The data buffer follows */ - return i2c_smbus_pec(pec, msg->buf, msg->len); -} - -/* Used for write only transactions */ -static inline void i2c_smbus_add_pec(struct i2c_msg *msg) -{ - msg->buf[msg->len] = i2c_smbus_msg_pec(0, msg); - msg->len++; -} - -/* Return <0 on CRC error - If there was a write before this read (most cases) we need to take the - partial CRC from the write part into account. - Note that this function does modify the message (we need to decrease the - message length to hide the CRC byte from the caller). */ -static int i2c_smbus_check_pec(u8 cpec, struct i2c_msg *msg) -{ - u8 rpec = msg->buf[--msg->len]; - cpec = i2c_smbus_msg_pec(cpec, msg); - - if (rpec != cpec) { - pr_debug("Bad PEC 0x%02x vs. 0x%02x\n", - rpec, cpec); - return -EBADMSG; - } - return 0; -} - -/** - * i2c_smbus_read_byte - SMBus "receive byte" protocol - * @client: Handle to slave device - * - * This executes the SMBus "receive byte" protocol, returning negative errno - * else the byte received from the device. - */ -s32 i2c_smbus_read_byte(const struct i2c_client *client) -{ - union i2c_smbus_data data; - int status; - - status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, - I2C_SMBUS_READ, 0, - I2C_SMBUS_BYTE, &data); - return (status < 0) ? status : data.byte; -} -EXPORT_SYMBOL(i2c_smbus_read_byte); - -/** - * i2c_smbus_write_byte - SMBus "send byte" protocol - * @client: Handle to slave device - * @value: Byte to be sent - * - * This executes the SMBus "send byte" protocol, returning negative errno - * else zero on success. - */ -s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value) -{ - return i2c_smbus_xfer(client->adapter, client->addr, client->flags, - I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL); -} -EXPORT_SYMBOL(i2c_smbus_write_byte); - -/** - * i2c_smbus_read_byte_data - SMBus "read byte" protocol - * @client: Handle to slave device - * @command: Byte interpreted by slave - * - * This executes the SMBus "read byte" protocol, returning negative errno - * else a data byte received from the device. 
- */ -s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command) -{ - union i2c_smbus_data data; - int status; - - status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, - I2C_SMBUS_READ, command, - I2C_SMBUS_BYTE_DATA, &data); - return (status < 0) ? status : data.byte; -} -EXPORT_SYMBOL(i2c_smbus_read_byte_data); - -/** - * i2c_smbus_write_byte_data - SMBus "write byte" protocol - * @client: Handle to slave device - * @command: Byte interpreted by slave - * @value: Byte being written - * - * This executes the SMBus "write byte" protocol, returning negative errno - * else zero on success. - */ -s32 i2c_smbus_write_byte_data(const struct i2c_client *client, u8 command, - u8 value) -{ - union i2c_smbus_data data; - data.byte = value; - return i2c_smbus_xfer(client->adapter, client->addr, client->flags, - I2C_SMBUS_WRITE, command, - I2C_SMBUS_BYTE_DATA, &data); -} -EXPORT_SYMBOL(i2c_smbus_write_byte_data); - -/** - * i2c_smbus_read_word_data - SMBus "read word" protocol - * @client: Handle to slave device - * @command: Byte interpreted by slave - * - * This executes the SMBus "read word" protocol, returning negative errno - * else a 16-bit unsigned "word" received from the device. - */ -s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command) -{ - union i2c_smbus_data data; - int status; - - status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, - I2C_SMBUS_READ, command, - I2C_SMBUS_WORD_DATA, &data); - return (status < 0) ? status : data.word; -} -EXPORT_SYMBOL(i2c_smbus_read_word_data); - -/** - * i2c_smbus_write_word_data - SMBus "write word" protocol - * @client: Handle to slave device - * @command: Byte interpreted by slave - * @value: 16-bit "word" being written - * - * This executes the SMBus "write word" protocol, returning negative errno - * else zero on success. - */ -s32 i2c_smbus_write_word_data(const struct i2c_client *client, u8 command, - u16 value) -{ - union i2c_smbus_data data; - data.word = value; - return i2c_smbus_xfer(client->adapter, client->addr, client->flags, - I2C_SMBUS_WRITE, command, - I2C_SMBUS_WORD_DATA, &data); -} -EXPORT_SYMBOL(i2c_smbus_write_word_data); - -/** - * i2c_smbus_read_block_data - SMBus "block read" protocol - * @client: Handle to slave device - * @command: Byte interpreted by slave - * @values: Byte array into which data will be read; big enough to hold - * the data returned by the slave. SMBus allows at most 32 bytes. - * - * This executes the SMBus "block read" protocol, returning negative errno - * else the number of data bytes in the slave's response. - * - * Note that using this function requires that the client's adapter support - * the I2C_FUNC_SMBUS_READ_BLOCK_DATA functionality. Not all adapter drivers - * support this; its emulation through I2C messaging relies on a specific - * mechanism (I2C_M_RECV_LEN) which may not be implemented. 
- */ -s32 i2c_smbus_read_block_data(const struct i2c_client *client, u8 command, - u8 *values) -{ - union i2c_smbus_data data; - int status; - - status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, - I2C_SMBUS_READ, command, - I2C_SMBUS_BLOCK_DATA, &data); - if (status) - return status; - - memcpy(values, &data.block[1], data.block[0]); - return data.block[0]; -} -EXPORT_SYMBOL(i2c_smbus_read_block_data); - -/** - * i2c_smbus_write_block_data - SMBus "block write" protocol - * @client: Handle to slave device - * @command: Byte interpreted by slave - * @length: Size of data block; SMBus allows at most 32 bytes - * @values: Byte array which will be written. - * - * This executes the SMBus "block write" protocol, returning negative errno - * else zero on success. - */ -s32 i2c_smbus_write_block_data(const struct i2c_client *client, u8 command, - u8 length, const u8 *values) -{ - union i2c_smbus_data data; - - if (length > I2C_SMBUS_BLOCK_MAX) - length = I2C_SMBUS_BLOCK_MAX; - data.block[0] = length; - memcpy(&data.block[1], values, length); - return i2c_smbus_xfer(client->adapter, client->addr, client->flags, - I2C_SMBUS_WRITE, command, - I2C_SMBUS_BLOCK_DATA, &data); -} -EXPORT_SYMBOL(i2c_smbus_write_block_data); - -/* Returns the number of read bytes */ -s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, u8 command, - u8 length, u8 *values) -{ - union i2c_smbus_data data; - int status; - - if (length > I2C_SMBUS_BLOCK_MAX) - length = I2C_SMBUS_BLOCK_MAX; - data.block[0] = length; - status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, - I2C_SMBUS_READ, command, - I2C_SMBUS_I2C_BLOCK_DATA, &data); - if (status < 0) - return status; - - memcpy(values, &data.block[1], data.block[0]); - return data.block[0]; -} -EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data); - -s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, u8 command, - u8 length, const u8 *values) -{ - union i2c_smbus_data data; - - if (length > I2C_SMBUS_BLOCK_MAX) - length = I2C_SMBUS_BLOCK_MAX; - data.block[0] = length; - memcpy(data.block + 1, values, length); - return i2c_smbus_xfer(client->adapter, client->addr, client->flags, - I2C_SMBUS_WRITE, command, - I2C_SMBUS_I2C_BLOCK_DATA, &data); -} -EXPORT_SYMBOL(i2c_smbus_write_i2c_block_data); - -/* Simulate a SMBus command using the i2c protocol - No checking of parameters is done! */ -static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, - unsigned short flags, - char read_write, u8 command, int size, - union i2c_smbus_data *data) -{ - /* So we need to generate a series of msgs. In the case of writing, we - need to use only one message; when reading, we need two. We initialize - most things with sane defaults, to keep the code below somewhat - simpler. */ - unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3]; - unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2]; - int num = read_write == I2C_SMBUS_READ ? 2 : 1; - int i; - u8 partial_pec = 0; - int status; - struct i2c_msg msg[2] = { - { - .addr = addr, - .flags = flags, - .len = 1, - .buf = msgbuf0, - }, { - .addr = addr, - .flags = flags | I2C_M_RD, - .len = 0, - .buf = msgbuf1, - }, - }; - - msgbuf0[0] = command; - switch (size) { - case I2C_SMBUS_QUICK: - msg[0].len = 0; - /* Special case: The read/write field is used as data */ - msg[0].flags = flags | (read_write == I2C_SMBUS_READ ? - I2C_M_RD : 0); - num = 1; - break; - case I2C_SMBUS_BYTE: - if (read_write == I2C_SMBUS_READ) { - /* Special case: only a read! 
*/ - msg[0].flags = I2C_M_RD | flags; - num = 1; - } - break; - case I2C_SMBUS_BYTE_DATA: - if (read_write == I2C_SMBUS_READ) - msg[1].len = 1; - else { - msg[0].len = 2; - msgbuf0[1] = data->byte; - } - break; - case I2C_SMBUS_WORD_DATA: - if (read_write == I2C_SMBUS_READ) - msg[1].len = 2; - else { - msg[0].len = 3; - msgbuf0[1] = data->word & 0xff; - msgbuf0[2] = data->word >> 8; - } - break; - case I2C_SMBUS_PROC_CALL: - num = 2; /* Special case */ - read_write = I2C_SMBUS_READ; - msg[0].len = 3; - msg[1].len = 2; - msgbuf0[1] = data->word & 0xff; - msgbuf0[2] = data->word >> 8; - break; - case I2C_SMBUS_BLOCK_DATA: - if (read_write == I2C_SMBUS_READ) { - msg[1].flags |= I2C_M_RECV_LEN; - msg[1].len = 1; /* block length will be added by - the underlying bus driver */ - } else { - msg[0].len = data->block[0] + 2; - if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 2) { - dev_err(&adapter->dev, - "Invalid block write size %d\n", - data->block[0]); - return -EINVAL; - } - for (i = 1; i < msg[0].len; i++) - msgbuf0[i] = data->block[i-1]; - } - break; - case I2C_SMBUS_BLOCK_PROC_CALL: - num = 2; /* Another special case */ - read_write = I2C_SMBUS_READ; - if (data->block[0] > I2C_SMBUS_BLOCK_MAX) { - dev_err(&adapter->dev, - "Invalid block write size %d\n", - data->block[0]); - return -EINVAL; - } - msg[0].len = data->block[0] + 2; - for (i = 1; i < msg[0].len; i++) - msgbuf0[i] = data->block[i-1]; - msg[1].flags |= I2C_M_RECV_LEN; - msg[1].len = 1; /* block length will be added by - the underlying bus driver */ - break; - case I2C_SMBUS_I2C_BLOCK_DATA: - if (read_write == I2C_SMBUS_READ) { - msg[1].len = data->block[0]; - } else { - msg[0].len = data->block[0] + 1; - if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) { - dev_err(&adapter->dev, - "Invalid block write size %d\n", - data->block[0]); - return -EINVAL; - } - for (i = 1; i <= data->block[0]; i++) - msgbuf0[i] = data->block[i]; - } - break; - default: - dev_err(&adapter->dev, "Unsupported transaction %d\n", size); - return -EOPNOTSUPP; - } - - i = ((flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK - && size != I2C_SMBUS_I2C_BLOCK_DATA); - if (i) { - /* Compute PEC if first message is a write */ - if (!(msg[0].flags & I2C_M_RD)) { - if (num == 1) /* Write only */ - i2c_smbus_add_pec(&msg[0]); - else /* Write followed by read */ - partial_pec = i2c_smbus_msg_pec(0, &msg[0]); - } - /* Ask for PEC if last message is a read */ - if (msg[num-1].flags & I2C_M_RD) - msg[num-1].len++; - } - - status = i2c_transfer(adapter, msg, num); - if (status < 0) - return status; - - /* Check PEC if last message is a read */ - if (i && (msg[num-1].flags & I2C_M_RD)) { - status = i2c_smbus_check_pec(partial_pec, &msg[num-1]); - if (status < 0) - return status; - } - - if (read_write == I2C_SMBUS_READ) - switch (size) { - case I2C_SMBUS_BYTE: - data->byte = msgbuf0[0]; - break; - case I2C_SMBUS_BYTE_DATA: - data->byte = msgbuf1[0]; - break; - case I2C_SMBUS_WORD_DATA: - case I2C_SMBUS_PROC_CALL: - data->word = msgbuf1[0] | (msgbuf1[1] << 8); - break; - case I2C_SMBUS_I2C_BLOCK_DATA: - for (i = 0; i < data->block[0]; i++) - data->block[i+1] = msgbuf1[i]; - break; - case I2C_SMBUS_BLOCK_DATA: - case I2C_SMBUS_BLOCK_PROC_CALL: - for (i = 0; i < msgbuf1[0] + 1; i++) - data->block[i] = msgbuf1[i]; - break; - } - return 0; -} - -/** - * i2c_smbus_xfer - execute SMBus protocol operations - * @adapter: Handle to I2C bus - * @addr: Address of SMBus slave on that bus - * @flags: I2C_CLIENT_* flags (usually zero or I2C_CLIENT_PEC) - * @read_write: I2C_SMBUS_READ or I2C_SMBUS_WRITE 
- * @command: Byte interpreted by slave, for protocols which use such bytes - * @protocol: SMBus protocol operation to execute, such as I2C_SMBUS_PROC_CALL - * @data: Data to be read or written - * - * This executes an SMBus protocol operation, and returns a negative - * errno code else zero on success. - */ -s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags, - char read_write, u8 command, int protocol, - union i2c_smbus_data *data) -{ - unsigned long orig_jiffies; - int try; - s32 res; - - /* If enabled, the following two tracepoints are conditional on - * read_write and protocol. - */ - trace_smbus_write(adapter, addr, flags, read_write, - command, protocol, data); - trace_smbus_read(adapter, addr, flags, read_write, - command, protocol); - - flags &= I2C_M_TEN | I2C_CLIENT_PEC | I2C_CLIENT_SCCB; - - if (adapter->algo->smbus_xfer) { - i2c_lock_bus(adapter, I2C_LOCK_SEGMENT); - - /* Retry automatically on arbitration loss */ - orig_jiffies = jiffies; - for (res = 0, try = 0; try <= adapter->retries; try++) { - res = adapter->algo->smbus_xfer(adapter, addr, flags, - read_write, command, - protocol, data); - if (res != -EAGAIN) - break; - if (time_after(jiffies, - orig_jiffies + adapter->timeout)) - break; - } - i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT); - - if (res != -EOPNOTSUPP || !adapter->algo->master_xfer) - goto trace; - /* - * Fall back to i2c_smbus_xfer_emulated if the adapter doesn't - * implement native support for the SMBus operation. - */ - } - - res = i2c_smbus_xfer_emulated(adapter, addr, flags, read_write, - command, protocol, data); - -trace: - /* If enabled, the reply tracepoint is conditional on read_write. */ - trace_smbus_reply(adapter, addr, flags, read_write, - command, protocol, data); - trace_smbus_result(adapter, addr, flags, read_write, - command, protocol, res); - - return res; -} -EXPORT_SYMBOL(i2c_smbus_xfer); - -/** - * i2c_smbus_read_i2c_block_data_or_emulated - read block or emulate - * @client: Handle to slave device - * @command: Byte interpreted by slave - * @length: Size of data block; SMBus allows at most I2C_SMBUS_BLOCK_MAX bytes - * @values: Byte array into which data will be read; big enough to hold - * the data returned by the slave. SMBus allows at most - * I2C_SMBUS_BLOCK_MAX bytes. - * - * This executes the SMBus "block read" protocol if supported by the adapter. - * If block read is not supported, it emulates it using either word or byte - * read protocols depending on availability. - * - * The addresses of the I2C slave device that are accessed with this function - * must be mapped to a linear region, so that a block read will have the same - * effect as a byte read. Before using this function you must double-check - * if the I2C slave does support exchanging a block transfer with a byte - * transfer. 
- */ -s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client, - u8 command, u8 length, u8 *values) -{ - u8 i = 0; - int status; - - if (length > I2C_SMBUS_BLOCK_MAX) - length = I2C_SMBUS_BLOCK_MAX; - - if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK)) - return i2c_smbus_read_i2c_block_data(client, command, length, values); - - if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA)) - return -EOPNOTSUPP; - - if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)) { - while ((i + 2) <= length) { - status = i2c_smbus_read_word_data(client, command + i); - if (status < 0) - return status; - values[i] = status & 0xff; - values[i + 1] = status >> 8; - i += 2; - } - } - - while (i < length) { - status = i2c_smbus_read_byte_data(client, command + i); - if (status < 0) - return status; - values[i] = status; - i++; - } - - return i; -} -EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated); - -#if IS_ENABLED(CONFIG_I2C_SLAVE) -int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb) -{ - int ret; - - if (!client || !slave_cb) { - WARN(1, "insufficient data\n"); - return -EINVAL; - } - - if (!(client->flags & I2C_CLIENT_SLAVE)) - dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n", - __func__); - - if (!(client->flags & I2C_CLIENT_TEN)) { - /* Enforce stricter address checking */ - ret = i2c_check_7bit_addr_validity_strict(client->addr); - if (ret) { - dev_err(&client->dev, "%s: invalid address\n", __func__); - return ret; - } - } - - if (!client->adapter->algo->reg_slave) { - dev_err(&client->dev, "%s: not supported by adapter\n", __func__); - return -EOPNOTSUPP; - } - - client->slave_cb = slave_cb; - - i2c_lock_adapter(client->adapter); - ret = client->adapter->algo->reg_slave(client); - i2c_unlock_adapter(client->adapter); - - if (ret) { - client->slave_cb = NULL; - dev_err(&client->dev, "%s: adapter returned error %d\n", __func__, ret); - } - - return ret; -} -EXPORT_SYMBOL_GPL(i2c_slave_register); - -int i2c_slave_unregister(struct i2c_client *client) -{ - int ret; - - if (!client->adapter->algo->unreg_slave) { - dev_err(&client->dev, "%s: not supported by adapter\n", __func__); - return -EOPNOTSUPP; - } - - i2c_lock_adapter(client->adapter); - ret = client->adapter->algo->unreg_slave(client); - i2c_unlock_adapter(client->adapter); - - if (ret == 0) - client->slave_cb = NULL; - else - dev_err(&client->dev, "%s: adapter returned error %d\n", __func__, ret); - - return ret; -} -EXPORT_SYMBOL_GPL(i2c_slave_unregister); - -/** - * i2c_detect_slave_mode - detect operation mode - * @dev: The device owning the bus - * - * This checks the device nodes for an I2C slave by checking the address - * used in the reg property. If the address match the I2C_OWN_SLAVE_ADDRESS - * flag this means the device is configured to act as a I2C slave and it will - * be listening at that address. - * - * Returns true if an I2C own slave address is detected, otherwise returns - * false. 
- */
-bool i2c_detect_slave_mode(struct device *dev)
-{
-	if (IS_BUILTIN(CONFIG_OF) && dev->of_node) {
-		struct device_node *child;
-		u32 reg;
-
-		for_each_child_of_node(dev->of_node, child) {
-			of_property_read_u32(child, "reg", &reg);
-			if (reg & I2C_OWN_SLAVE_ADDRESS) {
-				of_node_put(child);
-				return true;
-			}
-		}
-	} else if (IS_BUILTIN(CONFIG_ACPI) && ACPI_HANDLE(dev)) {
-		dev_dbg(dev, "ACPI slave is not supported yet\n");
-	}
-	return false;
-}
-EXPORT_SYMBOL_GPL(i2c_detect_slave_mode);
-
-#endif
-
 MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
 MODULE_DESCRIPTION("I2C-Bus main module");
 MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/i2c-core-of.c b/drivers/i2c/i2c-core-of.c
new file mode 100644
index 000000000000..ccf82fdbcd8e
--- /dev/null
+++ b/drivers/i2c/i2c-core-of.c
@@ -0,0 +1,276 @@
+/*
+ * Linux I2C core OF support code
+ *
+ * Copyright (C) 2008 Jochen Friedrich <jochen@scram.de>
+ * based on a previous patch from Jon Smirl <jonsmirl@gmail.com>
+ *
+ * Copyright (C) 2013 Wolfram Sang <wsa@the-dreams.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <dt-bindings/i2c/i2c.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include "i2c-core.h"
+
+static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
+						 struct device_node *node)
+{
+	struct i2c_client *result;
+	struct i2c_board_info info = {};
+	struct dev_archdata dev_ad = {};
+	const __be32 *addr_be;
+	u32 addr;
+	int len;
+
+	dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name);
+
+	if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
+		dev_err(&adap->dev, "of_i2c: modalias failure on %s\n",
+			node->full_name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	addr_be = of_get_property(node, "reg", &len);
+	if (!addr_be || (len < sizeof(*addr_be))) {
+		dev_err(&adap->dev, "of_i2c: invalid reg on %s\n",
+			node->full_name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	addr = be32_to_cpup(addr_be);
+	if (addr & I2C_TEN_BIT_ADDRESS) {
+		addr &= ~I2C_TEN_BIT_ADDRESS;
+		info.flags |= I2C_CLIENT_TEN;
+	}
+
+	if (addr & I2C_OWN_SLAVE_ADDRESS) {
+		addr &= ~I2C_OWN_SLAVE_ADDRESS;
+		info.flags |= I2C_CLIENT_SLAVE;
+	}
+
+	if (i2c_check_addr_validity(addr, info.flags)) {
+		dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
+			addr, node->full_name);
+		return ERR_PTR(-EINVAL);
+	}
+
+	info.addr = addr;
+	info.of_node = of_node_get(node);
+	info.archdata = &dev_ad;
+
+	if (of_property_read_bool(node, "host-notify"))
+		info.flags |= I2C_CLIENT_HOST_NOTIFY;
+
+	if (of_get_property(node, "wakeup-source", NULL))
+		info.flags |= I2C_CLIENT_WAKE;
+
+	result = i2c_new_device(adap, &info);
+	if (result == NULL) {
+		dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
+			node->full_name);
+		of_node_put(node);
+		return ERR_PTR(-EINVAL);
+	}
+	return result;
+}
+
+void of_i2c_register_devices(struct i2c_adapter *adap)
+{
+	struct device_node *bus, *node;
+	struct i2c_client *client;
+
+	/* Only register child devices if the adapter has a node pointer set */
+	if (!adap->dev.of_node)
+		return;
+
+	dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");
+
+	bus = of_get_child_by_name(adap->dev.of_node, "i2c-bus");
+	if (!bus)
+		bus = of_node_get(adap->dev.of_node);
+
+	for_each_available_child_of_node(bus, node) {
+		if (of_node_test_and_set_flag(node, OF_POPULATED))
+			continue;
+
+		client = of_i2c_register_device(adap, node);
+		if (IS_ERR(client)) {
+			dev_warn(&adap->dev,
+				 "Failed to create I2C device for %s\n",
+				 node->full_name);
+			of_node_clear_flag(node, OF_POPULATED);
+		}
+	}
+
+	of_node_put(bus);
+}
+
+static int of_dev_node_match(struct device *dev, void *data)
+{
+	return dev->of_node == data;
+}
+
+/* must call put_device() when done with returned i2c_client device */
+struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
+{
+	struct device *dev;
+	struct i2c_client *client;
+
+	dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
+	if (!dev)
+		return NULL;
+
+	client = i2c_verify_client(dev);
+	if (!client)
+		put_device(dev);
+
+	return client;
+}
+EXPORT_SYMBOL(of_find_i2c_device_by_node);
+
+/* must call put_device() when done with returned i2c_adapter device */
+struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
+{
+	struct device *dev;
+	struct i2c_adapter *adapter;
+
+	dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
+	if (!dev)
+		return NULL;
+
+	adapter = i2c_verify_adapter(dev);
+	if (!adapter)
+		put_device(dev);
+
+	return adapter;
+}
+EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
+
+/* must call i2c_put_adapter() when done with returned i2c_adapter device */
+struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node)
+{
+	struct i2c_adapter *adapter;
+
+	adapter = of_find_i2c_adapter_by_node(node);
+	if (!adapter)
+		return NULL;
+
+	if (!try_module_get(adapter->owner)) {
+		put_device(&adapter->dev);
+		adapter = NULL;
+	}
+
+	return adapter;
+}
+EXPORT_SYMBOL(of_get_i2c_adapter_by_node);
+
+static const struct of_device_id*
+i2c_of_match_device_sysfs(const struct of_device_id *matches,
+			  struct i2c_client *client)
+{
+	const char *name;
+
+	for (; matches->compatible[0]; matches++) {
+		/*
+		 * Adding devices through the i2c sysfs interface provides us
+		 * a string to match which may be compatible with the device
+		 * tree compatible strings; however, with no actual of_node,
+		 * of_match_device() will not match.
+		 */
+		if (sysfs_streq(client->name, matches->compatible))
+			return matches;
+
+		name = strchr(matches->compatible, ',');
+		if (!name)
+			name = matches->compatible;
+		else
+			name++;
+
+		if (sysfs_streq(client->name, name))
+			return matches;
+	}
+
+	return NULL;
+}
+
+const struct of_device_id
+*i2c_of_match_device(const struct of_device_id *matches,
+		     struct i2c_client *client)
+{
+	const struct of_device_id *match;
+
+	if (!(client && matches))
+		return NULL;
+
+	match = of_match_device(matches, &client->dev);
+	if (match)
+		return match;
+
+	return i2c_of_match_device_sysfs(matches, client);
+}
+EXPORT_SYMBOL_GPL(i2c_of_match_device);
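The lookup helpers moved into this new file are the usual way for other kernel code to resolve an i2c_adapter from a device-tree handle. A minimal sketch of a consumer is below; the "i2c-parent" phandle property and the probe function are illustrative assumptions, not part of this patch:

	/* Illustrative consumer (not part of this patch); assumes
	 * <linux/of.h>, <linux/i2c.h> and <linux/platform_device.h>.
	 */
	static int example_probe(struct platform_device *pdev)
	{
		struct device_node *np;
		struct i2c_adapter *adap;

		np = of_parse_phandle(pdev->dev.of_node, "i2c-parent", 0);
		if (!np)
			return -ENODEV;

		/* takes a device reference and a module reference on success */
		adap = of_get_i2c_adapter_by_node(np);
		of_node_put(np);
		if (!adap)
			return -EPROBE_DEFER;

		/* ... communicate with devices behind adap ... */

		i2c_put_adapter(adap);	/* releases both references */
		return 0;
	}

Note that of_get_i2c_adapter_by_node() is the variant to use from consumers, since it also pins the adapter driver's module; the plain of_find_* helpers only take a device reference.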
+
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+static int of_i2c_notify(struct notifier_block *nb, unsigned long action,
+			 void *arg)
+{
+	struct of_reconfig_data *rd = arg;
+	struct i2c_adapter *adap;
+	struct i2c_client *client;
+
+	switch (of_reconfig_get_state_change(action, rd)) {
+	case OF_RECONFIG_CHANGE_ADD:
+		adap = of_find_i2c_adapter_by_node(rd->dn->parent);
+		if (adap == NULL)
+			return NOTIFY_OK;	/* not for us */
+
+		if (of_node_test_and_set_flag(rd->dn, OF_POPULATED)) {
+			put_device(&adap->dev);
+			return NOTIFY_OK;
+		}
+
+		client = of_i2c_register_device(adap, rd->dn);
+		put_device(&adap->dev);
+
+		if (IS_ERR(client)) {
+			dev_err(&adap->dev, "failed to create client for '%s'\n",
+				rd->dn->full_name);
+			of_node_clear_flag(rd->dn, OF_POPULATED);
+			return notifier_from_errno(PTR_ERR(client));
+		}
+		break;
+	case OF_RECONFIG_CHANGE_REMOVE:
+		/* already depopulated? */
+		if (!of_node_check_flag(rd->dn, OF_POPULATED))
+			return NOTIFY_OK;
+
+		/* find our device by node */
+		client = of_find_i2c_device_by_node(rd->dn);
+		if (client == NULL)
+			return NOTIFY_OK;	/* no? not meant for us */
+
+		/* unregister takes one ref away */
+		i2c_unregister_device(client);
+
+		/* and put the reference of the find */
+		put_device(&client->dev);
+		break;
+	}
+
+	return NOTIFY_OK;
+}
+
+struct notifier_block i2c_of_notifier = {
+	.notifier_call = of_i2c_notify,
+};
+#endif /* CONFIG_OF_DYNAMIC */
diff --git a/drivers/i2c/i2c-core-slave.c b/drivers/i2c/i2c-core-slave.c
new file mode 100644
index 000000000000..4a78c65e9971
--- /dev/null
+++ b/drivers/i2c/i2c-core-slave.c
@@ -0,0 +1,115 @@
+/*
+ * Linux I2C core slave support code
+ *
+ * Copyright (C) 2014 by Wolfram Sang <wsa@sang-engineering.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+
+#include <dt-bindings/i2c/i2c.h>
+#include <linux/acpi.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+#include <linux/of.h>
+
+#include "i2c-core.h"
+
+int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
+{
+	int ret;
+
+	if (!client || !slave_cb) {
+		WARN(1, "insufficient data\n");
+		return -EINVAL;
+	}
+
+	if (!(client->flags & I2C_CLIENT_SLAVE))
+		dev_warn(&client->dev, "%s: client slave flag not set. You might see address collisions\n",
+			 __func__);
+
+	if (!(client->flags & I2C_CLIENT_TEN)) {
+		/* Enforce stricter address checking */
+		ret = i2c_check_7bit_addr_validity_strict(client->addr);
+		if (ret) {
+			dev_err(&client->dev, "%s: invalid address\n", __func__);
+			return ret;
+		}
+	}
+
+	if (!client->adapter->algo->reg_slave) {
+		dev_err(&client->dev, "%s: not supported by adapter\n", __func__);
+		return -EOPNOTSUPP;
+	}
+
+	client->slave_cb = slave_cb;
+
+	i2c_lock_adapter(client->adapter);
+	ret = client->adapter->algo->reg_slave(client);
+	i2c_unlock_adapter(client->adapter);
+
+	if (ret) {
+		client->slave_cb = NULL;
+		dev_err(&client->dev, "%s: adapter returned error %d\n", __func__, ret);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i2c_slave_register);
+
+int i2c_slave_unregister(struct i2c_client *client)
+{
+	int ret;
+
+	if (!client->adapter->algo->unreg_slave) {
+		dev_err(&client->dev, "%s: not supported by adapter\n", __func__);
+		return -EOPNOTSUPP;
+	}
+
+	i2c_lock_adapter(client->adapter);
+	ret = client->adapter->algo->unreg_slave(client);
+	i2c_unlock_adapter(client->adapter);
+
+	if (ret == 0)
+		client->slave_cb = NULL;
+	else
+		dev_err(&client->dev, "%s: adapter returned error %d\n", __func__, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(i2c_slave_unregister);
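For context, a backend registered through i2c_slave_register() receives bus events via an i2c_slave_cb_t callback. A minimal sketch of such a callback backing a single byte of state follows; the function and variable names are made up, while the event names come from <linux/i2c.h>:

	/* Illustrative slave backend (not part of this patch). */
	static int example_slave_cb(struct i2c_client *client,
				    enum i2c_slave_event event, u8 *val)
	{
		static u8 byte;			/* example backing store */

		switch (event) {
		case I2C_SLAVE_WRITE_RECEIVED:
			byte = *val;		/* remote master wrote a byte */
			break;
		case I2C_SLAVE_READ_REQUESTED:
		case I2C_SLAVE_READ_PROCESSED:
			*val = byte;		/* remote master reads it back */
			break;
		case I2C_SLAVE_WRITE_REQUESTED:
		case I2C_SLAVE_STOP:
			break;
		}
		return 0;
	}

	/* registration, typically from the backend's probe(): */
	ret = i2c_slave_register(client, example_slave_cb);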
+
+/**
+ * i2c_detect_slave_mode - detect operation mode
+ * @dev: The device owning the bus
+ *
+ * This checks the device nodes for an I2C slave by checking the address
+ * used in the reg property. If the address matches the I2C_OWN_SLAVE_ADDRESS
+ * flag, the device is configured to act as an I2C slave and it will be
+ * listening at that address.
+ *
+ * Returns true if an I2C own slave address is detected, otherwise returns
+ * false.
+ */
+bool i2c_detect_slave_mode(struct device *dev)
+{
+	if (IS_BUILTIN(CONFIG_OF) && dev->of_node) {
+		struct device_node *child;
+		u32 reg;
+
+		for_each_child_of_node(dev->of_node, child) {
+			of_property_read_u32(child, "reg", &reg);
+			if (reg & I2C_OWN_SLAVE_ADDRESS) {
+				of_node_put(child);
+				return true;
+			}
+		}
+	} else if (IS_BUILTIN(CONFIG_ACPI) && ACPI_HANDLE(dev)) {
+		dev_dbg(dev, "ACPI slave is not supported yet\n");
+	}
+	return false;
+}
+EXPORT_SYMBOL_GPL(i2c_detect_slave_mode);
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
new file mode 100644
index 000000000000..10f00a82ec9d
--- /dev/null
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -0,0 +1,594 @@
+/*
+ * Linux I2C core SMBus and SMBus emulation code
+ *
+ * This file contains the SMBus functions which are always included in the I2C
+ * core because they can be emulated via I2C. SMBus specific extensions
+ * (e.g. smbalert) are handled in a separate i2c-smbus module.
+ *
+ * All SMBus-related things are written by Frodo Looijaard <frodol@dds.nl>
+ * SMBus 2.0 support by Mark Studebaker <mdsxyz123@yahoo.com> and
+ * Jean Delvare <jdelvare@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ */
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/i2c.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/smbus.h>
+
+
+/* The SMBus parts */
+
+#define POLY    (0x1070U << 3)
+static u8 crc8(u16 data)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		if (data & 0x8000)
+			data = data ^ POLY;
+		data = data << 1;
+	}
+	return (u8)(data >> 8);
+}
+
+/* Incremental CRC8 over count bytes in the array pointed to by p */
+static u8 i2c_smbus_pec(u8 crc, u8 *p, size_t count)
+{
+	int i;
+
+	for (i = 0; i < count; i++)
+		crc = crc8((crc ^ p[i]) << 8);
+	return crc;
+}
+
+/* Assume a 7-bit address, which is reasonable for SMBus */
+static u8 i2c_smbus_msg_pec(u8 pec, struct i2c_msg *msg)
+{
+	/* The address will be sent first */
+	u8 addr = i2c_8bit_addr_from_msg(msg);
+	pec = i2c_smbus_pec(pec, &addr, 1);
+
+	/* The data buffer follows */
+	return i2c_smbus_pec(pec, msg->buf, msg->len);
+}
+
+/* Used for write only transactions */
+static inline void i2c_smbus_add_pec(struct i2c_msg *msg)
+{
+	msg->buf[msg->len] = i2c_smbus_msg_pec(0, msg);
+	msg->len++;
+}
+
+/* Return <0 on CRC error
+   If there was a write before this read (most cases) we need to take the
+   partial CRC from the write part into account.
+   Note that this function does modify the message (we need to decrease the
+   message length to hide the CRC byte from the caller). */
+static int i2c_smbus_check_pec(u8 cpec, struct i2c_msg *msg)
+{
+	u8 rpec = msg->buf[--msg->len];
+	cpec = i2c_smbus_msg_pec(cpec, msg);
+
+	if (rpec != cpec) {
+		pr_debug("Bad PEC 0x%02x vs. 0x%02x\n",
+			rpec, cpec);
+		return -EBADMSG;
+	}
+	return 0;
+}
+
+/**
+ * i2c_smbus_read_byte - SMBus "receive byte" protocol
+ * @client: Handle to slave device
+ *
+ * This executes the SMBus "receive byte" protocol, returning negative errno
+ * else the byte received from the device.
+ */
+s32 i2c_smbus_read_byte(const struct i2c_client *client)
+{
+	union i2c_smbus_data data;
+	int status;
+
+	status = i2c_smbus_xfer(client->adapter, client->addr, client->flags,
+				I2C_SMBUS_READ, 0,
+				I2C_SMBUS_BYTE, &data);
+	return (status < 0) ? 
status : data.byte; +} +EXPORT_SYMBOL(i2c_smbus_read_byte); + +/** + * i2c_smbus_write_byte - SMBus "send byte" protocol + * @client: Handle to slave device + * @value: Byte to be sent + * + * This executes the SMBus "send byte" protocol, returning negative errno + * else zero on success. + */ +s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value) +{ + return i2c_smbus_xfer(client->adapter, client->addr, client->flags, + I2C_SMBUS_WRITE, value, I2C_SMBUS_BYTE, NULL); +} +EXPORT_SYMBOL(i2c_smbus_write_byte); + +/** + * i2c_smbus_read_byte_data - SMBus "read byte" protocol + * @client: Handle to slave device + * @command: Byte interpreted by slave + * + * This executes the SMBus "read byte" protocol, returning negative errno + * else a data byte received from the device. + */ +s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command) +{ + union i2c_smbus_data data; + int status; + + status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, + I2C_SMBUS_READ, command, + I2C_SMBUS_BYTE_DATA, &data); + return (status < 0) ? status : data.byte; +} +EXPORT_SYMBOL(i2c_smbus_read_byte_data); + +/** + * i2c_smbus_write_byte_data - SMBus "write byte" protocol + * @client: Handle to slave device + * @command: Byte interpreted by slave + * @value: Byte being written + * + * This executes the SMBus "write byte" protocol, returning negative errno + * else zero on success. + */ +s32 i2c_smbus_write_byte_data(const struct i2c_client *client, u8 command, + u8 value) +{ + union i2c_smbus_data data; + data.byte = value; + return i2c_smbus_xfer(client->adapter, client->addr, client->flags, + I2C_SMBUS_WRITE, command, + I2C_SMBUS_BYTE_DATA, &data); +} +EXPORT_SYMBOL(i2c_smbus_write_byte_data); + +/** + * i2c_smbus_read_word_data - SMBus "read word" protocol + * @client: Handle to slave device + * @command: Byte interpreted by slave + * + * This executes the SMBus "read word" protocol, returning negative errno + * else a 16-bit unsigned "word" received from the device. + */ +s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command) +{ + union i2c_smbus_data data; + int status; + + status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, + I2C_SMBUS_READ, command, + I2C_SMBUS_WORD_DATA, &data); + return (status < 0) ? status : data.word; +} +EXPORT_SYMBOL(i2c_smbus_read_word_data); + +/** + * i2c_smbus_write_word_data - SMBus "write word" protocol + * @client: Handle to slave device + * @command: Byte interpreted by slave + * @value: 16-bit "word" being written + * + * This executes the SMBus "write word" protocol, returning negative errno + * else zero on success. + */ +s32 i2c_smbus_write_word_data(const struct i2c_client *client, u8 command, + u16 value) +{ + union i2c_smbus_data data; + data.word = value; + return i2c_smbus_xfer(client->adapter, client->addr, client->flags, + I2C_SMBUS_WRITE, command, + I2C_SMBUS_WORD_DATA, &data); +} +EXPORT_SYMBOL(i2c_smbus_write_word_data); + +/** + * i2c_smbus_read_block_data - SMBus "block read" protocol + * @client: Handle to slave device + * @command: Byte interpreted by slave + * @values: Byte array into which data will be read; big enough to hold + * the data returned by the slave. SMBus allows at most 32 bytes. + * + * This executes the SMBus "block read" protocol, returning negative errno + * else the number of data bytes in the slave's response. + * + * Note that using this function requires that the client's adapter support + * the I2C_FUNC_SMBUS_READ_BLOCK_DATA functionality. 
Not all adapter drivers + * support this; its emulation through I2C messaging relies on a specific + * mechanism (I2C_M_RECV_LEN) which may not be implemented. + */ +s32 i2c_smbus_read_block_data(const struct i2c_client *client, u8 command, + u8 *values) +{ + union i2c_smbus_data data; + int status; + + status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, + I2C_SMBUS_READ, command, + I2C_SMBUS_BLOCK_DATA, &data); + if (status) + return status; + + memcpy(values, &data.block[1], data.block[0]); + return data.block[0]; +} +EXPORT_SYMBOL(i2c_smbus_read_block_data); + +/** + * i2c_smbus_write_block_data - SMBus "block write" protocol + * @client: Handle to slave device + * @command: Byte interpreted by slave + * @length: Size of data block; SMBus allows at most 32 bytes + * @values: Byte array which will be written. + * + * This executes the SMBus "block write" protocol, returning negative errno + * else zero on success. + */ +s32 i2c_smbus_write_block_data(const struct i2c_client *client, u8 command, + u8 length, const u8 *values) +{ + union i2c_smbus_data data; + + if (length > I2C_SMBUS_BLOCK_MAX) + length = I2C_SMBUS_BLOCK_MAX; + data.block[0] = length; + memcpy(&data.block[1], values, length); + return i2c_smbus_xfer(client->adapter, client->addr, client->flags, + I2C_SMBUS_WRITE, command, + I2C_SMBUS_BLOCK_DATA, &data); +} +EXPORT_SYMBOL(i2c_smbus_write_block_data); + +/* Returns the number of read bytes */ +s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client, u8 command, + u8 length, u8 *values) +{ + union i2c_smbus_data data; + int status; + + if (length > I2C_SMBUS_BLOCK_MAX) + length = I2C_SMBUS_BLOCK_MAX; + data.block[0] = length; + status = i2c_smbus_xfer(client->adapter, client->addr, client->flags, + I2C_SMBUS_READ, command, + I2C_SMBUS_I2C_BLOCK_DATA, &data); + if (status < 0) + return status; + + memcpy(values, &data.block[1], data.block[0]); + return data.block[0]; +} +EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data); + +s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client, u8 command, + u8 length, const u8 *values) +{ + union i2c_smbus_data data; + + if (length > I2C_SMBUS_BLOCK_MAX) + length = I2C_SMBUS_BLOCK_MAX; + data.block[0] = length; + memcpy(data.block + 1, values, length); + return i2c_smbus_xfer(client->adapter, client->addr, client->flags, + I2C_SMBUS_WRITE, command, + I2C_SMBUS_I2C_BLOCK_DATA, &data); +} +EXPORT_SYMBOL(i2c_smbus_write_i2c_block_data); + +/* Simulate a SMBus command using the i2c protocol + No checking of parameters is done! */ +static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr, + unsigned short flags, + char read_write, u8 command, int size, + union i2c_smbus_data *data) +{ + /* So we need to generate a series of msgs. In the case of writing, we + need to use only one message; when reading, we need two. We initialize + most things with sane defaults, to keep the code below somewhat + simpler. */ + unsigned char msgbuf0[I2C_SMBUS_BLOCK_MAX+3]; + unsigned char msgbuf1[I2C_SMBUS_BLOCK_MAX+2]; + int num = read_write == I2C_SMBUS_READ ? 2 : 1; + int i; + u8 partial_pec = 0; + int status; + struct i2c_msg msg[2] = { + { + .addr = addr, + .flags = flags, + .len = 1, + .buf = msgbuf0, + }, { + .addr = addr, + .flags = flags | I2C_M_RD, + .len = 0, + .buf = msgbuf1, + }, + }; + + msgbuf0[0] = command; + switch (size) { + case I2C_SMBUS_QUICK: + msg[0].len = 0; + /* Special case: The read/write field is used as data */ + msg[0].flags = flags | (read_write == I2C_SMBUS_READ ? 
+ I2C_M_RD : 0); + num = 1; + break; + case I2C_SMBUS_BYTE: + if (read_write == I2C_SMBUS_READ) { + /* Special case: only a read! */ + msg[0].flags = I2C_M_RD | flags; + num = 1; + } + break; + case I2C_SMBUS_BYTE_DATA: + if (read_write == I2C_SMBUS_READ) + msg[1].len = 1; + else { + msg[0].len = 2; + msgbuf0[1] = data->byte; + } + break; + case I2C_SMBUS_WORD_DATA: + if (read_write == I2C_SMBUS_READ) + msg[1].len = 2; + else { + msg[0].len = 3; + msgbuf0[1] = data->word & 0xff; + msgbuf0[2] = data->word >> 8; + } + break; + case I2C_SMBUS_PROC_CALL: + num = 2; /* Special case */ + read_write = I2C_SMBUS_READ; + msg[0].len = 3; + msg[1].len = 2; + msgbuf0[1] = data->word & 0xff; + msgbuf0[2] = data->word >> 8; + break; + case I2C_SMBUS_BLOCK_DATA: + if (read_write == I2C_SMBUS_READ) { + msg[1].flags |= I2C_M_RECV_LEN; + msg[1].len = 1; /* block length will be added by + the underlying bus driver */ + } else { + msg[0].len = data->block[0] + 2; + if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 2) { + dev_err(&adapter->dev, + "Invalid block write size %d\n", + data->block[0]); + return -EINVAL; + } + for (i = 1; i < msg[0].len; i++) + msgbuf0[i] = data->block[i-1]; + } + break; + case I2C_SMBUS_BLOCK_PROC_CALL: + num = 2; /* Another special case */ + read_write = I2C_SMBUS_READ; + if (data->block[0] > I2C_SMBUS_BLOCK_MAX) { + dev_err(&adapter->dev, + "Invalid block write size %d\n", + data->block[0]); + return -EINVAL; + } + msg[0].len = data->block[0] + 2; + for (i = 1; i < msg[0].len; i++) + msgbuf0[i] = data->block[i-1]; + msg[1].flags |= I2C_M_RECV_LEN; + msg[1].len = 1; /* block length will be added by + the underlying bus driver */ + break; + case I2C_SMBUS_I2C_BLOCK_DATA: + if (read_write == I2C_SMBUS_READ) { + msg[1].len = data->block[0]; + } else { + msg[0].len = data->block[0] + 1; + if (msg[0].len > I2C_SMBUS_BLOCK_MAX + 1) { + dev_err(&adapter->dev, + "Invalid block write size %d\n", + data->block[0]); + return -EINVAL; + } + for (i = 1; i <= data->block[0]; i++) + msgbuf0[i] = data->block[i]; + } + break; + default: + dev_err(&adapter->dev, "Unsupported transaction %d\n", size); + return -EOPNOTSUPP; + } + + i = ((flags & I2C_CLIENT_PEC) && size != I2C_SMBUS_QUICK + && size != I2C_SMBUS_I2C_BLOCK_DATA); + if (i) { + /* Compute PEC if first message is a write */ + if (!(msg[0].flags & I2C_M_RD)) { + if (num == 1) /* Write only */ + i2c_smbus_add_pec(&msg[0]); + else /* Write followed by read */ + partial_pec = i2c_smbus_msg_pec(0, &msg[0]); + } + /* Ask for PEC if last message is a read */ + if (msg[num-1].flags & I2C_M_RD) + msg[num-1].len++; + } + + status = i2c_transfer(adapter, msg, num); + if (status < 0) + return status; + + /* Check PEC if last message is a read */ + if (i && (msg[num-1].flags & I2C_M_RD)) { + status = i2c_smbus_check_pec(partial_pec, &msg[num-1]); + if (status < 0) + return status; + } + + if (read_write == I2C_SMBUS_READ) + switch (size) { + case I2C_SMBUS_BYTE: + data->byte = msgbuf0[0]; + break; + case I2C_SMBUS_BYTE_DATA: + data->byte = msgbuf1[0]; + break; + case I2C_SMBUS_WORD_DATA: + case I2C_SMBUS_PROC_CALL: + data->word = msgbuf1[0] | (msgbuf1[1] << 8); + break; + case I2C_SMBUS_I2C_BLOCK_DATA: + for (i = 0; i < data->block[0]; i++) + data->block[i+1] = msgbuf1[i]; + break; + case I2C_SMBUS_BLOCK_DATA: + case I2C_SMBUS_BLOCK_PROC_CALL: + for (i = 0; i < msgbuf1[0] + 1; i++) + data->block[i] = msgbuf1[i]; + break; + } + return 0; +} + +/** + * i2c_smbus_xfer - execute SMBus protocol operations + * @adapter: Handle to I2C bus + * @addr: Address of SMBus 
slave on that bus
+ * @flags: I2C_CLIENT_* flags (usually zero or I2C_CLIENT_PEC)
+ * @read_write: I2C_SMBUS_READ or I2C_SMBUS_WRITE
+ * @command: Byte interpreted by slave, for protocols which use such bytes
+ * @protocol: SMBus protocol operation to execute, such as I2C_SMBUS_PROC_CALL
+ * @data: Data to be read or written
+ *
+ * This executes an SMBus protocol operation, and returns a negative
+ * errno code else zero on success.
+ */
+s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr, unsigned short flags,
+		   char read_write, u8 command, int protocol,
+		   union i2c_smbus_data *data)
+{
+	unsigned long orig_jiffies;
+	int try;
+	s32 res;
+
+	/* If enabled, the following two tracepoints are conditional on
+	 * read_write and protocol.
+	 */
+	trace_smbus_write(adapter, addr, flags, read_write,
+			  command, protocol, data);
+	trace_smbus_read(adapter, addr, flags, read_write,
+			 command, protocol);
+
+	flags &= I2C_M_TEN | I2C_CLIENT_PEC | I2C_CLIENT_SCCB;
+
+	if (adapter->algo->smbus_xfer) {
+		i2c_lock_bus(adapter, I2C_LOCK_SEGMENT);
+
+		/* Retry automatically on arbitration loss */
+		orig_jiffies = jiffies;
+		for (res = 0, try = 0; try <= adapter->retries; try++) {
+			res = adapter->algo->smbus_xfer(adapter, addr, flags,
+							read_write, command,
+							protocol, data);
+			if (res != -EAGAIN)
+				break;
+			if (time_after(jiffies,
+				       orig_jiffies + adapter->timeout))
+				break;
+		}
+		i2c_unlock_bus(adapter, I2C_LOCK_SEGMENT);
+
+		if (res != -EOPNOTSUPP || !adapter->algo->master_xfer)
+			goto trace;
+		/*
+		 * Fall back to i2c_smbus_xfer_emulated if the adapter doesn't
+		 * implement native support for the SMBus operation.
+		 */
+	}
+
+	res = i2c_smbus_xfer_emulated(adapter, addr, flags, read_write,
+				      command, protocol, data);
+
+trace:
+	/* If enabled, the reply tracepoint is conditional on read_write. */
+	trace_smbus_reply(adapter, addr, flags, read_write,
+			  command, protocol, data);
+	trace_smbus_result(adapter, addr, flags, read_write,
+			   command, protocol, res);
+
+	return res;
+}
+EXPORT_SYMBOL(i2c_smbus_xfer);
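All of the i2c_smbus_* helpers above funnel into i2c_smbus_xfer(), which transparently falls back to i2c_smbus_xfer_emulated() on adapters that only implement master_xfer. A short sketch of a client read that works either way; the register address and function name are made up:

	/* Illustrative client read (not part of this patch): works on
	 * native SMBus controllers and on plain I2C controllers alike,
	 * since the emulation path is selected inside i2c_smbus_xfer().
	 */
	#define EXAMPLE_TEMP_REG	0x01	/* hypothetical register */

	static int example_read_temp(struct i2c_client *client, u16 *temp)
	{
		s32 val;

		if (!i2c_check_functionality(client->adapter,
					     I2C_FUNC_SMBUS_READ_WORD_DATA))
			return -EOPNOTSUPP;	/* neither native nor emulated */

		val = i2c_smbus_read_word_data(client, EXAMPLE_TEMP_REG);
		if (val < 0)
			return val;		/* negative errno from the core */

		*temp = val;
		return 0;
	}

Note that i2c_check_functionality() already accounts for the emulation path: adapters with master_xfer report the emulated SMBus operations as supported.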
+
+/**
+ * i2c_smbus_read_i2c_block_data_or_emulated - read block or emulate
+ * @client: Handle to slave device
+ * @command: Byte interpreted by slave
+ * @length: Size of data block; SMBus allows at most I2C_SMBUS_BLOCK_MAX bytes
+ * @values: Byte array into which data will be read; big enough to hold
+ *	the data returned by the slave. SMBus allows at most
+ *	I2C_SMBUS_BLOCK_MAX bytes.
+ *
+ * This executes the SMBus "block read" protocol if supported by the adapter.
+ * If block read is not supported, it emulates it using either word or byte
+ * read protocols depending on availability.
+ *
+ * The addresses of the I2C slave device that are accessed with this function
+ * must be mapped to a linear region, so that a block read will have the same
+ * effect as a byte read. Before using this function, double-check that the
+ * I2C slave actually supports exchanging a block transfer for a series of
+ * byte transfers.
+ */
+s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
+					      u8 command, u8 length, u8 *values)
+{
+	u8 i = 0;
+	int status;
+
+	if (length > I2C_SMBUS_BLOCK_MAX)
+		length = I2C_SMBUS_BLOCK_MAX;
+
+	if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_I2C_BLOCK))
+		return i2c_smbus_read_i2c_block_data(client, command, length, values);
+
+	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA))
+		return -EOPNOTSUPP;
+
+	if (i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_WORD_DATA)) {
+		while ((i + 2) <= length) {
+			status = i2c_smbus_read_word_data(client, command + i);
+			if (status < 0)
+				return status;
+			values[i] = status & 0xff;
+			values[i + 1] = status >> 8;
+			i += 2;
+		}
+	}
+
+	while (i < length) {
+		status = i2c_smbus_read_byte_data(client, command + i);
+		if (status < 0)
+			return status;
+		values[i] = status;
+		i++;
+	}
+
+	return i;
+}
+EXPORT_SYMBOL(i2c_smbus_read_i2c_block_data_or_emulated);
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 17700bfddcf5..3b63f5e5b89c 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -27,3 +27,27 @@ extern struct rw_semaphore	__i2c_board_lock;
 extern struct list_head	__i2c_board_list;
 extern int		__i2c_first_dynamic_bus_num;
 
+int i2c_check_addr_validity(unsigned addr, unsigned short flags);
+int i2c_check_7bit_addr_validity_strict(unsigned short addr);
+
+#ifdef CONFIG_ACPI
+void i2c_acpi_register_devices(struct i2c_adapter *adap);
+#else /* CONFIG_ACPI */
+static inline void i2c_acpi_register_devices(struct i2c_adapter *adap) { }
+#endif /* CONFIG_ACPI */
+extern struct notifier_block i2c_acpi_notifier;
+
+#ifdef CONFIG_ACPI_I2C_OPREGION
+int i2c_acpi_install_space_handler(struct i2c_adapter *adapter);
+void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter);
+#else /* CONFIG_ACPI_I2C_OPREGION */
+static inline int i2c_acpi_install_space_handler(struct i2c_adapter *adapter) { return 0; }
+static inline void i2c_acpi_remove_space_handler(struct i2c_adapter *adapter) { }
+#endif /* CONFIG_ACPI_I2C_OPREGION */
+
+#ifdef CONFIG_OF
+void of_i2c_register_devices(struct i2c_adapter *adap);
+#else
+static inline void of_i2c_register_devices(struct i2c_adapter *adap) { }
+#endif
+extern struct notifier_block i2c_of_notifier;
diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c
index 06af583d5101..4a9ad91c5ba3 100644
--- a/drivers/i2c/i2c-stub.c
+++ b/drivers/i2c/i2c-stub.c
@@ -16,6 +16,7 @@
  */
 
 #define DEBUG 1
+#define pr_fmt(fmt) "i2c-stub: " fmt
 
 #include <linux/errno.h>
 #include <linux/i2c.h>
@@ -342,7 +343,7 @@ static int __init i2c_stub_allocate_banks(int i)
 	if (!chip->bank_words)
 		return -ENOMEM;
 
-	pr_debug("i2c-stub: Allocated %u banks of %u words each (registers 0x%02x to 0x%02x)\n",
+	pr_debug("Allocated %u banks of %u words each (registers 0x%02x to 0x%02x)\n",
 		 chip->bank_mask, chip->bank_size,
 		 chip->bank_start, chip->bank_end);
 
@@ -363,28 +364,27 @@ static int __init i2c_stub_init(void)
 	int i, ret;
 
 	if (!chip_addr[0]) {
-		pr_err("i2c-stub: Please specify a chip address\n");
+		pr_err("Please specify a chip address\n");
 		return -ENODEV;
 	}
 
 	for (i = 0; i < MAX_CHIPS && chip_addr[i]; i++) {
 		if (chip_addr[i] < 0x03 || chip_addr[i] > 0x77) {
-			pr_err("i2c-stub: Invalid chip address 0x%02x\n",
+			pr_err("Invalid chip address 0x%02x\n",
 			       chip_addr[i]);
 			return -EINVAL;
 		}
 
-		pr_info("i2c-stub: Virtual chip at 0x%02x\n", chip_addr[i]);
+		pr_info("Virtual chip at 0x%02x\n", chip_addr[i]);
 	}
 
 	/* Allocate memory for all chips at once */
	
stub_chips_nr = i; stub_chips = kcalloc(stub_chips_nr, sizeof(struct stub_chip), GFP_KERNEL); - if (!stub_chips) { - pr_err("i2c-stub: Out of memory\n"); + if (!stub_chips) return -ENOMEM; - } + for (i = 0; i < stub_chips_nr; i++) { INIT_LIST_HEAD(&stub_chips[i].smbus_blocks); diff --git a/drivers/ide/ide-timings.c b/drivers/ide/ide-timings.c index 0e05f75934c9..1858e3ce3993 100644 --- a/drivers/ide/ide-timings.c +++ b/drivers/ide/ide-timings.c @@ -104,19 +104,19 @@ u16 ide_pio_cycle_time(ide_drive_t *drive, u8 pio) EXPORT_SYMBOL_GPL(ide_pio_cycle_time); #define ENOUGH(v, unit) (((v) - 1) / (unit) + 1) -#define EZ(v, unit) ((v) ? ENOUGH(v, unit) : 0) +#define EZ(v, unit) ((v) ? ENOUGH((v) * 1000, unit) : 0) static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, int T, int UT) { - q->setup = EZ(t->setup * 1000, T); - q->act8b = EZ(t->act8b * 1000, T); - q->rec8b = EZ(t->rec8b * 1000, T); - q->cyc8b = EZ(t->cyc8b * 1000, T); - q->active = EZ(t->active * 1000, T); - q->recover = EZ(t->recover * 1000, T); - q->cycle = EZ(t->cycle * 1000, T); - q->udma = EZ(t->udma * 1000, UT); + q->setup = EZ(t->setup, T); + q->act8b = EZ(t->act8b, T); + q->rec8b = EZ(t->rec8b, T); + q->cyc8b = EZ(t->cyc8b, T); + q->active = EZ(t->active, T); + q->recover = EZ(t->recover, T); + q->cycle = EZ(t->cycle, T); + q->udma = EZ(t->udma, UT); } void ide_timing_merge(struct ide_timing *a, struct ide_timing *b, diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c index a6cb379a4ebc..01236cef7bfb 100644 --- a/drivers/infiniband/core/addr.c +++ b/drivers/infiniband/core/addr.c @@ -268,6 +268,7 @@ int rdma_translate_ip(const struct sockaddr *addr, return ret; ret = rdma_copy_addr(dev_addr, dev, NULL); + dev_addr->bound_dev_if = dev->ifindex; if (vlan_id) *vlan_id = rdma_vlan_dev_vlan_id(dev); dev_put(dev); @@ -280,6 +281,7 @@ int rdma_translate_ip(const struct sockaddr *addr, &((const struct sockaddr_in6 *)addr)->sin6_addr, dev, 1)) { ret = rdma_copy_addr(dev_addr, dev, NULL); + dev_addr->bound_dev_if = dev->ifindex; if (vlan_id) *vlan_id = rdma_vlan_dev_vlan_id(dev); break; @@ -405,10 +407,10 @@ static int addr4_resolve(struct sockaddr_in *src_in, fl4.saddr = src_ip; fl4.flowi4_oif = addr->bound_dev_if; rt = ip_route_output_key(addr->net, &fl4); - if (IS_ERR(rt)) { - ret = PTR_ERR(rt); - goto out; - } + ret = PTR_ERR_OR_ZERO(rt); + if (ret) + return ret; + src_in->sin_family = AF_INET; src_in->sin_addr.s_addr = fl4.saddr; @@ -423,8 +425,6 @@ static int addr4_resolve(struct sockaddr_in *src_in, *prt = rt; return 0; -out: - return ret; } #if IS_ENABLED(CONFIG_IPV6) @@ -509,6 +509,11 @@ static int addr_resolve(struct sockaddr *src_in, struct dst_entry *dst; int ret; + if (!addr->net) { + pr_warn_ratelimited("%s: missing namespace\n", __func__); + return -EINVAL; + } + if (src_in->sa_family == AF_INET) { struct rtable *rt = NULL; const struct sockaddr_in *dst_in4 = @@ -522,8 +527,12 @@ static int addr_resolve(struct sockaddr *src_in, if (resolve_neigh) ret = addr_resolve_neigh(&rt->dst, dst_in, addr, seq); - ndev = rt->dst.dev; - dev_hold(ndev); + if (addr->bound_dev_if) { + ndev = dev_get_by_index(addr->net, addr->bound_dev_if); + } else { + ndev = rt->dst.dev; + dev_hold(ndev); + } ip_rt_put(rt); } else { @@ -539,14 +548,27 @@ static int addr_resolve(struct sockaddr *src_in, if (resolve_neigh) ret = addr_resolve_neigh(dst, dst_in, addr, seq); - ndev = dst->dev; - dev_hold(ndev); + if (addr->bound_dev_if) { + ndev = dev_get_by_index(addr->net, addr->bound_dev_if); + } else { + ndev 
= dst->dev; + dev_hold(ndev); + } dst_release(dst); } - addr->bound_dev_if = ndev->ifindex; - addr->net = dev_net(ndev); + if (ndev->flags & IFF_LOOPBACK) { + ret = rdma_translate_ip(dst_in, addr, NULL); + /* + * Put the loopback device and get the translated + * device instead. + */ + dev_put(ndev); + ndev = dev_get_by_index(addr->net, addr->bound_dev_if); + } else { + addr->bound_dev_if = ndev->ifindex; + } dev_put(ndev); return ret; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 31bb82d8ecd7..0eb393237ba2 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -623,22 +623,11 @@ static inline int cma_validate_port(struct ib_device *device, u8 port, if ((dev_type != ARPHRD_INFINIBAND) && rdma_protocol_ib(device, port)) return ret; - if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) { + if (dev_type == ARPHRD_ETHER && rdma_protocol_roce(device, port)) ndev = dev_get_by_index(&init_net, bound_if_index); - if (ndev && ndev->flags & IFF_LOOPBACK) { - pr_info("detected loopback device\n"); - dev_put(ndev); - - if (!device->get_netdev) - return -EOPNOTSUPP; - - ndev = device->get_netdev(device, port); - if (!ndev) - return -ENODEV; - } - } else { + else gid_type = IB_GID_TYPE_IB; - } + ret = ib_find_cached_gid_by_port(device, gid, gid_type, port, ndev, NULL); @@ -1044,6 +1033,8 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, } else ret = iw_cm_init_qp_attr(id_priv->cm_id.iw, qp_attr, qp_attr_mask); + qp_attr->port_num = id_priv->id.port_num; + *qp_attr_mask |= IB_QP_PORT; } else ret = -ENOSYS; @@ -2569,21 +2560,6 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) goto err2; } - if (ndev->flags & IFF_LOOPBACK) { - dev_put(ndev); - if (!id_priv->id.device->get_netdev) { - ret = -EOPNOTSUPP; - goto err2; - } - - ndev = id_priv->id.device->get_netdev(id_priv->id.device, - id_priv->id.port_num); - if (!ndev) { - ret = -ENODEV; - goto err2; - } - } - supported_gids = roce_gid_type_mask_support(id_priv->id.device, id_priv->id.port_num); gid_type = cma_route_gid_type(addr->dev_addr.network, diff --git a/drivers/infiniband/core/roce_gid_mgmt.c b/drivers/infiniband/core/roce_gid_mgmt.c index db958d3207ef..94a9eefb3cfc 100644 --- a/drivers/infiniband/core/roce_gid_mgmt.c +++ b/drivers/infiniband/core/roce_gid_mgmt.c @@ -42,6 +42,8 @@ #include <rdma/ib_cache.h> #include <rdma/ib_addr.h> +static struct workqueue_struct *gid_cache_wq; + enum gid_op_type { GID_DEL = 0, GID_ADD @@ -560,7 +562,7 @@ static int netdevice_queue_work(struct netdev_event_work_cmd *cmds, } INIT_WORK(&ndev_work->work, netdevice_event_work_handler); - queue_work(ib_wq, &ndev_work->work); + queue_work(gid_cache_wq, &ndev_work->work); return NOTIFY_DONE; } @@ -693,7 +695,7 @@ static int addr_event(struct notifier_block *this, unsigned long event, dev_hold(ndev); work->gid_attr.ndev = ndev; - queue_work(ib_wq, &work->work); + queue_work(gid_cache_wq, &work->work); return NOTIFY_DONE; } @@ -740,6 +742,10 @@ static struct notifier_block nb_inet6addr = { int __init roce_gid_mgmt_init(void) { + gid_cache_wq = alloc_ordered_workqueue("gid-cache-wq", 0); + if (!gid_cache_wq) + return -ENOMEM; + register_inetaddr_notifier(&nb_inetaddr); if (IS_ENABLED(CONFIG_IPV6)) register_inet6addr_notifier(&nb_inet6addr); @@ -764,4 +770,5 @@ void __exit roce_gid_mgmt_cleanup(void) * ib-core is removed, all physical devices have been removed, * so no issue with remaining hardware contexts. 
*/ + destroy_workqueue(gid_cache_wq); } diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 8ba9bfb073d1..2c98533a0203 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1296,7 +1296,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file, struct ib_uobject *uobj; struct ib_cq *cq; struct ib_ucq_object *obj; - struct ib_uverbs_event_queue *ev_queue; int ret = -EINVAL; if (copy_from_user(&cmd, buf, sizeof cmd)) @@ -1313,7 +1312,6 @@ ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file, */ uverbs_uobject_get(uobj); cq = uobj->object; - ev_queue = cq->cq_context; obj = container_of(cq->uobject, struct ib_ucq_object, uobject); memset(&resp, 0, sizeof(resp)); @@ -1935,7 +1933,8 @@ static int modify_qp(struct ib_uverbs_file *file, goto out; } - if (!rdma_is_port_valid(qp->device, cmd->base.port_num)) { + if ((cmd->base.attr_mask & IB_QP_PORT) && + !rdma_is_port_valid(qp->device, cmd->base.port_num)) { ret = -EINVAL; goto release_qp; } @@ -2005,28 +2004,13 @@ static int modify_qp(struct ib_uverbs_file *file, rdma_ah_set_port_num(&attr->alt_ah_attr, cmd->base.alt_dest.port_num); - if (qp->real_qp == qp) { - if (cmd->base.attr_mask & IB_QP_AV) { - ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr); - if (ret) - goto release_qp; - } - ret = ib_security_modify_qp(qp, - attr, - modify_qp_mask(qp->qp_type, - cmd->base.attr_mask), - udata); - } else { - ret = ib_security_modify_qp(qp, - attr, - modify_qp_mask(qp->qp_type, - cmd->base.attr_mask), - NULL); - } + ret = ib_modify_qp_with_udata(qp, attr, + modify_qp_mask(qp->qp_type, + cmd->base.attr_mask), + udata); release_qp: uobj_put_obj_read(qp); - out: kfree(attr); @@ -2103,7 +2087,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, struct ib_uverbs_destroy_qp cmd; struct ib_uverbs_destroy_qp_resp resp; struct ib_uobject *uobj; - struct ib_qp *qp; struct ib_uqp_object *obj; int ret = -EINVAL; @@ -2117,7 +2100,6 @@ ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file, if (IS_ERR(uobj)) return PTR_ERR(uobj); - qp = uobj->object; obj = container_of(uobj, struct ib_uqp_object, uevent.uobject); /* * Make sure we don't free the memory in remove_commit as we still * need the uobject memory to create the response. @@ -3019,7 +3001,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file, { struct ib_uverbs_ex_destroy_wq cmd = {}; struct ib_uverbs_ex_destroy_wq_resp resp = {}; - struct ib_wq *wq; struct ib_uobject *uobj; struct ib_uwq_object *obj; size_t required_cmd_sz; @@ -3053,7 +3034,6 @@ int ib_uverbs_ex_destroy_wq(struct ib_uverbs_file *file, if (IS_ERR(uobj)) return PTR_ERR(uobj); - wq = uobj->object; obj = container_of(uobj, struct ib_uwq_object, uevent.uobject); /* * Make sure we don't free the memory in remove_commit as we still * need the uobject memory to create the response. @@ -3743,10 +3723,8 @@ ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file, struct ib_uverbs_destroy_srq cmd; struct ib_uverbs_destroy_srq_resp resp; struct ib_uobject *uobj; - struct ib_srq *srq; struct ib_uevent_object *obj; int ret = -EINVAL; - enum ib_srq_type srq_type; if (copy_from_user(&cmd, buf, sizeof cmd)) return -EFAULT; @@ -3756,9 +3734,7 @@ if (IS_ERR(uobj)) return PTR_ERR(uobj); - srq = uobj->object; obj = container_of(uobj, struct ib_uevent_object, uobject); - srq_type = srq->srq_type; /* * Make sure we don't free the memory in remove_commit as we still * need the uobject memory to create the response.
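Aside: the roce_gid_mgmt.c hunks above replace the shared ib_wq with a dedicated ordered workqueue (gid_cache_wq), so netdev and inet address events that update the GID cache are processed strictly one at a time, in submission order. A minimal sketch of that allocate/queue/destroy pattern follows; it is illustrative only, and the names demo_wq, demo_event_work and demo_queue_event are hypothetical, not symbols from this patch.

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

/* Hypothetical event work item, mirroring netdev_event_work in spirit. */
struct demo_event_work {
	struct work_struct work;
	int event;
};

static struct workqueue_struct *demo_wq;

static void demo_event_handler(struct work_struct *_work)
{
	struct demo_event_work *work =
		container_of(_work, struct demo_event_work, work);

	/* Items queued on an ordered workqueue run one at a time, in
	 * submission order, so the handler needs no extra locking to
	 * keep events from racing each other.
	 */
	pr_info("handling event %d\n", work->event);
	kfree(work);
}

static int demo_queue_event(int event)
{
	/* GFP_ATOMIC because notifier callbacks may run in atomic context. */
	struct demo_event_work *work = kmalloc(sizeof(*work), GFP_ATOMIC);

	if (!work)
		return -ENOMEM;

	INIT_WORK(&work->work, demo_event_handler);
	work->event = event;
	queue_work(demo_wq, &work->work);
	return 0;
}

static int __init demo_init(void)
{
	/* max_active is implicitly 1 for an ordered workqueue. */
	demo_wq = alloc_ordered_workqueue("demo-wq", 0);
	if (!demo_wq)
		return -ENOMEM;
	demo_queue_event(1);
	demo_queue_event(2);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Drains any still-queued work before freeing the queue,
	 * matching the cleanup done in roce_gid_mgmt_cleanup().
	 */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");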
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index c973a83c898b..fb98ed67d5bc 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -452,6 +452,19 @@ int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr, } EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr); +/* + * This function creates an AH from the incoming packet. + * The incoming packet has the dgid of the receiver node on which this + * code is getting executed, and sgid contains the GID of the sender. + * + * When resolving the mac address of the destination, the arrived dgid + * is used as sgid, and sgid is used as dgid, because sgid contains the + * destination's GID to respond to. + * + * This is why, when calling rdma_addr_find_l2_eth_by_grh(), the + * position of the dgid and sgid arguments does not match the order of + * the parameters. + */ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, const struct ib_wc *wc, const struct ib_grh *grh, struct rdma_ah_attr *ah_attr) @@ -507,11 +520,6 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, } resolved_dev = dev_get_by_index(&init_net, if_index); - if (resolved_dev->flags & IFF_LOOPBACK) { - dev_put(resolved_dev); - resolved_dev = idev; - dev_hold(resolved_dev); - } rcu_read_lock(); if (resolved_dev != idev && !rdma_is_upper_dev_rcu(idev, resolved_dev)) @@ -887,6 +895,7 @@ static const struct { } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = { [IB_QPS_RESET] = { [IB_QPS_RESET] = { .valid = 1 }, + [IB_QPS_ERR] = { .valid = 1 }, [IB_QPS_INIT] = { .valid = 1, .req_param = { @@ -1268,20 +1277,36 @@ out: } EXPORT_SYMBOL(ib_resolve_eth_dmac); -int ib_modify_qp(struct ib_qp *qp, - struct ib_qp_attr *qp_attr, - int qp_attr_mask) +/** + * ib_modify_qp_with_udata - Modifies the attributes for the specified QP. + * @qp: The QP to modify. + * @attr: On input, specifies the QP attributes to modify. On output, + * the current values of selected QP attributes are returned. + * @attr_mask: A bit-mask used to specify which attributes of the QP + * are being modified. + * @udata: pointer to the user's input/output buffer information + * + * It returns 0 on success and an appropriate error code on error.
+ */ +int ib_modify_qp_with_udata(struct ib_qp *qp, struct ib_qp_attr *attr, + int attr_mask, struct ib_udata *udata) { + int ret; - if (qp_attr_mask & IB_QP_AV) { - int ret; - - ret = ib_resolve_eth_dmac(qp->device, &qp_attr->ah_attr); + if (attr_mask & IB_QP_AV) { + ret = ib_resolve_eth_dmac(qp->device, &attr->ah_attr); if (ret) return ret; } + return ib_security_modify_qp(qp, attr, attr_mask, udata); +} +EXPORT_SYMBOL(ib_modify_qp_with_udata); - return ib_security_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); +int ib_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask) +{ + return ib_modify_qp_with_udata(qp, qp_attr, qp_attr_mask, NULL); } EXPORT_SYMBOL(ib_modify_qp); diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index 08772836fded..85527532c49d 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -51,6 +51,8 @@ #define BNXT_RE_PAGE_SIZE_8M BIT(23) #define BNXT_RE_PAGE_SIZE_1G BIT(30) +#define BNXT_RE_MAX_MR_SIZE BIT(30) + #define BNXT_RE_MAX_QPC_COUNT (64 * 1024) #define BNXT_RE_MAX_MRW_COUNT (64 * 1024) #define BNXT_RE_MAX_SRQC_COUNT (64 * 1024) @@ -60,6 +62,13 @@ #define BNXT_RE_RQ_WQE_THRESHOLD 32 +/* + * Setting the default ack delay value to 16, which means + * the default timeout is approx. 260ms(4 usec * 2 ^(timeout)) + */ + +#define BNXT_RE_DEFAULT_ACK_DELAY 16 + struct bnxt_re_work { struct work_struct work; unsigned long event; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index c7bd68311d0c..f0e01b3ac711 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -145,10 +145,8 @@ int bnxt_re_query_device(struct ib_device *ibdev, ib_attr->fw_ver = (u64)(unsigned long)(dev_attr->fw_ver); bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ib_attr->sys_image_guid); - ib_attr->max_mr_size = ~0ull; - ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_8K | - BNXT_RE_PAGE_SIZE_64K | BNXT_RE_PAGE_SIZE_2M | - BNXT_RE_PAGE_SIZE_8M | BNXT_RE_PAGE_SIZE_1G; + ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE; + ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K; ib_attr->vendor_id = rdev->en_dev->pdev->vendor; ib_attr->vendor_part_id = rdev->en_dev->pdev->device; @@ -174,9 +172,11 @@ int bnxt_re_query_device(struct ib_device *ibdev, ib_attr->max_mr = dev_attr->max_mr; ib_attr->max_pd = dev_attr->max_pd; ib_attr->max_qp_rd_atom = dev_attr->max_qp_rd_atom; - ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_rd_atom; - ib_attr->atomic_cap = IB_ATOMIC_HCA; - ib_attr->masked_atomic_cap = IB_ATOMIC_HCA; + ib_attr->max_qp_init_rd_atom = dev_attr->max_qp_init_rd_atom; + if (dev_attr->is_atomic) { + ib_attr->atomic_cap = IB_ATOMIC_HCA; + ib_attr->masked_atomic_cap = IB_ATOMIC_HCA; + } ib_attr->max_ee_rd_atom = 0; ib_attr->max_res_rd_atom = 0; @@ -201,7 +201,7 @@ int bnxt_re_query_device(struct ib_device *ibdev, ib_attr->max_fast_reg_page_list_len = MAX_PBL_LVL_1_PGS; ib_attr->max_pkeys = 1; - ib_attr->local_ca_ack_delay = 0; + ib_attr->local_ca_ack_delay = BNXT_RE_DEFAULT_ACK_DELAY; return 0; } @@ -390,15 +390,17 @@ int bnxt_re_del_gid(struct ib_device *ibdev, u8 port_num, return -EINVAL; ctx->refcnt--; if (!ctx->refcnt) { - rc = bnxt_qplib_del_sgid - (sgid_tbl, - &sgid_tbl->tbl[ctx->idx], true); - if (rc) + rc = bnxt_qplib_del_sgid(sgid_tbl, + &sgid_tbl->tbl[ctx->idx], + true); + if (rc) { dev_err(rdev_to_dev(rdev), "Failed to remove GID: %#x", rc); - ctx_tbl = 
sgid_tbl->ctx; - ctx_tbl[ctx->idx] = NULL; - kfree(ctx); + } else { + ctx_tbl = sgid_tbl->ctx; + ctx_tbl[ctx->idx] = NULL; + kfree(ctx); + } } } else { return -EINVAL; @@ -588,10 +590,10 @@ static int bnxt_re_create_fence_mr(struct bnxt_re_pd *pd) /* Create a fence MW only for kernel consumers */ mw = bnxt_re_alloc_mw(&pd->ib_pd, IB_MW_TYPE_1, NULL); - if (!mw) { + if (IS_ERR(mw)) { dev_err(rdev_to_dev(rdev), "Failed to create fence-MW for PD: %p\n", pd); - rc = -EINVAL; + rc = PTR_ERR(mw); goto fail; } fence->mw = mw; @@ -612,30 +614,13 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd) int rc; bnxt_re_destroy_fence_mr(pd); - if (ib_pd->uobject && pd->dpi.dbr) { - struct ib_ucontext *ib_uctx = ib_pd->uobject->context; - struct bnxt_re_ucontext *ucntx; - /* Free DPI only if this is the first PD allocated by the - * application and mark the context dpi as NULL - */ - ucntx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx); - - rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res, - &rdev->qplib_res.dpi_tbl, - &pd->dpi); + if (pd->qplib_pd.id) { + rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res, + &rdev->qplib_res.pd_tbl, + &pd->qplib_pd); if (rc) - dev_err(rdev_to_dev(rdev), "Failed to deallocate HW DPI"); - /* Don't fail, continue*/ - ucntx->dpi = NULL; - } - - rc = bnxt_qplib_dealloc_pd(&rdev->qplib_res, - &rdev->qplib_res.pd_tbl, - &pd->qplib_pd); - if (rc) { - dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD"); - return rc; + dev_err(rdev_to_dev(rdev), "Failed to deallocate HW PD"); } kfree(pd); @@ -667,23 +652,22 @@ struct ib_pd *bnxt_re_alloc_pd(struct ib_device *ibdev, if (udata) { struct bnxt_re_pd_resp resp; - if (!ucntx->dpi) { + if (!ucntx->dpi.dbr) { /* Allocate DPI in alloc_pd to avoid failing of * ibv_devinfo and family of application when DPIs * are depleted. */ if (bnxt_qplib_alloc_dpi(&rdev->qplib_res.dpi_tbl, - &pd->dpi, ucntx)) { + &ucntx->dpi, ucntx)) { rc = -ENOMEM; goto dbfail; } - ucntx->dpi = &pd->dpi; } resp.pdid = pd->qplib_pd.id; /* Still allow mapping this DBR to the new user PD. 
*/ - resp.dpi = ucntx->dpi->dpi; - resp.dbr = (u64)ucntx->dpi->umdbr; + resp.dpi = ucntx->dpi.dpi; + resp.dbr = (u64)ucntx->dpi.umdbr; rc = ib_copy_to_udata(udata, &resp, sizeof(resp)); if (rc) { @@ -960,7 +944,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd, qplib_qp->rq.nmap = umem->nmap; } - qplib_qp->dpi = cntx->dpi; + qplib_qp->dpi = &cntx->dpi; return 0; rqfail: ib_umem_release(qp->sumem); @@ -1530,13 +1514,24 @@ int bnxt_re_modify_qp(struct ib_qp *ib_qp, struct ib_qp_attr *qp_attr, if (qp_attr_mask & IB_QP_MAX_QP_RD_ATOMIC) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_MAX_RD_ATOMIC; - qp->qplib_qp.max_rd_atomic = qp_attr->max_rd_atomic; + /* Cap the max_rd_atomic to device max */ + qp->qplib_qp.max_rd_atomic = min_t(u32, qp_attr->max_rd_atomic, + dev_attr->max_qp_rd_atom); } if (qp_attr_mask & IB_QP_SQ_PSN) { qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_SQ_PSN; qp->qplib_qp.sq.psn = qp_attr->sq_psn; } if (qp_attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) { + if (qp_attr->max_dest_rd_atomic > + dev_attr->max_qp_init_rd_atom) { + dev_err(rdev_to_dev(rdev), + "max_dest_rd_atomic requested %d is > dev_max %d", + qp_attr->max_dest_rd_atomic, + dev_attr->max_qp_init_rd_atom); + return -EINVAL; + } + qp->qplib_qp.modify_flags |= CMDQ_MODIFY_QP_MODIFY_MASK_MAX_DEST_RD_ATOMIC; qp->qplib_qp.max_dest_rd_atomic = qp_attr->max_dest_rd_atomic; @@ -2403,7 +2398,7 @@ struct ib_cq *bnxt_re_create_cq(struct ib_device *ibdev, } cq->qplib_cq.sghead = cq->umem->sg_head.sgl; cq->qplib_cq.nmap = cq->umem->nmap; - cq->qplib_cq.dpi = uctx->dpi; + cq->qplib_cq.dpi = &uctx->dpi; } else { cq->max_cql = min_t(u32, entries, MAX_CQL_PER_POLL); cq->cql = kcalloc(cq->max_cql, sizeof(struct bnxt_qplib_cqe), @@ -2905,6 +2900,7 @@ int bnxt_re_poll_cq(struct ib_cq *ib_cq, int num_entries, struct ib_wc *wc) spin_lock_irqsave(&cq->cq_lock, flags); budget = min_t(u32, num_entries, cq->max_cql); + num_entries = budget; if (!cq->cql) { dev_err(rdev_to_dev(cq->rdev), "POLL CQ : no CQL to use"); goto exit; @@ -3031,6 +3027,11 @@ int bnxt_re_req_notify_cq(struct ib_cq *ib_cq, else if (ib_cqn_flags & IB_CQ_SOLICITED) type = DBR_DBR_TYPE_CQ_ARMSE; + /* Poll to see if there are missed events */ + if ((ib_cqn_flags & IB_CQ_REPORT_MISSED_EVENTS) && + !(bnxt_qplib_is_cq_empty(&cq->qplib_cq))) + return 1; + bnxt_qplib_req_notify_cq(&cq->qplib_cq, type); return 0; @@ -3245,6 +3246,12 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, struct scatterlist *sg; int entry; + if (length > BNXT_RE_MAX_MR_SIZE) { + dev_err(rdev_to_dev(rdev), "MR Size: %lld > Max supported: %ld\n", + length, BNXT_RE_MAX_MR_SIZE); + return ERR_PTR(-ENOMEM); + } + mr = kzalloc(sizeof(*mr), GFP_KERNEL); if (!mr) return ERR_PTR(-ENOMEM); @@ -3388,8 +3395,26 @@ int bnxt_re_dealloc_ucontext(struct ib_ucontext *ib_uctx) struct bnxt_re_ucontext *uctx = container_of(ib_uctx, struct bnxt_re_ucontext, ib_uctx); + + struct bnxt_re_dev *rdev = uctx->rdev; + int rc = 0; + if (uctx->shpg) free_page((unsigned long)uctx->shpg); + + if (uctx->dpi.dbr) { + /* Free DPI only if this is the first PD allocated by the + * application and mark the context dpi as NULL + */ + rc = bnxt_qplib_dealloc_dpi(&rdev->qplib_res, + &rdev->qplib_res.dpi_tbl, + &uctx->dpi); + if (rc) + dev_err(rdev_to_dev(rdev), "Deallocate HW DPI failed!"); + /* Don't fail, continue */ + uctx->dpi.dbr = NULL; + } + kfree(uctx); return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h
index 6c160f6a5398..a0bb7e33d7ca 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -59,7 +59,6 @@ struct bnxt_re_pd { struct bnxt_re_dev *rdev; struct ib_pd ib_pd; struct bnxt_qplib_pd qplib_pd; - struct bnxt_qplib_dpi dpi; struct bnxt_re_fence_data fence; }; @@ -127,7 +126,7 @@ struct bnxt_re_mw { struct bnxt_re_ucontext { struct bnxt_re_dev *rdev; struct ib_ucontext ib_uctx; - struct bnxt_qplib_dpi *dpi; + struct bnxt_qplib_dpi dpi; void *shpg; spinlock_t sh_lock; /* protect shpg */ }; diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 1fce5e73216b..ceae2d92fb08 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -333,6 +333,7 @@ static int bnxt_re_net_stats_ctx_alloc(struct bnxt_re_dev *rdev, bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_ALLOC, -1, -1); req.update_period_ms = cpu_to_le32(1000); req.stats_dma_addr = cpu_to_le64(dma_map); + req.stat_ctx_flags = STAT_CTX_ALLOC_REQ_STAT_CTX_FLAGS_ROCE; bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, sizeof(resp), DFLT_HWRM_CMD_TIMEOUT); rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index f05500bcdcf1..9af1514e5944 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -1128,6 +1128,11 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp, } /* Each SGE entry = 1 WQE size16 */ wqe_size16 = wqe->num_sge; + /* HW requires wqe size has room for at least one SGE even if + * none was supplied by ULP + */ + if (!wqe->num_sge) + wqe_size16++; } /* Specifics */ @@ -1364,6 +1369,11 @@ int bnxt_qplib_post_recv(struct bnxt_qplib_qp *qp, rqe->flags = wqe->flags; rqe->wqe_size = wqe->num_sge + ((offsetof(typeof(*rqe), data) + 15) >> 4); + /* HW requires wqe size has room for at least one SGE even if none + * was supplied by ULP + */ + if (!wqe->num_sge) + rqe->wqe_size++; /* Supply the rqe->wr_id index to the wr_id_tbl for now */ rqe->wr_id[0] = cpu_to_le32(sw_prod); @@ -1885,6 +1895,25 @@ flush_rq: return rc; } +bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq) +{ + struct cq_base *hw_cqe, **hw_cqe_ptr; + unsigned long flags; + u32 sw_cons, raw_cons; + bool rc = true; + + spin_lock_irqsave(&cq->hwq.lock, flags); + raw_cons = cq->hwq.cons; + sw_cons = HWQ_CMP(raw_cons, &cq->hwq); + hw_cqe_ptr = (struct cq_base **)cq->hwq.pbl_ptr; + hw_cqe = &hw_cqe_ptr[CQE_PG(sw_cons)][CQE_IDX(sw_cons)]; + + /* Check for Valid bit.
If the CQE is valid, return false */ + rc = !CQE_CMP_VALID(hw_cqe, raw_cons, cq->hwq.max_elements); + spin_unlock_irqrestore(&cq->hwq.lock, flags); + return rc; +} + static int bnxt_qplib_cq_process_res_raweth_qp1(struct bnxt_qplib_cq *cq, struct cq_res_raweth_qp1 *hwcqe, struct bnxt_qplib_cqe **pcqe, diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index 36b7b7db0e3f..19176e06c98a 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -449,6 +449,7 @@ int bnxt_qplib_create_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); int bnxt_qplib_destroy_cq(struct bnxt_qplib_res *res, struct bnxt_qplib_cq *cq); int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, int num, struct bnxt_qplib_qp **qp); +bool bnxt_qplib_is_cq_empty(struct bnxt_qplib_cq *cq); void bnxt_qplib_req_notify_cq(struct bnxt_qplib_cq *cq, u32 arm_type); void bnxt_qplib_free_nq(struct bnxt_qplib_nq *nq); int bnxt_qplib_alloc_nq(struct pci_dev *pdev, struct bnxt_qplib_nq *nq); diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index fde18cf0e406..ef91ab786dd4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -51,6 +51,19 @@ const struct bnxt_qplib_gid bnxt_qplib_gid_zero = {{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }; /* Device */ + +static bool bnxt_qplib_is_atomic_cap(struct bnxt_qplib_rcfw *rcfw) +{ + int rc; + u16 pcie_ctl2; + + rc = pcie_capability_read_word(rcfw->pdev, PCI_EXP_DEVCTL2, + &pcie_ctl2); + if (rc) + return false; + return !!(pcie_ctl2 & PCI_EXP_DEVCTL2_ATOMIC_REQ); +} + int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_dev_attr *attr) { @@ -81,6 +94,8 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, /* Extract the context from the side buffer */ attr->max_qp = le32_to_cpu(sb->max_qp); + /* max_qp value reported by FW for PF doesn't include the QP1 for PF */ + attr->max_qp += 1; attr->max_qp_rd_atom = sb->max_qp_rd_atom > BNXT_QPLIB_MAX_OUT_RD_ATOM ? 
BNXT_QPLIB_MAX_OUT_RD_ATOM : sb->max_qp_rd_atom; @@ -129,6 +144,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->tqm_alloc_reqs[i * 4 + 3] = *(++tqm_alloc); } + attr->is_atomic = bnxt_qplib_is_atomic_cap(rcfw); bail: bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); return rc; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index a543f959098b..2ce7e2a32cf0 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -42,6 +42,8 @@ #define BNXT_QPLIB_RESERVED_QP_WRS 128 +#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 + struct bnxt_qplib_dev_attr { char fw_ver[32]; u16 max_sgid; @@ -70,6 +72,7 @@ struct bnxt_qplib_dev_attr { u32 max_inline_data; u32 l2_db_size; u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ]; + bool is_atomic; }; struct bnxt_qplib_pd { diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 29d30744d6c9..0cd0c1fa27d4 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -718,7 +718,7 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, struct iwch_mr *mhp; u32 mmid; u32 stag = 0; - int ret = 0; + int ret = -ENOMEM; if (mr_type != IB_MR_TYPE_MEM_REG || max_num_sg > T3_MAX_FASTREG_DEPTH) @@ -731,10 +731,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, goto err; mhp->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL); - if (!mhp->pages) { - ret = -ENOMEM; + if (!mhp->pages) goto pl_err; - } mhp->rhp = rhp; ret = iwch_alloc_pbl(mhp, max_num_sg); @@ -751,7 +749,8 @@ static struct ib_mr *iwch_alloc_mr(struct ib_pd *pd, mhp->attr.state = 1; mmid = (stag) >> 8; mhp->ibmr.rkey = mhp->ibmr.lkey = stag; - if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) + ret = insert_handle(rhp, &rhp->mmidr, mhp, mmid); + if (ret) goto err3; pr_debug("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c index e16fcaf6b5a3..be07da1997e6 100644 --- a/drivers/infiniband/hw/cxgb4/cq.c +++ b/drivers/infiniband/hw/cxgb4/cq.c @@ -963,6 +963,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, goto err3; if (ucontext) { + ret = -ENOMEM; mm = kmalloc(sizeof *mm, GFP_KERNEL); if (!mm) goto err4; diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index bfc77596acbe..cb7fc0d35d1d 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -569,7 +569,7 @@ static int build_rdma_read(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16) { if (wr->num_sge > 1) return -EINVAL; - if (wr->num_sge) { + if (wr->num_sge && wr->sg_list[0].length) { wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey); wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr >> 32)); diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 2ba00b89df6a..94b54850ec75 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -12847,7 +12847,12 @@ static void remap_intr(struct hfi1_devdata *dd, int isrc, int msix_intr) /* clear from the handled mask of the general interrupt */ m = isrc / 64; n = isrc % 64; - dd->gi_mask[m] &= ~((u64)1 << n); + if (likely(m < CCE_NUM_INT_CSRS)) { + dd->gi_mask[m] &= ~((u64)1 << n); + } else { + dd_dev_err(dd, "remap interrupt err\n"); + return; + } /* direct the chip source to the given MSI-X interrupt */ m = isrc / 8; diff --git a/drivers/infiniband/hw/hfi1/qp.c 
b/drivers/infiniband/hw/hfi1/qp.c index 650305cc0373..1a7af9f60c13 100644 --- a/drivers/infiniband/hw/hfi1/qp.c +++ b/drivers/infiniband/hw/hfi1/qp.c @@ -647,18 +647,17 @@ void qp_iter_print(struct seq_file *s, struct qp_iter *iter) qp->pid); } -void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, - gfp_t gfp) +void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp) { struct hfi1_qp_priv *priv; - priv = kzalloc_node(sizeof(*priv), gfp, rdi->dparms.node); + priv = kzalloc_node(sizeof(*priv), GFP_KERNEL, rdi->dparms.node); if (!priv) return ERR_PTR(-ENOMEM); priv->owner = qp; - priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), gfp, + priv->s_ahg = kzalloc_node(sizeof(*priv->s_ahg), GFP_KERNEL, rdi->dparms.node); if (!priv->s_ahg) { kfree(priv); diff --git a/drivers/infiniband/hw/hfi1/qp.h b/drivers/infiniband/hw/hfi1/qp.h index 1eb9cd7b8c19..6fe542b6a927 100644 --- a/drivers/infiniband/hw/hfi1/qp.h +++ b/drivers/infiniband/hw/hfi1/qp.h @@ -123,8 +123,7 @@ void hfi1_migrate_qp(struct rvt_qp *qp); /* * Functions provided by hfi1 driver for rdmavt to use */ -void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, - gfp_t gfp); +void *qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp); void qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp); unsigned free_all_qps(struct rvt_dev_info *rdi); void notify_qp_reset(struct rvt_qp *qp); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c index 37d5d29597a4..23fad6d96944 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c @@ -228,14 +228,14 @@ int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, switch (wr->opcode) { case IB_WR_RDMA_READ: ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ; - set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, - atomic_wr(wr)->rkey); + set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, + rdma_wr(wr)->rkey); break; case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE; - set_raddr_seg(wqe, atomic_wr(wr)->remote_addr, - atomic_wr(wr)->rkey); + set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, + rdma_wr(wr)->rkey); break; case IB_WR_SEND: case IB_WR_SEND_WITH_INV: @@ -661,9 +661,11 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) union ib_gid dgid; u64 subnet_prefix; int attr_mask = 0; - int i; + int i, j; int ret; + u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 }; u8 phy_port; + u8 port = 0; u8 sl; priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv; @@ -709,11 +711,27 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) attr.rnr_retry = 7; attr.timeout = 0x12; attr.path_mtu = IB_MTU_256; + attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0); rdma_ah_set_static_rate(&attr.ah_attr, 3); subnet_prefix = cpu_to_be64(0xfe80000000000000LL); for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { + phy_port = (i >= HNS_ROCE_MAX_PORTS) ? 
(i - 2) : + (i % HNS_ROCE_MAX_PORTS); + sl = i / HNS_ROCE_MAX_PORTS; + + for (j = 0; j < caps->num_ports; j++) { + if (hr_dev->iboe.phy_port[j] == phy_port) { + queue_en[i] = 1; + port = j; + break; + } + } + + if (!queue_en[i]) + continue; + free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); if (IS_ERR(free_mr->mr_free_qp[i])) { dev_err(dev, "Create loop qp failed!\n"); @@ -721,15 +739,7 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) } hr_qp = free_mr->mr_free_qp[i]; - sl = i / caps->num_ports; - - if (caps->num_ports == HNS_ROCE_MAX_PORTS) - phy_port = (i >= HNS_ROCE_MAX_PORTS) ? (i - 2) : - (i % caps->num_ports); - else - phy_port = i % caps->num_ports; - - hr_qp->port = phy_port + 1; + hr_qp->port = port; hr_qp->phy_port = phy_port; hr_qp->ibqp.qp_type = IB_QPT_RC; hr_qp->ibqp.device = &hr_dev->ib_dev; @@ -739,23 +749,22 @@ static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) hr_qp->ibqp.recv_cq = cq; hr_qp->ibqp.send_cq = cq; - rdma_ah_set_port_num(&attr.ah_attr, phy_port + 1); - rdma_ah_set_sl(&attr.ah_attr, phy_port + 1); - attr.port_num = phy_port + 1; + rdma_ah_set_port_num(&attr.ah_attr, port + 1); + rdma_ah_set_sl(&attr.ah_attr, sl); + attr.port_num = port + 1; attr.dest_qp_num = hr_qp->qpn; memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr), - hr_dev->dev_addr[phy_port], + hr_dev->dev_addr[port], MAC_ADDR_OCTET_NUM); memcpy(&dgid.raw, &subnet_prefix, sizeof(u64)); - memcpy(&dgid.raw[8], hr_dev->dev_addr[phy_port], 3); - memcpy(&dgid.raw[13], hr_dev->dev_addr[phy_port] + 3, 3); + memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3); + memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3); dgid.raw[11] = 0xff; dgid.raw[12] = 0xfe; dgid.raw[8] ^= 2; rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw); - attr_mask |= IB_QP_PORT; ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask, IB_QPS_RESET, IB_QPS_INIT); @@ -812,6 +821,9 @@ static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev) for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { hr_qp = free_mr->mr_free_qp[i]; + if (!hr_qp) + continue; + ret = hns_roce_v1_destroy_qp(&hr_qp->ibqp); if (ret) dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n", @@ -963,7 +975,7 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies; int i; int ret; - int ne; + int ne = 0; mr_work = container_of(work, struct hns_roce_mr_free_work, work); hr_mr = (struct hns_roce_mr *)mr_work->mr; @@ -976,6 +988,10 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { hr_qp = free_mr->mr_free_qp[i]; + if (!hr_qp) + continue; + ne++; + ret = hns_roce_v1_send_lp_wqe(hr_qp); if (ret) { dev_err(dev, @@ -985,7 +1001,6 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) } } - ne = HNS_ROCE_V1_RESV_QP; do { ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc); if (ret < 0) { @@ -995,7 +1010,8 @@ static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) goto free_work; } ne -= ret; - msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); + usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000, + (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000); } while (ne && time_before_eq(jiffies, end)); if (ne != 0) @@ -2181,7 +2197,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq, } wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; ++wq->tail; - } else { + } else { /* RQ corresponds to CQE */ wc->byte_len = le32_to_cpu(cqe->byte_cnt); opcode = roce_get_field(cqe->cqe_byte_4, @@ -3533,10 +3549,12 @@
static int check_qp_db_process_status(struct hns_roce_dev *hr_dev, old_cnt = roce_get_field(old_send, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S); - if (cur_cnt - old_cnt > SDB_ST_CMP_VAL) + if (cur_cnt - old_cnt > + SDB_ST_CMP_VAL) { success_flags = 1; - else { - send_ptr = roce_get_field(old_send, + } else { + send_ptr = + roce_get_field(old_send, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M, ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) + roce_get_field(sdb_retry_cnt, @@ -3641,6 +3659,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) struct hns_roce_dev *hr_dev; struct hns_roce_qp *hr_qp; struct device *dev; + unsigned long qpn; int ret; qp_work_entry = container_of(work, struct hns_roce_qp_work, work); @@ -3648,8 +3667,9 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) dev = &hr_dev->pdev->dev; priv = (struct hns_roce_v1_priv *)hr_dev->hw->priv; hr_qp = qp_work_entry->qp; + qpn = hr_qp->qpn; - dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", hr_qp->qpn); + dev_dbg(dev, "Schedule destroy QP(0x%lx) work.\n", qpn); qp_work_entry->sche_cnt++; @@ -3660,7 +3680,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) &qp_work_entry->db_wait_stage); if (ret) { dev_err(dev, "Check QP(0x%lx) db process status failed!\n", - hr_qp->qpn); + qpn); return; } @@ -3674,7 +3694,7 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) ret = hns_roce_v1_modify_qp(&hr_qp->ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); if (ret) { - dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", hr_qp->qpn); + dev_err(dev, "Modify QP(0x%lx) to RST failed!\n", qpn); return; } @@ -3683,14 +3703,14 @@ static void hns_roce_v1_destroy_qp_work_fn(struct work_struct *work) if (hr_qp->ibqp.qp_type == IB_QPT_RC) { /* RC QP, release QPN */ - hns_roce_release_range_qp(hr_dev, hr_qp->qpn, 1); + hns_roce_release_range_qp(hr_dev, qpn, 1); kfree(hr_qp); } else kfree(hr_to_hr_sqp(hr_qp)); kfree(qp_work_entry); - dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", hr_qp->qpn); + dev_dbg(dev, "Accomplished destroy QP(0x%lx) work.\n", qpn); } int hns_roce_v1_destroy_qp(struct ib_qp *ibqp) diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index c3b41f95e70a..d9777b662eba 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -125,8 +125,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port, return -ENODEV; } - spin_lock_bh(&hr_dev->iboe.lock); - switch (event) { case NETDEV_UP: case NETDEV_CHANGE: @@ -144,7 +142,6 @@ static int handle_en_event(struct hns_roce_dev *hr_dev, u8 port, break; } - spin_unlock_bh(&hr_dev->iboe.lock); return 0; } diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h index da2eb5a281fa..9b1566468744 100644 --- a/drivers/infiniband/hw/i40iw/i40iw.h +++ b/drivers/infiniband/hw/i40iw/i40iw.h @@ -527,6 +527,7 @@ enum i40iw_status_code i40iw_add_mac_addr(struct i40iw_device *iwdev, int i40iw_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *); void i40iw_cq_wq_destroy(struct i40iw_device *iwdev, struct i40iw_sc_cq *cq); +void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev); void i40iw_rem_pdusecount(struct i40iw_pd *iwpd, struct i40iw_device *iwdev); void i40iw_add_pdusecount(struct i40iw_pd *iwpd); void i40iw_rem_devusecount(struct i40iw_device *iwdev); diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c index 
6ae98aa7f74e..5a2fa743676c 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_cm.c +++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c @@ -3487,7 +3487,8 @@ static void i40iw_cm_disconn_true(struct i40iw_qp *iwqp) if (((original_hw_tcp_state == I40IW_TCP_STATE_CLOSED) || (original_hw_tcp_state == I40IW_TCP_STATE_TIME_WAIT) || (last_ae == I40IW_AE_RDMAP_ROE_BAD_LLP_CLOSE) || - (last_ae == I40IW_AE_LLP_CONNECTION_RESET))) { + (last_ae == I40IW_AE_LLP_CONNECTION_RESET) || + iwdev->reset)) { issue_close = 1; iwqp->cm_id = NULL; if (!iwqp->flush_issued) { @@ -4265,6 +4266,8 @@ void i40iw_cm_disconnect_all(struct i40iw_device *iwdev) cm_node = container_of(list_node, struct i40iw_cm_node, connected_entry); attr.qp_state = IB_QPS_ERR; i40iw_modify_qp(&cm_node->iwqp->ibqp, &attr, IB_QP_STATE, NULL); + if (iwdev->reset) + i40iw_cm_disconn(cm_node->iwqp); i40iw_rem_ref_cm_node(cm_node); } } diff --git a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c index a027e2072477..9ec1ae9a82c9 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_ctrl.c +++ b/drivers/infiniband/hw/i40iw/i40iw_ctrl.c @@ -1970,6 +1970,8 @@ static enum i40iw_status_code i40iw_sc_ccq_destroy(struct i40iw_sc_cq *ccq, ret_code = i40iw_cqp_poll_registers(cqp, tail, 1000); } + cqp->process_cqp_sds = i40iw_update_sds_noccq; + return ret_code; } diff --git a/drivers/infiniband/hw/i40iw/i40iw_main.c b/drivers/infiniband/hw/i40iw/i40iw_main.c index e0f47cc2effc..ae8463ff59a7 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_main.c +++ b/drivers/infiniband/hw/i40iw/i40iw_main.c @@ -243,6 +243,8 @@ static void i40iw_destroy_cqp(struct i40iw_device *iwdev, bool free_hwcqp) if (free_hwcqp) dev->cqp_ops->cqp_destroy(dev->cqp); + i40iw_cleanup_pending_cqp_op(iwdev); + i40iw_free_dma_mem(dev->hw, &cqp->sq); kfree(cqp->scratch_array); iwdev->cqp.scratch_array = NULL; @@ -274,13 +276,12 @@ static void i40iw_disable_irq(struct i40iw_sc_dev *dev, /** * i40iw_destroy_aeq - destroy aeq * @iwdev: iwarp device - * @reset: true if called before reset * * Issue a destroy aeq request and * free the resources associated with the aeq * The function is called during driver unload */ -static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset) +static void i40iw_destroy_aeq(struct i40iw_device *iwdev) { enum i40iw_status_code status = I40IW_ERR_NOT_READY; struct i40iw_sc_dev *dev = &iwdev->sc_dev; @@ -288,7 +289,7 @@ static void i40iw_destroy_aeq(struct i40iw_device *iwdev, bool reset) if (!iwdev->msix_shared) i40iw_disable_irq(dev, iwdev->iw_msixtbl, (void *)iwdev); - if (reset) + if (iwdev->reset) goto exit; if (!dev->aeq_ops->aeq_destroy(&aeq->sc_aeq, 0, 1)) @@ -304,19 +305,17 @@ exit: * i40iw_destroy_ceq - destroy ceq * @iwdev: iwarp device * @iwceq: ceq to be destroyed - * @reset: true if called before reset * * Issue a destroy ceq request and * free the resources associated with the ceq */ static void i40iw_destroy_ceq(struct i40iw_device *iwdev, - struct i40iw_ceq *iwceq, - bool reset) + struct i40iw_ceq *iwceq) { enum i40iw_status_code status; struct i40iw_sc_dev *dev = &iwdev->sc_dev; - if (reset) + if (iwdev->reset) goto exit; status = dev->ceq_ops->ceq_destroy(&iwceq->sc_ceq, 0, 1); @@ -335,12 +334,11 @@ exit: /** * i40iw_dele_ceqs - destroy all ceq's * @iwdev: iwarp device - * @reset: true if called before reset * * Go through all of the device ceq's and for each ceq * disable the ceq interrupt and destroy the ceq */ -static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset) +static void i40iw_dele_ceqs(struct 
i40iw_device *iwdev) { u32 i = 0; struct i40iw_sc_dev *dev = &iwdev->sc_dev; @@ -349,32 +347,31 @@ static void i40iw_dele_ceqs(struct i40iw_device *iwdev, bool reset) if (iwdev->msix_shared) { i40iw_disable_irq(dev, msix_vec, (void *)iwdev); - i40iw_destroy_ceq(iwdev, iwceq, reset); + i40iw_destroy_ceq(iwdev, iwceq); iwceq++; i++; } for (msix_vec++; i < iwdev->ceqs_count; i++, msix_vec++, iwceq++) { i40iw_disable_irq(dev, msix_vec, (void *)iwceq); - i40iw_destroy_ceq(iwdev, iwceq, reset); + i40iw_destroy_ceq(iwdev, iwceq); } } /** * i40iw_destroy_ccq - destroy control cq * @iwdev: iwarp device - * @reset: true if called before reset * * Issue destroy ccq request and * free the resources associated with the ccq */ -static void i40iw_destroy_ccq(struct i40iw_device *iwdev, bool reset) +static void i40iw_destroy_ccq(struct i40iw_device *iwdev) { struct i40iw_sc_dev *dev = &iwdev->sc_dev; struct i40iw_ccq *ccq = &iwdev->ccq; enum i40iw_status_code status = 0; - if (!reset) + if (!iwdev->reset) status = dev->ccq_ops->ccq_destroy(dev->ccq, 0, true); if (status) i40iw_pr_err("ccq destroy failed %d\n", status); @@ -810,7 +807,7 @@ static enum i40iw_status_code i40iw_setup_ceqs(struct i40iw_device *iwdev, iwceq->msix_idx = msix_vec->idx; status = i40iw_configure_ceq_vector(iwdev, iwceq, ceq_id, msix_vec); if (status) { - i40iw_destroy_ceq(iwdev, iwceq, false); + i40iw_destroy_ceq(iwdev, iwceq); break; } i40iw_enable_intr(&iwdev->sc_dev, msix_vec->idx); @@ -912,7 +909,7 @@ static enum i40iw_status_code i40iw_setup_aeq(struct i40iw_device *iwdev) status = i40iw_configure_aeq_vector(iwdev); if (status) { - i40iw_destroy_aeq(iwdev, false); + i40iw_destroy_aeq(iwdev); return status; } @@ -1442,12 +1439,11 @@ static enum i40iw_status_code i40iw_save_msix_info(struct i40iw_device *iwdev, /** * i40iw_deinit_device - clean up the device resources * @iwdev: iwarp device - * @reset: true if called before reset * * Destroy the ib device interface, remove the mac ip entry and ipv4/ipv6 addresses, * destroy the device queues and free the pble and the hmc objects */ -static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset) +static void i40iw_deinit_device(struct i40iw_device *iwdev) { struct i40e_info *ldev = iwdev->ldev; @@ -1464,7 +1460,7 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset) i40iw_destroy_rdma_device(iwdev->iwibdev); /* fallthrough */ case IP_ADDR_REGISTERED: - if (!reset) + if (!iwdev->reset) i40iw_del_macip_entry(iwdev, (u8)iwdev->mac_ip_table_idx); /* fallthrough */ case INET_NOTIFIER: @@ -1474,26 +1470,26 @@ static void i40iw_deinit_device(struct i40iw_device *iwdev, bool reset) unregister_inet6addr_notifier(&i40iw_inetaddr6_notifier); } /* fallthrough */ + case PBLE_CHUNK_MEM: + i40iw_destroy_pble_pool(dev, iwdev->pble_rsrc); + /* fallthrough */ case CEQ_CREATED: - i40iw_dele_ceqs(iwdev, reset); + i40iw_dele_ceqs(iwdev); /* fallthrough */ case AEQ_CREATED: - i40iw_destroy_aeq(iwdev, reset); + i40iw_destroy_aeq(iwdev); /* fallthrough */ case IEQ_CREATED: - i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, reset); + i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_IEQ, iwdev->reset); /* fallthrough */ case ILQ_CREATED: - i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, reset); + i40iw_puda_dele_resources(&iwdev->vsi, I40IW_PUDA_RSRC_TYPE_ILQ, iwdev->reset); /* fallthrough */ case CCQ_CREATED: - i40iw_destroy_ccq(iwdev, reset); - /* fallthrough */ - case PBLE_CHUNK_MEM: - i40iw_destroy_pble_pool(dev, 
iwdev->pble_rsrc); + i40iw_destroy_ccq(iwdev); /* fallthrough */ case HMC_OBJS_CREATED: - i40iw_del_hmc_objects(dev, dev->hmc_info, true, reset); + i40iw_del_hmc_objects(dev, dev->hmc_info, true, iwdev->reset); /* fallthrough */ case CQP_CREATED: i40iw_destroy_cqp(iwdev, true); @@ -1670,6 +1666,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) status = i40iw_hmc_init_pble(&iwdev->sc_dev, iwdev->pble_rsrc); if (status) break; + iwdev->init_state = PBLE_CHUNK_MEM; iwdev->virtchnl_wq = alloc_ordered_workqueue("iwvch", WQ_MEM_RECLAIM); i40iw_register_notifiers(); iwdev->init_state = INET_NOTIFIER; @@ -1693,7 +1690,7 @@ static int i40iw_open(struct i40e_info *ldev, struct i40e_client *client) } while (0); i40iw_pr_err("status = %d last completion = %d\n", status, iwdev->init_state); - i40iw_deinit_device(iwdev, false); + i40iw_deinit_device(iwdev); return -ERESTART; } @@ -1774,9 +1771,12 @@ static void i40iw_close(struct i40e_info *ldev, struct i40e_client *client, bool iwdev = &hdl->device; iwdev->closing = true; + if (reset) + iwdev->reset = true; + i40iw_cm_disconnect_all(iwdev); destroy_workqueue(iwdev->virtchnl_wq); - i40iw_deinit_device(iwdev, reset); + i40iw_deinit_device(iwdev); } /** diff --git a/drivers/infiniband/hw/i40iw/i40iw_puda.c b/drivers/infiniband/hw/i40iw/i40iw_puda.c index db41ab40da9c..71050c5d29a0 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_puda.c +++ b/drivers/infiniband/hw/i40iw/i40iw_puda.c @@ -408,6 +408,9 @@ enum i40iw_status_code i40iw_puda_send(struct i40iw_sc_qp *qp, set_64bit_val(wqe, 0, info->paddr); set_64bit_val(wqe, 8, LS_64(info->len, I40IWQPSQ_FRAG_LEN)); set_64bit_val(wqe, 16, header[0]); + + /* Ensure all data is written before writing valid bit */ + wmb(); set_64bit_val(wqe, 24, header[1]); i40iw_debug_buf(qp->dev, I40IW_DEBUG_PUDA, "PUDA SEND WQE", wqe, 32); @@ -1411,10 +1414,10 @@ static void i40iw_ieq_handle_exception(struct i40iw_puda_rsrc *ieq, if (!list_empty(rxlist)) { tmpbuf = (struct i40iw_puda_buf *)rxlist->next; - plist = &tmpbuf->list; while ((struct list_head *)tmpbuf != rxlist) { if ((int)(buf->seqnum - tmpbuf->seqnum) < 0) break; + plist = &tmpbuf->list; tmpbuf = (struct i40iw_puda_buf *)plist->next; } /* Insert buf before tmpbuf */ diff --git a/drivers/infiniband/hw/i40iw/i40iw_utils.c b/drivers/infiniband/hw/i40iw/i40iw_utils.c index 56d986924a4c..e311ec559f4e 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_utils.c +++ b/drivers/infiniband/hw/i40iw/i40iw_utils.c @@ -337,6 +337,7 @@ struct i40iw_cqp_request *i40iw_get_cqp_request(struct i40iw_cqp *cqp, bool wait */ void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp_request) { + struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp); unsigned long flags; if (cqp_request->dynamic) { @@ -350,6 +351,7 @@ void i40iw_free_cqp_request(struct i40iw_cqp *cqp, struct i40iw_cqp_request *cqp list_add_tail(&cqp_request->list, &cqp->cqp_avail_reqs); spin_unlock_irqrestore(&cqp->req_lock, flags); } + wake_up(&iwdev->close_wq); } /** @@ -365,6 +367,56 @@ void i40iw_put_cqp_request(struct i40iw_cqp *cqp, } /** + * i40iw_free_pending_cqp_request -free pending cqp request objs + * @cqp: cqp ptr + * @cqp_request: to be put back in cqp list + */ +static void i40iw_free_pending_cqp_request(struct i40iw_cqp *cqp, + struct i40iw_cqp_request *cqp_request) +{ + struct i40iw_device *iwdev = container_of(cqp, struct i40iw_device, cqp); + + if (cqp_request->waiting) { + cqp_request->compl_info.error = true; + cqp_request->request_done = 
true; + wake_up(&cqp_request->waitq); + } + i40iw_put_cqp_request(cqp, cqp_request); + wait_event_timeout(iwdev->close_wq, + !atomic_read(&cqp_request->refcount), + 1000); +} + +/** + * i40iw_cleanup_pending_cqp_op - clean-up cqp with no completions + * @iwdev: iwarp device + */ +void i40iw_cleanup_pending_cqp_op(struct i40iw_device *iwdev) +{ + struct i40iw_sc_dev *dev = &iwdev->sc_dev; + struct i40iw_cqp *cqp = &iwdev->cqp; + struct i40iw_cqp_request *cqp_request = NULL; + struct cqp_commands_info *pcmdinfo = NULL; + u32 i, pending_work, wqe_idx; + + pending_work = I40IW_RING_WORK_AVAILABLE(cqp->sc_cqp.sq_ring); + wqe_idx = I40IW_RING_GETCURRENT_TAIL(cqp->sc_cqp.sq_ring); + for (i = 0; i < pending_work; i++) { + cqp_request = (struct i40iw_cqp_request *)(unsigned long)cqp->scratch_array[wqe_idx]; + if (cqp_request) + i40iw_free_pending_cqp_request(cqp, cqp_request); + wqe_idx = (wqe_idx + 1) % I40IW_RING_GETSIZE(cqp->sc_cqp.sq_ring); + } + + while (!list_empty(&dev->cqp_cmd_head)) { + pcmdinfo = (struct cqp_commands_info *)i40iw_remove_head(&dev->cqp_cmd_head); + cqp_request = container_of(pcmdinfo, struct i40iw_cqp_request, info); + if (cqp_request) + i40iw_free_pending_cqp_request(cqp, cqp_request); + } +} + +/** * i40iw_free_qp - callback after destroy cqp completes * @cqp_request: cqp request for destroy qp * @num: not used @@ -546,8 +598,12 @@ void i40iw_rem_ref(struct ib_qp *ibqp) cqp_info->in.u.qp_destroy.scratch = (uintptr_t)cqp_request; cqp_info->in.u.qp_destroy.remove_hash_idx = true; status = i40iw_handle_cqp_op(iwdev, cqp_request); - if (status) - i40iw_pr_err("CQP-OP Destroy QP fail"); + if (!status) + return; + + i40iw_rem_pdusecount(iwqp->iwpd, iwdev); + i40iw_free_qp_resources(iwdev, iwqp, qp_num); + i40iw_rem_devusecount(iwdev); } /** diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c index 4dbe61ec7a77..02d871db7ca5 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c @@ -426,9 +426,13 @@ void i40iw_free_qp_resources(struct i40iw_device *iwdev, struct i40iw_qp *iwqp, u32 qp_num) { + struct i40iw_pbl *iwpbl = &iwqp->iwpbl; + i40iw_dealloc_push_page(iwdev, &iwqp->sc_qp); if (qp_num) i40iw_free_resource(iwdev, iwdev->allocated_qps, qp_num); + if (iwpbl->pbl_allocated) + i40iw_free_pble(iwdev->pble_rsrc, &iwpbl->pble_alloc); i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->q2_ctx_mem); i40iw_free_dma_mem(iwdev->sc_dev.hw, &iwqp->kqp.dma_mem); kfree(iwqp->kqp.wrid_mem); @@ -483,7 +487,7 @@ static int i40iw_setup_virt_qp(struct i40iw_device *iwdev, struct i40iw_qp *iwqp, struct i40iw_qp_init_info *init_info) { - struct i40iw_pbl *iwpbl = iwqp->iwpbl; + struct i40iw_pbl *iwpbl = &iwqp->iwpbl; struct i40iw_qp_mr *qpmr = &iwpbl->qp_mr; iwqp->page = qpmr->sq_page; @@ -688,19 +692,22 @@ static struct ib_qp *i40iw_create_qp(struct ib_pd *ibpd, ucontext = to_ucontext(ibpd->uobject->context); if (req.user_wqe_buffers) { + struct i40iw_pbl *iwpbl; + spin_lock_irqsave( &ucontext->qp_reg_mem_list_lock, flags); - iwqp->iwpbl = i40iw_get_pbl( + iwpbl = i40iw_get_pbl( (unsigned long)req.user_wqe_buffers, &ucontext->qp_reg_mem_list); spin_unlock_irqrestore( &ucontext->qp_reg_mem_list_lock, flags); - if (!iwqp->iwpbl) { + if (!iwpbl) { err_code = -ENODATA; i40iw_pr_err("no pbl info\n"); goto error; } + memcpy(&iwqp->iwpbl, iwpbl, sizeof(iwqp->iwpbl)); } } err_code = i40iw_setup_virt_qp(iwdev, iwqp, &init_info); @@ -1161,8 +1168,10 @@ static struct ib_cq *i40iw_create_cq(struct ib_device *ibdev, 
memset(&req, 0, sizeof(req)); iwcq->user_mode = true; ucontext = to_ucontext(context); - if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) + if (ib_copy_from_udata(&req, udata, sizeof(struct i40iw_create_cq_req))) { + err_code = -EFAULT; goto cq_free_resources; + } spin_lock_irqsave(&ucontext->cq_reg_mem_list_lock, flags); iwpbl = i40iw_get_pbl((unsigned long)req.user_cq_buffer, @@ -2063,7 +2072,7 @@ static int i40iw_dereg_mr(struct ib_mr *ib_mr) ucontext = to_ucontext(ibpd->uobject->context); i40iw_del_memlist(iwmr, ucontext); } - if (iwpbl->pbl_allocated) + if (iwpbl->pbl_allocated && iwmr->type != IW_MEMREG_TYPE_QP) i40iw_free_pble(iwdev->pble_rsrc, palloc); kfree(iwmr); return 0; diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.h b/drivers/infiniband/hw/i40iw/i40iw_verbs.h index 07c3fec77de6..9067443cd311 100644 --- a/drivers/infiniband/hw/i40iw/i40iw_verbs.h +++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.h @@ -170,7 +170,7 @@ struct i40iw_qp { struct i40iw_qp_kmode kqp; struct i40iw_dma_mem host_ctx; struct timer_list terminate_timer; - struct i40iw_pbl *iwpbl; + struct i40iw_pbl iwpbl; struct i40iw_dma_mem q2_ctx_mem; struct i40iw_dma_mem ietf_mem; struct completion sq_drained; diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c index 1e6c526450d9..fedaf8260105 100644 --- a/drivers/infiniband/hw/mlx4/cm.c +++ b/drivers/infiniband/hw/mlx4/cm.c @@ -323,6 +323,9 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id mad->mad_hdr.attr_id == CM_REP_ATTR_ID || mad->mad_hdr.attr_id == CM_SIDR_REQ_ATTR_ID) { sl_cm_id = get_local_comm_id(mad); + id = id_map_get(ibdev, &pv_cm_id, slave_id, sl_cm_id); + if (id) + goto cont; id = id_map_alloc(ibdev, slave_id, sl_cm_id); if (IS_ERR(id)) { mlx4_ib_warn(ibdev, "%s: id{slave: %d, sl_cm_id: 0x%x} Failed to id_map_alloc\n", @@ -343,6 +346,7 @@ int mlx4_ib_multiplex_cm_handler(struct ib_device *ibdev, int port, int slave_id return -EINVAL; } +cont: set_local_comm_id(mad, id->pv_cm_id); if (mad->mad_hdr.attr_id == CM_DREQ_ATTR_ID) diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 4f5a143fc0a7..ff931c580557 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c @@ -102,7 +102,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf * int err; err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size, - PAGE_SIZE * 2, &buf->buf, GFP_KERNEL); + PAGE_SIZE * 2, &buf->buf); if (err) goto out; @@ -113,7 +113,7 @@ static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf * if (err) goto err_buf; - err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL); + err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf); if (err) goto err_mtt; @@ -219,7 +219,7 @@ struct ib_cq *mlx4_ib_create_cq(struct ib_device *ibdev, uar = &to_mucontext(context)->uar; } else { - err = mlx4_db_alloc(dev->dev, &cq->db, 1, GFP_KERNEL); + err = mlx4_db_alloc(dev->dev, &cq->db, 1); if (err) goto err_cq; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 75b2f7d4cd95..d1b43cbbfea7 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1155,7 +1155,7 @@ static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) * call to mlx4_ib_vma_close. 
*/ put_task_struct(owning_process); - msleep(1); + usleep_range(1000, 2000); owning_process = get_pid_task(ibcontext->tgid, PIDTYPE_PID); if (!owning_process || diff --git a/drivers/infiniband/hw/mlx4/mcg.c b/drivers/infiniband/hw/mlx4/mcg.c index 3405e947dc1e..b73f89700ef9 100644 --- a/drivers/infiniband/hw/mlx4/mcg.c +++ b/drivers/infiniband/hw/mlx4/mcg.c @@ -1091,7 +1091,7 @@ static void _mlx4_ib_mcg_port_cleanup(struct mlx4_ib_demux_ctx *ctx, int destroy if (!count) break; - msleep(1); + usleep_range(1000, 2000); } while (time_after(end, jiffies)); flush_workqueue(ctx->mcg_wq); diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index c2b9cbf4da05..9db82e67e959 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -185,7 +185,6 @@ enum mlx4_ib_qp_flags { MLX4_IB_QP_LSO = IB_QP_CREATE_IPOIB_UD_LSO, MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK = IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, MLX4_IB_QP_NETIF = IB_QP_CREATE_NETIF_QP, - MLX4_IB_QP_CREATE_USE_GFP_NOIO = IB_QP_CREATE_USE_GFP_NOIO, /* Mellanox specific flags start from IB_QP_CREATE_RESERVED_START */ MLX4_IB_ROCE_V2_GSI_QP = MLX4_IB_QP_CREATE_ROCE_V2_GSI, diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 996e9058e515..75c0e6c5dd56 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -634,8 +634,8 @@ static void mlx4_ib_free_qp_counter(struct mlx4_ib_dev *dev, static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, struct ib_qp_init_attr *init_attr, - struct ib_udata *udata, int sqpn, struct mlx4_ib_qp **caller_qp, - gfp_t gfp) + struct ib_udata *udata, int sqpn, + struct mlx4_ib_qp **caller_qp) { int qpn; int err; @@ -691,14 +691,14 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, if (qp_type == MLX4_IB_QPT_SMI || qp_type == MLX4_IB_QPT_GSI || (qp_type & (MLX4_IB_QPT_PROXY_SMI | MLX4_IB_QPT_PROXY_SMI_OWNER | MLX4_IB_QPT_PROXY_GSI | MLX4_IB_QPT_TUN_SMI_OWNER))) { - sqp = kzalloc(sizeof (struct mlx4_ib_sqp), gfp); + sqp = kzalloc(sizeof(struct mlx4_ib_sqp), GFP_KERNEL); if (!sqp) return -ENOMEM; qp = &sqp->qp; qp->pri.vid = 0xFFFF; qp->alt.vid = 0xFFFF; } else { - qp = kzalloc(sizeof (struct mlx4_ib_qp), gfp); + qp = kzalloc(sizeof(struct mlx4_ib_qp), GFP_KERNEL); if (!qp) return -ENOMEM; qp->pri.vid = 0xFFFF; @@ -780,7 +780,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, goto err; if (qp_has_rq(init_attr)) { - err = mlx4_db_alloc(dev->dev, &qp->db, 0, gfp); + err = mlx4_db_alloc(dev->dev, &qp->db, 0); if (err) goto err; @@ -788,7 +788,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, } if (mlx4_buf_alloc(dev->dev, qp->buf_size, qp->buf_size, - &qp->buf, gfp)) { + &qp->buf)) { memcpy(&init_attr->cap, &backup_cap, sizeof(backup_cap)); err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, @@ -797,7 +797,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, goto err_db; if (mlx4_buf_alloc(dev->dev, qp->buf_size, - PAGE_SIZE * 2, &qp->buf, gfp)) { + PAGE_SIZE * 2, &qp->buf)) { err = -ENOMEM; goto err_db; } @@ -808,20 +808,20 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, if (err) goto err_buf; - err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf, gfp); + err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf); if (err) goto err_mtt; qp->sq.wrid = kmalloc_array(qp->sq.wqe_cnt, sizeof(u64), - gfp | __GFP_NOWARN); + GFP_KERNEL | __GFP_NOWARN); if (!qp->sq.wrid) 
qp->sq.wrid = __vmalloc(qp->sq.wqe_cnt * sizeof(u64), - gfp, PAGE_KERNEL); + GFP_KERNEL, PAGE_KERNEL); qp->rq.wrid = kmalloc_array(qp->rq.wqe_cnt, sizeof(u64), - gfp | __GFP_NOWARN); + GFP_KERNEL | __GFP_NOWARN); if (!qp->rq.wrid) qp->rq.wrid = __vmalloc(qp->rq.wqe_cnt * sizeof(u64), - gfp, PAGE_KERNEL); + GFP_KERNEL, PAGE_KERNEL); if (!qp->sq.wrid || !qp->rq.wrid) { err = -ENOMEM; goto err_wrid; @@ -859,7 +859,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) qp->flags |= MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; - err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp, gfp); + err = mlx4_qp_alloc(dev->dev, qpn, &qp->mqp); if (err) goto err_qpn; @@ -1127,10 +1127,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, int err; int sup_u_create_flags = MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK; u16 xrcdn = 0; - gfp_t gfp; - gfp = (init_attr->create_flags & MLX4_IB_QP_CREATE_USE_GFP_NOIO) ? - GFP_NOIO : GFP_KERNEL; /* * We only support LSO, vendor flag1, and multicast loopback blocking, * and only for kernel UD QPs. @@ -1140,8 +1137,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, MLX4_IB_SRIOV_TUNNEL_QP | MLX4_IB_SRIOV_SQP | MLX4_IB_QP_NETIF | - MLX4_IB_QP_CREATE_ROCE_V2_GSI | - MLX4_IB_QP_CREATE_USE_GFP_NOIO)) + MLX4_IB_QP_CREATE_ROCE_V2_GSI)) return ERR_PTR(-EINVAL); if (init_attr->create_flags & IB_QP_CREATE_NETIF_QP) { @@ -1154,7 +1150,6 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, return ERR_PTR(-EINVAL); if ((init_attr->create_flags & ~(MLX4_IB_SRIOV_SQP | - MLX4_IB_QP_CREATE_USE_GFP_NOIO | MLX4_IB_QP_CREATE_ROCE_V2_GSI | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK) && init_attr->qp_type != IB_QPT_UD) || @@ -1179,7 +1174,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, case IB_QPT_RC: case IB_QPT_UC: case IB_QPT_RAW_PACKET: - qp = kzalloc(sizeof *qp, gfp); + qp = kzalloc(sizeof(*qp), GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); qp->pri.vid = 0xFFFF; @@ -1188,7 +1183,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, case IB_QPT_UD: { err = create_qp_common(to_mdev(pd->device), pd, init_attr, - udata, 0, &qp, gfp); + udata, 0, &qp); if (err) { kfree(qp); return ERR_PTR(err); @@ -1217,8 +1212,7 @@ static struct ib_qp *_mlx4_ib_create_qp(struct ib_pd *pd, } err = create_qp_common(to_mdev(pd->device), pd, init_attr, udata, - sqpn, - &qp, gfp); + sqpn, &qp); if (err) return ERR_PTR(err); diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index e32dd58937a8..0facaf5f6d23 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c @@ -135,14 +135,14 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, if (err) goto err_mtt; } else { - err = mlx4_db_alloc(dev->dev, &srq->db, 0, GFP_KERNEL); + err = mlx4_db_alloc(dev->dev, &srq->db, 0); if (err) goto err_srq; *srq->db.db = 0; - if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, &srq->buf, - GFP_KERNEL)) { + if (mlx4_buf_alloc(dev->dev, buf_size, PAGE_SIZE * 2, + &srq->buf)) { err = -ENOMEM; goto err_db; } @@ -167,7 +167,7 @@ struct ib_srq *mlx4_ib_create_srq(struct ib_pd *pd, if (err) goto err_buf; - err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL); + err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf); if (err) goto err_mtt; diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 763bb5b36144..2c40a2e989d2 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -582,6 
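With the gfp argument gone, the wrid allocations above settle on a standard fallback idiom: attempt a physically contiguous kmalloc_array() with __GFP_NOWARN so a high-order failure stays quiet, then fall back to vmalloc space. A hedged sketch using the three-argument __vmalloc() of this kernel generation:

#include <linux/slab.h>
#include <linux/vmalloc.h>

static u64 *alloc_wrid_table(unsigned int cnt)
{
	u64 *wrid;

	/* contiguous pages first, silently tolerating failure ... */
	wrid = kmalloc_array(cnt, sizeof(*wrid), GFP_KERNEL | __GFP_NOWARN);
	if (!wrid)
		/* ... then fall back to page-by-page virtual mappings */
		wrid = __vmalloc(cnt * sizeof(*wrid), GFP_KERNEL, PAGE_KERNEL);
	return wrid;
}

The kvmalloc_array() helper wraps this same pattern; callers that use it must free with kvfree().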
+582,15 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) } } +static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) +{ + if (!mlx5_debugfs_root) + return; + + debugfs_remove_recursive(dev->cache.root); + dev->cache.root = NULL; +} + static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) { struct mlx5_mr_cache *cache = &dev->cache; @@ -600,38 +609,34 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev) sprintf(ent->name, "%d", ent->order); ent->dir = debugfs_create_dir(ent->name, cache->root); if (!ent->dir) - return -ENOMEM; + goto err; ent->fsize = debugfs_create_file("size", 0600, ent->dir, ent, &size_fops); if (!ent->fsize) - return -ENOMEM; + goto err; ent->flimit = debugfs_create_file("limit", 0600, ent->dir, ent, &limit_fops); if (!ent->flimit) - return -ENOMEM; + goto err; ent->fcur = debugfs_create_u32("cur", 0400, ent->dir, &ent->cur); if (!ent->fcur) - return -ENOMEM; + goto err; ent->fmiss = debugfs_create_u32("miss", 0600, ent->dir, &ent->miss); if (!ent->fmiss) - return -ENOMEM; + goto err; } return 0; -} - -static void mlx5_mr_cache_debugfs_cleanup(struct mlx5_ib_dev *dev) -{ - if (!mlx5_debugfs_root) - return; +err: + mlx5_mr_cache_debugfs_cleanup(dev); - debugfs_remove_recursive(dev->cache.root); + return -ENOMEM; } static void delay_time_func(unsigned long ctx) @@ -692,6 +697,11 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev) if (err) mlx5_ib_warn(dev, "cache debugfs failure\n"); + /* + * We don't want to fail driver if debugfs failed to initialize, + * so we are not forwarding error to the user. + */ + return 0; } @@ -825,7 +835,7 @@ static int mr_umem_get(struct ib_pd *pd, u64 start, u64 length, access_flags, 0); err = PTR_ERR_OR_ZERO(*umem); if (err < 0) { - mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem)); + mlx5_ib_err(dev, "umem get failed (%d)\n", err); return err; } @@ -1779,7 +1789,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, mr->ndescs = sg_nents; for_each_sg(sgl, sg, sg_nents, i) { - if (unlikely(i > mr->max_descs)) + if (unlikely(i >= mr->max_descs)) break; klms[i].va = cpu_to_be64(sg_dma_address(sg) + sg_offset); klms[i].bcount = cpu_to_be32(sg_dma_len(sg) - sg_offset); diff --git a/drivers/infiniband/hw/nes/nes_hw.c b/drivers/infiniband/hw/nes/nes_hw.c index 8f9d8b4ad583..b0adf65e4bdb 100644 --- a/drivers/infiniband/hw/nes/nes_hw.c +++ b/drivers/infiniband/hw/nes/nes_hw.c @@ -551,7 +551,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) { if ((0x0F000100 == (pcs_control_status0 & 0x0F000100)) || (0x0F000100 == (pcs_control_status1 & 0x0F000100))) int_cnt++; - msleep(1); + usleep_range(1000, 2000); } if (int_cnt > 1) { spin_lock_irqsave(&nesadapter->phy_lock, flags); @@ -592,7 +592,7 @@ struct nes_adapter *nes_init_adapter(struct nes_device *nesdev, u8 hw_rev) { break; } } - msleep(1); + usleep_range(1000, 2000); } } } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 2f30bda8457a..27d5e8d9f08d 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -744,7 +744,8 @@ err: if (is_uctx_pd) { ocrdma_release_ucontext_pd(uctx); } else { - status = _ocrdma_dealloc_pd(dev, pd); + if (_ocrdma_dealloc_pd(dev, pd)) + pr_err("%s: _ocrdma_dealloc_pd() failed\n", __func__); } exit: return ERR_PTR(status); @@ -1901,6 +1902,7 @@ struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd, goto err; if (udata == NULL) { + status = -ENOMEM; srq->rqe_wr_id_tbl = kzalloc(sizeof(u64) * 
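The mlx5 debugfs hunk above converts five separate return -ENOMEM statements into one err label whose cleanup helper removes the whole tree with debugfs_remove_recursive(), so a half-built directory can no longer be leaked. The unwind shape in isolation, with hypothetical names (size_fops stands in for the driver's real file_operations):

#include <linux/debugfs.h>

struct demo_dev {
	struct dentry *root;
	u32 cur;
};

static const struct file_operations size_fops; /* placeholder fops */

static int demo_debugfs_init(struct demo_dev *dev)
{
	dev->root = debugfs_create_dir("demo", NULL);
	if (!dev->root)
		return -ENOMEM;

	if (!debugfs_create_file("size", 0600, dev->root, dev, &size_fops))
		goto err;
	if (!debugfs_create_u32("cur", 0400, dev->root, &dev->cur))
		goto err;

	return 0;

err:
	/* one exit path tears down everything created so far */
	debugfs_remove_recursive(dev->root);
	dev->root = NULL;
	return -ENOMEM;
}

Note that mlx5_mr_cache_init() then deliberately ignores the return value: a debugfs failure is logged but must not fail the driver.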
srq->rq.max_cnt, GFP_KERNEL); if (srq->rqe_wr_id_tbl == NULL) diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index 548e4d1e998f..2ae71b8f1ba8 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -53,6 +53,14 @@ #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT) +static inline int qedr_ib_copy_to_udata(struct ib_udata *udata, void *src, + size_t len) +{ + size_t min_len = min_t(size_t, len, udata->outlen); + + return ib_copy_to_udata(udata, src, min_len); +} + int qedr_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey) { if (index > QEDR_ROCE_PKEY_TABLE_LEN) @@ -378,7 +386,7 @@ struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *ibdev, uresp.sges_per_srq_wr = dev->attr.max_srq_sge; uresp.max_cqes = QEDR_MAX_CQES; - rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rc) goto err; @@ -499,7 +507,7 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev, uresp.pd_id = pd_id; - rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rc) { DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id); dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id); @@ -729,7 +737,7 @@ static int qedr_copy_cq_uresp(struct qedr_dev *dev, uresp.db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT); uresp.icid = cq->icid; - rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rc) DP_ERR(dev, "copy error cqid=0x%x.\n", cq->icid); @@ -1238,7 +1246,7 @@ static int qedr_copy_qp_uresp(struct qedr_dev *dev, uresp.atomic_supported = dev->atomic_cap != IB_ATOMIC_NONE; uresp.qp_id = qp->qp_id; - rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp)); + rc = qedr_ib_copy_to_udata(udata, &uresp, sizeof(uresp)); if (rc) DP_ERR(dev, "create qp: failed a copy to user space with qp icid=0x%x.\n", diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 5984981e7dd4..a343e3b5d4cb 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -104,10 +104,9 @@ const struct rvt_operation_params qib_post_parms[RVT_OPERATION_MAX] = { }; -static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map, - gfp_t gfp) +static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map) { - unsigned long page = get_zeroed_page(gfp); + unsigned long page = get_zeroed_page(GFP_KERNEL); /* * Free the page if someone raced with us installing it. @@ -126,7 +125,7 @@ static void get_map_page(struct rvt_qpn_table *qpt, struct rvt_qpn_map *map, * zero/one for QP type IB_QPT_SMI/IB_QPT_GSI. 
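qedr's new qedr_ib_copy_to_udata() wrapper above clamps every response copy to udata->outlen. That is the usual forward/backward ABI-compatibility trick: old userspace that allocated a shorter response struct gets a truncated copy instead of an -EFAULT or an overrun. Restated as a sketch:

static inline int copy_uresp(struct ib_udata *udata, void *src, size_t len)
{
	/* never copy more than userspace said it can accept */
	return ib_copy_to_udata(udata, src,
				min_t(size_t, len, udata->outlen));
}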
*/ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, - enum ib_qp_type type, u8 port, gfp_t gfp) + enum ib_qp_type type, u8 port) { u32 i, offset, max_scan, qpn; struct rvt_qpn_map *map; @@ -160,7 +159,7 @@ int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, max_scan = qpt->nmaps - !offset; for (i = 0;;) { if (unlikely(!map->page)) { - get_map_page(qpt, map, gfp); + get_map_page(qpt, map); if (unlikely(!map->page)) break; } @@ -317,16 +316,16 @@ u32 qib_mtu_from_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp, u32 pmtu) return ib_mtu_enum_to_int(pmtu); } -void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp) +void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp) { struct qib_qp_priv *priv; - priv = kzalloc(sizeof(*priv), gfp); + priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return ERR_PTR(-ENOMEM); priv->owner = qp; - priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), gfp); + priv->s_hdr = kzalloc(sizeof(*priv->s_hdr), GFP_KERNEL); if (!priv->s_hdr) { kfree(priv); return ERR_PTR(-ENOMEM); diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h index da0db5485ddc..a52fc67b40d7 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.h +++ b/drivers/infiniband/hw/qib/qib_verbs.h @@ -274,11 +274,11 @@ int qib_get_counters(struct qib_pportdata *ppd, * Functions provided by qib driver for rdmavt to use */ unsigned qib_free_all_qps(struct rvt_dev_info *rdi); -void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp, gfp_t gfp); +void *qib_qp_priv_alloc(struct rvt_dev_info *rdi, struct rvt_qp *qp); void qib_qp_priv_free(struct rvt_dev_info *rdi, struct rvt_qp *qp); void qib_notify_qp_reset(struct rvt_qp *qp); int qib_alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, - enum ib_qp_type type, u8 port, gfp_t gfp); + enum ib_qp_type type, u8 port); void qib_restart_rc(struct rvt_qp *qp, u32 psn, int wait); #ifdef CONFIG_DEBUG_FS diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 727e81cc2c8f..8876ee7bc326 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -118,10 +118,9 @@ const int ib_rvt_state_ops[IB_QPS_ERR + 1] = { EXPORT_SYMBOL(ib_rvt_state_ops); static void get_map_page(struct rvt_qpn_table *qpt, - struct rvt_qpn_map *map, - gfp_t gfp) + struct rvt_qpn_map *map) { - unsigned long page = get_zeroed_page(gfp); + unsigned long page = get_zeroed_page(GFP_KERNEL); /* * Free the page if someone raced with us installing it. 
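get_map_page() above keeps its "free the page if someone raced with us installing it" behaviour; only the gfp plumbing changes. The publish-or-discard pattern deserves spelling out. Here is a compilable C11 userspace analogue (the qib/rdmavt versions do the equivalent under the QPN table lock rather than with C11 atomics):

#include <stdlib.h>
#include <stdatomic.h>

static _Atomic(void *) shared_page;

static void *get_map_page_demo(void)
{
	void *expected = NULL;
	void *page = calloc(1, 4096);

	if (!page)
		return atomic_load(&shared_page);

	/* publish our page, unless another thread beat us to it */
	if (!atomic_compare_exchange_strong(&shared_page, &expected, page)) {
		free(page);		/* lost the race: discard ours */
		page = expected;	/* use the winner's page */
	}
	return page;
}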
@@ -173,7 +172,7 @@ static int init_qpn_table(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt) rdi->dparms.qpn_res_start, rdi->dparms.qpn_res_end); for (i = rdi->dparms.qpn_res_start; i <= rdi->dparms.qpn_res_end; i++) { if (!map->page) { - get_map_page(qpt, map, GFP_KERNEL); + get_map_page(qpt, map); if (!map->page) { ret = -ENOMEM; break; @@ -342,14 +341,14 @@ static inline unsigned mk_qpn(struct rvt_qpn_table *qpt, * Return: The queue pair number */ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, - enum ib_qp_type type, u8 port_num, gfp_t gfp) + enum ib_qp_type type, u8 port_num) { u32 i, offset, max_scan, qpn; struct rvt_qpn_map *map; u32 ret; if (rdi->driver_f.alloc_qpn) - return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num, gfp); + return rdi->driver_f.alloc_qpn(rdi, qpt, type, port_num); if (type == IB_QPT_SMI || type == IB_QPT_GSI) { unsigned n; @@ -374,7 +373,7 @@ static int alloc_qpn(struct rvt_dev_info *rdi, struct rvt_qpn_table *qpt, max_scan = qpt->nmaps - !offset; for (i = 0;;) { if (unlikely(!map->page)) { - get_map_page(qpt, map, gfp); + get_map_page(qpt, map); if (unlikely(!map->page)) break; } @@ -672,7 +671,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, struct ib_qp *ret = ERR_PTR(-ENOMEM); struct rvt_dev_info *rdi = ib_to_rvt(ibpd->device); void *priv = NULL; - gfp_t gfp; size_t sqsize; if (!rdi) @@ -680,18 +678,9 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, if (init_attr->cap.max_send_sge > rdi->dparms.props.max_sge || init_attr->cap.max_send_wr > rdi->dparms.props.max_qp_wr || - init_attr->create_flags & ~(IB_QP_CREATE_USE_GFP_NOIO)) + init_attr->create_flags) return ERR_PTR(-EINVAL); - /* GFP_NOIO is applicable to RC QP's only */ - - if (init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO && - init_attr->qp_type != IB_QPT_RC) - return ERR_PTR(-EINVAL); - - gfp = init_attr->create_flags & IB_QP_CREATE_USE_GFP_NOIO ? - GFP_NOIO : GFP_KERNEL; - /* Check receive queue parameters if no SRQ is specified. */ if (!init_attr->srq) { if (init_attr->cap.max_recv_sge > rdi->dparms.props.max_sge || @@ -719,14 +708,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, sz = sizeof(struct rvt_sge) * init_attr->cap.max_send_sge + sizeof(struct rvt_swqe); - if (gfp == GFP_NOIO) - swq = __vmalloc( - sqsize * sz, - gfp | __GFP_ZERO, PAGE_KERNEL); - else - swq = vzalloc_node( - sqsize * sz, - rdi->dparms.node); + swq = vzalloc_node(sqsize * sz, rdi->dparms.node); if (!swq) return ERR_PTR(-ENOMEM); @@ -741,7 +723,8 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, } else if (init_attr->cap.max_recv_sge > 1) sg_list_sz = sizeof(*qp->r_sg_list) * (init_attr->cap.max_recv_sge - 1); - qp = kzalloc_node(sz + sg_list_sz, gfp, rdi->dparms.node); + qp = kzalloc_node(sz + sg_list_sz, GFP_KERNEL, + rdi->dparms.node); if (!qp) goto bail_swq; @@ -751,7 +734,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, kzalloc_node( sizeof(*qp->s_ack_queue) * rvt_max_atomic(rdi), - gfp, + GFP_KERNEL, rdi->dparms.node); if (!qp->s_ack_queue) goto bail_qp; @@ -766,7 +749,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, * Driver needs to set up it's private QP structure and do any * initialization that is needed. 
*/ - priv = rdi->driver_f.qp_priv_alloc(rdi, qp, gfp); + priv = rdi->driver_f.qp_priv_alloc(rdi, qp); if (IS_ERR(priv)) { ret = priv; goto bail_qp; @@ -786,11 +769,6 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, qp->r_rq.wq = vmalloc_user( sizeof(struct rvt_rwq) + qp->r_rq.size * sz); - else if (gfp == GFP_NOIO) - qp->r_rq.wq = __vmalloc( - sizeof(struct rvt_rwq) + - qp->r_rq.size * sz, - gfp | __GFP_ZERO, PAGE_KERNEL); else qp->r_rq.wq = vzalloc_node( sizeof(struct rvt_rwq) + @@ -824,7 +802,7 @@ struct ib_qp *rvt_create_qp(struct ib_pd *ibpd, err = alloc_qpn(rdi, &rdi->qp_dev->qpn_table, init_attr->qp_type, - init_attr->port_num, gfp); + init_attr->port_num); if (err < 0) { ret = ERR_PTR(err); goto bail_rq_wq; @@ -1280,9 +1258,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (attr_mask & IB_QP_TIMEOUT) { qp->timeout = attr->timeout; - qp->timeout_jiffies = - usecs_to_jiffies((4096UL * (1UL << qp->timeout)) / - 1000UL); + qp->timeout_jiffies = rvt_timeout_to_jiffies(qp->timeout); } if (attr_mask & IB_QP_QKEY) diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index c3a140ed4df2..08f3f90d2912 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -441,6 +441,8 @@ static void rxe_skb_tx_dtor(struct sk_buff *skb) if (unlikely(qp->need_req_skb && skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW)) rxe_run_task(&qp->req.task, 1); + + rxe_drop_ref(qp); } int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb) @@ -473,6 +475,7 @@ int rxe_send(struct rxe_dev *rxe, struct rxe_pkt_info *pkt, struct sk_buff *skb) return -EAGAIN; } + rxe_add_ref(pkt->qp); atomic_inc(&pkt->qp->skb_out); kfree_skb(skb); diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 23039768f541..a958ee918a49 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -995,7 +995,9 @@ static int send_atomic_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, free_rd_atomic_resource(qp, res); rxe_advance_resp_resource(qp); - memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(skb->cb)); + memcpy(SKB_TO_PKT(skb), &ack_pkt, sizeof(ack_pkt)); + memset((unsigned char *)SKB_TO_PKT(skb) + sizeof(ack_pkt), 0, + sizeof(skb->cb) - sizeof(ack_pkt)); res->type = RXE_ATOMIC_MASK; res->atomic.skb = skb; @@ -1217,6 +1219,9 @@ void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) kfree_skb(skb); } + if (notify) + return; + while (!qp->srq && qp->rq.queue && queue_head(qp->rq.queue)) advance_consumer(qp->rq.queue); } diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 073e66783f1d..af90a7d42b96 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -914,6 +914,9 @@ static int rxe_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr, spin_unlock_irqrestore(&rq->producer_lock, flags); + if (qp->resp.state == QP_STATE_ERROR) + rxe_run_task(&qp->resp.task, 1); + err1: return err; } @@ -1240,6 +1243,8 @@ int rxe_register_device(struct rxe_dev *rxe) addrconf_addr_eui48((unsigned char *)&dev->node_guid, rxe->ndev->dev_addr); dev->dev.dma_ops = &dma_virt_ops; + dma_coerce_mask_and_coherent(&dev->dev, + dma_get_required_mask(dev->dev.parent)); dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION; dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 7cbcfdac6529..f87d104837dc 100644 --- 
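The rxe_net hunk above pairs rxe_add_ref() at transmit with rxe_drop_ref() in the skb destructor, closing a window where the QP could be freed while an skb that will call back into it was still in flight. The general rule: whoever arms an asynchronous callback owns a reference that only the callback releases. A hedged sketch with illustrative names:

#include <linux/kref.h>
#include <linux/slab.h>

struct demo_obj {
	struct kref ref;
	atomic_t inflight;
};

static void demo_release(struct kref *ref)
{
	kfree(container_of(ref, struct demo_obj, ref));
}

static void demo_submit(struct demo_obj *o)
{
	kref_get(&o->ref);		/* owned by the completion callback */
	atomic_inc(&o->inflight);
	/* ... start async work; demo_complete() fires later ... */
}

static void demo_complete(struct demo_obj *o)
{
	atomic_dec(&o->inflight);
	kref_put(&o->ref, demo_release);	/* may free o right here */
}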
a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -39,6 +39,7 @@ #include <linux/vmalloc.h> #include <linux/moduleparam.h> #include <linux/sched/signal.h> +#include <linux/sched/mm.h> #include "ipoib.h" @@ -954,7 +955,7 @@ void ipoib_cm_dev_stop(struct net_device *dev) break; } spin_unlock_irq(&priv->lock); - msleep(1); + usleep_range(1000, 2000); ipoib_drain_cq(dev); spin_lock_irq(&priv->lock); } @@ -1047,9 +1048,8 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_ .sq_sig_type = IB_SIGNAL_ALL_WR, .qp_type = IB_QPT_RC, .qp_context = tx, - .create_flags = IB_QP_CREATE_USE_GFP_NOIO + .create_flags = 0 }; - struct ib_qp *tx_qp; if (dev->features & NETIF_F_SG) @@ -1057,10 +1057,6 @@ static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_ min_t(u32, priv->ca->attrs.max_sge, MAX_SKB_FRAGS + 1); tx_qp = ib_create_qp(priv->pd, &attr); - if (PTR_ERR(tx_qp) == -EINVAL) { - attr.create_flags &= ~IB_QP_CREATE_USE_GFP_NOIO; - tx_qp = ib_create_qp(priv->pd, &attr); - } tx->max_send_sge = attr.cap.max_send_sge; return tx_qp; } @@ -1131,10 +1127,11 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, struct sa_path_rec *pathrec) { struct ipoib_dev_priv *priv = ipoib_priv(p->dev); + unsigned int noio_flag; int ret; - p->tx_ring = __vmalloc(ipoib_sendq_size * sizeof *p->tx_ring, - GFP_NOIO, PAGE_KERNEL); + noio_flag = memalloc_noio_save(); + p->tx_ring = vzalloc(ipoib_sendq_size * sizeof(*p->tx_ring)); if (!p->tx_ring) { ret = -ENOMEM; goto err_tx; @@ -1142,9 +1139,10 @@ static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn, memset(p->tx_ring, 0, ipoib_sendq_size * sizeof *p->tx_ring); p->qp = ipoib_cm_create_tx_qp(p->dev, p); + memalloc_noio_restore(noio_flag); if (IS_ERR(p->qp)) { ret = PTR_ERR(p->qp); - ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret); + ipoib_warn(priv, "failed to create tx qp: %d\n", ret); goto err_qp; } @@ -1206,7 +1204,7 @@ static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p) goto timeout; } - msleep(1); + usleep_range(1000, 2000); } } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index efe7402f4885..57a9655e844d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -770,7 +770,7 @@ int ipoib_ib_dev_stop_default(struct net_device *dev) ipoib_drain_cq(dev); - msleep(1); + usleep_range(1000, 2000); } ipoib_dbg(priv, "All sends and receives done.\n"); diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 6e86eeee370e..4ce315c92b48 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -233,6 +233,7 @@ static netdev_features_t ipoib_fix_features(struct net_device *dev, netdev_featu static int ipoib_change_mtu(struct net_device *dev, int new_mtu) { struct ipoib_dev_priv *priv = ipoib_priv(dev); + int ret = 0; /* dev->mtu > 2K ==> connected mode */ if (ipoib_cm_admin_enabled(dev)) { @@ -256,9 +257,34 @@ static int ipoib_change_mtu(struct net_device *dev, int new_mtu) ipoib_dbg(priv, "MTU must be smaller than the underlying " "link layer MTU - 4 (%u)\n", priv->mcast_mtu); - dev->mtu = min(priv->mcast_mtu, priv->admin_mtu); + new_mtu = min(priv->mcast_mtu, priv->admin_mtu); - return 0; + if (priv->rn_ops->ndo_change_mtu) { + bool carrier_status = netif_carrier_ok(dev); + + netif_carrier_off(dev); + + /* notify lower level on the real mtu */ + ret = 
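The ipoib_cm changes above are the consumer side of the GFP_NOIO removals earlier in this diff: instead of passing IB_QP_CREATE_USE_GFP_NOIO down the stack, the caller brackets the allocation-heavy region with the memalloc scope API from <linux/sched/mm.h>, and every nested GFP_KERNEL allocation, including those deep inside ib_create_qp(), implicitly behaves as GFP_NOIO. A minimal sketch:

#include <linux/sched/mm.h>
#include <linux/vmalloc.h>

static void *alloc_under_noio(size_t size)
{
	unsigned int noio_flag;
	void *p;

	noio_flag = memalloc_noio_save();	/* enter no-I/O scope */
	p = vzalloc(size);			/* acts like GFP_NOIO here */
	memalloc_noio_restore(noio_flag);	/* leave the scope */
	return p;
}

The save/restore pair nests correctly, which is why no gfp_t has to be threaded through the verbs API any more.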
priv->rn_ops->ndo_change_mtu(dev, new_mtu); + + if (carrier_status) + netif_carrier_on(dev); + } else { + dev->mtu = new_mtu; + } + + return ret; +} + +static void ipoib_get_stats(struct net_device *dev, + struct rtnl_link_stats64 *stats) +{ + struct ipoib_dev_priv *priv = ipoib_priv(dev); + + if (priv->rn_ops->ndo_get_stats64) + priv->rn_ops->ndo_get_stats64(dev, stats); + else + netdev_stats_to_stats64(stats, &dev->stats); } /* Called with an RCU read lock taken */ @@ -1808,6 +1834,7 @@ static const struct net_device_ops ipoib_netdev_ops_pf = { .ndo_get_vf_stats = ipoib_get_vf_stats, .ndo_set_vf_guid = ipoib_set_vf_guid, .ndo_set_mac_address = ipoib_set_mac, + .ndo_get_stats64 = ipoib_get_stats, }; static const struct net_device_ops ipoib_netdev_ops_vf = { @@ -2212,6 +2239,7 @@ static struct net_device *ipoib_add_port(const char *format, goto register_failed; } + result = -ENOMEM; if (ipoib_cm_add_mode_attr(priv->dev)) goto sysfs_failed; if (ipoib_add_pkey_attr(priv->dev)) diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 5a887efb4bdf..37b33d708c2d 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -83,6 +83,7 @@ static struct scsi_host_template iscsi_iser_sht; static struct iscsi_transport iscsi_iser_transport; static struct scsi_transport_template *iscsi_iser_scsi_transport; static struct workqueue_struct *release_wq; +static DEFINE_MUTEX(unbind_iser_conn_mutex); struct iser_global ig; int iser_debug_level = 0; @@ -550,12 +551,14 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) */ if (iser_conn) { mutex_lock(&iser_conn->state_mutex); + mutex_lock(&unbind_iser_conn_mutex); iser_conn_terminate(iser_conn); iscsi_conn_stop(cls_conn, flag); /* unbind */ iser_conn->iscsi_conn = NULL; conn->dd_data = NULL; + mutex_unlock(&unbind_iser_conn_mutex); complete(&iser_conn->stop_completion); mutex_unlock(&iser_conn->state_mutex); @@ -977,13 +980,21 @@ static int iscsi_iser_slave_alloc(struct scsi_device *sdev) struct iser_conn *iser_conn; struct ib_device *ib_dev; + mutex_lock(&unbind_iser_conn_mutex); + session = starget_to_session(scsi_target(sdev))->dd_data; iser_conn = session->leadconn->dd_data; + if (!iser_conn) { + mutex_unlock(&unbind_iser_conn_mutex); + return -ENOTCONN; + } ib_dev = iser_conn->ib_conn.device->ib_device; if (!(ib_dev->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)) blk_queue_virt_boundary(sdev->request_queue, ~MASK_4K); + mutex_unlock(&unbind_iser_conn_mutex); + return 0; } diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 12ed62ce9ff7..2a07692007bd 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -137,8 +137,10 @@ iser_prepare_write_cmd(struct iscsi_task *task, if (unsol_sz < edtl) { hdr->flags |= ISER_WSV; - hdr->write_stag = cpu_to_be32(mem_reg->rkey); - hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz); + if (buf_out->data_len > imm_sz) { + hdr->write_stag = cpu_to_be32(mem_reg->rkey); + hdr->write_va = cpu_to_be64(mem_reg->sge.addr + unsol_sz); + } iser_dbg("Cmd itt:%d, WRITE tags, RKEY:%#.4X " "VA:%#llX + unsol:%d\n", diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index c538a38c91ce..26a004e97ae0 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -708,8 +708,14 @@ iser_calc_scsi_params(struct iser_conn *iser_conn, 
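The iscsi_iser hunks above close a race between connection teardown, which NULLs conn->dd_data, and iscsi_iser_slave_alloc(), which dereferences it: both sides now serialize on unbind_iser_conn_mutex, and the reader revalidates the pointer under the lock. The shape of the fix, with hypothetical types:

#include <linux/mutex.h>
#include <linux/errno.h>

static DEFINE_MUTEX(demo_unbind_mutex);

struct demo_conn;
struct demo_session { struct demo_conn *conn; };

static int demo_use(struct demo_conn *conn) { return conn ? 0 : -EINVAL; }

static int demo_reader(struct demo_session *session)
{
	int ret;

	mutex_lock(&demo_unbind_mutex);
	if (!session->conn) {		/* teardown already unbound it */
		mutex_unlock(&demo_unbind_mutex);
		return -ENOTCONN;
	}
	ret = demo_use(session->conn);	/* safe: unbind is excluded */
	mutex_unlock(&demo_unbind_mutex);
	return ret;
}

static void demo_unbind(struct demo_session *session)
{
	mutex_lock(&demo_unbind_mutex);
	session->conn = NULL;	/* readers now see NULL, never garbage */
	mutex_unlock(&demo_unbind_mutex);
}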
unsigned short sg_tablesize, sup_sg_tablesize; sg_tablesize = DIV_ROUND_UP(max_sectors * 512, SIZE_4K); - sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE, - device->ib_device->attrs.max_fast_reg_page_list_len); + if (device->ib_device->attrs.device_cap_flags & + IB_DEVICE_MEM_MGT_EXTENSIONS) + sup_sg_tablesize = + min_t( + uint, ISCSI_ISER_MAX_SG_TABLESIZE, + device->ib_device->attrs.max_fast_reg_page_list_len); + else + sup_sg_tablesize = ISCSI_ISER_MAX_SG_TABLESIZE; iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize); } diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index fcbed35e95a8..0e662656ef42 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -1452,7 +1452,7 @@ static void isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc) { struct isert_conn *isert_conn = wc->qp->qp_context; - struct ib_device *ib_dev = isert_conn->cm_id->device; + struct ib_device *ib_dev = isert_conn->device->ib_device; if (unlikely(wc->status != IB_WC_SUCCESS)) { isert_print_wc(wc, "login recv"); diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 1ced0731c140..402275be0931 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -1157,8 +1157,8 @@ static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx) } spin_unlock_irqrestore(&ioctx->spinlock, flags); - pr_debug("Aborting cmd with state %d and tag %lld\n", state, - ioctx->cmd.tag); + pr_debug("Aborting cmd with state %d -> %d and tag %lld\n", state, + ioctx->state, ioctx->cmd.tag); switch (state) { case SRPT_STATE_NEW: diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index da3d362f21b1..a047b9af8369 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -48,6 +48,7 @@ struct gpio_button_data { spinlock_t lock; bool disabled; bool key_pressed; + bool suspended; }; struct gpio_keys_drvdata { @@ -396,8 +397,20 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id) BUG_ON(irq != bdata->irq); - if (bdata->button->wakeup) + if (bdata->button->wakeup) { + const struct gpio_keys_button *button = bdata->button; + pm_stay_awake(bdata->input->dev.parent); + if (bdata->suspended && + (button->type == 0 || button->type == EV_KEY)) { + /* + * Simulate wakeup key press in case the key has + * already released by the time we got interrupt + * handler to run. 
+ */ + input_report_key(bdata->input, button->code, 1); + } + } mod_delayed_work(system_wq, &bdata->work, @@ -855,6 +868,7 @@ static int __maybe_unused gpio_keys_suspend(struct device *dev) struct gpio_button_data *bdata = &ddata->data[i]; if (bdata->button->wakeup) enable_irq_wake(bdata->irq); + bdata->suspended = true; } } else { mutex_lock(&input->mutex); @@ -878,6 +892,7 @@ static int __maybe_unused gpio_keys_resume(struct device *dev) struct gpio_button_data *bdata = &ddata->data[i]; if (bdata->button->wakeup) disable_irq_wake(bdata->irq); + bdata->suspended = false; } } else { mutex_lock(&input->mutex); diff --git a/drivers/input/misc/xen-kbdfront.c b/drivers/input/misc/xen-kbdfront.c index eb770613a9bd..fa130e7b734c 100644 --- a/drivers/input/misc/xen-kbdfront.c +++ b/drivers/input/misc/xen-kbdfront.c @@ -17,6 +17,7 @@ #include <linux/errno.h> #include <linux/module.h> #include <linux/input.h> +#include <linux/input/mt.h> #include <linux/slab.h> #include <asm/xen/hypervisor.h> @@ -34,11 +35,14 @@ struct xenkbd_info { struct input_dev *kbd; struct input_dev *ptr; + struct input_dev *mtouch; struct xenkbd_page *page; int gref; int irq; struct xenbus_device *xbdev; char phys[32]; + /* current MT slot/contact ID we are injecting events in */ + int mtouch_cur_contact_id; }; enum { KPARAM_X, KPARAM_Y, KPARAM_CNT }; @@ -56,6 +60,112 @@ static void xenkbd_disconnect_backend(struct xenkbd_info *); * to do that. */ +static void xenkbd_handle_motion_event(struct xenkbd_info *info, + struct xenkbd_motion *motion) +{ + input_report_rel(info->ptr, REL_X, motion->rel_x); + input_report_rel(info->ptr, REL_Y, motion->rel_y); + if (motion->rel_z) + input_report_rel(info->ptr, REL_WHEEL, -motion->rel_z); + input_sync(info->ptr); +} + +static void xenkbd_handle_position_event(struct xenkbd_info *info, + struct xenkbd_position *pos) +{ + input_report_abs(info->ptr, ABS_X, pos->abs_x); + input_report_abs(info->ptr, ABS_Y, pos->abs_y); + if (pos->rel_z) + input_report_rel(info->ptr, REL_WHEEL, -pos->rel_z); + input_sync(info->ptr); +} + +static void xenkbd_handle_key_event(struct xenkbd_info *info, + struct xenkbd_key *key) +{ + struct input_dev *dev; + + if (test_bit(key->keycode, info->ptr->keybit)) { + dev = info->ptr; + } else if (test_bit(key->keycode, info->kbd->keybit)) { + dev = info->kbd; + } else { + pr_warn("unhandled keycode 0x%x\n", key->keycode); + return; + } + + input_report_key(dev, key->keycode, key->pressed); + input_sync(dev); +} + +static void xenkbd_handle_mt_event(struct xenkbd_info *info, + struct xenkbd_mtouch *mtouch) +{ + if (unlikely(!info->mtouch)) + return; + + if (mtouch->contact_id != info->mtouch_cur_contact_id) { + info->mtouch_cur_contact_id = mtouch->contact_id; + input_mt_slot(info->mtouch, mtouch->contact_id); + } + + switch (mtouch->event_type) { + case XENKBD_MT_EV_DOWN: + input_mt_report_slot_state(info->mtouch, MT_TOOL_FINGER, true); + /* fall through */ + + case XENKBD_MT_EV_MOTION: + input_report_abs(info->mtouch, ABS_MT_POSITION_X, + mtouch->u.pos.abs_x); + input_report_abs(info->mtouch, ABS_MT_POSITION_Y, + mtouch->u.pos.abs_y); + break; + + case XENKBD_MT_EV_SHAPE: + input_report_abs(info->mtouch, ABS_MT_TOUCH_MAJOR, + mtouch->u.shape.major); + input_report_abs(info->mtouch, ABS_MT_TOUCH_MINOR, + mtouch->u.shape.minor); + break; + + case XENKBD_MT_EV_ORIENT: + input_report_abs(info->mtouch, ABS_MT_ORIENTATION, + mtouch->u.orientation); + break; + + case XENKBD_MT_EV_UP: + input_mt_report_slot_state(info->mtouch, MT_TOOL_FINGER, false); + break; + + case 
XENKBD_MT_EV_SYN: + input_mt_sync_frame(info->mtouch); + input_sync(info->mtouch); + break; + } +} + +static void xenkbd_handle_event(struct xenkbd_info *info, + union xenkbd_in_event *event) +{ + switch (event->type) { + case XENKBD_TYPE_MOTION: + xenkbd_handle_motion_event(info, &event->motion); + break; + + case XENKBD_TYPE_KEY: + xenkbd_handle_key_event(info, &event->key); + break; + + case XENKBD_TYPE_POS: + xenkbd_handle_position_event(info, &event->pos); + break; + + case XENKBD_TYPE_MTOUCH: + xenkbd_handle_mt_event(info, &event->mtouch); + break; + } +} + static irqreturn_t input_handler(int rq, void *dev_id) { struct xenkbd_info *info = dev_id; @@ -66,44 +176,8 @@ static irqreturn_t input_handler(int rq, void *dev_id) if (prod == page->in_cons) return IRQ_HANDLED; rmb(); /* ensure we see ring contents up to prod */ - for (cons = page->in_cons; cons != prod; cons++) { - union xenkbd_in_event *event; - struct input_dev *dev; - event = &XENKBD_IN_RING_REF(page, cons); - - dev = info->ptr; - switch (event->type) { - case XENKBD_TYPE_MOTION: - input_report_rel(dev, REL_X, event->motion.rel_x); - input_report_rel(dev, REL_Y, event->motion.rel_y); - if (event->motion.rel_z) - input_report_rel(dev, REL_WHEEL, - -event->motion.rel_z); - break; - case XENKBD_TYPE_KEY: - dev = NULL; - if (test_bit(event->key.keycode, info->kbd->keybit)) - dev = info->kbd; - if (test_bit(event->key.keycode, info->ptr->keybit)) - dev = info->ptr; - if (dev) - input_report_key(dev, event->key.keycode, - event->key.pressed); - else - pr_warn("unhandled keycode 0x%x\n", - event->key.keycode); - break; - case XENKBD_TYPE_POS: - input_report_abs(dev, ABS_X, event->pos.abs_x); - input_report_abs(dev, ABS_Y, event->pos.abs_y); - if (event->pos.rel_z) - input_report_rel(dev, REL_WHEEL, - -event->pos.rel_z); - break; - } - if (dev) - input_sync(dev); - } + for (cons = page->in_cons; cons != prod; cons++) + xenkbd_handle_event(info, &XENKBD_IN_RING_REF(page, cons)); mb(); /* ensure we got ring contents */ page->in_cons = cons; notify_remote_via_irq(info->irq); @@ -115,9 +189,9 @@ static int xenkbd_probe(struct xenbus_device *dev, const struct xenbus_device_id *id) { int ret, i; - unsigned int abs; + unsigned int abs, touch; struct xenkbd_info *info; - struct input_dev *kbd, *ptr; + struct input_dev *kbd, *ptr, *mtouch; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) { @@ -152,6 +226,17 @@ static int xenkbd_probe(struct xenbus_device *dev, } } + touch = xenbus_read_unsigned(dev->nodename, + XENKBD_FIELD_FEAT_MTOUCH, 0); + if (touch) { + ret = xenbus_write(XBT_NIL, dev->nodename, + XENKBD_FIELD_REQ_MTOUCH, "1"); + if (ret) { + pr_warn("xenkbd: can't request multi-touch"); + touch = 0; + } + } + /* keyboard */ kbd = input_allocate_device(); if (!kbd) @@ -208,6 +293,58 @@ static int xenkbd_probe(struct xenbus_device *dev, } info->ptr = ptr; + /* multi-touch device */ + if (touch) { + int num_cont, width, height; + + mtouch = input_allocate_device(); + if (!mtouch) + goto error_nomem; + + num_cont = xenbus_read_unsigned(info->xbdev->nodename, + XENKBD_FIELD_MT_NUM_CONTACTS, + 1); + width = xenbus_read_unsigned(info->xbdev->nodename, + XENKBD_FIELD_MT_WIDTH, + XENFB_WIDTH); + height = xenbus_read_unsigned(info->xbdev->nodename, + XENKBD_FIELD_MT_HEIGHT, + XENFB_HEIGHT); + + mtouch->name = "Xen Virtual Multi-touch"; + mtouch->phys = info->phys; + mtouch->id.bustype = BUS_PCI; + mtouch->id.vendor = 0x5853; + mtouch->id.product = 0xfffd; + + input_set_abs_params(mtouch, ABS_MT_TOUCH_MAJOR, + 0, 255, 0, 0); + 
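The xen-kbdfront rework above is a textbook multi-touch protocol-B consumer: pick a slot, report per-contact state and coordinates, and close the frame on the backend's SYN event. A hedged sketch of that reporting sequence; it assumes probe has already called input_mt_init_slots() and set the ABS_MT_* ranges, as the code above does:

#include <linux/input/mt.h>

static void demo_report_contact(struct input_dev *dev, int slot,
				int x, int y, bool down)
{
	input_mt_slot(dev, slot);
	input_mt_report_slot_state(dev, MT_TOOL_FINGER, down);
	if (down) {
		input_report_abs(dev, ABS_MT_POSITION_X, x);
		input_report_abs(dev, ABS_MT_POSITION_Y, y);
	}
	input_mt_sync_frame(dev);	/* end of this hardware report */
	input_sync(dev);
}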
input_set_abs_params(mtouch, ABS_MT_POSITION_X, + 0, width, 0, 0); + input_set_abs_params(mtouch, ABS_MT_POSITION_Y, + 0, height, 0, 0); + input_set_abs_params(mtouch, ABS_MT_PRESSURE, + 0, 255, 0, 0); + + ret = input_mt_init_slots(mtouch, num_cont, INPUT_MT_DIRECT); + if (ret) { + input_free_device(mtouch); + xenbus_dev_fatal(info->xbdev, ret, + "input_mt_init_slots"); + goto error; + } + + ret = input_register_device(mtouch); + if (ret) { + input_free_device(mtouch); + xenbus_dev_fatal(info->xbdev, ret, + "input_register_device(mtouch)"); + goto error; + } + info->mtouch_cur_contact_id = -1; + info->mtouch = mtouch; + } + ret = xenkbd_connect_backend(dev, info); if (ret < 0) goto error; @@ -240,6 +377,8 @@ static int xenkbd_remove(struct xenbus_device *dev) input_unregister_device(info->kbd); if (info->ptr) input_unregister_device(info->ptr); + if (info->mtouch) + input_unregister_device(info->mtouch); free_page((unsigned long)info->page); kfree(info); return 0; diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index c52da651269b..824f4c1c1f31 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -436,8 +436,10 @@ static int i8042_start(struct serio *serio) { struct i8042_port *port = serio->port_data; + spin_lock_irq(&i8042_lock); port->exists = true; - mb(); + spin_unlock_irq(&i8042_lock); + return 0; } @@ -450,16 +452,20 @@ static void i8042_stop(struct serio *serio) { struct i8042_port *port = serio->port_data; + spin_lock_irq(&i8042_lock); port->exists = false; + port->serio = NULL; + spin_unlock_irq(&i8042_lock); /* + * We need to make sure that interrupt handler finishes using + * our serio port before we return from this function. * We synchronize with both AUX and KBD IRQs because there is * a (very unlikely) chance that AUX IRQ is raised for KBD port * and vice versa. */ synchronize_irq(I8042_AUX_IRQ); synchronize_irq(I8042_KBD_IRQ); - port->serio = NULL; } /* @@ -576,7 +582,7 @@ static irqreturn_t i8042_interrupt(int irq, void *dev_id) spin_unlock_irqrestore(&i8042_lock, flags); - if (likely(port->exists && !filtered)) + if (likely(serio && !filtered)) serio_interrupt(serio, data, dfl); out: diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index 6ee3a25ae731..f73ff28f77e2 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -23,7 +23,7 @@ config IOMMU_IO_PGTABLE config IOMMU_IO_PGTABLE_LPAE bool "ARMv7/v8 Long Descriptor Format" select IOMMU_IO_PGTABLE - depends on HAS_DMA && (ARM || ARM64 || COMPILE_TEST) + depends on HAS_DMA && (ARM || ARM64 || (COMPILE_TEST && !GENERIC_ATOMIC64)) help Enable support for the ARM long descriptor pagetable format. 
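The i8042 change above replaces a bare memory barrier with the same spinlock the interrupt handler takes, so ->exists and ->serio are always observed as a consistent pair, and the ISR now tests the serio pointer it actually uses rather than a flag that may be stale. synchronize_irq() then guarantees no handler still holds the old pointer when i8042_stop() returns. A sketch with hypothetical types:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(demo_lock);

struct demo_port {
	bool exists;
	void *handler_data;	/* stands in for port->serio */
	unsigned int irq;
};

static void demo_port_stop(struct demo_port *port)
{
	spin_lock_irq(&demo_lock);	/* same lock as the ISR */
	port->exists = false;
	port->handler_data = NULL;
	spin_unlock_irq(&demo_lock);

	/* wait out any handler that sampled the pointer before we locked */
	synchronize_irq(port->irq);
}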
This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page @@ -219,7 +219,7 @@ config OMAP_IOMMU_DEBUG config ROCKCHIP_IOMMU bool "Rockchip IOMMU Support" - depends on ARM + depends on ARM || ARM64 depends on ARCH_ROCKCHIP || COMPILE_TEST select IOMMU_API select ARM_DMA_USE_IOMMU @@ -274,7 +274,7 @@ config EXYNOS_IOMMU_DEBUG config IPMMU_VMSA bool "Renesas VMSA-compatible IPMMU" - depends on ARM_LPAE + depends on ARM || IOMMU_DMA depends on ARCH_RENESAS || COMPILE_TEST select IOMMU_API select IOMMU_IO_PGTABLE_LPAE diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index f16d0f26ee24..688e77576e5a 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -91,25 +91,6 @@ LIST_HEAD(ioapic_map); LIST_HEAD(hpet_map); LIST_HEAD(acpihid_map); -#define FLUSH_QUEUE_SIZE 256 - -struct flush_queue_entry { - unsigned long iova_pfn; - unsigned long pages; - struct dma_ops_domain *dma_dom; -}; - -struct flush_queue { - spinlock_t lock; - unsigned next; - struct flush_queue_entry *entries; -}; - -static DEFINE_PER_CPU(struct flush_queue, flush_queue); - -static atomic_t queue_timer_on; -static struct timer_list queue_timer; - /* * Domain for untranslated devices - only allocated * if iommu=pt passed on kernel cmd line. @@ -140,6 +121,8 @@ struct iommu_dev_data { PPR completions */ u32 errata; /* Bitmap for errata to apply */ bool use_vapic; /* Enable device to use vapic mode */ + + struct ratelimit_state rs; /* Ratelimit IOPF messages */ }; /* @@ -155,6 +138,20 @@ static void update_domain(struct protection_domain *domain); static int protection_domain_init(struct protection_domain *domain); static void detach_device(struct device *dev); +#define FLUSH_QUEUE_SIZE 256 + +struct flush_queue_entry { + unsigned long iova_pfn; + unsigned long pages; + u64 counter; /* Flush counter when this entry was added to the queue */ +}; + +struct flush_queue { + struct flush_queue_entry *entries; + unsigned head, tail; + spinlock_t lock; +}; + /* * Data container for a dma_ops specific protection domain */ @@ -164,6 +161,36 @@ struct dma_ops_domain { /* IOVA RB-Tree */ struct iova_domain iovad; + + struct flush_queue __percpu *flush_queue; + + /* + * We need two counter here to be race-free wrt. IOTLB flushing and + * adding entries to the flush queue. + * + * The flush_start_cnt is incremented _before_ the IOTLB flush starts. + * New entries added to the flush ring-buffer get their 'counter' value + * from here. This way we can make sure that entries added to the queue + * (or other per-cpu queues of the same domain) while the TLB is about + * to be flushed are not considered to be flushed already. + */ + atomic64_t flush_start_cnt; + + /* + * The flush_finish_cnt is incremented when an IOTLB flush is complete. + * This value is always smaller than flush_start_cnt. The queue_add + * function frees all IOVAs that have a counter value smaller than + * flush_finish_cnt. This makes sure that we only free IOVAs that are + * flushed out of the IOTLB of the domain. 
+ */ + atomic64_t flush_finish_cnt; + + /* + * Timer to make sure we don't keep IOVAs around unflushed + * for too long + */ + struct timer_list flush_timer; + atomic_t flush_timer_on; }; static struct iova_domain reserved_iova_ranges; @@ -255,6 +282,8 @@ static struct iommu_dev_data *alloc_dev_data(u16 devid) list_add_tail(&dev_data->dev_data_list, &dev_data_list); spin_unlock_irqrestore(&dev_data_list_lock, flags); + ratelimit_default_init(&dev_data->rs); + return dev_data; } @@ -553,6 +582,29 @@ static void dump_command(unsigned long phys_addr) pr_err("AMD-Vi: CMD[%d]: %08x\n", i, cmd->data[i]); } +static void amd_iommu_report_page_fault(u16 devid, u16 domain_id, + u64 address, int flags) +{ + struct iommu_dev_data *dev_data = NULL; + struct pci_dev *pdev; + + pdev = pci_get_bus_and_slot(PCI_BUS_NUM(devid), devid & 0xff); + if (pdev) + dev_data = get_dev_data(&pdev->dev); + + if (dev_data && __ratelimit(&dev_data->rs)) { + dev_err(&pdev->dev, "AMD-Vi: Event logged [IO_PAGE_FAULT domain=0x%04x address=0x%016llx flags=0x%04x]\n", + domain_id, address, flags); + } else if (printk_ratelimit()) { + pr_err("AMD-Vi: Event logged [IO_PAGE_FAULT device=%02x:%02x.%x domain=0x%04x address=0x%016llx flags=0x%04x]\n", + PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), + domain_id, address, flags); + } + + if (pdev) + pci_dev_put(pdev); +} + static void iommu_print_event(struct amd_iommu *iommu, void *__evt) { int type, devid, domid, flags; @@ -577,7 +629,12 @@ retry: goto retry; } - printk(KERN_ERR "AMD-Vi: Event logged ["); + if (type == EVENT_TYPE_IO_FAULT) { + amd_iommu_report_page_fault(devid, domid, address, flags); + return; + } else { + printk(KERN_ERR "AMD-Vi: Event logged ["); + } switch (type) { case EVENT_TYPE_ILL_DEV: @@ -587,12 +644,6 @@ retry: address, flags); dump_dte_entry(devid); break; - case EVENT_TYPE_IO_FAULT: - printk("IO_PAGE_FAULT device=%02x:%02x.%x " - "domain=0x%04x address=0x%016llx flags=0x%04x]\n", - PCI_BUS_NUM(devid), PCI_SLOT(devid), PCI_FUNC(devid), - domid, address, flags); - break; case EVENT_TYPE_DEV_TAB_ERR: printk("DEV_TAB_HARDWARE_ERROR device=%02x:%02x.%x " "address=0x%016llx flags=0x%04x]\n", @@ -850,19 +901,20 @@ static int wait_on_sem(volatile u64 *sem) } static void copy_cmd_to_buffer(struct amd_iommu *iommu, - struct iommu_cmd *cmd, - u32 tail) + struct iommu_cmd *cmd) { u8 *target; - target = iommu->cmd_buf + tail; - tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; + target = iommu->cmd_buf + iommu->cmd_buf_tail; + + iommu->cmd_buf_tail += sizeof(*cmd); + iommu->cmd_buf_tail %= CMD_BUFFER_SIZE; /* Copy command to buffer */ memcpy(target, cmd, sizeof(*cmd)); /* Tell the IOMMU about it */ - writel(tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); + writel(iommu->cmd_buf_tail, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); } static void build_completion_wait(struct iommu_cmd *cmd, u64 address) @@ -1020,33 +1072,34 @@ static int __iommu_queue_command_sync(struct amd_iommu *iommu, struct iommu_cmd *cmd, bool sync) { - u32 left, tail, head, next_tail; + unsigned int count = 0; + u32 left, next_tail; + next_tail = (iommu->cmd_buf_tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; again: - - head = readl(iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); - tail = readl(iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); - next_tail = (tail + sizeof(*cmd)) % CMD_BUFFER_SIZE; - left = (head - next_tail) % CMD_BUFFER_SIZE; + left = (iommu->cmd_buf_head - next_tail) % CMD_BUFFER_SIZE; if (left <= 0x20) { - struct iommu_cmd sync_cmd; - int ret; - - iommu->cmd_sem = 0; + /* Skip udelay() the first 
time around */ + if (count++) { + if (count == LOOP_TIMEOUT) { + pr_err("AMD-Vi: Command buffer timeout\n"); + return -EIO; + } - build_completion_wait(&sync_cmd, (u64)&iommu->cmd_sem); - copy_cmd_to_buffer(iommu, &sync_cmd, tail); + udelay(1); + } - if ((ret = wait_on_sem(&iommu->cmd_sem)) != 0) - return ret; + /* Update head and recheck remaining space */ + iommu->cmd_buf_head = readl(iommu->mmio_base + + MMIO_CMD_HEAD_OFFSET); goto again; } - copy_cmd_to_buffer(iommu, cmd, tail); + copy_cmd_to_buffer(iommu, cmd); - /* We need to sync now to make sure all commands are processed */ + /* Do we need to make sure all commands are processed? */ iommu->need_sync = sync; return 0; @@ -1735,6 +1788,180 @@ static void free_gcr3_table(struct protection_domain *domain) free_page((unsigned long)domain->gcr3_tbl); } +static void dma_ops_domain_free_flush_queue(struct dma_ops_domain *dom) +{ + int cpu; + + for_each_possible_cpu(cpu) { + struct flush_queue *queue; + + queue = per_cpu_ptr(dom->flush_queue, cpu); + kfree(queue->entries); + } + + free_percpu(dom->flush_queue); + + dom->flush_queue = NULL; +} + +static int dma_ops_domain_alloc_flush_queue(struct dma_ops_domain *dom) +{ + int cpu; + + atomic64_set(&dom->flush_start_cnt, 0); + atomic64_set(&dom->flush_finish_cnt, 0); + + dom->flush_queue = alloc_percpu(struct flush_queue); + if (!dom->flush_queue) + return -ENOMEM; + + /* First make sure everything is cleared */ + for_each_possible_cpu(cpu) { + struct flush_queue *queue; + + queue = per_cpu_ptr(dom->flush_queue, cpu); + queue->head = 0; + queue->tail = 0; + queue->entries = NULL; + } + + /* Now start doing the allocation */ + for_each_possible_cpu(cpu) { + struct flush_queue *queue; + + queue = per_cpu_ptr(dom->flush_queue, cpu); + queue->entries = kzalloc(FLUSH_QUEUE_SIZE * sizeof(*queue->entries), + GFP_KERNEL); + if (!queue->entries) { + dma_ops_domain_free_flush_queue(dom); + return -ENOMEM; + } + + spin_lock_init(&queue->lock); + } + + return 0; +} + +static void dma_ops_domain_flush_tlb(struct dma_ops_domain *dom) +{ + atomic64_inc(&dom->flush_start_cnt); + domain_flush_tlb(&dom->domain); + domain_flush_complete(&dom->domain); + atomic64_inc(&dom->flush_finish_cnt); +} + +static inline bool queue_ring_full(struct flush_queue *queue) +{ + assert_spin_locked(&queue->lock); + + return (((queue->tail + 1) % FLUSH_QUEUE_SIZE) == queue->head); +} + +#define queue_ring_for_each(i, q) \ + for (i = (q)->head; i != (q)->tail; i = (i + 1) % FLUSH_QUEUE_SIZE) + +static inline unsigned queue_ring_add(struct flush_queue *queue) +{ + unsigned idx = queue->tail; + + assert_spin_locked(&queue->lock); + queue->tail = (idx + 1) % FLUSH_QUEUE_SIZE; + + return idx; +} + +static inline void queue_ring_remove_head(struct flush_queue *queue) +{ + assert_spin_locked(&queue->lock); + queue->head = (queue->head + 1) % FLUSH_QUEUE_SIZE; +} + +static void queue_ring_free_flushed(struct dma_ops_domain *dom, + struct flush_queue *queue) +{ + u64 counter = atomic64_read(&dom->flush_finish_cnt); + int idx; + + queue_ring_for_each(idx, queue) { + /* + * This assumes that counter values in the ring-buffer are + * monotonically increasing.
+ */ + if (queue->entries[idx].counter >= counter) + break; + + free_iova_fast(&dom->iovad, + queue->entries[idx].iova_pfn, + queue->entries[idx].pages); + + queue_ring_remove_head(queue); + } +} + +static void queue_add(struct dma_ops_domain *dom, + unsigned long address, unsigned long pages) +{ + struct flush_queue *queue; + unsigned long flags; + int idx; + + pages = __roundup_pow_of_two(pages); + address >>= PAGE_SHIFT; + + queue = get_cpu_ptr(dom->flush_queue); + spin_lock_irqsave(&queue->lock, flags); + + /* + * First remove the entries from the ring-buffer that are already + * flushed to make the queue_ring_full() check below less likely + * to trigger + */ + queue_ring_free_flushed(dom, queue); + + /* + * When the ring-queue is full, flush the entries from the IOTLB so + * that we can free all entries with queue_ring_free_flushed() + * below. + */ + if (queue_ring_full(queue)) { + dma_ops_domain_flush_tlb(dom); + queue_ring_free_flushed(dom, queue); + } + + idx = queue_ring_add(queue); + + queue->entries[idx].iova_pfn = address; + queue->entries[idx].pages = pages; + queue->entries[idx].counter = atomic64_read(&dom->flush_start_cnt); + + spin_unlock_irqrestore(&queue->lock, flags); + + if (atomic_cmpxchg(&dom->flush_timer_on, 0, 1) == 0) + mod_timer(&dom->flush_timer, jiffies + msecs_to_jiffies(10)); + + put_cpu_ptr(dom->flush_queue); +} + +static void queue_flush_timeout(unsigned long data) +{ + struct dma_ops_domain *dom = (struct dma_ops_domain *)data; + int cpu; + + atomic_set(&dom->flush_timer_on, 0); + + dma_ops_domain_flush_tlb(dom); + + for_each_possible_cpu(cpu) { + struct flush_queue *queue; + unsigned long flags; + + queue = per_cpu_ptr(dom->flush_queue, cpu); + spin_lock_irqsave(&queue->lock, flags); + queue_ring_free_flushed(dom, queue); + spin_unlock_irqrestore(&queue->lock, flags); + } +} + /* * Free a domain, only used if something went wrong in the * allocation path and we need to free an already allocated page table */ @@ -1746,6 +1973,11 @@ static void dma_ops_domain_free(struct dma_ops_domain *dom) del_domain_from_list(&dom->domain); + if (timer_pending(&dom->flush_timer)) + del_timer(&dom->flush_timer); + + dma_ops_domain_free_flush_queue(dom); + put_iova_domain(&dom->iovad); free_pagetable(&dom->domain); @@ -1784,6 +2016,14 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void) /* Initialize reserved ranges */ copy_reserved_iova(&reserved_iova_ranges, &dma_dom->iovad); + if (dma_ops_domain_alloc_flush_queue(dma_dom)) + goto free_dma_dom; + + setup_timer(&dma_dom->flush_timer, queue_flush_timeout, + (unsigned long)dma_dom); + + atomic_set(&dma_dom->flush_timer_on, 0); + add_domain_to_list(&dma_dom->domain); return dma_dom; @@ -1846,7 +2086,8 @@ static void set_dte_entry(u16 devid, struct protection_domain *domain, bool ats) flags |= tmp; } - flags &= ~(0xffffUL); + + flags &= ~(DTE_FLAG_SA | 0xffffULL); flags |= domain->id; amd_iommu_dev_table[devid].data[1] = flags; @@ -2227,92 +2468,6 @@ static struct iommu_group *amd_iommu_device_group(struct device *dev) * *****************************************************************************/ -static void __queue_flush(struct flush_queue *queue) -{ - struct protection_domain *domain; - unsigned long flags; - int idx; - - /* First flush TLB of all known domains */ - spin_lock_irqsave(&amd_iommu_pd_lock, flags); - list_for_each_entry(domain, &amd_iommu_pd_list, list) - domain_flush_tlb(domain); - spin_unlock_irqrestore(&amd_iommu_pd_lock, flags); - - /* Wait until flushes have completed */ - domain_flush_complete(NULL); - - for (idx =
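The flush queue above is a one-slot-sacrificed ring buffer: full is (tail + 1) % SIZE == head, empty is head == tail, and every entry carries a flush_start_cnt snapshot so queue_ring_free_flushed() frees only IOVAs whose IOTLB flush has demonstrably completed. The index arithmetic in a compilable form:

#include <assert.h>

#define QSZ 256			/* FLUSH_QUEUE_SIZE in the driver */

static unsigned int head, tail;	/* the driver keeps one pair per CPU */

static int ring_full(void)  { return (tail + 1) % QSZ == head; }
static int ring_empty(void) { return head == tail; }

static unsigned int ring_add(void)
{
	unsigned int idx = tail;

	assert(!ring_full());	/* queue_add() flushes first if full */
	tail = (tail + 1) % QSZ;
	return idx;
}

static void ring_remove_head(void)
{
	assert(!ring_empty());
	head = (head + 1) % QSZ;
}

Sacrificing one slot means a queue holds at most QSZ - 1 entries; that is the cost of telling full from empty without a separate element count.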
0; idx < queue->next; ++idx) { - struct flush_queue_entry *entry; - - entry = queue->entries + idx; - - free_iova_fast(&entry->dma_dom->iovad, - entry->iova_pfn, - entry->pages); - - /* Not really necessary, just to make sure we catch any bugs */ - entry->dma_dom = NULL; - } - - queue->next = 0; -} - -static void queue_flush_all(void) -{ - int cpu; - - for_each_possible_cpu(cpu) { - struct flush_queue *queue; - unsigned long flags; - - queue = per_cpu_ptr(&flush_queue, cpu); - spin_lock_irqsave(&queue->lock, flags); - if (queue->next > 0) - __queue_flush(queue); - spin_unlock_irqrestore(&queue->lock, flags); - } -} - -static void queue_flush_timeout(unsigned long unsused) -{ - atomic_set(&queue_timer_on, 0); - queue_flush_all(); -} - -static void queue_add(struct dma_ops_domain *dma_dom, - unsigned long address, unsigned long pages) -{ - struct flush_queue_entry *entry; - struct flush_queue *queue; - unsigned long flags; - int idx; - - pages = __roundup_pow_of_two(pages); - address >>= PAGE_SHIFT; - - queue = get_cpu_ptr(&flush_queue); - spin_lock_irqsave(&queue->lock, flags); - - if (queue->next == FLUSH_QUEUE_SIZE) - __queue_flush(queue); - - idx = queue->next++; - entry = queue->entries + idx; - - entry->iova_pfn = address; - entry->pages = pages; - entry->dma_dom = dma_dom; - - spin_unlock_irqrestore(&queue->lock, flags); - - if (atomic_cmpxchg(&queue_timer_on, 0, 1) == 0) - mod_timer(&queue_timer, jiffies + msecs_to_jiffies(10)); - - put_cpu_ptr(&flush_queue); -} - - /* * In the dma_ops path we only have the struct device. This function * finds the corresponding IOMMU, the protection domain and the @@ -2807,7 +2962,7 @@ static int init_reserved_iova_ranges(void) int __init amd_iommu_init_api(void) { - int ret, cpu, err = 0; + int ret, err = 0; ret = iova_cache_get(); if (ret) @@ -2817,18 +2972,6 @@ int __init amd_iommu_init_api(void) if (ret) return ret; - for_each_possible_cpu(cpu) { - struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu); - - queue->entries = kzalloc(FLUSH_QUEUE_SIZE * - sizeof(*queue->entries), - GFP_KERNEL); - if (!queue->entries) - goto out_put_iova; - - spin_lock_init(&queue->lock); - } - err = bus_set_iommu(&pci_bus_type, &amd_iommu_ops); if (err) return err; @@ -2840,23 +2983,12 @@ int __init amd_iommu_init_api(void) err = bus_set_iommu(&platform_bus_type, &amd_iommu_ops); if (err) return err; - return 0; - -out_put_iova: - for_each_possible_cpu(cpu) { - struct flush_queue *queue = per_cpu_ptr(&flush_queue, cpu); - - kfree(queue->entries); - } - return -ENOMEM; + return 0; } int __init amd_iommu_init_dma_ops(void) { - setup_timer(&queue_timer, queue_flush_timeout, 0); - atomic_set(&queue_timer_on, 0); - swiotlb = iommu_pass_through ? 
1 : 0; iommu_detected = 1; @@ -3012,12 +3144,6 @@ static void amd_iommu_domain_free(struct iommu_domain *dom) switch (dom->type) { case IOMMU_DOMAIN_DMA: - /* - * First make sure the domain is no longer referenced from the - * flush queue - */ - queue_flush_all(); - /* Now release the domain */ dma_dom = to_dma_ops_domain(domain); dma_ops_domain_free(dma_dom); @@ -4281,7 +4407,7 @@ static void irq_remapping_deactivate(struct irq_domain *domain, irte_info->index); } -static struct irq_domain_ops amd_ir_domain_ops = { +static const struct irq_domain_ops amd_ir_domain_ops = { .alloc = irq_remapping_alloc, .free = irq_remapping_free, .activate = irq_remapping_activate, diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 5a11328f4d98..5cc597b383c7 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -29,6 +29,7 @@ #include <linux/export.h> #include <linux/iommu.h> #include <linux/kmemleak.h> +#include <linux/crash_dump.h> #include <asm/pci-direct.h> #include <asm/iommu.h> #include <asm/gart.h> @@ -236,6 +237,7 @@ enum iommu_init_state { IOMMU_INITIALIZED, IOMMU_NOT_FOUND, IOMMU_INIT_ERROR, + IOMMU_CMDLINE_DISABLED, }; /* Early ioapic and hpet maps from kernel command line */ @@ -588,6 +590,8 @@ void amd_iommu_reset_cmd_buffer(struct amd_iommu *iommu) writel(0x00, iommu->mmio_base + MMIO_CMD_HEAD_OFFSET); writel(0x00, iommu->mmio_base + MMIO_CMD_TAIL_OFFSET); + iommu->cmd_buf_head = 0; + iommu->cmd_buf_tail = 0; iommu_feature_enable(iommu, CONTROL_CMDBUF_EN); } @@ -1898,6 +1902,14 @@ static void init_device_table_dma(void) for (devid = 0; devid <= amd_iommu_last_bdf; ++devid) { set_dev_entry_bit(devid, DEV_ENTRY_VALID); set_dev_entry_bit(devid, DEV_ENTRY_TRANSLATION); + /* + * In kdump kernels in-flight DMA from the old kernel might + * cause IO_PAGE_FAULTs. There are no reports that a kdump + * actually failed because of that, so just disable fault + * reporting in the hardware to get rid of the messages + */ + if (is_kdump_kernel()) + set_dev_entry_bit(devid, DEV_ENTRY_NO_PAGE_FAULT); } } @@ -2097,23 +2109,27 @@ static struct syscore_ops amd_iommu_syscore_ops = { .resume = amd_iommu_resume, }; -static void __init free_on_init_error(void) +static void __init free_iommu_resources(void) { kmemleak_free(irq_lookup_table); free_pages((unsigned long)irq_lookup_table, get_order(rlookup_table_size)); + irq_lookup_table = NULL; kmem_cache_destroy(amd_iommu_irq_cache); amd_iommu_irq_cache = NULL; free_pages((unsigned long)amd_iommu_rlookup_table, get_order(rlookup_table_size)); + amd_iommu_rlookup_table = NULL; free_pages((unsigned long)amd_iommu_alias_table, get_order(alias_table_size)); + amd_iommu_alias_table = NULL; free_pages((unsigned long)amd_iommu_dev_table, get_order(dev_table_size)); + amd_iommu_dev_table = NULL; free_iommu_all(); @@ -2183,6 +2199,7 @@ static void __init free_dma_resources(void) { free_pages((unsigned long)amd_iommu_pd_alloc_bitmap, get_order(MAX_DOMAIN_ID/8)); + amd_iommu_pd_alloc_bitmap = NULL; free_unity_maps(); } @@ -2307,6 +2324,9 @@ static int __init early_amd_iommu_init(void) if (ret) goto out; + /* Disable any previously enabled IOMMUs */ + disable_iommus(); + if (amd_iommu_irq_remap) amd_iommu_irq_remap = check_ioapic_information(); @@ -2410,6 +2430,13 @@ static int __init state_next(void) case IOMMU_IVRS_DETECTED: ret = early_amd_iommu_init(); init_state = ret ? 
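The init rework above adds IOMMU_CMDLINE_DISABLED as a third terminal state and reorders iommu_go_to_state() to check for terminal states before stepping, so a walk that begins in one can neither loop nor return a stale success. The corrected control flow as a compilable sketch (demo_state_next() is a stub for the real state machine):

enum demo_state { DETECTED, FINISHED, NOT_FOUND, INIT_ERROR, CMDLINE_DISABLED };

static int demo_state_next(enum demo_state *s)
{
	*s = FINISHED;		/* stub: the real one advances one step */
	return 0;
}

static int demo_go_to_state(enum demo_state *cur, enum demo_state target)
{
	int ret = -1;		/* the driver starts from -EINVAL */

	while (*cur != target) {
		if (*cur == NOT_FOUND || *cur == INIT_ERROR ||
		    *cur == CMDLINE_DISABLED)
			break;	/* terminal: do not step again */
		ret = demo_state_next(cur);
	}
	return ret;
}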
IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED; + if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) { + pr_info("AMD-Vi: AMD IOMMU disabled on kernel command-line\n"); + free_dma_resources(); + free_iommu_resources(); + init_state = IOMMU_CMDLINE_DISABLED; + ret = -EINVAL; + } break; case IOMMU_ACPI_FINISHED: early_enable_iommus(); @@ -2438,6 +2465,7 @@ static int __init state_next(void) break; case IOMMU_NOT_FOUND: case IOMMU_INIT_ERROR: + case IOMMU_CMDLINE_DISABLED: /* Error states => do nothing */ ret = -EINVAL; break; @@ -2451,13 +2479,14 @@ static int __init state_next(void) static int __init iommu_go_to_state(enum iommu_init_state state) { - int ret = 0; + int ret = -EINVAL; while (init_state != state) { - ret = state_next(); - if (init_state == IOMMU_NOT_FOUND || - init_state == IOMMU_INIT_ERROR) + if (init_state == IOMMU_NOT_FOUND || + init_state == IOMMU_INIT_ERROR || + init_state == IOMMU_CMDLINE_DISABLED) break; + ret = state_next(); } return ret; @@ -2522,7 +2551,7 @@ static int __init amd_iommu_init(void) free_dma_resources(); if (!irq_remapping_enabled) { disable_iommus(); - free_on_init_error(); + free_iommu_resources(); } else { struct amd_iommu *iommu; @@ -2549,9 +2578,6 @@ int __init amd_iommu_detect(void) if (no_iommu || (iommu_detected && !gart_iommu_aperture)) return -ENODEV; - if (amd_iommu_disabled) - return -ENODEV; - ret = iommu_go_to_state(IOMMU_IVRS_DETECTED); if (ret) return ret; diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h index 4de8f4160bb8..294a409e283b 100644 --- a/drivers/iommu/amd_iommu_types.h +++ b/drivers/iommu/amd_iommu_types.h @@ -322,6 +322,7 @@ #define IOMMU_PTE_IW (1ULL << 62) #define DTE_FLAG_IOTLB (1ULL << 32) +#define DTE_FLAG_SA (1ULL << 34) #define DTE_FLAG_GV (1ULL << 55) #define DTE_FLAG_MASK (0x3ffULL << 32) #define DTE_GLX_SHIFT (56) @@ -516,6 +517,8 @@ struct amd_iommu { /* command buffer virtual address */ u8 *cmd_buf; + u32 cmd_buf_head; + u32 cmd_buf_tail; /* event buffer virtual address */ u8 *evt_buf; diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c index 380969aa60d5..568c400eeaed 100644 --- a/drivers/iommu/arm-smmu-v3.c +++ b/drivers/iommu/arm-smmu-v3.c @@ -408,10 +408,20 @@ /* High-level queue structures */ #define ARM_SMMU_POLL_TIMEOUT_US 100 +#define ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US 1000000 /* 1s! */ #define MSI_IOVA_BASE 0x8000000 #define MSI_IOVA_LENGTH 0x100000 +/* Until ACPICA headers cover IORT rev. 
C */ +#ifndef ACPI_IORT_SMMU_HISILICON_HI161X +#define ACPI_IORT_SMMU_HISILICON_HI161X 0x1 +#endif + +#ifndef ACPI_IORT_SMMU_V3_CAVIUM_CN99XX +#define ACPI_IORT_SMMU_V3_CAVIUM_CN99XX 0x2 +#endif + static bool disable_bypass; module_param_named(disable_bypass, disable_bypass, bool, S_IRUGO); MODULE_PARM_DESC(disable_bypass, @@ -597,6 +607,7 @@ struct arm_smmu_device { u32 features; #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) +#define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1) u32 options; struct arm_smmu_cmdq cmdq; @@ -604,6 +615,7 @@ struct arm_smmu_device { struct arm_smmu_priq priq; int gerr_irq; + int combined_irq; unsigned long ias; /* IPA */ unsigned long oas; /* PA */ @@ -645,7 +657,6 @@ struct arm_smmu_domain { struct mutex init_mutex; /* Protects smmu pointer */ struct io_pgtable_ops *pgtbl_ops; - spinlock_t pgtbl_lock; enum arm_smmu_domain_stage stage; union { @@ -663,9 +674,20 @@ struct arm_smmu_option_prop { static struct arm_smmu_option_prop arm_smmu_options[] = { { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" }, + { ARM_SMMU_OPT_PAGE0_REGS_ONLY, "cavium,cn9900-broken-page1-regspace"}, { 0, NULL}, }; +static inline void __iomem *arm_smmu_page1_fixup(unsigned long offset, + struct arm_smmu_device *smmu) +{ + if ((offset > SZ_64K) && + (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY)) + offset -= SZ_64K; + + return smmu->base + offset; +} + static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) { return container_of(dom, struct arm_smmu_domain, domain); @@ -737,7 +759,13 @@ static void queue_inc_prod(struct arm_smmu_queue *q) */ static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe) { - ktime_t timeout = ktime_add_us(ktime_get(), ARM_SMMU_POLL_TIMEOUT_US); + ktime_t timeout; + unsigned int delay = 1; + + /* Wait longer if it's queue drain */ + timeout = ktime_add_us(ktime_get(), drain ? + ARM_SMMU_CMDQ_DRAIN_TIMEOUT_US : + ARM_SMMU_POLL_TIMEOUT_US); while (queue_sync_cons(q), (drain ? 
!queue_empty(q) : queue_full(q))) { if (ktime_compare(ktime_get(), timeout) > 0) @@ -747,7 +775,8 @@ static int queue_poll_cons(struct arm_smmu_queue *q, bool drain, bool wfe) wfe(); } else { cpu_relax(); - udelay(1); + udelay(delay); + delay *= 2; } } @@ -1302,6 +1331,24 @@ static irqreturn_t arm_smmu_gerror_handler(int irq, void *dev) return IRQ_HANDLED; } +static irqreturn_t arm_smmu_combined_irq_thread(int irq, void *dev) +{ + struct arm_smmu_device *smmu = dev; + + arm_smmu_evtq_thread(irq, dev); + if (smmu->features & ARM_SMMU_FEAT_PRI) + arm_smmu_priq_thread(irq, dev); + + return IRQ_HANDLED; +} + +static irqreturn_t arm_smmu_combined_irq_handler(int irq, void *dev) +{ + arm_smmu_gerror_handler(irq, dev); + arm_smmu_cmdq_sync_handler(irq, dev); + return IRQ_WAKE_THREAD; +} + /* IO_PGTABLE API */ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu) { @@ -1406,7 +1453,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) } mutex_init(&smmu_domain->init_mutex); - spin_lock_init(&smmu_domain->pgtbl_lock); return &smmu_domain->domain; } @@ -1555,6 +1601,9 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) .iommu_dev = smmu->dev, }; + if (smmu->features & ARM_SMMU_FEAT_COHERENCY) + pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA; + pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); if (!pgtbl_ops) return -ENOMEM; @@ -1675,44 +1724,29 @@ out_unlock: static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { - int ret; - unsigned long flags; - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; if (!ops) return -ENODEV; - spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->map(ops, iova, paddr, size, prot); - spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - return ret; + return ops->map(ops, iova, paddr, size, prot); } static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) { - size_t ret; - unsigned long flags; - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; if (!ops) return 0; - spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->unmap(ops, iova, size); - spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - return ret; + return ops->unmap(ops, iova, size); } static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { - phys_addr_t ret; - unsigned long flags; - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; + struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; if (domain->type == IOMMU_DOMAIN_IDENTITY) return iova; @@ -1720,11 +1754,7 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) if (!ops) return 0; - spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->iova_to_phys(ops, iova); - spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - - return ret; + return ops->iova_to_phys(ops, iova); } static struct platform_driver arm_smmu_driver; @@ -1961,8 +1991,8 @@ static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, return -ENOMEM; } - q->prod_reg = smmu->base + prod_off; - q->cons_reg = smmu->base + cons_off; + q->prod_reg = arm_smmu_page1_fixup(prod_off, smmu); + q->cons_reg = 
arm_smmu_page1_fixup(cons_off, smmu); q->ent_dwords = dwords; q->q_base = Q_BASE_RWA; @@ -2218,18 +2248,9 @@ static void arm_smmu_setup_msis(struct arm_smmu_device *smmu) devm_add_action(dev, arm_smmu_free_msis, dev); } -static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) +static void arm_smmu_setup_unique_irqs(struct arm_smmu_device *smmu) { - int ret, irq; - u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN; - - /* Disable IRQs first */ - ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL, - ARM_SMMU_IRQ_CTRLACK); - if (ret) { - dev_err(smmu->dev, "failed to disable irqs\n"); - return ret; - } + int irq, ret; arm_smmu_setup_msis(smmu); @@ -2272,10 +2293,41 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) if (ret < 0) dev_warn(smmu->dev, "failed to enable priq irq\n"); - else - irqen_flags |= IRQ_CTRL_PRIQ_IRQEN; } } +} + +static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu) +{ + int ret, irq; + u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN; + + /* Disable IRQs first */ + ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL, + ARM_SMMU_IRQ_CTRLACK); + if (ret) { + dev_err(smmu->dev, "failed to disable irqs\n"); + return ret; + } + + irq = smmu->combined_irq; + if (irq) { + /* + * Cavium ThunderX2 implementation doesn't not support unique + * irq lines. Use single irq line for all the SMMUv3 interrupts. + */ + ret = devm_request_threaded_irq(smmu->dev, irq, + arm_smmu_combined_irq_handler, + arm_smmu_combined_irq_thread, + IRQF_ONESHOT, + "arm-smmu-v3-combined-irq", smmu); + if (ret < 0) + dev_warn(smmu->dev, "failed to enable combined irq\n"); + } else + arm_smmu_setup_unique_irqs(smmu); + + if (smmu->features & ARM_SMMU_FEAT_PRI) + irqen_flags |= IRQ_CTRL_PRIQ_IRQEN; /* Enable interrupt generation on the SMMU */ ret = arm_smmu_write_reg_sync(smmu, irqen_flags, @@ -2363,8 +2415,10 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) /* Event queue */ writeq_relaxed(smmu->evtq.q.q_base, smmu->base + ARM_SMMU_EVTQ_BASE); - writel_relaxed(smmu->evtq.q.prod, smmu->base + ARM_SMMU_EVTQ_PROD); - writel_relaxed(smmu->evtq.q.cons, smmu->base + ARM_SMMU_EVTQ_CONS); + writel_relaxed(smmu->evtq.q.prod, + arm_smmu_page1_fixup(ARM_SMMU_EVTQ_PROD, smmu)); + writel_relaxed(smmu->evtq.q.cons, + arm_smmu_page1_fixup(ARM_SMMU_EVTQ_CONS, smmu)); enables |= CR0_EVTQEN; ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, @@ -2379,9 +2433,9 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) writeq_relaxed(smmu->priq.q.q_base, smmu->base + ARM_SMMU_PRIQ_BASE); writel_relaxed(smmu->priq.q.prod, - smmu->base + ARM_SMMU_PRIQ_PROD); + arm_smmu_page1_fixup(ARM_SMMU_PRIQ_PROD, smmu)); writel_relaxed(smmu->priq.q.cons, - smmu->base + ARM_SMMU_PRIQ_CONS); + arm_smmu_page1_fixup(ARM_SMMU_PRIQ_CONS, smmu)); enables |= CR0_PRIQEN; ret = arm_smmu_write_reg_sync(smmu, enables, ARM_SMMU_CR0, @@ -2605,6 +2659,20 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) } #ifdef CONFIG_ACPI +static void acpi_smmu_get_options(u32 model, struct arm_smmu_device *smmu) +{ + switch (model) { + case ACPI_IORT_SMMU_V3_CAVIUM_CN99XX: + smmu->options |= ARM_SMMU_OPT_PAGE0_REGS_ONLY; + break; + case ACPI_IORT_SMMU_HISILICON_HI161X: + smmu->options |= ARM_SMMU_OPT_SKIP_PREFETCH; + break; + } + + dev_notice(smmu->dev, "option mask 0x%x\n", smmu->options); +} + static int arm_smmu_device_acpi_probe(struct platform_device *pdev, struct arm_smmu_device *smmu) { @@ -2617,6 +2685,8 @@ static int 
arm_smmu_device_acpi_probe(struct platform_device *pdev, /* Retrieve SMMUv3 specific data */ iort_smmu = (struct acpi_iort_smmu_v3 *)node->node_data; + acpi_smmu_get_options(iort_smmu->model, smmu); + if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) smmu->features |= ARM_SMMU_FEAT_COHERENCY; @@ -2652,6 +2722,14 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev, return ret; } +static unsigned long arm_smmu_resource_size(struct arm_smmu_device *smmu) +{ + if (smmu->options & ARM_SMMU_OPT_PAGE0_REGS_ONLY) + return SZ_64K; + else + return SZ_128K; +} + static int arm_smmu_device_probe(struct platform_device *pdev) { int irq, ret; @@ -2668,9 +2746,20 @@ static int arm_smmu_device_probe(struct platform_device *pdev) } smmu->dev = dev; + if (dev->of_node) { + ret = arm_smmu_device_dt_probe(pdev, smmu); + } else { + ret = arm_smmu_device_acpi_probe(pdev, smmu); + if (ret == -ENODEV) + return ret; + } + + /* Set bypass mode according to firmware probing result */ + bypass = !!ret; + /* Base address */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (resource_size(res) + 1 < SZ_128K) { + if (resource_size(res) + 1 < arm_smmu_resource_size(smmu)) { dev_err(dev, "MMIO region too small (%pr)\n", res); return -EINVAL; } @@ -2681,33 +2770,27 @@ static int arm_smmu_device_probe(struct platform_device *pdev) return PTR_ERR(smmu->base); /* Interrupt lines */ - irq = platform_get_irq_byname(pdev, "eventq"); - if (irq > 0) - smmu->evtq.q.irq = irq; - irq = platform_get_irq_byname(pdev, "priq"); + irq = platform_get_irq_byname(pdev, "combined"); if (irq > 0) - smmu->priq.q.irq = irq; + smmu->combined_irq = irq; + else { + irq = platform_get_irq_byname(pdev, "eventq"); + if (irq > 0) + smmu->evtq.q.irq = irq; - irq = platform_get_irq_byname(pdev, "cmdq-sync"); - if (irq > 0) - smmu->cmdq.q.irq = irq; + irq = platform_get_irq_byname(pdev, "priq"); + if (irq > 0) + smmu->priq.q.irq = irq; - irq = platform_get_irq_byname(pdev, "gerror"); - if (irq > 0) - smmu->gerr_irq = irq; + irq = platform_get_irq_byname(pdev, "cmdq-sync"); + if (irq > 0) + smmu->cmdq.q.irq = irq; - if (dev->of_node) { - ret = arm_smmu_device_dt_probe(pdev, smmu); - } else { - ret = arm_smmu_device_acpi_probe(pdev, smmu); - if (ret == -ENODEV) - return ret; + irq = platform_get_irq_byname(pdev, "gerror"); + if (irq > 0) + smmu->gerr_irq = irq; } - - /* Set bypass mode according to firmware probing result */ - bypass = !!ret; - /* Probe the h/w */ ret = arm_smmu_device_hw_probe(smmu); if (ret) @@ -2736,6 +2819,10 @@ static int arm_smmu_device_probe(struct platform_device *pdev) iommu_device_set_fwnode(&smmu->iommu, dev->fwnode); ret = iommu_device_register(&smmu->iommu); + if (ret) { + dev_err(dev, "Failed to register iommu\n"); + return ret; + } #ifdef CONFIG_PCI if (pci_bus_type.iommu_ops != &arm_smmu_ops) { @@ -2768,7 +2855,7 @@ static int arm_smmu_device_remove(struct platform_device *pdev) return 0; } -static struct of_device_id arm_smmu_of_match[] = { +static const struct of_device_id arm_smmu_of_match[] = { { .compatible = "arm,smmu-v3", }, { }, }; diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c index 7ec30b08b3bd..bc89b4d6c043 100644 --- a/drivers/iommu/arm-smmu.c +++ b/drivers/iommu/arm-smmu.c @@ -312,6 +312,14 @@ enum arm_smmu_implementation { CAVIUM_SMMUV2, }; +/* Until ACPICA headers cover IORT rev. 
C */ +#ifndef ACPI_IORT_SMMU_CORELINK_MMU401 +#define ACPI_IORT_SMMU_CORELINK_MMU401 0x4 +#endif +#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX +#define ACPI_IORT_SMMU_CAVIUM_THUNDERX 0x5 +#endif + struct arm_smmu_s2cr { struct iommu_group *group; int count; @@ -425,10 +433,10 @@ enum arm_smmu_domain_stage { struct arm_smmu_domain { struct arm_smmu_device *smmu; struct io_pgtable_ops *pgtbl_ops; - spinlock_t pgtbl_lock; struct arm_smmu_cfg cfg; enum arm_smmu_domain_stage stage; struct mutex init_mutex; /* Protects smmu pointer */ + spinlock_t cb_lock; /* Serialises ATS1* ops */ struct iommu_domain domain; }; @@ -1010,6 +1018,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, .iommu_dev = smmu->dev, }; + if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) + pgtbl_cfg.quirks = IO_PGTABLE_QUIRK_NO_DMA; + smmu_domain->smmu = smmu; pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); if (!pgtbl_ops) { @@ -1102,7 +1113,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) } mutex_init(&smmu_domain->init_mutex); - spin_lock_init(&smmu_domain->pgtbl_lock); + spin_lock_init(&smmu_domain->cb_lock); return &smmu_domain->domain; } @@ -1380,35 +1391,23 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, phys_addr_t paddr, size_t size, int prot) { - int ret; - unsigned long flags; - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; if (!ops) return -ENODEV; - spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->map(ops, iova, paddr, size, prot); - spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - return ret; + return ops->map(ops, iova, paddr, size, prot); } static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) { - size_t ret; - unsigned long flags; - struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; if (!ops) return 0; - spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); - ret = ops->unmap(ops, iova, size); - spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - return ret; + return ops->unmap(ops, iova, size); } static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, @@ -1422,10 +1421,11 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, void __iomem *cb_base; u32 tmp; u64 phys; - unsigned long va; + unsigned long va, flags; cb_base = ARM_SMMU_CB(smmu, cfg->cbndx); + spin_lock_irqsave(&smmu_domain->cb_lock, flags); /* ATS1 registers can only be written atomically */ va = iova & ~0xfffUL; if (smmu->version == ARM_SMMU_V2) @@ -1435,6 +1435,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp, !(tmp & ATSR_ACTIVE), 5, 50)) { + spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); dev_err(dev, "iova to phys timed out on %pad. 
Falling back to software table walk.\n", &iova); @@ -1442,6 +1443,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, } phys = readq_relaxed(cb_base + ARM_SMMU_CB_PAR); + spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); if (phys & CB_PAR_F) { dev_err(dev, "translation fault!\n"); dev_err(dev, "PAR = 0x%llx\n", phys); @@ -1454,10 +1456,8 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) { - phys_addr_t ret; - unsigned long flags; struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; + struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; if (domain->type == IOMMU_DOMAIN_IDENTITY) return iova; @@ -1465,17 +1465,11 @@ static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, if (!ops) return 0; - spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && - smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { - ret = arm_smmu_iova_to_phys_hard(domain, iova); - } else { - ret = ops->iova_to_phys(ops, iova); - } - - spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); + smmu_domain->stage == ARM_SMMU_DOMAIN_S1) + return arm_smmu_iova_to_phys_hard(domain, iova); - return ret; + return ops->iova_to_phys(ops, iova); } static bool arm_smmu_capable(enum iommu_cap cap) @@ -2073,6 +2067,10 @@ static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu) smmu->version = ARM_SMMU_V1; smmu->model = GENERIC_SMMU; break; + case ACPI_IORT_SMMU_CORELINK_MMU401: + smmu->version = ARM_SMMU_V1_64K; + smmu->model = GENERIC_SMMU; + break; case ACPI_IORT_SMMU_V2: smmu->version = ARM_SMMU_V2; smmu->model = GENERIC_SMMU; @@ -2081,6 +2079,10 @@ static int acpi_smmu_get_data(u32 model, struct arm_smmu_device *smmu) smmu->version = ARM_SMMU_V2; smmu->model = ARM_MMU500; break; + case ACPI_IORT_SMMU_CAVIUM_THUNDERX: + smmu->version = ARM_SMMU_V2; + smmu->model = CAVIUM_SMMUV2; + break; default: ret = -ENODEV; } diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c index 9403336f1fa6..9d1cebe7f6cb 100644 --- a/drivers/iommu/dma-iommu.c +++ b/drivers/iommu/dma-iommu.c @@ -316,7 +316,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base, * If we have devices with different DMA masks, move the free * area cache limit down for the benefit of the smaller one. 
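 * Note that dma_32bit_pfn is now an *exclusive* upper bound
 * (init_iova_domain() stores pfn_32bit + 1), so the inclusive end_pfn
 * derived from the device's DMA mask has to be converted with "+ 1"
 * before the min() below. For example, with a 4K IOVA granule a plain
 * 32-bit mask gives end_pfn == 0xfffff, i.e. an exclusive limit of
 * 0x100000, which then compares correctly against dma_32bit_pfn.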
*/ - iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn); + iovad->dma_32bit_pfn = min(end_pfn + 1, iovad->dma_32bit_pfn); return 0; } diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 1e95475883cd..687f18f65cea 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -481,7 +481,7 @@ struct deferred_flush_data { struct deferred_flush_table *tables; }; -DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush); +static DEFINE_PER_CPU(struct deferred_flush_data, deferred_flush); /* bitmap for indexing intel_iommus */ static int g_num_of_iommus; @@ -2390,7 +2390,7 @@ static struct dmar_domain *find_domain(struct device *dev) /* No lock here, assumes no domain exit in normal case */ info = dev->archdata.iommu; - if (info) + if (likely(info)) return info->domain; return NULL; } @@ -3478,7 +3478,7 @@ static unsigned long intel_alloc_iova(struct device *dev, return iova_pfn; } -static struct dmar_domain *__get_valid_domain_for_dev(struct device *dev) +static struct dmar_domain *get_valid_domain_for_dev(struct device *dev) { struct dmar_domain *domain, *tmp; struct dmar_rmrr_unit *rmrr; @@ -3525,18 +3525,6 @@ out: return domain; } -static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev) -{ - struct device_domain_info *info; - - /* No lock here, assumes no domain exit in normal case */ - info = dev->archdata.iommu; - if (likely(info)) - return info->domain; - - return __get_valid_domain_for_dev(dev); -} - /* Check if the dev needs to go through non-identity map and unmap process.*/ static int iommu_no_mapping(struct device *dev) { @@ -3725,10 +3713,8 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn, struct intel_iommu *iommu; struct deferred_flush_entry *entry; struct deferred_flush_data *flush_data; - unsigned int cpuid; - cpuid = get_cpu(); - flush_data = per_cpu_ptr(&deferred_flush, cpuid); + flush_data = raw_cpu_ptr(&deferred_flush); /* Flush all CPUs' entries to avoid deferring too much. 
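 * (The per-CPU flush_data is looked up with raw_cpu_ptr() rather than
 * get_cpu()/put_cpu(): every field is accessed under flush_data->lock
 * with interrupts disabled, so there is no need to pin the task to a
 * CPU just for the lookup. At worst the task migrates right after the
 * lookup and queues its entry on the previous CPU's tables, which get
 * flushed like anyone else's.)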
If * this becomes a bottleneck, can just flush us, and rely on @@ -3761,8 +3747,6 @@ static void add_unmap(struct dmar_domain *dom, unsigned long iova_pfn, } flush_data->size++; spin_unlock_irqrestore(&flush_data->lock, flags); - - put_cpu(); } static void intel_unmap(struct device *dev, dma_addr_t dev_addr, size_t size) @@ -3973,7 +3957,7 @@ static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr) return !dma_addr; } -struct dma_map_ops intel_dma_ops = { +const struct dma_map_ops intel_dma_ops = { .alloc = intel_alloc_coherent, .free = intel_free_coherent, .map_sg = intel_map_sg, diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c index 23c427602c55..f167c0d84ebf 100644 --- a/drivers/iommu/intel-svm.c +++ b/drivers/iommu/intel-svm.c @@ -489,6 +489,36 @@ int intel_svm_unbind_mm(struct device *dev, int pasid) } EXPORT_SYMBOL_GPL(intel_svm_unbind_mm); +int intel_svm_is_pasid_valid(struct device *dev, int pasid) +{ + struct intel_iommu *iommu; + struct intel_svm *svm; + int ret = -EINVAL; + + mutex_lock(&pasid_mutex); + iommu = intel_svm_device_to_iommu(dev); + if (!iommu || !iommu->pasid_table) + goto out; + + svm = idr_find(&iommu->pasid_idr, pasid); + if (!svm) + goto out; + + /* init_mm is used in this case */ + if (!svm->mm) + ret = 1; + else if (atomic_read(&svm->mm->mm_users) > 0) + ret = 1; + else + ret = 0; + + out: + mutex_unlock(&pasid_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(intel_svm_is_pasid_valid); + /* Page request queue descriptor */ struct page_req_dsc { u64 srr:1; diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 8fc641ea2e41..a5b89f6bcdbf 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -76,7 +76,7 @@ static struct hpet_scope ir_hpet[MAX_HPET_TBS]; * the dmar_global_lock. 
*/ static DEFINE_RAW_SPINLOCK(irq_2_ir_lock); -static struct irq_domain_ops intel_ir_domain_ops; +static const struct irq_domain_ops intel_ir_domain_ops; static void iommu_disable_irq_remapping(struct intel_iommu *iommu); static int __init parse_ioapics_under_ir(void); @@ -1407,7 +1407,7 @@ static void intel_irq_remapping_deactivate(struct irq_domain *domain, modify_irte(&data->irq_2_iommu, &entry); } -static struct irq_domain_ops intel_ir_domain_ops = { +static const struct irq_domain_ops intel_ir_domain_ops = { .alloc = intel_irq_remapping_alloc, .free = intel_irq_remapping_free, .activate = intel_irq_remapping_activate, diff --git a/drivers/iommu/io-pgtable-arm-v7s.c b/drivers/iommu/io-pgtable-arm-v7s.c index 8d6ca28c3e1f..af330f513653 100644 --- a/drivers/iommu/io-pgtable-arm-v7s.c +++ b/drivers/iommu/io-pgtable-arm-v7s.c @@ -32,6 +32,7 @@ #define pr_fmt(fmt) "arm-v7s io-pgtable: " fmt +#include <linux/atomic.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> #include <linux/iommu.h> @@ -39,6 +40,7 @@ #include <linux/kmemleak.h> #include <linux/sizes.h> #include <linux/slab.h> +#include <linux/spinlock.h> #include <linux/types.h> #include <asm/barrier.h> @@ -92,7 +94,8 @@ #define ARM_V7S_PTE_TYPE_CONT_PAGE 0x1 #define ARM_V7S_PTE_IS_VALID(pte) (((pte) & 0x3) != 0) -#define ARM_V7S_PTE_IS_TABLE(pte, lvl) (lvl == 1 && ((pte) & ARM_V7S_PTE_TYPE_TABLE)) +#define ARM_V7S_PTE_IS_TABLE(pte, lvl) \ + ((lvl) == 1 && (((pte) & 0x3) == ARM_V7S_PTE_TYPE_TABLE)) /* Page table bits */ #define ARM_V7S_ATTR_XN(lvl) BIT(4 * (2 - (lvl))) @@ -167,6 +170,7 @@ struct arm_v7s_io_pgtable { arm_v7s_iopte *pgd; struct kmem_cache *l2_tables; + spinlock_t split_lock; }; static dma_addr_t __arm_v7s_dma_addr(void *pages) @@ -186,7 +190,8 @@ static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl) static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, struct arm_v7s_io_pgtable *data) { - struct device *dev = data->iop.cfg.iommu_dev; + struct io_pgtable_cfg *cfg = &data->iop.cfg; + struct device *dev = cfg->iommu_dev; dma_addr_t dma; size_t size = ARM_V7S_TABLE_SIZE(lvl); void *table = NULL; @@ -195,7 +200,7 @@ static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp, table = (void *)__get_dma_pages(__GFP_ZERO, get_order(size)); else if (lvl == 2) table = kmem_cache_zalloc(data->l2_tables, gfp | GFP_DMA); - if (table && !selftest_running) { + if (table && !(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { dma = dma_map_single(dev, table, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) goto out_free; @@ -224,10 +229,11 @@ out_free: static void __arm_v7s_free_table(void *table, int lvl, struct arm_v7s_io_pgtable *data) { - struct device *dev = data->iop.cfg.iommu_dev; + struct io_pgtable_cfg *cfg = &data->iop.cfg; + struct device *dev = cfg->iommu_dev; size_t size = ARM_V7S_TABLE_SIZE(lvl); - if (!selftest_running) + if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) dma_unmap_single(dev, __arm_v7s_dma_addr(table), size, DMA_TO_DEVICE); if (lvl == 1) @@ -239,7 +245,7 @@ static void __arm_v7s_free_table(void *table, int lvl, static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries, struct io_pgtable_cfg *cfg) { - if (selftest_running) + if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) return; dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep), @@ -280,6 +286,13 @@ static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl, else if (prot & IOMMU_CACHE) pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C; + pte |= ARM_V7S_PTE_TYPE_PAGE; + if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)) + pte |= 
ARM_V7S_ATTR_NS_SECTION; + + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB) + pte |= ARM_V7S_ATTR_MTK_4GB; + return pte; } @@ -352,7 +365,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, int lvl, int num_entries, arm_v7s_iopte *ptep) { struct io_pgtable_cfg *cfg = &data->iop.cfg; - arm_v7s_iopte pte = arm_v7s_prot_to_pte(prot, lvl, cfg); + arm_v7s_iopte pte; int i; for (i = 0; i < num_entries; i++) @@ -374,13 +387,7 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, return -EEXIST; } - pte |= ARM_V7S_PTE_TYPE_PAGE; - if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)) - pte |= ARM_V7S_ATTR_NS_SECTION; - - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_4GB) - pte |= ARM_V7S_ATTR_MTK_4GB; - + pte = arm_v7s_prot_to_pte(prot, lvl, cfg); if (num_entries > 1) pte = arm_v7s_pte_to_cont(pte, lvl); @@ -390,6 +397,30 @@ static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data, return 0; } +static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table, + arm_v7s_iopte *ptep, + arm_v7s_iopte curr, + struct io_pgtable_cfg *cfg) +{ + arm_v7s_iopte old, new; + + new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE; + if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) + new |= ARM_V7S_ATTR_NS_TABLE; + + /* + * Ensure the table itself is visible before its PTE can be. + * Whilst we could get away with cmpxchg64_release below, this + * doesn't have any ordering semantics when !CONFIG_SMP. + */ + dma_wmb(); + + old = cmpxchg_relaxed(ptep, curr, new); + __arm_v7s_pte_sync(ptep, 1, cfg); + + return old; +} + static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, phys_addr_t paddr, size_t size, int prot, int lvl, arm_v7s_iopte *ptep) @@ -411,20 +442,23 @@ static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova, return -EINVAL; /* Grab a pointer to the next level */ - pte = *ptep; + pte = READ_ONCE(*ptep); if (!pte) { cptep = __arm_v7s_alloc_table(lvl + 1, GFP_ATOMIC, data); if (!cptep) return -ENOMEM; - pte = virt_to_phys(cptep) | ARM_V7S_PTE_TYPE_TABLE; - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) - pte |= ARM_V7S_ATTR_NS_TABLE; + pte = arm_v7s_install_table(cptep, ptep, 0, cfg); + if (pte) + __arm_v7s_free_table(cptep, lvl + 1, data); + } else { + /* We've no easy way of knowing if it's synced yet, so... 
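 * sync it again ourselves. In outline, the lock-free install done by
 * arm_v7s_install_table() above is:
 *
 *	new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE;
 *	dma_wmb();
 *	old = cmpxchg_relaxed(ptep, curr, new);
 *	__arm_v7s_pte_sync(ptep, 1, cfg);
 *
 * The dma_wmb() makes the table contents visible before the PTE that
 * points at them. A loser of the cmpxchg frees its table and walks the
 * winner's instead, but a thread that merely observes an already-present
 * PTE cannot tell whether the winner has reached the sync yet; hence the
 * unconditional (and idempotent) extra sync on this path.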
*/ + __arm_v7s_pte_sync(ptep, 1, cfg); + } - __arm_v7s_set_pte(ptep, pte, 1, cfg); - } else if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) { + if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) { cptep = iopte_deref(pte, lvl); - } else { + } else if (pte) { /* We require an unmap first */ WARN_ON(!selftest_running); return -EEXIST; @@ -477,66 +511,73 @@ static void arm_v7s_free_pgtable(struct io_pgtable *iop) kfree(data); } -static void arm_v7s_split_cont(struct arm_v7s_io_pgtable *data, - unsigned long iova, int idx, int lvl, - arm_v7s_iopte *ptep) +static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data, + unsigned long iova, int idx, int lvl, + arm_v7s_iopte *ptep) { struct io_pgtable *iop = &data->iop; arm_v7s_iopte pte; size_t size = ARM_V7S_BLOCK_SIZE(lvl); int i; + /* Check that we didn't lose a race to get the lock */ + pte = *ptep; + if (!arm_v7s_pte_is_cont(pte, lvl)) + return pte; + ptep -= idx & (ARM_V7S_CONT_PAGES - 1); - pte = arm_v7s_cont_to_pte(*ptep, lvl); - for (i = 0; i < ARM_V7S_CONT_PAGES; i++) { - ptep[i] = pte; - pte += size; - } + pte = arm_v7s_cont_to_pte(pte, lvl); + for (i = 0; i < ARM_V7S_CONT_PAGES; i++) + ptep[i] = pte + i * size; __arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg); size *= ARM_V7S_CONT_PAGES; io_pgtable_tlb_add_flush(iop, iova, size, size, true); io_pgtable_tlb_sync(iop); + return pte; } static int arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data, unsigned long iova, size_t size, - arm_v7s_iopte *ptep) + arm_v7s_iopte blk_pte, arm_v7s_iopte *ptep) { - unsigned long blk_start, blk_end, blk_size; - phys_addr_t blk_paddr; - arm_v7s_iopte table = 0; - int prot = arm_v7s_pte_to_prot(*ptep, 1); + struct io_pgtable_cfg *cfg = &data->iop.cfg; + arm_v7s_iopte pte, *tablep; + int i, unmap_idx, num_entries, num_ptes; - blk_size = ARM_V7S_BLOCK_SIZE(1); - blk_start = iova & ARM_V7S_LVL_MASK(1); - blk_end = blk_start + ARM_V7S_BLOCK_SIZE(1); - blk_paddr = *ptep & ARM_V7S_LVL_MASK(1); + tablep = __arm_v7s_alloc_table(2, GFP_ATOMIC, data); + if (!tablep) + return 0; /* Bytes unmapped */ - for (; blk_start < blk_end; blk_start += size, blk_paddr += size) { - arm_v7s_iopte *tablep; + num_ptes = ARM_V7S_PTES_PER_LVL(2); + num_entries = size >> ARM_V7S_LVL_SHIFT(2); + unmap_idx = ARM_V7S_LVL_IDX(iova, 2); + pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, cfg); + if (num_entries > 1) + pte = arm_v7s_pte_to_cont(pte, 2); + + for (i = 0; i < num_ptes; i += num_entries, pte += size) { /* Unmap! 
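 * The slot being unmapped is simply left empty in the replacement
 * table: e.g. unmapping one 4K page out of a 1M section fills a fresh
 * 256-entry level-2 table with page mappings for the whole section
 * except the index ARM_V7S_LVL_IDX(iova, 2). The new table is then
 * swapped in atomically by arm_v7s_install_table() below; if that
 * races with another update, the unmap is retried in whichever table
 * actually won.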
*/ - if (blk_start == iova) + if (i == unmap_idx) continue; - /* __arm_v7s_map expects a pointer to the start of the table */ - tablep = &table - ARM_V7S_LVL_IDX(blk_start, 1); - if (__arm_v7s_map(data, blk_start, blk_paddr, size, prot, 1, - tablep) < 0) { - if (table) { - /* Free the table we allocated */ - tablep = iopte_deref(table, 1); - __arm_v7s_free_table(tablep, 2, data); - } - return 0; /* Bytes unmapped */ - } + __arm_v7s_set_pte(&tablep[i], pte, num_entries, cfg); } - __arm_v7s_set_pte(ptep, table, 1, &data->iop.cfg); - iova &= ~(blk_size - 1); - io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true); + pte = arm_v7s_install_table(tablep, ptep, blk_pte, cfg); + if (pte != blk_pte) { + __arm_v7s_free_table(tablep, 2, data); + + if (!ARM_V7S_PTE_IS_TABLE(pte, 1)) + return 0; + + tablep = iopte_deref(pte, 1); + return __arm_v7s_unmap(data, iova, size, 2, tablep); + } + + io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); return size; } @@ -555,17 +596,28 @@ static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, idx = ARM_V7S_LVL_IDX(iova, lvl); ptep += idx; do { - if (WARN_ON(!ARM_V7S_PTE_IS_VALID(ptep[i]))) + pte[i] = READ_ONCE(ptep[i]); + if (WARN_ON(!ARM_V7S_PTE_IS_VALID(pte[i]))) return 0; - pte[i] = ptep[i]; } while (++i < num_entries); /* * If we've hit a contiguous 'large page' entry at this level, it * needs splitting first, unless we're unmapping the whole lot. + * + * For splitting, we can't rewrite 16 PTEs atomically, and since we + * can't necessarily assume TEX remap we don't have a software bit to + * mark live entries being split. In practice (i.e. DMA API code), we + * will never be splitting large pages anyway, so just wrap this edge + * case in a lock for the sake of correctness and be done with it. */ - if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl)) - arm_v7s_split_cont(data, iova, idx, lvl, ptep); + if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl)) { + unsigned long flags; + + spin_lock_irqsave(&data->split_lock, flags); + pte[0] = arm_v7s_split_cont(data, iova, idx, lvl, ptep); + spin_unlock_irqrestore(&data->split_lock, flags); + } /* If the size matches this level, we're in the right place */ if (num_entries) { @@ -593,7 +645,7 @@ static int __arm_v7s_unmap(struct arm_v7s_io_pgtable *data, * Insert a table at the next level to map the old region, * minus the part we want to unmap */ - return arm_v7s_split_blk_unmap(data, iova, size, ptep); + return arm_v7s_split_blk_unmap(data, iova, size, pte[0], ptep); } /* Keep on walkin' */ @@ -623,7 +675,8 @@ static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops, u32 mask; do { - pte = ptep[ARM_V7S_LVL_IDX(iova, ++lvl)]; + ptep += ARM_V7S_LVL_IDX(iova, ++lvl); + pte = READ_ONCE(*ptep); ptep = iopte_deref(pte, lvl); } while (ARM_V7S_PTE_IS_TABLE(pte, lvl)); @@ -651,7 +704,8 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_PERMS | IO_PGTABLE_QUIRK_TLBI_ON_MAP | - IO_PGTABLE_QUIRK_ARM_MTK_4GB)) + IO_PGTABLE_QUIRK_ARM_MTK_4GB | + IO_PGTABLE_QUIRK_NO_DMA)) return NULL; /* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. 
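 * IO_PGTABLE_QUIRK_NO_DMA is newly accepted here: it promises that the
 * tables will only ever be read by a fully coherent walker (or by the
 * CPU itself, as in the selftests below), so the dma_map_single()/
 * dma_sync_single_for_device() maintenance that normally follows every
 * PTE update can be skipped altogether.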
*/ @@ -663,6 +717,7 @@ static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg, if (!data) return NULL; + spin_lock_init(&data->split_lock); data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2", ARM_V7S_TABLE_SIZE(2), ARM_V7S_TABLE_SIZE(2), @@ -749,7 +804,7 @@ static void dummy_tlb_sync(void *cookie) WARN_ON(cookie != cfg_cookie); } -static struct iommu_gather_ops dummy_tlb_ops = { +static const struct iommu_gather_ops dummy_tlb_ops = { .tlb_flush_all = dummy_tlb_flush_all, .tlb_add_flush = dummy_tlb_add_flush, .tlb_sync = dummy_tlb_sync, @@ -768,7 +823,7 @@ static int __init arm_v7s_do_selftests(void) .tlb = &dummy_tlb_ops, .oas = 32, .ias = 32, - .quirks = IO_PGTABLE_QUIRK_ARM_NS, + .quirks = IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA, .pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M, }; unsigned int iova, size, iova_start; diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 6e5df5e0a3bd..b182039862c5 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -20,6 +20,7 @@ #define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt +#include <linux/atomic.h> #include <linux/iommu.h> #include <linux/kernel.h> #include <linux/sizes.h> @@ -99,6 +100,8 @@ #define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52) #define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \ ARM_LPAE_PTE_ATTR_HI_MASK) +/* Software bit for solving coherency races */ +#define ARM_LPAE_PTE_SW_SYNC (((arm_lpae_iopte)1) << 55) /* Stage-1 PTE */ #define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) @@ -217,7 +220,7 @@ static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp, if (!pages) return NULL; - if (!selftest_running) { + if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) { dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma)) goto out_free; @@ -243,40 +246,64 @@ out_free: static void __arm_lpae_free_pages(void *pages, size_t size, struct io_pgtable_cfg *cfg) { - if (!selftest_running) + if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages), size, DMA_TO_DEVICE); free_pages_exact(pages, size); } +static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, + struct io_pgtable_cfg *cfg) +{ + dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep), + sizeof(*ptep), DMA_TO_DEVICE); +} + static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte, struct io_pgtable_cfg *cfg) { *ptep = pte; - if (!selftest_running) - dma_sync_single_for_device(cfg->iommu_dev, - __arm_lpae_dma_addr(ptep), - sizeof(pte), DMA_TO_DEVICE); + if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) + __arm_lpae_sync_pte(ptep, cfg); } static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, unsigned long iova, size_t size, int lvl, arm_lpae_iopte *ptep); +static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, + phys_addr_t paddr, arm_lpae_iopte prot, + int lvl, arm_lpae_iopte *ptep) +{ + arm_lpae_iopte pte = prot; + + if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) + pte |= ARM_LPAE_PTE_NS; + + if (lvl == ARM_LPAE_MAX_LEVELS - 1) + pte |= ARM_LPAE_PTE_TYPE_PAGE; + else + pte |= ARM_LPAE_PTE_TYPE_BLOCK; + + pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; + pte |= pfn_to_iopte(paddr >> data->pg_shift, data); + + __arm_lpae_set_pte(ptep, pte, &data->iop.cfg); +} + static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, unsigned long iova, phys_addr_t paddr, arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep) { - arm_lpae_iopte pte = prot; - 
struct io_pgtable_cfg *cfg = &data->iop.cfg; + arm_lpae_iopte pte = *ptep; - if (iopte_leaf(*ptep, lvl)) { + if (iopte_leaf(pte, lvl)) { /* We require an unmap first */ WARN_ON(!selftest_running); return -EEXIST; - } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) { + } else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) { /* * We need to unmap and free the old table before * overwriting it with a block entry. @@ -289,19 +316,40 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, return -EINVAL; } + __arm_lpae_init_pte(data, paddr, prot, lvl, ptep); + return 0; +} + +static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table, + arm_lpae_iopte *ptep, + arm_lpae_iopte curr, + struct io_pgtable_cfg *cfg) +{ + arm_lpae_iopte old, new; + + new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE; if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) - pte |= ARM_LPAE_PTE_NS; + new |= ARM_LPAE_PTE_NSTABLE; - if (lvl == ARM_LPAE_MAX_LEVELS - 1) - pte |= ARM_LPAE_PTE_TYPE_PAGE; - else - pte |= ARM_LPAE_PTE_TYPE_BLOCK; + /* + * Ensure the table itself is visible before its PTE can be. + * Whilst we could get away with cmpxchg64_release below, this + * doesn't have any ordering semantics when !CONFIG_SMP. + */ + dma_wmb(); - pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; - pte |= pfn_to_iopte(paddr >> data->pg_shift, data); + old = cmpxchg64_relaxed(ptep, curr, new); - __arm_lpae_set_pte(ptep, pte, cfg); - return 0; + if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) || + (old & ARM_LPAE_PTE_SW_SYNC)) + return old; + + /* Even if it's not ours, there's no point waiting; just kick it */ + __arm_lpae_sync_pte(ptep, cfg); + if (old == curr) + WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC); + + return old; } static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, @@ -310,6 +358,7 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, { arm_lpae_iopte *cptep, pte; size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); + size_t tblsz = ARM_LPAE_GRANULE(data); struct io_pgtable_cfg *cfg = &data->iop.cfg; /* Find our entry at the current level */ @@ -324,20 +373,23 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, return -EINVAL; /* Grab a pointer to the next level */ - pte = *ptep; + pte = READ_ONCE(*ptep); if (!pte) { - cptep = __arm_lpae_alloc_pages(ARM_LPAE_GRANULE(data), - GFP_ATOMIC, cfg); + cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg); if (!cptep) return -ENOMEM; - pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE; - if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS) - pte |= ARM_LPAE_PTE_NSTABLE; - __arm_lpae_set_pte(ptep, pte, cfg); - } else if (!iopte_leaf(pte, lvl)) { + pte = arm_lpae_install_table(cptep, ptep, 0, cfg); + if (pte) + __arm_lpae_free_pages(cptep, tblsz, cfg); + } else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) && + !(pte & ARM_LPAE_PTE_SW_SYNC)) { + __arm_lpae_sync_pte(ptep, cfg); + } + + if (pte && !iopte_leaf(pte, lvl)) { cptep = iopte_deref(pte, data); - } else { + } else if (pte) { /* We require an unmap first */ WARN_ON(!selftest_running); return -EEXIST; @@ -452,40 +504,55 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop) static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, unsigned long iova, size_t size, - arm_lpae_iopte prot, int lvl, - arm_lpae_iopte *ptep, size_t blk_size) + arm_lpae_iopte blk_pte, int lvl, + arm_lpae_iopte *ptep) { - unsigned long blk_start, blk_end; + struct io_pgtable_cfg *cfg = &data->iop.cfg; + arm_lpae_iopte pte, *tablep; phys_addr_t 
blk_paddr; - arm_lpae_iopte table = 0; + size_t tablesz = ARM_LPAE_GRANULE(data); + size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data); + int i, unmap_idx = -1; + + if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) + return 0; - blk_start = iova & ~(blk_size - 1); - blk_end = blk_start + blk_size; - blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift; + tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg); + if (!tablep) + return 0; /* Bytes unmapped */ - for (; blk_start < blk_end; blk_start += size, blk_paddr += size) { - arm_lpae_iopte *tablep; + if (size == split_sz) + unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data); + blk_paddr = iopte_to_pfn(blk_pte, data) << data->pg_shift; + pte = iopte_prot(blk_pte); + + for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) { /* Unmap! */ - if (blk_start == iova) + if (i == unmap_idx) continue; - /* __arm_lpae_map expects a pointer to the start of the table */ - tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data); - if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl, - tablep) < 0) { - if (table) { - /* Free the table we allocated */ - tablep = iopte_deref(table, data); - __arm_lpae_free_pgtable(data, lvl + 1, tablep); - } - return 0; /* Bytes unmapped */ - } + __arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]); } - __arm_lpae_set_pte(ptep, table, &data->iop.cfg); - iova &= ~(blk_size - 1); - io_pgtable_tlb_add_flush(&data->iop, iova, blk_size, blk_size, true); + pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg); + if (pte != blk_pte) { + __arm_lpae_free_pages(tablep, tablesz, cfg); + /* + * We may race against someone unmapping another part of this + * block, but anything else is invalid. We can't misinterpret + * a page entry here since we're never at the last level. + */ + if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE) + return 0; + + tablep = iopte_deref(pte, data); + } + + if (unmap_idx < 0) + return __arm_lpae_unmap(data, iova, size, lvl, tablep); + + io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true); return size; } @@ -495,19 +562,18 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, { arm_lpae_iopte pte; struct io_pgtable *iop = &data->iop; - size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data); /* Something went horribly wrong and we ran out of page table */ if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS)) return 0; ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); - pte = *ptep; + pte = READ_ONCE(*ptep); if (WARN_ON(!pte)) return 0; /* If the size matches this level, we're in the right place */ - if (size == blk_size) { + if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) { __arm_lpae_set_pte(ptep, 0, &iop->cfg); if (!iopte_leaf(pte, lvl)) { @@ -527,9 +593,8 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, * Insert a table at the next level to map the old region, * minus the part we want to unmap */ - return arm_lpae_split_blk_unmap(data, iova, size, - iopte_prot(pte), lvl, ptep, - blk_size); + return arm_lpae_split_blk_unmap(data, iova, size, pte, + lvl + 1, ptep); } /* Keep on walkin' */ @@ -565,7 +630,8 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, return 0; /* Grab the IOPTE we're interested in */ - pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data)); + ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); + pte = READ_ONCE(*ptep); /* Valid entry? 
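 * With the callers' pgtbl_lock gone, this walk can race against
 * concurrent map()/unmap() calls, which is why each level's PTE is
 * loaded exactly once with READ_ONCE() before being dereferenced.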
*/ if (!pte) @@ -673,7 +739,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) u64 reg; struct arm_lpae_io_pgtable *data; - if (cfg->quirks & ~IO_PGTABLE_QUIRK_ARM_NS) + if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA)) return NULL; data = arm_lpae_alloc_pgtable(cfg); @@ -762,7 +828,7 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) struct arm_lpae_io_pgtable *data; /* The NS quirk doesn't apply at stage 2 */ - if (cfg->quirks) + if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA) return NULL; data = arm_lpae_alloc_pgtable(cfg); @@ -1066,6 +1132,7 @@ static int __init arm_lpae_do_selftests(void) struct io_pgtable_cfg cfg = { .tlb = &dummy_tlb_ops, .oas = 48, + .quirks = IO_PGTABLE_QUIRK_NO_DMA, }; for (i = 0; i < ARRAY_SIZE(pgsize); ++i) { diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h index 969d82cc92ca..524263a7ae6f 100644 --- a/drivers/iommu/io-pgtable.h +++ b/drivers/iommu/io-pgtable.h @@ -65,11 +65,17 @@ struct io_pgtable_cfg { * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit * when the SoC is in "4GB mode" and they can only access the high * remap of DRAM (0x1_00000000 to 0x1_ffffffff). + * + * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever + * be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a + * software-emulated IOMMU), such that pagetable updates need not + * be treated as explicit DMA data. */ #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2) #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3) + #define IO_PGTABLE_QUIRK_NO_DMA BIT(4) unsigned long quirks; unsigned long pgsize_bitmap; unsigned int ias; diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index cf7ca7e70777..3f6ea160afed 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -915,13 +915,7 @@ static int get_pci_alias_or_group(struct pci_dev *pdev, u16 alias, void *opaque) */ struct iommu_group *generic_device_group(struct device *dev) { - struct iommu_group *group; - - group = iommu_group_alloc(); - if (IS_ERR(group)) - return NULL; - - return group; + return iommu_group_alloc(); } /* @@ -988,11 +982,7 @@ struct iommu_group *pci_device_group(struct device *dev) return group; /* No shared group found, allocate new */ - group = iommu_group_alloc(); - if (IS_ERR(group)) - return NULL; - - return group; + return iommu_group_alloc(); } /** @@ -1020,6 +1010,9 @@ struct iommu_group *iommu_group_get_for_dev(struct device *dev) if (ops && ops->device_group) group = ops->device_group(dev); + if (WARN_ON_ONCE(group == NULL)) + return ERR_PTR(-EINVAL); + if (IS_ERR(group)) return group; diff --git a/drivers/iommu/iova.c b/drivers/iommu/iova.c index 5c88ba70e4e0..246f14c83944 100644 --- a/drivers/iommu/iova.c +++ b/drivers/iommu/iova.c @@ -22,6 +22,7 @@ #include <linux/slab.h> #include <linux/smp.h> #include <linux/bitops.h> +#include <linux/cpu.h> static bool iova_rcache_insert(struct iova_domain *iovad, unsigned long pfn, @@ -48,7 +49,7 @@ init_iova_domain(struct iova_domain *iovad, unsigned long granule, iovad->cached32_node = NULL; iovad->granule = granule; iovad->start_pfn = start_pfn; - iovad->dma_32bit_pfn = pfn_32bit; + iovad->dma_32bit_pfn = pfn_32bit + 1; init_iova_rcaches(iovad); } EXPORT_SYMBOL_GPL(init_iova_domain); @@ -63,7 +64,7 @@ __get_cached_rbnode(struct iova_domain *iovad, unsigned long *limit_pfn) struct rb_node *prev_node = rb_prev(iovad->cached32_node); struct iova *curr_iova = 
rb_entry(iovad->cached32_node, struct iova, node); - *limit_pfn = curr_iova->pfn_lo - 1; + *limit_pfn = curr_iova->pfn_lo; return prev_node; } } @@ -135,7 +136,7 @@ iova_insert_rbtree(struct rb_root *root, struct iova *iova, static unsigned int iova_get_pad_size(unsigned int size, unsigned int limit_pfn) { - return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1); + return (limit_pfn - size) & (__roundup_pow_of_two(size) - 1); } static int __alloc_and_insert_iova_range(struct iova_domain *iovad, @@ -155,18 +156,15 @@ static int __alloc_and_insert_iova_range(struct iova_domain *iovad, while (curr) { struct iova *curr_iova = rb_entry(curr, struct iova, node); - if (limit_pfn < curr_iova->pfn_lo) + if (limit_pfn <= curr_iova->pfn_lo) { goto move_left; - else if (limit_pfn < curr_iova->pfn_hi) - goto adjust_limit_pfn; - else { + } else if (limit_pfn > curr_iova->pfn_hi) { if (size_aligned) pad_size = iova_get_pad_size(size, limit_pfn); - if ((curr_iova->pfn_hi + size + pad_size) <= limit_pfn) + if ((curr_iova->pfn_hi + size + pad_size) < limit_pfn) break; /* found a free slot */ } -adjust_limit_pfn: - limit_pfn = curr_iova->pfn_lo ? (curr_iova->pfn_lo - 1) : 0; + limit_pfn = curr_iova->pfn_lo; move_left: prev = curr; curr = rb_prev(curr); @@ -182,7 +180,7 @@ move_left: } /* pfn_lo will point to size aligned address if size_aligned is set */ - new->pfn_lo = limit_pfn - (size + pad_size) + 1; + new->pfn_lo = limit_pfn - (size + pad_size); new->pfn_hi = new->pfn_lo + size - 1; /* If we have 'prev', it's a valid place to start the insertion. */ @@ -269,7 +267,7 @@ alloc_iova(struct iova_domain *iovad, unsigned long size, if (!new_iova) return NULL; - ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn, + ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn + 1, new_iova, size_aligned); if (ret) { @@ -398,10 +396,8 @@ retry: /* Try replenishing IOVAs by flushing rcache. */ flushed_rcache = true; - preempt_disable(); for_each_online_cpu(cpu) free_cpu_cached_iovas(cpu, iovad); - preempt_enable(); goto retry; } @@ -729,7 +725,7 @@ static bool __iova_rcache_insert(struct iova_domain *iovad, bool can_insert = false; unsigned long flags; - cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches); + cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches); spin_lock_irqsave(&cpu_rcache->lock, flags); if (!iova_magazine_full(cpu_rcache->loaded)) { @@ -759,7 +755,6 @@ static bool __iova_rcache_insert(struct iova_domain *iovad, iova_magazine_push(cpu_rcache->loaded, iova_pfn); spin_unlock_irqrestore(&cpu_rcache->lock, flags); - put_cpu_ptr(rcache->cpu_rcaches); if (mag_to_free) { iova_magazine_free_pfns(mag_to_free, iovad); @@ -793,7 +788,7 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache, bool has_pfn = false; unsigned long flags; - cpu_rcache = get_cpu_ptr(rcache->cpu_rcaches); + cpu_rcache = raw_cpu_ptr(rcache->cpu_rcaches); spin_lock_irqsave(&cpu_rcache->lock, flags); if (!iova_magazine_empty(cpu_rcache->loaded)) { @@ -815,7 +810,6 @@ static unsigned long __iova_rcache_get(struct iova_rcache *rcache, iova_pfn = iova_magazine_pop(cpu_rcache->loaded, limit_pfn); spin_unlock_irqrestore(&cpu_rcache->lock, flags); - put_cpu_ptr(rcache->cpu_rcaches); return iova_pfn; } diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index b7e14ee863f9..2a38aa15be17 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -8,7 +8,9 @@ * the Free Software Foundation; version 2 of the License. 
*/ +#include <linux/bitmap.h> #include <linux/delay.h> +#include <linux/dma-iommu.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/export.h> @@ -21,17 +23,24 @@ #include <linux/sizes.h> #include <linux/slab.h> +#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) #include <asm/dma-iommu.h> #include <asm/pgalloc.h> +#endif #include "io-pgtable.h" +#define IPMMU_CTX_MAX 1 + struct ipmmu_vmsa_device { struct device *dev; void __iomem *base; struct list_head list; unsigned int num_utlbs; + spinlock_t lock; /* Protects ctx and domains[] */ + DECLARE_BITMAP(ctx, IPMMU_CTX_MAX); + struct ipmmu_vmsa_domain *domains[IPMMU_CTX_MAX]; struct dma_iommu_mapping *mapping; }; @@ -47,10 +56,12 @@ struct ipmmu_vmsa_domain { spinlock_t lock; /* Protects mappings */ }; -struct ipmmu_vmsa_archdata { +struct ipmmu_vmsa_iommu_priv { struct ipmmu_vmsa_device *mmu; unsigned int *utlbs; unsigned int num_utlbs; + struct device *dev; + struct list_head list; }; static DEFINE_SPINLOCK(ipmmu_devices_lock); @@ -61,6 +72,24 @@ static struct ipmmu_vmsa_domain *to_vmsa_domain(struct iommu_domain *dom) return container_of(dom, struct ipmmu_vmsa_domain, io_domain); } + +static struct ipmmu_vmsa_iommu_priv *to_priv(struct device *dev) +{ +#if defined(CONFIG_ARM) + return dev->archdata.iommu; +#else + return dev->iommu_fwspec->iommu_priv; +#endif +} +static void set_priv(struct device *dev, struct ipmmu_vmsa_iommu_priv *p) +{ +#if defined(CONFIG_ARM) + dev->archdata.iommu = p; +#else + dev->iommu_fwspec->iommu_priv = p; +#endif +} + #define TLB_LOOP_TIMEOUT 100 /* 100us */ /* ----------------------------------------------------------------------------- @@ -293,9 +322,29 @@ static struct iommu_gather_ops ipmmu_gather_ops = { * Domain/Context Management */ +static int ipmmu_domain_allocate_context(struct ipmmu_vmsa_device *mmu, + struct ipmmu_vmsa_domain *domain) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&mmu->lock, flags); + + ret = find_first_zero_bit(mmu->ctx, IPMMU_CTX_MAX); + if (ret != IPMMU_CTX_MAX) { + mmu->domains[ret] = domain; + set_bit(ret, mmu->ctx); + } + + spin_unlock_irqrestore(&mmu->lock, flags); + + return ret; +} + static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) { u64 ttbr; + int ret; /* * Allocate the page table operations. @@ -309,7 +358,7 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) * non-secure mode. */ domain->cfg.quirks = IO_PGTABLE_QUIRK_ARM_NS; - domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, + domain->cfg.pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K; domain->cfg.ias = 32; domain->cfg.oas = 40; domain->cfg.tlb = &ipmmu_gather_ops; @@ -327,10 +376,15 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) return -EINVAL; /* - * TODO: When adding support for multiple contexts, find an unused - * context. + * Find an unused context. 
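 * Contexts are handed out from a small bitmap under mmu->lock;
 * ipmmu_domain_allocate_context() returns IPMMU_CTX_MAX when every
 * slot is taken, which becomes -EBUSY below. With IPMMU_CTX_MAX
 * currently 1 this is equivalent to the old hard-coded context 0, but
 * the mmu->domains[] bookkeeping is what lets ipmmu_irq() dispatch
 * faults to every live domain.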
*/ - domain->context_id = 0; + ret = ipmmu_domain_allocate_context(domain->mmu, domain); + if (ret == IPMMU_CTX_MAX) { + free_io_pgtable_ops(domain->iop); + return -EBUSY; + } + + domain->context_id = ret; /* TTBR0 */ ttbr = domain->cfg.arm_lpae_s1_cfg.ttbr[0]; @@ -372,6 +426,19 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain) return 0; } +static void ipmmu_domain_free_context(struct ipmmu_vmsa_device *mmu, + unsigned int context_id) +{ + unsigned long flags; + + spin_lock_irqsave(&mmu->lock, flags); + + clear_bit(context_id, mmu->ctx); + mmu->domains[context_id] = NULL; + + spin_unlock_irqrestore(&mmu->lock, flags); +} + static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) { /* @@ -382,6 +449,7 @@ static void ipmmu_domain_destroy_context(struct ipmmu_vmsa_domain *domain) */ ipmmu_ctx_write(domain, IMCTR, IMCTR_FLUSH); ipmmu_tlb_sync(domain); + ipmmu_domain_free_context(domain->mmu, domain->context_id); } /* ----------------------------------------------------------------------------- @@ -439,29 +507,35 @@ static irqreturn_t ipmmu_domain_irq(struct ipmmu_vmsa_domain *domain) static irqreturn_t ipmmu_irq(int irq, void *dev) { struct ipmmu_vmsa_device *mmu = dev; - struct iommu_domain *io_domain; - struct ipmmu_vmsa_domain *domain; + irqreturn_t status = IRQ_NONE; + unsigned int i; + unsigned long flags; - if (!mmu->mapping) - return IRQ_NONE; + spin_lock_irqsave(&mmu->lock, flags); + + /* + * Check interrupts for all active contexts. + */ + for (i = 0; i < IPMMU_CTX_MAX; i++) { + if (!mmu->domains[i]) + continue; + if (ipmmu_domain_irq(mmu->domains[i]) == IRQ_HANDLED) + status = IRQ_HANDLED; + } - io_domain = mmu->mapping->domain; - domain = to_vmsa_domain(io_domain); + spin_unlock_irqrestore(&mmu->lock, flags); - return ipmmu_domain_irq(domain); + return status; } /* ----------------------------------------------------------------------------- * IOMMU Operations */ -static struct iommu_domain *ipmmu_domain_alloc(unsigned type) +static struct iommu_domain *__ipmmu_domain_alloc(unsigned type) { struct ipmmu_vmsa_domain *domain; - if (type != IOMMU_DOMAIN_UNMANAGED) - return NULL; - domain = kzalloc(sizeof(*domain), GFP_KERNEL); if (!domain) return NULL; @@ -487,8 +561,8 @@ static void ipmmu_domain_free(struct iommu_domain *io_domain) static int ipmmu_attach_device(struct iommu_domain *io_domain, struct device *dev) { - struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; - struct ipmmu_vmsa_device *mmu = archdata->mmu; + struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); + struct ipmmu_vmsa_device *mmu = priv->mmu; struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); unsigned long flags; unsigned int i; @@ -513,15 +587,16 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, dev_err(dev, "Can't attach IPMMU %s to domain on IPMMU %s\n", dev_name(mmu->dev), dev_name(domain->mmu->dev)); ret = -EINVAL; - } + } else + dev_info(dev, "Reusing IPMMU context %u\n", domain->context_id); spin_unlock_irqrestore(&domain->lock, flags); if (ret < 0) return ret; - for (i = 0; i < archdata->num_utlbs; ++i) - ipmmu_utlb_enable(domain, archdata->utlbs[i]); + for (i = 0; i < priv->num_utlbs; ++i) + ipmmu_utlb_enable(domain, priv->utlbs[i]); return 0; } @@ -529,12 +604,12 @@ static int ipmmu_attach_device(struct iommu_domain *io_domain, static void ipmmu_detach_device(struct iommu_domain *io_domain, struct device *dev) { - struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; + struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); 
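	/*
	 * priv was attached by ipmmu_init_platform_device() via set_priv();
	 * the to_priv()/set_priv() pair hides where the pointer actually
	 * lives: dev->archdata.iommu on 32-bit ARM,
	 * dev->iommu_fwspec->iommu_priv otherwise.
	 */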
struct ipmmu_vmsa_domain *domain = to_vmsa_domain(io_domain); unsigned int i; - for (i = 0; i < archdata->num_utlbs; ++i) - ipmmu_utlb_disable(domain, archdata->utlbs[i]); + for (i = 0; i < priv->num_utlbs; ++i) + ipmmu_utlb_disable(domain, priv->utlbs[i]); /* * TODO: Optimize by disabling the context when no device is attached. @@ -595,22 +670,15 @@ static int ipmmu_find_utlbs(struct ipmmu_vmsa_device *mmu, struct device *dev, return 0; } -static int ipmmu_add_device(struct device *dev) +static int ipmmu_init_platform_device(struct device *dev) { - struct ipmmu_vmsa_archdata *archdata; + struct ipmmu_vmsa_iommu_priv *priv; struct ipmmu_vmsa_device *mmu; - struct iommu_group *group = NULL; unsigned int *utlbs; unsigned int i; int num_utlbs; int ret = -ENODEV; - if (dev->archdata.iommu) { - dev_warn(dev, "IOMMU driver already assigned to device %s\n", - dev_name(dev)); - return -EINVAL; - } - /* Find the master corresponding to the device. */ num_utlbs = of_count_phandle_with_args(dev->of_node, "iommus", @@ -647,6 +715,46 @@ static int ipmmu_add_device(struct device *dev) } } + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto error; + } + + priv->mmu = mmu; + priv->utlbs = utlbs; + priv->num_utlbs = num_utlbs; + priv->dev = dev; + set_priv(dev, priv); + return 0; + +error: + kfree(utlbs); + return ret; +} + +#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) + +static struct iommu_domain *ipmmu_domain_alloc(unsigned type) +{ + if (type != IOMMU_DOMAIN_UNMANAGED) + return NULL; + + return __ipmmu_domain_alloc(type); +} + +static int ipmmu_add_device(struct device *dev) +{ + struct ipmmu_vmsa_device *mmu = NULL; + struct iommu_group *group; + int ret; + + if (to_priv(dev)) { + dev_warn(dev, "IOMMU driver already assigned to device %s\n", + dev_name(dev)); + return -EINVAL; + } + /* Create a device group and add the device to it. */ group = iommu_group_alloc(); if (IS_ERR(group)) { @@ -664,16 +772,9 @@ static int ipmmu_add_device(struct device *dev) goto error; } - archdata = kzalloc(sizeof(*archdata), GFP_KERNEL); - if (!archdata) { - ret = -ENOMEM; + ret = ipmmu_init_platform_device(dev); + if (ret < 0) goto error; - } - - archdata->mmu = mmu; - archdata->utlbs = utlbs; - archdata->num_utlbs = num_utlbs; - dev->archdata.iommu = archdata; /* * Create the ARM mapping, used by the ARM DMA mapping core to allocate @@ -684,6 +785,7 @@ static int ipmmu_add_device(struct device *dev) * - Make the mapping size configurable ? We currently use a 2GB mapping * at a 1GB offset to ensure that NULL VAs will fault. 
*/ + mmu = to_priv(dev)->mmu; if (!mmu->mapping) { struct dma_iommu_mapping *mapping; @@ -708,30 +810,30 @@ static int ipmmu_add_device(struct device *dev) return 0; error: - arm_iommu_release_mapping(mmu->mapping); - - kfree(dev->archdata.iommu); - kfree(utlbs); - - dev->archdata.iommu = NULL; + if (mmu) + arm_iommu_release_mapping(mmu->mapping); if (!IS_ERR_OR_NULL(group)) iommu_group_remove_device(dev); + kfree(to_priv(dev)->utlbs); + kfree(to_priv(dev)); + set_priv(dev, NULL); + return ret; } static void ipmmu_remove_device(struct device *dev) { - struct ipmmu_vmsa_archdata *archdata = dev->archdata.iommu; + struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); arm_iommu_detach_device(dev); iommu_group_remove_device(dev); - kfree(archdata->utlbs); - kfree(archdata); + kfree(priv->utlbs); + kfree(priv); - dev->archdata.iommu = NULL; + set_priv(dev, NULL); } static const struct iommu_ops ipmmu_ops = { @@ -748,6 +850,144 @@ static const struct iommu_ops ipmmu_ops = { .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, }; +#endif /* !CONFIG_ARM && CONFIG_IOMMU_DMA */ + +#ifdef CONFIG_IOMMU_DMA + +static DEFINE_SPINLOCK(ipmmu_slave_devices_lock); +static LIST_HEAD(ipmmu_slave_devices); + +static struct iommu_domain *ipmmu_domain_alloc_dma(unsigned type) +{ + struct iommu_domain *io_domain = NULL; + + switch (type) { + case IOMMU_DOMAIN_UNMANAGED: + io_domain = __ipmmu_domain_alloc(type); + break; + + case IOMMU_DOMAIN_DMA: + io_domain = __ipmmu_domain_alloc(type); + if (io_domain) + iommu_get_dma_cookie(io_domain); + break; + } + + return io_domain; +} + +static void ipmmu_domain_free_dma(struct iommu_domain *io_domain) +{ + switch (io_domain->type) { + case IOMMU_DOMAIN_DMA: + iommu_put_dma_cookie(io_domain); + /* fall-through */ + default: + ipmmu_domain_free(io_domain); + break; + } +} + +static int ipmmu_add_device_dma(struct device *dev) +{ + struct iommu_fwspec *fwspec = dev->iommu_fwspec; + struct iommu_group *group; + + /* + * Only let through devices that have been verified in xlate() + * We may get called with dev->iommu_fwspec set to NULL. + */ + if (!fwspec || !fwspec->iommu_priv) + return -ENODEV; + + group = iommu_group_get_for_dev(dev); + if (IS_ERR(group)) + return PTR_ERR(group); + + spin_lock(&ipmmu_slave_devices_lock); + list_add(&to_priv(dev)->list, &ipmmu_slave_devices); + spin_unlock(&ipmmu_slave_devices_lock); + return 0; +} + +static void ipmmu_remove_device_dma(struct device *dev) +{ + struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); + + spin_lock(&ipmmu_slave_devices_lock); + list_del(&priv->list); + spin_unlock(&ipmmu_slave_devices_lock); + + iommu_group_remove_device(dev); +} + +static struct device *ipmmu_find_sibling_device(struct device *dev) +{ + struct ipmmu_vmsa_iommu_priv *priv = to_priv(dev); + struct ipmmu_vmsa_iommu_priv *sibling_priv = NULL; + bool found = false; + + spin_lock(&ipmmu_slave_devices_lock); + + list_for_each_entry(sibling_priv, &ipmmu_slave_devices, list) { + if (priv == sibling_priv) + continue; + if (sibling_priv->mmu == priv->mmu) { + found = true; + break; + } + } + + spin_unlock(&ipmmu_slave_devices_lock); + + return found ? 
sibling_priv->dev : NULL; +} + +static struct iommu_group *ipmmu_find_group_dma(struct device *dev) +{ + struct iommu_group *group; + struct device *sibling; + + sibling = ipmmu_find_sibling_device(dev); + if (sibling) + group = iommu_group_get(sibling); + if (!sibling || IS_ERR(group)) + group = generic_device_group(dev); + + return group; +} + +static int ipmmu_of_xlate_dma(struct device *dev, + struct of_phandle_args *spec) +{ + /* If the IPMMU device is disabled in DT then return error + * to make sure the of_iommu code does not install ops + * even though the iommu device is disabled + */ + if (!of_device_is_available(spec->np)) + return -ENODEV; + + return ipmmu_init_platform_device(dev); +} + +static const struct iommu_ops ipmmu_ops = { + .domain_alloc = ipmmu_domain_alloc_dma, + .domain_free = ipmmu_domain_free_dma, + .attach_dev = ipmmu_attach_device, + .detach_dev = ipmmu_detach_device, + .map = ipmmu_map, + .unmap = ipmmu_unmap, + .map_sg = default_iommu_map_sg, + .iova_to_phys = ipmmu_iova_to_phys, + .add_device = ipmmu_add_device_dma, + .remove_device = ipmmu_remove_device_dma, + .device_group = ipmmu_find_group_dma, + .pgsize_bitmap = SZ_1G | SZ_2M | SZ_4K, + .of_xlate = ipmmu_of_xlate_dma, +}; + +#endif /* CONFIG_IOMMU_DMA */ + /* ----------------------------------------------------------------------------- * Probe/remove and init */ @@ -768,11 +1008,6 @@ static int ipmmu_probe(struct platform_device *pdev) int irq; int ret; - if (!IS_ENABLED(CONFIG_OF) && !pdev->dev.platform_data) { - dev_err(&pdev->dev, "missing platform data\n"); - return -EINVAL; - } - mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); if (!mmu) { dev_err(&pdev->dev, "cannot allocate device data\n"); @@ -781,6 +1016,8 @@ static int ipmmu_probe(struct platform_device *pdev) mmu->dev = &pdev->dev; mmu->num_utlbs = 32; + spin_lock_init(&mmu->lock); + bitmap_zero(mmu->ctx, IPMMU_CTX_MAX); /* Map I/O memory and request IRQ. 
*/ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -840,7 +1077,9 @@ static int ipmmu_remove(struct platform_device *pdev) list_del(&mmu->list); spin_unlock(&ipmmu_devices_lock); +#if defined(CONFIG_ARM) && !defined(CONFIG_IOMMU_DMA) arm_iommu_release_mapping(mmu->mapping); +#endif ipmmu_device_reset(mmu); diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 95dfca36ccb9..641e035cf866 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1309,7 +1309,7 @@ static void omap_iommu_remove_device(struct device *dev) static struct iommu_group *omap_iommu_device_group(struct device *dev) { struct omap_iommu_arch_data *arch_data = dev->archdata.iommu; - struct iommu_group *group = NULL; + struct iommu_group *group = ERR_PTR(-EINVAL); if (arch_data->iommu_dev) group = arch_data->iommu_dev->group; diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c index 179e636a4d91..8788640756a7 100644 --- a/drivers/iommu/s390-iommu.c +++ b/drivers/iommu/s390-iommu.c @@ -165,20 +165,14 @@ static void s390_iommu_detach_device(struct iommu_domain *domain, static int s390_iommu_add_device(struct device *dev) { - struct iommu_group *group; - int rc; + struct iommu_group *group = iommu_group_get_for_dev(dev); - group = iommu_group_get(dev); - if (!group) { - group = iommu_group_alloc(); - if (IS_ERR(group)) - return PTR_ERR(group); - } + if (IS_ERR(group)) + return PTR_ERR(group); - rc = iommu_group_add_device(group, dev); iommu_group_put(group); - return rc; + return 0; } static void s390_iommu_remove_device(struct device *dev) @@ -344,6 +338,7 @@ static struct iommu_ops s390_iommu_ops = { .iova_to_phys = s390_iommu_iova_to_phys, .add_device = s390_iommu_add_device, .remove_device = s390_iommu_remove_device, + .device_group = generic_device_group, .pgsize_bitmap = S390_IOMMU_PGSIZES, }; diff --git a/drivers/irqchip/irq-digicolor.c b/drivers/irqchip/irq-digicolor.c index dad85e74c37c..3aae015469a5 100644 --- a/drivers/irqchip/irq-digicolor.c +++ b/drivers/irqchip/irq-digicolor.c @@ -71,7 +71,7 @@ static void __init digicolor_set_gc(void __iomem *reg_base, unsigned irq_base, static int __init digicolor_of_init(struct device_node *node, struct device_node *parent) { - static void __iomem *reg_base; + void __iomem *reg_base; unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; struct regmap *ucregs; int ret; diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c index 54c296401525..18d58d2b4ffe 100644 --- a/drivers/irqchip/irq-gic-realview.c +++ b/drivers/irqchip/irq-gic-realview.c @@ -43,7 +43,7 @@ static const struct of_device_id syscon_pldset_of_match[] = { static int __init realview_gic_of_init(struct device_node *node, struct device_node *parent) { - static struct regmap *map; + struct regmap *map; struct device_node *np; const struct of_device_id *gic_id; u32 pld1_ctrl; diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c index 0a8ed1c05518..14461cbfab2f 100644 --- a/drivers/irqchip/irq-mips-cpu.c +++ b/drivers/irqchip/irq-mips-cpu.c @@ -154,7 +154,7 @@ asmlinkage void __weak plat_irq_dispatch(void) static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { - static struct irq_chip *chip; + struct irq_chip *chip; if (hw < 2 && cpu_has_mipsmt) { /* Software interrupts are used for MT/CMT IPI */ diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index 832ebf4062f7..6ab1d3afec02 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ 
b/drivers/irqchip/irq-mips-gic.c @@ -950,7 +950,6 @@ static void __init __gic_init(unsigned long gic_base_addr, &gic_irq_domain_ops, NULL); if (!gic_irq_domain) panic("Failed to add GIC IRQ domain"); - gic_irq_domain->name = "mips-gic-irq"; gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain, IRQ_DOMAIN_FLAG_IPI_PER_CPU, @@ -959,7 +958,6 @@ static void __init __gic_init(unsigned long gic_base_addr, if (!gic_ipi_domain) panic("Failed to add GIC IPI domain"); - gic_ipi_domain->name = "mips-gic-ipi"; irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); if (node && diff --git a/drivers/isdn/divert/isdn_divert.c b/drivers/isdn/divert/isdn_divert.c index 060d357f107f..6f423bc49d0d 100644 --- a/drivers/isdn/divert/isdn_divert.c +++ b/drivers/isdn/divert/isdn_divert.c @@ -485,18 +485,19 @@ static int isdn_divert_icall(isdn_ctrl *ic) cs->deflect_dest[0] = '\0'; retval = 4; /* only proceed */ } - sprintf(cs->info, "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n", - cs->akt_state, - cs->divert_id, - divert_if.drv_to_name(cs->ics.driver), - (ic->command == ISDN_STAT_ICALLW) ? "1" : "0", - cs->ics.parm.setup.phone, - cs->ics.parm.setup.eazmsn, - cs->ics.parm.setup.si1, - cs->ics.parm.setup.si2, - cs->ics.parm.setup.screen, - dv->rule.waittime, - cs->deflect_dest); + snprintf(cs->info, sizeof(cs->info), + "%d 0x%lx %s %s %s %s 0x%x 0x%x %d %d %s\n", + cs->akt_state, + cs->divert_id, + divert_if.drv_to_name(cs->ics.driver), + (ic->command == ISDN_STAT_ICALLW) ? "1" : "0", + cs->ics.parm.setup.phone, + cs->ics.parm.setup.eazmsn, + cs->ics.parm.setup.si1, + cs->ics.parm.setup.si2, + cs->ics.parm.setup.screen, + dv->rule.waittime, + cs->deflect_dest); if ((dv->rule.action == DEFLECT_REPORT) || (dv->rule.action == DEFLECT_REJECT)) { put_info_buffer(cs->info); diff --git a/drivers/isdn/hardware/avm/c4.c b/drivers/isdn/hardware/avm/c4.c index 40c7e2cf423b..034cabac699d 100644 --- a/drivers/isdn/hardware/avm/c4.c +++ b/drivers/isdn/hardware/avm/c4.c @@ -42,7 +42,7 @@ static char *revision = "$Revision: 1.1.2.2 $"; static bool suppress_pollack; -static struct pci_device_id c4_pci_tbl[] = { +static const struct pci_device_id c4_pci_tbl[] = { { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C4, 0, 0, (unsigned long)4 }, { PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_C2, 0, 0, (unsigned long)2 }, { } /* Terminating entry */ diff --git a/drivers/isdn/hardware/eicon/divasmain.c b/drivers/isdn/hardware/eicon/divasmain.c index 8b7ad4f1ab01..b2023e08dcd2 100644 --- a/drivers/isdn/hardware/eicon/divasmain.c +++ b/drivers/isdn/hardware/eicon/divasmain.c @@ -110,7 +110,7 @@ typedef struct _diva_os_thread_dpc { /* This table should be sorted by PCI device ID */ -static struct pci_device_id divas_pci_tbl[] = { +static const struct pci_device_id divas_pci_tbl[] = { /* Diva Server BRI-2M PCI 0xE010 */ { PCI_VDEVICE(EICON, PCI_DEVICE_ID_EICON_MAESTRA), CARDTYPE_MAESTRA_PCI }, diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c index e3fa1cd64470..dce6632daae1 100644 --- a/drivers/isdn/hardware/mISDN/avmfritz.c +++ b/drivers/isdn/hardware/mISDN/avmfritz.c @@ -1142,7 +1142,7 @@ fritz_remove_pci(struct pci_dev *pdev) pr_info("%s: drvdata already removed\n", __func__); } -static struct pci_device_id fcpci_ids[] = { +static const struct pci_device_id fcpci_ids[] = { { PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (unsigned long) "Fritz!Card PCI"}, { PCI_VENDOR_ID_AVM, 
PCI_DEVICE_ID_AVM_A1_V2, PCI_ANY_ID, PCI_ANY_ID, diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index aea0c9616ea5..3cf07b8ced1c 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -5348,7 +5348,7 @@ static const struct hm_map hfcm_map[] = { #undef H #define H(x) ((unsigned long)&hfcm_map[x]) -static struct pci_device_id hfmultipci_ids[] = { +static const struct pci_device_id hfmultipci_ids[] = { /* Cards with HFC-4S Chip */ { PCI_VENDOR_ID_CCD, PCI_DEVICE_ID_CCD_HFC4S, PCI_VENDOR_ID_CCD, diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index 5dc246d71c16..d2e401a8090e 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -2161,7 +2161,7 @@ static const struct _hfc_map hfc_map[] = {}, }; -static struct pci_device_id hfc_ids[] = +static const struct pci_device_id hfc_ids[] = { { PCI_VDEVICE(CCD, PCI_DEVICE_ID_CCD_2BD0), (unsigned long) &hfc_map[0] }, diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index afde4edef9ae..6a6d848bd18e 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -1137,7 +1137,7 @@ static void nj_remove(struct pci_dev *pdev) /* We cannot select cards with PCI_SUB... IDs, since here are cards with * SUB IDs set to PCI_ANY_ID, so we need to match all and reject * known other cards which not work with this driver - see probe function */ -static struct pci_device_id nj_pci_ids[] = { +static const struct pci_device_id nj_pci_ids[] = { { PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { } diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c index 3052c836b89f..d80072fef434 100644 --- a/drivers/isdn/hardware/mISDN/w6692.c +++ b/drivers/isdn/hardware/mISDN/w6692.c @@ -1398,7 +1398,7 @@ w6692_remove_pci(struct pci_dev *pdev) pr_notice("%s: drvdata already removed\n", __func__); } -static struct pci_device_id w6692_ids[] = { +static const struct pci_device_id w6692_ids[] = { { PCI_VENDOR_ID_DYNALINK, PCI_DEVICE_ID_DYNALINK_IS64PH, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (ulong)&w6692_map[0]}, { PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_6692, diff --git a/drivers/isdn/hisax/config.c b/drivers/isdn/hisax/config.c index c7d68675b028..7108bdb8742e 100644 --- a/drivers/isdn/hisax/config.c +++ b/drivers/isdn/hisax/config.c @@ -1909,7 +1909,7 @@ static void EChannel_proc_rcv(struct hisax_d_if *d_if) #ifdef CONFIG_PCI #include <linux/pci.h> -static struct pci_device_id hisax_pci_tbl[] __used = { +static const struct pci_device_id hisax_pci_tbl[] __used = { #ifdef CONFIG_HISAX_FRITZPCI {PCI_VDEVICE(AVM, PCI_DEVICE_ID_AVM_A1) }, #endif diff --git a/drivers/isdn/hisax/hfc4s8s_l1.c b/drivers/isdn/hisax/hfc4s8s_l1.c index 90f051ce0259..9090cc1e1f29 100644 --- a/drivers/isdn/hisax/hfc4s8s_l1.c +++ b/drivers/isdn/hisax/hfc4s8s_l1.c @@ -86,7 +86,7 @@ typedef struct { char *device_name; } hfc4s8s_param; -static struct pci_device_id hfc4s8s_ids[] = { +static const struct pci_device_id hfc4s8s_ids[] = { {.vendor = PCI_VENDOR_ID_CCD, .device = PCI_DEVICE_ID_4S, .subvendor = 0x1397, diff --git a/drivers/isdn/hisax/hisax_fcpcipnp.c b/drivers/isdn/hisax/hisax_fcpcipnp.c index 5a9f39ed1d5d..e4f7573ba9bf 100644 --- a/drivers/isdn/hisax/hisax_fcpcipnp.c +++ b/drivers/isdn/hisax/hisax_fcpcipnp.c @@ -52,7 +52,7 @@ module_param(debug, int, 0); MODULE_AUTHOR("Kai Germaschewski 
<kai.germaschewski@gmx.de>/Karsten Keil <kkeil@suse.de>"); MODULE_DESCRIPTION("AVM Fritz!PCI/PnP ISDN driver"); -static struct pci_device_id fcpci_ids[] = { +static const struct pci_device_id fcpci_ids[] = { { .vendor = PCI_VENDOR_ID_AVM, .device = PCI_DEVICE_ID_AVM_A1, .subvendor = PCI_ANY_ID, diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c index 11fe0c5b2a9c..81501644fb15 100644 --- a/drivers/lightnvm/pblk-core.c +++ b/drivers/lightnvm/pblk-core.c @@ -1670,13 +1670,10 @@ void pblk_line_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv, queue_work(wq, &line_ws->ws); } -void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas, - unsigned long *lun_bitmap) +static void __pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, + int nr_ppas, int pos) { - struct nvm_tgt_dev *dev = pblk->dev; - struct nvm_geo *geo = &dev->geo; - struct pblk_lun *rlun; - int pos = pblk_ppa_to_pos(geo, ppa_list[0]); + struct pblk_lun *rlun = &pblk->luns[pos]; int ret; /* @@ -1690,14 +1687,8 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas, WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun || ppa_list[0].g.ch != ppa_list[i].g.ch); #endif - /* If the LUN has been locked for this same request, do no attempt to - * lock it again - */ - if (test_and_set_bit(pos, lun_bitmap)) - return; - rlun = &pblk->luns[pos]; - ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000)); + ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(30000)); if (ret) { switch (ret) { case -ETIME: @@ -1710,6 +1701,50 @@ void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas, } } +void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas) +{ + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + int pos = pblk_ppa_to_pos(geo, ppa_list[0]); + + __pblk_down_page(pblk, ppa_list, nr_ppas, pos); +} + +void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas, + unsigned long *lun_bitmap) +{ + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + int pos = pblk_ppa_to_pos(geo, ppa_list[0]); + + /* If the LUN has been locked for this same request, do no attempt to + * lock it again + */ + if (test_and_set_bit(pos, lun_bitmap)) + return; + + __pblk_down_page(pblk, ppa_list, nr_ppas, pos); +} + +void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas) +{ + struct nvm_tgt_dev *dev = pblk->dev; + struct nvm_geo *geo = &dev->geo; + struct pblk_lun *rlun; + int pos = pblk_ppa_to_pos(geo, ppa_list[0]); + +#ifdef CONFIG_NVM_DEBUG + int i; + + for (i = 1; i < nr_ppas; i++) + WARN_ON(ppa_list[0].g.lun != ppa_list[i].g.lun || + ppa_list[0].g.ch != ppa_list[i].g.ch); +#endif + + rlun = &pblk->luns[pos]; + up(&rlun->wr_sem); +} + void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas, unsigned long *lun_bitmap) { diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c index 0e48d3e4e143..cb556e06673e 100644 --- a/drivers/lightnvm/pblk-recovery.c +++ b/drivers/lightnvm/pblk-recovery.c @@ -340,9 +340,14 @@ static void pblk_end_io_recov(struct nvm_rq *rqd) struct pblk *pblk = pad_rq->pblk; struct nvm_tgt_dev *dev = pblk->dev; - kref_put(&pad_rq->ref, pblk_recov_complete); + pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas); + + bio_put(rqd->bio); nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list); pblk_free_rqd(pblk, rqd, WRITE); + + atomic_dec(&pblk->inflight_io); + kref_put(&pad_rq->ref, 
pblk_recov_complete); } static int pblk_recov_pad_oob(struct pblk *pblk, struct pblk_line *line, @@ -385,7 +390,7 @@ next_pad_rq: rq_ppas = pblk_calc_secs(pblk, left_ppas, 0); if (rq_ppas < pblk->min_write_pgs) { pr_err("pblk: corrupted pad line %d\n", line->id); - goto free_rq; + goto fail_free_pad; } rq_len = rq_ppas * geo->sec_size; @@ -393,7 +398,7 @@ next_pad_rq: meta_list = nvm_dev_dma_alloc(dev->parent, GFP_KERNEL, &dma_meta_list); if (!meta_list) { ret = -ENOMEM; - goto free_data; + goto fail_free_pad; } ppa_list = (void *)(meta_list) + pblk_dma_meta_size; @@ -404,9 +409,9 @@ next_pad_rq: ret = PTR_ERR(rqd); goto fail_free_meta; } - memset(rqd, 0, pblk_w_rq_size); - bio = bio_map_kern(dev->q, data, rq_len, GFP_KERNEL); + bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len, + PBLK_VMALLOC_META, GFP_KERNEL); if (IS_ERR(bio)) { ret = PTR_ERR(bio); goto fail_free_rqd; @@ -453,15 +458,15 @@ next_pad_rq: } kref_get(&pad_rq->ref); + pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas); ret = pblk_submit_io(pblk, rqd); if (ret) { pr_err("pblk: I/O submission failed: %d\n", ret); - goto free_data; + pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas); + goto fail_free_bio; } - atomic_dec(&pblk->inflight_io); - left_line_ppas -= rq_ppas; left_ppas -= rq_ppas; if (left_ppas && left_line_ppas) @@ -475,17 +480,23 @@ next_pad_rq: ret = -ETIME; } + if (!pblk_line_is_full(line)) + pr_err("pblk: corrupted padded line: %d\n", line->id); + + vfree(data); free_rq: kfree(pad_rq); -free_data: - vfree(data); return ret; +fail_free_bio: + bio_put(bio); fail_free_rqd: pblk_free_rqd(pblk, rqd, WRITE); fail_free_meta: nvm_dev_dma_free(dev->parent, meta_list, dma_meta_list); +fail_free_pad: kfree(pad_rq); + vfree(data); return ret; } diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c index d62a8f4faaf4..3ad9e56d2473 100644 --- a/drivers/lightnvm/pblk-write.c +++ b/drivers/lightnvm/pblk-write.c @@ -39,9 +39,7 @@ static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd, ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid); - if (rqd->meta_list) - nvm_dev_dma_free(dev->parent, rqd->meta_list, - rqd->dma_meta_list); + nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list); bio_put(rqd->bio); pblk_free_rqd(pblk, rqd, WRITE); @@ -178,15 +176,12 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd) { struct pblk *pblk = rqd->private; struct nvm_tgt_dev *dev = pblk->dev; - struct nvm_geo *geo = &dev->geo; struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd); struct pblk_line *line = m_ctx->private; struct pblk_emeta *emeta = line->emeta; - int pos = pblk_ppa_to_pos(geo, rqd->ppa_list[0]); - struct pblk_lun *rlun = &pblk->luns[pos]; int sync; - up(&rlun->wr_sem); + pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas); if (rqd->error) { pblk_log_write_err(pblk, rqd); @@ -203,6 +198,7 @@ static void pblk_end_io_write_meta(struct nvm_rq *rqd) pblk->close_wq); bio_put(rqd->bio); + nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list); pblk_free_rqd(pblk, rqd, READ); atomic_dec(&pblk->inflight_io); @@ -226,9 +222,6 @@ static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd, if (!rqd->meta_list) return -ENOMEM; - if (unlikely(nr_secs == 1)) - return 0; - rqd->ppa_list = rqd->meta_list + pblk_dma_meta_size; rqd->dma_ppa_list = rqd->dma_meta_list + pblk_dma_meta_size; @@ -367,7 +360,6 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line) struct pblk_line_meta *lm = &pblk->lm; struct pblk_emeta *emeta = meta_line->emeta; struct pblk_g_ctx *m_ctx; 
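
The pblk changes above factor the per-LUN write-semaphore handling into pblk_down_page()/pblk_up_page() pairs — taken just before I/O submission, released from the end_io path — and raise the down_timeout() bound from 5 s to 30 s. A hedged sketch of the bounded-acquire half, assuming only a struct semaphore embedded in some LUN object; names are illustrative:

#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/printk.h>
#include <linux/semaphore.h>

static int ex_down_lun(struct semaphore *wr_sem)
{
	/* Bounded wait: a wedged LUN surfaces as -ETIME rather than a hang. */
	int ret = down_timeout(wr_sem, msecs_to_jiffies(30000));

	switch (ret) {
	case 0:
		break;				/* semaphore acquired */
	case -ETIME:
		pr_err("ex: lun semaphore timed out\n");
		break;
	default:
		pr_err("ex: failed to take lun semaphore (%d)\n", ret);
		break;
	}

	return ret;
}

/* Released from the request's end_io callback, not from the submitter. */
static void ex_up_lun(struct semaphore *wr_sem)
{
	up(wr_sem);
}
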
- struct pblk_lun *rlun; struct bio *bio; struct nvm_rq *rqd; void *data; @@ -411,13 +403,6 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line) rqd->ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id); } - rlun = &pblk->luns[pblk_ppa_to_pos(geo, rqd->ppa_list[0])]; - ret = down_timeout(&rlun->wr_sem, msecs_to_jiffies(5000)); - if (ret) { - pr_err("pblk: lun semaphore timed out (%d)\n", ret); - goto fail_free_bio; - } - emeta->mem += rq_len; if (emeta->mem >= lm->emeta_len[0]) { spin_lock(&l_mg->close_lock); @@ -427,6 +412,8 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line) spin_unlock(&l_mg->close_lock); } + pblk_down_page(pblk, rqd->ppa_list, rqd->nr_ppas); + ret = pblk_submit_io(pblk, rqd); if (ret) { pr_err("pblk: emeta I/O submission failed: %d\n", ret); @@ -436,10 +423,13 @@ int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line) return NVM_IO_OK; fail_rollback: + pblk_up_page(pblk, rqd->ppa_list, rqd->nr_ppas); spin_lock(&l_mg->close_lock); pblk_dealloc_page(pblk, meta_line, rq_ppas); list_add(&meta_line->list, &meta_line->list); spin_unlock(&l_mg->close_lock); + + nvm_dev_dma_free(dev->parent, rqd->meta_list, rqd->dma_meta_list); fail_free_bio: if (likely(l_mg->emeta_alloc_type == PBLK_VMALLOC_META)) bio_put(bio); diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h index 15931381348c..0c5692cc2f60 100644 --- a/drivers/lightnvm/pblk.h +++ b/drivers/lightnvm/pblk.h @@ -739,8 +739,10 @@ u64 pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs); u64 __pblk_alloc_page(struct pblk *pblk, struct pblk_line *line, int nr_secs); int pblk_calc_secs(struct pblk *pblk, unsigned long secs_avail, unsigned long secs_to_flush); +void pblk_up_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas); void pblk_down_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas, unsigned long *lun_bitmap); +void pblk_down_page(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas); void pblk_up_rq(struct pblk *pblk, struct ppa_addr *ppa_list, int nr_ppas, unsigned long *lun_bitmap); void pblk_end_bio_sync(struct bio *bio); diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c index f4eace5ea184..40f3cd7eab0f 100644 --- a/drivers/md/bitmap.c +++ b/drivers/md/bitmap.c @@ -156,7 +156,8 @@ static int read_sb_page(struct mddev *mddev, loff_t offset, rdev_for_each(rdev, mddev) { if (! test_bit(In_sync, &rdev->flags) - || test_bit(Faulty, &rdev->flags)) + || test_bit(Faulty, &rdev->flags) + || test_bit(Bitmap_sync, &rdev->flags)) continue; target = offset + index * (PAGE_SIZE/512); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 10cabe961bdb..2edbcc2d7d3f 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1279,7 +1279,7 @@ static int clone_bio(struct dm_target_io *tio, struct bio *bio, clone->bi_iter.bi_size = to_bytes(len); if (unlikely(bio_integrity(bio) != NULL)) - bio_integrity_trim(clone, 0, len); + bio_integrity_trim(clone); return 0; } diff --git a/drivers/md/md.h b/drivers/md/md.h index 991f0fe2dcc6..b50eb4ac1b82 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -134,7 +134,9 @@ enum flag_bits { Faulty, /* device is known to have a fault */ In_sync, /* device is in_sync with rest of array */ Bitmap_sync, /* ..actually, not quite In_sync. Need a - * bitmap-based recovery to get fully in sync + * bitmap-based recovery to get fully in sync. + * The bit is only meaningful before device + * has been passed to pers->hot_add_disk. 
*/ WriteMostly, /* Avoid reading if at all possible */ AutoDetected, /* added by auto-detect */ diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 77cce3573aa8..44ad5baf3206 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c @@ -1150,7 +1150,7 @@ int ppl_init_log(struct r5conf *conf) goto err; } - ppl_conf->bs = bioset_create(conf->raid_disks, 0, 0); + ppl_conf->bs = bioset_create(conf->raid_disks, 0, BIOSET_NEED_BVECS); if (!ppl_conf->bs) { ret = -ENOMEM; goto err; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 2ceb338b094b..aeeb8d6854e2 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -7951,12 +7951,10 @@ static void end_reshape(struct r5conf *conf) { if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { - struct md_rdev *rdev; spin_lock_irq(&conf->device_lock); conf->previous_raid_disks = conf->raid_disks; - rdev_for_each(rdev, conf->mddev) - rdev->data_offset = rdev->new_data_offset; + md_finish_reshape(conf->mddev); smp_wmb(); conf->reshape_progress = MaxSector; conf->mddev->reshape_position = MaxSector; diff --git a/drivers/mfd/cros_ec.c b/drivers/mfd/cros_ec.c index dc6ce9091694..b0ca5a4c841e 100644 --- a/drivers/mfd/cros_ec.c +++ b/drivers/mfd/cros_ec.c @@ -54,12 +54,19 @@ static const struct mfd_cell ec_pd_cell = { static irqreturn_t ec_irq_thread(int irq, void *data) { struct cros_ec_device *ec_dev = data; + bool wake_event = true; int ret; - if (device_may_wakeup(ec_dev->dev)) + ret = cros_ec_get_next_event(ec_dev, &wake_event); + + /* + * Signal only if wake host events or any interrupt if + * cros_ec_get_next_event() returned an error (default value for + * wake_event is true) + */ + if (wake_event && device_may_wakeup(ec_dev->dev)) pm_wakeup_event(ec_dev->dev, 0); - ret = cros_ec_get_next_event(ec_dev); if (ret > 0) blocking_notifier_call_chain(&ec_dev->event_notifier, 0, ec_dev); @@ -224,7 +231,7 @@ EXPORT_SYMBOL(cros_ec_suspend); static void cros_ec_drain_events(struct cros_ec_device *ec_dev) { - while (cros_ec_get_next_event(ec_dev) > 0) + while (cros_ec_get_next_event(ec_dev, NULL) > 0) blocking_notifier_call_chain(&ec_dev->event_notifier, 1, ec_dev); } diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 0cfac2d39107..8ac59dc80f23 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -637,6 +637,9 @@ static int mmc_blk_ioctl_multi_cmd(struct block_device *bdev, sizeof(num_of_cmds))) return -EFAULT; + if (!num_of_cmds) + return 0; + if (num_of_cmds > MMC_IOC_MAX_CMDS) return -EINVAL; @@ -1182,7 +1185,7 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) switch (mq_rq->drv_op) { case MMC_DRV_OP_IOCTL: - for (i = 0; i < mq_rq->ioc_count; i++) { + for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) { ret = __mmc_blk_ioctl_cmd(card, md, mq_rq->idata[i]); if (ret) break; @@ -2167,6 +2170,7 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) * from being accepted. 
*/ card = md->queue.card; + blk_set_queue_dying(md->queue.queue); mmc_cleanup_queue(&md->queue); if (md->disk->flags & GENHD_FL_UP) { device_remove_file(disk_to_dev(md->disk), &md->force_ro); diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index cf66a3db71b8..ac678e9fb19a 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -45,6 +45,7 @@ #include <asm/cpu_device_id.h> #include <asm/intel-family.h> #include <asm/iosf_mbi.h> +#include <linux/pci.h> #endif #include "sdhci.h" @@ -134,6 +135,16 @@ static bool sdhci_acpi_byt(void) return x86_match_cpu(byt); } +static bool sdhci_acpi_cht(void) +{ + static const struct x86_cpu_id cht[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_ATOM_AIRMONT }, + {} + }; + + return x86_match_cpu(cht); +} + #define BYT_IOSF_SCCEP 0x63 #define BYT_IOSF_OCP_NETCTRL0 0x1078 #define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8) @@ -178,6 +189,45 @@ static bool sdhci_acpi_byt_defer(struct device *dev) return false; } +static bool sdhci_acpi_cht_pci_wifi(unsigned int vendor, unsigned int device, + unsigned int slot, unsigned int parent_slot) +{ + struct pci_dev *dev, *parent, *from = NULL; + + while (1) { + dev = pci_get_device(vendor, device, from); + pci_dev_put(from); + if (!dev) + break; + parent = pci_upstream_bridge(dev); + if (ACPI_COMPANION(&dev->dev) && PCI_SLOT(dev->devfn) == slot && + parent && PCI_SLOT(parent->devfn) == parent_slot && + !pci_upstream_bridge(parent)) { + pci_dev_put(dev); + return true; + } + from = dev; + } + + return false; +} + +/* + * GPDwin uses PCI wifi which conflicts with SDIO's use of + * acpi_device_fix_up_power() on child device nodes. Identifying GPDwin is + * problematic, but since SDIO is only used for wifi, the presence of the PCI + * wifi card in the expected slot with an ACPI companion node, is used to + * indicate that acpi_device_fix_up_power() should be avoided. 
+ */ +static inline bool sdhci_acpi_no_fixup_child_power(const char *hid, + const char *uid) +{ + return sdhci_acpi_cht() && + !strcmp(hid, "80860F14") && + !strcmp(uid, "2") && + sdhci_acpi_cht_pci_wifi(0x14e4, 0x43ec, 0, 28); +} + #else static inline void sdhci_acpi_byt_setting(struct device *dev) @@ -189,6 +239,12 @@ static inline bool sdhci_acpi_byt_defer(struct device *dev) return false; } +static inline bool sdhci_acpi_no_fixup_child_power(const char *hid, + const char *uid) +{ + return false; +} + #endif static int bxt_get_cd(struct mmc_host *mmc) @@ -389,18 +445,20 @@ static int sdhci_acpi_probe(struct platform_device *pdev) if (acpi_bus_get_device(handle, &device)) return -ENODEV; + hid = acpi_device_hid(device); + uid = device->pnp.unique_id; + /* Power on the SDHCI controller and its children */ acpi_device_fix_up_power(device); - list_for_each_entry(child, &device->children, node) - if (child->status.present && child->status.enabled) - acpi_device_fix_up_power(child); + if (!sdhci_acpi_no_fixup_child_power(hid, uid)) { + list_for_each_entry(child, &device->children, node) + if (child->status.present && child->status.enabled) + acpi_device_fix_up_power(child); + } if (sdhci_acpi_byt_defer(dev)) return -EPROBE_DEFER; - hid = acpi_device_hid(device); - uid = device->pnp.unique_id; - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iomem) return -ENOMEM; diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 82b80d42f7ae..88a94355ac90 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -409,30 +409,29 @@ static void tmio_mmc_transfer_data(struct tmio_mmc_host *host, * Transfer the data */ if (host->pdata->flags & TMIO_MMC_32BIT_DATA_PORT) { - u8 data[4] = { }; + u32 data = 0; + u32 *buf32 = (u32 *)buf; if (is_read) - sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf, + sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, buf32, count >> 2); else - sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, (u32 *)buf, + sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, buf32, count >> 2); /* if count was multiple of 4 */ if (!(count & 0x3)) return; - buf8 = (u8 *)(buf + (count >> 2)); + buf32 += count >> 2; count %= 4; if (is_read) { - sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, - (u32 *)data, 1); - memcpy(buf8, data, count); + sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, &data, 1); + memcpy(buf32, &data, count); } else { - memcpy(data, buf8, count); - sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, - (u32 *)data, 1); + memcpy(&data, buf32, count); + sd_ctrl_write32_rep(host, CTL_SD_DATA_PORT, &data, 1); } return; diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index e15a9733fcfd..9668616faf16 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1386,7 +1386,7 @@ static void wbsd_request_dma(struct wbsd_host *host, int dma) * order for ISA to be able to DMA to it. */ host->dma_buffer = kmalloc(WBSD_DMA_SIZE, - GFP_NOIO | GFP_DMA | __GFP_REPEAT | __GFP_NOWARN); + GFP_NOIO | GFP_DMA | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); if (!host->dma_buffer) goto free; diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig index e83a279f1217..5a2d71729b9a 100644 --- a/drivers/mtd/Kconfig +++ b/drivers/mtd/Kconfig @@ -155,6 +155,10 @@ config MTD_BCM47XX_PARTS This provides partitions parser for devices based on BCM47xx boards. 
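
The tmio_mmc_core.c hunk above fixes the sub-word tail of a 32-bit data-port transfer: whole words move directly to or from the caller's buffer, and the final one to three bytes are bounced through a spare u32 so nothing past the buffer is read or written. A reduced model of the read side; ex_pull_words() is a hypothetical stand-in for the real FIFO accessor (sd_ctrl_read32_rep() in the driver):

#include <linux/string.h>
#include <linux/types.h>

/* Stand-in for sd_ctrl_read32_rep(host, CTL_SD_DATA_PORT, ...). */
static void ex_pull_words(u32 *dst, size_t words)
{
	while (words--)
		*dst++ = 0;		/* a real FIFO read would go here */
}

static void ex_read(u8 *buf, size_t count)
{
	u32 *buf32 = (u32 *)buf;
	u32 data = 0;

	ex_pull_words(buf32, count >> 2);	/* bulk: whole 32-bit words */

	if (!(count & 0x3))
		return;				/* length was a multiple of 4 */

	buf32 += count >> 2;
	ex_pull_words(&data, 1);		/* one bounce word for the tail */
	memcpy(buf32, &data, count % 4);	/* copy only the valid bytes */
}
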
+menu "Partition parsers" +source "drivers/mtd/parsers/Kconfig" +endmenu + comment "User Modules And Translation Layers" # diff --git a/drivers/mtd/Makefile b/drivers/mtd/Makefile index 99bb9a1f6e16..151d60df303a 100644 --- a/drivers/mtd/Makefile +++ b/drivers/mtd/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_MTD_AFS_PARTS) += afs.o obj-$(CONFIG_MTD_AR7_PARTS) += ar7part.o obj-$(CONFIG_MTD_BCM63XX_PARTS) += bcm63xxpart.o obj-$(CONFIG_MTD_BCM47XX_PARTS) += bcm47xxpart.o +obj-y += parsers/ # 'Users' - code which presents functionality to userspace. obj-$(CONFIG_MTD_BLKDEVS) += mtd_blkdevs.o diff --git a/drivers/mtd/bcm47xxpart.c b/drivers/mtd/bcm47xxpart.c index d10fa6c8f074..fe2581d9d882 100644 --- a/drivers/mtd/bcm47xxpart.c +++ b/drivers/mtd/bcm47xxpart.c @@ -43,7 +43,8 @@ #define ML_MAGIC2 0x26594131 #define TRX_MAGIC 0x30524448 #define SHSQ_MAGIC 0x71736873 /* shsq (weird ZTE H218N endianness) */ -#define UBI_EC_MAGIC 0x23494255 /* UBI# */ + +static const char * const trx_types[] = { "trx", NULL }; struct trx_header { uint32_t magic; @@ -62,89 +63,6 @@ static void bcm47xxpart_add_part(struct mtd_partition *part, const char *name, part->mask_flags = mask_flags; } -static const char *bcm47xxpart_trx_data_part_name(struct mtd_info *master, - size_t offset) -{ - uint32_t buf; - size_t bytes_read; - int err; - - err = mtd_read(master, offset, sizeof(buf), &bytes_read, - (uint8_t *)&buf); - if (err && !mtd_is_bitflip(err)) { - pr_err("mtd_read error while parsing (offset: 0x%X): %d\n", - offset, err); - goto out_default; - } - - if (buf == UBI_EC_MAGIC) - return "ubi"; - -out_default: - return "rootfs"; -} - -static int bcm47xxpart_parse_trx(struct mtd_info *master, - struct mtd_partition *trx, - struct mtd_partition *parts, - size_t parts_len) -{ - struct trx_header header; - size_t bytes_read; - int curr_part = 0; - int i, err; - - if (parts_len < 3) { - pr_warn("No enough space to add TRX partitions!\n"); - return -ENOMEM; - } - - err = mtd_read(master, trx->offset, sizeof(header), &bytes_read, - (uint8_t *)&header); - if (err && !mtd_is_bitflip(err)) { - pr_err("mtd_read error while reading TRX header: %d\n", err); - return err; - } - - i = 0; - - /* We have LZMA loader if offset[2] points to sth */ - if (header.offset[2]) { - bcm47xxpart_add_part(&parts[curr_part++], "loader", - trx->offset + header.offset[i], 0); - i++; - } - - if (header.offset[i]) { - bcm47xxpart_add_part(&parts[curr_part++], "linux", - trx->offset + header.offset[i], 0); - i++; - } - - if (header.offset[i]) { - size_t offset = trx->offset + header.offset[i]; - const char *name = bcm47xxpart_trx_data_part_name(master, - offset); - - bcm47xxpart_add_part(&parts[curr_part++], name, offset, 0); - i++; - } - - /* - * Assume that every partition ends at the beginning of the one it is - * followed by. - */ - for (i = 0; i < curr_part; i++) { - u64 next_part_offset = (i < curr_part - 1) ? 
- parts[i + 1].offset : - trx->offset + trx->size; - - parts[i].size = next_part_offset - parts[i].offset; - } - - return curr_part; -} - /** * bcm47xxpart_bootpartition - gets index of TRX partition used by bootloader * @@ -362,17 +280,10 @@ static int bcm47xxpart_parse(struct mtd_info *master, for (i = 0; i < trx_num; i++) { struct mtd_partition *trx = &parts[trx_parts[i]]; - if (i == bcm47xxpart_bootpartition()) { - int num_parts; - - num_parts = bcm47xxpart_parse_trx(master, trx, - parts + curr_part, - BCM47XXPART_MAX_PARTS - curr_part); - if (num_parts > 0) - curr_part += num_parts; - } else { + if (i == bcm47xxpart_bootpartition()) + trx->types = trx_types; + else trx->name = "failsafe"; - } } *pparts = parts; diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c index 94d3eb42c4d5..7d342965f392 100644 --- a/drivers/mtd/chips/cfi_cmdset_0020.c +++ b/drivers/mtd/chips/cfi_cmdset_0020.c @@ -666,7 +666,7 @@ cfi_staa_writev(struct mtd_info *mtd, const struct kvec *vecs, size_t totlen = 0, thislen; int ret = 0; size_t buflen = 0; - static char *buffer; + char *buffer; if (!ECCBUF_SIZE) { /* We should fall back to a general writev implementation. diff --git a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig index 58329d2dacd1..6def5445e03e 100644 --- a/drivers/mtd/devices/Kconfig +++ b/drivers/mtd/devices/Kconfig @@ -95,6 +95,16 @@ config MTD_M25P80 if you want to specify device partitioning or to use a device which doesn't support the JEDEC ID instruction. +config MTD_MCHP23K256 + tristate "Microchip 23K256 SRAM" + depends on SPI_MASTER + help + This enables access to Microchip 23K256 SRAM chips, using SPI. + + Set up your spi devices with the right board-specific + platform data, or a device tree description if you want to + specify device partitioning + config MTD_SPEAR_SMI tristate "SPEAR MTD NOR Support through SMI controller" depends on PLAT_SPEAR diff --git a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile index 7912d3a0ee34..f0f767624cc6 100644 --- a/drivers/mtd/devices/Makefile +++ b/drivers/mtd/devices/Makefile @@ -12,6 +12,7 @@ obj-$(CONFIG_MTD_LART) += lart.o obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o obj-$(CONFIG_MTD_M25P80) += m25p80.o +obj-$(CONFIG_MTD_MCHP23K256) += mchp23k256.o obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o obj-$(CONFIG_MTD_SST25L) += sst25l.o obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c index c4df3b1bded0..00eea6fd379c 100644 --- a/drivers/mtd/devices/m25p80.c +++ b/drivers/mtd/devices/m25p80.c @@ -78,11 +78,17 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, { struct m25p *flash = nor->priv; struct spi_device *spi = flash->spi; - struct spi_transfer t[2] = {}; + unsigned int inst_nbits, addr_nbits, data_nbits, data_idx; + struct spi_transfer t[3] = {}; struct spi_message m; int cmd_sz = m25p_cmdsz(nor); ssize_t ret; + /* get transfer protocols. 
*/ + inst_nbits = spi_nor_get_protocol_inst_nbits(nor->write_proto); + addr_nbits = spi_nor_get_protocol_addr_nbits(nor->write_proto); + data_nbits = spi_nor_get_protocol_data_nbits(nor->write_proto); + spi_message_init(&m); if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) @@ -92,12 +98,27 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, m25p_addr2cmd(nor, to, flash->command); t[0].tx_buf = flash->command; + t[0].tx_nbits = inst_nbits; t[0].len = cmd_sz; spi_message_add_tail(&t[0], &m); - t[1].tx_buf = buf; - t[1].len = len; - spi_message_add_tail(&t[1], &m); + /* split the op code and address bytes into two transfers if needed. */ + data_idx = 1; + if (addr_nbits != inst_nbits) { + t[0].len = 1; + + t[1].tx_buf = &flash->command[1]; + t[1].tx_nbits = addr_nbits; + t[1].len = cmd_sz - 1; + spi_message_add_tail(&t[1], &m); + + data_idx = 2; + } + + t[data_idx].tx_buf = buf; + t[data_idx].tx_nbits = data_nbits; + t[data_idx].len = len; + spi_message_add_tail(&t[data_idx], &m); ret = spi_sync(spi, &m); if (ret) @@ -109,18 +130,6 @@ static ssize_t m25p80_write(struct spi_nor *nor, loff_t to, size_t len, return ret; } -static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor) -{ - switch (nor->flash_read) { - case SPI_NOR_DUAL: - return 2; - case SPI_NOR_QUAD: - return 4; - default: - return 0; - } -} - /* * Read an address range from the nor chip. The address range * may be any size provided it is within the physical boundaries. @@ -130,13 +139,20 @@ static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len, { struct m25p *flash = nor->priv; struct spi_device *spi = flash->spi; - struct spi_transfer t[2]; + unsigned int inst_nbits, addr_nbits, data_nbits, data_idx; + struct spi_transfer t[3]; struct spi_message m; unsigned int dummy = nor->read_dummy; ssize_t ret; + int cmd_sz; + + /* get transfer protocols. */ + inst_nbits = spi_nor_get_protocol_inst_nbits(nor->read_proto); + addr_nbits = spi_nor_get_protocol_addr_nbits(nor->read_proto); + data_nbits = spi_nor_get_protocol_data_nbits(nor->read_proto); /* convert the dummy cycles to the number of bytes */ - dummy /= 8; + dummy = (dummy * addr_nbits) / 8; if (spi_flash_read_supported(spi)) { struct spi_flash_read_message msg; @@ -149,10 +165,9 @@ static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len, msg.read_opcode = nor->read_opcode; msg.addr_width = nor->addr_width; msg.dummy_bytes = dummy; - /* TODO: Support other combinations */ - msg.opcode_nbits = SPI_NBITS_SINGLE; - msg.addr_nbits = SPI_NBITS_SINGLE; - msg.data_nbits = m25p80_rx_nbits(nor); + msg.opcode_nbits = inst_nbits; + msg.addr_nbits = addr_nbits; + msg.data_nbits = data_nbits; ret = spi_flash_read(spi, &msg); if (ret < 0) @@ -167,20 +182,45 @@ static ssize_t m25p80_read(struct spi_nor *nor, loff_t from, size_t len, m25p_addr2cmd(nor, from, flash->command); t[0].tx_buf = flash->command; + t[0].tx_nbits = inst_nbits; t[0].len = m25p_cmdsz(nor) + dummy; spi_message_add_tail(&t[0], &m); - t[1].rx_buf = buf; - t[1].rx_nbits = m25p80_rx_nbits(nor); - t[1].len = min3(len, spi_max_transfer_size(spi), - spi_max_message_size(spi) - t[0].len); - spi_message_add_tail(&t[1], &m); + /* + * Set all dummy/mode cycle bits to avoid sending some manufacturer + * specific pattern, which might make the memory enter its Continuous + * Read mode by mistake. 
+ * Based on the different mode cycle bit patterns listed and described + * in the JESD216B specification, the 0xff value works for all memories + * and all manufacturers. + */ + cmd_sz = t[0].len; + memset(flash->command + cmd_sz - dummy, 0xff, dummy); + + /* split the op code and address bytes into two transfers if needed. */ + data_idx = 1; + if (addr_nbits != inst_nbits) { + t[0].len = 1; + + t[1].tx_buf = &flash->command[1]; + t[1].tx_nbits = addr_nbits; + t[1].len = cmd_sz - 1; + spi_message_add_tail(&t[1], &m); + + data_idx = 2; + } + + t[data_idx].rx_buf = buf; + t[data_idx].rx_nbits = data_nbits; + t[data_idx].len = min3(len, spi_max_transfer_size(spi), + spi_max_message_size(spi) - cmd_sz); + spi_message_add_tail(&t[data_idx], &m); ret = spi_sync(spi, &m); if (ret) return ret; - ret = m.actual_length - m25p_cmdsz(nor) - dummy; + ret = m.actual_length - cmd_sz; if (ret < 0) return -EIO; return ret; @@ -196,7 +236,11 @@ static int m25p_probe(struct spi_device *spi) struct flash_platform_data *data; struct m25p *flash; struct spi_nor *nor; - enum read_mode mode = SPI_NOR_NORMAL; + struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; char *flash_name; int ret; @@ -221,10 +265,19 @@ static int m25p_probe(struct spi_device *spi) spi_set_drvdata(spi, flash); flash->spi = spi; - if (spi->mode & SPI_RX_QUAD) - mode = SPI_NOR_QUAD; - else if (spi->mode & SPI_RX_DUAL) - mode = SPI_NOR_DUAL; + if (spi->mode & SPI_RX_QUAD) { + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; + + if (spi->mode & SPI_TX_QUAD) + hwcaps.mask |= (SNOR_HWCAPS_READ_1_4_4 | + SNOR_HWCAPS_PP_1_1_4 | + SNOR_HWCAPS_PP_1_4_4); + } else if (spi->mode & SPI_RX_DUAL) { + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; + + if (spi->mode & SPI_TX_DUAL) + hwcaps.mask |= SNOR_HWCAPS_READ_1_2_2; + } if (data && data->name) nor->mtd.name = data->name; @@ -241,7 +294,7 @@ static int m25p_probe(struct spi_device *spi) else flash_name = spi->modalias; - ret = spi_nor_scan(nor, flash_name, mode); + ret = spi_nor_scan(nor, flash_name, &hwcaps); if (ret) return ret; diff --git a/drivers/mtd/devices/mchp23k256.c b/drivers/mtd/devices/mchp23k256.c new file mode 100644 index 000000000000..8956b7dcc984 --- /dev/null +++ b/drivers/mtd/devices/mchp23k256.c @@ -0,0 +1,236 @@ +/* + * mchp23k256.c + * + * Driver for Microchip 23k256 SPI RAM chips + * + * Copyright © 2016 Andrew Lunn <andrew@lunn.ch> + * + * This code is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + */ +#include <linux/device.h> +#include <linux/module.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/mutex.h> +#include <linux/sched.h> +#include <linux/sizes.h> +#include <linux/spi/flash.h> +#include <linux/spi/spi.h> +#include <linux/of_device.h> + +#define MAX_CMD_SIZE 4 + +struct mchp23_caps { + u8 addr_width; + unsigned int size; +}; + +struct mchp23k256_flash { + struct spi_device *spi; + struct mutex lock; + struct mtd_info mtd; + const struct mchp23_caps *caps; +}; + +#define MCHP23K256_CMD_WRITE_STATUS 0x01 +#define MCHP23K256_CMD_WRITE 0x02 +#define MCHP23K256_CMD_READ 0x03 +#define MCHP23K256_MODE_SEQ BIT(6) + +#define to_mchp23k256_flash(x) container_of(x, struct mchp23k256_flash, mtd) + +static void mchp23k256_addr2cmd(struct mchp23k256_flash *flash, + unsigned int addr, u8 *cmd) +{ + int i; + + /* + * Address is sent in big endian (MSB first) and we skip + * the first entry of the cmd array which contains the cmd + * opcode. + */ + for (i = flash->caps->addr_width; i > 0; i--, addr >>= 8) + cmd[i] = addr; +} + +static int mchp23k256_cmdsz(struct mchp23k256_flash *flash) +{ + return 1 + flash->caps->addr_width; +} + +static int mchp23k256_write(struct mtd_info *mtd, loff_t to, size_t len, + size_t *retlen, const unsigned char *buf) +{ + struct mchp23k256_flash *flash = to_mchp23k256_flash(mtd); + struct spi_transfer transfer[2] = {}; + struct spi_message message; + unsigned char command[MAX_CMD_SIZE]; + + spi_message_init(&message); + + command[0] = MCHP23K256_CMD_WRITE; + mchp23k256_addr2cmd(flash, to, command); + + transfer[0].tx_buf = command; + transfer[0].len = mchp23k256_cmdsz(flash); + spi_message_add_tail(&transfer[0], &message); + + transfer[1].tx_buf = buf; + transfer[1].len = len; + spi_message_add_tail(&transfer[1], &message); + + mutex_lock(&flash->lock); + + spi_sync(flash->spi, &message); + + if (retlen && message.actual_length > sizeof(command)) + *retlen += message.actual_length - sizeof(command); + + mutex_unlock(&flash->lock); + return 0; +} + +static int mchp23k256_read(struct mtd_info *mtd, loff_t from, size_t len, + size_t *retlen, unsigned char *buf) +{ + struct mchp23k256_flash *flash = to_mchp23k256_flash(mtd); + struct spi_transfer transfer[2] = {}; + struct spi_message message; + unsigned char command[MAX_CMD_SIZE]; + + spi_message_init(&message); + + memset(&transfer, 0, sizeof(transfer)); + command[0] = MCHP23K256_CMD_READ; + mchp23k256_addr2cmd(flash, from, command); + + transfer[0].tx_buf = command; + transfer[0].len = mchp23k256_cmdsz(flash); + spi_message_add_tail(&transfer[0], &message); + + transfer[1].rx_buf = buf; + transfer[1].len = len; + spi_message_add_tail(&transfer[1], &message); + + mutex_lock(&flash->lock); + + spi_sync(flash->spi, &message); + + if (retlen && message.actual_length > sizeof(command)) + *retlen += message.actual_length - sizeof(command); + + mutex_unlock(&flash->lock); + return 0; +} + +/* + * Set the device into sequential mode. 
This allows read/writes to the + * entire SRAM in a single operation + */ +static int mchp23k256_set_mode(struct spi_device *spi) +{ + struct spi_transfer transfer = {}; + struct spi_message message; + unsigned char command[2]; + + spi_message_init(&message); + + command[0] = MCHP23K256_CMD_WRITE_STATUS; + command[1] = MCHP23K256_MODE_SEQ; + + transfer.tx_buf = command; + transfer.len = sizeof(command); + spi_message_add_tail(&transfer, &message); + + return spi_sync(spi, &message); +} + +static const struct mchp23_caps mchp23k256_caps = { + .size = SZ_32K, + .addr_width = 2, +}; + +static const struct mchp23_caps mchp23lcv1024_caps = { + .size = SZ_128K, + .addr_width = 3, +}; + +static int mchp23k256_probe(struct spi_device *spi) +{ + struct mchp23k256_flash *flash; + struct flash_platform_data *data; + int err; + + flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL); + if (!flash) + return -ENOMEM; + + flash->spi = spi; + mutex_init(&flash->lock); + spi_set_drvdata(spi, flash); + + err = mchp23k256_set_mode(spi); + if (err) + return err; + + data = dev_get_platdata(&spi->dev); + + flash->caps = of_device_get_match_data(&spi->dev); + if (!flash->caps) + flash->caps = &mchp23k256_caps; + + mtd_set_of_node(&flash->mtd, spi->dev.of_node); + flash->mtd.dev.parent = &spi->dev; + flash->mtd.type = MTD_RAM; + flash->mtd.flags = MTD_CAP_RAM; + flash->mtd.writesize = 1; + flash->mtd.size = flash->caps->size; + flash->mtd._read = mchp23k256_read; + flash->mtd._write = mchp23k256_write; + + err = mtd_device_register(&flash->mtd, data ? data->parts : NULL, + data ? data->nr_parts : 0); + if (err) + return err; + + return 0; +} + +static int mchp23k256_remove(struct spi_device *spi) +{ + struct mchp23k256_flash *flash = spi_get_drvdata(spi); + + return mtd_device_unregister(&flash->mtd); +} + +static const struct of_device_id mchp23k256_of_table[] = { + { + .compatible = "microchip,mchp23k256", + .data = &mchp23k256_caps, + }, + { + .compatible = "microchip,mchp23lcv1024", + .data = &mchp23lcv1024_caps, + }, + {} +}; +MODULE_DEVICE_TABLE(of, mchp23k256_of_table); + +static struct spi_driver mchp23k256_driver = { + .driver = { + .name = "mchp23k256", + .of_match_table = of_match_ptr(mchp23k256_of_table), + }, + .probe = mchp23k256_probe, + .remove = mchp23k256_remove, +}; + +module_spi_driver(mchp23k256_driver); + +MODULE_DESCRIPTION("MTD SPI driver for MCHP23K256 RAM chips"); +MODULE_AUTHOR("Andrew Lunn <andre@lunn.ch>"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("spi:mchp23k256"); diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c index f9e9bd1cfaa0..5dc8bd042cc5 100644 --- a/drivers/mtd/devices/mtd_dataflash.c +++ b/drivers/mtd/devices/mtd_dataflash.c @@ -82,9 +82,13 @@ #define OP_WRITE_SECURITY_REVC 0x9A #define OP_WRITE_SECURITY 0x9B /* revision D */ +#define CFI_MFR_ATMEL 0x1F + +#define DATAFLASH_SHIFT_EXTID 24 +#define DATAFLASH_SHIFT_ID 40 struct dataflash { - uint8_t command[4]; + u8 command[4]; char name[24]; unsigned short page_offset; /* offset in flash address */ @@ -129,8 +133,7 @@ static int dataflash_waitready(struct spi_device *spi) for (;;) { status = dataflash_status(spi); if (status < 0) { - pr_debug("%s: status %d?\n", - dev_name(&spi->dev), status); + dev_dbg(&spi->dev, "status %d?\n", status); status = 0; } @@ -153,12 +156,11 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) struct spi_transfer x = { }; struct spi_message msg; unsigned blocksize = priv->page_size << 3; - uint8_t *command; - uint32_t rem; + 
u8 *command; + u32 rem; - pr_debug("%s: erase addr=0x%llx len 0x%llx\n", - dev_name(&spi->dev), (long long)instr->addr, - (long long)instr->len); + dev_dbg(&spi->dev, "erase addr=0x%llx len 0x%llx\n", + (long long)instr->addr, (long long)instr->len); div_u64_rem(instr->len, priv->page_size, &rem); if (rem) @@ -187,11 +189,11 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) pageaddr = pageaddr << priv->page_offset; command[0] = do_block ? OP_ERASE_BLOCK : OP_ERASE_PAGE; - command[1] = (uint8_t)(pageaddr >> 16); - command[2] = (uint8_t)(pageaddr >> 8); + command[1] = (u8)(pageaddr >> 16); + command[2] = (u8)(pageaddr >> 8); command[3] = 0; - pr_debug("ERASE %s: (%x) %x %x %x [%i]\n", + dev_dbg(&spi->dev, "ERASE %s: (%x) %x %x %x [%i]\n", do_block ? "block" : "page", command[0], command[1], command[2], command[3], pageaddr); @@ -200,8 +202,8 @@ static int dataflash_erase(struct mtd_info *mtd, struct erase_info *instr) (void) dataflash_waitready(spi); if (status < 0) { - printk(KERN_ERR "%s: erase %x, err %d\n", - dev_name(&spi->dev), pageaddr, status); + dev_err(&spi->dev, "erase %x, err %d\n", + pageaddr, status); /* REVISIT: can retry instr->retries times; or * giveup and instr->fail_addr = instr->addr; */ @@ -239,11 +241,11 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len, struct spi_transfer x[2] = { }; struct spi_message msg; unsigned int addr; - uint8_t *command; + u8 *command; int status; - pr_debug("%s: read 0x%x..0x%x\n", dev_name(&priv->spi->dev), - (unsigned)from, (unsigned)(from + len)); + dev_dbg(&priv->spi->dev, "read 0x%x..0x%x\n", + (unsigned int)from, (unsigned int)(from + len)); /* Calculate flash page/byte address */ addr = (((unsigned)from / priv->page_size) << priv->page_offset) @@ -251,7 +253,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len, command = priv->command; - pr_debug("READ: (%x) %x %x %x\n", + dev_dbg(&priv->spi->dev, "READ: (%x) %x %x %x\n", command[0], command[1], command[2], command[3]); spi_message_init(&msg); @@ -271,9 +273,9 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len, * fewer "don't care" bytes. Both buffers stay unchanged. 
*/ command[0] = OP_READ_CONTINUOUS; - command[1] = (uint8_t)(addr >> 16); - command[2] = (uint8_t)(addr >> 8); - command[3] = (uint8_t)(addr >> 0); + command[1] = (u8)(addr >> 16); + command[2] = (u8)(addr >> 8); + command[3] = (u8)(addr >> 0); /* plus 4 "don't care" bytes */ status = spi_sync(priv->spi, &msg); @@ -283,8 +285,7 @@ static int dataflash_read(struct mtd_info *mtd, loff_t from, size_t len, *retlen = msg.actual_length - 8; status = 0; } else - pr_debug("%s: read %x..%x --> %d\n", - dev_name(&priv->spi->dev), + dev_dbg(&priv->spi->dev, "read %x..%x --> %d\n", (unsigned)from, (unsigned)(from + len), status); return status; @@ -308,10 +309,10 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, size_t remaining = len; u_char *writebuf = (u_char *) buf; int status = -EINVAL; - uint8_t *command; + u8 *command; - pr_debug("%s: write 0x%x..0x%x\n", - dev_name(&spi->dev), (unsigned)to, (unsigned)(to + len)); + dev_dbg(&spi->dev, "write 0x%x..0x%x\n", + (unsigned int)to, (unsigned int)(to + len)); spi_message_init(&msg); @@ -328,7 +329,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, mutex_lock(&priv->lock); while (remaining > 0) { - pr_debug("write @ %i:%i len=%i\n", + dev_dbg(&spi->dev, "write @ %i:%i len=%i\n", pageaddr, offset, writelen); /* REVISIT: @@ -356,13 +357,13 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, command[2] = (addr & 0x0000FF00) >> 8; command[3] = 0; - pr_debug("TRANSFER: (%x) %x %x %x\n", + dev_dbg(&spi->dev, "TRANSFER: (%x) %x %x %x\n", command[0], command[1], command[2], command[3]); status = spi_sync(spi, &msg); if (status < 0) - pr_debug("%s: xfer %u -> %d\n", - dev_name(&spi->dev), addr, status); + dev_dbg(&spi->dev, "xfer %u -> %d\n", + addr, status); (void) dataflash_waitready(priv->spi); } @@ -374,7 +375,7 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, command[2] = (addr & 0x0000FF00) >> 8; command[3] = (addr & 0x000000FF); - pr_debug("PROGRAM: (%x) %x %x %x\n", + dev_dbg(&spi->dev, "PROGRAM: (%x) %x %x %x\n", command[0], command[1], command[2], command[3]); x[1].tx_buf = writebuf; @@ -383,8 +384,8 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, status = spi_sync(spi, &msg); spi_transfer_del(x + 1); if (status < 0) - pr_debug("%s: pgm %u/%u -> %d\n", - dev_name(&spi->dev), addr, writelen, status); + dev_dbg(&spi->dev, "pgm %u/%u -> %d\n", + addr, writelen, status); (void) dataflash_waitready(priv->spi); @@ -398,20 +399,20 @@ static int dataflash_write(struct mtd_info *mtd, loff_t to, size_t len, command[2] = (addr & 0x0000FF00) >> 8; command[3] = 0; - pr_debug("COMPARE: (%x) %x %x %x\n", + dev_dbg(&spi->dev, "COMPARE: (%x) %x %x %x\n", command[0], command[1], command[2], command[3]); status = spi_sync(spi, &msg); if (status < 0) - pr_debug("%s: compare %u -> %d\n", - dev_name(&spi->dev), addr, status); + dev_dbg(&spi->dev, "compare %u -> %d\n", + addr, status); status = dataflash_waitready(priv->spi); /* Check result of the compare operation */ if (status & (1 << 6)) { - printk(KERN_ERR "%s: compare page %u, err %d\n", - dev_name(&spi->dev), pageaddr, status); + dev_err(&spi->dev, "compare page %u, err %d\n", + pageaddr, status); remaining = 0; status = -EIO; break; @@ -455,11 +456,11 @@ static int dataflash_get_otp_info(struct mtd_info *mtd, size_t len, } static ssize_t otp_read(struct spi_device *spi, unsigned base, - uint8_t *buf, loff_t off, size_t len) + u8 *buf, loff_t off, size_t len) { struct spi_message m; size_t l; 
- uint8_t *scratch; + u8 *scratch; struct spi_transfer t; int status; @@ -538,7 +539,7 @@ static int dataflash_write_user_otp(struct mtd_info *mtd, { struct spi_message m; const size_t l = 4 + 64; - uint8_t *scratch; + u8 *scratch; struct spi_transfer t; struct dataflash *priv = mtd->priv; int status; @@ -689,14 +690,15 @@ struct flash_info { /* JEDEC id has a high byte of zero plus three data bytes: * the manufacturer id, then a two byte device id. */ - uint32_t jedec_id; + u64 jedec_id; /* The size listed here is what works with OP_ERASE_PAGE. */ unsigned nr_pages; - uint16_t pagesize; - uint16_t pageoffset; + u16 pagesize; + u16 pageoffset; - uint16_t flags; + u16 flags; +#define SUP_EXTID 0x0004 /* supports extended ID data */ #define SUP_POW2PS 0x0002 /* supports 2^N byte pages */ #define IS_POW2PS 0x0001 /* uses 2^N byte pages */ }; @@ -734,54 +736,32 @@ static struct flash_info dataflash_data[] = { { "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS}, { "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS}, + + { "AT45DB641E", 0x1f28000100, 32768, 264, 9, SUP_EXTID | SUP_POW2PS}, + { "at45db641e", 0x1f28000100, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS}, }; -static struct flash_info *jedec_probe(struct spi_device *spi) +static struct flash_info *jedec_lookup(struct spi_device *spi, + u64 jedec, bool use_extid) { - int tmp; - uint8_t code = OP_READ_ID; - uint8_t id[3]; - uint32_t jedec; - struct flash_info *info; + struct flash_info *info; int status; - /* JEDEC also defines an optional "extended device information" - * string for after vendor-specific data, after the three bytes - * we use here. Supporting some chips might require using it. - * - * If the vendor ID isn't Atmel's (0x1f), assume this call failed. - * That's not an error; only rev C and newer chips handle it, and - * only Atmel sells these chips. - */ - tmp = spi_write_then_read(spi, &code, 1, id, 3); - if (tmp < 0) { - pr_debug("%s: error %d reading JEDEC ID\n", - dev_name(&spi->dev), tmp); - return ERR_PTR(tmp); - } - if (id[0] != 0x1f) - return NULL; - - jedec = id[0]; - jedec = jedec << 8; - jedec |= id[1]; - jedec = jedec << 8; - jedec |= id[2]; + for (info = dataflash_data; + info < dataflash_data + ARRAY_SIZE(dataflash_data); + info++) { + if (use_extid && !(info->flags & SUP_EXTID)) + continue; - for (tmp = 0, info = dataflash_data; - tmp < ARRAY_SIZE(dataflash_data); - tmp++, info++) { if (info->jedec_id == jedec) { - pr_debug("%s: OTP, sector protect%s\n", - dev_name(&spi->dev), - (info->flags & SUP_POW2PS) - ? ", binary pagesize" : "" - ); + dev_dbg(&spi->dev, "OTP, sector protect%s\n", + (info->flags & SUP_POW2PS) ? + ", binary pagesize" : ""); if (info->flags & SUP_POW2PS) { status = dataflash_status(spi); if (status < 0) { - pr_debug("%s: status error %d\n", - dev_name(&spi->dev), status); + dev_dbg(&spi->dev, "status error %d\n", + status); return ERR_PTR(status); } if (status & 0x1) { @@ -796,12 +776,58 @@ static struct flash_info *jedec_probe(struct spi_device *spi) } } + return ERR_PTR(-ENODEV); +} + +static struct flash_info *jedec_probe(struct spi_device *spi) +{ + int ret; + u8 code = OP_READ_ID; + u64 jedec; + u8 id[sizeof(jedec)] = {0}; + const unsigned int id_size = 5; + struct flash_info *info; + + /* + * JEDEC also defines an optional "extended device information" + * string for after vendor-specific data, after the three bytes + * we use here. Supporting some chips might require using it. + * + * If the vendor ID isn't Atmel's (0x1f), assume this call failed. 
+ * That's not an error; only rev C and newer chips handle it, and + * only Atmel sells these chips. + */ + ret = spi_write_then_read(spi, &code, 1, id, id_size); + if (ret < 0) { + dev_dbg(&spi->dev, "error %d reading JEDEC ID\n", ret); + return ERR_PTR(ret); + } + + if (id[0] != CFI_MFR_ATMEL) + return NULL; + + jedec = be64_to_cpup((__be64 *)id); + + /* + * First, try to match device using extended device + * information + */ + info = jedec_lookup(spi, jedec >> DATAFLASH_SHIFT_EXTID, true); + if (!IS_ERR(info)) + return info; + /* + * If that fails, make another pass using regular ID + * information + */ + info = jedec_lookup(spi, jedec >> DATAFLASH_SHIFT_ID, false); + if (!IS_ERR(info)) + return info; /* * Treat other chips as errors ... we won't know the right page * size (it might be binary) even when we can tell which density * class is involved (legacy chip id scheme). */ - dev_warn(&spi->dev, "JEDEC id %06x not handled\n", jedec); + dev_warn(&spi->dev, "JEDEC id %016llx not handled\n", jedec); return ERR_PTR(-ENODEV); } @@ -845,8 +871,7 @@ static int dataflash_probe(struct spi_device *spi) */ status = dataflash_status(spi); if (status <= 0 || status == 0xff) { - pr_debug("%s: status error %d\n", - dev_name(&spi->dev), status); + dev_dbg(&spi->dev, "status error %d\n", status); if (status == 0 || status == 0xff) status = -ENODEV; return status; @@ -887,8 +912,7 @@ static int dataflash_probe(struct spi_device *spi) } if (status < 0) - pr_debug("%s: add_dataflash --> %d\n", dev_name(&spi->dev), - status); + dev_dbg(&spi->dev, "add_dataflash --> %d\n", status); return status; } @@ -898,7 +922,7 @@ static int dataflash_remove(struct spi_device *spi) struct dataflash *flash = spi_get_drvdata(spi); int status; - pr_debug("%s: remove\n", dev_name(&spi->dev)); + dev_dbg(&spi->dev, "remove\n"); status = mtd_device_unregister(&flash->mtd); if (status == 0) diff --git a/drivers/mtd/devices/serial_flash_cmds.h b/drivers/mtd/devices/serial_flash_cmds.h index 8b81e15105dd..eba125c9f23f 100644 --- a/drivers/mtd/devices/serial_flash_cmds.h +++ b/drivers/mtd/devices/serial_flash_cmds.h @@ -13,7 +13,6 @@ #define _MTD_SERIAL_FLASH_CMDS_H /* Generic Flash Commands/OPCODEs */ -#define SPINOR_OP_RDSR2 0x35 #define SPINOR_OP_WRVCR 0x81 #define SPINOR_OP_RDVCR 0x85 diff --git a/drivers/mtd/devices/st_spi_fsm.c b/drivers/mtd/devices/st_spi_fsm.c index 804313a33f2b..21afd94cd904 100644 --- a/drivers/mtd/devices/st_spi_fsm.c +++ b/drivers/mtd/devices/st_spi_fsm.c @@ -1445,7 +1445,7 @@ static int stfsm_s25fl_config(struct stfsm *fsm) } /* Check status of 'QE' bit, update if required. */ - stfsm_read_status(fsm, SPINOR_OP_RDSR2, &cr1, 1); + stfsm_read_status(fsm, SPINOR_OP_RDCR, &cr1, 1); data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1; if (data_pads == 4) { if (!(cr1 & STFSM_S25FL_CONFIG_QE)) { @@ -1490,7 +1490,7 @@ static int stfsm_w25q_config(struct stfsm *fsm) return ret; /* Check status of 'QE' bit, update if required. 
*/ - stfsm_read_status(fsm, SPINOR_OP_RDSR2, &sr2, 1); + stfsm_read_status(fsm, SPINOR_OP_RDCR, &sr2, 1); data_pads = ((fsm->stfsm_seq_read.seq_cfg >> 16) & 0x3) + 1; if (data_pads == 4) { if (!(sr2 & W25Q_STATUS_QE)) { diff --git a/drivers/mtd/maps/physmap_of_gemini.c b/drivers/mtd/maps/physmap_of_gemini.c index 9d371cd728ea..05b286b5289f 100644 --- a/drivers/mtd/maps/physmap_of_gemini.c +++ b/drivers/mtd/maps/physmap_of_gemini.c @@ -59,7 +59,7 @@ int of_flash_probe_gemini(struct platform_device *pdev, struct device_node *np, struct map_info *map) { - static struct regmap *rmap; + struct regmap *rmap; struct device *dev = &pdev->dev; u32 val; int ret; diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 1517da3ddd7d..956382cea256 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -991,7 +991,7 @@ EXPORT_SYMBOL_GPL(mtd_point); /* We probably shouldn't allow XIP if the unpoint isn't a NULL */ int mtd_unpoint(struct mtd_info *mtd, loff_t from, size_t len) { - if (!mtd->_point) + if (!mtd->_unpoint) return -EOPNOTSUPP; if (from < 0 || from >= mtd->size || len > mtd->size - from) return -EINVAL; diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c index ea5e5307f667..5736b0c90b33 100644 --- a/drivers/mtd/mtdpart.c +++ b/drivers/mtd/mtdpart.c @@ -37,10 +37,16 @@ static LIST_HEAD(mtd_partitions); static DEFINE_MUTEX(mtd_partitions_mutex); -/* Our partition node structure */ +/** + * struct mtd_part - our partition node structure + * + * @mtd: struct holding partition details + * @parent: parent mtd - flash device or another partition + * @offset: partition offset relative to the *flash device* + */ struct mtd_part { struct mtd_info mtd; - struct mtd_info *master; + struct mtd_info *parent; uint64_t offset; struct list_head list; }; @@ -67,15 +73,15 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len, struct mtd_ecc_stats stats; int res; - stats = part->master->ecc_stats; - res = part->master->_read(part->master, from + part->offset, len, + stats = part->parent->ecc_stats; + res = part->parent->_read(part->parent, from + part->offset, len, retlen, buf); if (unlikely(mtd_is_eccerr(res))) mtd->ecc_stats.failed += - part->master->ecc_stats.failed - stats.failed; + part->parent->ecc_stats.failed - stats.failed; else mtd->ecc_stats.corrected += - part->master->ecc_stats.corrected - stats.corrected; + part->parent->ecc_stats.corrected - stats.corrected; return res; } @@ -84,7 +90,7 @@ static int part_point(struct mtd_info *mtd, loff_t from, size_t len, { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_point(part->master, from + part->offset, len, + return part->parent->_point(part->parent, from + part->offset, len, retlen, virt, phys); } @@ -92,7 +98,7 @@ static int part_unpoint(struct mtd_info *mtd, loff_t from, size_t len) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_unpoint(part->master, from + part->offset, len); + return part->parent->_unpoint(part->parent, from + part->offset, len); } static unsigned long part_get_unmapped_area(struct mtd_info *mtd, @@ -103,7 +109,7 @@ static unsigned long part_get_unmapped_area(struct mtd_info *mtd, struct mtd_part *part = mtd_to_part(mtd); offset += part->offset; - return part->master->_get_unmapped_area(part->master, len, offset, + return part->parent->_get_unmapped_area(part->parent, len, offset, flags); } @@ -132,7 +138,7 @@ static int part_read_oob(struct mtd_info *mtd, loff_t from, return -EINVAL; } - res = part->master->_read_oob(part->master, from + part->offset, 
ops); + res = part->parent->_read_oob(part->parent, from + part->offset, ops); if (unlikely(res)) { if (mtd_is_bitflip(res)) mtd->ecc_stats.corrected++; @@ -146,7 +152,7 @@ static int part_read_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_read_user_prot_reg(part->master, from, len, + return part->parent->_read_user_prot_reg(part->parent, from, len, retlen, buf); } @@ -154,7 +160,7 @@ static int part_get_user_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, struct otp_info *buf) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_get_user_prot_info(part->master, len, retlen, + return part->parent->_get_user_prot_info(part->parent, len, retlen, buf); } @@ -162,7 +168,7 @@ static int part_read_fact_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_read_fact_prot_reg(part->master, from, len, + return part->parent->_read_fact_prot_reg(part->parent, from, len, retlen, buf); } @@ -170,7 +176,7 @@ static int part_get_fact_prot_info(struct mtd_info *mtd, size_t len, size_t *retlen, struct otp_info *buf) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_get_fact_prot_info(part->master, len, retlen, + return part->parent->_get_fact_prot_info(part->parent, len, retlen, buf); } @@ -178,7 +184,7 @@ static int part_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_write(part->master, to + part->offset, len, + return part->parent->_write(part->parent, to + part->offset, len, retlen, buf); } @@ -186,7 +192,7 @@ static int part_panic_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_panic_write(part->master, to + part->offset, len, + return part->parent->_panic_write(part->parent, to + part->offset, len, retlen, buf); } @@ -199,14 +205,14 @@ static int part_write_oob(struct mtd_info *mtd, loff_t to, return -EINVAL; if (ops->datbuf && to + ops->len > mtd->size) return -EINVAL; - return part->master->_write_oob(part->master, to + part->offset, ops); + return part->parent->_write_oob(part->parent, to + part->offset, ops); } static int part_write_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_write_user_prot_reg(part->master, from, len, + return part->parent->_write_user_prot_reg(part->parent, from, len, retlen, buf); } @@ -214,14 +220,14 @@ static int part_lock_user_prot_reg(struct mtd_info *mtd, loff_t from, size_t len) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_lock_user_prot_reg(part->master, from, len); + return part->parent->_lock_user_prot_reg(part->parent, from, len); } static int part_writev(struct mtd_info *mtd, const struct kvec *vecs, unsigned long count, loff_t to, size_t *retlen) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_writev(part->master, vecs, count, + return part->parent->_writev(part->parent, vecs, count, to + part->offset, retlen); } @@ -231,7 +237,7 @@ static int part_erase(struct mtd_info *mtd, struct erase_info *instr) int ret; instr->addr += part->offset; - ret = part->master->_erase(part->master, instr); + ret = part->parent->_erase(part->parent, instr); if (ret) { 
if (instr->fail_addr != MTD_FAIL_ADDR_UNKNOWN) instr->fail_addr -= part->offset; @@ -257,51 +263,51 @@ EXPORT_SYMBOL_GPL(mtd_erase_callback); static int part_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_lock(part->master, ofs + part->offset, len); + return part->parent->_lock(part->parent, ofs + part->offset, len); } static int part_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_unlock(part->master, ofs + part->offset, len); + return part->parent->_unlock(part->parent, ofs + part->offset, len); } static int part_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_is_locked(part->master, ofs + part->offset, len); + return part->parent->_is_locked(part->parent, ofs + part->offset, len); } static void part_sync(struct mtd_info *mtd) { struct mtd_part *part = mtd_to_part(mtd); - part->master->_sync(part->master); + part->parent->_sync(part->parent); } static int part_suspend(struct mtd_info *mtd) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_suspend(part->master); + return part->parent->_suspend(part->parent); } static void part_resume(struct mtd_info *mtd) { struct mtd_part *part = mtd_to_part(mtd); - part->master->_resume(part->master); + part->parent->_resume(part->parent); } static int part_block_isreserved(struct mtd_info *mtd, loff_t ofs) { struct mtd_part *part = mtd_to_part(mtd); ofs += part->offset; - return part->master->_block_isreserved(part->master, ofs); + return part->parent->_block_isreserved(part->parent, ofs); } static int part_block_isbad(struct mtd_info *mtd, loff_t ofs) { struct mtd_part *part = mtd_to_part(mtd); ofs += part->offset; - return part->master->_block_isbad(part->master, ofs); + return part->parent->_block_isbad(part->parent, ofs); } static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) @@ -310,7 +316,7 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) int res; ofs += part->offset; - res = part->master->_block_markbad(part->master, ofs); + res = part->parent->_block_markbad(part->parent, ofs); if (!res) mtd->ecc_stats.badblocks++; return res; @@ -319,13 +325,13 @@ static int part_block_markbad(struct mtd_info *mtd, loff_t ofs) static int part_get_device(struct mtd_info *mtd) { struct mtd_part *part = mtd_to_part(mtd); - return part->master->_get_device(part->master); + return part->parent->_get_device(part->parent); } static void part_put_device(struct mtd_info *mtd) { struct mtd_part *part = mtd_to_part(mtd); - part->master->_put_device(part->master); + part->parent->_put_device(part->parent); } static int part_ooblayout_ecc(struct mtd_info *mtd, int section, @@ -333,7 +339,7 @@ static int part_ooblayout_ecc(struct mtd_info *mtd, int section, { struct mtd_part *part = mtd_to_part(mtd); - return mtd_ooblayout_ecc(part->master, section, oobregion); + return mtd_ooblayout_ecc(part->parent, section, oobregion); } static int part_ooblayout_free(struct mtd_info *mtd, int section, @@ -341,7 +347,7 @@ static int part_ooblayout_free(struct mtd_info *mtd, int section, { struct mtd_part *part = mtd_to_part(mtd); - return mtd_ooblayout_free(part->master, section, oobregion); + return mtd_ooblayout_free(part->parent, section, oobregion); } static const struct mtd_ooblayout_ops part_ooblayout_ops = { @@ -353,7 +359,7 @@ static int part_max_bad_blocks(struct mtd_info *mtd, loff_t ofs, size_t len) { struct 
mtd_part *part = mtd_to_part(mtd); - return part->master->_max_bad_blocks(part->master, + return part->parent->_max_bad_blocks(part->parent, ofs + part->offset, len); } @@ -363,63 +369,70 @@ static inline void free_partition(struct mtd_part *p) kfree(p); } -/* - * This function unregisters and destroy all slave MTD objects which are - * attached to the given master MTD object. +/** + * mtd_parse_part - parse MTD partition looking for subpartitions + * + * @slave: part that is supposed to be a container and should be parsed + * @types: NULL-terminated array with names of partition parsers to try + * + * Some partitions are kind of containers with extra subpartitions (volumes). + * There can be various formats of such containers. This function tries to use + * specified parsers to analyze given partition and registers found + * subpartitions on success. */ - -int del_mtd_partitions(struct mtd_info *master) +static int mtd_parse_part(struct mtd_part *slave, const char *const *types) { - struct mtd_part *slave, *next; - int ret, err = 0; + struct mtd_partitions parsed; + int err; - mutex_lock(&mtd_partitions_mutex); - list_for_each_entry_safe(slave, next, &mtd_partitions, list) - if (slave->master == master) { - ret = del_mtd_device(&slave->mtd); - if (ret < 0) { - err = ret; - continue; - } - list_del(&slave->list); - free_partition(slave); - } - mutex_unlock(&mtd_partitions_mutex); + err = parse_mtd_partitions(&slave->mtd, types, &parsed, NULL); + if (err) + return err; + else if (!parsed.nr_parts) + return -ENOENT; + + err = add_mtd_partitions(&slave->mtd, parsed.parts, parsed.nr_parts); + + mtd_part_parser_cleanup(&parsed); return err; } -static struct mtd_part *allocate_partition(struct mtd_info *master, +static struct mtd_part *allocate_partition(struct mtd_info *parent, const struct mtd_partition *part, int partno, uint64_t cur_offset) { + int wr_alignment = (parent->flags & MTD_NO_ERASE) ? parent->writesize : + parent->erasesize; struct mtd_part *slave; + u32 remainder; char *name; + u64 tmp; /* allocate the partition structure */ slave = kzalloc(sizeof(*slave), GFP_KERNEL); name = kstrdup(part->name, GFP_KERNEL); if (!name || !slave) { printk(KERN_ERR"memory allocation error while creating partitions for \"%s\"\n", - master->name); + parent->name); kfree(name); kfree(slave); return ERR_PTR(-ENOMEM); } /* set up the MTD object for this partition */ - slave->mtd.type = master->type; - slave->mtd.flags = master->flags & ~part->mask_flags; + slave->mtd.type = parent->type; + slave->mtd.flags = parent->flags & ~part->mask_flags; slave->mtd.size = part->size; - slave->mtd.writesize = master->writesize; - slave->mtd.writebufsize = master->writebufsize; - slave->mtd.oobsize = master->oobsize; - slave->mtd.oobavail = master->oobavail; - slave->mtd.subpage_sft = master->subpage_sft; - slave->mtd.pairing = master->pairing; + slave->mtd.writesize = parent->writesize; + slave->mtd.writebufsize = parent->writebufsize; + slave->mtd.oobsize = parent->oobsize; + slave->mtd.oobavail = parent->oobavail; + slave->mtd.subpage_sft = parent->subpage_sft; + slave->mtd.pairing = parent->pairing; slave->mtd.name = name; - slave->mtd.owner = master->owner; + slave->mtd.owner = parent->owner; /* NOTE: Historically, we didn't arrange MTDs as a tree out of * concern for showing the same data in multiple partitions. @@ -429,80 +442,81 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, * parent conditional on that option. 
Note, this is a way to * distinguish between the master and the partition in sysfs. */ - slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) ? - &master->dev : - master->dev.parent; + slave->mtd.dev.parent = IS_ENABLED(CONFIG_MTD_PARTITIONED_MASTER) || mtd_is_partition(parent) ? + &parent->dev : + parent->dev.parent; slave->mtd.dev.of_node = part->of_node; slave->mtd._read = part_read; slave->mtd._write = part_write; - if (master->_panic_write) + if (parent->_panic_write) slave->mtd._panic_write = part_panic_write; - if (master->_point && master->_unpoint) { + if (parent->_point && parent->_unpoint) { slave->mtd._point = part_point; slave->mtd._unpoint = part_unpoint; } - if (master->_get_unmapped_area) + if (parent->_get_unmapped_area) slave->mtd._get_unmapped_area = part_get_unmapped_area; - if (master->_read_oob) + if (parent->_read_oob) slave->mtd._read_oob = part_read_oob; - if (master->_write_oob) + if (parent->_write_oob) slave->mtd._write_oob = part_write_oob; - if (master->_read_user_prot_reg) + if (parent->_read_user_prot_reg) slave->mtd._read_user_prot_reg = part_read_user_prot_reg; - if (master->_read_fact_prot_reg) + if (parent->_read_fact_prot_reg) slave->mtd._read_fact_prot_reg = part_read_fact_prot_reg; - if (master->_write_user_prot_reg) + if (parent->_write_user_prot_reg) slave->mtd._write_user_prot_reg = part_write_user_prot_reg; - if (master->_lock_user_prot_reg) + if (parent->_lock_user_prot_reg) slave->mtd._lock_user_prot_reg = part_lock_user_prot_reg; - if (master->_get_user_prot_info) + if (parent->_get_user_prot_info) slave->mtd._get_user_prot_info = part_get_user_prot_info; - if (master->_get_fact_prot_info) + if (parent->_get_fact_prot_info) slave->mtd._get_fact_prot_info = part_get_fact_prot_info; - if (master->_sync) + if (parent->_sync) slave->mtd._sync = part_sync; - if (!partno && !master->dev.class && master->_suspend && - master->_resume) { - slave->mtd._suspend = part_suspend; - slave->mtd._resume = part_resume; + if (!partno && !parent->dev.class && parent->_suspend && + parent->_resume) { + slave->mtd._suspend = part_suspend; + slave->mtd._resume = part_resume; } - if (master->_writev) + if (parent->_writev) slave->mtd._writev = part_writev; - if (master->_lock) + if (parent->_lock) slave->mtd._lock = part_lock; - if (master->_unlock) + if (parent->_unlock) slave->mtd._unlock = part_unlock; - if (master->_is_locked) + if (parent->_is_locked) slave->mtd._is_locked = part_is_locked; - if (master->_block_isreserved) + if (parent->_block_isreserved) slave->mtd._block_isreserved = part_block_isreserved; - if (master->_block_isbad) + if (parent->_block_isbad) slave->mtd._block_isbad = part_block_isbad; - if (master->_block_markbad) + if (parent->_block_markbad) slave->mtd._block_markbad = part_block_markbad; - if (master->_max_bad_blocks) + if (parent->_max_bad_blocks) slave->mtd._max_bad_blocks = part_max_bad_blocks; - if (master->_get_device) + if (parent->_get_device) slave->mtd._get_device = part_get_device; - if (master->_put_device) + if (parent->_put_device) slave->mtd._put_device = part_put_device; slave->mtd._erase = part_erase; - slave->master = master; + slave->parent = parent; slave->offset = part->offset; if (slave->offset == MTDPART_OFS_APPEND) slave->offset = cur_offset; if (slave->offset == MTDPART_OFS_NXTBLK) { + tmp = cur_offset; slave->offset = cur_offset; - if (mtd_mod_by_eb(cur_offset, master) != 0) { - /* Round up to next erasesize */ - slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize; + remainder 
= do_div(tmp, wr_alignment); + if (remainder) { + slave->offset += wr_alignment - remainder; printk(KERN_NOTICE "Moving partition %d: " "0x%012llx -> 0x%012llx\n", partno, (unsigned long long)cur_offset, (unsigned long long)slave->offset); @@ -510,25 +524,25 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, } if (slave->offset == MTDPART_OFS_RETAIN) { slave->offset = cur_offset; - if (master->size - slave->offset >= slave->mtd.size) { - slave->mtd.size = master->size - slave->offset + if (parent->size - slave->offset >= slave->mtd.size) { + slave->mtd.size = parent->size - slave->offset - slave->mtd.size; } else { printk(KERN_ERR "mtd partition \"%s\" doesn't have enough space: %#llx < %#llx, disabled\n", - part->name, master->size - slave->offset, + part->name, parent->size - slave->offset, slave->mtd.size); /* register to preserve ordering */ goto out_register; } } if (slave->mtd.size == MTDPART_SIZ_FULL) - slave->mtd.size = master->size - slave->offset; + slave->mtd.size = parent->size - slave->offset; printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset, (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name); /* let's do some sanity checks */ - if (slave->offset >= master->size) { + if (slave->offset >= parent->size) { /* let's register it anyway to preserve ordering */ slave->offset = 0; slave->mtd.size = 0; @@ -536,16 +550,16 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, part->name); goto out_register; } - if (slave->offset + slave->mtd.size > master->size) { - slave->mtd.size = master->size - slave->offset; + if (slave->offset + slave->mtd.size > parent->size) { + slave->mtd.size = parent->size - slave->offset; printk(KERN_WARNING"mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#llx\n", - part->name, master->name, (unsigned long long)slave->mtd.size); + part->name, parent->name, (unsigned long long)slave->mtd.size); } - if (master->numeraseregions > 1) { + if (parent->numeraseregions > 1) { /* Deal with variable erase size stuff */ - int i, max = master->numeraseregions; + int i, max = parent->numeraseregions; u64 end = slave->offset + slave->mtd.size; - struct mtd_erase_region_info *regions = master->eraseregions; + struct mtd_erase_region_info *regions = parent->eraseregions; /* Find the first erase region that is part of this * partition. */
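The MTDPART_OFS_NXTBLK handling above rounds a partition's start up to the next wr_alignment boundary (an erase block, or a write page for MTD_NO_ERASE devices), using do_div() so the 64-bit offset arithmetic also works on 32-bit hosts. A minimal standalone sketch of that rounding, with hypothetical numbers and a plain modulo standing in for do_div():

	#include <stdio.h>
	#include <stdint.h>

	static uint64_t align_up(uint64_t offset, uint32_t wr_alignment)
	{
		uint32_t remainder = offset % wr_alignment; /* do_div() in the kernel */

		if (remainder)
			offset += wr_alignment - remainder;
		return offset;
	}

	int main(void)
	{
		/* e.g. 128 KiB erase blocks, previous partition ends at 0x21000 */
		printf("0x%llx\n", (unsigned long long)align_up(0x21000, 0x20000));
		/* prints 0x40000 */
		return 0;
	}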
@@ -564,37 +578,40 @@ static struct mtd_part *allocate_partition(struct mtd_info *master, BUG_ON(slave->mtd.erasesize == 0); } else { /* Single erase size */ - slave->mtd.erasesize = master->erasesize; + slave->mtd.erasesize = parent->erasesize; } - if ((slave->mtd.flags & MTD_WRITEABLE) && - mtd_mod_by_eb(slave->offset, &slave->mtd)) { + tmp = slave->offset; + remainder = do_div(tmp, wr_alignment); + if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { /* Doesn't start on a boundary of major erase size */ /* FIXME: Let it be writable if it is on a boundary of * _minor_ erase size though */ slave->mtd.flags &= ~MTD_WRITEABLE; - printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n", + printk(KERN_WARNING"mtd: partition \"%s\" doesn't start on an erase/write block boundary -- force read-only\n", part->name); } - if ((slave->mtd.flags & MTD_WRITEABLE) && - mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) { + + tmp = slave->mtd.size; + remainder = do_div(tmp, wr_alignment); + if ((slave->mtd.flags & MTD_WRITEABLE) && remainder) { slave->mtd.flags &= ~MTD_WRITEABLE; - printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n", + printk(KERN_WARNING"mtd: partition \"%s\" doesn't end on an erase/write block -- force read-only\n", part->name); } mtd_set_ooblayout(&slave->mtd, &part_ooblayout_ops); - slave->mtd.ecc_step_size = master->ecc_step_size; - slave->mtd.ecc_strength = master->ecc_strength; - slave->mtd.bitflip_threshold = master->bitflip_threshold; + slave->mtd.ecc_step_size = parent->ecc_step_size; + slave->mtd.ecc_strength = parent->ecc_strength; + slave->mtd.bitflip_threshold = parent->bitflip_threshold; - if (master->_block_isbad) { + if (parent->_block_isbad) { uint64_t offs = 0; while (offs < slave->mtd.size) { - if (mtd_block_isreserved(master, offs + slave->offset)) + if (mtd_block_isreserved(parent, offs + slave->offset)) slave->mtd.ecc_stats.bbtblocks++; - else if (mtd_block_isbad(master, offs + slave->offset)) + else if (mtd_block_isbad(parent, offs + slave->offset)) slave->mtd.ecc_stats.badblocks++; offs += slave->mtd.erasesize; } @@ -628,7 +645,7 @@ static int mtd_add_partition_attrs(struct mtd_part *new) return ret; } -int mtd_add_partition(struct mtd_info *master, const char *name, +int mtd_add_partition(struct mtd_info *parent, const char *name, long long offset, long long length) { struct mtd_partition part; @@ -641,7 +658,7 @@ int mtd_add_partition(struct mtd_info *master, const char *name, return -EINVAL; if (length == MTDPART_SIZ_FULL) - length = master->size - offset; + length = parent->size - offset; if (length <= 0) return -EINVAL; @@ -651,7 +668,7 @@ int mtd_add_partition(struct mtd_info *master, const char *name, part.size = length; part.offset = offset; - new = allocate_partition(master, &part, -1, offset); + new = allocate_partition(parent, &part, -1, offset); if (IS_ERR(new)) return PTR_ERR(new); @@ -667,23 +684,69 @@ int mtd_add_partition(struct mtd_info *master, const char *name, } EXPORT_SYMBOL_GPL(mtd_add_partition); -int mtd_del_partition(struct mtd_info *master, int partno) +/** + * __mtd_del_partition - delete MTD partition + * + * @priv: internal MTD struct for partition to be deleted + * + * This function must be called with the partitions mutex locked. + */
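For context, mtd_add_partition() above and mtd_del_partition() below form the runtime counterpart to the parser path (they back the BLKPG partitioning ioctls). A hedged usage sketch, with a made-up caller and sizes, assuming the partition's global MTD index is known at deletion time:

	#include <linux/mtd/mtd.h>
	#include <linux/mtd/partitions.h>
	#include <linux/sizes.h>

	static int example_carve(struct mtd_info *parent)
	{
		int err;

		/* carve a 1 MiB partition at offset 0 out of "parent" */
		err = mtd_add_partition(parent, "scratch", 0, SZ_1M);
		if (err)
			return err;

		/*
		 * ... later, delete it again; partno is the partition's
		 * global MTD index (hypothetically 1 here), not its
		 * position within parent.
		 */
		return mtd_del_partition(parent, 1);
	}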
+static int __mtd_del_partition(struct mtd_part *priv) +{ + struct mtd_part *child, *next; + int err; + + list_for_each_entry_safe(child, next, &mtd_partitions, list) { + if (child->parent == &priv->mtd) { + err = __mtd_del_partition(child); + if (err) + return err; + } + } + + sysfs_remove_files(&priv->mtd.dev.kobj, mtd_partition_attrs); + + err = del_mtd_device(&priv->mtd); + if (err) + return err; + + list_del(&priv->list); + free_partition(priv); + + return 0; +} + +/* + * This function unregisters and destroys all slave MTD objects which are + * attached to the given MTD object. + */ +int del_mtd_partitions(struct mtd_info *mtd) { struct mtd_part *slave, *next; - int ret = -EINVAL; + int ret, err = 0; mutex_lock(&mtd_partitions_mutex); list_for_each_entry_safe(slave, next, &mtd_partitions, list) - if ((slave->master == master) && - (slave->mtd.index == partno)) { - sysfs_remove_files(&slave->mtd.dev.kobj, - mtd_partition_attrs); - ret = del_mtd_device(&slave->mtd); + if (slave->parent == mtd) { + ret = __mtd_del_partition(slave); if (ret < 0) - break; + err = ret; + } + mutex_unlock(&mtd_partitions_mutex); + + return err; +} + +int mtd_del_partition(struct mtd_info *mtd, int partno) +{ + struct mtd_part *slave, *next; + int ret = -EINVAL; - list_del(&slave->list); - free_partition(slave); + mutex_lock(&mtd_partitions_mutex); + list_for_each_entry_safe(slave, next, &mtd_partitions, list) + if ((slave->parent == mtd) && + (slave->mtd.index == partno)) { + ret = __mtd_del_partition(slave); break; } mutex_unlock(&mtd_partitions_mutex); @@ -724,6 +787,8 @@ int add_mtd_partitions(struct mtd_info *master, add_mtd_device(&slave->mtd); mtd_add_partition_attrs(slave); + if (parts[i].types) + mtd_parse_part(slave, parts[i].types); cur_offset = slave->offset + slave->mtd.size; } @@ -799,6 +864,27 @@ static const char * const default_mtd_part_types[] = { NULL }; +static int mtd_part_do_parse(struct mtd_part_parser *parser, + struct mtd_info *master, + struct mtd_partitions *pparts, + struct mtd_part_parser_data *data) +{ + int ret; + + ret = (*parser->parse_fn)(master, &pparts->parts, data); + pr_debug("%s: parser %s: %i\n", master->name, parser->name, ret); + if (ret <= 0) + return ret; + + pr_notice("%d %s partitions found on MTD device %s\n", ret, + parser->name, master->name); + + pparts->nr_parts = ret; + pparts->parser = parser; + + return ret; +} + /** * parse_mtd_partitions - parse MTD partitions * @master: the master partition (describes whole MTD device) @@ -839,16 +925,10 @@ int parse_mtd_partitions(struct mtd_info *master, const char *const *types, parser ? parser->name : NULL); if (!parser) continue; - ret = (*parser->parse_fn)(master, &pparts->parts, data); - pr_debug("%s: parser %s: %i\n", - master->name, parser->name, ret); - if (ret > 0) { - printk(KERN_NOTICE "%d %s partitions found on MTD device %s\n", - ret, parser->name, master->name); - pparts->nr_parts = ret; - pparts->parser = parser; + ret = mtd_part_do_parse(parser, master, pparts, data); + /* Found partitions!
*/ + if (ret > 0) return 0; - } mtd_part_parser_put(parser); /* * Stash the first error we see; only report it if no parser @@ -899,6 +979,6 @@ uint64_t mtd_get_device_size(const struct mtd_info *mtd) if (!mtd_is_partition(mtd)) return mtd->size; - return mtd_to_part(mtd)->master->size; + return mtd_get_device_size(mtd_to_part(mtd)->parent); } EXPORT_SYMBOL_GPL(mtd_get_device_size); diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index c3029528063b..dbfa72d61d5a 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -308,6 +308,7 @@ config MTD_NAND_CS553X config MTD_NAND_ATMEL tristate "Support for NAND Flash / SmartMedia on AT91" depends on ARCH_AT91 + select MFD_ATMEL_SMC help Enables support for NAND Flash / Smart Media Card interface on Atmel AT91 processors. @@ -542,6 +543,7 @@ config MTD_NAND_SUNXI config MTD_NAND_HISI504 tristate "Support for NAND controller on Hisilicon SoC Hip04" + depends on ARCH_HISI || COMPILE_TEST depends on HAS_DMA help Enables support for NAND controller on Hisilicon SoC Hip04. @@ -555,6 +557,7 @@ config MTD_NAND_QCOM config MTD_NAND_MTK tristate "Support for NAND controller on MTK SoCs" + depends on ARCH_MEDIATEK || COMPILE_TEST depends on HAS_DMA help Enables support for NAND controller on MTK SoCs. diff --git a/drivers/mtd/nand/atmel/nand-controller.c b/drivers/mtd/nand/atmel/nand-controller.c index 3b2446896147..d922a88e407f 100644 --- a/drivers/mtd/nand/atmel/nand-controller.c +++ b/drivers/mtd/nand/atmel/nand-controller.c @@ -57,6 +57,7 @@ #include <linux/interrupt.h> #include <linux/mfd/syscon.h> #include <linux/mfd/syscon/atmel-matrix.h> +#include <linux/mfd/syscon/atmel-smc.h> #include <linux/module.h> #include <linux/mtd/nand.h> #include <linux/of_address.h> @@ -64,7 +65,6 @@ #include <linux/of_platform.h> #include <linux/iopoll.h> #include <linux/platform_device.h> -#include <linux/platform_data/atmel.h> #include <linux/regmap.h> #include "pmecc.h" @@ -151,6 +151,8 @@ struct atmel_nand_cs { void __iomem *virt; dma_addr_t dma; } io; + + struct atmel_smc_cs_conf smcconf; }; struct atmel_nand { @@ -196,6 +198,8 @@ struct atmel_nand_controller_ops { void (*nand_init)(struct atmel_nand_controller *nc, struct atmel_nand *nand); int (*ecc_init)(struct atmel_nand *nand); + int (*setup_data_interface)(struct atmel_nand *nand, int csline, + const struct nand_data_interface *conf); }; struct atmel_nand_controller_caps { @@ -912,7 +916,7 @@ static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip, struct mtd_info *mtd = nand_to_mtd(chip); struct atmel_nand *nand = to_atmel_nand(chip); struct atmel_hsmc_nand_controller *nc; - int ret; + int ret, status; nc = to_hsmc_nand_controller(chip->controller); @@ -954,6 +958,10 @@ static int atmel_hsmc_nand_pmecc_write_pg(struct nand_chip *chip, dev_err(nc->base.dev, "Failed to program NAND page (err = %d)\n", ret); + status = chip->waitfunc(mtd, chip); + if (status & NAND_STATUS_FAIL) + return -EIO; + return ret; } @@ -1175,6 +1183,295 @@ static int atmel_hsmc_nand_ecc_init(struct atmel_nand *nand) return 0; } +static int atmel_smc_nand_prepare_smcconf(struct atmel_nand *nand, + const struct nand_data_interface *conf, + struct atmel_smc_cs_conf *smcconf) +{ + u32 ncycles, totalcycles, timeps, mckperiodps; + struct atmel_nand_controller *nc; + int ret; + + nc = to_nand_controller(nand->base.controller); + + /* DDR interface not supported. */ + if (conf->type != NAND_SDR_IFACE) + return -ENOTSUPP; + + /* + * tRC < 30ns implies EDO mode. 
This controller does not support this + * mode. + */ + if (conf->timings.sdr.tRC_min < 30) + return -ENOTSUPP; + + atmel_smc_cs_conf_init(smcconf); + + mckperiodps = NSEC_PER_SEC / clk_get_rate(nc->mck); + mckperiodps *= 1000; + + /* + * Set write pulse timing. This one is easy to extract: + * + * NWE_PULSE = tWP + */ + ncycles = DIV_ROUND_UP(conf->timings.sdr.tWP_min, mckperiodps); + totalcycles = ncycles; + ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NWE_SHIFT, + ncycles); + if (ret) + return ret; + + /* + * The write setup timing depends on the operation done on the NAND. + * All operations go through the same data bus, but the operation + * type depends on the address we are writing to (ALE/CLE address + * lines). + * Since we have no way to differentiate the different operations at + * the SMC level, we must consider the worst case (the biggest setup + * time among all operation types): + * + * NWE_SETUP = max(tCLS, tCS, tALS, tDS) - NWE_PULSE + */ + timeps = max3(conf->timings.sdr.tCLS_min, conf->timings.sdr.tCS_min, + conf->timings.sdr.tALS_min); + timeps = max(timeps, conf->timings.sdr.tDS_min); + ncycles = DIV_ROUND_UP(timeps, mckperiodps); + ncycles = ncycles > totalcycles ? ncycles - totalcycles : 0; + totalcycles += ncycles; + ret = atmel_smc_cs_conf_set_setup(smcconf, ATMEL_SMC_NWE_SHIFT, + ncycles); + if (ret) + return ret; + + /* + * As for the write setup timing, the write hold timing depends on the + * operation done on the NAND: + * + * NWE_HOLD = max(tCLH, tCH, tALH, tDH, tWH) + */ + timeps = max3(conf->timings.sdr.tCLH_min, conf->timings.sdr.tCH_min, + conf->timings.sdr.tALH_min); + timeps = max3(timeps, conf->timings.sdr.tDH_min, + conf->timings.sdr.tWH_min); + ncycles = DIV_ROUND_UP(timeps, mckperiodps); + totalcycles += ncycles; + + /* + * The write cycle timing is directly matching tWC, but is also + * dependent on the setup and hold timings we calculated earlier, + * which gives: + * + * NWE_CYCLE = max(tWC, NWE_SETUP + NWE_PULSE + NWE_HOLD) + */ + ncycles = DIV_ROUND_UP(conf->timings.sdr.tWC_min, mckperiodps); + ncycles = max(totalcycles, ncycles); + ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NWE_SHIFT, + ncycles); + if (ret) + return ret; + + /* + * We don't want the CS line to be toggled between each byte/word + * transfer to the NAND. The only way to guarantee that is to have the + * NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means: + * + * NCS_WR_PULSE = NWE_CYCLE + */ + ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_WR_SHIFT, + ncycles); + if (ret) + return ret; + + /* + * As for the write setup timing, the read hold timing depends on the + * operation done on the NAND: + * + * NRD_HOLD = max(tREH, tRHOH) + */ + timeps = max(conf->timings.sdr.tREH_min, conf->timings.sdr.tRHOH_min); + ncycles = DIV_ROUND_UP(timeps, mckperiodps); + totalcycles = ncycles; + + /* + * TDF = tRHZ - NRD_HOLD + */ + ncycles = DIV_ROUND_UP(conf->timings.sdr.tRHZ_max, mckperiodps); + ncycles -= totalcycles; + + /* + * In ONFI 4.0 specs, tRHZ has been increased to support EDO NANDs and + * we might end up with a config that does not fit in the TDF field. + * Just take the max value in this case and hope that the NAND is more + * tolerant than advertised. + */
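To make the picosecond-to-cycle conversions above concrete, here is a standalone arithmetic sketch (not kernel code) using a hypothetical 133 MHz mck and made-up SDR timings; DIV_ROUND_UP mirrors the kernel macro, and the period computation reproduces the driver's integer truncation:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

	int main(void)
	{
		/* NSEC_PER_SEC / clk_get_rate(mck), then ns -> ps */
		unsigned long mckperiodps = 1000000000UL / 133000000UL * 1000; /* 7000 ps */
		unsigned long tWP_min = 15000;	/* 15 ns, hypothetical */
		unsigned long tWC_min = 30000;	/* 30 ns, hypothetical */

		/* NWE_PULSE = ceil(tWP / period) = 3 cycles */
		printf("NWE_PULSE = %lu\n", DIV_ROUND_UP(tWP_min, mckperiodps));
		/* NWE_CYCLE >= ceil(tWC / period) = 5 cycles */
		printf("NWE_CYCLE >= %lu\n", DIV_ROUND_UP(tWC_min, mckperiodps));
		return 0;
	}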
+ if (ncycles > ATMEL_SMC_MODE_TDF_MAX) + ncycles = ATMEL_SMC_MODE_TDF_MAX; + else if (ncycles < ATMEL_SMC_MODE_TDF_MIN) + ncycles = ATMEL_SMC_MODE_TDF_MIN; + + smcconf->mode |= ATMEL_SMC_MODE_TDF(ncycles) | + ATMEL_SMC_MODE_TDFMODE_OPTIMIZED; + + /* + * Read pulse timing directly matches tRP: + * + * NRD_PULSE = tRP + */ + ncycles = DIV_ROUND_UP(conf->timings.sdr.tRP_min, mckperiodps); + totalcycles += ncycles; + ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NRD_SHIFT, + ncycles); + if (ret) + return ret; + + /* + * The read cycle timing is directly matching tRC, but is also + * dependent on the setup and hold timings we calculated earlier, + * which gives: + * + * NRD_CYCLE = max(tRC, NRD_PULSE + NRD_HOLD) + * + * NRD_SETUP is always 0. + */ + ncycles = DIV_ROUND_UP(conf->timings.sdr.tRC_min, mckperiodps); + ncycles = max(totalcycles, ncycles); + ret = atmel_smc_cs_conf_set_cycle(smcconf, ATMEL_SMC_NRD_SHIFT, + ncycles); + if (ret) + return ret; + + /* + * We don't want the CS line to be toggled between each byte/word + * transfer from the NAND. The only way to guarantee that is to have + * the NCS_{WR,RD}_{SETUP,HOLD} timings set to 0, which in turn means: + * + * NCS_RD_PULSE = NRD_CYCLE + */ + ret = atmel_smc_cs_conf_set_pulse(smcconf, ATMEL_SMC_NCS_RD_SHIFT, + ncycles); + if (ret) + return ret; + + /* Txxx timings are directly matching tXXX ones. */ + ncycles = DIV_ROUND_UP(conf->timings.sdr.tCLR_min, mckperiodps); + ret = atmel_smc_cs_conf_set_timing(smcconf, + ATMEL_HSMC_TIMINGS_TCLR_SHIFT, + ncycles); + if (ret) + return ret; + + ncycles = DIV_ROUND_UP(conf->timings.sdr.tADL_min, mckperiodps); + ret = atmel_smc_cs_conf_set_timing(smcconf, + ATMEL_HSMC_TIMINGS_TADL_SHIFT, + ncycles); + if (ret) + return ret; + + ncycles = DIV_ROUND_UP(conf->timings.sdr.tAR_min, mckperiodps); + ret = atmel_smc_cs_conf_set_timing(smcconf, + ATMEL_HSMC_TIMINGS_TAR_SHIFT, + ncycles); + if (ret) + return ret; + + ncycles = DIV_ROUND_UP(conf->timings.sdr.tRR_min, mckperiodps); + ret = atmel_smc_cs_conf_set_timing(smcconf, + ATMEL_HSMC_TIMINGS_TRR_SHIFT, + ncycles); + if (ret) + return ret; + + ncycles = DIV_ROUND_UP(conf->timings.sdr.tWB_max, mckperiodps); + ret = atmel_smc_cs_conf_set_timing(smcconf, + ATMEL_HSMC_TIMINGS_TWB_SHIFT, + ncycles); + if (ret) + return ret; + + /* Attach the CS line to the NFC logic. */ + smcconf->timings |= ATMEL_HSMC_TIMINGS_NFSEL; + + /* Set the appropriate data bus width. */ + if (nand->base.options & NAND_BUSWIDTH_16) + smcconf->mode |= ATMEL_SMC_MODE_DBW_16; + + /* Operate in NRD/NWE READ/WRITEMODE. */ + smcconf->mode |= ATMEL_SMC_MODE_READMODE_NRD | + ATMEL_SMC_MODE_WRITEMODE_NWE; + + return 0; +}
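atmel_smc_nand_prepare_smcconf() only computes a configuration; the ->setup_data_interface() hooks below apply it, honouring the NAND core convention that csline == NAND_DATA_IFACE_CHECK_ONLY means "validate only, touch no registers". A hedged sketch of that two-pass convention (example_apply_timings() is hypothetical; the hook signature is the one from this patch):

	#include <linux/mtd/mtd.h>
	#include <linux/mtd/nand.h>

	static int example_apply_timings(struct mtd_info *mtd, int csline,
					 const struct nand_data_interface *conf)
	{
		struct nand_chip *chip = mtd_to_nand(mtd);
		int ret;

		/* first pass: can these timings be met at all? */
		ret = chip->setup_data_interface(mtd, NAND_DATA_IFACE_CHECK_ONLY,
						 conf);
		if (ret)
			return ret;

		/* second pass: actually program the chip-select timings */
		return chip->setup_data_interface(mtd, csline, conf);
	}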
+ +static int atmel_smc_nand_setup_data_interface(struct atmel_nand *nand, + int csline, + const struct nand_data_interface *conf) +{ + struct atmel_nand_controller *nc; + struct atmel_smc_cs_conf smcconf; + struct atmel_nand_cs *cs; + int ret; + + nc = to_nand_controller(nand->base.controller); + + ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf); + if (ret) + return ret; + + if (csline == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + + cs = &nand->cs[csline]; + cs->smcconf = smcconf; + atmel_smc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf); + + return 0; +} + +static int atmel_hsmc_nand_setup_data_interface(struct atmel_nand *nand, + int csline, + const struct nand_data_interface *conf) +{ + struct atmel_nand_controller *nc; + struct atmel_smc_cs_conf smcconf; + struct atmel_nand_cs *cs; + int ret; + + nc = to_nand_controller(nand->base.controller); + + ret = atmel_smc_nand_prepare_smcconf(nand, conf, &smcconf); + if (ret) + return ret; + + if (csline == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + + cs = &nand->cs[csline]; + cs->smcconf = smcconf; + + if (cs->rb.type == ATMEL_NAND_NATIVE_RB) + cs->smcconf.timings |= ATMEL_HSMC_TIMINGS_RBNSEL(cs->rb.id); + + atmel_hsmc_cs_conf_apply(nc->smc, cs->id, &cs->smcconf); + + return 0; +} + +static int atmel_nand_setup_data_interface(struct mtd_info *mtd, int csline, + const struct nand_data_interface *conf) +{ + struct nand_chip *chip = mtd_to_nand(mtd); + struct atmel_nand *nand = to_atmel_nand(chip); + struct atmel_nand_controller *nc; + + nc = to_nand_controller(nand->base.controller); + + if (csline >= nand->numcs || + (csline < 0 && csline != NAND_DATA_IFACE_CHECK_ONLY)) + return -EINVAL; + + return nc->caps->ops->setup_data_interface(nand, csline, conf); +} + static void atmel_nand_init(struct atmel_nand_controller *nc, struct atmel_nand *nand) { @@ -1192,6 +1489,9 @@ static void atmel_nand_init(struct atmel_nand_controller *nc, chip->write_buf = atmel_nand_write_buf; chip->select_chip = atmel_nand_select_chip; + if (nc->mck && nc->caps->ops->setup_data_interface) + chip->setup_data_interface = atmel_nand_setup_data_interface; + /* Some NANDs require a longer delay than the default one (20us). */ chip->chip_delay = 40; @@ -1677,6 +1977,12 @@ static int atmel_nand_controller_init(struct atmel_nand_controller *nc, if (nc->caps->legacy_of_bindings) return 0; + nc->mck = of_clk_get(dev->parent->of_node, 0); + if (IS_ERR(nc->mck)) { + dev_err(dev, "Failed to retrieve MCK clk\n"); + return PTR_ERR(nc->mck); + } + np = of_parse_phandle(dev->parent->of_node, "atmel,smc", 0); if (!np) { dev_err(dev, "Missing or invalid atmel,smc property\n"); @@ -1983,6 +2289,7 @@ static const struct atmel_nand_controller_ops atmel_hsmc_nc_ops = { .remove = atmel_hsmc_nand_controller_remove, .ecc_init = atmel_hsmc_nand_ecc_init, .nand_init = atmel_hsmc_nand_init, + .setup_data_interface = atmel_hsmc_nand_setup_data_interface, }; static const struct atmel_nand_controller_caps atmel_sama5_nc_caps = { @@ -2037,7 +2344,14 @@ atmel_smc_nand_controller_remove(struct atmel_nand_controller *nc) return 0; } -static const struct atmel_nand_controller_ops atmel_smc_nc_ops = { +/* + * The SMC reg layout of at91rm9200 is completely different, which prevents us + * from re-using atmel_smc_nand_setup_data_interface() for the + * ->setup_data_interface() hook. + * At this point, there's no support for the at91rm9200 SMC IP, so we leave + * ->setup_data_interface() unassigned.
+ */ +static const struct atmel_nand_controller_ops at91rm9200_nc_ops = { .probe = atmel_smc_nand_controller_probe, .remove = atmel_smc_nand_controller_remove, .ecc_init = atmel_nand_ecc_init, @@ -2047,6 +2361,20 @@ static const struct atmel_nand_controller_ops atmel_smc_nc_ops = { static const struct atmel_nand_controller_caps atmel_rm9200_nc_caps = { .ale_offs = BIT(21), .cle_offs = BIT(22), + .ops = &at91rm9200_nc_ops, +}; + +static const struct atmel_nand_controller_ops atmel_smc_nc_ops = { + .probe = atmel_smc_nand_controller_probe, + .remove = atmel_smc_nand_controller_remove, + .ecc_init = atmel_nand_ecc_init, + .nand_init = atmel_smc_nand_init, + .setup_data_interface = atmel_smc_nand_setup_data_interface, +}; + +static const struct atmel_nand_controller_caps atmel_sam9260_nc_caps = { + .ale_offs = BIT(21), + .cle_offs = BIT(22), .ops = &atmel_smc_nc_ops, }; @@ -2093,7 +2421,7 @@ static const struct of_device_id atmel_nand_controller_of_ids[] = { }, { .compatible = "atmel,at91sam9260-nand-controller", - .data = &atmel_rm9200_nc_caps, + .data = &atmel_sam9260_nc_caps, }, { .compatible = "atmel,at91sam9261-nand-controller", @@ -2181,6 +2509,24 @@ static int atmel_nand_controller_remove(struct platform_device *pdev) return nc->caps->ops->remove(nc); } +static __maybe_unused int atmel_nand_controller_resume(struct device *dev) +{ + struct atmel_nand_controller *nc = dev_get_drvdata(dev); + struct atmel_nand *nand; + + list_for_each_entry(nand, &nc->chips, node) { + int i; + + for (i = 0; i < nand->numcs; i++) + nand_reset(&nand->base, i); + } + + return 0; +} + +static SIMPLE_DEV_PM_OPS(atmel_nand_controller_pm_ops, NULL, + atmel_nand_controller_resume); + static struct platform_driver atmel_nand_controller_driver = { .driver = { .name = "atmel-nand-controller", diff --git a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c index f1da4ea88f2c..54bac5b73f0a 100644 --- a/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c +++ b/drivers/mtd/nand/bcm47xxnflash/ops_bcm4706.c @@ -392,6 +392,8 @@ int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n) b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte; b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf; b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf; + b47n->nand_chip.onfi_set_features = nand_onfi_get_set_features_notsupp; + b47n->nand_chip.onfi_get_features = nand_onfi_get_set_features_notsupp; nand_chip->chip_delay = 50; b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH; diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c index d40c32d311d8..2fd733eba0a3 100644 --- a/drivers/mtd/nand/cafe_nand.c +++ b/drivers/mtd/nand/cafe_nand.c @@ -654,6 +654,8 @@ static int cafe_nand_probe(struct pci_dev *pdev, cafe->nand.read_buf = cafe_read_buf; cafe->nand.write_buf = cafe_write_buf; cafe->nand.select_chip = cafe_select_chip; + cafe->nand.onfi_set_features = nand_onfi_get_set_features_notsupp; + cafe->nand.onfi_get_features = nand_onfi_get_set_features_notsupp; cafe->nand.chip_delay = 0; diff --git a/drivers/mtd/nand/davinci_nand.c b/drivers/mtd/nand/davinci_nand.c index 531c51991e57..7b26e53b95b1 100644 --- a/drivers/mtd/nand/davinci_nand.c +++ b/drivers/mtd/nand/davinci_nand.c @@ -771,11 +771,14 @@ static int nand_davinci_probe(struct platform_device *pdev) info->chip.ecc.hwctl = nand_davinci_hwctl_4bit; info->chip.ecc.bytes = 10; info->chip.ecc.options = NAND_ECC_GENERIC_ERASED_CHECK; + info->chip.ecc.algo = NAND_ECC_BCH; } else { + /* 1bit ecc 
hamming */ info->chip.ecc.calculate = nand_davinci_calculate_1bit; info->chip.ecc.correct = nand_davinci_correct_1bit; info->chip.ecc.hwctl = nand_davinci_hwctl_1bit; info->chip.ecc.bytes = 3; + info->chip.ecc.algo = NAND_ECC_HAMMING; } info->chip.ecc.size = 512; info->chip.ecc.strength = pdata->ecc_bits; diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c index 16634df2e39a..d723be352148 100644 --- a/drivers/mtd/nand/denali.c +++ b/drivers/mtd/nand/denali.c @@ -23,50 +23,43 @@ #include <linux/mutex.h> #include <linux/mtd/mtd.h> #include <linux/module.h> +#include <linux/slab.h> #include "denali.h" MODULE_LICENSE("GPL"); -/* - * We define a module parameter that allows the user to override - * the hardware and decide what timing mode should be used. - */ -#define NAND_DEFAULT_TIMINGS -1 +#define DENALI_NAND_NAME "denali-nand" -static int onfi_timing_mode = NAND_DEFAULT_TIMINGS; -module_param(onfi_timing_mode, int, S_IRUGO); -MODULE_PARM_DESC(onfi_timing_mode, - "Overrides default ONFI setting. -1 indicates use default timings"); +/* Host Data/Command Interface */ +#define DENALI_HOST_ADDR 0x00 +#define DENALI_HOST_DATA 0x10 -#define DENALI_NAND_NAME "denali-nand" +#define DENALI_MAP00 (0 << 26) /* direct access to buffer */ +#define DENALI_MAP01 (1 << 26) /* read/write pages in PIO */ +#define DENALI_MAP10 (2 << 26) /* high-level control plane */ +#define DENALI_MAP11 (3 << 26) /* direct controller access */ -/* - * We define a macro here that combines all interrupts this driver uses into - * a single constant value, for convenience. - */ -#define DENALI_IRQ_ALL (INTR__DMA_CMD_COMP | \ - INTR__ECC_TRANSACTION_DONE | \ - INTR__ECC_ERR | \ - INTR__PROGRAM_FAIL | \ - INTR__LOAD_COMP | \ - INTR__PROGRAM_COMP | \ - INTR__TIME_OUT | \ - INTR__ERASE_FAIL | \ - INTR__RST_COMP | \ - INTR__ERASE_COMP) +/* MAP11 access cycle type */ +#define DENALI_MAP11_CMD ((DENALI_MAP11) | 0) /* command cycle */ +#define DENALI_MAP11_ADDR ((DENALI_MAP11) | 1) /* address cycle */ +#define DENALI_MAP11_DATA ((DENALI_MAP11) | 2) /* data cycle */ -/* - * indicates whether or not the internal value for the flash bank is - * valid or not - */ -#define CHIP_SELECT_INVALID -1 +/* MAP10 commands */ +#define DENALI_ERASE 0x01 + +#define DENALI_BANK(denali) ((denali)->active_bank << 24) + +#define DENALI_INVALID_BANK -1 +#define DENALI_NR_BANKS 4 /* - * This macro divides two integers and rounds fractional values up - * to the nearest integer value. + * The bus interface clock, clk_x, is phase aligned with the core clock. The + * clk_x is an integral multiple N of the core clk. The value N is configured + * at IP delivery time, and its available value is 4, 5, or 6. We need to align + * to the largest value to make it work with any possible configuration. */ -#define CEIL_DIV(X, Y) (((X)%(Y)) ? ((X)/(Y)+1) : ((X)/(Y))) +#define DENALI_CLK_X_MULT 6 /* * this macro allows us to convert from an MTD structure to our own @@ -77,339 +70,11 @@ static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd) return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand); } -/* - * These constants are defined by the driver to enable common driver - * configuration options. 
- */ -#define SPARE_ACCESS 0x41 -#define MAIN_ACCESS 0x42 -#define MAIN_SPARE_ACCESS 0x43 - -#define DENALI_READ 0 -#define DENALI_WRITE 0x100 - -/* - * this is a helper macro that allows us to - * format the bank into the proper bits for the controller - */ -#define BANK(x) ((x) << 24) - -/* forward declarations */ -static void clear_interrupts(struct denali_nand_info *denali); -static uint32_t wait_for_irq(struct denali_nand_info *denali, - uint32_t irq_mask); -static void denali_irq_enable(struct denali_nand_info *denali, - uint32_t int_mask); -static uint32_t read_interrupt_status(struct denali_nand_info *denali); - -/* - * Certain operations for the denali NAND controller use an indexed mode to - * read/write data. The operation is performed by writing the address value - * of the command to the device memory followed by the data. This function - * abstracts this common operation. - */ -static void index_addr(struct denali_nand_info *denali, - uint32_t address, uint32_t data) -{ - iowrite32(address, denali->flash_mem); - iowrite32(data, denali->flash_mem + 0x10); -} - -/* Perform an indexed read of the device */ -static void index_addr_read_data(struct denali_nand_info *denali, - uint32_t address, uint32_t *pdata) -{ - iowrite32(address, denali->flash_mem); - *pdata = ioread32(denali->flash_mem + 0x10); -} - -/* - * We need to buffer some data for some of the NAND core routines. - * The operations manage buffering that data. - */ -static void reset_buf(struct denali_nand_info *denali) -{ - denali->buf.head = denali->buf.tail = 0; -} - -static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte) -{ - denali->buf.buf[denali->buf.tail++] = byte; -} - -/* reads the status of the device */ -static void read_status(struct denali_nand_info *denali) -{ - uint32_t cmd; - - /* initialize the data buffer to store status */ - reset_buf(denali); - - cmd = ioread32(denali->flash_reg + WRITE_PROTECT); - if (cmd) - write_byte_to_buf(denali, NAND_STATUS_WP); - else - write_byte_to_buf(denali, 0); -} - -/* resets a specific device connected to the core */ -static void reset_bank(struct denali_nand_info *denali) -{ - uint32_t irq_status; - uint32_t irq_mask = INTR__RST_COMP | INTR__TIME_OUT; - - clear_interrupts(denali); - - iowrite32(1 << denali->flash_bank, denali->flash_reg + DEVICE_RESET); - - irq_status = wait_for_irq(denali, irq_mask); - - if (irq_status & INTR__TIME_OUT) - dev_err(denali->dev, "reset bank failed.\n"); -} - -/* Reset the flash controller */ -static uint16_t denali_nand_reset(struct denali_nand_info *denali) -{ - int i; - - for (i = 0; i < denali->max_banks; i++) - iowrite32(INTR__RST_COMP | INTR__TIME_OUT, - denali->flash_reg + INTR_STATUS(i)); - - for (i = 0; i < denali->max_banks; i++) { - iowrite32(1 << i, denali->flash_reg + DEVICE_RESET); - while (!(ioread32(denali->flash_reg + INTR_STATUS(i)) & - (INTR__RST_COMP | INTR__TIME_OUT))) - cpu_relax(); - if (ioread32(denali->flash_reg + INTR_STATUS(i)) & - INTR__TIME_OUT) - dev_dbg(denali->dev, - "NAND Reset operation timed out on bank %d\n", i); - } - - for (i = 0; i < denali->max_banks; i++) - iowrite32(INTR__RST_COMP | INTR__TIME_OUT, - denali->flash_reg + INTR_STATUS(i)); - - return PASS; -} - -/* - * this routine calculates the ONFI timing values for a given mode and - * programs the clocking register accordingly. The mode is determined by - * the get_onfi_nand_para routine. 
- */ -static void nand_onfi_timing_set(struct denali_nand_info *denali, - uint16_t mode) -{ - uint16_t Trea[6] = {40, 30, 25, 20, 20, 16}; - uint16_t Trp[6] = {50, 25, 17, 15, 12, 10}; - uint16_t Treh[6] = {30, 15, 15, 10, 10, 7}; - uint16_t Trc[6] = {100, 50, 35, 30, 25, 20}; - uint16_t Trhoh[6] = {0, 15, 15, 15, 15, 15}; - uint16_t Trloh[6] = {0, 0, 0, 0, 5, 5}; - uint16_t Tcea[6] = {100, 45, 30, 25, 25, 25}; - uint16_t Tadl[6] = {200, 100, 100, 100, 70, 70}; - uint16_t Trhw[6] = {200, 100, 100, 100, 100, 100}; - uint16_t Trhz[6] = {200, 100, 100, 100, 100, 100}; - uint16_t Twhr[6] = {120, 80, 80, 60, 60, 60}; - uint16_t Tcs[6] = {70, 35, 25, 25, 20, 15}; - - uint16_t data_invalid_rhoh, data_invalid_rloh, data_invalid; - uint16_t dv_window = 0; - uint16_t en_lo, en_hi; - uint16_t acc_clks; - uint16_t addr_2_data, re_2_we, re_2_re, we_2_re, cs_cnt; - - en_lo = CEIL_DIV(Trp[mode], CLK_X); - en_hi = CEIL_DIV(Treh[mode], CLK_X); -#if ONFI_BLOOM_TIME - if ((en_hi * CLK_X) < (Treh[mode] + 2)) - en_hi++; -#endif - - if ((en_lo + en_hi) * CLK_X < Trc[mode]) - en_lo += CEIL_DIV((Trc[mode] - (en_lo + en_hi) * CLK_X), CLK_X); - - if ((en_lo + en_hi) < CLK_MULTI) - en_lo += CLK_MULTI - en_lo - en_hi; - - while (dv_window < 8) { - data_invalid_rhoh = en_lo * CLK_X + Trhoh[mode]; - - data_invalid_rloh = (en_lo + en_hi) * CLK_X + Trloh[mode]; - - data_invalid = data_invalid_rhoh < data_invalid_rloh ? - data_invalid_rhoh : data_invalid_rloh; - - dv_window = data_invalid - Trea[mode]; - - if (dv_window < 8) - en_lo++; - } - - acc_clks = CEIL_DIV(Trea[mode], CLK_X); - - while (acc_clks * CLK_X - Trea[mode] < 3) - acc_clks++; - - if (data_invalid - acc_clks * CLK_X < 2) - dev_warn(denali->dev, "%s, Line %d: Warning!\n", - __FILE__, __LINE__); - - addr_2_data = CEIL_DIV(Tadl[mode], CLK_X); - re_2_we = CEIL_DIV(Trhw[mode], CLK_X); - re_2_re = CEIL_DIV(Trhz[mode], CLK_X); - we_2_re = CEIL_DIV(Twhr[mode], CLK_X); - cs_cnt = CEIL_DIV((Tcs[mode] - Trp[mode]), CLK_X); - if (cs_cnt == 0) - cs_cnt = 1; - - if (Tcea[mode]) { - while (cs_cnt * CLK_X + Trea[mode] < Tcea[mode]) - cs_cnt++; - } - -#if MODE5_WORKAROUND - if (mode == 5) - acc_clks = 5; -#endif - - /* Sighting 3462430: Temporary hack for MT29F128G08CJABAWP:B */ - if (ioread32(denali->flash_reg + MANUFACTURER_ID) == 0 && - ioread32(denali->flash_reg + DEVICE_ID) == 0x88) - acc_clks = 6; - - iowrite32(acc_clks, denali->flash_reg + ACC_CLKS); - iowrite32(re_2_we, denali->flash_reg + RE_2_WE); - iowrite32(re_2_re, denali->flash_reg + RE_2_RE); - iowrite32(we_2_re, denali->flash_reg + WE_2_RE); - iowrite32(addr_2_data, denali->flash_reg + ADDR_2_DATA); - iowrite32(en_lo, denali->flash_reg + RDWR_EN_LO_CNT); - iowrite32(en_hi, denali->flash_reg + RDWR_EN_HI_CNT); - iowrite32(cs_cnt, denali->flash_reg + CS_SETUP_CNT); -} - -/* queries the NAND device to see what ONFI modes it supports. */ -static uint16_t get_onfi_nand_para(struct denali_nand_info *denali) +static void denali_host_write(struct denali_nand_info *denali, + uint32_t addr, uint32_t data) { - int i; - - /* - * we needn't to do a reset here because driver has already - * reset all the banks before - */ - if (!(ioread32(denali->flash_reg + ONFI_TIMING_MODE) & - ONFI_TIMING_MODE__VALUE)) - return FAIL; - - for (i = 5; i > 0; i--) { - if (ioread32(denali->flash_reg + ONFI_TIMING_MODE) & - (0x01 << i)) - break; - } - - nand_onfi_timing_set(denali, i); - - /* - * By now, all the ONFI devices we know support the page cache - * rw feature. 
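[Editor's note] The removed nand_onfi_timing_set() above is mostly unit conversion: nanosecond minimums from the per-mode tables are divided by the bus clock period with CEIL_DIV(), then stretched until derived constraints hold (the full tRC cycle, a 3 ns data-valid margin on tREA). A runnable distillation for ONFI mode 5; CLK_X is not visible in this hunk, so the 5 ns period below is an assumed example value:

#include <stdio.h>

#define CEIL_DIV(X, Y) (((X) % (Y)) ? ((X) / (Y) + 1) : ((X) / (Y)))

int main(void)
{
        /* ONFI mode 5 entries from the removed tables (nanoseconds) */
        int Trp = 10, Treh = 7, Trc = 20, Trea = 16;
        int clk_x = 5;  /* ASSUMED clk_x period in ns; CLK_X lives elsewhere */

        int en_lo = CEIL_DIV(Trp, clk_x);
        int en_hi = CEIL_DIV(Treh, clk_x);

        /* stretch the low phase until the whole cycle meets tRC */
        if ((en_lo + en_hi) * clk_x < Trc)
                en_lo += CEIL_DIV(Trc - (en_lo + en_hi) * clk_x, clk_x);

        /* tREA plus a 3 ns margin decides ACC_CLKS */
        int acc_clks = CEIL_DIV(Trea, clk_x);
        while (acc_clks * clk_x - Trea < 3)
                acc_clks++;

        printf("en_lo=%d en_hi=%d acc_clks=%d\n", en_lo, en_hi, acc_clks);
        return 0;
}

The replacement code later in this patch derives the same registers from nand_sdr_timings instead of these hard-coded tables.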
So here we enable the pipeline_rw_ahead feature - */ - /* iowrite32(1, denali->flash_reg + CACHE_WRITE_ENABLE); */ - /* iowrite32(1, denali->flash_reg + CACHE_READ_ENABLE); */ - - return PASS; -} - -static void get_samsung_nand_para(struct denali_nand_info *denali, - uint8_t device_id) -{ - if (device_id == 0xd3) { /* Samsung K9WAG08U1A */ - /* Set timing register values according to datasheet */ - iowrite32(5, denali->flash_reg + ACC_CLKS); - iowrite32(20, denali->flash_reg + RE_2_WE); - iowrite32(12, denali->flash_reg + WE_2_RE); - iowrite32(14, denali->flash_reg + ADDR_2_DATA); - iowrite32(3, denali->flash_reg + RDWR_EN_LO_CNT); - iowrite32(2, denali->flash_reg + RDWR_EN_HI_CNT); - iowrite32(2, denali->flash_reg + CS_SETUP_CNT); - } -} - -static void get_toshiba_nand_para(struct denali_nand_info *denali) -{ - /* - * Workaround to fix a controller bug which reports a wrong - * spare area size for some kind of Toshiba NAND device - */ - if ((ioread32(denali->flash_reg + DEVICE_MAIN_AREA_SIZE) == 4096) && - (ioread32(denali->flash_reg + DEVICE_SPARE_AREA_SIZE) == 64)) - iowrite32(216, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); -} - -static void get_hynix_nand_para(struct denali_nand_info *denali, - uint8_t device_id) -{ - switch (device_id) { - case 0xD5: /* Hynix H27UAG8T2A, H27UBG8U5A or H27UCG8VFA */ - case 0xD7: /* Hynix H27UDG8VEM, H27UCG8UDM or H27UCG8V5A */ - iowrite32(128, denali->flash_reg + PAGES_PER_BLOCK); - iowrite32(4096, denali->flash_reg + DEVICE_MAIN_AREA_SIZE); - iowrite32(224, denali->flash_reg + DEVICE_SPARE_AREA_SIZE); - iowrite32(0, denali->flash_reg + DEVICE_WIDTH); - break; - default: - dev_warn(denali->dev, - "Unknown Hynix NAND (Device ID: 0x%x).\n" - "Will use default parameter values instead.\n", - device_id); - } -} - -/* - * determines how many NAND chips are connected to the controller. Note for - * Intel CE4100 devices we don't support more than one device. - */ -static void find_valid_banks(struct denali_nand_info *denali) -{ - uint32_t id[denali->max_banks]; - int i; - - denali->total_used_banks = 1; - for (i = 0; i < denali->max_banks; i++) { - index_addr(denali, MODE_11 | (i << 24) | 0, 0x90); - index_addr(denali, MODE_11 | (i << 24) | 1, 0); - index_addr_read_data(denali, MODE_11 | (i << 24) | 2, &id[i]); - - dev_dbg(denali->dev, - "Return 1st ID for bank[%d]: %x\n", i, id[i]); - - if (i == 0) { - if (!(id[i] & 0x0ff)) - break; /* WTF? */ - } else { - if ((id[i] & 0x0ff) == (id[0] & 0x0ff)) - denali->total_used_banks++; - else - break; - } - } - - if (denali->platform == INTEL_CE4100) { - /* - * Platform limitations of the CE4100 device limit - * users to a single chip solution for NAND. - * Multichip support is not enabled. 
- */ - if (denali->total_used_banks != 1) { - dev_err(denali->dev, - "Sorry, Intel CE4100 only supports a single NAND device.\n"); - BUG(); - } - } - dev_dbg(denali->dev, - "denali->total_used_banks: %d\n", denali->total_used_banks); + iowrite32(addr, denali->host + DENALI_HOST_ADDR); + iowrite32(data, denali->host + DENALI_HOST_DATA); } /* @@ -418,7 +83,7 @@ static void find_valid_banks(struct denali_nand_info *denali) */ static void detect_max_banks(struct denali_nand_info *denali) { - uint32_t features = ioread32(denali->flash_reg + FEATURES); + uint32_t features = ioread32(denali->reg + FEATURES); denali->max_banks = 1 << (features & FEATURES__N_BANKS); @@ -427,227 +92,120 @@ static void detect_max_banks(struct denali_nand_info *denali) denali->max_banks <<= 1; } -static uint16_t denali_nand_timing_set(struct denali_nand_info *denali) +static void denali_enable_irq(struct denali_nand_info *denali) { - uint16_t status = PASS; - uint32_t id_bytes[8], addr; - uint8_t maf_id, device_id; int i; - /* - * Use read id method to get device ID and other params. - * For some NAND chips, controller can't report the correct - * device ID by reading from DEVICE_ID register - */ - addr = MODE_11 | BANK(denali->flash_bank); - index_addr(denali, addr | 0, 0x90); - index_addr(denali, addr | 1, 0); - for (i = 0; i < 8; i++) - index_addr_read_data(denali, addr | 2, &id_bytes[i]); - maf_id = id_bytes[0]; - device_id = id_bytes[1]; - - if (ioread32(denali->flash_reg + ONFI_DEVICE_NO_OF_LUNS) & - ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE) { /* ONFI 1.0 NAND */ - if (FAIL == get_onfi_nand_para(denali)) - return FAIL; - } else if (maf_id == 0xEC) { /* Samsung NAND */ - get_samsung_nand_para(denali, device_id); - } else if (maf_id == 0x98) { /* Toshiba NAND */ - get_toshiba_nand_para(denali); - } else if (maf_id == 0xAD) { /* Hynix NAND */ - get_hynix_nand_para(denali, device_id); - } - - dev_info(denali->dev, - "Dump timing register values:\n" - "acc_clks: %d, re_2_we: %d, re_2_re: %d\n" - "we_2_re: %d, addr_2_data: %d, rdwr_en_lo_cnt: %d\n" - "rdwr_en_hi_cnt: %d, cs_setup_cnt: %d\n", - ioread32(denali->flash_reg + ACC_CLKS), - ioread32(denali->flash_reg + RE_2_WE), - ioread32(denali->flash_reg + RE_2_RE), - ioread32(denali->flash_reg + WE_2_RE), - ioread32(denali->flash_reg + ADDR_2_DATA), - ioread32(denali->flash_reg + RDWR_EN_LO_CNT), - ioread32(denali->flash_reg + RDWR_EN_HI_CNT), - ioread32(denali->flash_reg + CS_SETUP_CNT)); - - find_valid_banks(denali); - - /* - * If the user specified to override the default timings - * with a specific ONFI mode, we apply those changes here. 
- */ - if (onfi_timing_mode != NAND_DEFAULT_TIMINGS) - nand_onfi_timing_set(denali, onfi_timing_mode); - - return status; + for (i = 0; i < DENALI_NR_BANKS; i++) + iowrite32(U32_MAX, denali->reg + INTR_EN(i)); + iowrite32(GLOBAL_INT_EN_FLAG, denali->reg + GLOBAL_INT_ENABLE); } -static void denali_set_intr_modes(struct denali_nand_info *denali, - uint16_t INT_ENABLE) +static void denali_disable_irq(struct denali_nand_info *denali) { - if (INT_ENABLE) - iowrite32(1, denali->flash_reg + GLOBAL_INT_ENABLE); - else - iowrite32(0, denali->flash_reg + GLOBAL_INT_ENABLE); -} - -/* - * validation function to verify that the controlling software is making - * a valid request - */ -static inline bool is_flash_bank_valid(int flash_bank) -{ - return flash_bank >= 0 && flash_bank < 4; -} - -static void denali_irq_init(struct denali_nand_info *denali) -{ - uint32_t int_mask; int i; - /* Disable global interrupts */ - denali_set_intr_modes(denali, false); - - int_mask = DENALI_IRQ_ALL; - - /* Clear all status bits */ - for (i = 0; i < denali->max_banks; ++i) - iowrite32(0xFFFF, denali->flash_reg + INTR_STATUS(i)); - - denali_irq_enable(denali, int_mask); + for (i = 0; i < DENALI_NR_BANKS; i++) + iowrite32(0, denali->reg + INTR_EN(i)); + iowrite32(0, denali->reg + GLOBAL_INT_ENABLE); } -static void denali_irq_cleanup(int irqnum, struct denali_nand_info *denali) +static void denali_clear_irq(struct denali_nand_info *denali, + int bank, uint32_t irq_status) { - denali_set_intr_modes(denali, false); + /* write one to clear bits */ + iowrite32(irq_status, denali->reg + INTR_STATUS(bank)); } -static void denali_irq_enable(struct denali_nand_info *denali, - uint32_t int_mask) +static void denali_clear_irq_all(struct denali_nand_info *denali) { int i; - for (i = 0; i < denali->max_banks; ++i) - iowrite32(int_mask, denali->flash_reg + INTR_EN(i)); + for (i = 0; i < DENALI_NR_BANKS; i++) + denali_clear_irq(denali, i, U32_MAX); } -/* - * This function only returns when an interrupt that this driver cares about - * occurs. 
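[Editor's note] Two conventions worth calling out in the new IRQ helpers above: INTR_STATUS is write-one-to-clear, and denali_enable_irq() simply unmasks everything per bank with U32_MAX, leaving the filtering to the software-side irq_mask. A tiny runnable model of the W1C behaviour:

#include <stdint.h>
#include <stdio.h>

/* W1C register model: writing a mask clears exactly those status bits */
static void w1c_write(uint32_t *reg, uint32_t mask)
{
        *reg &= ~mask;
}

int main(void)
{
        uint32_t intr_status = 0x05;        /* two events pending */

        w1c_write(&intr_status, 0x01);      /* denali_clear_irq(denali, bank, 0x01) */
        printf("0x%02x\n", (unsigned)intr_status);  /* 0x04: other bit survives */

        w1c_write(&intr_status, UINT32_MAX);  /* denali_clear_irq_all() analogue */
        printf("0x%02x\n", (unsigned)intr_status);  /* 0x00 */
        return 0;
}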
This is to reduce the overhead of servicing interrupts - */ -static inline uint32_t denali_irq_detected(struct denali_nand_info *denali) +static irqreturn_t denali_isr(int irq, void *dev_id) { - return read_interrupt_status(denali) & DENALI_IRQ_ALL; -} + struct denali_nand_info *denali = dev_id; + irqreturn_t ret = IRQ_NONE; + uint32_t irq_status; + int i; -/* Interrupts are cleared by writing a 1 to the appropriate status bit */ -static inline void clear_interrupt(struct denali_nand_info *denali, - uint32_t irq_mask) -{ - uint32_t intr_status_reg; + spin_lock(&denali->irq_lock); - intr_status_reg = INTR_STATUS(denali->flash_bank); + for (i = 0; i < DENALI_NR_BANKS; i++) { + irq_status = ioread32(denali->reg + INTR_STATUS(i)); + if (irq_status) + ret = IRQ_HANDLED; - iowrite32(irq_mask, denali->flash_reg + intr_status_reg); -} + denali_clear_irq(denali, i, irq_status); -static void clear_interrupts(struct denali_nand_info *denali) -{ - uint32_t status; + if (i != denali->active_bank) + continue; - spin_lock_irq(&denali->irq_lock); + denali->irq_status |= irq_status; - status = read_interrupt_status(denali); - clear_interrupt(denali, status); + if (denali->irq_status & denali->irq_mask) + complete(&denali->complete); + } + + spin_unlock(&denali->irq_lock); - denali->irq_status = 0x0; - spin_unlock_irq(&denali->irq_lock); + return ret; } -static uint32_t read_interrupt_status(struct denali_nand_info *denali) +static void denali_reset_irq(struct denali_nand_info *denali) { - uint32_t intr_status_reg; - - intr_status_reg = INTR_STATUS(denali->flash_bank); + unsigned long flags; - return ioread32(denali->flash_reg + intr_status_reg); + spin_lock_irqsave(&denali->irq_lock, flags); + denali->irq_status = 0; + denali->irq_mask = 0; + spin_unlock_irqrestore(&denali->irq_lock, flags); } -/* - * This is the interrupt service routine. It handles all interrupts - * sent to this device. Note that on CE4100, this is a shared interrupt. - */ -static irqreturn_t denali_isr(int irq, void *dev_id) +static uint32_t denali_wait_for_irq(struct denali_nand_info *denali, + uint32_t irq_mask) { - struct denali_nand_info *denali = dev_id; + unsigned long time_left, flags; uint32_t irq_status; - irqreturn_t result = IRQ_NONE; - spin_lock(&denali->irq_lock); + spin_lock_irqsave(&denali->irq_lock, flags); - /* check to see if a valid NAND chip has been selected. */ - if (is_flash_bank_valid(denali->flash_bank)) { - /* - * check to see if controller generated the interrupt, - * since this is a shared interrupt - */ - irq_status = denali_irq_detected(denali); - if (irq_status != 0) { - /* handle interrupt */ - /* first acknowledge it */ - clear_interrupt(denali, irq_status); - /* - * store the status in the device context for someone - * to read - */ - denali->irq_status |= irq_status; - /* notify anyone who cares that it happened */ - complete(&denali->complete); - /* tell the OS that we've handled this */ - result = IRQ_HANDLED; - } + irq_status = denali->irq_status; + + if (irq_mask & irq_status) { + /* return immediately if the IRQ has already happened. 
*/ + spin_unlock_irqrestore(&denali->irq_lock, flags); + return irq_status; } - spin_unlock(&denali->irq_lock); - return result; -} -static uint32_t wait_for_irq(struct denali_nand_info *denali, uint32_t irq_mask) -{ - unsigned long comp_res; - uint32_t intr_status; - unsigned long timeout = msecs_to_jiffies(1000); + denali->irq_mask = irq_mask; + reinit_completion(&denali->complete); + spin_unlock_irqrestore(&denali->irq_lock, flags); - do { - comp_res = - wait_for_completion_timeout(&denali->complete, timeout); - spin_lock_irq(&denali->irq_lock); - intr_status = denali->irq_status; - - if (intr_status & irq_mask) { - denali->irq_status &= ~irq_mask; - spin_unlock_irq(&denali->irq_lock); - /* our interrupt was detected */ - break; - } + time_left = wait_for_completion_timeout(&denali->complete, + msecs_to_jiffies(1000)); + if (!time_left) { + dev_err(denali->dev, "timeout while waiting for irq 0x%x\n", + denali->irq_mask); + return 0; + } - /* - * these are not the interrupts you are looking for - - * need to wait again - */ - spin_unlock_irq(&denali->irq_lock); - } while (comp_res != 0); + return denali->irq_status; +} + +static uint32_t denali_check_irq(struct denali_nand_info *denali) +{ + unsigned long flags; + uint32_t irq_status; - if (comp_res == 0) { - /* timeout */ - pr_err("timeout occurred, status = 0x%x, mask = 0x%x\n", - intr_status, irq_mask); + spin_lock_irqsave(&denali->irq_lock, flags); + irq_status = denali->irq_status; + spin_unlock_irqrestore(&denali->irq_lock, flags); - intr_status = 0; - } - return intr_status; + return irq_status; } /* @@ -664,153 +222,111 @@ static void setup_ecc_for_xfer(struct denali_nand_info *denali, bool ecc_en, transfer_spare_flag = transfer_spare ? TRANSFER_SPARE_REG__FLAG : 0; /* Enable spare area/ECC per user's request. */ - iowrite32(ecc_en_flag, denali->flash_reg + ECC_ENABLE); - iowrite32(transfer_spare_flag, denali->flash_reg + TRANSFER_SPARE_REG); + iowrite32(ecc_en_flag, denali->reg + ECC_ENABLE); + iowrite32(transfer_spare_flag, denali->reg + TRANSFER_SPARE_REG); } -/* - * sends a pipeline command operation to the controller. See the Denali NAND - * controller's user guide for more information (section 4.2.3.6). 
- */ -static int denali_send_pipeline_cmd(struct denali_nand_info *denali, - bool ecc_en, bool transfer_spare, - int access_type, int op) +static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { - int status = PASS; - uint32_t addr, cmd; - - setup_ecc_for_xfer(denali, ecc_en, transfer_spare); + struct denali_nand_info *denali = mtd_to_denali(mtd); + int i; - clear_interrupts(denali); + iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), + denali->host + DENALI_HOST_ADDR); - addr = BANK(denali->flash_bank) | denali->page; + for (i = 0; i < len; i++) + buf[i] = ioread32(denali->host + DENALI_HOST_DATA); +} - if (op == DENALI_WRITE && access_type != SPARE_ACCESS) { - cmd = MODE_01 | addr; - iowrite32(cmd, denali->flash_mem); - } else if (op == DENALI_WRITE && access_type == SPARE_ACCESS) { - /* read spare area */ - cmd = MODE_10 | addr; - index_addr(denali, cmd, access_type); +static void denali_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + int i; - cmd = MODE_01 | addr; - iowrite32(cmd, denali->flash_mem); - } else if (op == DENALI_READ) { - /* setup page read request for access type */ - cmd = MODE_10 | addr; - index_addr(denali, cmd, access_type); + iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), + denali->host + DENALI_HOST_ADDR); - cmd = MODE_01 | addr; - iowrite32(cmd, denali->flash_mem); - } - return status; + for (i = 0; i < len; i++) + iowrite32(buf[i], denali->host + DENALI_HOST_DATA); } -/* helper function that simply writes a buffer to the flash */ -static int write_data_to_flash_mem(struct denali_nand_info *denali, - const uint8_t *buf, int len) +static void denali_read_buf16(struct mtd_info *mtd, uint8_t *buf, int len) { - uint32_t *buf32; + struct denali_nand_info *denali = mtd_to_denali(mtd); + uint16_t *buf16 = (uint16_t *)buf; int i; - /* - * verify that the len is a multiple of 4. - * see comment in read_data_from_flash_mem() - */ - BUG_ON((len % 4) != 0); + iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), + denali->host + DENALI_HOST_ADDR); - /* write the data to the flash memory */ - buf32 = (uint32_t *)buf; - for (i = 0; i < len / 4; i++) - iowrite32(*buf32++, denali->flash_mem + 0x10); - return i * 4; /* intent is to return the number of bytes read */ + for (i = 0; i < len / 2; i++) + buf16[i] = ioread32(denali->host + DENALI_HOST_DATA); } -/* helper function that simply reads a buffer from the flash */ -static int read_data_from_flash_mem(struct denali_nand_info *denali, - uint8_t *buf, int len) +static void denali_write_buf16(struct mtd_info *mtd, const uint8_t *buf, + int len) { - uint32_t *buf32; + struct denali_nand_info *denali = mtd_to_denali(mtd); + const uint16_t *buf16 = (const uint16_t *)buf; int i; - /* - * we assume that len will be a multiple of 4, if not it would be nice - * to know about it ASAP rather than have random failures... - * This assumption is based on the fact that this function is designed - * to be used to read flash pages, which are typically multiples of 4. 
- */ - BUG_ON((len % 4) != 0); + iowrite32(DENALI_MAP11_DATA | DENALI_BANK(denali), + denali->host + DENALI_HOST_ADDR); - /* transfer the data from the flash */ - buf32 = (uint32_t *)buf; - for (i = 0; i < len / 4; i++) - *buf32++ = ioread32(denali->flash_mem + 0x10); - return i * 4; /* intent is to return the number of bytes read */ + for (i = 0; i < len / 2; i++) + iowrite32(buf16[i], denali->host + DENALI_HOST_DATA); } -/* writes OOB data to the device */ -static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) +static uint8_t denali_read_byte(struct mtd_info *mtd) { - struct denali_nand_info *denali = mtd_to_denali(mtd); - uint32_t irq_status; - uint32_t irq_mask = INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL; - int status = 0; + uint8_t byte; - denali->page = page; + denali_read_buf(mtd, &byte, 1); - if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS, - DENALI_WRITE) == PASS) { - write_data_to_flash_mem(denali, buf, mtd->oobsize); + return byte; +} - /* wait for operation to complete */ - irq_status = wait_for_irq(denali, irq_mask); +static void denali_write_byte(struct mtd_info *mtd, uint8_t byte) +{ + denali_write_buf(mtd, &byte, 1); +} - if (irq_status == 0) { - dev_err(denali->dev, "OOB write failed\n"); - status = -EIO; - } - } else { - dev_err(denali->dev, "unable to send pipeline command\n"); - status = -EIO; - } - return status; +static uint16_t denali_read_word(struct mtd_info *mtd) +{ + uint16_t word; + + denali_read_buf16(mtd, (uint8_t *)&word, 2); + + return word; } -/* reads OOB data from the device */ -static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page) +static void denali_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) { struct denali_nand_info *denali = mtd_to_denali(mtd); - uint32_t irq_mask = INTR__LOAD_COMP; - uint32_t irq_status, addr, cmd; + uint32_t type; - denali->page = page; + if (ctrl & NAND_CLE) + type = DENALI_MAP11_CMD; + else if (ctrl & NAND_ALE) + type = DENALI_MAP11_ADDR; + else + return; - if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS, - DENALI_READ) == PASS) { - read_data_from_flash_mem(denali, buf, mtd->oobsize); + /* + * Some commands are followed by chip->dev_ready or chip->waitfunc. + * irq_status must be cleared here to catch the R/B# interrupt later. + */ + if (ctrl & NAND_CTRL_CHANGE) + denali_reset_irq(denali); - /* - * wait for command to be accepted - * can always use status0 bit as the - * mask is identical for each bank. - */ - irq_status = wait_for_irq(denali, irq_mask); + denali_host_write(denali, DENALI_BANK(denali) | type, dat); +} - if (irq_status == 0) - dev_err(denali->dev, "page on OOB timeout %d\n", - denali->page); +static int denali_dev_ready(struct mtd_info *mtd) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); - /* - * We set the device back to MAIN_ACCESS here as I observed - * instability with the controller if you do a block erase - * and the last transaction was a SPARE_ACCESS. Block erase - * is reliable (according to the MTD test infrastructure) - * if you are in MAIN_ACCESS. 
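[Editor's note] denali_cmd_ctrl() above reduces the legacy cmdfunc state machine to a translation: CLE becomes a MAP11 command cycle, ALE an address cycle, anything else is ignored, and NAND_CTRL_CHANGE resets the IRQ bookkeeping so a later dev_ready/waitfunc can catch R/B#. A runnable sketch of the dispatch; the NAND_CLE/NAND_ALE/NAND_CTRL_CHANGE values below are restated from the MTD core only so the file compiles standalone:

#include <stdint.h>
#include <stdio.h>

#define NAND_CLE         0x02  /* command latch enable (as in rawnand.h) */
#define NAND_ALE         0x04  /* address latch enable */
#define NAND_CTRL_CHANGE 0x80  /* ctrl lines changed since the last call */

#define DENALI_MAP11      (3U << 26)
#define DENALI_MAP11_CMD  (DENALI_MAP11 | 0)
#define DENALI_MAP11_ADDR (DENALI_MAP11 | 1)

static void cmd_ctrl(int dat, unsigned int ctrl)
{
        uint32_t type;

        if (ctrl & NAND_CLE)
                type = DENALI_MAP11_CMD;
        else if (ctrl & NAND_ALE)
                type = DENALI_MAP11_ADDR;
        else
                return;                 /* nothing to drive on the bus */

        if (ctrl & NAND_CTRL_CHANGE)
                printf("(reset irq bookkeeping)\n");  /* denali_reset_irq() */

        printf("cycle 0x%08x <- 0x%02x\n", (unsigned)type, (unsigned)dat);
}

int main(void)
{
        cmd_ctrl(0x90, NAND_CLE | NAND_CTRL_CHANGE);  /* READID command */
        cmd_ctrl(0x00, NAND_ALE);                     /* its address cycle */
        return 0;
}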
- */ - addr = BANK(denali->flash_bank) | denali->page; - cmd = MODE_10 | addr; - index_addr(denali, cmd, MAIN_ACCESS); - } + return !!(denali_check_irq(denali) & INTR__INT_ACT); } static int denali_check_erased_page(struct mtd_info *mtd, @@ -856,11 +372,11 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd, unsigned long *uncor_ecc_flags) { struct nand_chip *chip = mtd_to_nand(mtd); - int bank = denali->flash_bank; + int bank = denali->active_bank; uint32_t ecc_cor; unsigned int max_bitflips; - ecc_cor = ioread32(denali->flash_reg + ECC_COR_INFO(bank)); + ecc_cor = ioread32(denali->reg + ECC_COR_INFO(bank)); ecc_cor >>= ECC_COR_INFO__SHIFT(bank); if (ecc_cor & ECC_COR_INFO__UNCOR_ERR) { @@ -886,8 +402,6 @@ static int denali_hw_ecc_fixup(struct mtd_info *mtd, return max_bitflips; } -#define ECC_SECTOR_SIZE 512 - #define ECC_SECTOR(x) (((x) & ECC_ERROR_ADDRESS__SECTOR_NR) >> 12) #define ECC_BYTE(x) (((x) & ECC_ERROR_ADDRESS__OFFSET)) #define ECC_CORRECTION_VALUE(x) ((x) & ERR_CORRECTION_INFO__BYTEMASK) @@ -899,22 +413,23 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd, struct denali_nand_info *denali, unsigned long *uncor_ecc_flags, uint8_t *buf) { + unsigned int ecc_size = denali->nand.ecc.size; unsigned int bitflips = 0; unsigned int max_bitflips = 0; uint32_t err_addr, err_cor_info; unsigned int err_byte, err_sector, err_device; uint8_t err_cor_value; unsigned int prev_sector = 0; + uint32_t irq_status; - /* read the ECC errors. we'll ignore them for now */ - denali_set_intr_modes(denali, false); + denali_reset_irq(denali); do { - err_addr = ioread32(denali->flash_reg + ECC_ERROR_ADDRESS); + err_addr = ioread32(denali->reg + ECC_ERROR_ADDRESS); err_sector = ECC_SECTOR(err_addr); err_byte = ECC_BYTE(err_addr); - err_cor_info = ioread32(denali->flash_reg + ERR_CORRECTION_INFO); + err_cor_info = ioread32(denali->reg + ERR_CORRECTION_INFO); err_cor_value = ECC_CORRECTION_VALUE(err_cor_info); err_device = ECC_ERR_DEVICE(err_cor_info); @@ -928,9 +443,9 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd, * an erased sector. */ *uncor_ecc_flags |= BIT(err_sector); - } else if (err_byte < ECC_SECTOR_SIZE) { + } else if (err_byte < ecc_size) { /* - * If err_byte is larger than ECC_SECTOR_SIZE, means error + * If err_byte is larger than ecc_size, means error * happened in OOB, so we ignore it. It's no need for * us to correct it err_device is represented the NAND * error bits are happened in if there are more than @@ -939,8 +454,8 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd, int offset; unsigned int flips_in_byte; - offset = (err_sector * ECC_SECTOR_SIZE + err_byte) * - denali->devnum + err_device; + offset = (err_sector * ecc_size + err_byte) * + denali->devs_per_cs + err_device; /* correct the ECC error */ flips_in_byte = hweight8(buf[offset] ^ err_cor_value); @@ -959,10 +474,9 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd, * ECC_TRANSACTION_DONE interrupt, so here just wait for * a while for this interrupt */ - while (!(read_interrupt_status(denali) & INTR__ECC_TRANSACTION_DONE)) - cpu_relax(); - clear_interrupts(denali); - denali_set_intr_modes(denali, true); + irq_status = denali_wait_for_irq(denali, INTR__ECC_TRANSACTION_DONE); + if (!(irq_status & INTR__ECC_TRANSACTION_DONE)) + return -EIO; return max_bitflips; } @@ -970,17 +484,17 @@ static int denali_sw_ecc_fixup(struct mtd_info *mtd, /* programs the controller to either enable/disable DMA transfers */ static void denali_enable_dma(struct denali_nand_info *denali, bool en) { - iowrite32(en ? 
DMA_ENABLE__FLAG : 0, denali->flash_reg + DMA_ENABLE); - ioread32(denali->flash_reg + DMA_ENABLE); + iowrite32(en ? DMA_ENABLE__FLAG : 0, denali->reg + DMA_ENABLE); + ioread32(denali->reg + DMA_ENABLE); } -static void denali_setup_dma64(struct denali_nand_info *denali, int op) +static void denali_setup_dma64(struct denali_nand_info *denali, + dma_addr_t dma_addr, int page, int write) { uint32_t mode; const int page_count = 1; - uint64_t addr = denali->buf.dma_buf; - mode = MODE_10 | BANK(denali->flash_bank) | denali->page; + mode = DENALI_MAP10 | DENALI_BANK(denali) | page; /* DMA is a three step process */ @@ -988,191 +502,354 @@ static void denali_setup_dma64(struct denali_nand_info *denali, int op) * 1. setup transfer type, interrupt when complete, * burst len = 64 bytes, the number of pages */ - index_addr(denali, mode, 0x01002000 | (64 << 16) | op | page_count); + denali_host_write(denali, mode, + 0x01002000 | (64 << 16) | (write << 8) | page_count); /* 2. set memory low address */ - index_addr(denali, mode, addr); + denali_host_write(denali, mode, dma_addr); /* 3. set memory high address */ - index_addr(denali, mode, addr >> 32); + denali_host_write(denali, mode, (uint64_t)dma_addr >> 32); } -static void denali_setup_dma32(struct denali_nand_info *denali, int op) +static void denali_setup_dma32(struct denali_nand_info *denali, + dma_addr_t dma_addr, int page, int write) { uint32_t mode; const int page_count = 1; - uint32_t addr = denali->buf.dma_buf; - mode = MODE_10 | BANK(denali->flash_bank); + mode = DENALI_MAP10 | DENALI_BANK(denali); /* DMA is a four step process */ /* 1. setup transfer type and # of pages */ - index_addr(denali, mode | denali->page, 0x2000 | op | page_count); + denali_host_write(denali, mode | page, + 0x2000 | (write << 8) | page_count); /* 2. set memory high address bits 23:8 */ - index_addr(denali, mode | ((addr >> 16) << 8), 0x2200); + denali_host_write(denali, mode | ((dma_addr >> 16) << 8), 0x2200); /* 3. set memory low address bits 23:8 */ - index_addr(denali, mode | ((addr & 0xffff) << 8), 0x2300); + denali_host_write(denali, mode | ((dma_addr & 0xffff) << 8), 0x2300); /* 4. interrupt when complete, burst len = 64 bytes */ - index_addr(denali, mode | 0x14000, 0x2400); + denali_host_write(denali, mode | 0x14000, 0x2400); } -static void denali_setup_dma(struct denali_nand_info *denali, int op) +static void denali_setup_dma(struct denali_nand_info *denali, + dma_addr_t dma_addr, int page, int write) { if (denali->caps & DENALI_CAP_DMA_64BIT) - denali_setup_dma64(denali, op); + denali_setup_dma64(denali, dma_addr, page, write); else - denali_setup_dma32(denali, op); + denali_setup_dma32(denali, dma_addr, page, write); } -/* - * writes a page. user specifies type, and this function handles the - * configuration details. 
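[Editor's note] denali_setup_dma64() above is three MAP10 writes to the same mode word: a transfer descriptor (burst length 64, interrupt on completion, the read/write flag at bit 8, page count in the low byte), then the low and high halves of the DMA address. A sketch that just logs the sequence; the bank, page, and address are arbitrary example values:

#include <stdint.h>
#include <stdio.h>

#define DENALI_MAP10 (2U << 26)

static void host_write(uint32_t addr, uint32_t data)
{
        printf("HOST_ADDR <- 0x%08x, HOST_DATA <- 0x%08x\n",
               (unsigned)addr, (unsigned)data);
}

/* the three-step sequence of denali_setup_dma64(), single page, burst 64 */
static void setup_dma64(uint64_t dma_addr, int bank, int page, int write)
{
        uint32_t mode = DENALI_MAP10 | ((uint32_t)bank << 24) | page;
        const int page_count = 1;

        host_write(mode, 0x01002000 | (64 << 16) | (write << 8) | page_count);
        host_write(mode, (uint32_t)dma_addr);          /* memory low address */
        host_write(mode, (uint32_t)(dma_addr >> 32));  /* memory high address */
}

int main(void)
{
        setup_dma64(0x123456789aULL, 0, 16, 1);
        return 0;
}

The 32-bit variant in the same hunk needs four writes instead, because it smuggles the address fragments into the mode word itself rather than into the data register.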
- */ -static int write_page(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, bool raw_xfer) +static int denali_pio_read(struct denali_nand_info *denali, void *buf, + size_t size, int page, int raw) { - struct denali_nand_info *denali = mtd_to_denali(mtd); - dma_addr_t addr = denali->buf.dma_buf; - size_t size = mtd->writesize + mtd->oobsize; + uint32_t addr = DENALI_BANK(denali) | page; + uint32_t *buf32 = (uint32_t *)buf; + uint32_t irq_status, ecc_err_mask; + int i; + + if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) + ecc_err_mask = INTR__ECC_UNCOR_ERR; + else + ecc_err_mask = INTR__ECC_ERR; + + denali_reset_irq(denali); + + iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR); + for (i = 0; i < size / 4; i++) + *buf32++ = ioread32(denali->host + DENALI_HOST_DATA); + + irq_status = denali_wait_for_irq(denali, INTR__PAGE_XFER_INC); + if (!(irq_status & INTR__PAGE_XFER_INC)) + return -EIO; + + if (irq_status & INTR__ERASED_PAGE) + memset(buf, 0xff, size); + + return irq_status & ecc_err_mask ? -EBADMSG : 0; +} + +static int denali_pio_write(struct denali_nand_info *denali, + const void *buf, size_t size, int page, int raw) +{ + uint32_t addr = DENALI_BANK(denali) | page; + const uint32_t *buf32 = (uint32_t *)buf; uint32_t irq_status; - uint32_t irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL; + int i; - /* - * if it is a raw xfer, we want to disable ecc and send the spare area. - * !raw_xfer - enable ecc - * raw_xfer - transfer spare - */ - setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer); + denali_reset_irq(denali); - /* copy buffer into DMA buffer */ - memcpy(denali->buf.buf, buf, mtd->writesize); + iowrite32(DENALI_MAP01 | addr, denali->host + DENALI_HOST_ADDR); + for (i = 0; i < size / 4; i++) + iowrite32(*buf32++, denali->host + DENALI_HOST_DATA); - if (raw_xfer) { - /* transfer the data to the spare area */ - memcpy(denali->buf.buf + mtd->writesize, - chip->oob_poi, - mtd->oobsize); + irq_status = denali_wait_for_irq(denali, + INTR__PROGRAM_COMP | INTR__PROGRAM_FAIL); + if (!(irq_status & INTR__PROGRAM_COMP)) + return -EIO; + + return 0; +} + +static int denali_pio_xfer(struct denali_nand_info *denali, void *buf, + size_t size, int page, int raw, int write) +{ + if (write) + return denali_pio_write(denali, buf, size, page, raw); + else + return denali_pio_read(denali, buf, size, page, raw); +} + +static int denali_dma_xfer(struct denali_nand_info *denali, void *buf, + size_t size, int page, int raw, int write) +{ + dma_addr_t dma_addr; + uint32_t irq_mask, irq_status, ecc_err_mask; + enum dma_data_direction dir = write ? DMA_TO_DEVICE : DMA_FROM_DEVICE; + int ret = 0; + + dma_addr = dma_map_single(denali->dev, buf, size, dir); + if (dma_mapping_error(denali->dev, dma_addr)) { + dev_dbg(denali->dev, "Failed to DMA-map buffer. Trying PIO.\n"); + return denali_pio_xfer(denali, buf, size, page, raw, write); } - dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE); + if (write) { + /* + * INTR__PROGRAM_COMP is never asserted for the DMA transfer. + * We can use INTR__DMA_CMD_COMP instead. This flag is asserted + * when the page program is completed. 
+ */ + irq_mask = INTR__DMA_CMD_COMP | INTR__PROGRAM_FAIL; + ecc_err_mask = 0; + } else if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) { + irq_mask = INTR__DMA_CMD_COMP; + ecc_err_mask = INTR__ECC_UNCOR_ERR; + } else { + irq_mask = INTR__DMA_CMD_COMP; + ecc_err_mask = INTR__ECC_ERR; + } - clear_interrupts(denali); denali_enable_dma(denali, true); - denali_setup_dma(denali, DENALI_WRITE); + denali_reset_irq(denali); + denali_setup_dma(denali, dma_addr, page, write); /* wait for operation to complete */ - irq_status = wait_for_irq(denali, irq_mask); - - if (irq_status == 0) { - dev_err(denali->dev, "timeout on write_page (type = %d)\n", - raw_xfer); - denali->status = NAND_STATUS_FAIL; - } + irq_status = denali_wait_for_irq(denali, irq_mask); + if (!(irq_status & INTR__DMA_CMD_COMP)) + ret = -EIO; + else if (irq_status & ecc_err_mask) + ret = -EBADMSG; denali_enable_dma(denali, false); - dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE); + dma_unmap_single(denali->dev, dma_addr, size, dir); - return 0; -} + if (irq_status & INTR__ERASED_PAGE) + memset(buf, 0xff, size); -/* NAND core entry points */ + return ret; +} -/* - * this is the callback that the NAND core calls to write a page. Since - * writing a page with ECC or without is similar, all the work is done - * by write_page above. - */ -static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required, int page) +static int denali_data_xfer(struct denali_nand_info *denali, void *buf, + size_t size, int page, int raw, int write) { - /* - * for regular page writes, we let HW handle all the ECC - * data written to the device. - */ - return write_page(mtd, chip, buf, false); + setup_ecc_for_xfer(denali, !raw, raw); + + if (denali->dma_avail) + return denali_dma_xfer(denali, buf, size, page, raw, write); + else + return denali_pio_xfer(denali, buf, size, page, raw, write); } -/* - * This is the callback that the NAND core calls to write a page without ECC. - * raw access is similar to ECC page writes, so all the work is done in the - * write_page() function above. - */ -static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required, - int page) +static void denali_oob_xfer(struct mtd_info *mtd, struct nand_chip *chip, + int page, int write) { - /* - * for raw page writes, we want to disable ECC and simply write - * whatever data is in the buffer. - */ - return write_page(mtd, chip, buf, true); + struct denali_nand_info *denali = mtd_to_denali(mtd); + unsigned int start_cmd = write ? NAND_CMD_SEQIN : NAND_CMD_READ0; + unsigned int rnd_cmd = write ? 
NAND_CMD_RNDIN : NAND_CMD_RNDOUT; + int writesize = mtd->writesize; + int oobsize = mtd->oobsize; + uint8_t *bufpoi = chip->oob_poi; + int ecc_steps = chip->ecc.steps; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + int oob_skip = denali->oob_skip_bytes; + size_t size = writesize + oobsize; + int i, pos, len; + + /* BBM at the beginning of the OOB area */ + chip->cmdfunc(mtd, start_cmd, writesize, page); + if (write) + chip->write_buf(mtd, bufpoi, oob_skip); + else + chip->read_buf(mtd, bufpoi, oob_skip); + bufpoi += oob_skip; + + /* OOB ECC */ + for (i = 0; i < ecc_steps; i++) { + pos = ecc_size + i * (ecc_size + ecc_bytes); + len = ecc_bytes; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + chip->cmdfunc(mtd, rnd_cmd, pos, -1); + if (write) + chip->write_buf(mtd, bufpoi, len); + else + chip->read_buf(mtd, bufpoi, len); + bufpoi += len; + if (len < ecc_bytes) { + len = ecc_bytes - len; + chip->cmdfunc(mtd, rnd_cmd, writesize + oob_skip, -1); + if (write) + chip->write_buf(mtd, bufpoi, len); + else + chip->read_buf(mtd, bufpoi, len); + bufpoi += len; + } + } + + /* OOB free */ + len = oobsize - (bufpoi - chip->oob_poi); + chip->cmdfunc(mtd, rnd_cmd, size - len, -1); + if (write) + chip->write_buf(mtd, bufpoi, len); + else + chip->read_buf(mtd, bufpoi, len); } -static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip, - int page) +static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page) { - return write_oob_data(mtd, chip->oob_poi, page); + struct denali_nand_info *denali = mtd_to_denali(mtd); + int writesize = mtd->writesize; + int oobsize = mtd->oobsize; + int ecc_steps = chip->ecc.steps; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + void *dma_buf = denali->buf; + int oob_skip = denali->oob_skip_bytes; + size_t size = writesize + oobsize; + int ret, i, pos, len; + + ret = denali_data_xfer(denali, dma_buf, size, page, 1, 0); + if (ret) + return ret; + + /* Arrange the buffer for syndrome payload/ecc layout */ + if (buf) { + for (i = 0; i < ecc_steps; i++) { + pos = i * (ecc_size + ecc_bytes); + len = ecc_size; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(buf, dma_buf + pos, len); + buf += len; + if (len < ecc_size) { + len = ecc_size - len; + memcpy(buf, dma_buf + writesize + oob_skip, + len); + buf += len; + } + } + } + + if (oob_required) { + uint8_t *oob = chip->oob_poi; + + /* BBM at the beginning of the OOB area */ + memcpy(oob, dma_buf + writesize, oob_skip); + oob += oob_skip; + + /* OOB ECC */ + for (i = 0; i < ecc_steps; i++) { + pos = ecc_size + i * (ecc_size + ecc_bytes); + len = ecc_bytes; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(oob, dma_buf + pos, len); + oob += len; + if (len < ecc_bytes) { + len = ecc_bytes - len; + memcpy(oob, dma_buf + writesize + oob_skip, + len); + oob += len; + } + } + + /* OOB free */ + len = oobsize - (oob - chip->oob_poi); + memcpy(oob, dma_buf + size - len, len); + } + + return 0; } static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) { - read_oob_data(mtd, chip->oob_poi, page); + denali_oob_xfer(mtd, chip, page, 0); return 0; } -static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int oob_required, int page) +static int denali_write_oob(struct mtd_info *mtd, 
struct nand_chip *chip, + int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); - dma_addr_t addr = denali->buf.dma_buf; - size_t size = mtd->writesize + mtd->oobsize; - uint32_t irq_status; - uint32_t irq_mask = denali->caps & DENALI_CAP_HW_ECC_FIXUP ? - INTR__DMA_CMD_COMP | INTR__ECC_UNCOR_ERR : - INTR__ECC_TRANSACTION_DONE | INTR__ECC_ERR; - unsigned long uncor_ecc_flags = 0; - int stat = 0; + int status; - if (page != denali->page) { - dev_err(denali->dev, - "IN %s: page %d is not equal to denali->page %d", - __func__, page, denali->page); - BUG(); - } + denali_reset_irq(denali); - setup_ecc_for_xfer(denali, true, false); + denali_oob_xfer(mtd, chip, page, 1); - denali_enable_dma(denali, true); - dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE); + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); + status = chip->waitfunc(mtd, chip); - clear_interrupts(denali); - denali_setup_dma(denali, DENALI_READ); - - /* wait for operation to complete */ - irq_status = wait_for_irq(denali, irq_mask); + return status & NAND_STATUS_FAIL ? -EIO : 0; +} - dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE); +static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page) +{ + struct denali_nand_info *denali = mtd_to_denali(mtd); + unsigned long uncor_ecc_flags = 0; + int stat = 0; + int ret; - memcpy(buf, denali->buf.buf, mtd->writesize); + ret = denali_data_xfer(denali, buf, mtd->writesize, page, 0, 0); + if (ret && ret != -EBADMSG) + return ret; if (denali->caps & DENALI_CAP_HW_ECC_FIXUP) stat = denali_hw_ecc_fixup(mtd, denali, &uncor_ecc_flags); - else if (irq_status & INTR__ECC_ERR) + else if (ret == -EBADMSG) stat = denali_sw_ecc_fixup(mtd, denali, &uncor_ecc_flags, buf); - denali_enable_dma(denali, false); if (stat < 0) return stat; if (uncor_ecc_flags) { - read_oob_data(mtd, chip->oob_poi, denali->page); + ret = denali_read_oob(mtd, chip, page); + if (ret) + return ret; stat = denali_check_erased_page(mtd, chip, buf, uncor_ecc_flags, stat); @@ -1181,137 +858,266 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, return stat; } -static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int oob_required, int page) +static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required, int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); - dma_addr_t addr = denali->buf.dma_buf; - size_t size = mtd->writesize + mtd->oobsize; - uint32_t irq_mask = INTR__DMA_CMD_COMP; - - if (page != denali->page) { - dev_err(denali->dev, - "IN %s: page %d is not equal to denali->page %d", - __func__, page, denali->page); - BUG(); - } - - setup_ecc_for_xfer(denali, false, true); - denali_enable_dma(denali, true); - - dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE); - - clear_interrupts(denali); - denali_setup_dma(denali, DENALI_READ); - - /* wait for operation to complete */ - wait_for_irq(denali, irq_mask); + int writesize = mtd->writesize; + int oobsize = mtd->oobsize; + int ecc_steps = chip->ecc.steps; + int ecc_size = chip->ecc.size; + int ecc_bytes = chip->ecc.bytes; + void *dma_buf = denali->buf; + int oob_skip = denali->oob_skip_bytes; + size_t size = writesize + oobsize; + int i, pos, len; - dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE); + /* + * Fill the buffer with 0xff first except the full page transfer. + * This simplifies the logic. 
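[Editor's note] The pos/len bookkeeping repeated through denali_oob_xfer() and the raw page helpers implements the controller's in-band layout: each ecc_size + ecc_bytes stride of the physical page holds one payload step followed by its syndrome bytes, the BBM area at the start of the OOB is skipped (oob_skip_bytes), and a step that crosses the writesize boundary continues after that skip. A runnable walk of the payload spans; the page geometry and oob_skip below are plausible example values, not taken from a real chip:

#include <stdio.h>

int main(void)
{
        /* example geometry: 2048+64 page, 512B steps, 14 ECC bytes, 8 skipped */
        int writesize = 2048, ecc_size = 512, ecc_bytes = 14, oob_skip = 8;
        int ecc_steps = writesize / ecc_size;

        for (int i = 0; i < ecc_steps; i++) {
                int pos = i * (ecc_size + ecc_bytes);
                int len = ecc_size;

                if (pos >= writesize)
                        pos += oob_skip;         /* jump over the BBM area */
                else if (pos + len > writesize)
                        len = writesize - pos;   /* step split by the boundary */

                printf("step %d: payload at %4d, len %3d", i, pos, len);
                if (len < ecc_size)     /* the remainder lands after the skip */
                        printf(" + %d more at %d",
                               ecc_size - len, writesize + oob_skip);
                printf("\n");
        }
        return 0;
}

The OOB ECC pass in the hunks above runs the same loop with pos = ecc_size + i * (ecc_size + ecc_bytes), i.e. the identical stride offset by one payload.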
+ */ + if (!buf || !oob_required) + memset(dma_buf, 0xff, size); + + /* Arrange the buffer for syndrome payload/ecc layout */ + if (buf) { + for (i = 0; i < ecc_steps; i++) { + pos = i * (ecc_size + ecc_bytes); + len = ecc_size; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(dma_buf + pos, buf, len); + buf += len; + if (len < ecc_size) { + len = ecc_size - len; + memcpy(dma_buf + writesize + oob_skip, buf, + len); + buf += len; + } + } + } - denali_enable_dma(denali, false); + if (oob_required) { + const uint8_t *oob = chip->oob_poi; + + /* BBM at the beginning of the OOB area */ + memcpy(dma_buf + writesize, oob, oob_skip); + oob += oob_skip; + + /* OOB ECC */ + for (i = 0; i < ecc_steps; i++) { + pos = ecc_size + i * (ecc_size + ecc_bytes); + len = ecc_bytes; + + if (pos >= writesize) + pos += oob_skip; + else if (pos + len > writesize) + len = writesize - pos; + + memcpy(dma_buf + pos, oob, len); + oob += len; + if (len < ecc_bytes) { + len = ecc_bytes - len; + memcpy(dma_buf + writesize + oob_skip, oob, + len); + oob += len; + } + } - memcpy(buf, denali->buf.buf, mtd->writesize); - memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize); + /* OOB free */ + len = oobsize - (oob - chip->oob_poi); + memcpy(dma_buf + size - len, oob, len); + } - return 0; + return denali_data_xfer(denali, dma_buf, size, page, 1, 1); } -static uint8_t denali_read_byte(struct mtd_info *mtd) +static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required, int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); - uint8_t result = 0xff; - - if (denali->buf.head < denali->buf.tail) - result = denali->buf.buf[denali->buf.head++]; - return result; + return denali_data_xfer(denali, (void *)buf, mtd->writesize, + page, 0, 1); } static void denali_select_chip(struct mtd_info *mtd, int chip) { struct denali_nand_info *denali = mtd_to_denali(mtd); - spin_lock_irq(&denali->irq_lock); - denali->flash_bank = chip; - spin_unlock_irq(&denali->irq_lock); + denali->active_bank = chip; } static int denali_waitfunc(struct mtd_info *mtd, struct nand_chip *chip) { struct denali_nand_info *denali = mtd_to_denali(mtd); - int status = denali->status; + uint32_t irq_status; - denali->status = 0; + /* R/B# pin transitioned from low to high? */ + irq_status = denali_wait_for_irq(denali, INTR__INT_ACT); - return status; + return irq_status & INTR__INT_ACT ? 0 : NAND_STATUS_FAIL; } static int denali_erase(struct mtd_info *mtd, int page) { struct denali_nand_info *denali = mtd_to_denali(mtd); + uint32_t irq_status; - uint32_t cmd, irq_status; - - clear_interrupts(denali); + denali_reset_irq(denali); - /* setup page read request for access type */ - cmd = MODE_10 | BANK(denali->flash_bank) | page; - index_addr(denali, cmd, 0x1); + denali_host_write(denali, DENALI_MAP10 | DENALI_BANK(denali) | page, + DENALI_ERASE); /* wait for erase to complete or failure to occur */ - irq_status = wait_for_irq(denali, INTR__ERASE_COMP | INTR__ERASE_FAIL); + irq_status = denali_wait_for_irq(denali, + INTR__ERASE_COMP | INTR__ERASE_FAIL); - return irq_status & INTR__ERASE_FAIL ? NAND_STATUS_FAIL : PASS; + return irq_status & INTR__ERASE_COMP ? 
0 : NAND_STATUS_FAIL; } -static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col, - int page) +#define DIV_ROUND_DOWN_ULL(ll, d) \ + ({ unsigned long long _tmp = (ll); do_div(_tmp, d); _tmp; }) + +static int denali_setup_data_interface(struct mtd_info *mtd, int chipnr, + const struct nand_data_interface *conf) { struct denali_nand_info *denali = mtd_to_denali(mtd); - uint32_t addr, id; + const struct nand_sdr_timings *timings; + unsigned long t_clk; + int acc_clks, re_2_we, re_2_re, we_2_re, addr_2_data; + int rdwr_en_lo, rdwr_en_hi, rdwr_en_lo_hi, cs_setup; + int addr_2_data_mask; + uint32_t tmp; + + timings = nand_get_sdr_timings(conf); + if (IS_ERR(timings)) + return PTR_ERR(timings); + + /* clk_x period in picoseconds */ + t_clk = DIV_ROUND_DOWN_ULL(1000000000000ULL, denali->clk_x_rate); + if (!t_clk) + return -EINVAL; + + if (chipnr == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + + /* tREA -> ACC_CLKS */ + acc_clks = DIV_ROUND_UP(timings->tREA_max, t_clk); + acc_clks = min_t(int, acc_clks, ACC_CLKS__VALUE); + + tmp = ioread32(denali->reg + ACC_CLKS); + tmp &= ~ACC_CLKS__VALUE; + tmp |= acc_clks; + iowrite32(tmp, denali->reg + ACC_CLKS); + + /* tRWH -> RE_2_WE */ + re_2_we = DIV_ROUND_UP(timings->tRHW_min, t_clk); + re_2_we = min_t(int, re_2_we, RE_2_WE__VALUE); + + tmp = ioread32(denali->reg + RE_2_WE); + tmp &= ~RE_2_WE__VALUE; + tmp |= re_2_we; + iowrite32(tmp, denali->reg + RE_2_WE); + + /* tRHZ -> RE_2_RE */ + re_2_re = DIV_ROUND_UP(timings->tRHZ_max, t_clk); + re_2_re = min_t(int, re_2_re, RE_2_RE__VALUE); + + tmp = ioread32(denali->reg + RE_2_RE); + tmp &= ~RE_2_RE__VALUE; + tmp |= re_2_re; + iowrite32(tmp, denali->reg + RE_2_RE); + + /* tWHR -> WE_2_RE */ + we_2_re = DIV_ROUND_UP(timings->tWHR_min, t_clk); + we_2_re = min_t(int, we_2_re, TWHR2_AND_WE_2_RE__WE_2_RE); + + tmp = ioread32(denali->reg + TWHR2_AND_WE_2_RE); + tmp &= ~TWHR2_AND_WE_2_RE__WE_2_RE; + tmp |= we_2_re; + iowrite32(tmp, denali->reg + TWHR2_AND_WE_2_RE); + + /* tADL -> ADDR_2_DATA */ + + /* for older versions, ADDR_2_DATA is only 6 bit wide */ + addr_2_data_mask = TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA; + if (denali->revision < 0x0501) + addr_2_data_mask >>= 1; + + addr_2_data = DIV_ROUND_UP(timings->tADL_min, t_clk); + addr_2_data = min_t(int, addr_2_data, addr_2_data_mask); + + tmp = ioread32(denali->reg + TCWAW_AND_ADDR_2_DATA); + tmp &= ~addr_2_data_mask; + tmp |= addr_2_data; + iowrite32(tmp, denali->reg + TCWAW_AND_ADDR_2_DATA); + + /* tREH, tWH -> RDWR_EN_HI_CNT */ + rdwr_en_hi = DIV_ROUND_UP(max(timings->tREH_min, timings->tWH_min), + t_clk); + rdwr_en_hi = min_t(int, rdwr_en_hi, RDWR_EN_HI_CNT__VALUE); + + tmp = ioread32(denali->reg + RDWR_EN_HI_CNT); + tmp &= ~RDWR_EN_HI_CNT__VALUE; + tmp |= rdwr_en_hi; + iowrite32(tmp, denali->reg + RDWR_EN_HI_CNT); + + /* tRP, tWP -> RDWR_EN_LO_CNT */ + rdwr_en_lo = DIV_ROUND_UP(max(timings->tRP_min, timings->tWP_min), + t_clk); + rdwr_en_lo_hi = DIV_ROUND_UP(max(timings->tRC_min, timings->tWC_min), + t_clk); + rdwr_en_lo_hi = max(rdwr_en_lo_hi, DENALI_CLK_X_MULT); + rdwr_en_lo = max(rdwr_en_lo, rdwr_en_lo_hi - rdwr_en_hi); + rdwr_en_lo = min_t(int, rdwr_en_lo, RDWR_EN_LO_CNT__VALUE); + + tmp = ioread32(denali->reg + RDWR_EN_LO_CNT); + tmp &= ~RDWR_EN_LO_CNT__VALUE; + tmp |= rdwr_en_lo; + iowrite32(tmp, denali->reg + RDWR_EN_LO_CNT); + + /* tCS, tCEA -> CS_SETUP_CNT */ + cs_setup = max3((int)DIV_ROUND_UP(timings->tCS_min, t_clk) - rdwr_en_lo, + (int)DIV_ROUND_UP(timings->tCEA_max, t_clk) - acc_clks, + 0); + cs_setup = min_t(int, cs_setup, 
CS_SETUP_CNT__VALUE); + + tmp = ioread32(denali->reg + CS_SETUP_CNT); + tmp &= ~CS_SETUP_CNT__VALUE; + tmp |= cs_setup; + iowrite32(tmp, denali->reg + CS_SETUP_CNT); + + return 0; +} + +static void denali_reset_banks(struct denali_nand_info *denali) +{ + u32 irq_status; int i; - switch (cmd) { - case NAND_CMD_PAGEPROG: - break; - case NAND_CMD_STATUS: - read_status(denali); - break; - case NAND_CMD_READID: - case NAND_CMD_PARAM: - reset_buf(denali); - /* - * sometimes ManufactureId read from register is not right - * e.g. some of Micron MT29F32G08QAA MLC NAND chips - * So here we send READID cmd to NAND insteand - */ - addr = MODE_11 | BANK(denali->flash_bank); - index_addr(denali, addr | 0, 0x90); - index_addr(denali, addr | 1, col); - for (i = 0; i < 8; i++) { - index_addr_read_data(denali, addr | 2, &id); - write_byte_to_buf(denali, id); - } - break; - case NAND_CMD_READ0: - case NAND_CMD_SEQIN: - denali->page = page; - break; - case NAND_CMD_RESET: - reset_bank(denali); - break; - case NAND_CMD_READOOB: - /* TODO: Read OOB data */ - break; - default: - pr_err(": unsupported command received 0x%x\n", cmd); - break; + for (i = 0; i < denali->max_banks; i++) { + denali->active_bank = i; + + denali_reset_irq(denali); + + iowrite32(DEVICE_RESET__BANK(i), + denali->reg + DEVICE_RESET); + + irq_status = denali_wait_for_irq(denali, + INTR__RST_COMP | INTR__INT_ACT | INTR__TIME_OUT); + if (!(irq_status & INTR__INT_ACT)) + break; } + + dev_dbg(denali->dev, "%d chips connected\n", i); + denali->max_banks = i; } -/* end NAND core entry points */ -/* Initialization code to bring the device up to a known good state */ static void denali_hw_init(struct denali_nand_info *denali) { /* @@ -1319,8 +1125,7 @@ static void denali_hw_init(struct denali_nand_info *denali) * override it. */ if (!denali->revision) - denali->revision = - swab16(ioread32(denali->flash_reg + REVISION)); + denali->revision = swab16(ioread32(denali->reg + REVISION)); /* * tell driver how many bit controller will skip before @@ -1328,30 +1133,51 @@ static void denali_hw_init(struct denali_nand_info *denali) * set by firmware. So we read this value out. * if this value is 0, just let it be. */ - denali->bbtskipbytes = ioread32(denali->flash_reg + - SPARE_AREA_SKIP_BYTES); + denali->oob_skip_bytes = ioread32(denali->reg + SPARE_AREA_SKIP_BYTES); detect_max_banks(denali); - denali_nand_reset(denali); - iowrite32(0x0F, denali->flash_reg + RB_PIN_ENABLED); - iowrite32(CHIP_EN_DONT_CARE__FLAG, - denali->flash_reg + CHIP_ENABLE_DONT_CARE); + iowrite32(0x0F, denali->reg + RB_PIN_ENABLED); + iowrite32(CHIP_EN_DONT_CARE__FLAG, denali->reg + CHIP_ENABLE_DONT_CARE); - iowrite32(0xffff, denali->flash_reg + SPARE_AREA_MARKER); + iowrite32(0xffff, denali->reg + SPARE_AREA_MARKER); /* Should set value for these registers when init */ - iowrite32(0, denali->flash_reg + TWO_ROW_ADDR_CYCLES); - iowrite32(1, denali->flash_reg + ECC_ENABLE); - denali_nand_timing_set(denali); - denali_irq_init(denali); + iowrite32(0, denali->reg + TWO_ROW_ADDR_CYCLES); + iowrite32(1, denali->reg + ECC_ENABLE); } -/* - * Althogh controller spec said SLC ECC is forceb to be 4bit, - * but denali controller in MRST only support 15bit and 8bit ECC - * correction - */ -#define ECC_8BITS 14 -#define ECC_15BITS 26 +int denali_calc_ecc_bytes(int step_size, int strength) +{ + /* BCH code. 
Denali requires ecc.bytes to be multiple of 2 */ + return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2; +} +EXPORT_SYMBOL(denali_calc_ecc_bytes); + +static int denali_ecc_setup(struct mtd_info *mtd, struct nand_chip *chip, + struct denali_nand_info *denali) +{ + int oobavail = mtd->oobsize - denali->oob_skip_bytes; + int ret; + + /* + * If .size and .strength are already set (usually by DT), + * check if they are supported by this controller. + */ + if (chip->ecc.size && chip->ecc.strength) + return nand_check_ecc_caps(chip, denali->ecc_caps, oobavail); + + /* + * We want .size and .strength closest to the chip's requirement + * unless NAND_ECC_MAXIMIZE is requested. + */ + if (!(chip->ecc.options & NAND_ECC_MAXIMIZE)) { + ret = nand_match_ecc_req(chip, denali->ecc_caps, oobavail); + if (!ret) + return 0; + } + + /* Max ECC strength is the last thing we can do */ + return nand_maximize_ecc(chip, denali->ecc_caps, oobavail); +} static int denali_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobregion) @@ -1362,7 +1188,7 @@ static int denali_ooblayout_ecc(struct mtd_info *mtd, int section, if (section) return -ERANGE; - oobregion->offset = denali->bbtskipbytes; + oobregion->offset = denali->oob_skip_bytes; oobregion->length = chip->ecc.total; return 0; @@ -1377,7 +1203,7 @@ static int denali_ooblayout_free(struct mtd_info *mtd, int section, if (section) return -ERANGE; - oobregion->offset = chip->ecc.total + denali->bbtskipbytes; + oobregion->offset = chip->ecc.total + denali->oob_skip_bytes; oobregion->length = mtd->oobsize - oobregion->offset; return 0; @@ -1388,29 +1214,6 @@ static const struct mtd_ooblayout_ops denali_ooblayout_ops = { .free = denali_ooblayout_free, }; -static uint8_t bbt_pattern[] = {'B', 'b', 't', '0' }; -static uint8_t mirror_pattern[] = {'1', 't', 'b', 'B' }; - -static struct nand_bbt_descr bbt_main_descr = { - .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE - | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, - .offs = 8, - .len = 4, - .veroffs = 12, - .maxblocks = 4, - .pattern = bbt_pattern, -}; - -static struct nand_bbt_descr bbt_mirror_descr = { - .options = NAND_BBT_LASTBLOCK | NAND_BBT_CREATE | NAND_BBT_WRITE - | NAND_BBT_2BIT | NAND_BBT_VERSION | NAND_BBT_PERCHIP, - .offs = 8, - .len = 4, - .veroffs = 12, - .maxblocks = 4, - .pattern = mirror_pattern, -}; - /* initialize driver data structures */ static void denali_drv_init(struct denali_nand_info *denali) { @@ -1425,12 +1228,6 @@ static void denali_drv_init(struct denali_nand_info *denali) * element that might be access shared data (interrupt status) */ spin_lock_init(&denali->irq_lock); - - /* indicate that MTD has not selected a valid bank yet */ - denali->flash_bank = CHIP_SELECT_INVALID; - - /* initialize our irq_status variable to indicate no interrupts */ - denali->irq_status = 0; } static int denali_multidev_fixup(struct denali_nand_info *denali) @@ -1445,23 +1242,23 @@ static int denali_multidev_fixup(struct denali_nand_info *denali) * In this case, the core framework knows nothing about this fact, * so we should tell it the _logical_ pagesize and anything necessary. */ - denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED); + denali->devs_per_cs = ioread32(denali->reg + DEVICES_CONNECTED); /* * On some SoCs, DEVICES_CONNECTED is not auto-detected. * For those, DEVICES_CONNECTED is left to 0. Set 1 if it is the case. 
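[Editor's note] denali_calc_ecc_bytes() above is the closed form behind the old hard-coded sizes: a BCH code over GF(2^m) with m = fls(step_size * 8) costs strength * m parity bits, rounded up to 16-bit units and converted back to an even byte count. A standalone check that it reproduces the ECC_8BITS (14) and ECC_15BITS (26) constants this patch deletes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* fls(): 1-based index of the most significant set bit, 0 for 0 */
static int fls(unsigned int x)
{
        int r = 0;

        while (x) {
                r++;
                x >>= 1;
        }
        return r;
}

static int denali_calc_ecc_bytes(int step_size, int strength)
{
        /* BCH code; Denali requires ecc.bytes to be a multiple of 2 */
        return DIV_ROUND_UP(strength * fls(step_size * 8), 16) * 2;
}

int main(void)
{
        printf("512B step,  8-bit: %d bytes\n", denali_calc_ecc_bytes(512, 8));
        printf("512B step, 15-bit: %d bytes\n", denali_calc_ecc_bytes(512, 15));
        return 0;
}

Since the formula matches the removed constants exactly (14 and 26 bytes), the driver can drop them and let denali_ecc_setup() negotiate any step/strength pair the platform's ecc_caps advertise.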
*/ - if (denali->devnum == 0) { - denali->devnum = 1; - iowrite32(1, denali->flash_reg + DEVICES_CONNECTED); + if (denali->devs_per_cs == 0) { + denali->devs_per_cs = 1; + iowrite32(1, denali->reg + DEVICES_CONNECTED); } - if (denali->devnum == 1) + if (denali->devs_per_cs == 1) return 0; - if (denali->devnum != 2) { + if (denali->devs_per_cs != 2) { dev_err(denali->dev, "unsupported number of devices %d\n", - denali->devnum); + denali->devs_per_cs); return -EINVAL; } @@ -1479,7 +1276,7 @@ static int denali_multidev_fixup(struct denali_nand_info *denali) chip->ecc.size <<= 1; chip->ecc.bytes <<= 1; chip->ecc.strength <<= 1; - denali->bbtskipbytes <<= 1; + denali->oob_skip_bytes <<= 1; return 0; } @@ -1490,27 +1287,12 @@ int denali_init(struct denali_nand_info *denali) struct mtd_info *mtd = nand_to_mtd(chip); int ret; - if (denali->platform == INTEL_CE4100) { - /* - * Due to a silicon limitation, we can only support - * ONFI timing mode 1 and below. - */ - if (onfi_timing_mode < -1 || onfi_timing_mode > 1) { - pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n"); - return -EINVAL; - } - } - - /* allocate a temporary buffer for nand_scan_ident() */ - denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE, - GFP_DMA | GFP_KERNEL); - if (!denali->buf.buf) - return -ENOMEM; - mtd->dev.parent = denali->dev; denali_hw_init(denali); denali_drv_init(denali); + denali_clear_irq_all(denali); + /* Request IRQ after all the hardware initialization is finished */ ret = devm_request_irq(denali->dev, denali->irq, denali_isr, IRQF_SHARED, DENALI_NAND_NAME, denali); @@ -1519,8 +1301,11 @@ int denali_init(struct denali_nand_info *denali) return ret; } - /* now that our ISR is registered, we can enable interrupts */ - denali_set_intr_modes(denali, true); + denali_enable_irq(denali); + denali_reset_banks(denali); + + denali->active_bank = DENALI_INVALID_BANK; + nand_set_flash_node(chip, denali->dev->of_node); /* Fallback to the default name if DT did not give "label" property */ if (!mtd->name) @@ -1528,10 +1313,17 @@ int denali_init(struct denali_nand_info *denali) /* register the driver with the NAND core subsystem */ chip->select_chip = denali_select_chip; - chip->cmdfunc = denali_cmdfunc; chip->read_byte = denali_read_byte; + chip->write_byte = denali_write_byte; + chip->read_word = denali_read_word; + chip->cmd_ctrl = denali_cmd_ctrl; + chip->dev_ready = denali_dev_ready; chip->waitfunc = denali_waitfunc; + /* clk rate info is needed for setup_data_interface */ + if (denali->clk_x_rate) + chip->setup_data_interface = denali_setup_data_interface; + /* * scan for NAND devices attached to the controller * this is the first stage in a two step process to register @@ -1539,33 +1331,25 @@ int denali_init(struct denali_nand_info *denali) */ ret = nand_scan_ident(mtd, denali->max_banks, NULL); if (ret) - goto failed_req_irq; - - /* allocate the right size buffer now */ - devm_kfree(denali->dev, denali->buf.buf); - denali->buf.buf = devm_kzalloc(denali->dev, - mtd->writesize + mtd->oobsize, - GFP_KERNEL); - if (!denali->buf.buf) { - ret = -ENOMEM; - goto failed_req_irq; - } + goto disable_irq; - ret = dma_set_mask(denali->dev, - DMA_BIT_MASK(denali->caps & DENALI_CAP_DMA_64BIT ? - 64 : 32)); - if (ret) { - dev_err(denali->dev, "No usable DMA configuration\n"); - goto failed_req_irq; + if (ioread32(denali->reg + FEATURES) & FEATURES__DMA) + denali->dma_avail = 1; + + if (denali->dma_avail) { + int dma_bit = denali->caps & DENALI_CAP_DMA_64BIT ? 
64 : 32; + + ret = dma_set_mask(denali->dev, DMA_BIT_MASK(dma_bit)); + if (ret) { + dev_info(denali->dev, + "Failed to set DMA mask. Disabling DMA.\n"); + denali->dma_avail = 0; + } } - denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf, - mtd->writesize + mtd->oobsize, - DMA_BIDIRECTIONAL); - if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) { - dev_err(denali->dev, "Failed to map DMA buffer\n"); - ret = -EIO; - goto failed_req_irq; + if (denali->dma_avail) { + chip->options |= NAND_USE_BOUNCE_BUFFER; + chip->buf_align = 16; } /* @@ -1574,46 +1358,49 @@ int denali_init(struct denali_nand_info *denali) * bad block management. */ - /* Bad block management */ - chip->bbt_td = &bbt_main_descr; - chip->bbt_md = &bbt_mirror_descr; - - /* skip the scan for now until we have OOB read and write support */ chip->bbt_options |= NAND_BBT_USE_FLASH; - chip->options |= NAND_SKIP_BBTSCAN; + chip->bbt_options |= NAND_BBT_NO_OOB; + chip->ecc.mode = NAND_ECC_HW_SYNDROME; /* no subpage writes on denali */ chip->options |= NAND_NO_SUBPAGE_WRITE; - /* - * Denali Controller only support 15bit and 8bit ECC in MRST, - * so just let controller do 15bit ECC for MLC and 8bit ECC for - * SLC if possible. - * */ - if (!nand_is_slc(chip) && - (mtd->oobsize > (denali->bbtskipbytes + - ECC_15BITS * (mtd->writesize / - ECC_SECTOR_SIZE)))) { - /* if MLC OOB size is large enough, use 15bit ECC*/ - chip->ecc.strength = 15; - chip->ecc.bytes = ECC_15BITS; - iowrite32(15, denali->flash_reg + ECC_CORRECTION); - } else if (mtd->oobsize < (denali->bbtskipbytes + - ECC_8BITS * (mtd->writesize / - ECC_SECTOR_SIZE))) { - pr_err("Your NAND chip OOB is not large enough to contain 8bit ECC correction codes"); - goto failed_req_irq; - } else { - chip->ecc.strength = 8; - chip->ecc.bytes = ECC_8BITS; - iowrite32(8, denali->flash_reg + ECC_CORRECTION); + ret = denali_ecc_setup(mtd, chip, denali); + if (ret) { + dev_err(denali->dev, "Failed to setup ECC settings.\n"); + goto disable_irq; } + dev_dbg(denali->dev, + "chosen ECC settings: step=%d, strength=%d, bytes=%d\n", + chip->ecc.size, chip->ecc.strength, chip->ecc.bytes); + + iowrite32(MAKE_ECC_CORRECTION(chip->ecc.strength, 1), + denali->reg + ECC_CORRECTION); + iowrite32(mtd->erasesize / mtd->writesize, + denali->reg + PAGES_PER_BLOCK); + iowrite32(chip->options & NAND_BUSWIDTH_16 ? 
1 : 0, + denali->reg + DEVICE_WIDTH); + iowrite32(mtd->writesize, denali->reg + DEVICE_MAIN_AREA_SIZE); + iowrite32(mtd->oobsize, denali->reg + DEVICE_SPARE_AREA_SIZE); + + iowrite32(chip->ecc.size, denali->reg + CFG_DATA_BLOCK_SIZE); + iowrite32(chip->ecc.size, denali->reg + CFG_LAST_DATA_BLOCK_SIZE); + /* chip->ecc.steps is set by nand_scan_tail(); not available here */ + iowrite32(mtd->writesize / chip->ecc.size, + denali->reg + CFG_NUM_DATA_BLOCKS); + mtd_set_ooblayout(mtd, &denali_ooblayout_ops); - /* override the default read operations */ - chip->ecc.size = ECC_SECTOR_SIZE; + if (chip->options & NAND_BUSWIDTH_16) { + chip->read_buf = denali_read_buf16; + chip->write_buf = denali_write_buf16; + } else { + chip->read_buf = denali_read_buf; + chip->write_buf = denali_write_buf; + } + chip->ecc.options |= NAND_ECC_CUSTOM_PAGE_ACCESS; chip->ecc.read_page = denali_read_page; chip->ecc.read_page_raw = denali_read_page_raw; chip->ecc.write_page = denali_write_page; @@ -1624,21 +1411,34 @@ int denali_init(struct denali_nand_info *denali) ret = denali_multidev_fixup(denali); if (ret) - goto failed_req_irq; + goto disable_irq; + + /* + * This buffer is DMA-mapped by denali_{read,write}_page_raw. Do not + * use devm_kmalloc() because the memory allocated by devm_ does not + * guarantee DMA-safe alignment. + */ + denali->buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL); + if (!denali->buf) { + ret = -ENOMEM; + goto disable_irq; + } ret = nand_scan_tail(mtd); if (ret) - goto failed_req_irq; + goto free_buf; ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(denali->dev, "Failed to register MTD: %d\n", ret); - goto failed_req_irq; + goto free_buf; } return 0; -failed_req_irq: - denali_irq_cleanup(denali->irq, denali); +free_buf: + kfree(denali->buf); +disable_irq: + denali_disable_irq(denali); return ret; } @@ -1648,16 +1448,9 @@ EXPORT_SYMBOL(denali_init); void denali_remove(struct denali_nand_info *denali) { struct mtd_info *mtd = nand_to_mtd(&denali->nand); - /* - * Pre-compute DMA buffer size to avoid any problems in case - * nand_release() ever changes in a way that mtd->writesize and - * mtd->oobsize are not reliable after this call. 
- */ - int bufsize = mtd->writesize + mtd->oobsize; nand_release(mtd); - denali_irq_cleanup(denali->irq, denali); - dma_unmap_single(denali->dev, denali->buf.dma_buf, bufsize, - DMA_BIDIRECTIONAL); + kfree(denali->buf); + denali_disable_irq(denali); } EXPORT_SYMBOL(denali_remove); diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h index ec004850652a..237cc706b0fb 100644 --- a/drivers/mtd/nand/denali.h +++ b/drivers/mtd/nand/denali.h @@ -24,330 +24,315 @@ #include <linux/mtd/nand.h> #define DEVICE_RESET 0x0 -#define DEVICE_RESET__BANK0 0x0001 -#define DEVICE_RESET__BANK1 0x0002 -#define DEVICE_RESET__BANK2 0x0004 -#define DEVICE_RESET__BANK3 0x0008 +#define DEVICE_RESET__BANK(bank) BIT(bank) #define TRANSFER_SPARE_REG 0x10 -#define TRANSFER_SPARE_REG__FLAG 0x0001 +#define TRANSFER_SPARE_REG__FLAG BIT(0) #define LOAD_WAIT_CNT 0x20 -#define LOAD_WAIT_CNT__VALUE 0xffff +#define LOAD_WAIT_CNT__VALUE GENMASK(15, 0) #define PROGRAM_WAIT_CNT 0x30 -#define PROGRAM_WAIT_CNT__VALUE 0xffff +#define PROGRAM_WAIT_CNT__VALUE GENMASK(15, 0) #define ERASE_WAIT_CNT 0x40 -#define ERASE_WAIT_CNT__VALUE 0xffff +#define ERASE_WAIT_CNT__VALUE GENMASK(15, 0) #define INT_MON_CYCCNT 0x50 -#define INT_MON_CYCCNT__VALUE 0xffff +#define INT_MON_CYCCNT__VALUE GENMASK(15, 0) #define RB_PIN_ENABLED 0x60 -#define RB_PIN_ENABLED__BANK0 0x0001 -#define RB_PIN_ENABLED__BANK1 0x0002 -#define RB_PIN_ENABLED__BANK2 0x0004 -#define RB_PIN_ENABLED__BANK3 0x0008 +#define RB_PIN_ENABLED__BANK(bank) BIT(bank) #define MULTIPLANE_OPERATION 0x70 -#define MULTIPLANE_OPERATION__FLAG 0x0001 +#define MULTIPLANE_OPERATION__FLAG BIT(0) #define MULTIPLANE_READ_ENABLE 0x80 -#define MULTIPLANE_READ_ENABLE__FLAG 0x0001 +#define MULTIPLANE_READ_ENABLE__FLAG BIT(0) #define COPYBACK_DISABLE 0x90 -#define COPYBACK_DISABLE__FLAG 0x0001 +#define COPYBACK_DISABLE__FLAG BIT(0) #define CACHE_WRITE_ENABLE 0xa0 -#define CACHE_WRITE_ENABLE__FLAG 0x0001 +#define CACHE_WRITE_ENABLE__FLAG BIT(0) #define CACHE_READ_ENABLE 0xb0 -#define CACHE_READ_ENABLE__FLAG 0x0001 +#define CACHE_READ_ENABLE__FLAG BIT(0) #define PREFETCH_MODE 0xc0 -#define PREFETCH_MODE__PREFETCH_EN 0x0001 -#define PREFETCH_MODE__PREFETCH_BURST_LENGTH 0xfff0 +#define PREFETCH_MODE__PREFETCH_EN BIT(0) +#define PREFETCH_MODE__PREFETCH_BURST_LENGTH GENMASK(15, 4) #define CHIP_ENABLE_DONT_CARE 0xd0 -#define CHIP_EN_DONT_CARE__FLAG 0x01 +#define CHIP_EN_DONT_CARE__FLAG BIT(0) #define ECC_ENABLE 0xe0 -#define ECC_ENABLE__FLAG 0x0001 +#define ECC_ENABLE__FLAG BIT(0) #define GLOBAL_INT_ENABLE 0xf0 -#define GLOBAL_INT_EN_FLAG 0x01 +#define GLOBAL_INT_EN_FLAG BIT(0) -#define WE_2_RE 0x100 -#define WE_2_RE__VALUE 0x003f +#define TWHR2_AND_WE_2_RE 0x100 +#define TWHR2_AND_WE_2_RE__WE_2_RE GENMASK(5, 0) +#define TWHR2_AND_WE_2_RE__TWHR2 GENMASK(13, 8) -#define ADDR_2_DATA 0x110 -#define ADDR_2_DATA__VALUE 0x003f +#define TCWAW_AND_ADDR_2_DATA 0x110 +/* The width of ADDR_2_DATA is 6 bit for old IP, 7 bit for new IP */ +#define TCWAW_AND_ADDR_2_DATA__ADDR_2_DATA GENMASK(6, 0) +#define TCWAW_AND_ADDR_2_DATA__TCWAW GENMASK(13, 8) #define RE_2_WE 0x120 -#define RE_2_WE__VALUE 0x003f +#define RE_2_WE__VALUE GENMASK(5, 0) #define ACC_CLKS 0x130 -#define ACC_CLKS__VALUE 0x000f +#define ACC_CLKS__VALUE GENMASK(3, 0) #define NUMBER_OF_PLANES 0x140 -#define NUMBER_OF_PLANES__VALUE 0x0007 +#define NUMBER_OF_PLANES__VALUE GENMASK(2, 0) #define PAGES_PER_BLOCK 0x150 -#define PAGES_PER_BLOCK__VALUE 0xffff +#define PAGES_PER_BLOCK__VALUE GENMASK(15, 0) #define DEVICE_WIDTH 0x160 -#define 
DEVICE_WIDTH__VALUE 0x0003 +#define DEVICE_WIDTH__VALUE GENMASK(1, 0) #define DEVICE_MAIN_AREA_SIZE 0x170 -#define DEVICE_MAIN_AREA_SIZE__VALUE 0xffff +#define DEVICE_MAIN_AREA_SIZE__VALUE GENMASK(15, 0) #define DEVICE_SPARE_AREA_SIZE 0x180 -#define DEVICE_SPARE_AREA_SIZE__VALUE 0xffff +#define DEVICE_SPARE_AREA_SIZE__VALUE GENMASK(15, 0) #define TWO_ROW_ADDR_CYCLES 0x190 -#define TWO_ROW_ADDR_CYCLES__FLAG 0x0001 +#define TWO_ROW_ADDR_CYCLES__FLAG BIT(0) #define MULTIPLANE_ADDR_RESTRICT 0x1a0 -#define MULTIPLANE_ADDR_RESTRICT__FLAG 0x0001 +#define MULTIPLANE_ADDR_RESTRICT__FLAG BIT(0) #define ECC_CORRECTION 0x1b0 -#define ECC_CORRECTION__VALUE 0x001f +#define ECC_CORRECTION__VALUE GENMASK(4, 0) +#define ECC_CORRECTION__ERASE_THRESHOLD GENMASK(31, 16) +#define MAKE_ECC_CORRECTION(val, thresh) \ + (((val) & (ECC_CORRECTION__VALUE)) | \ + (((thresh) << 16) & (ECC_CORRECTION__ERASE_THRESHOLD))) #define READ_MODE 0x1c0 -#define READ_MODE__VALUE 0x000f +#define READ_MODE__VALUE GENMASK(3, 0) #define WRITE_MODE 0x1d0 -#define WRITE_MODE__VALUE 0x000f +#define WRITE_MODE__VALUE GENMASK(3, 0) #define COPYBACK_MODE 0x1e0 -#define COPYBACK_MODE__VALUE 0x000f +#define COPYBACK_MODE__VALUE GENMASK(3, 0) #define RDWR_EN_LO_CNT 0x1f0 -#define RDWR_EN_LO_CNT__VALUE 0x001f +#define RDWR_EN_LO_CNT__VALUE GENMASK(4, 0) #define RDWR_EN_HI_CNT 0x200 -#define RDWR_EN_HI_CNT__VALUE 0x001f +#define RDWR_EN_HI_CNT__VALUE GENMASK(4, 0) #define MAX_RD_DELAY 0x210 -#define MAX_RD_DELAY__VALUE 0x000f +#define MAX_RD_DELAY__VALUE GENMASK(3, 0) #define CS_SETUP_CNT 0x220 -#define CS_SETUP_CNT__VALUE 0x001f +#define CS_SETUP_CNT__VALUE GENMASK(4, 0) +#define CS_SETUP_CNT__TWB GENMASK(17, 12) #define SPARE_AREA_SKIP_BYTES 0x230 -#define SPARE_AREA_SKIP_BYTES__VALUE 0x003f +#define SPARE_AREA_SKIP_BYTES__VALUE GENMASK(5, 0) #define SPARE_AREA_MARKER 0x240 -#define SPARE_AREA_MARKER__VALUE 0xffff +#define SPARE_AREA_MARKER__VALUE GENMASK(15, 0) #define DEVICES_CONNECTED 0x250 -#define DEVICES_CONNECTED__VALUE 0x0007 +#define DEVICES_CONNECTED__VALUE GENMASK(2, 0) #define DIE_MASK 0x260 -#define DIE_MASK__VALUE 0x00ff +#define DIE_MASK__VALUE GENMASK(7, 0) #define FIRST_BLOCK_OF_NEXT_PLANE 0x270 -#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE 0xffff +#define FIRST_BLOCK_OF_NEXT_PLANE__VALUE GENMASK(15, 0) #define WRITE_PROTECT 0x280 -#define WRITE_PROTECT__FLAG 0x0001 +#define WRITE_PROTECT__FLAG BIT(0) #define RE_2_RE 0x290 -#define RE_2_RE__VALUE 0x003f +#define RE_2_RE__VALUE GENMASK(5, 0) #define MANUFACTURER_ID 0x300 -#define MANUFACTURER_ID__VALUE 0x00ff +#define MANUFACTURER_ID__VALUE GENMASK(7, 0) #define DEVICE_ID 0x310 -#define DEVICE_ID__VALUE 0x00ff +#define DEVICE_ID__VALUE GENMASK(7, 0) #define DEVICE_PARAM_0 0x320 -#define DEVICE_PARAM_0__VALUE 0x00ff +#define DEVICE_PARAM_0__VALUE GENMASK(7, 0) #define DEVICE_PARAM_1 0x330 -#define DEVICE_PARAM_1__VALUE 0x00ff +#define DEVICE_PARAM_1__VALUE GENMASK(7, 0) #define DEVICE_PARAM_2 0x340 -#define DEVICE_PARAM_2__VALUE 0x00ff +#define DEVICE_PARAM_2__VALUE GENMASK(7, 0) #define LOGICAL_PAGE_DATA_SIZE 0x350 -#define LOGICAL_PAGE_DATA_SIZE__VALUE 0xffff +#define LOGICAL_PAGE_DATA_SIZE__VALUE GENMASK(15, 0) #define LOGICAL_PAGE_SPARE_SIZE 0x360 -#define LOGICAL_PAGE_SPARE_SIZE__VALUE 0xffff +#define LOGICAL_PAGE_SPARE_SIZE__VALUE GENMASK(15, 0) #define REVISION 0x370 -#define REVISION__VALUE 0xffff +#define REVISION__VALUE GENMASK(15, 0) #define ONFI_DEVICE_FEATURES 0x380 -#define ONFI_DEVICE_FEATURES__VALUE 0x003f +#define ONFI_DEVICE_FEATURES__VALUE GENMASK(5, 0) #define 
ONFI_OPTIONAL_COMMANDS 0x390 -#define ONFI_OPTIONAL_COMMANDS__VALUE 0x003f +#define ONFI_OPTIONAL_COMMANDS__VALUE GENMASK(5, 0) #define ONFI_TIMING_MODE 0x3a0 -#define ONFI_TIMING_MODE__VALUE 0x003f +#define ONFI_TIMING_MODE__VALUE GENMASK(5, 0) #define ONFI_PGM_CACHE_TIMING_MODE 0x3b0 -#define ONFI_PGM_CACHE_TIMING_MODE__VALUE 0x003f +#define ONFI_PGM_CACHE_TIMING_MODE__VALUE GENMASK(5, 0) #define ONFI_DEVICE_NO_OF_LUNS 0x3c0 -#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS 0x00ff -#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE 0x0100 +#define ONFI_DEVICE_NO_OF_LUNS__NO_OF_LUNS GENMASK(7, 0) +#define ONFI_DEVICE_NO_OF_LUNS__ONFI_DEVICE BIT(8) #define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L 0x3d0 -#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE 0xffff +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_L__VALUE GENMASK(15, 0) #define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U 0x3e0 -#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE 0xffff - -#define FEATURES 0x3f0 -#define FEATURES__N_BANKS 0x0003 -#define FEATURES__ECC_MAX_ERR 0x003c -#define FEATURES__DMA 0x0040 -#define FEATURES__CMD_DMA 0x0080 -#define FEATURES__PARTITION 0x0100 -#define FEATURES__XDMA_SIDEBAND 0x0200 -#define FEATURES__GPREG 0x0400 -#define FEATURES__INDEX_ADDR 0x0800 +#define ONFI_DEVICE_NO_OF_BLOCKS_PER_LUN_U__VALUE GENMASK(15, 0) + +#define FEATURES 0x3f0 +#define FEATURES__N_BANKS GENMASK(1, 0) +#define FEATURES__ECC_MAX_ERR GENMASK(5, 2) +#define FEATURES__DMA BIT(6) +#define FEATURES__CMD_DMA BIT(7) +#define FEATURES__PARTITION BIT(8) +#define FEATURES__XDMA_SIDEBAND BIT(9) +#define FEATURES__GPREG BIT(10) +#define FEATURES__INDEX_ADDR BIT(11) #define TRANSFER_MODE 0x400 -#define TRANSFER_MODE__VALUE 0x0003 +#define TRANSFER_MODE__VALUE GENMASK(1, 0) -#define INTR_STATUS(__bank) (0x410 + ((__bank) * 0x50)) -#define INTR_EN(__bank) (0x420 + ((__bank) * 0x50)) +#define INTR_STATUS(bank) (0x410 + (bank) * 0x50) +#define INTR_EN(bank) (0x420 + (bank) * 0x50) /* bit[1:0] is used differently depending on IP version */ -#define INTR__ECC_UNCOR_ERR 0x0001 /* new IP */ -#define INTR__ECC_TRANSACTION_DONE 0x0001 /* old IP */ -#define INTR__ECC_ERR 0x0002 /* old IP */ -#define INTR__DMA_CMD_COMP 0x0004 -#define INTR__TIME_OUT 0x0008 -#define INTR__PROGRAM_FAIL 0x0010 -#define INTR__ERASE_FAIL 0x0020 -#define INTR__LOAD_COMP 0x0040 -#define INTR__PROGRAM_COMP 0x0080 -#define INTR__ERASE_COMP 0x0100 -#define INTR__PIPE_CPYBCK_CMD_COMP 0x0200 -#define INTR__LOCKED_BLK 0x0400 -#define INTR__UNSUP_CMD 0x0800 -#define INTR__INT_ACT 0x1000 -#define INTR__RST_COMP 0x2000 -#define INTR__PIPE_CMD_ERR 0x4000 -#define INTR__PAGE_XFER_INC 0x8000 - -#define PAGE_CNT(__bank) (0x430 + ((__bank) * 0x50)) -#define ERR_PAGE_ADDR(__bank) (0x440 + ((__bank) * 0x50)) -#define ERR_BLOCK_ADDR(__bank) (0x450 + ((__bank) * 0x50)) +#define INTR__ECC_UNCOR_ERR BIT(0) /* new IP */ +#define INTR__ECC_TRANSACTION_DONE BIT(0) /* old IP */ +#define INTR__ECC_ERR BIT(1) /* old IP */ +#define INTR__DMA_CMD_COMP BIT(2) +#define INTR__TIME_OUT BIT(3) +#define INTR__PROGRAM_FAIL BIT(4) +#define INTR__ERASE_FAIL BIT(5) +#define INTR__LOAD_COMP BIT(6) +#define INTR__PROGRAM_COMP BIT(7) +#define INTR__ERASE_COMP BIT(8) +#define INTR__PIPE_CPYBCK_CMD_COMP BIT(9) +#define INTR__LOCKED_BLK BIT(10) +#define INTR__UNSUP_CMD BIT(11) +#define INTR__INT_ACT BIT(12) +#define INTR__RST_COMP BIT(13) +#define INTR__PIPE_CMD_ERR BIT(14) +#define INTR__PAGE_XFER_INC BIT(15) +#define INTR__ERASED_PAGE BIT(16) + +#define PAGE_CNT(bank) (0x430 + (bank) * 0x50) +#define ERR_PAGE_ADDR(bank) (0x440 + (bank) * 
0x50) +#define ERR_BLOCK_ADDR(bank) (0x450 + (bank) * 0x50) #define ECC_THRESHOLD 0x600 -#define ECC_THRESHOLD__VALUE 0x03ff +#define ECC_THRESHOLD__VALUE GENMASK(9, 0) #define ECC_ERROR_BLOCK_ADDRESS 0x610 -#define ECC_ERROR_BLOCK_ADDRESS__VALUE 0xffff +#define ECC_ERROR_BLOCK_ADDRESS__VALUE GENMASK(15, 0) #define ECC_ERROR_PAGE_ADDRESS 0x620 -#define ECC_ERROR_PAGE_ADDRESS__VALUE 0x0fff -#define ECC_ERROR_PAGE_ADDRESS__BANK 0xf000 +#define ECC_ERROR_PAGE_ADDRESS__VALUE GENMASK(11, 0) +#define ECC_ERROR_PAGE_ADDRESS__BANK GENMASK(15, 12) #define ECC_ERROR_ADDRESS 0x630 -#define ECC_ERROR_ADDRESS__OFFSET 0x0fff -#define ECC_ERROR_ADDRESS__SECTOR_NR 0xf000 +#define ECC_ERROR_ADDRESS__OFFSET GENMASK(11, 0) +#define ECC_ERROR_ADDRESS__SECTOR_NR GENMASK(15, 12) #define ERR_CORRECTION_INFO 0x640 -#define ERR_CORRECTION_INFO__BYTEMASK 0x00ff -#define ERR_CORRECTION_INFO__DEVICE_NR 0x0f00 -#define ERR_CORRECTION_INFO__ERROR_TYPE 0x4000 -#define ERR_CORRECTION_INFO__LAST_ERR_INFO 0x8000 +#define ERR_CORRECTION_INFO__BYTEMASK GENMASK(7, 0) +#define ERR_CORRECTION_INFO__DEVICE_NR GENMASK(11, 8) +#define ERR_CORRECTION_INFO__ERROR_TYPE BIT(14) +#define ERR_CORRECTION_INFO__LAST_ERR_INFO BIT(15) #define ECC_COR_INFO(bank) (0x650 + (bank) / 2 * 0x10) #define ECC_COR_INFO__SHIFT(bank) ((bank) % 2 * 8) -#define ECC_COR_INFO__MAX_ERRORS 0x007f -#define ECC_COR_INFO__UNCOR_ERR 0x0080 +#define ECC_COR_INFO__MAX_ERRORS GENMASK(6, 0) +#define ECC_COR_INFO__UNCOR_ERR BIT(7) + +#define CFG_DATA_BLOCK_SIZE 0x6b0 + +#define CFG_LAST_DATA_BLOCK_SIZE 0x6c0 + +#define CFG_NUM_DATA_BLOCKS 0x6d0 + +#define CFG_META_DATA_SIZE 0x6e0 #define DMA_ENABLE 0x700 -#define DMA_ENABLE__FLAG 0x0001 +#define DMA_ENABLE__FLAG BIT(0) #define IGNORE_ECC_DONE 0x710 -#define IGNORE_ECC_DONE__FLAG 0x0001 +#define IGNORE_ECC_DONE__FLAG BIT(0) #define DMA_INTR 0x720 #define DMA_INTR_EN 0x730 -#define DMA_INTR__TARGET_ERROR 0x0001 -#define DMA_INTR__DESC_COMP_CHANNEL0 0x0002 -#define DMA_INTR__DESC_COMP_CHANNEL1 0x0004 -#define DMA_INTR__DESC_COMP_CHANNEL2 0x0008 -#define DMA_INTR__DESC_COMP_CHANNEL3 0x0010 -#define DMA_INTR__MEMCOPY_DESC_COMP 0x0020 +#define DMA_INTR__TARGET_ERROR BIT(0) +#define DMA_INTR__DESC_COMP_CHANNEL0 BIT(1) +#define DMA_INTR__DESC_COMP_CHANNEL1 BIT(2) +#define DMA_INTR__DESC_COMP_CHANNEL2 BIT(3) +#define DMA_INTR__DESC_COMP_CHANNEL3 BIT(4) +#define DMA_INTR__MEMCOPY_DESC_COMP BIT(5) #define TARGET_ERR_ADDR_LO 0x740 -#define TARGET_ERR_ADDR_LO__VALUE 0xffff +#define TARGET_ERR_ADDR_LO__VALUE GENMASK(15, 0) #define TARGET_ERR_ADDR_HI 0x750 -#define TARGET_ERR_ADDR_HI__VALUE 0xffff +#define TARGET_ERR_ADDR_HI__VALUE GENMASK(15, 0) #define CHNL_ACTIVE 0x760 -#define CHNL_ACTIVE__CHANNEL0 0x0001 -#define CHNL_ACTIVE__CHANNEL1 0x0002 -#define CHNL_ACTIVE__CHANNEL2 0x0004 -#define CHNL_ACTIVE__CHANNEL3 0x0008 - -#define FAIL 1 /*failed flag*/ -#define PASS 0 /*success flag*/ - -#define CLK_X 5 -#define CLK_MULTI 4 - -#define ONFI_BLOOM_TIME 1 -#define MODE5_WORKAROUND 0 - - -#define MODE_00 0x00000000 -#define MODE_01 0x04000000 -#define MODE_10 0x08000000 -#define MODE_11 0x0C000000 - -#define ECC_SECTOR_SIZE 512 - -struct nand_buf { - int head; - int tail; - uint8_t *buf; - dma_addr_t dma_buf; -}; - -#define INTEL_CE4100 1 -#define INTEL_MRST 2 -#define DT 3 +#define CHNL_ACTIVE__CHANNEL0 BIT(0) +#define CHNL_ACTIVE__CHANNEL1 BIT(1) +#define CHNL_ACTIVE__CHANNEL2 BIT(2) +#define CHNL_ACTIVE__CHANNEL3 BIT(3) struct denali_nand_info { struct nand_chip nand; - int flash_bank; /* currently selected chip */ - int status; - 
int platform; - struct nand_buf buf; + unsigned long clk_x_rate; /* bus interface clock rate */ + int active_bank; /* currently selected bank */ struct device *dev; - int total_used_banks; - int page; - void __iomem *flash_reg; /* Register Interface */ - void __iomem *flash_mem; /* Host Data/Command Interface */ + void __iomem *reg; /* Register Interface */ + void __iomem *host; /* Host Data/Command Interface */ /* elements used by ISR */ struct completion complete; spinlock_t irq_lock; + uint32_t irq_mask; uint32_t irq_status; int irq; - int devnum; /* represent how many nands connected */ - int bbtskipbytes; + void *buf; + dma_addr_t dma_addr; + int dma_avail; + int devs_per_cs; /* devices connected in parallel */ + int oob_skip_bytes; int max_banks; unsigned int revision; unsigned int caps; + const struct nand_ecc_caps *ecc_caps; }; #define DENALI_CAP_HW_ECC_FIXUP BIT(0) #define DENALI_CAP_DMA_64BIT BIT(1) +int denali_calc_ecc_bytes(int step_size, int strength); extern int denali_init(struct denali_nand_info *denali); extern void denali_remove(struct denali_nand_info *denali); diff --git a/drivers/mtd/nand/denali_dt.c b/drivers/mtd/nand/denali_dt.c index df9ef36cc2ce..47f398edf18f 100644 --- a/drivers/mtd/nand/denali_dt.c +++ b/drivers/mtd/nand/denali_dt.c @@ -32,10 +32,31 @@ struct denali_dt { struct denali_dt_data { unsigned int revision; unsigned int caps; + const struct nand_ecc_caps *ecc_caps; }; +NAND_ECC_CAPS_SINGLE(denali_socfpga_ecc_caps, denali_calc_ecc_bytes, + 512, 8, 15); static const struct denali_dt_data denali_socfpga_data = { .caps = DENALI_CAP_HW_ECC_FIXUP, + .ecc_caps = &denali_socfpga_ecc_caps, +}; + +NAND_ECC_CAPS_SINGLE(denali_uniphier_v5a_ecc_caps, denali_calc_ecc_bytes, + 1024, 8, 16, 24); +static const struct denali_dt_data denali_uniphier_v5a_data = { + .caps = DENALI_CAP_HW_ECC_FIXUP | + DENALI_CAP_DMA_64BIT, + .ecc_caps = &denali_uniphier_v5a_ecc_caps, +}; + +NAND_ECC_CAPS_SINGLE(denali_uniphier_v5b_ecc_caps, denali_calc_ecc_bytes, + 1024, 8, 16); +static const struct denali_dt_data denali_uniphier_v5b_data = { + .revision = 0x0501, + .caps = DENALI_CAP_HW_ECC_FIXUP | + DENALI_CAP_DMA_64BIT, + .ecc_caps = &denali_uniphier_v5b_ecc_caps, }; static const struct of_device_id denali_nand_dt_ids[] = { @@ -43,13 +64,21 @@ static const struct of_device_id denali_nand_dt_ids[] = { .compatible = "altr,socfpga-denali-nand", .data = &denali_socfpga_data, }, + { + .compatible = "socionext,uniphier-denali-nand-v5a", + .data = &denali_uniphier_v5a_data, + }, + { + .compatible = "socionext,uniphier-denali-nand-v5b", + .data = &denali_uniphier_v5b_data, + }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, denali_nand_dt_ids); static int denali_dt_probe(struct platform_device *pdev) { - struct resource *denali_reg, *nand_data; + struct resource *res; struct denali_dt *dt; const struct denali_dt_data *data; struct denali_nand_info *denali; @@ -64,9 +93,9 @@ static int denali_dt_probe(struct platform_device *pdev) if (data) { denali->revision = data->revision; denali->caps = data->caps; + denali->ecc_caps = data->ecc_caps; } - denali->platform = DT; denali->dev = &pdev->dev; denali->irq = platform_get_irq(pdev, 0); if (denali->irq < 0) { @@ -74,17 +103,15 @@ static int denali_dt_probe(struct platform_device *pdev) return denali->irq; } - denali_reg = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "denali_reg"); - denali->flash_reg = devm_ioremap_resource(&pdev->dev, denali_reg); - if (IS_ERR(denali->flash_reg)) - return PTR_ERR(denali->flash_reg); + res = 
platform_get_resource_byname(pdev, IORESOURCE_MEM, "denali_reg"); + denali->reg = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(denali->reg)) + return PTR_ERR(denali->reg); - nand_data = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "nand_data"); - denali->flash_mem = devm_ioremap_resource(&pdev->dev, nand_data); - if (IS_ERR(denali->flash_mem)) - return PTR_ERR(denali->flash_mem); + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nand_data"); + denali->host = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(denali->host)) + return PTR_ERR(denali->host); dt->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(dt->clk)) { @@ -93,6 +120,8 @@ static int denali_dt_probe(struct platform_device *pdev) } clk_prepare_enable(dt->clk); + denali->clk_x_rate = clk_get_rate(dt->clk); + ret = denali_init(denali); if (ret) goto out_disable_clk; diff --git a/drivers/mtd/nand/denali_pci.c b/drivers/mtd/nand/denali_pci.c index ac843238b77e..81370c79aa48 100644 --- a/drivers/mtd/nand/denali_pci.c +++ b/drivers/mtd/nand/denali_pci.c @@ -19,6 +19,9 @@ #define DENALI_NAND_NAME "denali-nand-pci" +#define INTEL_CE4100 1 +#define INTEL_MRST 2 + /* List of platforms this NAND controller has be integrated into */ static const struct pci_device_id denali_pci_ids[] = { { PCI_VDEVICE(INTEL, 0x0701), INTEL_CE4100 }, @@ -27,6 +30,8 @@ static const struct pci_device_id denali_pci_ids[] = { }; MODULE_DEVICE_TABLE(pci, denali_pci_ids); +NAND_ECC_CAPS_SINGLE(denali_pci_ecc_caps, denali_calc_ecc_bytes, 512, 8, 15); + static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { int ret; @@ -45,13 +50,11 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) } if (id->driver_data == INTEL_CE4100) { - denali->platform = INTEL_CE4100; mem_base = pci_resource_start(dev, 0); mem_len = pci_resource_len(dev, 1); csr_base = pci_resource_start(dev, 1); csr_len = pci_resource_len(dev, 1); } else { - denali->platform = INTEL_MRST; csr_base = pci_resource_start(dev, 0); csr_len = pci_resource_len(dev, 0); mem_base = pci_resource_start(dev, 1); @@ -65,6 +68,9 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) pci_set_master(dev); denali->dev = &dev->dev; denali->irq = dev->irq; + denali->ecc_caps = &denali_pci_ecc_caps; + denali->nand.ecc.options |= NAND_ECC_MAXIMIZE; + denali->clk_x_rate = 200000000; /* 200 MHz */ ret = pci_request_regions(dev, DENALI_NAND_NAME); if (ret) { @@ -72,14 +78,14 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) return ret; } - denali->flash_reg = ioremap_nocache(csr_base, csr_len); - if (!denali->flash_reg) { + denali->reg = ioremap_nocache(csr_base, csr_len); + if (!denali->reg) { dev_err(&dev->dev, "Spectra: Unable to remap memory region\n"); return -ENOMEM; } - denali->flash_mem = ioremap_nocache(mem_base, mem_len); - if (!denali->flash_mem) { + denali->host = ioremap_nocache(mem_base, mem_len); + if (!denali->host) { dev_err(&dev->dev, "Spectra: ioremap_nocache failed!"); ret = -ENOMEM; goto failed_remap_reg; @@ -94,9 +100,9 @@ static int denali_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) return 0; failed_remap_mem: - iounmap(denali->flash_mem); + iounmap(denali->host); failed_remap_reg: - iounmap(denali->flash_reg); + iounmap(denali->reg); return ret; } @@ -106,8 +112,8 @@ static void denali_pci_remove(struct pci_dev *dev) struct denali_nand_info *denali = pci_get_drvdata(dev); denali_remove(denali); - iounmap(denali->flash_reg); - 
iounmap(denali->flash_mem); + iounmap(denali->reg); + iounmap(denali->host); } static struct pci_driver denali_pci_driver = { diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c index 7af2a3cd949e..a27a84fbfb84 100644 --- a/drivers/mtd/nand/docg4.c +++ b/drivers/mtd/nand/docg4.c @@ -1260,6 +1260,8 @@ static void __init init_mtd_structs(struct mtd_info *mtd) nand->read_buf = docg4_read_buf; nand->write_buf = docg4_write_buf16; nand->erase = docg4_erase_block; + nand->onfi_set_features = nand_onfi_get_set_features_notsupp; + nand->onfi_get_features = nand_onfi_get_set_features_notsupp; nand->ecc.read_page = docg4_read_page; nand->ecc.write_page = docg4_write_page; nand->ecc.read_page_raw = docg4_read_page_raw; diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c index 113f76e59937..b9ac16f05057 100644 --- a/drivers/mtd/nand/fsl_elbc_nand.c +++ b/drivers/mtd/nand/fsl_elbc_nand.c @@ -775,6 +775,8 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv) chip->select_chip = fsl_elbc_select_chip; chip->cmdfunc = fsl_elbc_cmdfunc; chip->waitfunc = fsl_elbc_wait; + chip->onfi_set_features = nand_onfi_get_set_features_notsupp; + chip->onfi_get_features = nand_onfi_get_set_features_notsupp; chip->bbt_td = &bbt_main_descr; chip->bbt_md = &bbt_mirror_descr; diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c index d1570f512f0b..59408ec2c69f 100644 --- a/drivers/mtd/nand/fsl_ifc_nand.c +++ b/drivers/mtd/nand/fsl_ifc_nand.c @@ -171,34 +171,6 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) ifc_nand_ctrl->index += mtd->writesize; } -static int is_blank(struct mtd_info *mtd, unsigned int bufnum) -{ - struct nand_chip *chip = mtd_to_nand(mtd); - struct fsl_ifc_mtd *priv = nand_get_controller_data(chip); - u8 __iomem *addr = priv->vbase + bufnum * (mtd->writesize * 2); - u32 __iomem *mainarea = (u32 __iomem *)addr; - u8 __iomem *oob = addr + mtd->writesize; - struct mtd_oob_region oobregion = { }; - int i, section = 0; - - for (i = 0; i < mtd->writesize / 4; i++) { - if (__raw_readl(&mainarea[i]) != 0xffffffff) - return 0; - } - - mtd_ooblayout_ecc(mtd, section++, &oobregion); - while (oobregion.length) { - for (i = 0; i < oobregion.length; i++) { - if (__raw_readb(&oob[oobregion.offset + i]) != 0xff) - return 0; - } - - mtd_ooblayout_ecc(mtd, section++, &oobregion); - } - - return 1; -} - /* returns nonzero if entire page is blank */ static int check_read_ecc(struct mtd_info *mtd, struct fsl_ifc_ctrl *ctrl, u32 *eccstat, unsigned int bufnum) @@ -274,16 +246,14 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) if (errors == 15) { /* * Uncorrectable error. - * OK only if the whole page is blank. + * We'll check for blank pages later. * * We disable ECCER reporting due to... * erratum IFC-A002770 -- so report it now if we * see an uncorrectable error in ECCSTAT. */ - if (!is_blank(mtd, bufnum)) - ctrl->nand_stat |= - IFC_NAND_EVTER_STAT_ECCER; - break; + ctrl->nand_stat |= IFC_NAND_EVTER_STAT_ECCER; + continue; } mtd->ecc_stats.corrected += errors; @@ -678,6 +648,39 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) return nand_fsr | NAND_STATUS_WP; } +/* + * The controller does not check for bitflips in erased pages, + * therefore software must check instead. 
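+ * Each ECC chunk is passed to nand_check_erased_ecc_chunk() below, which
+ * returns the number of bitflips if the chunk still reads as erased within
+ * the ecc.strength threshold, or a negative value otherwise; the result is
+ * folded into mtd->ecc_stats as corrected bitflips or a failure.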
+ */ +static int check_erased_page(struct nand_chip *chip, u8 *buf) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + u8 *ecc = chip->oob_poi; + const int ecc_size = chip->ecc.bytes; + const int pkt_size = chip->ecc.size; + int i, res, bitflips = 0; + struct mtd_oob_region oobregion = { }; + + mtd_ooblayout_ecc(mtd, 0, &oobregion); + ecc += oobregion.offset; + + for (i = 0; i < chip->ecc.steps; ++i) { + res = nand_check_erased_ecc_chunk(buf, pkt_size, ecc, ecc_size, + NULL, 0, + chip->ecc.strength); + if (res < 0) + mtd->ecc_stats.failed++; + else + mtd->ecc_stats.corrected += res; + + bitflips = max(res, bitflips); + buf += pkt_size; + ecc += ecc_size; + } + + return bitflips; +} + static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { @@ -689,8 +692,12 @@ static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip, if (oob_required) fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize); - if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER) - dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n"); + if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER) { + if (!oob_required) + fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize); + + return check_erased_page(chip, buf); + } if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) mtd->ecc_stats.failed++; @@ -831,6 +838,8 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) chip->select_chip = fsl_ifc_select_chip; chip->cmdfunc = fsl_ifc_cmdfunc; chip->waitfunc = fsl_ifc_wait; + chip->onfi_set_features = nand_onfi_get_set_features_notsupp; + chip->onfi_get_features = nand_onfi_get_set_features_notsupp; chip->bbt_td = &bbt_main_descr; chip->bbt_md = &bbt_mirror_descr; @@ -904,7 +913,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) chip->ecc.algo = NAND_ECC_HAMMING; } - if (ctrl->version == FSL_IFC_VERSION_1_1_0) + if (ctrl->version >= FSL_IFC_VERSION_1_1_0) fsl_ifc_sram_init(priv); return 0; diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c index cea50d2f218c..9d8b051d3187 100644 --- a/drivers/mtd/nand/fsmc_nand.c +++ b/drivers/mtd/nand/fsmc_nand.c @@ -302,25 +302,13 @@ static void fsmc_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) * This routine initializes timing parameters related to NAND memory access in * FSMC registers */ -static void fsmc_nand_setup(void __iomem *regs, uint32_t bank, - uint32_t busw, struct fsmc_nand_timings *timings) +static void fsmc_nand_setup(struct fsmc_nand_data *host, + struct fsmc_nand_timings *tims) { uint32_t value = FSMC_DEVTYPE_NAND | FSMC_ENABLE | FSMC_WAITON; uint32_t tclr, tar, thiz, thold, twait, tset; - struct fsmc_nand_timings *tims; - struct fsmc_nand_timings default_timings = { - .tclr = FSMC_TCLR_1, - .tar = FSMC_TAR_1, - .thiz = FSMC_THIZ_1, - .thold = FSMC_THOLD_4, - .twait = FSMC_TWAIT_6, - .tset = FSMC_TSET_0, - }; - - if (timings) - tims = timings; - else - tims = &default_timings; + unsigned int bank = host->bank; + void __iomem *regs = host->regs_va; tclr = (tims->tclr & FSMC_TCLR_MASK) << FSMC_TCLR_SHIFT; tar = (tims->tar & FSMC_TAR_MASK) << FSMC_TAR_SHIFT; @@ -329,7 +317,7 @@ static void fsmc_nand_setup(void __iomem *regs, uint32_t bank, twait = (tims->twait & FSMC_TWAIT_MASK) << FSMC_TWAIT_SHIFT; tset = (tims->tset & FSMC_TSET_MASK) << FSMC_TSET_SHIFT; - if (busw) + if (host->nand.options & NAND_BUSWIDTH_16) writel_relaxed(value | FSMC_DEVWID_16, FSMC_NAND_REG(regs, bank, PC)); else @@ -344,6 +332,87 @@ static void fsmc_nand_setup(void __iomem *regs, uint32_t bank, 
FSMC_NAND_REG(regs, bank, ATTRIB)); } +static int fsmc_calc_timings(struct fsmc_nand_data *host, + const struct nand_sdr_timings *sdrt, + struct fsmc_nand_timings *tims) +{ + unsigned long hclk = clk_get_rate(host->clk); + unsigned long hclkn = NSEC_PER_SEC / hclk; + uint32_t thiz, thold, twait, tset; + + if (sdrt->tRC_min < 30000) + return -EOPNOTSUPP; + + tims->tar = DIV_ROUND_UP(sdrt->tAR_min / 1000, hclkn) - 1; + if (tims->tar > FSMC_TAR_MASK) + tims->tar = FSMC_TAR_MASK; + tims->tclr = DIV_ROUND_UP(sdrt->tCLR_min / 1000, hclkn) - 1; + if (tims->tclr > FSMC_TCLR_MASK) + tims->tclr = FSMC_TCLR_MASK; + + thiz = sdrt->tCS_min - sdrt->tWP_min; + tims->thiz = DIV_ROUND_UP(thiz / 1000, hclkn); + + thold = sdrt->tDH_min; + if (thold < sdrt->tCH_min) + thold = sdrt->tCH_min; + if (thold < sdrt->tCLH_min) + thold = sdrt->tCLH_min; + if (thold < sdrt->tWH_min) + thold = sdrt->tWH_min; + if (thold < sdrt->tALH_min) + thold = sdrt->tALH_min; + if (thold < sdrt->tREH_min) + thold = sdrt->tREH_min; + tims->thold = DIV_ROUND_UP(thold / 1000, hclkn); + if (tims->thold == 0) + tims->thold = 1; + else if (tims->thold > FSMC_THOLD_MASK) + tims->thold = FSMC_THOLD_MASK; + + twait = max(sdrt->tRP_min, sdrt->tWP_min); + tims->twait = DIV_ROUND_UP(twait / 1000, hclkn) - 1; + if (tims->twait == 0) + tims->twait = 1; + else if (tims->twait > FSMC_TWAIT_MASK) + tims->twait = FSMC_TWAIT_MASK; + + tset = max(sdrt->tCS_min - sdrt->tWP_min, + sdrt->tCEA_max - sdrt->tREA_max); + tims->tset = DIV_ROUND_UP(tset / 1000, hclkn) - 1; + if (tims->tset == 0) + tims->tset = 1; + else if (tims->tset > FSMC_TSET_MASK) + tims->tset = FSMC_TSET_MASK; + + return 0; +} + +static int fsmc_setup_data_interface(struct mtd_info *mtd, int csline, + const struct nand_data_interface *conf) +{ + struct nand_chip *nand = mtd_to_nand(mtd); + struct fsmc_nand_data *host = nand_get_controller_data(nand); + struct fsmc_nand_timings tims; + const struct nand_sdr_timings *sdrt; + int ret; + + sdrt = nand_get_sdr_timings(conf); + if (IS_ERR(sdrt)) + return PTR_ERR(sdrt); + + ret = fsmc_calc_timings(host, sdrt, &tims); + if (ret) + return ret; + + if (csline == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + + fsmc_nand_setup(host, &tims); + + return 0; +} + /* * fsmc_enable_hwecc - Enables Hardware ECC through FSMC registers */ @@ -796,10 +865,8 @@ static int fsmc_nand_probe_config_dt(struct platform_device *pdev, return -ENOMEM; ret = of_property_read_u8_array(np, "timings", (u8 *)host->dev_timings, sizeof(*host->dev_timings)); - if (ret) { - dev_info(&pdev->dev, "No timings in dts specified, using default timings!\n"); + if (ret) host->dev_timings = NULL; - } /* Set default NAND bank to 0 */ host->bank = 0; @@ -933,9 +1000,10 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) break; } - fsmc_nand_setup(host->regs_va, host->bank, - nand->options & NAND_BUSWIDTH_16, - host->dev_timings); + if (host->dev_timings) + fsmc_nand_setup(host, host->dev_timings); + else + nand->setup_data_interface = fsmc_setup_data_interface; if (AMBA_REV_BITS(host->pid) >= 8) { nand->ecc.read_page = fsmc_read_page_hwecc; @@ -986,6 +1054,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev) break; } + case NAND_ECC_ON_DIE: + break; + default: dev_err(&pdev->dev, "Unsupported ECC mode!\n"); goto err_probe; @@ -1073,9 +1144,8 @@ static int fsmc_nand_resume(struct device *dev) struct fsmc_nand_data *host = dev_get_drvdata(dev); if (host) { clk_prepare_enable(host->clk); - fsmc_nand_setup(host->regs_va, host->bank, - host->nand.options & 
NAND_BUSWIDTH_16, - host->dev_timings); + if (host->dev_timings) + fsmc_nand_setup(host, host->dev_timings); } return 0; } diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c index 141bd70a49c2..97787246af41 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c @@ -26,7 +26,7 @@ #include "gpmi-regs.h" #include "bch-regs.h" -static struct timing_threshod timing_default_threshold = { +static struct timing_threshold timing_default_threshold = { .max_data_setup_cycles = (BM_GPMI_TIMING0_DATA_SETUP >> BP_GPMI_TIMING0_DATA_SETUP), .internal_data_setup_in_ns = 0, @@ -329,7 +329,7 @@ static unsigned int ns_to_cycles(unsigned int time, static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this, struct gpmi_nfc_hardware_timing *hw) { - struct timing_threshod *nfc = &timing_default_threshold; + struct timing_threshold *nfc = &timing_default_threshold; struct resources *r = &this->resources; struct nand_chip *nand = &this->nand; struct nand_timing target = this->timing; @@ -932,7 +932,7 @@ static int enable_edo_mode(struct gpmi_nand_data *this, int mode) nand->select_chip(mtd, 0); - /* [1] send SET FEATURE commond to NAND */ + /* [1] send SET FEATURE command to NAND */ feature[0] = mode; ret = nand->onfi_set_features(mtd, nand, ONFI_FEATURE_ADDR_TIMING_MODE, feature); diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c index d52139635b67..50f8d4a1b983 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c @@ -82,6 +82,10 @@ static int gpmi_ooblayout_free(struct mtd_info *mtd, int section, return 0; } +static const char * const gpmi_clks_for_mx2x[] = { + "gpmi_io", +}; + static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = { .ecc = gpmi_ooblayout_ecc, .free = gpmi_ooblayout_free, @@ -91,24 +95,48 @@ static const struct gpmi_devdata gpmi_devdata_imx23 = { .type = IS_MX23, .bch_max_ecc_strength = 20, .max_chain_delay = 16, + .clks = gpmi_clks_for_mx2x, + .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x), }; static const struct gpmi_devdata gpmi_devdata_imx28 = { .type = IS_MX28, .bch_max_ecc_strength = 20, .max_chain_delay = 16, + .clks = gpmi_clks_for_mx2x, + .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x), +}; + +static const char * const gpmi_clks_for_mx6[] = { + "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch", }; static const struct gpmi_devdata gpmi_devdata_imx6q = { .type = IS_MX6Q, .bch_max_ecc_strength = 40, .max_chain_delay = 12, + .clks = gpmi_clks_for_mx6, + .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6), }; static const struct gpmi_devdata gpmi_devdata_imx6sx = { .type = IS_MX6SX, .bch_max_ecc_strength = 62, .max_chain_delay = 12, + .clks = gpmi_clks_for_mx6, + .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6), +}; + +static const char * const gpmi_clks_for_mx7d[] = { + "gpmi_io", "gpmi_bch_apb", +}; + +static const struct gpmi_devdata gpmi_devdata_imx7d = { + .type = IS_MX7D, + .bch_max_ecc_strength = 62, + .max_chain_delay = 12, + .clks = gpmi_clks_for_mx7d, + .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d), }; static irqreturn_t bch_irq(int irq, void *cookie) @@ -599,35 +627,14 @@ acquire_err: return -EINVAL; } -static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = { - "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch", -}; - static int gpmi_get_clks(struct gpmi_nand_data *this) { struct resources *r = &this->resources; - char **extra_clks = NULL; struct clk *clk; int err, i; - /* The main clock is stored in the first. 
*/ - r->clock[0] = devm_clk_get(this->dev, "gpmi_io"); - if (IS_ERR(r->clock[0])) { - err = PTR_ERR(r->clock[0]); - goto err_clock; - } - - /* Get extra clocks */ - if (GPMI_IS_MX6(this)) - extra_clks = extra_clks_for_mx6q; - if (!extra_clks) - return 0; - - for (i = 1; i < GPMI_CLK_MAX; i++) { - if (extra_clks[i - 1] == NULL) - break; - - clk = devm_clk_get(this->dev, extra_clks[i - 1]); + for (i = 0; i < this->devdata->clks_count; i++) { + clk = devm_clk_get(this->dev, this->devdata->clks[i]); if (IS_ERR(clk)) { err = PTR_ERR(clk); goto err_clock; @@ -1929,12 +1936,6 @@ static int gpmi_set_geometry(struct gpmi_nand_data *this) return gpmi_alloc_dma_buffer(this); } -static void gpmi_nand_exit(struct gpmi_nand_data *this) -{ - nand_release(nand_to_mtd(&this->nand)); - gpmi_free_dma_buffer(this); -} - static int gpmi_init_last(struct gpmi_nand_data *this) { struct nand_chip *chip = &this->nand; @@ -2048,18 +2049,20 @@ static int gpmi_nand_init(struct gpmi_nand_data *this) ret = nand_boot_init(this); if (ret) - goto err_out; + goto err_nand_cleanup; ret = chip->scan_bbt(mtd); if (ret) - goto err_out; + goto err_nand_cleanup; ret = mtd_device_register(mtd, NULL, 0); if (ret) - goto err_out; + goto err_nand_cleanup; return 0; +err_nand_cleanup: + nand_cleanup(chip); err_out: - gpmi_nand_exit(this); + gpmi_free_dma_buffer(this); return ret; } @@ -2076,6 +2079,9 @@ static const struct of_device_id gpmi_nand_id_table[] = { }, { .compatible = "fsl,imx6sx-gpmi-nand", .data = &gpmi_devdata_imx6sx, + }, { + .compatible = "fsl,imx7d-gpmi-nand", + .data = &gpmi_devdata_imx7d, }, {} }; MODULE_DEVICE_TABLE(of, gpmi_nand_id_table); @@ -2129,7 +2135,8 @@ static int gpmi_nand_remove(struct platform_device *pdev) { struct gpmi_nand_data *this = platform_get_drvdata(pdev); - gpmi_nand_exit(this); + nand_release(nand_to_mtd(&this->nand)); + gpmi_free_dma_buffer(this); release_resources(this); return 0; } diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h index 4e49a1f5fa27..9df0ad64e7e0 100644 --- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h +++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h @@ -123,13 +123,16 @@ enum gpmi_type { IS_MX23, IS_MX28, IS_MX6Q, - IS_MX6SX + IS_MX6SX, + IS_MX7D, }; struct gpmi_devdata { enum gpmi_type type; int bch_max_ecc_strength; int max_chain_delay; /* See the async EDO mode */ + const char * const *clks; + const int clks_count; }; struct gpmi_nand_data { @@ -231,7 +234,7 @@ struct gpmi_nfc_hardware_timing { }; /** - * struct timing_threshod - Timing threshold + * struct timing_threshold - Timing threshold * @max_data_setup_cycles: The maximum number of data setup cycles that * can be expressed in the hardware. * @internal_data_setup_in_ns: The time, in ns, that the NFC hardware requires @@ -253,7 +256,7 @@ struct gpmi_nfc_hardware_timing { * progress, this is the clock frequency during * the most recent I/O transaction. 
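 * For instance, assuming a 100 MHz clock (10 ns period), a 15 ns data
 * setup requirement corresponds to roughly DIV_ROUND_UP(15, 10) = 2
 * cycles, bounded by max_data_setup_cycles above.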
*/ -struct timing_threshod { +struct timing_threshold { const unsigned int max_chip_count; const unsigned int max_data_setup_cycles; const unsigned int internal_data_setup_in_ns; @@ -305,6 +308,8 @@ void gpmi_copy_bits(u8 *dst, size_t dst_bit_off, #define GPMI_IS_MX28(x) ((x)->devdata->type == IS_MX28) #define GPMI_IS_MX6Q(x) ((x)->devdata->type == IS_MX6Q) #define GPMI_IS_MX6SX(x) ((x)->devdata->type == IS_MX6SX) +#define GPMI_IS_MX7D(x) ((x)->devdata->type == IS_MX7D) -#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x)) +#define GPMI_IS_MX6(x) (GPMI_IS_MX6Q(x) || GPMI_IS_MX6SX(x) || \ + GPMI_IS_MX7D(x)) #endif diff --git a/drivers/mtd/nand/hisi504_nand.c b/drivers/mtd/nand/hisi504_nand.c index e40364eeb556..530caa80b1b6 100644 --- a/drivers/mtd/nand/hisi504_nand.c +++ b/drivers/mtd/nand/hisi504_nand.c @@ -764,6 +764,8 @@ static int hisi_nfc_probe(struct platform_device *pdev) chip->write_buf = hisi_nfc_write_buf; chip->read_buf = hisi_nfc_read_buf; chip->chip_delay = HINFC504_CHIP_DELAY; + chip->onfi_set_features = nand_onfi_get_set_features_notsupp; + chip->onfi_get_features = nand_onfi_get_set_features_notsupp; hisi_nfc_host_init(host); diff --git a/drivers/mtd/nand/jz4780_nand.c b/drivers/mtd/nand/jz4780_nand.c index a39bb70175ee..8bc835f71b26 100644 --- a/drivers/mtd/nand/jz4780_nand.c +++ b/drivers/mtd/nand/jz4780_nand.c @@ -205,7 +205,7 @@ static int jz4780_nand_init_ecc(struct jz4780_nand_chip *nand, struct device *de return -EINVAL; } - mtd->ooblayout = &nand_ooblayout_lp_ops; + mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops); return 0; } diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c index 6d6eaed2d20c..0e86fb6277c3 100644 --- a/drivers/mtd/nand/mpc5121_nfc.c +++ b/drivers/mtd/nand/mpc5121_nfc.c @@ -708,6 +708,8 @@ static int mpc5121_nfc_probe(struct platform_device *op) chip->read_buf = mpc5121_nfc_read_buf; chip->write_buf = mpc5121_nfc_write_buf; chip->select_chip = mpc5121_nfc_select_chip; + chip->onfi_set_features = nand_onfi_get_set_features_notsupp; + chip->onfi_get_features = nand_onfi_get_set_features_notsupp; chip->bbt_options = NAND_BBT_USE_FLASH; chip->ecc.mode = NAND_ECC_SOFT; chip->ecc.algo = NAND_ECC_HAMMING; diff --git a/drivers/mtd/nand/mtk_ecc.c b/drivers/mtd/nand/mtk_ecc.c index dbf256217b3e..6c3a4aab0b48 100644 --- a/drivers/mtd/nand/mtk_ecc.c +++ b/drivers/mtd/nand/mtk_ecc.c @@ -28,36 +28,16 @@ #define ECC_IDLE_MASK BIT(0) #define ECC_IRQ_EN BIT(0) +#define ECC_PG_IRQ_SEL BIT(1) #define ECC_OP_ENABLE (1) #define ECC_OP_DISABLE (0) #define ECC_ENCCON (0x00) #define ECC_ENCCNFG (0x04) -#define ECC_CNFG_4BIT (0) -#define ECC_CNFG_6BIT (1) -#define ECC_CNFG_8BIT (2) -#define ECC_CNFG_10BIT (3) -#define ECC_CNFG_12BIT (4) -#define ECC_CNFG_14BIT (5) -#define ECC_CNFG_16BIT (6) -#define ECC_CNFG_18BIT (7) -#define ECC_CNFG_20BIT (8) -#define ECC_CNFG_22BIT (9) -#define ECC_CNFG_24BIT (0xa) -#define ECC_CNFG_28BIT (0xb) -#define ECC_CNFG_32BIT (0xc) -#define ECC_CNFG_36BIT (0xd) -#define ECC_CNFG_40BIT (0xe) -#define ECC_CNFG_44BIT (0xf) -#define ECC_CNFG_48BIT (0x10) -#define ECC_CNFG_52BIT (0x11) -#define ECC_CNFG_56BIT (0x12) -#define ECC_CNFG_60BIT (0x13) #define ECC_MODE_SHIFT (5) #define ECC_MS_SHIFT (16) #define ECC_ENCDIADDR (0x08) #define ECC_ENCIDLE (0x0C) -#define ECC_ENCPAR(x) (0x10 + (x) * sizeof(u32)) #define ECC_ENCIRQ_EN (0x80) #define ECC_ENCIRQ_STA (0x84) #define ECC_DECCON (0x100) @@ -66,7 +46,6 @@ #define DEC_CNFG_CORRECT (0x3 << 12) #define ECC_DECIDLE (0x10C) #define ECC_DECENUM0 (0x114) -#define ERR_MASK 
(0x3f) #define ECC_DECDONE (0x124) #define ECC_DECIRQ_EN (0x200) #define ECC_DECIRQ_STA (0x204) @@ -78,8 +57,17 @@ #define ECC_IRQ_REG(op) ((op) == ECC_ENCODE ? \ ECC_ENCIRQ_EN : ECC_DECIRQ_EN) +struct mtk_ecc_caps { + u32 err_mask; + const u8 *ecc_strength; + u8 num_ecc_strength; + u32 encode_parity_reg0; + int pg_irq_sel; +}; + struct mtk_ecc { struct device *dev; + const struct mtk_ecc_caps *caps; void __iomem *regs; struct clk *clk; @@ -87,7 +75,18 @@ struct mtk_ecc { struct mutex lock; u32 sectors; - u8 eccdata[112]; + u8 *eccdata; +}; + +/* ecc strength that each IP supports */ +static const u8 ecc_strength_mt2701[] = { + 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, + 40, 44, 48, 52, 56, 60 +}; + +static const u8 ecc_strength_mt2712[] = { + 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, + 40, 44, 48, 52, 56, 60, 68, 72, 80 }; static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, @@ -136,77 +135,24 @@ static irqreturn_t mtk_ecc_irq(int irq, void *id) return IRQ_HANDLED; } -static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config) +static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config) { - u32 ecc_bit = ECC_CNFG_4BIT, dec_sz, enc_sz; - u32 reg; - - switch (config->strength) { - case 4: - ecc_bit = ECC_CNFG_4BIT; - break; - case 6: - ecc_bit = ECC_CNFG_6BIT; - break; - case 8: - ecc_bit = ECC_CNFG_8BIT; - break; - case 10: - ecc_bit = ECC_CNFG_10BIT; - break; - case 12: - ecc_bit = ECC_CNFG_12BIT; - break; - case 14: - ecc_bit = ECC_CNFG_14BIT; - break; - case 16: - ecc_bit = ECC_CNFG_16BIT; - break; - case 18: - ecc_bit = ECC_CNFG_18BIT; - break; - case 20: - ecc_bit = ECC_CNFG_20BIT; - break; - case 22: - ecc_bit = ECC_CNFG_22BIT; - break; - case 24: - ecc_bit = ECC_CNFG_24BIT; - break; - case 28: - ecc_bit = ECC_CNFG_28BIT; - break; - case 32: - ecc_bit = ECC_CNFG_32BIT; - break; - case 36: - ecc_bit = ECC_CNFG_36BIT; - break; - case 40: - ecc_bit = ECC_CNFG_40BIT; - break; - case 44: - ecc_bit = ECC_CNFG_44BIT; - break; - case 48: - ecc_bit = ECC_CNFG_48BIT; - break; - case 52: - ecc_bit = ECC_CNFG_52BIT; - break; - case 56: - ecc_bit = ECC_CNFG_56BIT; - break; - case 60: - ecc_bit = ECC_CNFG_60BIT; - break; - default: - dev_err(ecc->dev, "invalid strength %d, default to 4 bits\n", + u32 ecc_bit, dec_sz, enc_sz; + u32 reg, i; + + for (i = 0; i < ecc->caps->num_ecc_strength; i++) { + if (ecc->caps->ecc_strength[i] == config->strength) + break; + } + + if (i == ecc->caps->num_ecc_strength) { + dev_err(ecc->dev, "invalid ecc strength %d\n", config->strength); + return -EINVAL; } + ecc_bit = i; + if (config->op == ECC_ENCODE) { /* configure ECC encoder (in bits) */ enc_sz = config->len << 3; @@ -232,6 +178,8 @@ static void mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config) if (config->sectors) ecc->sectors = 1 << (config->sectors - 1); } + + return 0; } void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, @@ -247,8 +195,8 @@ void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, offset = (i >> 2) << 2; err = readl(ecc->regs + ECC_DECENUM0 + offset); err = err >> ((i % 4) * 8); - err &= ERR_MASK; - if (err == ERR_MASK) { + err &= ecc->caps->err_mask; + if (err == ecc->caps->err_mask) { /* uncorrectable errors */ stats->failed++; continue; @@ -313,6 +261,7 @@ EXPORT_SYMBOL(of_mtk_ecc_get); int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config) { enum mtk_ecc_operation op = config->op; + u16 reg_val; int ret; ret = mutex_lock_interruptible(&ecc->lock); @@ 
-322,11 +271,27 @@ int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config) } mtk_ecc_wait_idle(ecc, op); - mtk_ecc_config(ecc, config); - writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op)); - init_completion(&ecc->done); - writew(ECC_IRQ_EN, ecc->regs + ECC_IRQ_REG(op)); + ret = mtk_ecc_config(ecc, config); + if (ret) { + mutex_unlock(&ecc->lock); + return ret; + } + + if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) { + init_completion(&ecc->done); + reg_val = ECC_IRQ_EN; + /* + * For ECC_NFI_MODE, if ecc->caps->pg_irq_sel is 1, then it + * means this chip can only generate one ecc irq during page + * read / write. If it is 0, generate one ecc irq per ecc step. + */ + if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE) + reg_val |= ECC_PG_IRQ_SEL; + writew(reg_val, ecc->regs + ECC_IRQ_REG(op)); + } + + writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op)); return 0; } @@ -396,7 +361,9 @@ int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, len = (config->strength * ECC_PARITY_BITS + 7) >> 3; /* write the parity bytes generated by the ECC back to temp buffer */ - __ioread32_copy(ecc->eccdata, ecc->regs + ECC_ENCPAR(0), round_up(len, 4)); + __ioread32_copy(ecc->eccdata, + ecc->regs + ecc->caps->encode_parity_reg0, + round_up(len, 4)); /* copy into possibly unaligned OOB region with actual length */ memcpy(data + bytes, ecc->eccdata, len); @@ -409,37 +376,79 @@ timeout: } EXPORT_SYMBOL(mtk_ecc_encode); -void mtk_ecc_adjust_strength(u32 *p) +void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p) { - u32 ecc[] = {4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, - 40, 44, 48, 52, 56, 60}; + const u8 *ecc_strength = ecc->caps->ecc_strength; int i; - for (i = 0; i < ARRAY_SIZE(ecc); i++) { - if (*p <= ecc[i]) { + for (i = 0; i < ecc->caps->num_ecc_strength; i++) { + if (*p <= ecc_strength[i]) { if (!i) - *p = ecc[i]; - else if (*p != ecc[i]) - *p = ecc[i - 1]; + *p = ecc_strength[i]; + else if (*p != ecc_strength[i]) + *p = ecc_strength[i - 1]; return; } } - *p = ecc[ARRAY_SIZE(ecc) - 1]; + *p = ecc_strength[ecc->caps->num_ecc_strength - 1]; } EXPORT_SYMBOL(mtk_ecc_adjust_strength); +static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { + .err_mask = 0x3f, + .ecc_strength = ecc_strength_mt2701, + .num_ecc_strength = 20, + .encode_parity_reg0 = 0x10, + .pg_irq_sel = 0, +}; + +static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = { + .err_mask = 0x7f, + .ecc_strength = ecc_strength_mt2712, + .num_ecc_strength = 23, + .encode_parity_reg0 = 0x300, + .pg_irq_sel = 1, +}; + +static const struct of_device_id mtk_ecc_dt_match[] = { + { + .compatible = "mediatek,mt2701-ecc", + .data = &mtk_ecc_caps_mt2701, + }, { + .compatible = "mediatek,mt2712-ecc", + .data = &mtk_ecc_caps_mt2712, + }, + {}, +}; + static int mtk_ecc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_ecc *ecc; struct resource *res; + const struct of_device_id *of_ecc_id = NULL; + u32 max_eccdata_size; int irq, ret; ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); if (!ecc) return -ENOMEM; + of_ecc_id = of_match_device(mtk_ecc_dt_match, &pdev->dev); + if (!of_ecc_id) + return -ENODEV; + + ecc->caps = of_ecc_id->data; + + max_eccdata_size = ecc->caps->num_ecc_strength - 1; + max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size]; + max_eccdata_size = (max_eccdata_size * ECC_PARITY_BITS + 7) >> 3; + max_eccdata_size = round_up(max_eccdata_size, 4); + ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL); + if (!ecc->eccdata) + return -ENOMEM;
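+ /*
+ * The allocation above sizes eccdata for the strongest supported
+ * setting; e.g. on mt2712, assuming ECC_PARITY_BITS is 14 on these
+ * IPs, 80-bit correction needs (80 * 14 + 7) / 8 = 140 bytes, which
+ * is already 4-byte aligned, so round_up() leaves it at 140.
+ */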
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ecc->regs = devm_ioremap_resource(dev, res); if (IS_ERR(ecc->regs)) { @@ -500,19 +509,12 @@ static int mtk_ecc_resume(struct device *dev) return ret; } - mtk_ecc_hw_init(ecc); - return 0; } static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume); #endif -static const struct of_device_id mtk_ecc_dt_match[] = { - { .compatible = "mediatek,mt2701-ecc" }, - {}, -}; - MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match); static struct platform_driver mtk_ecc_driver = { diff --git a/drivers/mtd/nand/mtk_ecc.h b/drivers/mtd/nand/mtk_ecc.h index cbeba5cd1c13..d245c14f1b80 100644 --- a/drivers/mtd/nand/mtk_ecc.h +++ b/drivers/mtd/nand/mtk_ecc.h @@ -42,7 +42,7 @@ void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int); int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation); int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *); void mtk_ecc_disable(struct mtk_ecc *); -void mtk_ecc_adjust_strength(u32 *); +void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p); struct mtk_ecc *of_mtk_ecc_get(struct device_node *); void mtk_ecc_release(struct mtk_ecc *); diff --git a/drivers/mtd/nand/mtk_nand.c b/drivers/mtd/nand/mtk_nand.c index 6c517c682939..f7ae99464375 100644 --- a/drivers/mtd/nand/mtk_nand.c +++ b/drivers/mtd/nand/mtk_nand.c @@ -24,6 +24,7 @@ #include <linux/module.h> #include <linux/iopoll.h> #include <linux/of.h> +#include <linux/of_device.h> #include "mtk_ecc.h" /* NAND controller register definition */ @@ -38,23 +39,6 @@ #define NFI_PAGEFMT (0x04) #define PAGEFMT_FDM_ECC_SHIFT (12) #define PAGEFMT_FDM_SHIFT (8) -#define PAGEFMT_SPARE_16 (0) -#define PAGEFMT_SPARE_26 (1) -#define PAGEFMT_SPARE_27 (2) -#define PAGEFMT_SPARE_28 (3) -#define PAGEFMT_SPARE_32 (4) -#define PAGEFMT_SPARE_36 (5) -#define PAGEFMT_SPARE_40 (6) -#define PAGEFMT_SPARE_44 (7) -#define PAGEFMT_SPARE_48 (8) -#define PAGEFMT_SPARE_49 (9) -#define PAGEFMT_SPARE_50 (0xa) -#define PAGEFMT_SPARE_51 (0xb) -#define PAGEFMT_SPARE_52 (0xc) -#define PAGEFMT_SPARE_62 (0xd) -#define PAGEFMT_SPARE_63 (0xe) -#define PAGEFMT_SPARE_64 (0xf) -#define PAGEFMT_SPARE_SHIFT (4) #define PAGEFMT_SEC_SEL_512 BIT(2) #define PAGEFMT_512_2K (0) #define PAGEFMT_2K_4K (1) @@ -115,6 +99,17 @@ #define MTK_RESET_TIMEOUT (1000000) #define MTK_MAX_SECTOR (16) #define MTK_NAND_MAX_NSELS (2) +#define MTK_NFC_MIN_SPARE (16) +#define ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt) \ + ((tpoecs) << 28 | (tprecs) << 22 | (tc2r) << 16 | \ + (tw2r) << 12 | (twh) << 8 | (twst) << 4 | (trlt)) + +struct mtk_nfc_caps { + const u8 *spare_size; + u8 num_spare_size; + u8 pageformat_spare_shift; + u8 nfi_clk_div; +}; struct mtk_nfc_bad_mark_ctl { void (*bm_swap)(struct mtd_info *, u8 *buf, int raw); @@ -155,6 +150,7 @@ struct mtk_nfc { struct mtk_ecc *ecc; struct device *dev; + const struct mtk_nfc_caps *caps; void __iomem *regs; struct completion done; @@ -163,6 +159,20 @@ u8 *buffer; }; +/* + * Supported spare sizes of each IP. + * The order should be the same as the spare size bit field definition of + * register NFI_PAGEFMT.
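+ * For example, a chip with 2048-byte pages, 64 OOB bytes and four
+ * 512-byte ECC sectors has 16 spare bytes per sector, i.e. index 0 of
+ * this table, which mtk_nfc_set_spare_per_sector() selects and
+ * mtk_nfc_hw_runtime_config() shifts by pageformat_spare_shift.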
+ */ +static const u8 spare_size_mt2701[] = { + 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 63, 64 +}; + +static const u8 spare_size_mt2712[] = { + 16, 26, 27, 28, 32, 36, 40, 44, 48, 49, 50, 51, 52, 62, 61, 63, 64, 67, + 74 +}; + static inline struct mtk_nfc_nand_chip *to_mtk_nand(struct nand_chip *nand) { return container_of(nand, struct mtk_nfc_nand_chip, nand); @@ -308,7 +318,7 @@ static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd) struct nand_chip *chip = mtd_to_nand(mtd); struct mtk_nfc_nand_chip *mtk_nand = to_mtk_nand(chip); struct mtk_nfc *nfc = nand_get_controller_data(chip); - u32 fmt, spare; + u32 fmt, spare, i; if (!mtd->writesize) return 0; @@ -352,63 +362,21 @@ static int mtk_nfc_hw_runtime_config(struct mtd_info *mtd) if (chip->ecc.size == 1024) spare >>= 1; - switch (spare) { - case 16: - fmt |= (PAGEFMT_SPARE_16 << PAGEFMT_SPARE_SHIFT); - break; - case 26: - fmt |= (PAGEFMT_SPARE_26 << PAGEFMT_SPARE_SHIFT); - break; - case 27: - fmt |= (PAGEFMT_SPARE_27 << PAGEFMT_SPARE_SHIFT); - break; - case 28: - fmt |= (PAGEFMT_SPARE_28 << PAGEFMT_SPARE_SHIFT); - break; - case 32: - fmt |= (PAGEFMT_SPARE_32 << PAGEFMT_SPARE_SHIFT); - break; - case 36: - fmt |= (PAGEFMT_SPARE_36 << PAGEFMT_SPARE_SHIFT); - break; - case 40: - fmt |= (PAGEFMT_SPARE_40 << PAGEFMT_SPARE_SHIFT); - break; - case 44: - fmt |= (PAGEFMT_SPARE_44 << PAGEFMT_SPARE_SHIFT); - break; - case 48: - fmt |= (PAGEFMT_SPARE_48 << PAGEFMT_SPARE_SHIFT); - break; - case 49: - fmt |= (PAGEFMT_SPARE_49 << PAGEFMT_SPARE_SHIFT); - break; - case 50: - fmt |= (PAGEFMT_SPARE_50 << PAGEFMT_SPARE_SHIFT); - break; - case 51: - fmt |= (PAGEFMT_SPARE_51 << PAGEFMT_SPARE_SHIFT); - break; - case 52: - fmt |= (PAGEFMT_SPARE_52 << PAGEFMT_SPARE_SHIFT); - break; - case 62: - fmt |= (PAGEFMT_SPARE_62 << PAGEFMT_SPARE_SHIFT); - break; - case 63: - fmt |= (PAGEFMT_SPARE_63 << PAGEFMT_SPARE_SHIFT); - break; - case 64: - fmt |= (PAGEFMT_SPARE_64 << PAGEFMT_SPARE_SHIFT); - break; - default: - dev_err(nfc->dev, "invalid spare per sector %d\n", spare); + for (i = 0; i < nfc->caps->num_spare_size; i++) { + if (nfc->caps->spare_size[i] == spare) + break; + } + + if (i == nfc->caps->num_spare_size) { + dev_err(nfc->dev, "invalid spare size %d\n", spare); return -EINVAL; } + fmt |= i << nfc->caps->pageformat_spare_shift; + fmt |= mtk_nand->fdm.reg_size << PAGEFMT_FDM_SHIFT; fmt |= mtk_nand->fdm.ecc_size << PAGEFMT_FDM_ECC_SHIFT; - nfi_writew(nfc, fmt, NFI_PAGEFMT); + nfi_writel(nfc, fmt, NFI_PAGEFMT); nfc->ecc_cfg.strength = chip->ecc.strength; nfc->ecc_cfg.len = chip->ecc.size + mtk_nand->fdm.ecc_size; @@ -531,6 +499,74 @@ static void mtk_nfc_write_buf(struct mtd_info *mtd, const u8 *buf, int len) mtk_nfc_write_byte(mtd, buf[i]); } +static int mtk_nfc_setup_data_interface(struct mtd_info *mtd, int csline, + const struct nand_data_interface *conf) +{ + struct mtk_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd)); + const struct nand_sdr_timings *timings; + u32 rate, tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt; + + timings = nand_get_sdr_timings(conf); + if (IS_ERR(timings)) + return -ENOTSUPP; + + if (csline == NAND_DATA_IFACE_CHECK_ONLY) + return 0; + + rate = clk_get_rate(nfc->clk.nfi_clk); + /* There is a frequency divider in some IPs */ + rate /= nfc->caps->nfi_clk_div; + + /* turn clock rate into KHZ */ + rate /= 1000; + + tpoecs = max(timings->tALH_min, timings->tCLH_min) / 1000; + tpoecs = DIV_ROUND_UP(tpoecs * rate, 1000000); + tpoecs &= 0xf; + + tprecs = max(timings->tCLS_min, timings->tALS_min) / 1000; + tprecs = 
DIV_ROUND_UP(tprecs * rate, 1000000); + tprecs &= 0x3f; + + /* sdr interface has no tCR which means CE# low to RE# low */ + tc2r = 0; + + tw2r = timings->tWHR_min / 1000; + tw2r = DIV_ROUND_UP(tw2r * rate, 1000000); + tw2r = DIV_ROUND_UP(tw2r - 1, 2); + tw2r &= 0xf; + + twh = max(timings->tREH_min, timings->tWH_min) / 1000; + twh = DIV_ROUND_UP(twh * rate, 1000000) - 1; + twh &= 0xf; + + twst = timings->tWP_min / 1000; + twst = DIV_ROUND_UP(twst * rate, 1000000) - 1; + twst &= 0xf; + + trlt = max(timings->tREA_max, timings->tRP_min) / 1000; + trlt = DIV_ROUND_UP(trlt * rate, 1000000) - 1; + trlt &= 0xf; + + /* + * ACCON: access timing control register + * ------------------------------------- + * 31:28: tpoecs, minimum required time for CS post pulling down after + * accessing the device + * 27:22: tprecs, minimum required time for CS pre pulling down before + * accessing the device + * 21:16: tc2r, minimum required time from NCEB low to NREB low + * 15:12: tw2r, minimum required time from NWEB high to NREB low. + * 11:08: twh, write enable hold time + * 07:04: twst, write wait states + * 03:00: trlt, read wait states + */ + trlt = ACCTIMING(tpoecs, tprecs, tc2r, tw2r, twh, twst, trlt); + nfi_writel(nfc, trlt, NFI_ACCCON); + + return 0; +} + static int mtk_nfc_sector_encode(struct nand_chip *chip, u8 *data) { struct mtk_nfc *nfc = nand_get_controller_data(chip); @@ -988,28 +1024,13 @@ static int mtk_nfc_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, static inline void mtk_nfc_hw_init(struct mtk_nfc *nfc) { /* - * ACCON: access timing control register - * ------------------------------------- - * 31:28: minimum required time for CS post pulling down after accessing - * the device - * 27:22: minimum required time for CS pre pulling down before accessing - * the device - * 21:16: minimum required time from NCEB low to NREB low - * 15:12: minimum required time from NWEB high to NREB low. - * 11:08: write enable hold time - * 07:04: write wait states - * 03:00: read wait states - */ - nfi_writel(nfc, 0x10804211, NFI_ACCCON); - - /* * CNRNB: nand ready/busy register * ------------------------------- * 7:4: timeout register for polling the NAND busy/ready signal * 0 : poll the status of the busy/ready signal after [7:4]*16 cycles. 
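/*
 * Worked example for the cycle conversions above, with assumed clock
 * figures (not from the patch): take nfi_clk at 133 MHz and
 * nfi_clk_div = 2, so rate = 66500 kHz.  For ONFI mode 0,
 * tWP_min = 50000 ps, the write-pulse field becomes:
 *
 *   twst = 50000 / 1000                          = 50 (ns)
 *   twst = DIV_ROUND_UP(50 * 66500, 1000000) - 1 = 3  (register value)
 *
 * Each timing is rounded up to the smallest whole number of controller
 * clock cycles that still satisfies the chip's minimum; twh, twst and
 * trlt are then biased by -1, apparently because the hardware counts
 * those fields from zero.
 */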
*/ nfi_writew(nfc, 0xf1, NFI_CNRNB); - nfi_writew(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT); + nfi_writel(nfc, PAGEFMT_8K_16K, NFI_PAGEFMT); mtk_nfc_hw_reset(nfc); @@ -1131,12 +1152,12 @@ static void mtk_nfc_set_bad_mark_ctl(struct mtk_nfc_bad_mark_ctl *bm_ctl, } } -static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd) +static int mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd) { struct nand_chip *nand = mtd_to_nand(mtd); - u32 spare[] = {16, 26, 27, 28, 32, 36, 40, 44, - 48, 49, 50, 51, 52, 62, 63, 64}; - u32 eccsteps, i; + struct mtk_nfc *nfc = nand_get_controller_data(nand); + const u8 *spare = nfc->caps->spare_size; + u32 eccsteps, i, closest_spare = 0; eccsteps = mtd->writesize / nand->ecc.size; *sps = mtd->oobsize / eccsteps; @@ -1144,28 +1165,31 @@ static void mtk_nfc_set_spare_per_sector(u32 *sps, struct mtd_info *mtd) if (nand->ecc.size == 1024) *sps >>= 1; - for (i = 0; i < ARRAY_SIZE(spare); i++) { - if (*sps <= spare[i]) { - if (!i) - *sps = spare[i]; - else if (*sps != spare[i]) - *sps = spare[i - 1]; - break; + if (*sps < MTK_NFC_MIN_SPARE) + return -EINVAL; + + for (i = 0; i < nfc->caps->num_spare_size; i++) { + if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) { + closest_spare = i; + if (*sps == spare[i]) + break; } } - if (i >= ARRAY_SIZE(spare)) - *sps = spare[ARRAY_SIZE(spare) - 1]; + *sps = spare[closest_spare]; if (nand->ecc.size == 1024) *sps <<= 1; + + return 0; } static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd) { struct nand_chip *nand = mtd_to_nand(mtd); + struct mtk_nfc *nfc = nand_get_controller_data(nand); u32 spare; - int free; + int free, ret; /* support only ecc hw mode */ if (nand->ecc.mode != NAND_ECC_HW) { @@ -1194,7 +1218,9 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd) nand->ecc.size = 1024; } - mtk_nfc_set_spare_per_sector(&spare, mtd); + ret = mtk_nfc_set_spare_per_sector(&spare, mtd); + if (ret) + return ret; /* calculate oob bytes except ecc parity data */ free = ((nand->ecc.strength * ECC_PARITY_BITS) + 7) >> 3; @@ -1214,7 +1240,7 @@ static int mtk_nfc_ecc_init(struct device *dev, struct mtd_info *mtd) } } - mtk_ecc_adjust_strength(&nand->ecc.strength); + mtk_ecc_adjust_strength(nfc->ecc, &nand->ecc.strength); dev_info(dev, "eccsize %d eccstrength %d\n", nand->ecc.size, nand->ecc.strength); @@ -1271,6 +1297,7 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, nand->read_byte = mtk_nfc_read_byte; nand->read_buf = mtk_nfc_read_buf; nand->cmd_ctrl = mtk_nfc_cmd_ctrl; + nand->setup_data_interface = mtk_nfc_setup_data_interface; /* set default mode in case dt entry is missing */ nand->ecc.mode = NAND_ECC_HW; @@ -1312,7 +1339,10 @@ static int mtk_nfc_nand_chip_init(struct device *dev, struct mtk_nfc *nfc, return -EINVAL; } - mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd); + ret = mtk_nfc_set_spare_per_sector(&chip->spare_per_sector, mtd); + if (ret) + return ret; + mtk_nfc_set_fdm(&chip->fdm, mtd); mtk_nfc_set_bad_mark_ctl(&chip->bad_mark, mtd); @@ -1354,12 +1384,39 @@ static int mtk_nfc_nand_chips_init(struct device *dev, struct mtk_nfc *nfc) return 0; } +static const struct mtk_nfc_caps mtk_nfc_caps_mt2701 = { + .spare_size = spare_size_mt2701, + .num_spare_size = 16, + .pageformat_spare_shift = 4, + .nfi_clk_div = 1, +}; + +static const struct mtk_nfc_caps mtk_nfc_caps_mt2712 = { + .spare_size = spare_size_mt2712, + .num_spare_size = 19, + .pageformat_spare_shift = 16, + .nfi_clk_div = 2, +}; + +static const struct of_device_id 
mtk_nfc_id_table[] = { + { + .compatible = "mediatek,mt2701-nfc", + .data = &mtk_nfc_caps_mt2701, + }, { + .compatible = "mediatek,mt2712-nfc", + .data = &mtk_nfc_caps_mt2712, + }, + {} +}; +MODULE_DEVICE_TABLE(of, mtk_nfc_id_table); + static int mtk_nfc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct mtk_nfc *nfc; struct resource *res; + const struct of_device_id *of_nfc_id = NULL; int ret, irq; nfc = devm_kzalloc(dev, sizeof(*nfc), GFP_KERNEL); @@ -1423,6 +1480,14 @@ static int mtk_nfc_probe(struct platform_device *pdev) goto clk_disable; } + of_nfc_id = of_match_device(mtk_nfc_id_table, &pdev->dev); + if (!of_nfc_id) { + ret = -ENODEV; + goto clk_disable; + } + + nfc->caps = of_nfc_id->data; + platform_set_drvdata(pdev, nfc); ret = mtk_nfc_nand_chips_init(dev, nfc); @@ -1485,8 +1550,6 @@ static int mtk_nfc_resume(struct device *dev) if (ret) return ret; - mtk_nfc_hw_init(nfc); - /* reset NAND chip if VCC was powered off */ list_for_each_entry(chip, &nfc->chips, node) { nand = &chip->nand; @@ -1503,12 +1566,6 @@ static int mtk_nfc_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(mtk_nfc_pm_ops, mtk_nfc_suspend, mtk_nfc_resume); #endif -static const struct of_device_id mtk_nfc_id_table[] = { - { .compatible = "mediatek,mt2701-nfc" }, - {} -}; -MODULE_DEVICE_TABLE(of, mtk_nfc_id_table); - static struct platform_driver mtk_nfc_driver = { .probe = mtk_nfc_probe, .remove = mtk_nfc_remove, diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c index 61ca020c5272..a764d5ca7536 100644 --- a/drivers/mtd/nand/mxc_nand.c +++ b/drivers/mtd/nand/mxc_nand.c @@ -152,9 +152,8 @@ struct mxc_nand_devtype_data { void (*select_chip)(struct mtd_info *mtd, int chip); int (*correct_data)(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc); - int (*setup_data_interface)(struct mtd_info *mtd, - const struct nand_data_interface *conf, - bool check_only); + int (*setup_data_interface)(struct mtd_info *mtd, int csline, + const struct nand_data_interface *conf); /* * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked @@ -1015,9 +1014,8 @@ static void preset_v1(struct mtd_info *mtd) writew(0x4, NFC_V1_V2_WRPROT); } -static int mxc_nand_v2_setup_data_interface(struct mtd_info *mtd, - const struct nand_data_interface *conf, - bool check_only) +static int mxc_nand_v2_setup_data_interface(struct mtd_info *mtd, int csline, + const struct nand_data_interface *conf) { struct nand_chip *nand_chip = mtd_to_nand(mtd); struct mxc_nand_host *host = nand_get_controller_data(nand_chip); @@ -1075,7 +1073,7 @@ static int mxc_nand_v2_setup_data_interface(struct mtd_info *mtd, return -EINVAL; } - if (check_only) + if (csline == NAND_DATA_IFACE_CHECK_ONLY) return 0; ret = clk_set_rate(host->clk, rate); diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c index bf8486c406d3..5fa5ddc94834 100644 --- a/drivers/mtd/nand/nand_base.c +++ b/drivers/mtd/nand/nand_base.c @@ -755,6 +755,16 @@ static void nand_command(struct mtd_info *mtd, unsigned int command, return; /* This applies to read commands */ + case NAND_CMD_READ0: + /* + * READ0 is sometimes used to exit GET STATUS mode. When this + * is the case no address cycles are requested, and we can use + * this information to detect that we should not wait for the + * device to be ready. 
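/*
 * Minimal sketch of the per-SoC capability lookup performed in the
 * mtk_nfc_probe() hunk above (simplified; the helper name is
 * illustrative): match the compatible string, then take ->data as the
 * mtk_nfc_caps that drive the spare-size table, PAGEFMT shift and
 * clock divider at runtime.
 */
static int example_mtk_nfc_get_caps(struct platform_device *pdev,
				    const struct mtk_nfc_caps **caps)
{
	const struct of_device_id *id;

	id = of_match_device(mtk_nfc_id_table, &pdev->dev);
	if (!id)
		return -ENODEV;	/* no matching compatible entry */

	*caps = id->data;	/* e.g. &mtk_nfc_caps_mt2712 */
	return 0;
}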
+ */ + if (column == -1 && page_addr == -1) + return; + default: /* * If we don't have access to the busy pin, we apply the given @@ -889,6 +899,15 @@ static void nand_command_lp(struct mtd_info *mtd, unsigned int command, return; case NAND_CMD_READ0: + /* + * READ0 is sometimes used to exit GET STATUS mode. When this + * is the case no address cycles are requested, and we can use + * this information to detect that READSTART should not be + * issued. + */ + if (column == -1 && page_addr == -1) + return; + chip->cmd_ctrl(mtd, NAND_CMD_READSTART, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE); chip->cmd_ctrl(mtd, NAND_CMD_NONE, @@ -1044,12 +1063,13 @@ static int nand_wait(struct mtd_info *mtd, struct nand_chip *chip) /** * nand_reset_data_interface - Reset data interface and timings * @chip: The NAND chip + * @chipnr: Internal die id * * Reset the Data interface and timings to ONFI mode 0. * * Returns 0 for success or negative error code otherwise. */ -static int nand_reset_data_interface(struct nand_chip *chip) +static int nand_reset_data_interface(struct nand_chip *chip, int chipnr) { struct mtd_info *mtd = nand_to_mtd(chip); const struct nand_data_interface *conf; @@ -1073,7 +1093,7 @@ static int nand_reset_data_interface(struct nand_chip *chip) */ conf = nand_get_default_data_interface(); - ret = chip->setup_data_interface(mtd, conf, false); + ret = chip->setup_data_interface(mtd, chipnr, conf); if (ret) pr_err("Failed to configure data interface to SDR timing mode 0\n"); @@ -1083,6 +1103,7 @@ static int nand_reset_data_interface(struct nand_chip *chip) /** * nand_setup_data_interface - Setup the best data interface and timings * @chip: The NAND chip + * @chipnr: Internal die id * * Find and configure the best data interface and NAND timings supported by * the chip and the driver. @@ -1092,7 +1113,7 @@ static int nand_reset_data_interface(struct nand_chip *chip) * * Returns 0 for success or negative error code otherwise. 
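/*
 * Illustrative caller for the READ0 early return added above (this
 * wrapper is hypothetical, but the same command sequence appears in
 * the Micron on-die ECC hunks below): after polling status with
 * NAND_CMD_STATUS, a READ0 issued with no address cycles
 * (column == -1, page_addr == -1) merely leaves GET STATUS mode, so
 * neither a READSTART nor a ready/busy wait must follow it.
 */
static void example_exit_status_mode(struct mtd_info *mtd,
				     struct nand_chip *chip)
{
	chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	/* ... consume the status byte(s) via chip->read_byte(mtd) ... */
	chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1); /* back to data reads */
}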
*/ -static int nand_setup_data_interface(struct nand_chip *chip) +static int nand_setup_data_interface(struct nand_chip *chip, int chipnr) { struct mtd_info *mtd = nand_to_mtd(chip); int ret; @@ -1116,7 +1137,7 @@ static int nand_setup_data_interface(struct nand_chip *chip) goto err; } - ret = chip->setup_data_interface(mtd, chip->data_interface, false); + ret = chip->setup_data_interface(mtd, chipnr, chip->data_interface); err: return ret; } @@ -1167,8 +1188,10 @@ static int nand_init_data_interface(struct nand_chip *chip) if (ret) continue; - ret = chip->setup_data_interface(mtd, chip->data_interface, - true); + /* Pass -1 to only */ + ret = chip->setup_data_interface(mtd, + NAND_DATA_IFACE_CHECK_ONLY, + chip->data_interface); if (!ret) { chip->onfi_timing_mode_default = mode; break; @@ -1195,7 +1218,7 @@ int nand_reset(struct nand_chip *chip, int chipnr) struct mtd_info *mtd = nand_to_mtd(chip); int ret; - ret = nand_reset_data_interface(chip); + ret = nand_reset_data_interface(chip, chipnr); if (ret) return ret; @@ -1208,7 +1231,7 @@ int nand_reset(struct nand_chip *chip, int chipnr) chip->select_chip(mtd, -1); chip->select_chip(mtd, chipnr); - ret = nand_setup_data_interface(chip); + ret = nand_setup_data_interface(chip, chipnr); chip->select_chip(mtd, -1); if (ret) return ret; @@ -1424,7 +1447,10 @@ static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold) for (; len >= sizeof(long); len -= sizeof(long), bitmap += sizeof(long)) { - weight = hweight_long(*((unsigned long *)bitmap)); + unsigned long d = *((unsigned long *)bitmap); + if (d == ~0UL) + continue; + weight = hweight_long(d); bitflips += BITS_PER_LONG - weight; if (unlikely(bitflips > bitflips_threshold)) return -EBADMSG; @@ -1527,14 +1553,15 @@ EXPORT_SYMBOL(nand_check_erased_ecc_chunk); * * Not for syndrome calculating ECC controllers, which use a special oob layout. */ -static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - uint8_t *buf, int oob_required, int page) +int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, int page) { chip->read_buf(mtd, buf, mtd->writesize); if (oob_required) chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); return 0; } +EXPORT_SYMBOL(nand_read_page_raw); /** * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc @@ -2472,8 +2499,8 @@ static int nand_read_oob(struct mtd_info *mtd, loff_t from, * * Not for syndrome calculating ECC controllers, which use a special oob layout. */ -static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, - const uint8_t *buf, int oob_required, int page) +int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required, int page) { chip->write_buf(mtd, buf, mtd->writesize); if (oob_required) @@ -2481,6 +2508,7 @@ static int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, return 0; } +EXPORT_SYMBOL(nand_write_page_raw); /** * nand_write_page_raw_syndrome - [INTERN] raw page write function @@ -2718,7 +2746,7 @@ static int nand_write_page_syndrome(struct mtd_info *mtd, */ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, uint32_t offset, int data_len, const uint8_t *buf, - int oob_required, int page, int cached, int raw) + int oob_required, int page, int raw) { int status, subpage; @@ -2744,30 +2772,12 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, if (status < 0) return status; - /* - * Cached progamming disabled for now. 
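/*
 * Standalone sketch of the nand_check_erased_buf() fast path added
 * above (simplified and illustrative): a fully erased word reads back
 * as all ones, so ~0UL words are skipped before paying for
 * hweight_long(); only words containing at least one bitflip are
 * popcounted.
 */
static int example_count_bitflips(const unsigned long *buf, int nwords,
				  int threshold)
{
	int bitflips = 0;

	while (nwords--) {
		unsigned long d = *buf++;

		if (d == ~0UL)	/* erased word, nothing to count */
			continue;

		bitflips += BITS_PER_LONG - hweight_long(d);
		if (bitflips > threshold)
			return -EBADMSG;
	}

	return bitflips;
}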
Not sure if it's worth the - * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s). - */ - cached = 0; + if (nand_standard_page_accessors(&chip->ecc)) { + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); - if (!cached || !NAND_HAS_CACHEPROG(chip)) { - - if (nand_standard_page_accessors(&chip->ecc)) - chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); status = chip->waitfunc(mtd, chip); - /* - * See if operation failed and additional status checks are - * available. - */ - if ((status & NAND_STATUS_FAIL) && (chip->errstat)) - status = chip->errstat(mtd, chip, FL_WRITING, status, - page); - if (status & NAND_STATUS_FAIL) return -EIO; - } else { - chip->cmdfunc(mtd, NAND_CMD_CACHEDPROG, -1, -1); - status = chip->waitfunc(mtd, chip); } return 0; @@ -2875,7 +2885,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, while (1) { int bytes = mtd->writesize; - int cached = writelen > bytes && page != blockmask; uint8_t *wbuf = buf; int use_bufpoi; int part_pagewr = (column || writelen < mtd->writesize); @@ -2893,7 +2902,6 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, if (use_bufpoi) { pr_debug("%s: using write bounce buffer for buf@%p\n", __func__, buf); - cached = 0; if (part_pagewr) bytes = min_t(int, bytes - column, writelen); chip->pagebuf = -1; @@ -2912,7 +2920,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to, } ret = nand_write_page(mtd, chip, column, bytes, wbuf, - oob_required, page, cached, + oob_required, page, (ops->mode == MTD_OPS_RAW)); if (ret) break; @@ -3228,14 +3236,6 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr, status = chip->erase(mtd, page & chip->pagemask); - /* - * See if operation failed and additional status checks are - * available - */ - if ((status & NAND_STATUS_FAIL) && (chip->errstat)) - status = chip->errstat(mtd, chip, FL_ERASING, - status, page); - /* See if block erase succeeded */ if (status & NAND_STATUS_FAIL) { pr_debug("%s: failed erase, page 0x%08x\n", @@ -3422,6 +3422,25 @@ static int nand_onfi_get_features(struct mtd_info *mtd, struct nand_chip *chip, } /** + * nand_onfi_get_set_features_notsupp - set/get features stub returning + * -ENOTSUPP + * @mtd: MTD device structure + * @chip: nand chip info structure + * @addr: feature address. + * @subfeature_param: the subfeature parameters, a four bytes array. + * + * Should be used by NAND controller drivers that do not support the SET/GET + * FEATURES operations. + */ +int nand_onfi_get_set_features_notsupp(struct mtd_info *mtd, + struct nand_chip *chip, int addr, + u8 *subfeature_param) +{ + return -ENOTSUPP; +} +EXPORT_SYMBOL(nand_onfi_get_set_features_notsupp); + +/** * nand_suspend - [MTD Interface] Suspend the NAND flash * @mtd: MTD device structure */ @@ -4180,6 +4199,7 @@ static const char * const nand_ecc_modes[] = { [NAND_ECC_HW] = "hw", [NAND_ECC_HW_SYNDROME] = "hw_syndrome", [NAND_ECC_HW_OOB_FIRST] = "hw_oob_first", + [NAND_ECC_ON_DIE] = "on-die", }; static int of_get_nand_ecc_mode(struct device_node *np) @@ -4374,7 +4394,7 @@ int nand_scan_ident(struct mtd_info *mtd, int maxchips, * For the other dies, nand_reset() will automatically switch to the * best mode for us. 
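/*
 * How the new nand_onfi_get_set_features_notsupp() stub is wired up
 * (this exact two-line pattern appears in the pxa3xx, qcom, sh_flctl
 * and vf610 hunks below): controllers that cannot issue SET/GET
 * FEATURES point both hooks at the stub, so callers get a clean
 * -ENOTSUPP instead of a mangled bus transaction.
 *
 *   chip->onfi_set_features = nand_onfi_get_set_features_notsupp;
 *   chip->onfi_get_features = nand_onfi_get_set_features_notsupp;
 */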
*/ - ret = nand_setup_data_interface(chip); + ret = nand_setup_data_interface(chip, 0); if (ret) goto err_nand_init; @@ -4512,6 +4532,226 @@ static int nand_set_ecc_soft_ops(struct mtd_info *mtd) } } +/** + * nand_check_ecc_caps - check the sanity of preset ECC settings + * @chip: nand chip info structure + * @caps: ECC caps info structure + * @oobavail: OOB size that the ECC engine can use + * + * When ECC step size and strength are already set, check if they are supported + * by the controller and the calculated ECC bytes fit within the chip's OOB. + * On success, the calculated ECC bytes is set. + */ +int nand_check_ecc_caps(struct nand_chip *chip, + const struct nand_ecc_caps *caps, int oobavail) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + const struct nand_ecc_step_info *stepinfo; + int preset_step = chip->ecc.size; + int preset_strength = chip->ecc.strength; + int nsteps, ecc_bytes; + int i, j; + + if (WARN_ON(oobavail < 0)) + return -EINVAL; + + if (!preset_step || !preset_strength) + return -ENODATA; + + nsteps = mtd->writesize / preset_step; + + for (i = 0; i < caps->nstepinfos; i++) { + stepinfo = &caps->stepinfos[i]; + + if (stepinfo->stepsize != preset_step) + continue; + + for (j = 0; j < stepinfo->nstrengths; j++) { + if (stepinfo->strengths[j] != preset_strength) + continue; + + ecc_bytes = caps->calc_ecc_bytes(preset_step, + preset_strength); + if (WARN_ON_ONCE(ecc_bytes < 0)) + return ecc_bytes; + + if (ecc_bytes * nsteps > oobavail) { + pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB", + preset_step, preset_strength); + return -ENOSPC; + } + + chip->ecc.bytes = ecc_bytes; + + return 0; + } + } + + pr_err("ECC (step, strength) = (%d, %d) not supported on this controller", + preset_step, preset_strength); + + return -ENOTSUPP; +} +EXPORT_SYMBOL_GPL(nand_check_ecc_caps); + +/** + * nand_match_ecc_req - meet the chip's requirement with least ECC bytes + * @chip: nand chip info structure + * @caps: ECC engine caps info structure + * @oobavail: OOB size that the ECC engine can use + * + * If a chip's ECC requirement is provided, try to meet it with the least + * number of ECC bytes (i.e. with the largest number of OOB-free bytes). + * On success, the chosen ECC settings are set. + */ +int nand_match_ecc_req(struct nand_chip *chip, + const struct nand_ecc_caps *caps, int oobavail) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + const struct nand_ecc_step_info *stepinfo; + int req_step = chip->ecc_step_ds; + int req_strength = chip->ecc_strength_ds; + int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total; + int best_step, best_strength, best_ecc_bytes; + int best_ecc_bytes_total = INT_MAX; + int i, j; + + if (WARN_ON(oobavail < 0)) + return -EINVAL; + + /* No information provided by the NAND chip */ + if (!req_step || !req_strength) + return -ENOTSUPP; + + /* number of correctable bits the chip requires in a page */ + req_corr = mtd->writesize / req_step * req_strength; + + for (i = 0; i < caps->nstepinfos; i++) { + stepinfo = &caps->stepinfos[i]; + step_size = stepinfo->stepsize; + + for (j = 0; j < stepinfo->nstrengths; j++) { + strength = stepinfo->strengths[j]; + + /* + * If both step size and strength are smaller than the + * chip's requirement, it is not easy to compare the + * resulted reliability. 
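/*
 * Example capability table a controller driver might feed to
 * nand_check_ecc_caps() above; the names and numbers are illustrative,
 * not from the patch.  calc_ecc_bytes() reports the OOB cost of one
 * (step size, strength) pair; a BCH code over 512-byte steps needs up
 * to 13 parity bits per correctable bit, hence the estimate below.
 */
static int example_calc_ecc_bytes(int step_size, int strength)
{
	return DIV_ROUND_UP(strength * 13, 8);
}

static const int example_strengths[] = { 8, 16, 24 };

static const struct nand_ecc_step_info example_stepinfo = {
	.stepsize	= 512,
	.strengths	= example_strengths,
	.nstrengths	= ARRAY_SIZE(example_strengths),
};

static const struct nand_ecc_caps example_ecc_caps = {
	.stepinfos	= &example_stepinfo,
	.nstepinfos	= 1,
	.calc_ecc_bytes	= example_calc_ecc_bytes,
};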
+ */ + if (step_size < req_step && strength < req_strength) + continue; + + if (mtd->writesize % step_size) + continue; + + nsteps = mtd->writesize / step_size; + + ecc_bytes = caps->calc_ecc_bytes(step_size, strength); + if (WARN_ON_ONCE(ecc_bytes < 0)) + continue; + ecc_bytes_total = ecc_bytes * nsteps; + + if (ecc_bytes_total > oobavail || + strength * nsteps < req_corr) + continue; + + /* + * We assume the best is to meet the chip's requrement + * with the least number of ECC bytes. + */ + if (ecc_bytes_total < best_ecc_bytes_total) { + best_ecc_bytes_total = ecc_bytes_total; + best_step = step_size; + best_strength = strength; + best_ecc_bytes = ecc_bytes; + } + } + } + + if (best_ecc_bytes_total == INT_MAX) + return -ENOTSUPP; + + chip->ecc.size = best_step; + chip->ecc.strength = best_strength; + chip->ecc.bytes = best_ecc_bytes; + + return 0; +} +EXPORT_SYMBOL_GPL(nand_match_ecc_req); + +/** + * nand_maximize_ecc - choose the max ECC strength available + * @chip: nand chip info structure + * @caps: ECC engine caps info structure + * @oobavail: OOB size that the ECC engine can use + * + * Choose the max ECC strength that is supported on the controller, and can fit + * within the chip's OOB. On success, the chosen ECC settings are set. + */ +int nand_maximize_ecc(struct nand_chip *chip, + const struct nand_ecc_caps *caps, int oobavail) +{ + struct mtd_info *mtd = nand_to_mtd(chip); + const struct nand_ecc_step_info *stepinfo; + int step_size, strength, nsteps, ecc_bytes, corr; + int best_corr = 0; + int best_step = 0; + int best_strength, best_ecc_bytes; + int i, j; + + if (WARN_ON(oobavail < 0)) + return -EINVAL; + + for (i = 0; i < caps->nstepinfos; i++) { + stepinfo = &caps->stepinfos[i]; + step_size = stepinfo->stepsize; + + /* If chip->ecc.size is already set, respect it */ + if (chip->ecc.size && step_size != chip->ecc.size) + continue; + + for (j = 0; j < stepinfo->nstrengths; j++) { + strength = stepinfo->strengths[j]; + + if (mtd->writesize % step_size) + continue; + + nsteps = mtd->writesize / step_size; + + ecc_bytes = caps->calc_ecc_bytes(step_size, strength); + if (WARN_ON_ONCE(ecc_bytes < 0)) + continue; + + if (ecc_bytes * nsteps > oobavail) + continue; + + corr = strength * nsteps; + + /* + * If the number of correctable bits is the same, + * bigger step_size has more reliability. + */ + if (corr > best_corr || + (corr == best_corr && step_size > best_step)) { + best_corr = corr; + best_step = step_size; + best_strength = strength; + best_ecc_bytes = ecc_bytes; + } + } + } + + if (!best_corr) + return -ENOTSUPP; + + chip->ecc.size = best_step; + chip->ecc.strength = best_strength; + chip->ecc.bytes = best_ecc_bytes; + + return 0; +} +EXPORT_SYMBOL_GPL(nand_maximize_ecc); + /* * Check if the chip configuration meet the datasheet requirements. @@ -4733,6 +4973,18 @@ int nand_scan_tail(struct mtd_info *mtd) } break; + case NAND_ECC_ON_DIE: + if (!ecc->read_page || !ecc->write_page) { + WARN(1, "No ECC functions supplied; on-die ECC not possible\n"); + ret = -EINVAL; + goto err_free; + } + if (!ecc->read_oob) + ecc->read_oob = nand_read_oob_std; + if (!ecc->write_oob) + ecc->write_oob = nand_write_oob_std; + break; + case NAND_ECC_NONE: pr_warn("NAND_ECC_NONE selected by board driver. 
This is not recommended!\n"); ecc->read_page = nand_read_page_raw; @@ -4773,6 +5025,11 @@ int nand_scan_tail(struct mtd_info *mtd) goto err_free; } ecc->total = ecc->steps * ecc->bytes; + if (ecc->total > mtd->oobsize) { + WARN(1, "Total number of ECC bytes exceeded oobsize\n"); + ret = -EINVAL; + goto err_free; + } /* * The number of bytes available for a client to place data into diff --git a/drivers/mtd/nand/nand_micron.c b/drivers/mtd/nand/nand_micron.c index 877011069251..c30ab60f8e1b 100644 --- a/drivers/mtd/nand/nand_micron.c +++ b/drivers/mtd/nand/nand_micron.c @@ -17,6 +17,12 @@ #include <linux/mtd/nand.h> +/* + * Special Micron status bit that indicates when the block has been + * corrected by on-die ECC and should be rewritten + */ +#define NAND_STATUS_WRITE_RECOMMENDED BIT(3) + struct nand_onfi_vendor_micron { u8 two_plane_read; u8 read_cache; @@ -66,9 +72,197 @@ static int micron_nand_onfi_init(struct nand_chip *chip) return 0; } +static int micron_nand_on_die_ooblayout_ecc(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section >= 4) + return -ERANGE; + + oobregion->offset = (section * 16) + 8; + oobregion->length = 8; + + return 0; +} + +static int micron_nand_on_die_ooblayout_free(struct mtd_info *mtd, int section, + struct mtd_oob_region *oobregion) +{ + if (section >= 4) + return -ERANGE; + + oobregion->offset = (section * 16) + 2; + oobregion->length = 6; + + return 0; +} + +static const struct mtd_ooblayout_ops micron_nand_on_die_ooblayout_ops = { + .ecc = micron_nand_on_die_ooblayout_ecc, + .free = micron_nand_on_die_ooblayout_free, +}; + +static int micron_nand_on_die_ecc_setup(struct nand_chip *chip, bool enable) +{ + u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, }; + + if (enable) + feature[0] |= ONFI_FEATURE_ON_DIE_ECC_EN; + + return chip->onfi_set_features(nand_to_mtd(chip), chip, + ONFI_FEATURE_ON_DIE_ECC, feature); +} + +static int +micron_nand_read_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip, + uint8_t *buf, int oob_required, + int page) +{ + int status; + int max_bitflips = 0; + + micron_nand_on_die_ecc_setup(chip, true); + + chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); + chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); + status = chip->read_byte(mtd); + if (status & NAND_STATUS_FAIL) + mtd->ecc_stats.failed++; + /* + * The internal ECC doesn't tell us the number of bitflips + * that have been corrected, but tells us if it recommends to + * rewrite the block. If it's the case, then we pretend we had + * a number of bitflips equal to the ECC strength, which will + * hint the NAND core to rewrite the block. + */ + else if (status & NAND_STATUS_WRITE_RECOMMENDED) + max_bitflips = chip->ecc.strength; + + chip->cmdfunc(mtd, NAND_CMD_READ0, -1, -1); + + nand_read_page_raw(mtd, chip, buf, oob_required, page); + + micron_nand_on_die_ecc_setup(chip, false); + + return max_bitflips; +} + +static int +micron_nand_write_page_on_die_ecc(struct mtd_info *mtd, struct nand_chip *chip, + const uint8_t *buf, int oob_required, + int page) +{ + int status; + + micron_nand_on_die_ecc_setup(chip, true); + + chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); + nand_write_page_raw(mtd, chip, buf, oob_required, page); + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); + status = chip->waitfunc(mtd, chip); + + micron_nand_on_die_ecc_setup(chip, false); + + return status & NAND_STATUS_FAIL ? 
-EIO : 0; +} + +static int +micron_nand_read_page_raw_on_die_ecc(struct mtd_info *mtd, + struct nand_chip *chip, + uint8_t *buf, int oob_required, + int page) +{ + chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); + nand_read_page_raw(mtd, chip, buf, oob_required, page); + + return 0; +} + +static int +micron_nand_write_page_raw_on_die_ecc(struct mtd_info *mtd, + struct nand_chip *chip, + const uint8_t *buf, int oob_required, + int page) +{ + int status; + + chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); + nand_write_page_raw(mtd, chip, buf, oob_required, page); + chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); + status = chip->waitfunc(mtd, chip); + + return status & NAND_STATUS_FAIL ? -EIO : 0; +} + +enum { + /* The NAND flash doesn't support on-die ECC */ + MICRON_ON_DIE_UNSUPPORTED, + + /* + * The NAND flash supports on-die ECC and it can be + * enabled/disabled by a set features command. + */ + MICRON_ON_DIE_SUPPORTED, + + /* + * The NAND flash supports on-die ECC, and it cannot be + * disabled. + */ + MICRON_ON_DIE_MANDATORY, +}; + +/* + * Try to detect if the NAND support on-die ECC. To do this, we enable + * the feature, and read back if it has been enabled as expected. We + * also check if it can be disabled, because some Micron NANDs do not + * allow disabling the on-die ECC and we don't support such NANDs for + * now. + * + * This function also has the side effect of disabling on-die ECC if + * it had been left enabled by the firmware/bootloader. + */ +static int micron_supports_on_die_ecc(struct nand_chip *chip) +{ + u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, }; + int ret; + + if (chip->onfi_version == 0) + return MICRON_ON_DIE_UNSUPPORTED; + + if (chip->bits_per_cell != 1) + return MICRON_ON_DIE_UNSUPPORTED; + + ret = micron_nand_on_die_ecc_setup(chip, true); + if (ret) + return MICRON_ON_DIE_UNSUPPORTED; + + chip->onfi_get_features(nand_to_mtd(chip), chip, + ONFI_FEATURE_ON_DIE_ECC, feature); + if ((feature[0] & ONFI_FEATURE_ON_DIE_ECC_EN) == 0) + return MICRON_ON_DIE_UNSUPPORTED; + + ret = micron_nand_on_die_ecc_setup(chip, false); + if (ret) + return MICRON_ON_DIE_UNSUPPORTED; + + chip->onfi_get_features(nand_to_mtd(chip), chip, + ONFI_FEATURE_ON_DIE_ECC, feature); + if (feature[0] & ONFI_FEATURE_ON_DIE_ECC_EN) + return MICRON_ON_DIE_MANDATORY; + + /* + * Some Micron NANDs have an on-die ECC of 4/512, some other + * 8/512. We only support the former. 
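/*
 * The decision flow of micron_supports_on_die_ecc() above, spelled
 * out since it is easy to lose in the diff:
 *   1. enable on-die ECC via SET FEATURES and read it back with GET
 *      FEATURES -- if the enable bit did not stick, the feature is
 *      unsupported;
 *   2. disable it and read back again -- if the bit cannot be
 *      cleared, the ECC is always-on (MICRON_ON_DIE_MANDATORY), which
 *      this driver rejects;
 *   3. finally, only the 4-bits-per-512-bytes flavour is accepted,
 *      checked right below against onfi_params.ecc_bits.
 */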
+ */ + if (chip->onfi_params.ecc_bits != 4) + return MICRON_ON_DIE_UNSUPPORTED; + + return MICRON_ON_DIE_SUPPORTED; +} + static int micron_nand_init(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); + int ondie; int ret; ret = micron_nand_onfi_init(chip); @@ -78,6 +272,34 @@ static int micron_nand_init(struct nand_chip *chip) if (mtd->writesize == 2048) chip->bbt_options |= NAND_BBT_SCAN2NDPAGE; + ondie = micron_supports_on_die_ecc(chip); + + if (ondie == MICRON_ON_DIE_MANDATORY) { + pr_err("On-die ECC forcefully enabled, not supported\n"); + return -EINVAL; + } + + if (chip->ecc.mode == NAND_ECC_ON_DIE) { + if (ondie == MICRON_ON_DIE_UNSUPPORTED) { + pr_err("On-die ECC selected but not supported\n"); + return -EINVAL; + } + + chip->ecc.options = NAND_ECC_CUSTOM_PAGE_ACCESS; + chip->ecc.bytes = 8; + chip->ecc.size = 512; + chip->ecc.strength = 4; + chip->ecc.algo = NAND_ECC_BCH; + chip->ecc.read_page = micron_nand_read_page_on_die_ecc; + chip->ecc.write_page = micron_nand_write_page_on_die_ecc; + chip->ecc.read_page_raw = + micron_nand_read_page_raw_on_die_ecc; + chip->ecc.write_page_raw = + micron_nand_write_page_raw_on_die_ecc; + + mtd_set_ooblayout(mtd, &micron_nand_on_die_ooblayout_ops); + } + return 0; } diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c index f8e463a97b9e..209170ed2b76 100644 --- a/drivers/mtd/nand/orion_nand.c +++ b/drivers/mtd/nand/orion_nand.c @@ -166,7 +166,11 @@ static int __init orion_nand_probe(struct platform_device *pdev) } } - clk_prepare_enable(info->clk); + ret = clk_prepare_enable(info->clk); + if (ret) { + dev_err(&pdev->dev, "failed to prepare clock!\n"); + return ret; + } ret = nand_scan(mtd, 1); if (ret) diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 649ba8200832..74dae4bbdac8 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -1812,6 +1812,8 @@ static int alloc_nand_resource(struct platform_device *pdev) chip->write_buf = pxa3xx_nand_write_buf; chip->options |= NAND_NO_SUBPAGE_WRITE; chip->cmdfunc = nand_cmdfunc; + chip->onfi_set_features = nand_onfi_get_set_features_notsupp; + chip->onfi_get_features = nand_onfi_get_set_features_notsupp; } nand_hw_control_init(chip->controller); diff --git a/drivers/mtd/nand/qcom_nandc.c b/drivers/mtd/nand/qcom_nandc.c index 57d483ac5765..88af7145a51a 100644 --- a/drivers/mtd/nand/qcom_nandc.c +++ b/drivers/mtd/nand/qcom_nandc.c @@ -2008,6 +2008,8 @@ static int qcom_nand_host_init(struct qcom_nand_controller *nandc, chip->read_byte = qcom_nandc_read_byte; chip->read_buf = qcom_nandc_read_buf; chip->write_buf = qcom_nandc_write_buf; + chip->onfi_set_features = nand_onfi_get_set_features_notsupp; + chip->onfi_get_features = nand_onfi_get_set_features_notsupp; /* * the bad block marker is readable only when we read the last codeword diff --git a/drivers/mtd/nand/s3c2410.c b/drivers/mtd/nand/s3c2410.c index f0b030d44f71..9e0c849607b9 100644 --- a/drivers/mtd/nand/s3c2410.c +++ b/drivers/mtd/nand/s3c2410.c @@ -812,9 +812,8 @@ static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, return -ENODEV; } -static int s3c2410_nand_setup_data_interface(struct mtd_info *mtd, - const struct nand_data_interface *conf, - bool check_only) +static int s3c2410_nand_setup_data_interface(struct mtd_info *mtd, int csline, + const struct nand_data_interface *conf) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); struct s3c2410_platform_nand *pdata = info->platform; diff --git a/drivers/mtd/nand/sh_flctl.c
b/drivers/mtd/nand/sh_flctl.c index 442ce619b3b6..891ac7b99305 100644 --- a/drivers/mtd/nand/sh_flctl.c +++ b/drivers/mtd/nand/sh_flctl.c @@ -1183,6 +1183,8 @@ static int flctl_probe(struct platform_device *pdev) nand->read_buf = flctl_read_buf; nand->select_chip = flctl_select_chip; nand->cmdfunc = flctl_cmdfunc; + nand->onfi_set_features = nand_onfi_get_set_features_notsupp; + nand->onfi_get_features = nand_onfi_get_set_features_notsupp; if (pdata->flcmncr_val & SEL_16BIT) nand->options |= NAND_BUSWIDTH_16; diff --git a/drivers/mtd/nand/sunxi_nand.c b/drivers/mtd/nand/sunxi_nand.c index 118a26fff368..d0b6f8f9f297 100644 --- a/drivers/mtd/nand/sunxi_nand.c +++ b/drivers/mtd/nand/sunxi_nand.c @@ -1301,7 +1301,6 @@ static int sunxi_nfc_hw_ecc_read_subpage(struct mtd_info *mtd, sunxi_nfc_hw_ecc_enable(mtd); - chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); for (i = data_offs / ecc->size; i < DIV_ROUND_UP(data_offs + readlen, ecc->size); i++) { int data_off = i * ecc->size; @@ -1592,9 +1591,8 @@ static int _sunxi_nand_lookup_timing(const s32 *lut, int lut_size, u32 duration, #define sunxi_nand_lookup_timing(l, p, c) \ _sunxi_nand_lookup_timing(l, ARRAY_SIZE(l), p, c) -static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, - const struct nand_data_interface *conf, - bool check_only) +static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, int csline, + const struct nand_data_interface *conf) { struct nand_chip *nand = mtd_to_nand(mtd); struct sunxi_nand_chip *chip = to_sunxi_nand(nand); @@ -1707,7 +1705,7 @@ static int sunxi_nfc_setup_data_interface(struct mtd_info *mtd, return tRHW; } - if (check_only) + if (csline == NAND_DATA_IFACE_CHECK_ONLY) return 0; /* @@ -1922,7 +1920,6 @@ static int sunxi_nand_hw_ecc_ctrl_init(struct mtd_info *mtd, ecc->write_subpage = sunxi_nfc_hw_ecc_write_subpage; ecc->read_oob_raw = nand_read_oob_std; ecc->write_oob_raw = nand_write_oob_std; - ecc->read_subpage = sunxi_nfc_hw_ecc_read_subpage; return 0; } diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c index 49b286c6c10f..9d40b793b1c4 100644 --- a/drivers/mtd/nand/tango_nand.c +++ b/drivers/mtd/nand/tango_nand.c @@ -303,7 +303,7 @@ static int tango_write_page(struct mtd_info *mtd, struct nand_chip *chip, const u8 *buf, int oob_required, int page) { struct tango_nfc *nfc = to_tango_nfc(chip->controller); - int err, len = mtd->writesize; + int err, status, len = mtd->writesize; /* Calling tango_write_oob() would send PAGEPROG twice */ if (oob_required) @@ -314,6 +314,10 @@ static int tango_write_page(struct mtd_info *mtd, struct nand_chip *chip, if (err) return err; + status = chip->waitfunc(mtd, chip); + if (status & NAND_STATUS_FAIL) + return -EIO; + return 0; } @@ -340,7 +344,7 @@ static void aux_write(struct nand_chip *chip, const u8 **buf, int len, int *pos) if (!*buf) { /* skip over "len" bytes */ - chip->cmdfunc(mtd, NAND_CMD_SEQIN, *pos, -1); + chip->cmdfunc(mtd, NAND_CMD_RNDIN, *pos, -1); } else { tango_write_buf(mtd, *buf, len); *buf += len; @@ -431,9 +435,16 @@ static int tango_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, static int tango_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, const u8 *buf, int oob_required, int page) { + int status; + chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0, page); raw_write(chip, buf, chip->oob_poi); chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); + + status = chip->waitfunc(mtd, chip); + if (status & NAND_STATUS_FAIL) + return -EIO; + return 0; } @@ -484,9 +495,8 @@ static u32 to_ticks(int kHz, int ps) return 
DIV_ROUND_UP_ULL((u64)kHz * ps, NSEC_PER_SEC); } -static int tango_set_timings(struct mtd_info *mtd, - const struct nand_data_interface *conf, - bool check_only) +static int tango_set_timings(struct mtd_info *mtd, int csline, + const struct nand_data_interface *conf) { const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf); struct nand_chip *chip = mtd_to_nand(mtd); @@ -498,7 +508,7 @@ static int tango_set_timings(struct mtd_info *mtd, if (IS_ERR(sdr)) return PTR_ERR(sdr); - if (check_only) + if (csline == NAND_DATA_IFACE_CHECK_ONLY) return 0; Trdy = to_ticks(kHz, sdr->tCEA_max - sdr->tREA_max); diff --git a/drivers/mtd/nand/vf610_nfc.c b/drivers/mtd/nand/vf610_nfc.c index 3ea4bb19e12d..744ab10e8962 100644 --- a/drivers/mtd/nand/vf610_nfc.c +++ b/drivers/mtd/nand/vf610_nfc.c @@ -703,6 +703,8 @@ static int vf610_nfc_probe(struct platform_device *pdev) chip->read_buf = vf610_nfc_read_buf; chip->write_buf = vf610_nfc_write_buf; chip->select_chip = vf610_nfc_select_chip; + chip->onfi_set_features = nand_onfi_get_set_features_notsupp; + chip->onfi_get_features = nand_onfi_get_set_features_notsupp; chip->options |= NAND_NO_SUBPAGE_WRITE; diff --git a/drivers/mtd/parsers/Kconfig b/drivers/mtd/parsers/Kconfig new file mode 100644 index 000000000000..d206b3c533bc --- /dev/null +++ b/drivers/mtd/parsers/Kconfig @@ -0,0 +1,8 @@ +config MTD_PARSER_TRX + tristate "Parser for TRX format partitions" + depends on MTD && (BCM47XX || ARCH_BCM_5301X || COMPILE_TEST) + help + TRX is a firmware format used by Broadcom on their devices. It + may contain up to 3/4 partitions (depending on the version). + This driver will parse TRX header and report at least two partitions: + kernel and rootfs. diff --git a/drivers/mtd/parsers/Makefile b/drivers/mtd/parsers/Makefile new file mode 100644 index 000000000000..4d9024e0be3b --- /dev/null +++ b/drivers/mtd/parsers/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MTD_PARSER_TRX) += parser_trx.o diff --git a/drivers/mtd/parsers/parser_trx.c b/drivers/mtd/parsers/parser_trx.c new file mode 100644 index 000000000000..df360a75e1eb --- /dev/null +++ b/drivers/mtd/parsers/parser_trx.c @@ -0,0 +1,126 @@ +/* + * Parser for TRX format partitions + * + * Copyright (C) 2012 - 2017 Rafał Miłecki <rafal@milecki.pl> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation.
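/*
 * Worked example for the tango to_ticks() helper in the hunk above
 * (assumed figures): with a 100000 kHz controller clock, a 30000 ps
 * (30 ns) delay becomes
 *
 *   DIV_ROUND_UP_ULL(100000ULL * 30000, NSEC_PER_SEC) = 3 ticks.
 *
 * A tick count is Hz * seconds; quoting the clock in kHz (10^3 Hz)
 * and the delay in ps (10^-12 s) leaves a net factor of 10^-9, so the
 * product is divided by 10^9 = NSEC_PER_SEC, rounding up so the
 * timing constraint is never violated.
 */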
+ * + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> + +#define TRX_PARSER_MAX_PARTS 4 + +/* Magics */ +#define TRX_MAGIC 0x30524448 +#define UBI_EC_MAGIC 0x23494255 /* UBI# */ + +struct trx_header { + uint32_t magic; + uint32_t length; + uint32_t crc32; + uint16_t flags; + uint16_t version; + uint32_t offset[3]; +} __packed; + +static const char *parser_trx_data_part_name(struct mtd_info *master, + size_t offset) +{ + uint32_t buf; + size_t bytes_read; + int err; + + err = mtd_read(master, offset, sizeof(buf), &bytes_read, + (uint8_t *)&buf); + if (err && !mtd_is_bitflip(err)) { + pr_err("mtd_read error while parsing (offset: 0x%zX): %d\n", + offset, err); + goto out_default; + } + + if (buf == UBI_EC_MAGIC) + return "ubi"; + +out_default: + return "rootfs"; +} + +static int parser_trx_parse(struct mtd_info *mtd, + const struct mtd_partition **pparts, + struct mtd_part_parser_data *data) +{ + struct mtd_partition *parts; + struct mtd_partition *part; + struct trx_header trx; + size_t bytes_read; + uint8_t curr_part = 0, i = 0; + int err; + + parts = kzalloc(sizeof(struct mtd_partition) * TRX_PARSER_MAX_PARTS, + GFP_KERNEL); + if (!parts) + return -ENOMEM; + + err = mtd_read(mtd, 0, sizeof(trx), &bytes_read, (uint8_t *)&trx); + if (err) { + pr_err("MTD reading error: %d\n", err); + kfree(parts); + return err; + } + + if (trx.magic != TRX_MAGIC) { + kfree(parts); + return -ENOENT; + } + + /* We have LZMA loader if there is address in offset[2] */ + if (trx.offset[2]) { + part = &parts[curr_part++]; + part->name = "loader"; + part->offset = trx.offset[i]; + i++; + } + + if (trx.offset[i]) { + part = &parts[curr_part++]; + part->name = "linux"; + part->offset = trx.offset[i]; + i++; + } + + if (trx.offset[i]) { + part = &parts[curr_part++]; + part->name = parser_trx_data_part_name(mtd, trx.offset[i]); + part->offset = trx.offset[i]; + i++; + } + + /* + * Assume that every partition ends at the beginning of the one it is + * followed by. + */ + for (i = 0; i < curr_part; i++) { + u64 next_part_offset = (i < curr_part - 1) ? + parts[i + 1].offset : mtd->size; + + parts[i].size = next_part_offset - parts[i].offset; + } + + *pparts = parts; + return i; +}; + +static struct mtd_part_parser mtd_parser_trx = { + .parse_fn = parser_trx_parse, + .name = "trx", +}; +module_mtd_part_parser(mtd_parser_trx); + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Parser for TRX format partitions"); diff --git a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig index bfdfb1e72b38..293c8a4d1e49 100644 --- a/drivers/mtd/spi-nor/Kconfig +++ b/drivers/mtd/spi-nor/Kconfig @@ -108,7 +108,7 @@ config SPI_INTEL_SPI_PLATFORM config SPI_STM32_QUADSPI tristate "STM32 Quad SPI controller" - depends on ARCH_STM32 + depends on ARCH_STM32 || COMPILE_TEST help This enables support for the STM32 Quad SPI controller. We only connect the NOR to this controller. 
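/*
 * Byte-by-byte view of the magic constants above (little-endian):
 *   TRX_MAGIC    0x30524448 == "HDR0" ('H'=0x48 'D'=0x44 'R'=0x52 '0'=0x30)
 *   UBI_EC_MAGIC 0x23494255 == "UBI#" ('U'=0x55 'B'=0x42 'I'=0x49 '#'=0x23)
 * so parser_trx_data_part_name() simply peeks at the first 32-bit word
 * of the data partition and names it "ubi" when a UBI erase-counter
 * header sits there, "rootfs" otherwise.
 */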
diff --git a/drivers/mtd/spi-nor/aspeed-smc.c b/drivers/mtd/spi-nor/aspeed-smc.c index 56051d30f000..0106357421bd 100644 --- a/drivers/mtd/spi-nor/aspeed-smc.c +++ b/drivers/mtd/spi-nor/aspeed-smc.c @@ -19,6 +19,7 @@ #include <linux/mtd/spi-nor.h> #include <linux/of.h> #include <linux/of_platform.h> +#include <linux/sizes.h> #include <linux/sysfs.h> #define DEVICE_NAME "aspeed-smc" @@ -97,6 +98,7 @@ struct aspeed_smc_chip { struct aspeed_smc_controller *controller; void __iomem *ctl; /* control register */ void __iomem *ahb_base; /* base of chip window */ + u32 ahb_window_size; /* chip mapping window size */ u32 ctl_val[smc_max]; /* control settings */ enum aspeed_smc_flash_type type; /* what type of flash */ struct spi_nor nor; @@ -109,6 +111,7 @@ struct aspeed_smc_controller { const struct aspeed_smc_info *info; /* type info of controller */ void __iomem *regs; /* controller registers */ void __iomem *ahb_base; /* per-chip windows resource */ + u32 ahb_window_size; /* full mapping window size */ struct aspeed_smc_chip *chips[0]; /* pointers to attached chips */ }; @@ -180,8 +183,7 @@ struct aspeed_smc_controller { #define CONTROL_KEEP_MASK \ (CONTROL_AAF_MODE | CONTROL_CE_INACTIVE_MASK | CONTROL_CLK_DIV4 | \ - CONTROL_IO_DUMMY_MASK | CONTROL_CLOCK_FREQ_SEL_MASK | \ - CONTROL_LSB_FIRST | CONTROL_CLOCK_MODE_3) + CONTROL_CLOCK_FREQ_SEL_MASK | CONTROL_LSB_FIRST | CONTROL_CLOCK_MODE_3) /* * The Segment Register uses a 8MB unit to encode the start address @@ -194,6 +196,10 @@ struct aspeed_smc_controller { #define SEGMENT_ADDR_REG0 0x30 #define SEGMENT_ADDR_START(_r) ((((_r) >> 16) & 0xFF) << 23) #define SEGMENT_ADDR_END(_r) ((((_r) >> 24) & 0xFF) << 23) +#define SEGMENT_ADDR_VALUE(start, end) \ + (((((start) >> 23) & 0xFF) << 16) | ((((end) >> 23) & 0xFF) << 24)) +#define SEGMENT_ADDR_REG(controller, cs) \ + ((controller)->regs + SEGMENT_ADDR_REG0 + (cs) * 4) /* * In user mode all data bytes read or written to the chip decode address @@ -439,8 +445,7 @@ static void __iomem *aspeed_smc_chip_base(struct aspeed_smc_chip *chip, u32 reg; if (controller->info->nce > 1) { - reg = readl(controller->regs + SEGMENT_ADDR_REG0 + - chip->cs * 4); + reg = readl(SEGMENT_ADDR_REG(controller, chip->cs)); if (SEGMENT_ADDR_START(reg) >= SEGMENT_ADDR_END(reg)) return NULL; @@ -451,6 +456,146 @@ static void __iomem *aspeed_smc_chip_base(struct aspeed_smc_chip *chip, return controller->ahb_base + offset; } +static u32 aspeed_smc_ahb_base_phy(struct aspeed_smc_controller *controller) +{ + u32 seg0_val = readl(SEGMENT_ADDR_REG(controller, 0)); + + return SEGMENT_ADDR_START(seg0_val); +} + +static u32 chip_set_segment(struct aspeed_smc_chip *chip, u32 cs, u32 start, + u32 size) +{ + struct aspeed_smc_controller *controller = chip->controller; + void __iomem *seg_reg; + u32 seg_oldval, seg_newval, ahb_base_phy, end; + + ahb_base_phy = aspeed_smc_ahb_base_phy(controller); + + seg_reg = SEGMENT_ADDR_REG(controller, cs); + seg_oldval = readl(seg_reg); + + /* + * If the chip size is not specified, use the default segment + * size, but take into account the possible overlap with the + * previous segment + */ + if (!size) + size = SEGMENT_ADDR_END(seg_oldval) - start; + + /* + * The segment cannot exceed the maximum window size of the + * controller. 
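/*
 * Worked example of the segment-register encoding above, with
 * illustrative addresses: start and end are stored in 8 MB units
 * (1 << 23).  Mapping a 32 MB window at 0x20000000:
 *
 *   SEGMENT_ADDR_VALUE(0x20000000, 0x22000000)
 *     start field = (0x20000000 >> 23) & 0xFF = 0x40  (bits 23:16)
 *     end field   = (0x22000000 >> 23) & 0xFF = 0x44  (bits 31:24)
 *     register    = 0x44400000
 */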
+ */ + if (start + size > ahb_base_phy + controller->ahb_window_size) { + size = ahb_base_phy + controller->ahb_window_size - start; + dev_warn(chip->nor.dev, "CE%d window resized to %dMB", + cs, size >> 20); + } + + end = start + size; + seg_newval = SEGMENT_ADDR_VALUE(start, end); + writel(seg_newval, seg_reg); + + /* + * Restore default value if something goes wrong. The chip + * might have set some bogus value and we would loose access + * to the chip. + */ + if (seg_newval != readl(seg_reg)) { + dev_err(chip->nor.dev, "CE%d window invalid", cs); + writel(seg_oldval, seg_reg); + start = SEGMENT_ADDR_START(seg_oldval); + end = SEGMENT_ADDR_END(seg_oldval); + size = end - start; + } + + dev_info(chip->nor.dev, "CE%d window [ 0x%.8x - 0x%.8x ] %dMB", + cs, start, end, size >> 20); + + return size; +} + +/* + * The segment register defines the mapping window on the AHB bus and + * it needs to be configured depending on the chip size. The segment + * register of the following CE also needs to be tuned in order to + * provide a contiguous window across multiple chips. + * + * This is expected to be called in increasing CE order + */ +static u32 aspeed_smc_chip_set_segment(struct aspeed_smc_chip *chip) +{ + struct aspeed_smc_controller *controller = chip->controller; + u32 ahb_base_phy, start; + u32 size = chip->nor.mtd.size; + + /* + * Each controller has a chip size limit for direct memory + * access + */ + if (size > controller->info->maxsize) + size = controller->info->maxsize; + + /* + * The AST2400 SPI controller only handles one chip and does + * not have segment registers. Let's use the chip size for the + * AHB window. + */ + if (controller->info == &spi_2400_info) + goto out; + + /* + * The AST2500 SPI controller has a HW bug when the CE0 chip + * size reaches 128MB. Enforce a size limit of 120MB to + * prevent the controller from using bogus settings in the + * segment register. + */ + if (chip->cs == 0 && controller->info == &spi_2500_info && + size == SZ_128M) { + size = 120 << 20; + dev_info(chip->nor.dev, + "CE%d window resized to %dMB (AST2500 HW quirk)", + chip->cs, size >> 20); + } + + ahb_base_phy = aspeed_smc_ahb_base_phy(controller); + + /* + * As a start address for the current segment, use the default + * start address if we are handling CE0 or use the previous + * segment ending address + */ + if (chip->cs) { + u32 prev = readl(SEGMENT_ADDR_REG(controller, chip->cs - 1)); + + start = SEGMENT_ADDR_END(prev); + } else { + start = ahb_base_phy; + } + + size = chip_set_segment(chip, chip->cs, start, size); + + /* Update chip base address on the AHB bus */ + chip->ahb_base = controller->ahb_base + (start - ahb_base_phy); + + /* + * Now, make sure the next segment does not overlap with the + * current one we just configured, even if there is no + * available chip. That could break access in Command Mode. 
+ */ + if (chip->cs < controller->info->nce - 1) + chip_set_segment(chip, chip->cs + 1, start + size, 0); + +out: + if (size < chip->nor.mtd.size) + dev_warn(chip->nor.dev, + "CE%d window too small for chip %dMB", + chip->cs, (u32)chip->nor.mtd.size >> 20); + + return size; +} + static void aspeed_smc_chip_enable_write(struct aspeed_smc_chip *chip) { struct aspeed_smc_controller *controller = chip->controller; @@ -524,7 +669,7 @@ static int aspeed_smc_chip_setup_init(struct aspeed_smc_chip *chip, */ chip->ahb_base = aspeed_smc_chip_base(chip, res); if (!chip->ahb_base) { - dev_warn(chip->nor.dev, "CE segment window closed.\n"); + dev_warn(chip->nor.dev, "CE%d window closed", chip->cs); return -EINVAL; } @@ -571,6 +716,9 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip) if (chip->nor.addr_width == 4 && info->set_4b) info->set_4b(chip); + /* This is for direct AHB access when using Command Mode. */ + chip->ahb_window_size = aspeed_smc_chip_set_segment(chip); + /* * base mode has not been optimized yet. use it for writes. */ @@ -585,14 +733,12 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip) * TODO: Adjust clocks if fast read is supported and interpret * SPI-NOR flags to adjust controller settings. */ - switch (chip->nor.flash_read) { - case SPI_NOR_NORMAL: - cmd = CONTROL_COMMAND_MODE_NORMAL; - break; - case SPI_NOR_FAST: - cmd = CONTROL_COMMAND_MODE_FREAD; - break; - default: + if (chip->nor.read_proto == SNOR_PROTO_1_1_1) { + if (chip->nor.read_dummy == 0) + cmd = CONTROL_COMMAND_MODE_NORMAL; + else + cmd = CONTROL_COMMAND_MODE_FREAD; + } else { dev_err(chip->nor.dev, "unsupported SPI read mode\n"); return -EINVAL; } @@ -608,6 +754,11 @@ static int aspeed_smc_chip_setup_finish(struct aspeed_smc_chip *chip) static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller, struct device_node *np, struct resource *r) { + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; const struct aspeed_smc_info *info = controller->info; struct device *dev = controller->dev; struct device_node *child; @@ -671,11 +822,11 @@ static int aspeed_smc_setup_flash(struct aspeed_smc_controller *controller, break; /* - * TODO: Add support for SPI_NOR_QUAD and SPI_NOR_DUAL + * TODO: Add support for Dual and Quad SPI protocols * attach when board support is present as determined * by of property. 
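/*
 * The spi_nor_hwcaps conversion in this and the following hunks
 * replaces the old single enum read_mode argument of spi_nor_scan():
 * each controller now advertises a bitmask of every read and
 * page-program protocol it can drive, and the core settles on a
 * protocol supported by both controller and flash.  Minimal
 * single-bit-SPI sketch, as used by several drivers below:
 *
 *   const struct spi_nor_hwcaps hwcaps = {
 *       .mask = SNOR_HWCAPS_READ | SNOR_HWCAPS_READ_FAST | SNOR_HWCAPS_PP,
 *   };
 *   ret = spi_nor_scan(nor, NULL, &hwcaps);
 */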
*/ - ret = spi_nor_scan(nor, NULL, SPI_NOR_NORMAL); + ret = spi_nor_scan(nor, NULL, &hwcaps); if (ret) break; @@ -731,6 +882,8 @@ static int aspeed_smc_probe(struct platform_device *pdev) if (IS_ERR(controller->ahb_base)) return PTR_ERR(controller->ahb_base); + controller->ahb_window_size = resource_size(res); + ret = aspeed_smc_setup_flash(controller, np, res); if (ret) dev_err(dev, "Aspeed SMC probe failed %d\n", ret); diff --git a/drivers/mtd/spi-nor/atmel-quadspi.c b/drivers/mtd/spi-nor/atmel-quadspi.c index 47937d9beec6..ba76fa8f2031 100644 --- a/drivers/mtd/spi-nor/atmel-quadspi.c +++ b/drivers/mtd/spi-nor/atmel-quadspi.c @@ -275,14 +275,48 @@ static void atmel_qspi_debug_command(struct atmel_qspi *aq, static int atmel_qspi_run_command(struct atmel_qspi *aq, const struct atmel_qspi_command *cmd, - u32 ifr_tfrtyp, u32 ifr_width) + u32 ifr_tfrtyp, enum spi_nor_protocol proto) { u32 iar, icr, ifr, sr; int err = 0; iar = 0; icr = 0; - ifr = ifr_tfrtyp | ifr_width; + ifr = ifr_tfrtyp; + + /* Set the SPI protocol */ + switch (proto) { + case SNOR_PROTO_1_1_1: + ifr |= QSPI_IFR_WIDTH_SINGLE_BIT_SPI; + break; + + case SNOR_PROTO_1_1_2: + ifr |= QSPI_IFR_WIDTH_DUAL_OUTPUT; + break; + + case SNOR_PROTO_1_1_4: + ifr |= QSPI_IFR_WIDTH_QUAD_OUTPUT; + break; + + case SNOR_PROTO_1_2_2: + ifr |= QSPI_IFR_WIDTH_DUAL_IO; + break; + + case SNOR_PROTO_1_4_4: + ifr |= QSPI_IFR_WIDTH_QUAD_IO; + break; + + case SNOR_PROTO_2_2_2: + ifr |= QSPI_IFR_WIDTH_DUAL_CMD; + break; + + case SNOR_PROTO_4_4_4: + ifr |= QSPI_IFR_WIDTH_QUAD_CMD; + break; + + default: + return -EINVAL; + } /* Compute instruction parameters */ if (cmd->enable.bits.instruction) { @@ -434,7 +468,7 @@ static int atmel_qspi_read_reg(struct spi_nor *nor, u8 opcode, cmd.rx_buf = buf; cmd.buf_len = len; return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ, - QSPI_IFR_WIDTH_SINGLE_BIT_SPI); + nor->reg_proto); } static int atmel_qspi_write_reg(struct spi_nor *nor, u8 opcode, @@ -450,7 +484,7 @@ static int atmel_qspi_write_reg(struct spi_nor *nor, u8 opcode, cmd.tx_buf = buf; cmd.buf_len = len; return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE, - QSPI_IFR_WIDTH_SINGLE_BIT_SPI); + nor->reg_proto); } static ssize_t atmel_qspi_write(struct spi_nor *nor, loff_t to, size_t len, @@ -469,7 +503,7 @@ static ssize_t atmel_qspi_write(struct spi_nor *nor, loff_t to, size_t len, cmd.tx_buf = write_buf; cmd.buf_len = len; ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE_MEM, - QSPI_IFR_WIDTH_SINGLE_BIT_SPI); + nor->write_proto); return (ret < 0) ? 
ret : len; } @@ -484,7 +518,7 @@ static int atmel_qspi_erase(struct spi_nor *nor, loff_t offs) cmd.instruction = nor->erase_opcode; cmd.address = (u32)offs; return atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_WRITE, - QSPI_IFR_WIDTH_SINGLE_BIT_SPI); + nor->reg_proto); } static ssize_t atmel_qspi_read(struct spi_nor *nor, loff_t from, size_t len, @@ -493,27 +527,8 @@ static ssize_t atmel_qspi_read(struct spi_nor *nor, loff_t from, size_t len, struct atmel_qspi *aq = nor->priv; struct atmel_qspi_command cmd; u8 num_mode_cycles, num_dummy_cycles; - u32 ifr_width; ssize_t ret; - switch (nor->flash_read) { - case SPI_NOR_NORMAL: - case SPI_NOR_FAST: - ifr_width = QSPI_IFR_WIDTH_SINGLE_BIT_SPI; - break; - - case SPI_NOR_DUAL: - ifr_width = QSPI_IFR_WIDTH_DUAL_OUTPUT; - break; - - case SPI_NOR_QUAD: - ifr_width = QSPI_IFR_WIDTH_QUAD_OUTPUT; - break; - - default: - return -EINVAL; - } - if (nor->read_dummy >= 2) { num_mode_cycles = 2; num_dummy_cycles = nor->read_dummy - 2; @@ -536,7 +551,7 @@ static ssize_t atmel_qspi_read(struct spi_nor *nor, loff_t from, size_t len, cmd.rx_buf = read_buf; cmd.buf_len = len; ret = atmel_qspi_run_command(aq, &cmd, QSPI_IFR_TFRTYP_TRSFR_READ_MEM, - ifr_width); + nor->read_proto); return (ret < 0) ? ret : len; } @@ -590,6 +605,20 @@ static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) static int atmel_qspi_probe(struct platform_device *pdev) { + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_READ_1_1_2 | + SNOR_HWCAPS_READ_1_2_2 | + SNOR_HWCAPS_READ_2_2_2 | + SNOR_HWCAPS_READ_1_1_4 | + SNOR_HWCAPS_READ_1_4_4 | + SNOR_HWCAPS_READ_4_4_4 | + SNOR_HWCAPS_PP | + SNOR_HWCAPS_PP_1_1_4 | + SNOR_HWCAPS_PP_1_4_4 | + SNOR_HWCAPS_PP_4_4_4, + }; struct device_node *child, *np = pdev->dev.of_node; struct atmel_qspi *aq; struct resource *res; @@ -679,7 +708,7 @@ static int atmel_qspi_probe(struct platform_device *pdev) if (err) goto disable_clk; - err = spi_nor_scan(nor, NULL, SPI_NOR_QUAD); + err = spi_nor_scan(nor, NULL, &hwcaps); if (err) goto disable_clk; diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c index 9f8102de1b16..53c7d8e0327a 100644 --- a/drivers/mtd/spi-nor/cadence-quadspi.c +++ b/drivers/mtd/spi-nor/cadence-quadspi.c @@ -855,15 +855,14 @@ static int cqspi_set_protocol(struct spi_nor *nor, const int read) f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; if (read) { - switch (nor->flash_read) { - case SPI_NOR_NORMAL: - case SPI_NOR_FAST: + switch (nor->read_proto) { + case SNOR_PROTO_1_1_1: f_pdata->data_width = CQSPI_INST_TYPE_SINGLE; break; - case SPI_NOR_DUAL: + case SNOR_PROTO_1_1_2: f_pdata->data_width = CQSPI_INST_TYPE_DUAL; break; - case SPI_NOR_QUAD: + case SNOR_PROTO_1_1_4: f_pdata->data_width = CQSPI_INST_TYPE_QUAD; break; default: @@ -1069,6 +1068,13 @@ static void cqspi_controller_init(struct cqspi_st *cqspi) static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np) { + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_READ_1_1_2 | + SNOR_HWCAPS_READ_1_1_4 | + SNOR_HWCAPS_PP, + }; struct platform_device *pdev = cqspi->pdev; struct device *dev = &pdev->dev; struct cqspi_flash_pdata *f_pdata; @@ -1123,7 +1129,7 @@ static int cqspi_setup_flash(struct cqspi_st *cqspi, struct device_node *np) goto err; } - ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD); + ret = spi_nor_scan(nor, NULL, &hwcaps); if (ret) goto err; @@ -1277,7 +1283,7 @@ static const struct dev_pm_ops 
cqspi__dev_pm_ops = { #define CQSPI_DEV_PM_OPS NULL #endif -static struct of_device_id const cqspi_dt_ids[] = { +static const struct of_device_id cqspi_dt_ids[] = { {.compatible = "cdns,qspi-nor",}, { /* end of table */ } }; diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c index 1476135e0d50..f17d22435bfc 100644 --- a/drivers/mtd/spi-nor/fsl-quadspi.c +++ b/drivers/mtd/spi-nor/fsl-quadspi.c @@ -957,6 +957,10 @@ static void fsl_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) static int fsl_qspi_probe(struct platform_device *pdev) { + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ_1_1_4 | + SNOR_HWCAPS_PP, + }; struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; struct fsl_qspi *q; @@ -1065,7 +1069,7 @@ static int fsl_qspi_probe(struct platform_device *pdev) /* set the chip address for READID */ fsl_qspi_set_base_addr(q, nor); - ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD); + ret = spi_nor_scan(nor, NULL, &hwcaps); if (ret) goto mutex_failed; diff --git a/drivers/mtd/spi-nor/hisi-sfc.c b/drivers/mtd/spi-nor/hisi-sfc.c index a286350627a6..d1106832b9d5 100644 --- a/drivers/mtd/spi-nor/hisi-sfc.c +++ b/drivers/mtd/spi-nor/hisi-sfc.c @@ -120,19 +120,24 @@ static inline int wait_op_finish(struct hifmc_host *host) (reg & FMC_INT_OP_DONE), 0, FMC_WAIT_TIMEOUT); } -static int get_if_type(enum read_mode flash_read) +static int get_if_type(enum spi_nor_protocol proto) { enum hifmc_iftype if_type; - switch (flash_read) { - case SPI_NOR_DUAL: + switch (proto) { + case SNOR_PROTO_1_1_2: if_type = IF_TYPE_DUAL; break; - case SPI_NOR_QUAD: + case SNOR_PROTO_1_2_2: + if_type = IF_TYPE_DIO; + break; + case SNOR_PROTO_1_1_4: if_type = IF_TYPE_QUAD; break; - case SPI_NOR_NORMAL: - case SPI_NOR_FAST: + case SNOR_PROTO_1_4_4: + if_type = IF_TYPE_QIO; + break; + case SNOR_PROTO_1_1_1: default: if_type = IF_TYPE_STD; break; @@ -253,7 +258,10 @@ static int hisi_spi_nor_dma_transfer(struct spi_nor *nor, loff_t start_off, writel(FMC_DMA_LEN_SET(len), host->regbase + FMC_DMA_LEN); reg = OP_CFG_FM_CS(priv->chipselect); - if_type = get_if_type(nor->flash_read); + if (op_type == FMC_OP_READ) + if_type = get_if_type(nor->read_proto); + else + if_type = get_if_type(nor->write_proto); reg |= OP_CFG_MEM_IF_TYPE(if_type); if (op_type == FMC_OP_READ) reg |= OP_CFG_DUMMY_NUM(nor->read_dummy >> 3); @@ -321,6 +329,13 @@ static ssize_t hisi_spi_nor_write(struct spi_nor *nor, loff_t to, static int hisi_spi_nor_register(struct device_node *np, struct hifmc_host *host) { + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_READ_1_1_2 | + SNOR_HWCAPS_READ_1_1_4 | + SNOR_HWCAPS_PP, + }; struct device *dev = host->dev; struct spi_nor *nor; struct hifmc_priv *priv; @@ -362,7 +377,7 @@ static int hisi_spi_nor_register(struct device_node *np, nor->read = hisi_spi_nor_read; nor->write = hisi_spi_nor_write; nor->erase = NULL; - ret = spi_nor_scan(nor, NULL, SPI_NOR_QUAD); + ret = spi_nor_scan(nor, NULL, &hwcaps); if (ret) return ret; diff --git a/drivers/mtd/spi-nor/intel-spi.c b/drivers/mtd/spi-nor/intel-spi.c index 986a3d020a3a..8a596bfeddff 100644 --- a/drivers/mtd/spi-nor/intel-spi.c +++ b/drivers/mtd/spi-nor/intel-spi.c @@ -715,6 +715,11 @@ static void intel_spi_fill_partition(struct intel_spi *ispi, struct intel_spi *intel_spi_probe(struct device *dev, struct resource *mem, const struct intel_spi_boardinfo *info) { + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + 
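In the hisi-sfc hunk above, the interface type is no longer derived from one global nor->flash_read; reads use nor->read_proto and writes nor->write_proto, since the two directions may legitimately run at different widths. A sketch with simplified types (all names invented):

/* Sketch: per-direction protocol selection. */
enum sketch_dir    { OP_READ, OP_WRITE };
enum sketch_proto2 { P_1_1_1, P_1_1_2, P_1_1_4 };

struct sketch_nor {
	enum sketch_proto2 read_proto;
	enum sketch_proto2 write_proto;
};

static enum sketch_proto2 proto_for_op(const struct sketch_nor *nor,
				       enum sketch_dir op)
{
	/* A flash may e.g. read 1-1-4 but program only 1-1-1. */
	return (op == OP_READ) ? nor->read_proto : nor->write_proto;
}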
SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; struct mtd_partition part; struct intel_spi *ispi; int ret; @@ -746,7 +751,7 @@ struct intel_spi *intel_spi_probe(struct device *dev, ispi->nor.write = intel_spi_write; ispi->nor.erase = intel_spi_erase; - ret = spi_nor_scan(&ispi->nor, NULL, SPI_NOR_NORMAL); + ret = spi_nor_scan(&ispi->nor, NULL, &hwcaps); if (ret) { dev_info(dev, "failed to locate the chip\n"); return ERR_PTR(ret); diff --git a/drivers/mtd/spi-nor/mtk-quadspi.c b/drivers/mtd/spi-nor/mtk-quadspi.c index b6377707ce32..8a20ec4991c8 100644 --- a/drivers/mtd/spi-nor/mtk-quadspi.c +++ b/drivers/mtd/spi-nor/mtk-quadspi.c @@ -123,20 +123,20 @@ static void mt8173_nor_set_read_mode(struct mt8173_nor *mt8173_nor) { struct spi_nor *nor = &mt8173_nor->nor; - switch (nor->flash_read) { - case SPI_NOR_FAST: + switch (nor->read_proto) { + case SNOR_PROTO_1_1_1: writeb(nor->read_opcode, mt8173_nor->base + MTK_NOR_PRGDATA3_REG); writeb(MTK_NOR_FAST_READ, mt8173_nor->base + MTK_NOR_CFG1_REG); break; - case SPI_NOR_DUAL: + case SNOR_PROTO_1_1_2: writeb(nor->read_opcode, mt8173_nor->base + MTK_NOR_PRGDATA3_REG); writeb(MTK_NOR_DUAL_READ_EN, mt8173_nor->base + MTK_NOR_DUAL_REG); break; - case SPI_NOR_QUAD: + case SNOR_PROTO_1_1_4: writeb(nor->read_opcode, mt8173_nor->base + MTK_NOR_PRGDATA4_REG); writeb(MTK_NOR_QUAD_READ_EN, mt8173_nor->base + @@ -408,6 +408,11 @@ static int mt8173_nor_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, static int mtk_nor_init(struct mt8173_nor *mt8173_nor, struct device_node *flash_node) { + const struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_READ_1_1_2 | + SNOR_HWCAPS_PP, + }; int ret; struct spi_nor *nor; @@ -426,7 +431,7 @@ static int mtk_nor_init(struct mt8173_nor *mt8173_nor, nor->write_reg = mt8173_nor_write_reg; nor->mtd.name = "mtk_nor"; /* initialized with NULL */ - ret = spi_nor_scan(nor, NULL, SPI_NOR_DUAL); + ret = spi_nor_scan(nor, NULL, &hwcaps); if (ret) return ret; diff --git a/drivers/mtd/spi-nor/nxp-spifi.c b/drivers/mtd/spi-nor/nxp-spifi.c index 73a14f40928b..15374216d4d9 100644 --- a/drivers/mtd/spi-nor/nxp-spifi.c +++ b/drivers/mtd/spi-nor/nxp-spifi.c @@ -240,13 +240,12 @@ static int nxp_spifi_erase(struct spi_nor *nor, loff_t offs) static int nxp_spifi_setup_memory_cmd(struct nxp_spifi *spifi) { - switch (spifi->nor.flash_read) { - case SPI_NOR_NORMAL: - case SPI_NOR_FAST: + switch (spifi->nor.read_proto) { + case SNOR_PROTO_1_1_1: spifi->mcmd = SPIFI_CMD_FIELDFORM_ALL_SERIAL; break; - case SPI_NOR_DUAL: - case SPI_NOR_QUAD: + case SNOR_PROTO_1_1_2: + case SNOR_PROTO_1_1_4: spifi->mcmd = SPIFI_CMD_FIELDFORM_QUAD_DUAL_DATA; break; default: @@ -274,7 +273,11 @@ static void nxp_spifi_dummy_id_read(struct spi_nor *nor) static int nxp_spifi_setup_flash(struct nxp_spifi *spifi, struct device_node *np) { - enum read_mode flash_read; + struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; u32 ctrl, property; u16 mode = 0; int ret; @@ -308,13 +311,12 @@ static int nxp_spifi_setup_flash(struct nxp_spifi *spifi, if (mode & SPI_RX_DUAL) { ctrl |= SPIFI_CTRL_DUAL; - flash_read = SPI_NOR_DUAL; + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; } else if (mode & SPI_RX_QUAD) { ctrl &= ~SPIFI_CTRL_DUAL; - flash_read = SPI_NOR_QUAD; + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; } else { ctrl |= SPIFI_CTRL_DUAL; - flash_read = SPI_NOR_NORMAL; } switch (mode & (SPI_CPHA | SPI_CPOL)) { @@ -351,7 +353,7 @@ static int nxp_spifi_setup_flash(struct nxp_spifi *spifi, */ 
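The nxp-spifi hunk above shows the other direction of the new API: instead of collapsing the device-tree bus width into one enum read_mode value, the driver starts from a base mask and ORs in the dual or quad read capability. A compact sketch, with invented constants standing in for SPI_RX_* and SNOR_HWCAPS_*:

/* Sketch: build the capability mask from DT-derived mode bits. */
#include <stdint.h>

#define RX_DUAL            (1u << 0)
#define RX_QUAD            (1u << 1)

#define HWCAPS_READ        (1u << 0)
#define HWCAPS_READ_FAST   (1u << 1)
#define HWCAPS_READ_1_1_2  (1u << 2)
#define HWCAPS_READ_1_1_4  (1u << 6)
#define HWCAPS_PP          (1u << 16)

static uint32_t build_hwcaps(uint16_t mode)
{
	/* Single-bit (fast) read and page program always work. */
	uint32_t mask = HWCAPS_READ | HWCAPS_READ_FAST | HWCAPS_PP;

	if (mode & RX_DUAL)
		mask |= HWCAPS_READ_1_1_2;
	else if (mode & RX_QUAD)
		mask |= HWCAPS_READ_1_1_4;

	return mask;	/* plain single-bit SPI adds nothing extra */
}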
nxp_spifi_dummy_id_read(&spifi->nor); - ret = spi_nor_scan(&spifi->nor, NULL, flash_read); + ret = spi_nor_scan(&spifi->nor, NULL, &hwcaps); if (ret) { dev_err(spifi->dev, "device scan failed\n"); return ret; diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c index dea8c9cbadf0..1413828ff1fb 100644 --- a/drivers/mtd/spi-nor/spi-nor.c +++ b/drivers/mtd/spi-nor/spi-nor.c @@ -150,24 +150,6 @@ static int read_cr(struct spi_nor *nor) } /* - * Dummy Cycle calculation for different type of read. - * It can be used to support more commands with - * different dummy cycle requirements. - */ -static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor) -{ - switch (nor->flash_read) { - case SPI_NOR_FAST: - case SPI_NOR_DUAL: - case SPI_NOR_QUAD: - return 8; - case SPI_NOR_NORMAL: - return 0; - } - return 0; -} - -/* * Write status register 1 byte * Returns negative if error occurred. */ @@ -221,6 +203,10 @@ static inline u8 spi_nor_convert_3to4_read(u8 opcode) { SPINOR_OP_READ_1_2_2, SPINOR_OP_READ_1_2_2_4B }, { SPINOR_OP_READ_1_1_4, SPINOR_OP_READ_1_1_4_4B }, { SPINOR_OP_READ_1_4_4, SPINOR_OP_READ_1_4_4_4B }, + + { SPINOR_OP_READ_1_1_1_DTR, SPINOR_OP_READ_1_1_1_DTR_4B }, + { SPINOR_OP_READ_1_2_2_DTR, SPINOR_OP_READ_1_2_2_DTR_4B }, + { SPINOR_OP_READ_1_4_4_DTR, SPINOR_OP_READ_1_4_4_DTR_4B }, }; return spi_nor_convert_opcode(opcode, spi_nor_3to4_read, @@ -1022,10 +1008,12 @@ static const struct flash_info spi_nor_ids[] = { { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, - { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, + { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "mx25u25635f", INFO(0xc22539, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_4B_OPCODES) }, { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, - { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) }, + { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "mx66u51235f", INFO(0xc2253a, 0, 64 * 1024, 1024, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, + { "mx66l1g45g", INFO(0xc2201b, 0, 64 * 1024, 2048, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, /* Micron */ @@ -1036,7 +1024,7 @@ static const struct flash_info spi_nor_ids[] = { { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) }, { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, - { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) }, + { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, { "n25q256ax1", INFO(0x20bb19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) }, { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, @@ -1076,6 +1064,7 @@ static const struct flash_info spi_nor_ids[] = { { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) }, { "s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) }, { "s25fl208k", INFO(0x014014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ) }, + { "s25fl064l", INFO(0x016017, 0, 64 * 1024, 128, SECT_4K | 
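The spi_nor_convert_3to4_read() hunk above just grows the lookup table with the DTR opcodes; the conversion itself stays table-driven. A self-contained sketch of the pattern (the opcode values match common JEDEC encodings but are illustrative here):

/* Sketch: table-driven 3-byte -> 4-byte address opcode conversion. */
#include <stddef.h>
#include <stdint.h>

static uint8_t convert_opcode(uint8_t op, const uint8_t table[][2], size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (table[i][0] == op)
			return table[i][1];

	return op;	/* no 4-byte variant known: keep the opcode */
}

static uint8_t to_4byte_read(uint8_t op)
{
	static const uint8_t read_3to4[][2] = {
		{ 0x03, 0x13 },	/* READ      -> READ 4B */
		{ 0x0b, 0x0c },	/* FAST READ -> FAST READ 4B */
		{ 0x0d, 0x0e },	/* 1-1-1 DTR -> 1-1-1 DTR 4B (new row) */
	};

	return convert_opcode(op, read_3to4,
			      sizeof(read_3to4) / sizeof(read_3to4[0]));
}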
SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ | SPI_NOR_4B_OPCODES) }, /* SST -- large erase sizes are "overlays", "sectors" are 4K */ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) }, @@ -1159,7 +1148,9 @@ static const struct flash_info spi_nor_ids[] = { { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, { "w25q80bl", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K) }, { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, - { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) }, + { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, + { "w25m512jv", INFO(0xef7119, 0, 64 * 1024, 1024, + SECT_4K | SPI_NOR_QUAD_READ | SPI_NOR_DUAL_READ) }, /* Catalyst / On Semiconductor -- non-JEDEC */ { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, @@ -1403,8 +1394,9 @@ static int macronix_quad_enable(struct spi_nor *nor) write_sr(nor, val | SR_QUAD_EN_MX); - if (spi_nor_wait_till_ready(nor)) - return 1; + ret = spi_nor_wait_till_ready(nor); + if (ret) + return ret; ret = read_sr(nor); if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) { @@ -1460,30 +1452,6 @@ static int spansion_quad_enable(struct spi_nor *nor) return 0; } -static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info) -{ - int status; - - switch (JEDEC_MFR(info)) { - case SNOR_MFR_MACRONIX: - status = macronix_quad_enable(nor); - if (status) { - dev_err(nor->dev, "Macronix quad-read not enabled\n"); - return -EINVAL; - } - return status; - case SNOR_MFR_MICRON: - return 0; - default: - status = spansion_quad_enable(nor); - if (status) { - dev_err(nor->dev, "Spansion quad-read not enabled\n"); - return -EINVAL; - } - return status; - } -} - static int spi_nor_check(struct spi_nor *nor) { if (!nor->dev || !nor->read || !nor->write || @@ -1536,8 +1504,349 @@ static int s3an_nor_scan(const struct flash_info *info, struct spi_nor *nor) return 0; } -int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) +struct spi_nor_read_command { + u8 num_mode_clocks; + u8 num_wait_states; + u8 opcode; + enum spi_nor_protocol proto; +}; + +struct spi_nor_pp_command { + u8 opcode; + enum spi_nor_protocol proto; +}; + +enum spi_nor_read_command_index { + SNOR_CMD_READ, + SNOR_CMD_READ_FAST, + SNOR_CMD_READ_1_1_1_DTR, + + /* Dual SPI */ + SNOR_CMD_READ_1_1_2, + SNOR_CMD_READ_1_2_2, + SNOR_CMD_READ_2_2_2, + SNOR_CMD_READ_1_2_2_DTR, + + /* Quad SPI */ + SNOR_CMD_READ_1_1_4, + SNOR_CMD_READ_1_4_4, + SNOR_CMD_READ_4_4_4, + SNOR_CMD_READ_1_4_4_DTR, + + /* Octo SPI */ + SNOR_CMD_READ_1_1_8, + SNOR_CMD_READ_1_8_8, + SNOR_CMD_READ_8_8_8, + SNOR_CMD_READ_1_8_8_DTR, + + SNOR_CMD_READ_MAX +}; + +enum spi_nor_pp_command_index { + SNOR_CMD_PP, + + /* Quad SPI */ + SNOR_CMD_PP_1_1_4, + SNOR_CMD_PP_1_4_4, + SNOR_CMD_PP_4_4_4, + + /* Octo SPI */ + SNOR_CMD_PP_1_1_8, + SNOR_CMD_PP_1_8_8, + SNOR_CMD_PP_8_8_8, + + SNOR_CMD_PP_MAX +}; + +struct spi_nor_flash_parameter { + u64 size; + u32 page_size; + + struct spi_nor_hwcaps hwcaps; + struct spi_nor_read_command reads[SNOR_CMD_READ_MAX]; + struct spi_nor_pp_command page_programs[SNOR_CMD_PP_MAX]; + + int (*quad_enable)(struct spi_nor *nor); +}; + +static void +spi_nor_set_read_settings(struct spi_nor_read_command *read, + u8 num_mode_clocks, + u8 num_wait_states, + u8 opcode, + enum spi_nor_protocol proto) { + read->num_mode_clocks = num_mode_clocks; + read->num_wait_states = num_wait_states; + read->opcode = opcode; + read->proto = proto; +} + +static void +spi_nor_set_pp_settings(struct spi_nor_pp_command 
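Buried in the same file is a small but real bugfix: macronix_quad_enable() used to turn any failure of spi_nor_wait_till_ready() into a bare return 1, losing the errno. The corrected shape, sketched standalone:

/* Sketch: propagate the helper's error code instead of flattening
 * it to 1, so callers can tell -ETIMEDOUT from -EIO.
 */
#include <errno.h>

static int wait_till_ready(void)
{
	return 0;	/* stub: real code returns 0 or a negative errno */
}

static int quad_enable_fixed(void)
{
	int ret = wait_till_ready();

	if (ret)
		return ret;	/* was: return 1; */

	return 0;
}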
*pp, + u8 opcode, + enum spi_nor_protocol proto) +{ + pp->opcode = opcode; + pp->proto = proto; +} + +static int spi_nor_init_params(struct spi_nor *nor, + const struct flash_info *info, + struct spi_nor_flash_parameter *params) +{ + /* Set legacy flash parameters as default. */ + memset(params, 0, sizeof(*params)); + + /* Set SPI NOR sizes. */ + params->size = info->sector_size * info->n_sectors; + params->page_size = info->page_size; + + /* (Fast) Read settings. */ + params->hwcaps.mask |= SNOR_HWCAPS_READ; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ], + 0, 0, SPINOR_OP_READ, + SNOR_PROTO_1_1_1); + + if (!(info->flags & SPI_NOR_NO_FR)) { + params->hwcaps.mask |= SNOR_HWCAPS_READ_FAST; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_FAST], + 0, 8, SPINOR_OP_READ_FAST, + SNOR_PROTO_1_1_1); + } + + if (info->flags & SPI_NOR_DUAL_READ) { + params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_2], + 0, 8, SPINOR_OP_READ_1_1_2, + SNOR_PROTO_1_1_2); + } + + if (info->flags & SPI_NOR_QUAD_READ) { + params->hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; + spi_nor_set_read_settings(¶ms->reads[SNOR_CMD_READ_1_1_4], + 0, 8, SPINOR_OP_READ_1_1_4, + SNOR_PROTO_1_1_4); + } + + /* Page Program settings. */ + params->hwcaps.mask |= SNOR_HWCAPS_PP; + spi_nor_set_pp_settings(¶ms->page_programs[SNOR_CMD_PP], + SPINOR_OP_PP, SNOR_PROTO_1_1_1); + + /* Select the procedure to set the Quad Enable bit. */ + if (params->hwcaps.mask & (SNOR_HWCAPS_READ_QUAD | + SNOR_HWCAPS_PP_QUAD)) { + switch (JEDEC_MFR(info)) { + case SNOR_MFR_MACRONIX: + params->quad_enable = macronix_quad_enable; + break; + + case SNOR_MFR_MICRON: + break; + + default: + params->quad_enable = spansion_quad_enable; + break; + } + } + + return 0; +} + +static int spi_nor_hwcaps2cmd(u32 hwcaps, const int table[][2], size_t size) +{ + size_t i; + + for (i = 0; i < size; i++) + if (table[i][0] == (int)hwcaps) + return table[i][1]; + + return -EINVAL; +} + +static int spi_nor_hwcaps_read2cmd(u32 hwcaps) +{ + static const int hwcaps_read2cmd[][2] = { + { SNOR_HWCAPS_READ, SNOR_CMD_READ }, + { SNOR_HWCAPS_READ_FAST, SNOR_CMD_READ_FAST }, + { SNOR_HWCAPS_READ_1_1_1_DTR, SNOR_CMD_READ_1_1_1_DTR }, + { SNOR_HWCAPS_READ_1_1_2, SNOR_CMD_READ_1_1_2 }, + { SNOR_HWCAPS_READ_1_2_2, SNOR_CMD_READ_1_2_2 }, + { SNOR_HWCAPS_READ_2_2_2, SNOR_CMD_READ_2_2_2 }, + { SNOR_HWCAPS_READ_1_2_2_DTR, SNOR_CMD_READ_1_2_2_DTR }, + { SNOR_HWCAPS_READ_1_1_4, SNOR_CMD_READ_1_1_4 }, + { SNOR_HWCAPS_READ_1_4_4, SNOR_CMD_READ_1_4_4 }, + { SNOR_HWCAPS_READ_4_4_4, SNOR_CMD_READ_4_4_4 }, + { SNOR_HWCAPS_READ_1_4_4_DTR, SNOR_CMD_READ_1_4_4_DTR }, + { SNOR_HWCAPS_READ_1_1_8, SNOR_CMD_READ_1_1_8 }, + { SNOR_HWCAPS_READ_1_8_8, SNOR_CMD_READ_1_8_8 }, + { SNOR_HWCAPS_READ_8_8_8, SNOR_CMD_READ_8_8_8 }, + { SNOR_HWCAPS_READ_1_8_8_DTR, SNOR_CMD_READ_1_8_8_DTR }, + }; + + return spi_nor_hwcaps2cmd(hwcaps, hwcaps_read2cmd, + ARRAY_SIZE(hwcaps_read2cmd)); +} + +static int spi_nor_hwcaps_pp2cmd(u32 hwcaps) +{ + static const int hwcaps_pp2cmd[][2] = { + { SNOR_HWCAPS_PP, SNOR_CMD_PP }, + { SNOR_HWCAPS_PP_1_1_4, SNOR_CMD_PP_1_1_4 }, + { SNOR_HWCAPS_PP_1_4_4, SNOR_CMD_PP_1_4_4 }, + { SNOR_HWCAPS_PP_4_4_4, SNOR_CMD_PP_4_4_4 }, + { SNOR_HWCAPS_PP_1_1_8, SNOR_CMD_PP_1_1_8 }, + { SNOR_HWCAPS_PP_1_8_8, SNOR_CMD_PP_1_8_8 }, + { SNOR_HWCAPS_PP_8_8_8, SNOR_CMD_PP_8_8_8 }, + }; + + return spi_nor_hwcaps2cmd(hwcaps, hwcaps_pp2cmd, + ARRAY_SIZE(hwcaps_pp2cmd)); +} + +static int spi_nor_select_read(struct spi_nor *nor, + const struct spi_nor_flash_parameter 
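spi_nor_hwcaps_read2cmd() and spi_nor_hwcaps_pp2cmd() above share one generic helper that walks a two-column table mapping a single capability bit to a command-table index. The pattern in isolation, with simplified, invented constants:

/* Sketch: { capability-bit, command-index } table lookup. */
#include <errno.h>
#include <stddef.h>
#include <stdint.h>

enum { CMD_READ, CMD_READ_FAST, CMD_READ_1_1_2, CMD_MAX };

#define CAP_READ       (1u << 0)
#define CAP_READ_FAST  (1u << 1)
#define CAP_READ_1_1_2 (1u << 2)

static int cap_to_cmd(uint32_t cap)
{
	static const uint32_t map[][2] = {
		{ CAP_READ,       CMD_READ },
		{ CAP_READ_FAST,  CMD_READ_FAST },
		{ CAP_READ_1_1_2, CMD_READ_1_1_2 },
	};
	size_t i;

	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i][0] == cap)
			return (int)map[i][1];

	return -EINVAL;	/* capability bit without a command slot */
}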
*params, + u32 shared_hwcaps) +{ + int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_READ_MASK) - 1; + const struct spi_nor_read_command *read; + + if (best_match < 0) + return -EINVAL; + + cmd = spi_nor_hwcaps_read2cmd(BIT(best_match)); + if (cmd < 0) + return -EINVAL; + + read = ¶ms->reads[cmd]; + nor->read_opcode = read->opcode; + nor->read_proto = read->proto; + + /* + * In the spi-nor framework, we don't need to make the difference + * between mode clock cycles and wait state clock cycles. + * Indeed, the value of the mode clock cycles is used by a QSPI + * flash memory to know whether it should enter or leave its 0-4-4 + * (Continuous Read / XIP) mode. + * eXecution In Place is out of the scope of the mtd sub-system. + * Hence we choose to merge both mode and wait state clock cycles + * into the so called dummy clock cycles. + */ + nor->read_dummy = read->num_mode_clocks + read->num_wait_states; + return 0; +} + +static int spi_nor_select_pp(struct spi_nor *nor, + const struct spi_nor_flash_parameter *params, + u32 shared_hwcaps) +{ + int cmd, best_match = fls(shared_hwcaps & SNOR_HWCAPS_PP_MASK) - 1; + const struct spi_nor_pp_command *pp; + + if (best_match < 0) + return -EINVAL; + + cmd = spi_nor_hwcaps_pp2cmd(BIT(best_match)); + if (cmd < 0) + return -EINVAL; + + pp = ¶ms->page_programs[cmd]; + nor->program_opcode = pp->opcode; + nor->write_proto = pp->proto; + return 0; +} + +static int spi_nor_select_erase(struct spi_nor *nor, + const struct flash_info *info) +{ + struct mtd_info *mtd = &nor->mtd; + +#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS + /* prefer "small sector" erase if possible */ + if (info->flags & SECT_4K) { + nor->erase_opcode = SPINOR_OP_BE_4K; + mtd->erasesize = 4096; + } else if (info->flags & SECT_4K_PMC) { + nor->erase_opcode = SPINOR_OP_BE_4K_PMC; + mtd->erasesize = 4096; + } else +#endif + { + nor->erase_opcode = SPINOR_OP_SE; + mtd->erasesize = info->sector_size; + } + return 0; +} + +static int spi_nor_setup(struct spi_nor *nor, const struct flash_info *info, + const struct spi_nor_flash_parameter *params, + const struct spi_nor_hwcaps *hwcaps) +{ + u32 ignored_mask, shared_mask; + bool enable_quad_io; + int err; + + /* + * Keep only the hardware capabilities supported by both the SPI + * controller and the SPI flash memory. + */ + shared_mask = hwcaps->mask & params->hwcaps.mask; + + /* SPI n-n-n protocols are not supported yet. */ + ignored_mask = (SNOR_HWCAPS_READ_2_2_2 | + SNOR_HWCAPS_READ_4_4_4 | + SNOR_HWCAPS_READ_8_8_8 | + SNOR_HWCAPS_PP_4_4_4 | + SNOR_HWCAPS_PP_8_8_8); + if (shared_mask & ignored_mask) { + dev_dbg(nor->dev, + "SPI n-n-n protocols are not supported yet.\n"); + shared_mask &= ~ignored_mask; + } + + /* Select the (Fast) Read command. */ + err = spi_nor_select_read(nor, params, shared_mask); + if (err) { + dev_err(nor->dev, + "can't select read settings supported by both the SPI controller and memory.\n"); + return err; + } + + /* Select the Page Program command. */ + err = spi_nor_select_pp(nor, params, shared_mask); + if (err) { + dev_err(nor->dev, + "can't select write settings supported by both the SPI controller and memory.\n"); + return err; + } + + /* Select the Sector Erase command. */ + err = spi_nor_select_erase(nor, info); + if (err) { + dev_err(nor->dev, + "can't select erase settings supported by both the SPI controller and memory.\n"); + return err; + } + + /* Enable Quad I/O if needed. 
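spi_nor_select_read() above leans on a deliberate ordering of the capability bits: within each group they go from slowest to fastest protocol, so fls() on the intersection directly yields the best protocol both sides support. A portable sketch of the trick (fls() emulated with __builtin_clz, assuming GCC/Clang and 32-bit int):

/* Sketch: highest set bit of the shared mask = best common protocol. */
#include <errno.h>
#include <stdint.h>

static int sketch_fls(uint32_t x)
{
	/* 1-based index of the highest set bit, 0 when x == 0. */
	return x ? 32 - __builtin_clz(x) : 0;
}

static int select_best(uint32_t shared_mask, uint32_t group_mask)
{
	int best = sketch_fls(shared_mask & group_mask) - 1;

	if (best < 0)
		return -EINVAL;	/* nothing in common for this group */

	return best;	/* bit number doubles as a priority rank */
}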
*/ + enable_quad_io = (spi_nor_get_protocol_width(nor->read_proto) == 4 || + spi_nor_get_protocol_width(nor->write_proto) == 4); + if (enable_quad_io && params->quad_enable) { + err = params->quad_enable(nor); + if (err) { + dev_err(nor->dev, "quad mode not supported\n"); + return err; + } + } + + return 0; +} + +int spi_nor_scan(struct spi_nor *nor, const char *name, + const struct spi_nor_hwcaps *hwcaps) +{ + struct spi_nor_flash_parameter params; const struct flash_info *info = NULL; struct device *dev = nor->dev; struct mtd_info *mtd = &nor->mtd; @@ -1549,6 +1858,11 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) if (ret) return ret; + /* Reset SPI protocol for all commands. */ + nor->reg_proto = SNOR_PROTO_1_1_1; + nor->read_proto = SNOR_PROTO_1_1_1; + nor->write_proto = SNOR_PROTO_1_1_1; + if (name) info = spi_nor_match_id(name); /* Try to auto-detect if chip name wasn't specified or not found */ @@ -1591,6 +1905,11 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) if (info->flags & SPI_S3AN) nor->flags |= SNOR_F_READY_XSR_RDY; + /* Parse the Serial Flash Discoverable Parameters table. */ + ret = spi_nor_init_params(nor, info, ¶ms); + if (ret) + return ret; + /* * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up * with the software protection bits set @@ -1611,7 +1930,7 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) mtd->type = MTD_NORFLASH; mtd->writesize = 1; mtd->flags = MTD_CAP_NORFLASH; - mtd->size = info->sector_size * info->n_sectors; + mtd->size = params.size; mtd->_erase = spi_nor_erase; mtd->_read = spi_nor_read; @@ -1642,75 +1961,38 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) if (info->flags & NO_CHIP_ERASE) nor->flags |= SNOR_F_NO_OP_CHIP_ERASE; -#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS - /* prefer "small sector" erase if possible */ - if (info->flags & SECT_4K) { - nor->erase_opcode = SPINOR_OP_BE_4K; - mtd->erasesize = 4096; - } else if (info->flags & SECT_4K_PMC) { - nor->erase_opcode = SPINOR_OP_BE_4K_PMC; - mtd->erasesize = 4096; - } else -#endif - { - nor->erase_opcode = SPINOR_OP_SE; - mtd->erasesize = info->sector_size; - } - if (info->flags & SPI_NOR_NO_ERASE) mtd->flags |= MTD_NO_ERASE; mtd->dev.parent = dev; - nor->page_size = info->page_size; + nor->page_size = params.page_size; mtd->writebufsize = nor->page_size; if (np) { /* If we were instantiated by DT, use it */ if (of_property_read_bool(np, "m25p,fast-read")) - nor->flash_read = SPI_NOR_FAST; + params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST; else - nor->flash_read = SPI_NOR_NORMAL; + params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST; } else { /* If we weren't instantiated by DT, default to fast-read */ - nor->flash_read = SPI_NOR_FAST; + params.hwcaps.mask |= SNOR_HWCAPS_READ_FAST; } /* Some devices cannot do fast-read, no matter what DT tells us */ if (info->flags & SPI_NOR_NO_FR) - nor->flash_read = SPI_NOR_NORMAL; - - /* Quad/Dual-read mode takes precedence over fast/normal */ - if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) { - ret = set_quad_mode(nor, info); - if (ret) { - dev_err(dev, "quad mode not supported\n"); - return ret; - } - nor->flash_read = SPI_NOR_QUAD; - } else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) { - nor->flash_read = SPI_NOR_DUAL; - } - - /* Default commands */ - switch (nor->flash_read) { - case SPI_NOR_QUAD: - nor->read_opcode = SPINOR_OP_READ_1_1_4; - break; - case SPI_NOR_DUAL: - nor->read_opcode = 
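The tail of spi_nor_setup() above runs the Quad Enable procedure only when the protocols actually negotiated drive four data lines, and the procedure itself is a per-manufacturer callback picked earlier in spi_nor_init_params() (Macronix vs. Spansion-style, none for Micron). A sketch with simplified types, names invented:

/* Sketch: conditional quad-enable via a per-chip callback. */
#include <errno.h>

struct sketch_setup {
	int read_width;			/* data lines of the chosen read proto */
	int write_width;		/* data lines of the chosen write proto */
	int (*quad_enable)(void);	/* NULL when the chip needs nothing */
};

static int maybe_enable_quad(const struct sketch_setup *s)
{
	int wants_quad = (s->read_width == 4 || s->write_width == 4);

	if (!wants_quad || !s->quad_enable)
		return 0;	/* nothing to switch on */

	return s->quad_enable();	/* e.g. set the QE status bit */
}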
SPINOR_OP_READ_1_1_2; - break; - case SPI_NOR_FAST: - nor->read_opcode = SPINOR_OP_READ_FAST; - break; - case SPI_NOR_NORMAL: - nor->read_opcode = SPINOR_OP_READ; - break; - default: - dev_err(dev, "No Read opcode defined\n"); - return -EINVAL; - } + params.hwcaps.mask &= ~SNOR_HWCAPS_READ_FAST; - nor->program_opcode = SPINOR_OP_PP; + /* + * Configure the SPI memory: + * - select op codes for (Fast) Read, Page Program and Sector Erase. + * - set the number of dummy cycles (mode cycles + wait states). + * - set the SPI protocols for register and memory accesses. + * - set the Quad Enable bit if needed (required by SPI x-y-4 protos). + */ + ret = spi_nor_setup(nor, info, ¶ms, hwcaps); + if (ret) + return ret; if (info->addr_width) nor->addr_width = info->addr_width; @@ -1732,8 +2014,6 @@ int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) return -EINVAL; } - nor->read_dummy = spi_nor_read_dummy_cycles(nor); - if (info->flags & SPI_S3AN) { ret = s3an_nor_scan(info, nor); if (ret) diff --git a/drivers/mtd/spi-nor/stm32-quadspi.c b/drivers/mtd/spi-nor/stm32-quadspi.c index ae45f81b8cd3..86c0931543c5 100644 --- a/drivers/mtd/spi-nor/stm32-quadspi.c +++ b/drivers/mtd/spi-nor/stm32-quadspi.c @@ -19,6 +19,7 @@ #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/reset.h> +#include <linux/sizes.h> #define QUADSPI_CR 0x00 #define CR_EN BIT(0) @@ -192,15 +193,15 @@ static void stm32_qspi_set_framemode(struct spi_nor *nor, cmd->framemode = CCR_IMODE_1; if (read) { - switch (nor->flash_read) { - case SPI_NOR_NORMAL: - case SPI_NOR_FAST: + switch (nor->read_proto) { + default: + case SNOR_PROTO_1_1_1: dmode = CCR_DMODE_1; break; - case SPI_NOR_DUAL: + case SNOR_PROTO_1_1_2: dmode = CCR_DMODE_2; break; - case SPI_NOR_QUAD: + case SNOR_PROTO_1_1_4: dmode = CCR_DMODE_4; break; } @@ -375,7 +376,7 @@ static ssize_t stm32_qspi_read(struct spi_nor *nor, loff_t from, size_t len, struct stm32_qspi_cmd cmd; int err; - dev_dbg(qspi->dev, "read(%#.2x): buf:%p from:%#.8x len:%#x\n", + dev_dbg(qspi->dev, "read(%#.2x): buf:%p from:%#.8x len:%#zx\n", nor->read_opcode, buf, (u32)from, len); memset(&cmd, 0, sizeof(cmd)); @@ -402,7 +403,7 @@ static ssize_t stm32_qspi_write(struct spi_nor *nor, loff_t to, size_t len, struct stm32_qspi_cmd cmd; int err; - dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#x\n", + dev_dbg(dev, "write(%#.2x): buf:%p to:%#.8x len:%#zx\n", nor->program_opcode, buf, (u32)to, len); memset(&cmd, 0, sizeof(cmd)); @@ -480,7 +481,12 @@ static void stm32_qspi_unprep(struct spi_nor *nor, enum spi_nor_ops ops) static int stm32_qspi_flash_setup(struct stm32_qspi *qspi, struct device_node *np) { - u32 width, flash_read, presc, cs_num, max_rate = 0; + struct spi_nor_hwcaps hwcaps = { + .mask = SNOR_HWCAPS_READ | + SNOR_HWCAPS_READ_FAST | + SNOR_HWCAPS_PP, + }; + u32 width, presc, cs_num, max_rate = 0; struct stm32_qspi_flash *flash; struct mtd_info *mtd; int ret; @@ -499,12 +505,10 @@ static int stm32_qspi_flash_setup(struct stm32_qspi *qspi, width = 1; if (width == 4) - flash_read = SPI_NOR_QUAD; + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_4; else if (width == 2) - flash_read = SPI_NOR_DUAL; - else if (width == 1) - flash_read = SPI_NOR_NORMAL; - else + hwcaps.mask |= SNOR_HWCAPS_READ_1_1_2; + else if (width != 1) return -EINVAL; flash = &qspi->flash[cs_num]; @@ -539,7 +543,7 @@ static int stm32_qspi_flash_setup(struct stm32_qspi *qspi, */ flash->fsize = FSIZE_VAL(SZ_1K); - ret = spi_nor_scan(&flash->nor, NULL, flash_read); + ret = spi_nor_scan(&flash->nor, 
NULL, &hwcaps); if (ret) { dev_err(qspi->dev, "device scan failed\n"); return ret; diff --git a/drivers/mtd/tests/subpagetest.c b/drivers/mtd/tests/subpagetest.c index aecc6ce5a9e1..fa2519ad2435 100644 --- a/drivers/mtd/tests/subpagetest.c +++ b/drivers/mtd/tests/subpagetest.c @@ -102,7 +102,7 @@ static int write_eraseblock2(int ebnum) if (unlikely(err || written != subpgsize * k)) { pr_err("error: write failed at %#llx\n", (long long)addr); - if (written != subpgsize) { + if (written != subpgsize * k) { pr_err(" write size: %#x\n", subpgsize * k); pr_err(" written: %#08zx\n", diff --git a/drivers/mux/Kconfig b/drivers/mux/Kconfig index 7c754a0f14bb..19e4e904c9bf 100644 --- a/drivers/mux/Kconfig +++ b/drivers/mux/Kconfig @@ -2,20 +2,11 @@ # Multiplexer devices # -menuconfig MULTIPLEXER - tristate "Multiplexer subsystem" - help - Multiplexer controller subsystem. Multiplexers are used in a - variety of settings, and this subsystem abstracts their use - so that the rest of the kernel sees a common interface. When - multiple parallel multiplexers are controlled by one single - multiplexer controller, this subsystem also coordinates the - multiplexer accesses. - - To compile the subsystem as a module, choose M here: the module will - be called mux-core. +config MULTIPLEXER + tristate -if MULTIPLEXER +menu "Multiplexer drivers" + depends on MULTIPLEXER config MUX_ADG792A tristate "Analog Devices ADG792A/ADG792G Multiplexers" @@ -56,4 +47,4 @@ config MUX_MMIO To compile the driver as a module, choose M here: the module will be called mux-mmio. -endif +endmenu diff --git a/drivers/mux/mux-core.c b/drivers/mux/mux-core.c index 90b8995f07cb..2fe96c470112 100644 --- a/drivers/mux/mux-core.c +++ b/drivers/mux/mux-core.c @@ -46,7 +46,7 @@ static int __init mux_init(void) static void __exit mux_exit(void) { - class_register(&mux_class); + class_unregister(&mux_class); ida_destroy(&mux_ida); } diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 14ff622190a5..181839d6fbea 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -4596,7 +4596,7 @@ static int bond_check_params(struct bond_params *params) } ad_user_port_key = valptr->value; - if (bond_mode == BOND_MODE_TLB) { + if ((bond_mode == BOND_MODE_TLB) || (bond_mode == BOND_MODE_ALB)) { bond_opt_initstr(&newval, "default"); valptr = bond_opt_parse(bond_opt_get(BOND_OPT_TLB_DYNAMIC_LB), &newval); diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index e68d368e20ac..7f36d3e3c98b 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1665,6 +1665,7 @@ static const struct b53_chip_data b53_switch_chips[] = { .dev_name = "BCM53125", .vlans = 4096, .enabled_ports = 0xff, + .arl_entries = 4, .cpu_port = B53_CPU_PORT, .vta_regs = B53_VTA_REGS, .duplex_reg = B53_DUPLEX_STAT_GE, diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 53b088166c28..5bcdd33101b0 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3178,6 +3178,7 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_limit = mv88e6390_port_pause_limit, + .port_set_cmode = mv88e6390x_port_set_cmode, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .stats_snapshot = mv88e6390_g1_stats_snapshot, 
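The one-line mux-core fix above is the classic init/exit asymmetry bug: the exit handler called class_register() a second time instead of class_unregister(), so the class leaked on unload. As a rule, module exit should be init run backwards; a minimal shape, with stand-ins for the kernel calls:

/* Sketch: exit must mirror init, step for step, in reverse. */
static int class_registered;

static int sketch_mux_init(void)
{
	class_registered = 1;	/* class_register(&mux_class) */
	return 0;
}

static void sketch_mux_exit(void)
{
	class_registered = 0;	/* class_unregister(&mux_class): the fix */
	/* ida_destroy(&mux_ida) follows, as in the patched code */
}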
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index d3906f6b01bd..86058a9f3417 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1785,16 +1785,18 @@ static int xgene_enet_get_resources(struct xgene_enet_pdata *pdata) xgene_enet_gpiod_get(pdata); - pdata->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(pdata->clk)) { - /* Abort if the clock is defined but couldn't be retrived. - * Always abort if the clock is missing on DT system as - * the driver can't cope with this case. - */ - if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node) - return PTR_ERR(pdata->clk); - /* Firmware may have set up the clock already. */ - dev_info(dev, "clocks have been setup already\n"); + if (pdata->phy_mode != PHY_INTERFACE_MODE_SGMII) { + pdata->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pdata->clk)) { + /* Abort if the clock is defined but couldn't be + * retrived. Always abort if the clock is missing on + * DT system as the driver can't cope with this case. + */ + if (PTR_ERR(pdata->clk) != -ENOENT || dev->of_node) + return PTR_ERR(pdata->clk); + /* Firmware may have set up the clock already. */ + dev_info(dev, "clocks have been setup already\n"); + } } if (pdata->phy_mode != PHY_INTERFACE_MODE_XGMII) diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c index 73aca97a96bc..d937083db9a4 100644 --- a/drivers/net/ethernet/broadcom/bgmac-platform.c +++ b/drivers/net/ethernet/broadcom/bgmac-platform.c @@ -50,11 +50,14 @@ static u32 platform_bgmac_idm_read(struct bgmac *bgmac, u16 offset) static void platform_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value) { - return writel(value, bgmac->plat.idm_base + offset); + writel(value, bgmac->plat.idm_base + offset); } static bool platform_bgmac_clk_enabled(struct bgmac *bgmac) { + if (!bgmac->plat.idm_base) + return true; + if ((bgmac_idm_read(bgmac, BCMA_IOCTL) & BGMAC_CLK_EN) != BGMAC_CLK_EN) return false; if (bgmac_idm_read(bgmac, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET) @@ -66,6 +69,9 @@ static void platform_bgmac_clk_enable(struct bgmac *bgmac, u32 flags) { u32 val; + if (!bgmac->plat.idm_base) + return; + /* The Reset Control register only contains a single bit to show if the * controller is currently in reset. Do a sanity check here, just in * case the bootloader happened to leave the device in reset. 
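The bgmac-platform hunks above demote the idm register window from mandatory to optional: probe only ioremaps it when the resource exists, and every accessor that touches it first checks the base pointer, reporting a safe default (such as "clock already enabled") when it is absent. A simplified sketch, no kernel API, names invented:

/* Sketch: optional MMIO window with NULL-guarded accessors. */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct sketch_bgmac {
	volatile uint32_t *idm_base;	/* NULL when the window is absent */
};

static bool sketch_clk_enabled(const struct sketch_bgmac *d)
{
	if (!d->idm_base)
		return true;	/* no IDM regs: firmware set the clock up */

	return (d->idm_base[0] & 0x1u) != 0;	/* stand-in for BGMAC_CLK_EN */
}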
@@ -180,6 +186,7 @@ static int bgmac_probe(struct platform_device *pdev) bgmac->feature_flags |= BGMAC_FEAT_CMDCFG_SR_REV4; bgmac->feature_flags |= BGMAC_FEAT_TX_MASK_SETUP; bgmac->feature_flags |= BGMAC_FEAT_RX_MASK_SETUP; + bgmac->feature_flags |= BGMAC_FEAT_IDM_MASK; bgmac->dev = &pdev->dev; bgmac->dma_dev = &pdev->dev; @@ -207,15 +214,13 @@ static int bgmac_probe(struct platform_device *pdev) return PTR_ERR(bgmac->plat.base); regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "idm_base"); - if (!regs) { - dev_err(&pdev->dev, "Unable to obtain idm resource\n"); - return -EINVAL; + if (regs) { + bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(bgmac->plat.idm_base)) + return PTR_ERR(bgmac->plat.idm_base); + bgmac->feature_flags &= ~BGMAC_FEAT_IDM_MASK; } - bgmac->plat.idm_base = devm_ioremap_resource(&pdev->dev, regs); - if (IS_ERR(bgmac->plat.idm_base)) - return PTR_ERR(bgmac->plat.idm_base); - regs = platform_get_resource_byname(pdev, IORESOURCE_MEM, "nicpm_base"); if (regs) { bgmac->plat.nicpm_base = devm_ioremap_resource(&pdev->dev, diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index ba4d2e145bb9..48d672b204a4 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -622,9 +622,11 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base)); BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base)); - if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) { - dev_err(bgmac->dev, "Core does not report 64-bit DMA\n"); - return -ENOTSUPP; + if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) { + if (!(bgmac_idm_read(bgmac, BCMA_IOST) & BCMA_IOST_DMA64)) { + dev_err(bgmac->dev, "Core does not report 64-bit DMA\n"); + return -ENOTSUPP; + } } for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) { @@ -855,9 +857,11 @@ static void bgmac_mac_speed(struct bgmac *bgmac) static void bgmac_miiconfig(struct bgmac *bgmac) { if (bgmac->feature_flags & BGMAC_FEAT_FORCE_SPEED_2500) { - bgmac_idm_write(bgmac, BCMA_IOCTL, - bgmac_idm_read(bgmac, BCMA_IOCTL) | 0x40 | - BGMAC_BCMA_IOCTL_SW_CLKEN); + if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) { + bgmac_idm_write(bgmac, BCMA_IOCTL, + bgmac_idm_read(bgmac, BCMA_IOCTL) | + 0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN); + } bgmac->mac_speed = SPEED_2500; bgmac->mac_duplex = DUPLEX_FULL; bgmac_mac_speed(bgmac); @@ -874,11 +878,36 @@ static void bgmac_miiconfig(struct bgmac *bgmac) } } +static void bgmac_chip_reset_idm_config(struct bgmac *bgmac) +{ + u32 iost; + + iost = bgmac_idm_read(bgmac, BCMA_IOST); + if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED) + iost &= ~BGMAC_BCMA_IOST_ATTACHED; + + /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */ + if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) { + u32 flags = 0; + + if (iost & BGMAC_BCMA_IOST_ATTACHED) { + flags = BGMAC_BCMA_IOCTL_SW_CLKEN; + if (!bgmac->has_robosw) + flags |= BGMAC_BCMA_IOCTL_SW_RESET; + } + bgmac_clk_enable(bgmac, flags); + } + + if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw) + bgmac_idm_write(bgmac, BCMA_IOCTL, + bgmac_idm_read(bgmac, BCMA_IOCTL) & + ~BGMAC_BCMA_IOCTL_SW_RESET); +} + /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */ static void bgmac_chip_reset(struct bgmac *bgmac) { u32 cmdcfg_sr; - u32 iost; int i; if (bgmac_clk_enabled(bgmac)) { @@ -899,20 +928,8 @@ static void bgmac_chip_reset(struct bgmac *bgmac) /* TODO: Clear software multicast filter list */ } - iost = 
bgmac_idm_read(bgmac, BCMA_IOST); - if (bgmac->feature_flags & BGMAC_FEAT_IOST_ATTACHED) - iost &= ~BGMAC_BCMA_IOST_ATTACHED; - - /* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */ - if (!(bgmac->feature_flags & BGMAC_FEAT_NO_RESET)) { - u32 flags = 0; - if (iost & BGMAC_BCMA_IOST_ATTACHED) { - flags = BGMAC_BCMA_IOCTL_SW_CLKEN; - if (!bgmac->has_robosw) - flags |= BGMAC_BCMA_IOCTL_SW_RESET; - } - bgmac_clk_enable(bgmac, flags); - } + if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) + bgmac_chip_reset_idm_config(bgmac); /* Request Misc PLL for corerev > 2 */ if (bgmac->feature_flags & BGMAC_FEAT_MISC_PLL_REQ) { @@ -970,11 +987,6 @@ static void bgmac_chip_reset(struct bgmac *bgmac) BGMAC_CHIPCTL_7_IF_TYPE_RGMII); } - if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw) - bgmac_idm_write(bgmac, BCMA_IOCTL, - bgmac_idm_read(bgmac, BCMA_IOCTL) & - ~BGMAC_BCMA_IOCTL_SW_RESET); - /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine * BGMAC_CMDCFG is read _after_ putting chip in a reset. So it has to @@ -1497,8 +1509,10 @@ int bgmac_enet_probe(struct bgmac *bgmac) bgmac_clk_enable(bgmac, 0); /* This seems to be fixing IRQ by assigning OOB #6 to the core */ - if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6) - bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86); + if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) { + if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6) + bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86); + } bgmac_chip_reset(bgmac); diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h index c1818766c501..443d57b10264 100644 --- a/drivers/net/ethernet/broadcom/bgmac.h +++ b/drivers/net/ethernet/broadcom/bgmac.h @@ -425,6 +425,7 @@ #define BGMAC_FEAT_CC4_IF_SW_TYPE BIT(17) #define BGMAC_FEAT_CC4_IF_SW_TYPE_RGMII BIT(18) #define BGMAC_FEAT_CC7_IF_TYPE_RGMII BIT(19) +#define BGMAC_FEAT_IDM_MASK BIT(20) struct bgmac_slot_info { union { diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 43423744fdfa..1e33abde4a3e 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -2886,7 +2886,7 @@ static int bnx2x_test_nvram_tbl(struct bnx2x *bp, static int bnx2x_test_nvram(struct bnx2x *bp) { - const struct crc_pair nvram_tbl[] = { + static const struct crc_pair nvram_tbl[] = { { 0, 0x14 }, /* bootstrap */ { 0x14, 0xec }, /* dir */ { 0x100, 0x350 }, /* manuf_info */ @@ -2895,7 +2895,7 @@ static int bnx2x_test_nvram(struct bnx2x *bp) { 0x708, 0x70 }, /* manuf_key_info */ { 0, 0 } }; - const struct crc_pair nvram_tbl2[] = { + static const struct crc_pair nvram_tbl2[] = { { 0x7e8, 0x350 }, /* manuf_info2 */ { 0xb38, 0xf0 }, /* feature_info */ { 0, 0 } @@ -3162,7 +3162,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) if (is_multi(bp)) { for_each_eth_queue(bp, i) { memset(queue_name, 0, sizeof(queue_name)); - sprintf(queue_name, "%d", i); + snprintf(queue_name, sizeof(queue_name), + "%d", i); for (j = 0; j < BNX2X_NUM_Q_STATS; j++) snprintf(buf + (k + j)*ETH_GSTRING_LEN, ETH_GSTRING_LEN, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index a19f68f5862d..e7c8539cbddf 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3458,13 +3458,18 @@ static int bnxt_hwrm_func_drv_rgtr(struct 
bnxt *bp) req.ver_upd = DRV_VER_UPD; if (BNXT_PF(bp)) { - DECLARE_BITMAP(vf_req_snif_bmap, 256); - u32 *data = (u32 *)vf_req_snif_bmap; + u32 data[8]; int i; - memset(vf_req_snif_bmap, 0, sizeof(vf_req_snif_bmap)); - for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) - __set_bit(bnxt_vf_req_snif[i], vf_req_snif_bmap); + memset(data, 0, sizeof(data)); + for (i = 0; i < ARRAY_SIZE(bnxt_vf_req_snif); i++) { + u16 cmd = bnxt_vf_req_snif[i]; + unsigned int bit, idx; + + idx = cmd / 32; + bit = cmd % 32; + data[idx] |= 1 << bit; + } for (i = 0; i < 8; i++) req.vf_req_fwd[i] = cpu_to_le32(data[i]); @@ -6279,6 +6284,12 @@ static int bnxt_open(struct net_device *dev) return __bnxt_open_nic(bp, true, true); } +static bool bnxt_drv_busy(struct bnxt *bp) +{ + return (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state) || + test_bit(BNXT_STATE_READ_STATS, &bp->state)); +} + int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { int rc = 0; @@ -6297,7 +6308,7 @@ int bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) clear_bit(BNXT_STATE_OPEN, &bp->state); smp_mb__after_atomic(); - while (test_bit(BNXT_STATE_IN_SP_TASK, &bp->state)) + while (bnxt_drv_busy(bp)) msleep(20); /* Flush rings and and disable interrupts */ @@ -6358,8 +6369,15 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) u32 i; struct bnxt *bp = netdev_priv(dev); - if (!bp->bnapi) + set_bit(BNXT_STATE_READ_STATS, &bp->state); + /* Make sure bnxt_close_nic() sees that we are reading stats before + * we check the BNXT_STATE_OPEN flag. + */ + smp_mb__after_atomic(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + clear_bit(BNXT_STATE_READ_STATS, &bp->state); return; + } /* TODO check if we need to synchronize with bnxt_close path */ for (i = 0; i < bp->cp_nr_rings; i++) { @@ -6406,6 +6424,7 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_fifo_errors = le64_to_cpu(tx->tx_fifo_underruns); stats->tx_errors = le64_to_cpu(tx->tx_err); } + clear_bit(BNXT_STATE_READ_STATS, &bp->state); } static bool bnxt_mc_list_updated(struct bnxt *bp, u32 *rx_mask) @@ -6904,16 +6923,13 @@ static void bnxt_sp_task(struct work_struct *work) } /* Under rtnl_lock */ -int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp) +int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, + int tx_xdp) { int max_rx, max_tx, tx_sets = 1; int tx_rings_needed; - bool sh = true; int rc; - if (!(bp->flags & BNXT_FLAG_SHARED_RINGS)) - sh = false; - if (tcs) tx_sets = tcs; @@ -7121,7 +7137,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) sh = true; rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, - tc, bp->tx_nr_rings_xdp); + sh, tc, bp->tx_nr_rings_xdp); if (rc) return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index f872a7db2ca8..f34691f85602 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1117,6 +1117,7 @@ struct bnxt { unsigned long state; #define BNXT_STATE_OPEN 0 #define BNXT_STATE_IN_SP_TASK 1 +#define BNXT_STATE_READ_STATS 2 struct bnxt_irq *irq_tbl; int total_irqs; @@ -1300,7 +1301,8 @@ int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); int bnxt_close_nic(struct bnxt *, bool, bool); -int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, int tcs, int tx_xdp); +int bnxt_reserve_rings(struct bnxt *bp, int tx, int rx, bool 
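The bnxt hunk replacing DECLARE_BITMAP with a plain u32 data[8] is about wire format: the kernel bitmap layout depends on sizeof(unsigned long) and endianness, while the firmware interface wants eight little-endian 32-bit words, so the bits are now placed with explicit word/bit arithmetic before the per-word cpu_to_le32(). The core of it, standalone:

/* Sketch: set bit 'cmd' in an array of u32 words with explicit
 * index math; the caller guarantees cmd < 256 so idx stays below 8.
 */
#include <stdint.h>
#include <string.h>

static void set_cmd_bits(uint32_t data[8], const uint16_t *cmds, int n)
{
	int i;

	memset(data, 0, 8 * sizeof(uint32_t));
	for (i = 0; i < n; i++) {
		unsigned int idx = cmds[i] / 32;	/* which u32 word */
		unsigned int bit = cmds[i] % 32;	/* which bit inside it */

		data[idx] |= 1u << bit;
	}
}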
sh, int tcs, + int tx_xdp); int bnxt_setup_mq_tc(struct net_device *dev, u8 tc); int bnxt_get_max_rings(struct bnxt *, int *, int *, bool); void bnxt_restore_pf_fw_resources(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index fd1181510b65..be6acadcb202 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -432,7 +432,8 @@ static int bnxt_set_channels(struct net_device *dev, } tx_xdp = req_rx_rings; } - rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, tcs, tx_xdp); + rc = bnxt_reserve_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, + tx_xdp); if (rc) { netdev_warn(dev, "Unable to allocate the requested rings\n"); return rc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 7d67552e70d7..3961a6807454 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -170,7 +170,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) if (!tc) tc = 1; rc = bnxt_reserve_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings, - tc, tx_xdp); + true, tc, tx_xdp); if (rc) { netdev_warn(dev, "Unable to reserve enough TX rings to support XDP.\n"); return rc; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index daca1c9d254b..7b0b399aaedd 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1202,12 +1202,21 @@ static struct enet_cb *bcmgenet_get_txcb(struct bcmgenet_priv *priv, return tx_cb_ptr; } -/* Simple helper to free a control block's resources */ -static void bcmgenet_free_cb(struct enet_cb *cb) +static struct enet_cb *bcmgenet_put_txcb(struct bcmgenet_priv *priv, + struct bcmgenet_tx_ring *ring) { - dev_kfree_skb_any(cb->skb); - cb->skb = NULL; - dma_unmap_addr_set(cb, dma_addr, 0); + struct enet_cb *tx_cb_ptr; + + tx_cb_ptr = ring->cbs; + tx_cb_ptr += ring->write_ptr - ring->cb_ptr; + + /* Rewinding local write pointer */ + if (ring->write_ptr == ring->cb_ptr) + ring->write_ptr = ring->end_ptr; + else + ring->write_ptr--; + + return tx_cb_ptr; } static inline void bcmgenet_rx_ring16_int_disable(struct bcmgenet_rx_ring *ring) @@ -1260,18 +1269,72 @@ static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_tx_ring *ring) INTRL2_CPU_MASK_SET); } +/* Simple helper to free a transmit control block's resources + * Returns an skb when the last transmit control block associated with the + * skb is freed. The skb should be freed by the caller if necessary. 
+ */ +static struct sk_buff *bcmgenet_free_tx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + + if (skb) { + cb->skb = NULL; + if (cb == GENET_CB(skb)->first_cb) + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + else + dma_unmap_page(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + + if (cb == GENET_CB(skb)->last_cb) + return skb; + + } else if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_page(dev, + dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), + DMA_TO_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return 0; +} + +/* Simple helper to free a receive control block's resources */ +static struct sk_buff *bcmgenet_free_rx_cb(struct device *dev, + struct enet_cb *cb) +{ + struct sk_buff *skb; + + skb = cb->skb; + cb->skb = NULL; + + if (dma_unmap_addr(cb, dma_addr)) { + dma_unmap_single(dev, dma_unmap_addr(cb, dma_addr), + dma_unmap_len(cb, dma_len), DMA_FROM_DEVICE); + dma_unmap_addr_set(cb, dma_addr, 0); + } + + return skb; +} + /* Unlocked version of the reclaim routine */ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, struct bcmgenet_tx_ring *ring) { struct bcmgenet_priv *priv = netdev_priv(dev); - struct device *kdev = &priv->pdev->dev; - struct enet_cb *tx_cb_ptr; - unsigned int pkts_compl = 0; + unsigned int txbds_processed = 0; unsigned int bytes_compl = 0; - unsigned int c_index; + unsigned int pkts_compl = 0; unsigned int txbds_ready; - unsigned int txbds_processed = 0; + unsigned int c_index; + struct sk_buff *skb; /* Clear status before servicing to reduce spurious interrupts */ if (ring->index == DESC_INDEX) @@ -1292,21 +1355,12 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev, /* Reclaim transmitted buffers */ while (txbds_processed < txbds_ready) { - tx_cb_ptr = &priv->tx_cbs[ring->clean_ptr]; - if (tx_cb_ptr->skb) { + skb = bcmgenet_free_tx_cb(&priv->pdev->dev, + &priv->tx_cbs[ring->clean_ptr]); + if (skb) { pkts_compl++; - bytes_compl += GENET_CB(tx_cb_ptr->skb)->bytes_sent; - dma_unmap_single(kdev, - dma_unmap_addr(tx_cb_ptr, dma_addr), - dma_unmap_len(tx_cb_ptr, dma_len), - DMA_TO_DEVICE); - bcmgenet_free_cb(tx_cb_ptr); - } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) { - dma_unmap_page(kdev, - dma_unmap_addr(tx_cb_ptr, dma_addr), - dma_unmap_len(tx_cb_ptr, dma_len), - DMA_TO_DEVICE); - dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0); + bytes_compl += GENET_CB(skb)->bytes_sent; + dev_kfree_skb_any(skb); } txbds_processed++; @@ -1380,95 +1434,6 @@ static void bcmgenet_tx_reclaim_all(struct net_device *dev) bcmgenet_tx_reclaim(dev, &priv->tx_rings[DESC_INDEX]); } -/* Transmits a single SKB (either head of a fragment or a single SKB) - * caller must hold priv->lock - */ -static int bcmgenet_xmit_single(struct net_device *dev, - struct sk_buff *skb, - u16 dma_desc_flags, - struct bcmgenet_tx_ring *ring) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - struct device *kdev = &priv->pdev->dev; - struct enet_cb *tx_cb_ptr; - unsigned int skb_len; - dma_addr_t mapping; - u32 length_status; - int ret; - - tx_cb_ptr = bcmgenet_get_txcb(priv, ring); - - if (unlikely(!tx_cb_ptr)) - BUG(); - - tx_cb_ptr->skb = skb; - - skb_len = skb_headlen(skb); - - mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE); - ret = dma_mapping_error(kdev, mapping); - if (ret) { - priv->mib.tx_dma_failed++; - netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); - 
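bcmgenet_free_tx_cb() above unifies two unmap paths: the head of an skb was mapped with dma_map_single() and its fragments with page mappings, so the helper must choose the matching unmap call, and it returns the skb only when the last control block tied to it is freed, letting the caller free it exactly once. A much-reduced sketch of that ownership rule (types invented):

/* Sketch: free one control block; hand the skb back only at the
 * last block so it is freed exactly once.
 */
#include <stddef.h>

struct sketch_cb {
	void *skb;	/* set on every cb belonging to the skb */
	int   is_last;	/* true for the skb's final control block */
};

static void *sketch_free_tx_cb(struct sketch_cb *cb)
{
	void *skb = cb->skb;

	if (!skb)
		return NULL;	/* nothing attached: unmap-only slot */

	cb->skb = NULL;
	/* real code: dma_unmap_single() for the head cb,
	 * dma_unmap_page() for fragment cbs, then clear dma_addr
	 */
	return cb->is_last ? skb : NULL;
}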
dev_kfree_skb(skb); - return ret; - } - - dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); - dma_unmap_len_set(tx_cb_ptr, dma_len, skb_len); - length_status = (skb_len << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | - (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT) | - DMA_TX_APPEND_CRC; - - if (skb->ip_summed == CHECKSUM_PARTIAL) - length_status |= DMA_TX_DO_CSUM; - - dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, length_status); - - return 0; -} - -/* Transmit a SKB fragment */ -static int bcmgenet_xmit_frag(struct net_device *dev, - skb_frag_t *frag, - u16 dma_desc_flags, - struct bcmgenet_tx_ring *ring) -{ - struct bcmgenet_priv *priv = netdev_priv(dev); - struct device *kdev = &priv->pdev->dev; - struct enet_cb *tx_cb_ptr; - unsigned int frag_size; - dma_addr_t mapping; - int ret; - - tx_cb_ptr = bcmgenet_get_txcb(priv, ring); - - if (unlikely(!tx_cb_ptr)) - BUG(); - - tx_cb_ptr->skb = NULL; - - frag_size = skb_frag_size(frag); - - mapping = skb_frag_dma_map(kdev, frag, 0, frag_size, DMA_TO_DEVICE); - ret = dma_mapping_error(kdev, mapping); - if (ret) { - priv->mib.tx_dma_failed++; - netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n", - __func__); - return ret; - } - - dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); - dma_unmap_len_set(tx_cb_ptr, dma_len, frag_size); - - dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, - (frag_size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags | - (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT)); - - return 0; -} - /* Reallocate the SKB to put enough headroom in front of it and insert * the transmit checksum offsets in the descriptors */ @@ -1535,11 +1500,16 @@ static struct sk_buff *bcmgenet_put_tx_csum(struct net_device *dev, static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) { struct bcmgenet_priv *priv = netdev_priv(dev); + struct device *kdev = &priv->pdev->dev; struct bcmgenet_tx_ring *ring = NULL; + struct enet_cb *tx_cb_ptr; struct netdev_queue *txq; unsigned long flags = 0; int nr_frags, index; - u16 dma_desc_flags; + dma_addr_t mapping; + unsigned int size; + skb_frag_t *frag; + u32 len_stat; int ret; int i; @@ -1592,29 +1562,53 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev) } } - dma_desc_flags = DMA_SOP; - if (nr_frags == 0) - dma_desc_flags |= DMA_EOP; + for (i = 0; i <= nr_frags; i++) { + tx_cb_ptr = bcmgenet_get_txcb(priv, ring); - /* Transmit single SKB or head of fragment list */ - ret = bcmgenet_xmit_single(dev, skb, dma_desc_flags, ring); - if (ret) { - ret = NETDEV_TX_OK; - goto out; - } + if (unlikely(!tx_cb_ptr)) + BUG(); + + if (!i) { + /* Transmit single SKB or head of fragment list */ + GENET_CB(skb)->first_cb = tx_cb_ptr; + size = skb_headlen(skb); + mapping = dma_map_single(kdev, skb->data, size, + DMA_TO_DEVICE); + } else { + /* xmit fragment */ + frag = &skb_shinfo(skb)->frags[i - 1]; + size = skb_frag_size(frag); + mapping = skb_frag_dma_map(kdev, frag, 0, size, + DMA_TO_DEVICE); + } - /* xmit fragment */ - for (i = 0; i < nr_frags; i++) { - ret = bcmgenet_xmit_frag(dev, - &skb_shinfo(skb)->frags[i], - (i == nr_frags - 1) ? 
DMA_EOP : 0, - ring); + ret = dma_mapping_error(kdev, mapping); if (ret) { + priv->mib.tx_dma_failed++; + netif_err(priv, tx_err, dev, "Tx DMA map failed\n"); ret = NETDEV_TX_OK; - goto out; + goto out_unmap_frags; + } + dma_unmap_addr_set(tx_cb_ptr, dma_addr, mapping); + dma_unmap_len_set(tx_cb_ptr, dma_len, size); + + tx_cb_ptr->skb = skb; + + len_stat = (size << DMA_BUFLENGTH_SHIFT) | + (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT); + + if (!i) { + len_stat |= DMA_TX_APPEND_CRC | DMA_SOP; + if (skb->ip_summed == CHECKSUM_PARTIAL) + len_stat |= DMA_TX_DO_CSUM; } + if (i == nr_frags) + len_stat |= DMA_EOP; + + dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping, len_stat); } + GENET_CB(skb)->last_cb = tx_cb_ptr; skb_tx_timestamp(skb); /* Decrement total BD count and advance our write pointer */ @@ -1635,6 +1629,19 @@ out: spin_unlock_irqrestore(&ring->lock, flags); return ret; + +out_unmap_frags: + /* Back up for failed control block mapping */ + bcmgenet_put_txcb(priv, ring); + + /* Unmap successfully mapped control blocks */ + while (i-- > 0) { + tx_cb_ptr = bcmgenet_put_txcb(priv, ring); + bcmgenet_free_tx_cb(kdev, tx_cb_ptr); + } + + dev_kfree_skb(skb); + goto out; } static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, @@ -1666,14 +1673,12 @@ static struct sk_buff *bcmgenet_rx_refill(struct bcmgenet_priv *priv, } /* Grab the current Rx skb from the ring and DMA-unmap it */ - rx_skb = cb->skb; - if (likely(rx_skb)) - dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr), - priv->rx_buf_len, DMA_FROM_DEVICE); + rx_skb = bcmgenet_free_rx_cb(kdev, cb); /* Put the new Rx skb on the ring */ cb->skb = skb; dma_unmap_addr_set(cb, dma_addr, mapping); + dma_unmap_len_set(cb, dma_len, priv->rx_buf_len); dmadesc_set_addr(priv, cb->bd_addr, mapping); /* Return the current Rx skb to caller */ @@ -1880,22 +1885,16 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv, static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv) { - struct device *kdev = &priv->pdev->dev; + struct sk_buff *skb; struct enet_cb *cb; int i; for (i = 0; i < priv->num_rx_bds; i++) { cb = &priv->rx_cbs[i]; - if (dma_unmap_addr(cb, dma_addr)) { - dma_unmap_single(kdev, - dma_unmap_addr(cb, dma_addr), - priv->rx_buf_len, DMA_FROM_DEVICE); - dma_unmap_addr_set(cb, dma_addr, 0); - } - - if (cb->skb) - bcmgenet_free_cb(cb); + skb = bcmgenet_free_rx_cb(&priv->pdev->dev, cb); + if (skb) + dev_kfree_skb_any(skb); } } @@ -2479,8 +2478,10 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv) static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) { - int i; struct netdev_queue *txq; + struct sk_buff *skb; + struct enet_cb *cb; + int i; bcmgenet_fini_rx_napi(priv); bcmgenet_fini_tx_napi(priv); @@ -2489,10 +2490,10 @@ static void bcmgenet_fini_dma(struct bcmgenet_priv *priv) bcmgenet_dma_teardown(priv); for (i = 0; i < priv->num_tx_bds; i++) { - if (priv->tx_cbs[i].skb != NULL) { - dev_kfree_skb(priv->tx_cbs[i].skb); - priv->tx_cbs[i].skb = NULL; - } + cb = priv->tx_cbs + i; + skb = bcmgenet_free_tx_cb(&priv->pdev->dev, cb); + if (skb) + dev_kfree_skb(skb); } for (i = 0; i < priv->hw_params->tx_queues; i++) { diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h index efd07020b89f..b9344de669f8 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h @@ -544,6 +544,8 @@ struct bcmgenet_hw_params { }; struct bcmgenet_skb_cb { + struct enet_cb *first_cb; /* First control block of SKB */ 
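The new out_unmap_frags label above is a textbook partial-failure unwind: when mapping descriptor i fails, the ring's write pointer is rewound once for the slot that was claimed but never mapped, then once per mapped slot while each is unmapped in reverse order, and only then is the skb freed. The generic shape, with callbacks standing in for the DMA and ring operations:

/* Sketch: map head + nr fragments; on failure undo in reverse. */
static int map_all(int nr, int (*map_one)(int), void (*unmap_one)(int))
{
	int i;

	for (i = 0; i <= nr; i++) {	/* slot 0 is the head, as in the driver */
		if (map_one(i) != 0)
			goto unwind;
	}
	return 0;

unwind:
	/* slot i was claimed but never mapped: release it first
	 * (bcmgenet_put_txcb() in the real code), then walk back
	 */
	while (i-- > 0)
		unmap_one(i);	/* fully-mapped slots, last first */
	return -1;
}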
+ struct enet_cb *last_cb; /* Last control block of SKB */ unsigned int bytes_sent; /* bytes on the wire (no TSB) */ }; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 28ecda3d3404..ebd353bc78ff 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -335,7 +335,7 @@ lio_ethtool_get_channels(struct net_device *dev, static int lio_get_eeprom_len(struct net_device *netdev) { - u8 buf[128]; + u8 buf[192]; struct lio *lio = GET_LIO(netdev); struct octeon_device *oct_dev = lio->oct_dev; struct octeon_board_info *board_info; diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index a0ca68ce3fbb..79112563a25a 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -1008,7 +1008,7 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid) { struct device *dev = &bgx->pdev->dev; struct lmac *lmac; - char str[20]; + char str[27]; if (!bgx->is_dlm && lmacid) return; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index 86f92e31e8aa..e403fa18f1b1 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -2083,12 +2083,12 @@ static void detach_ulds(struct adapter *adap) mutex_lock(&uld_mutex); list_del(&adap->list_node); + for (i = 0; i < CXGB4_ULD_MAX; i++) - if (adap->uld && adap->uld[i].handle) { + if (adap->uld && adap->uld[i].handle) adap->uld[i].state_change(adap->uld[i].handle, CXGB4_STATE_DETACH); - adap->uld[i].handle = NULL; - } + if (netevent_registered && list_empty(&adapter_list)) { unregister_netevent_notifier(&cxgb4_netevent_nb); netevent_registered = false; @@ -5303,8 +5303,10 @@ static void remove_one(struct pci_dev *pdev) */ destroy_workqueue(adapter->workq); - if (is_uld(adapter)) + if (is_uld(adapter)) { detach_ulds(adapter); + t4_uld_clean_up(adapter); + } disable_interrupts(adapter); @@ -5385,7 +5387,11 @@ static void shutdown_one(struct pci_dev *pdev) if (adapter->port[i]->reg_state == NETREG_REGISTERED) cxgb_close(adapter->port[i]); - t4_uld_clean_up(adapter); + if (is_uld(adapter)) { + detach_ulds(adapter); + t4_uld_clean_up(adapter); + } + disable_interrupts(adapter); disable_msi(adapter); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c index 50517cfd9671..9f9d6cae39d5 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ptp.c @@ -441,7 +441,8 @@ void cxgb4_ptp_init(struct adapter *adapter) adapter->ptp_clock = ptp_clock_register(&adapter->ptp_clock_info, &adapter->pdev->dev); - if (!adapter->ptp_clock) { + if (IS_ERR_OR_NULL(adapter->ptp_clock)) { + adapter->ptp_clock = NULL; dev_err(adapter->pdev_dev, "PTP %s Clock registration has failed\n", __func__); return; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c index ec53fe9dec68..71a315bc1409 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.c @@ -589,22 +589,37 @@ void t4_uld_mem_free(struct adapter *adap) kfree(adap->uld); } +/* This function should be called with uld_mutex taken. 
*/ +static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type) +{ + if (adap->uld[type].handle) { + adap->uld[type].handle = NULL; + adap->uld[type].add = NULL; + release_sge_txq_uld(adap, type); + + if (adap->flags & FULL_INIT_DONE) + quiesce_rx_uld(adap, type); + + if (adap->flags & USING_MSIX) + free_msix_queue_irqs_uld(adap, type); + + free_sge_queues_uld(adap, type); + free_queues_uld(adap, type); + } +} + void t4_uld_clean_up(struct adapter *adap) { unsigned int i; - if (!adap->uld) - return; + mutex_lock(&uld_mutex); for (i = 0; i < CXGB4_ULD_MAX; i++) { if (!adap->uld[i].handle) continue; - if (adap->flags & FULL_INIT_DONE) - quiesce_rx_uld(adap, i); - if (adap->flags & USING_MSIX) - free_msix_queue_irqs_uld(adap, i); - free_sge_queues_uld(adap, i); - free_queues_uld(adap, i); + + cxgb4_shutdown_uld_adapter(adap, i); } + mutex_unlock(&uld_mutex); } static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld) @@ -783,15 +798,8 @@ int cxgb4_unregister_uld(enum cxgb4_uld type) continue; if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip)) continue; - adap->uld[type].handle = NULL; - adap->uld[type].add = NULL; - release_sge_txq_uld(adap, type); - if (adap->flags & FULL_INIT_DONE) - quiesce_rx_uld(adap, type); - if (adap->flags & USING_MSIX) - free_msix_queue_irqs_uld(adap, type); - free_sge_queues_uld(adap, type); - free_queues_uld(adap, type); + + cxgb4_shutdown_uld_adapter(adap, type); } mutex_unlock(&uld_mutex); diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 99987d8e437e..aa28299aef5f 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -174,6 +174,8 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x50a0), /* Custom T540-CR */ CH_PCI_ID_TABLE_FENTRY(0x50a1), /* Custom T540-CR */ CH_PCI_ID_TABLE_FENTRY(0x50a2), /* Custom T540-KR4 */ + CH_PCI_ID_TABLE_FENTRY(0x50a3), /* Custom T580-KR4 */ + CH_PCI_ID_TABLE_FENTRY(0x50a4), /* Custom 2x T540-CR */ /* T6 adapters: */ diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c index 1841ad45d215..39bad67422dd 100644 --- a/drivers/net/ethernet/cisco/enic/vnic_dev.c +++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c @@ -402,8 +402,8 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ vdev_err(vdev, "Fatal error in devcmd2 init - hardware surprise removal\n"); - - return -ENODEV; + err = -ENODEV; + goto err_free_wq; } enic_wq_init_start(&vdev->devcmd2->wq, 0, fetch_index, fetch_index, 0, @@ -414,7 +414,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring, DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); if (err) - goto err_free_wq; + goto err_disable_wq; vdev->devcmd2->result = vdev->devcmd2->results_ring.descs; vdev->devcmd2->cmd_ring = vdev->devcmd2->wq.ring.descs; @@ -433,8 +433,9 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) err_free_desc_ring: vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); -err_free_wq: +err_disable_wq: vnic_wq_disable(&vdev->devcmd2->wq); +err_free_wq: vnic_wq_free(&vdev->devcmd2->wq); err_free_devcmd2: kfree(vdev->devcmd2); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c index 
ff864a187d5a..a37166ee577b 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ae_adapt.c @@ -776,8 +776,9 @@ void hns_ae_update_led_status(struct hnae_handle *handle) assert(handle); mac_cb = hns_get_mac_cb(handle); - if (!mac_cb->cpld_ctrl) + if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER) return; + hns_set_led_opt(mac_cb); } diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index 7a8addda726e..408b63faf9a8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -53,6 +53,34 @@ static u32 dsaf_read_sub(struct dsaf_device *dsaf_dev, u32 reg) return ret; } +static void hns_dsaf_acpi_ledctrl_by_port(struct hns_mac_cb *mac_cb, u8 op_type, + u32 link, u32 port, u32 act) +{ + union acpi_object *obj; + union acpi_object obj_args[3], argv4; + + obj_args[0].integer.type = ACPI_TYPE_INTEGER; + obj_args[0].integer.value = link; + obj_args[1].integer.type = ACPI_TYPE_INTEGER; + obj_args[1].integer.value = port; + obj_args[2].integer.type = ACPI_TYPE_INTEGER; + obj_args[2].integer.value = act; + + argv4.type = ACPI_TYPE_PACKAGE; + argv4.package.count = 3; + argv4.package.elements = obj_args; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(mac_cb->dev), + &hns_dsaf_acpi_dsm_guid, 0, op_type, &argv4); + if (!obj) { + dev_warn(mac_cb->dev, "ledctrl fail, link:%d port:%d act:%d!\n", + link, port, act); + return; + } + + ACPI_FREE(obj); +} + static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, u16 speed, int data) { @@ -93,6 +121,18 @@ static void hns_cpld_set_led(struct hns_mac_cb *mac_cb, int link_status, } } +static void hns_cpld_set_led_acpi(struct hns_mac_cb *mac_cb, int link_status, + u16 speed, int data) +{ + if (!mac_cb) { + pr_err("cpld_led_set mac_cb is null!\n"); + return; + } + + hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC, + link_status, mac_cb->mac_id, data); +} + static void cpld_led_reset(struct hns_mac_cb *mac_cb) { if (!mac_cb || !mac_cb->cpld_ctrl) @@ -103,6 +143,20 @@ static void cpld_led_reset(struct hns_mac_cb *mac_cb) mac_cb->cpld_led_value = CPLD_LED_DEFAULT_VALUE; } +static void cpld_led_reset_acpi(struct hns_mac_cb *mac_cb) +{ + if (!mac_cb) { + pr_err("cpld_led_reset mac_cb is null!\n"); + return; + } + + if (mac_cb->media_type != HNAE_MEDIA_TYPE_FIBER) + return; + + hns_dsaf_acpi_ledctrl_by_port(mac_cb, HNS_OP_LED_SET_FUNC, + 0, mac_cb->mac_id, 0); +} + static int cpld_set_led_id(struct hns_mac_cb *mac_cb, enum hnae_led_state status) { @@ -604,8 +658,8 @@ struct dsaf_misc_op *hns_misc_op_get(struct dsaf_device *dsaf_dev) misc_op->cfg_serdes_loopback = hns_mac_config_sds_loopback; } else if (is_acpi_node(dsaf_dev->dev->fwnode)) { - misc_op->cpld_set_led = hns_cpld_set_led; - misc_op->cpld_reset_led = cpld_led_reset; + misc_op->cpld_set_led = hns_cpld_set_led_acpi; + misc_op->cpld_reset_led = cpld_led_reset_acpi; misc_op->cpld_set_led_id = cpld_set_led_id; misc_op->dsaf_reset = hns_dsaf_rst_acpi; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index fe166e0f6781..3987699f8fe6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1378,13 +1378,20 @@ void hns_nic_net_reset(struct net_device *ndev) void hns_nic_net_reinit(struct net_device *netdev) { struct hns_nic_priv *priv = netdev_priv(netdev); + enum hnae_port_type type = 
priv->ae_handle->port_type; netif_trans_update(priv->netdev); while (test_and_set_bit(NIC_STATE_REINITING, &priv->state)) usleep_range(1000, 2000); hns_nic_net_down(netdev); - hns_nic_net_reset(netdev); + + /* Only do hns_nic_net_reset in debug mode + * because of hardware limitation. + */ + if (type == HNAE_PORT_DEBUG) + hns_nic_net_reset(netdev); + (void)hns_nic_net_up(netdev); clear_bit(NIC_STATE_REINITING, &priv->state); } @@ -1997,13 +2004,8 @@ static void hns_nic_reset_subtask(struct hns_nic_priv *priv) rtnl_lock(); /* put off any impending NetWatchDogTimeout */ netif_trans_update(priv->netdev); + hns_nic_net_reinit(priv->netdev); - if (type == HNAE_PORT_DEBUG) { - hns_nic_net_reinit(priv->netdev); - } else { - netif_carrier_off(priv->netdev); - netif_tx_disable(priv->netdev); - } rtnl_unlock(); } diff --git a/drivers/net/ethernet/mellanox/mlx4/alloc.c b/drivers/net/ethernet/mellanox/mlx4/alloc.c index 249a4584401a..b651c1210555 100644 --- a/drivers/net/ethernet/mellanox/mlx4/alloc.c +++ b/drivers/net/ethernet/mellanox/mlx4/alloc.c @@ -283,7 +283,7 @@ int mlx4_zone_add_one(struct mlx4_zone_allocator *zone_alloc, } /* Should be called under a lock */ -static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry) +static void __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry) { struct mlx4_zone_allocator *zone_alloc = entry->allocator; @@ -315,8 +315,6 @@ static int __mlx4_zone_remove_one_entry(struct mlx4_zone_entry *entry) } zone_alloc->mask = mask; } - - return 0; } void mlx4_zone_allocator_destroy(struct mlx4_zone_allocator *zone_alloc) @@ -457,7 +455,7 @@ struct mlx4_bitmap *mlx4_zone_get_bitmap(struct mlx4_zone_allocator *zones, u32 int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid) { struct mlx4_zone_entry *zone; - int res; + int res = 0; spin_lock(&zones->lock); @@ -468,7 +466,7 @@ int mlx4_zone_remove_one(struct mlx4_zone_allocator *zones, u32 uid) goto out; } - res = __mlx4_zone_remove_one_entry(zone); + __mlx4_zone_remove_one_entry(zone); out: spin_unlock(&zones->lock); @@ -578,7 +576,7 @@ out: } static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size, - struct mlx4_buf *buf, gfp_t gfp) + struct mlx4_buf *buf) { dma_addr_t t; @@ -587,7 +585,7 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size, buf->page_shift = get_order(size) + PAGE_SHIFT; buf->direct.buf = dma_zalloc_coherent(&dev->persist->pdev->dev, - size, &t, gfp); + size, &t, GFP_KERNEL); if (!buf->direct.buf) return -ENOMEM; @@ -607,10 +605,10 @@ static int mlx4_buf_direct_alloc(struct mlx4_dev *dev, int size, * multiple pages, so we don't require too much contiguous memory. 
*/ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, - struct mlx4_buf *buf, gfp_t gfp) + struct mlx4_buf *buf) { if (size <= max_direct) { - return mlx4_buf_direct_alloc(dev, size, buf, gfp); + return mlx4_buf_direct_alloc(dev, size, buf); } else { dma_addr_t t; int i; @@ -620,14 +618,14 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, buf->npages = buf->nbufs; buf->page_shift = PAGE_SHIFT; buf->page_list = kcalloc(buf->nbufs, sizeof(*buf->page_list), - gfp); + GFP_KERNEL); if (!buf->page_list) return -ENOMEM; for (i = 0; i < buf->nbufs; ++i) { buf->page_list[i].buf = dma_zalloc_coherent(&dev->persist->pdev->dev, - PAGE_SIZE, &t, gfp); + PAGE_SIZE, &t, GFP_KERNEL); if (!buf->page_list[i].buf) goto err_free; @@ -663,12 +661,11 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) } EXPORT_SYMBOL_GPL(mlx4_buf_free); -static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device, - gfp_t gfp) +static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device) { struct mlx4_db_pgdir *pgdir; - pgdir = kzalloc(sizeof *pgdir, gfp); + pgdir = kzalloc(sizeof(*pgdir), GFP_KERNEL); if (!pgdir) return NULL; @@ -676,7 +673,7 @@ static struct mlx4_db_pgdir *mlx4_alloc_db_pgdir(struct device *dma_device, pgdir->bits[0] = pgdir->order0; pgdir->bits[1] = pgdir->order1; pgdir->db_page = dma_alloc_coherent(dma_device, PAGE_SIZE, - &pgdir->db_dma, gfp); + &pgdir->db_dma, GFP_KERNEL); if (!pgdir->db_page) { kfree(pgdir); return NULL; @@ -716,7 +713,7 @@ found: return 0; } -int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp) +int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_db_pgdir *pgdir; @@ -728,7 +725,7 @@ int mlx4_db_alloc(struct mlx4_dev *dev, struct mlx4_db *db, int order, gfp_t gfp if (!mlx4_alloc_db_from_pgdir(pgdir, db, order)) goto out; - pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev, gfp); + pgdir = mlx4_alloc_db_pgdir(&dev->persist->pdev->dev); if (!pgdir) { ret = -ENOMEM; goto out; @@ -780,13 +777,13 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, { int err; - err = mlx4_db_alloc(dev, &wqres->db, 1, GFP_KERNEL); + err = mlx4_db_alloc(dev, &wqres->db, 1); if (err) return err; *wqres->db.db = 0; - err = mlx4_buf_direct_alloc(dev, size, &wqres->buf, GFP_KERNEL); + err = mlx4_buf_direct_alloc(dev, size, &wqres->buf); if (err) goto err_db; @@ -795,7 +792,7 @@ int mlx4_alloc_hwq_res(struct mlx4_dev *dev, struct mlx4_hwq_resources *wqres, if (err) goto err_buf; - err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL); + err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf); if (err) goto err_mtt; diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index fa6d2354a0e9..c56a511b918e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -224,11 +224,11 @@ int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn) if (*cqn == -1) return -ENOMEM; - err = mlx4_table_get(dev, &cq_table->table, *cqn, GFP_KERNEL); + err = mlx4_table_get(dev, &cq_table->table, *cqn); if (err) goto err_out; - err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn, GFP_KERNEL); + err = mlx4_table_get(dev, &cq_table->cmpt_table, *cqn); if (err) goto err_put; return 0; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index e5fb89505a13..436f7689a032 100644 
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -1042,7 +1042,7 @@ static int mlx4_en_config_rss_qp(struct mlx4_en_priv *priv, int qpn, if (!context) return -ENOMEM; - err = mlx4_qp_alloc(mdev->dev, qpn, qp, GFP_KERNEL); + err = mlx4_qp_alloc(mdev->dev, qpn, qp); if (err) { en_err(priv, "Failed to allocate qp #%x\n", qpn); goto out; @@ -1086,7 +1086,7 @@ int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv) en_err(priv, "Failed reserving drop qpn\n"); return err; } - err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp, GFP_KERNEL); + err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp); if (err) { en_err(priv, "Failed allocating drop qp\n"); mlx4_qp_release_range(priv->mdev->dev, qpn, 1); @@ -1158,8 +1158,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) } /* Configure RSS indirection qp */ - err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp, - GFP_KERNEL); + err = mlx4_qp_alloc(mdev->dev, priv->base_qpn, rss_map->indir_qp); if (err) { en_err(priv, "Failed to allocate RSS indirection QP\n"); goto rss_err; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 4f3a9b27ce4a..73faa3d77921 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -111,7 +111,7 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, goto err_hwq_res; } - err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp, GFP_KERNEL); + err = mlx4_qp_alloc(mdev->dev, ring->qpn, &ring->sp_qp); if (err) { en_err(priv, "Failed allocating qp %d\n", ring->qpn); goto err_reserve; diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.c b/drivers/net/ethernet/mellanox/mlx4/icm.c index e1f9e7cebf8f..5a7816e7c7b4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.c +++ b/drivers/net/ethernet/mellanox/mlx4/icm.c @@ -251,8 +251,7 @@ int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev) MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE); } -int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, - gfp_t gfp) +int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj) { u32 i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size); @@ -266,7 +265,7 @@ int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, } table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT, - (table->lowmem ? gfp : GFP_HIGHUSER) | + (table->lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | __GFP_NOWARN, table->coherent); if (!table->icm[i]) { ret = -ENOMEM; @@ -363,7 +362,7 @@ int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 i; for (i = start; i <= end; i += inc) { - err = mlx4_table_get(dev, table, i, GFP_KERNEL); + err = mlx4_table_get(dev, table, i); if (err) goto fail; } diff --git a/drivers/net/ethernet/mellanox/mlx4/icm.h b/drivers/net/ethernet/mellanox/mlx4/icm.h index 0c7364550150..dee67fa39107 100644 --- a/drivers/net/ethernet/mellanox/mlx4/icm.h +++ b/drivers/net/ethernet/mellanox/mlx4/icm.h @@ -71,8 +71,7 @@ struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages, gfp_t gfp_mask, int coherent); void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent); -int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj, - gfp_t gfp); +int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj); void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj); int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 start, u32 end); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index 30616cd0140d..706d7f21ac5c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -969,7 +969,7 @@ void mlx4_cleanup_cq_table(struct mlx4_dev *dev); void mlx4_cleanup_qp_table(struct mlx4_dev *dev); void mlx4_cleanup_srq_table(struct mlx4_dev *dev); void mlx4_cleanup_mcg_table(struct mlx4_dev *dev); -int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp); +int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn); void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn); int __mlx4_cq_alloc_icm(struct mlx4_dev *dev, int *cqn); void __mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn); @@ -977,7 +977,7 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn); void __mlx4_srq_free_icm(struct mlx4_dev *dev, int srqn); int __mlx4_mpt_reserve(struct mlx4_dev *dev); void __mlx4_mpt_release(struct mlx4_dev *dev, u32 index); -int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp); +int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index); void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 index); u32 __mlx4_alloc_mtt_range(struct mlx4_dev *dev, int order); void __mlx4_free_mtt_range(struct mlx4_dev *dev, u32 first_seg, int order); diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index ce852ca22a96..24282cd017d3 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -479,14 +479,14 @@ static void mlx4_mpt_release(struct mlx4_dev *dev, u32 index) __mlx4_mpt_release(dev, index); } -int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp) +int __mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) { struct mlx4_mr_table *mr_table = &mlx4_priv(dev)->mr_table; - return mlx4_table_get(dev, &mr_table->dmpt_table, index, gfp); + return mlx4_table_get(dev, &mr_table->dmpt_table, index); } -static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp) +static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index) { u64 param = 0; @@ -497,7 +497,7 @@ static int mlx4_mpt_alloc_icm(struct mlx4_dev *dev, u32 index, gfp_t gfp) MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } - return __mlx4_mpt_alloc_icm(dev, index, gfp); + return __mlx4_mpt_alloc_icm(dev, index); } void __mlx4_mpt_free_icm(struct mlx4_dev *dev, u32 
index) @@ -629,7 +629,7 @@ int mlx4_mr_enable(struct mlx4_dev *dev, struct mlx4_mr *mr) struct mlx4_mpt_entry *mpt_entry; int err; - err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key), GFP_KERNEL); + err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mr->key)); if (err) return err; @@ -787,14 +787,13 @@ int mlx4_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, EXPORT_SYMBOL_GPL(mlx4_write_mtt); int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, - struct mlx4_buf *buf, gfp_t gfp) + struct mlx4_buf *buf) { u64 *page_list; int err; int i; - page_list = kmalloc(buf->npages * sizeof *page_list, - gfp); + page_list = kcalloc(buf->npages, sizeof(*page_list), GFP_KERNEL); if (!page_list) return -ENOMEM; @@ -841,7 +840,7 @@ int mlx4_mw_enable(struct mlx4_dev *dev, struct mlx4_mw *mw) struct mlx4_mpt_entry *mpt_entry; int err; - err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key), GFP_KERNEL); + err = mlx4_mpt_alloc_icm(dev, key_to_hw_index(mw->key)); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 5a310d313e94..26747212526b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -301,29 +301,29 @@ void mlx4_qp_release_range(struct mlx4_dev *dev, int base_qpn, int cnt) } EXPORT_SYMBOL_GPL(mlx4_qp_release_range); -int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp) +int __mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; int err; - err = mlx4_table_get(dev, &qp_table->qp_table, qpn, gfp); + err = mlx4_table_get(dev, &qp_table->qp_table, qpn); if (err) goto err_out; - err = mlx4_table_get(dev, &qp_table->auxc_table, qpn, gfp); + err = mlx4_table_get(dev, &qp_table->auxc_table, qpn); if (err) goto err_put_qp; - err = mlx4_table_get(dev, &qp_table->altc_table, qpn, gfp); + err = mlx4_table_get(dev, &qp_table->altc_table, qpn); if (err) goto err_put_auxc; - err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn, gfp); + err = mlx4_table_get(dev, &qp_table->rdmarc_table, qpn); if (err) goto err_put_altc; - err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn, gfp); + err = mlx4_table_get(dev, &qp_table->cmpt_table, qpn); if (err) goto err_put_rdmarc; @@ -345,7 +345,7 @@ err_out: return err; } -static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp) +static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn) { u64 param = 0; @@ -355,7 +355,7 @@ static int mlx4_qp_alloc_icm(struct mlx4_dev *dev, int qpn, gfp_t gfp) MLX4_CMD_ALLOC_RES, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } - return __mlx4_qp_alloc_icm(dev, qpn, gfp); + return __mlx4_qp_alloc_icm(dev, qpn); } void __mlx4_qp_free_icm(struct mlx4_dev *dev, int qpn) @@ -397,7 +397,7 @@ struct mlx4_qp *mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn) return qp; } -int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) +int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp) { struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_qp_table *qp_table = &priv->qp_table; @@ -408,7 +408,7 @@ int mlx4_qp_alloc(struct mlx4_dev *dev, int qpn, struct mlx4_qp *qp, gfp_t gfp) qp->qpn = qpn; - err = mlx4_qp_alloc_icm(dev, qpn, gfp); + err = mlx4_qp_alloc_icm(dev, qpn); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 812783865205..215e21c3dc8a 100644 --- 
a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1822,7 +1822,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 			return err;
 
 		if (!fw_reserved(dev, qpn)) {
-			err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
+			err = __mlx4_qp_alloc_icm(dev, qpn);
 			if (err) {
 				res_abort_move(dev, slave, RES_QP, qpn);
 				return err;
@@ -1909,7 +1909,7 @@ static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
 		if (err)
 			return err;
 
-		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
+		err = __mlx4_mpt_alloc_icm(dev, mpt->key);
 		if (err) {
 			res_abort_move(dev, slave, RES_MPT, id);
 			return err;
diff --git a/drivers/net/ethernet/mellanox/mlx4/srq.c b/drivers/net/ethernet/mellanox/mlx4/srq.c
index f44d089e2ca6..bedf52126824 100644
--- a/drivers/net/ethernet/mellanox/mlx4/srq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/srq.c
@@ -100,11 +100,11 @@ int __mlx4_srq_alloc_icm(struct mlx4_dev *dev, int *srqn)
 	if (*srqn == -1)
 		return -ENOMEM;
 
-	err = mlx4_table_get(dev, &srq_table->table, *srqn, GFP_KERNEL);
+	err = mlx4_table_get(dev, &srq_table->table, *srqn);
 	if (err)
 		goto err_out;
 
-	err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn, GFP_KERNEL);
+	err = mlx4_table_get(dev, &srq_table->cmpt_table, *srqn);
 	if (err)
 		goto err_put;
 	return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
index ca367445f864..9d17e4e76d3a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile
@@ -4,14 +4,14 @@ subdir-ccflags-y += -I$(src)
 mlx5_core-y :=	main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \
 		health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \
 		mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \
-		fs_counters.o rl.o lag.o dev.o lib/gid.o
+		fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o
 
 mlx5_core-$(CONFIG_MLX5_ACCEL) += accel/ipsec.o
 
 mlx5_core-$(CONFIG_MLX5_FPGA) += fpga/cmd.o fpga/core.o fpga/conn.o fpga/sdk.o \
 		fpga/ipsec.o
 
-mlx5_core-$(CONFIG_MLX5_CORE_EN) += wq.o eswitch.o eswitch_offloads.o \
+mlx5_core-$(CONFIG_MLX5_CORE_EN) += eswitch.o eswitch_offloads.o \
 		en_main.o en_common.o en_fs.o en_ethtool.o en_tx.o \
 		en_rx.o en_rx_am.o en_txrx.o en_clock.o vxlan.o \
 		en_tc.o en_arfs.o en_rep.o en_fs_ethtool.o en_selftest.o
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/accel/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/accel/Makefile
new file mode 100644
index 000000000000..d8e17110f25d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/accel/Makefile
@@ -0,0 +1 @@
+subdir-ccflags-y += -I$(src)/..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/Makefile
new file mode 100644
index 000000000000..d8e17110f25d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/Makefile
@@ -0,0 +1 @@
+subdir-ccflags-y += -I$(src)/..
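The mlx4 hunks above are one mechanical refactor: every caller of the ICM table helpers passed GFP_KERNEL, so the gfp_t parameter carried no information and is dropped along the whole call chain (mlx4_table_get, __mlx4_qp_alloc_icm, __mlx4_mpt_alloc_icm, mlx4_db_alloc, mlx4_buf_alloc and friends). A minimal sketch of the pattern, with hypothetical names (foo_table_get and FOO_CHUNK_SIZE are illustrative, not driver API):

#include <linux/slab.h>

#define FOO_CHUNK_SIZE 4096	/* illustrative chunk size */

struct foo_table {
	void *chunk;
};

/* Before: the flag is threaded through even though every caller
 * passes GFP_KERNEL. */
static int foo_table_get_old(struct foo_table *tbl, gfp_t gfp)
{
	tbl->chunk = kzalloc(FOO_CHUNK_SIZE, gfp);
	return tbl->chunk ? 0 : -ENOMEM;
}

/* After: the parameter is gone and the allocation context is stated
 * once, at the allocation site. */
static int foo_table_get(struct foo_table *tbl)
{
	tbl->chunk = kzalloc(FOO_CHUNK_SIZE, GFP_KERNEL);
	return tbl->chunk ? 0 : -ENOMEM;
}

Narrowing the signatures this way also lets mlx4_table_get choose between GFP_KERNEL and GFP_HIGHUSER internally (see the icm.c hunk above) rather than trusting each caller to pass a sensible flag.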
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
index 4a78aefdf157..4614ddfa91eb 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c
@@ -372,7 +372,7 @@ void mlx5e_ipsec_build_inverse_table(void)
 	 */
 	mlx5e_ipsec_inverse_table[1] = htons(0xFFFF);
 	for (mss = 2; mss < MAX_LSO_MSS; mss++) {
-		mss_inv = ((1ULL << 32) / mss) >> 16;
+		mss_inv = div_u64(1ULL << 32, mss) >> 16;
 		mlx5e_ipsec_inverse_table[mss] = htons(mss_inv);
 	}
 }
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 8fa23f6a1f67..2eb54d36e16e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -464,6 +464,8 @@ static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
 	if (!perm_addr)
 		return;
 
+	memset(perm_addr, 0xff, MAX_ADDR_LEN);
+
 	mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/fpga/Makefile
new file mode 100644
index 000000000000..d8e17110f25d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/Makefile
@@ -0,0 +1 @@
+subdir-ccflags-y += -I$(src)/..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
index 31e5a2627eb8..9034e9960a76 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c
@@ -102,7 +102,7 @@ static int mlx5_fpga_device_load_check(struct mlx5_fpga_device *fdev)
 	return 0;
 }
 
-int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev)
+static int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev)
 {
 	int err;
 	struct mlx5_core_dev *mdev = fdev->mdev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
index 42970e2a05ff..35d0e33381ca 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c
@@ -275,7 +275,7 @@ int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
 {
 	struct mlx5_fpga_device *fdev = mdev->fpga;
 	unsigned int i;
-	u32 *data;
+	__be32 *data;
 	u32 count;
 	u64 addr;
 	int ret;
@@ -290,7 +290,7 @@ int mlx5_fpga_ipsec_counters_read(struct mlx5_core_dev *mdev, u64 *counters,
 	count = mlx5_fpga_ipsec_counters_count(mdev);
 
-	data = kzalloc(sizeof(u32) * count * 2, GFP_KERNEL);
+	data = kzalloc(sizeof(*data) * count * 2, GFP_KERNEL);
 	if (!data) {
 		ret = -ENOMEM;
 		goto out;
 	}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/Makefile
new file mode 100644
index 000000000000..d8e17110f25d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/Makefile
@@ -0,0 +1 @@
+subdir-ccflags-y += -I$(src)/..
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/lib/Makefile
new file mode 100644
index 000000000000..d8e17110f25d
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/Makefile
@@ -0,0 +1 @@
+subdir-ccflags-y += -I$(src)/..
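The fpga/ipsec.c change above combines two small hardening idioms: the buffer that receives device words is declared __be32 so sparse flags any access that skips be32_to_cpu(), and the allocation is sized with sizeof(*data) so the byte count automatically follows the pointer's type. A hedged sketch of both, assuming an illustrative counter layout (foo_read_counter and the pairing of low/high words are assumptions, not the mlx5 wire format):

#include <linux/slab.h>
#include <linux/types.h>
#include <asm/byteorder.h>

/* Assume the device exports each 64-bit counter as two big-endian
 * 32-bit words, low word first; illustrative only. */
static u64 foo_read_counter(const __be32 *raw, unsigned int idx)
{
	u64 lo = be32_to_cpu(raw[2 * idx]);
	u64 hi = be32_to_cpu(raw[2 * idx + 1]);

	return (hi << 32) | lo;
}

static __be32 *foo_alloc_counter_buf(unsigned int count)
{
	/* sizeof(*data) stays correct even if data's type changes,
	 * which is exactly what happened in the hunk above. */
	__be32 *data = kzalloc(sizeof(*data) * count * 2, GFP_KERNEL);

	return data;
}

With the old u32 declaration nothing forced a byte swap at the use sites; annotating the buffer __be32 makes the endianness contract part of the type, where sparse can check it.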
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c index de2aed44ab85..573f59f46d41 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/gid.c @@ -34,6 +34,7 @@ #include <linux/etherdevice.h> #include <linux/idr.h> #include "mlx5_core.h" +#include "lib/mlx5.h" void mlx5_init_reserved_gids(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 192cb93e7669..383fef5a8e24 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -1790,6 +1790,7 @@ static int mlxsw_sp_nexthop_init(struct mlxsw_sp *mlxsw_sp, return 0; err_nexthop_neigh_init: + mlxsw_sp_nexthop_rif_fini(nh); mlxsw_sp_nexthop_remove(mlxsw_sp, nh); return err; } @@ -1866,6 +1867,7 @@ mlxsw_sp_nexthop_group_create(struct mlxsw_sp *mlxsw_sp, struct fib_info *fi) nh_grp->gateway = fi->fib_nh->nh_scope == RT_SCOPE_LINK; nh_grp->count = fi->fib_nhs; nh_grp->key.fi = fi; + fib_info_hold(fi); for (i = 0; i < nh_grp->count; i++) { nh = &nh_grp->nexthops[i]; fib_nh = &fi->fib_nh[i]; @@ -1885,6 +1887,7 @@ err_nexthop_init: nh = &nh_grp->nexthops[i]; mlxsw_sp_nexthop_fini(mlxsw_sp, nh); } + fib_info_put(nh_grp->key.fi); kfree(nh_grp); return ERR_PTR(err); } @@ -1903,6 +1906,7 @@ mlxsw_sp_nexthop_group_destroy(struct mlxsw_sp *mlxsw_sp, } mlxsw_sp_nexthop_group_refresh(mlxsw_sp, nh_grp); WARN_ON_ONCE(nh_grp->adj_index_valid); + fib_info_put(nh_grp->key.fi); kfree(nh_grp); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index cd89a3e6cd81..656b2d3f1bee 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -979,7 +979,6 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, { u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - struct mlxsw_sp_bridge_vlan *bridge_vlan; u16 old_pvid = mlxsw_sp_port->pvid; int err; @@ -1000,8 +999,6 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, if (err) goto err_port_vlan_bridge_join; - bridge_vlan = mlxsw_sp_bridge_vlan_find(bridge_port, vid); - return 0; err_port_vlan_bridge_join: @@ -1919,6 +1916,8 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, memcpy(&switchdev_work->fdb_info, ptr, sizeof(switchdev_work->fdb_info)); switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!switchdev_work->fdb_info.addr) + goto err_addr_alloc; ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, fdb_info->addr); /* Take a reference on the device. 
This can be either @@ -1935,6 +1934,10 @@ static int mlxsw_sp_switchdev_event(struct notifier_block *unused, mlxsw_core_schedule_work(&switchdev_work->work); return NOTIFY_DONE; + +err_addr_alloc: + kfree(switchdev_work); + return NOTIFY_BAD; } static struct notifier_block mlxsw_sp_switchdev_notifier = { diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index fec0ff2ca94f..3226ddc55f99 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -419,7 +419,7 @@ int nfp_flower_metadata_init(struct nfp_app *app) return 0; err_free_last_used: - kfree(priv->stats_ids.free_list.buf); + kfree(priv->mask_ids.last_used); err_free_mask_id: kfree(priv->mask_ids.mask_id_free_list.buf); return -ENOMEM; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index b251ebaec4db..9d989c96278c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -575,7 +575,7 @@ qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn, if (p_tcp_ramrod->tcp.ip_version == TCP_IPV4) { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "local_ip=%pI4h:%x, remote_ip=%pI4h%x, vlan=%x\n", + "local_ip=%pI4h:%x, remote_ip=%pI4h:%x, vlan=%x\n", p_tcp_ramrod->tcp.local_ip, p_tcp_ramrod->tcp.local_port, p_tcp_ramrod->tcp.remote_ip, @@ -583,7 +583,7 @@ qed_iwarp_print_tcp_ramrod(struct qed_hwfn *p_hwfn, p_tcp_ramrod->tcp.vlan_id); } else { DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "local_ip=%pI6h:%x, remote_ip=%pI6h:%x, vlan=%x\n", + "local_ip=%pI6:%x, remote_ip=%pI6:%x, vlan=%x\n", p_tcp_ramrod->tcp.local_ip, p_tcp_ramrod->tcp.local_port, p_tcp_ramrod->tcp.remote_ip, @@ -1519,7 +1519,7 @@ qed_iwarp_print_cm_info(struct qed_hwfn *p_hwfn, cm_info->vlan); else DP_VERBOSE(p_hwfn, QED_MSG_RDMA, - "remote_ip %pI6h:%x, local_ip %pI6h:%x vlan=%x\n", + "remote_ip %pI6:%x, local_ip %pI6:%x vlan=%x\n", cm_info->remote_ip, cm_info->remote_port, cm_info->local_ip, cm_info->local_port, cm_info->vlan); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index 746d94e28470..60850bfa3d32 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -766,11 +766,13 @@ static void emac_shutdown(struct platform_device *pdev) struct emac_adapter *adpt = netdev_priv(netdev); struct emac_sgmii *sgmii = &adpt->phy; - /* Closing the SGMII turns off its interrupts */ - sgmii->close(adpt); + if (netdev->flags & IFF_UP) { + /* Closing the SGMII turns off its interrupts */ + sgmii->close(adpt); - /* Resetting the MAC turns off all DMA and its interrupts */ - emac_mac_reset(adpt); + /* Resetting the MAC turns off all DMA and its interrupts */ + emac_mac_reset(adpt); + } } static struct platform_driver emac_platform_driver = { diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 761c518b2f92..13f72f5b18d2 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -5034,12 +5034,9 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) struct efx_ef10_filter_table *table = efx->filter_state; struct net_device *net_dev = efx->net_dev; struct netdev_hw_addr *uc; - int addr_count; unsigned int i; - addr_count = netdev_uc_count(net_dev); table->uc_promisc = !!(net_dev->flags & IFF_PROMISC); - table->dev_uc_count = 1 + addr_count; ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr); i = 1; 
netdev_for_each_uc_addr(uc, net_dev) { @@ -5050,6 +5047,8 @@ static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx) ether_addr_copy(table->dev_uc_list[i].addr, uc->addr); i++; } + + table->dev_uc_count = i; } static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) @@ -5057,12 +5056,11 @@ static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx) struct efx_ef10_filter_table *table = efx->filter_state; struct net_device *net_dev = efx->net_dev; struct netdev_hw_addr *mc; - unsigned int i, addr_count; + unsigned int i; table->mc_overflow = false; table->mc_promisc = !!(net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)); - addr_count = netdev_mc_count(net_dev); i = 0; netdev_for_each_mc_addr(mc, net_dev) { if (i >= EFX_EF10_FILTER_DEV_MC_MAX) { diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index b607936e1b3e..9c0488e0f08e 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -90,17 +90,13 @@ struct ioc3_private { spinlock_t ioc3_lock; struct mii_if_info mii; + struct net_device *dev; struct pci_dev *pdev; /* Members used by autonegotiation */ struct timer_list ioc3_timer; }; -static inline struct net_device *priv_netdev(struct ioc3_private *dev) -{ - return (void *)dev - ((sizeof(struct net_device) + 31) & ~31); -} - static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static void ioc3_set_multicast_list(struct net_device *dev); static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev); @@ -427,7 +423,7 @@ static void ioc3_get_eaddr_nic(struct ioc3_private *ip) nic[i] = nic_read_byte(ioc3); for (i = 2; i < 8; i++) - priv_netdev(ip)->dev_addr[i - 2] = nic[i]; + ip->dev->dev_addr[i - 2] = nic[i]; } /* @@ -439,7 +435,7 @@ static void ioc3_get_eaddr(struct ioc3_private *ip) { ioc3_get_eaddr_nic(ip); - printk("Ethernet address is %pM.\n", priv_netdev(ip)->dev_addr); + printk("Ethernet address is %pM.\n", ip->dev->dev_addr); } static void __ioc3_set_mac_address(struct net_device *dev) @@ -790,13 +786,12 @@ static void ioc3_timer(unsigned long data) */ static int ioc3_mii_init(struct ioc3_private *ip) { - struct net_device *dev = priv_netdev(ip); int i, found = 0, res = 0; int ioc3_phy_workaround = 1; u16 word; for (i = 0; i < 32; i++) { - word = ioc3_mdio_read(dev, i, MII_PHYSID1); + word = ioc3_mdio_read(ip->dev, i, MII_PHYSID1); if (word != 0xffff && word != 0x0000) { found = 1; @@ -1276,6 +1271,7 @@ static int ioc3_probe(struct pci_dev *pdev, const struct pci_device_id *ent) SET_NETDEV_DEV(dev, &pdev->dev); ip = netdev_priv(dev); + ip->dev = dev; dev->irq = pdev->irq; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index ea1bbc355b4d..0b6a39b003a4 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2467,6 +2467,10 @@ static int smsc911x_drv_probe(struct platform_device *pdev) pdata = netdev_priv(dev); dev->irq = irq; pdata->ioaddr = ioremap_nocache(res->start, res_size); + if (!pdata->ioaddr) { + retval = -ENOMEM; + goto out_ioremap_fail; + } pdata->dev = dev; pdata->msg_enable = ((1 << debug) - 1); @@ -2572,6 +2576,7 @@ out_enable_resources_fail: smsc911x_free_resources(pdev); out_request_resources_fail: iounmap(pdata->ioaddr); +out_ioremap_fail: free_netdev(dev); out_release_io_1: release_mem_region(res->start, resource_size(res)); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c index 6c2d1da05588..fffd6d5fc907 100644 
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c @@ -638,7 +638,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) { struct sunxi_priv_data *gmac = priv->plat->bsp_priv; struct device_node *node = priv->device->of_node; - int ret, phy_interface; + int ret; u32 reg, val; regmap_read(gmac->regmap, SYSCON_EMAC_REG, &val); @@ -718,11 +718,7 @@ static int sun8i_dwmac_set_syscon(struct stmmac_priv *priv) if (gmac->variant->support_rmii) reg &= ~SYSCON_RMII_EN; - phy_interface = priv->plat->interface; - /* if PHY is internal, select the mode (xMII) used by the SoC */ - if (gmac->use_internal_phy) - phy_interface = gmac->variant->internal_phy; - switch (phy_interface) { + switch (priv->plat->interface) { case PHY_INTERFACE_MODE_MII: /* default */ break; @@ -936,7 +932,7 @@ static int sun8i_dwmac_probe(struct platform_device *pdev) } plat_dat->interface = of_get_phy_mode(dev->of_node); - if (plat_dat->interface == PHY_INTERFACE_MODE_INTERNAL) { + if (plat_dat->interface == gmac->variant->internal_phy) { dev_info(&pdev->dev, "Will use internal PHY\n"); gmac->use_internal_phy = true; gmac->ephy_clk = of_clk_get(plat_dat->phy_node, 0); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index f233bf8b4ebb..c4407e8e39a3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -117,7 +117,7 @@ static void dwmac4_tx_queue_routing(struct mac_device_info *hw, void __iomem *ioaddr = hw->pcsr; u32 value; - const struct stmmac_rx_routing route_possibilities[] = { + static const struct stmmac_rx_routing route_possibilities[] = { { GMAC_RXQCTRL_AVCPQ_MASK, GMAC_RXQCTRL_AVCPQ_SHIFT }, { GMAC_RXQCTRL_PTPQ_MASK, GMAC_RXQCTRL_PTPQ_SHIFT }, { GMAC_RXQCTRL_DCBCPQ_MASK, GMAC_RXQCTRL_DCBCPQ_SHIFT }, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 19bba6281dab..1763e48c84e2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1449,7 +1449,7 @@ static void free_dma_rx_desc_resources(struct stmmac_priv *priv) static void free_dma_tx_desc_resources(struct stmmac_priv *priv) { u32 tx_count = priv->plat->tx_queues_to_use; - u32 queue = 0; + u32 queue; /* Free TX queue resources */ for (queue = 0; queue < tx_count; queue++) { @@ -1498,7 +1498,7 @@ static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv) sizeof(dma_addr_t), GFP_KERNEL); if (!rx_q->rx_skbuff_dma) - return -ENOMEM; + goto err_dma; rx_q->rx_skbuff = kmalloc_array(DMA_RX_SIZE, sizeof(struct sk_buff *), @@ -1561,13 +1561,13 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) sizeof(*tx_q->tx_skbuff_dma), GFP_KERNEL); if (!tx_q->tx_skbuff_dma) - return -ENOMEM; + goto err_dma; tx_q->tx_skbuff = kmalloc_array(DMA_TX_SIZE, sizeof(struct sk_buff *), GFP_KERNEL); if (!tx_q->tx_skbuff) - goto err_dma_buffers; + goto err_dma; if (priv->extend_desc) { tx_q->dma_etx = dma_zalloc_coherent(priv->device, @@ -1577,7 +1577,7 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) &tx_q->dma_tx_phy, GFP_KERNEL); if (!tx_q->dma_etx) - goto err_dma_buffers; + goto err_dma; } else { tx_q->dma_tx = dma_zalloc_coherent(priv->device, DMA_TX_SIZE * @@ -1586,13 +1586,13 @@ static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv) &tx_q->dma_tx_phy, GFP_KERNEL); if (!tx_q->dma_tx) - goto err_dma_buffers; 
+			goto err_dma;
 		}
 	}
 
 	return 0;
 
-err_dma_buffers:
+err_dma:
 	free_dma_tx_desc_resources(priv);
 
 	return ret;
 
@@ -4120,8 +4120,15 @@ int stmmac_dvr_probe(struct device *device,
 	if ((phyaddr >= 0) && (phyaddr <= 31))
 		priv->plat->phy_addr = phyaddr;
 
-	if (priv->plat->stmmac_rst)
+	if (priv->plat->stmmac_rst) {
+		ret = reset_control_assert(priv->plat->stmmac_rst);
 		reset_control_deassert(priv->plat->stmmac_rst);
+		/* Some reset controllers have only reset callback instead of
+		 * assert + deassert callbacks pair.
+		 */
+		if (ret == -ENOTSUPP)
+			reset_control_reset(priv->plat->stmmac_rst);
+	}
 
 	/* Init MAC and get the capabilities */
 	ret = stmmac_hw_init(priv);
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 46cb7f8955a2..4bb04aaf9650 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -9532,7 +9532,7 @@ static struct niu_parent *niu_get_parent(struct niu *np,
 	p = niu_new_parent(np, id, ptype);
 
 	if (p) {
-		char port_name[6];
+		char port_name[8];
 		int err;
 
 		sprintf(port_name, "port%d", port);
@@ -9553,7 +9553,7 @@ static void niu_put_parent(struct niu *np)
 {
 	struct niu_parent *p = np->parent;
 	u8 port = np->port;
-	char port_name[6];
+	char port_name[8];
 
 	BUG_ON(!p || p->ports[port] != np);
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index 711fbbbc4b1f..163d8d16bc24 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -654,6 +654,8 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd)
 			RET(-EFAULT);
 		}
 		DBG("%d 0x%x 0x%x\n", data[0], data[1], data[2]);
+	} else {
+		return -EOPNOTSUPP;
+	}
 
 	if (!capable(CAP_SYS_RAWIO))
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 1850e348f555..badd0a8caeb9 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -3089,6 +3089,31 @@ static int cpsw_probe(struct platform_device *pdev)
 		cpsw->quirk_irq = true;
 	}
 
+	ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	ndev->netdev_ops = &cpsw_netdev_ops;
+	ndev->ethtool_ops = &cpsw_ethtool_ops;
+	netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT);
+	netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT);
+	cpsw_split_res(ndev);
+
+	/* register the network device */
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+	ret = register_netdev(ndev);
+	if (ret) {
+		dev_err(priv->dev, "error registering net device\n");
+		ret = -ENODEV;
+		goto clean_ale_ret;
+	}
+
+	if (cpsw->data.dual_emac) {
+		ret = cpsw_probe_dual_emac(priv);
+		if (ret) {
+			cpsw_err(priv, probe, "error probe slave 2 emac interface\n");
+			goto clean_unregister_netdev_ret;
+		}
+	}
+
 	/* Grab RX and TX IRQs. Note that we also have RX_THRESHOLD and
 	 * MISC IRQs which are always kept disabled with this driver so
 	 * we will not request them.
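The cpsw.c hunk above, together with the matching removal hunk that follows, moves netdev setup, register_netdev() and the dual-EMAC probe ahead of the IRQ request code in cpsw_probe(); throughout, the error labels keep unwinding in exact reverse order of what has succeeded so far. The shape of that goto ladder, reduced to a schematic (foo_probe and its helpers are hypothetical, not the cpsw API):

#include <linux/netdevice.h>
#include <linux/platform_device.h>

/* Hypothetical helpers standing in for the driver's real setup steps. */
static int foo_setup_hw(struct platform_device *pdev);
static void foo_teardown_hw(struct platform_device *pdev);
static int foo_probe_second_port(struct platform_device *pdev);

static int foo_probe(struct platform_device *pdev, struct net_device *ndev)
{
	int ret;

	ret = foo_setup_hw(pdev);
	if (ret)
		return ret;

	ret = register_netdev(ndev);
	if (ret)
		goto clean_hw;

	ret = foo_probe_second_port(pdev);
	if (ret)
		goto clean_unregister;

	return 0;

clean_unregister:
	unregister_netdev(ndev);
clean_hw:
	foo_teardown_hw(pdev);
	return ret;
}

Each failure jumps to the label that undoes only what has already been set up, and the labels fall through downward; that is the invariant behind clean_unregister_netdev_ret and clean_ale_ret in the hunks around this note.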
@@ -3127,33 +3152,9 @@ static int cpsw_probe(struct platform_device *pdev) goto clean_ale_ret; } - ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; - - ndev->netdev_ops = &cpsw_netdev_ops; - ndev->ethtool_ops = &cpsw_ethtool_ops; - netif_napi_add(ndev, &cpsw->napi_rx, cpsw_rx_poll, CPSW_POLL_WEIGHT); - netif_tx_napi_add(ndev, &cpsw->napi_tx, cpsw_tx_poll, CPSW_POLL_WEIGHT); - cpsw_split_res(ndev); - - /* register the network device */ - SET_NETDEV_DEV(ndev, &pdev->dev); - ret = register_netdev(ndev); - if (ret) { - dev_err(priv->dev, "error registering net device\n"); - ret = -ENODEV; - goto clean_ale_ret; - } - cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d, pool size %d)\n", &ss_res->start, ndev->irq, dma_params.descs_pool_size); - if (cpsw->data.dual_emac) { - ret = cpsw_probe_dual_emac(priv); - if (ret) { - cpsw_err(priv, probe, "error probe slave 2 emac interface\n"); - goto clean_unregister_netdev_ret; - } - } pm_runtime_put(&pdev->dev); diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c index 4daf3d0926a8..0250aa9ae2cb 100644 --- a/drivers/net/ntb_netdev.c +++ b/drivers/net/ntb_netdev.c @@ -418,6 +418,8 @@ static int ntb_netdev_probe(struct device *client_dev) if (!ndev) return -ENOMEM; + SET_NETDEV_DEV(ndev, client_dev); + dev = netdev_priv(ndev); dev->ndev = ndev; dev->pdev = pdev; diff --git a/drivers/net/phy/mdio-mux.c b/drivers/net/phy/mdio-mux.c index 00755b6a42cf..c608e1dfaf09 100644 --- a/drivers/net/phy/mdio-mux.c +++ b/drivers/net/phy/mdio-mux.c @@ -135,8 +135,8 @@ int mdio_mux_init(struct device *dev, for_each_available_child_of_node(dev->of_node, child_bus_node) { int v; - v = of_mdio_parse_addr(dev, child_bus_node); - if (v < 0) { + r = of_property_read_u32(child_bus_node, "reg", &v); + if (r) { dev_err(dev, "Error: Failed to find reg for child %s\n", of_node_full_name(child_bus_node)); diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 13028833bee3..bd4303944e44 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -120,6 +120,7 @@ struct ppp { int n_channels; /* how many channels are attached 54 */ spinlock_t rlock; /* lock for receive side 58 */ spinlock_t wlock; /* lock for transmit side 5c */ + int *xmit_recursion __percpu; /* xmit recursion detect */ int mru; /* max receive unit 60 */ unsigned int flags; /* control bits 64 */ unsigned int xstate; /* transmit state bits 68 */ @@ -1025,6 +1026,7 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev, struct ppp *ppp = netdev_priv(dev); int indx; int err; + int cpu; ppp->dev = dev; ppp->ppp_net = src_net; @@ -1039,6 +1041,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev, INIT_LIST_HEAD(&ppp->channels); spin_lock_init(&ppp->rlock); spin_lock_init(&ppp->wlock); + + ppp->xmit_recursion = alloc_percpu(int); + if (!ppp->xmit_recursion) { + err = -ENOMEM; + goto err1; + } + for_each_possible_cpu(cpu) + (*per_cpu_ptr(ppp->xmit_recursion, cpu)) = 0; + #ifdef CONFIG_PPP_MULTILINK ppp->minseq = -1; skb_queue_head_init(&ppp->mrq); @@ -1050,11 +1061,15 @@ static int ppp_dev_configure(struct net *src_net, struct net_device *dev, err = ppp_unit_register(ppp, conf->unit, conf->ifname_is_set); if (err < 0) - return err; + goto err2; conf->file->private_data = &ppp->file; return 0; +err2: + free_percpu(ppp->xmit_recursion); +err1: + return err; } static const struct nla_policy ppp_nl_policy[IFLA_PPP_MAX + 1] = { @@ -1400,18 +1415,16 @@ static void __ppp_xmit_process(struct ppp *ppp) 
ppp_xmit_unlock(ppp); } -static DEFINE_PER_CPU(int, ppp_xmit_recursion); - static void ppp_xmit_process(struct ppp *ppp) { local_bh_disable(); - if (unlikely(__this_cpu_read(ppp_xmit_recursion))) + if (unlikely(*this_cpu_ptr(ppp->xmit_recursion))) goto err; - __this_cpu_inc(ppp_xmit_recursion); + (*this_cpu_ptr(ppp->xmit_recursion))++; __ppp_xmit_process(ppp); - __this_cpu_dec(ppp_xmit_recursion); + (*this_cpu_ptr(ppp->xmit_recursion))--; local_bh_enable(); @@ -1905,7 +1918,7 @@ static void __ppp_channel_push(struct channel *pch) read_lock(&pch->upl); ppp = pch->ppp; if (ppp) - __ppp_xmit_process(ppp); + ppp_xmit_process(ppp); read_unlock(&pch->upl); } } @@ -1914,9 +1927,7 @@ static void ppp_channel_push(struct channel *pch) { local_bh_disable(); - __this_cpu_inc(ppp_xmit_recursion); __ppp_channel_push(pch); - __this_cpu_dec(ppp_xmit_recursion); local_bh_enable(); } @@ -3057,6 +3068,7 @@ static void ppp_destroy_interface(struct ppp *ppp) #endif /* CONFIG_PPP_FILTER */ kfree_skb(ppp->xmit_pending); + free_percpu(ppp->xmit_recursion); free_netdev(ppp->dev); } diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 9af3239d6ad5..3570c7576993 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -106,7 +106,7 @@ struct major_info { struct rcu_head rcu; dev_t major; struct idr minor_idr; - struct mutex minor_lock; + spinlock_t minor_lock; const char *device_name; struct list_head next; }; @@ -416,15 +416,15 @@ int tap_get_minor(dev_t major, struct tap_dev *tap) goto unlock; } - mutex_lock(&tap_major->minor_lock); - retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_KERNEL); + spin_lock(&tap_major->minor_lock); + retval = idr_alloc(&tap_major->minor_idr, tap, 1, TAP_NUM_DEVS, GFP_ATOMIC); if (retval >= 0) { tap->minor = retval; } else if (retval == -ENOSPC) { netdev_err(tap->dev, "Too many tap devices\n"); retval = -EINVAL; } - mutex_unlock(&tap_major->minor_lock); + spin_unlock(&tap_major->minor_lock); unlock: rcu_read_unlock(); @@ -442,12 +442,12 @@ void tap_free_minor(dev_t major, struct tap_dev *tap) goto unlock; } - mutex_lock(&tap_major->minor_lock); + spin_lock(&tap_major->minor_lock); if (tap->minor) { idr_remove(&tap_major->minor_idr, tap->minor); tap->minor = 0; } - mutex_unlock(&tap_major->minor_lock); + spin_unlock(&tap_major->minor_lock); unlock: rcu_read_unlock(); @@ -467,13 +467,13 @@ static struct tap_dev *dev_get_by_tap_file(int major, int minor) goto unlock; } - mutex_lock(&tap_major->minor_lock); + spin_lock(&tap_major->minor_lock); tap = idr_find(&tap_major->minor_idr, minor); if (tap) { dev = tap->dev; dev_hold(dev); } - mutex_unlock(&tap_major->minor_lock); + spin_unlock(&tap_major->minor_lock); unlock: rcu_read_unlock(); @@ -1244,7 +1244,7 @@ static int tap_list_add(dev_t major, const char *device_name) tap_major->major = MAJOR(major); idr_init(&tap_major->minor_idr); - mutex_init(&tap_major->minor_lock); + spin_lock_init(&tap_major->minor_lock); tap_major->device_name = device_name; diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index d103a1d4fb36..8f572b9f3625 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -768,8 +768,10 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ u8 *buf; int len; int temp; + int err; u8 iface_no; struct usb_cdc_parsed_header hdr; + u16 curr_ntb_format; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) @@ -874,6 +876,32 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ goto error2; } + /* + * Some Huawei devices 
have been observed to come out of reset in NDP32 mode. + * Let's check if this is the case, and set the device to NDP16 mode again if + * needed. + */ + if (ctx->drvflags & CDC_NCM_FLAG_RESET_NTB16) { + err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_FORMAT, + USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, + 0, iface_no, &curr_ntb_format, 2); + if (err < 0) { + goto error2; + } + + if (curr_ntb_format == USB_CDC_NCM_NTB32_FORMAT) { + dev_info(&intf->dev, "resetting NTB format to 16-bit"); + err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_FORMAT, + USB_TYPE_CLASS | USB_DIR_OUT + | USB_RECIP_INTERFACE, + USB_CDC_NCM_NTB16_FORMAT, + iface_no, NULL, 0); + + if (err < 0) + goto error2; + } + } + cdc_ncm_find_endpoints(dev, ctx->data); cdc_ncm_find_endpoints(dev, ctx->control); if (!dev->in || !dev->out || !dev->status) { diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c index 2680a65cd5e4..63f28908afda 100644 --- a/drivers/net/usb/huawei_cdc_ncm.c +++ b/drivers/net/usb/huawei_cdc_ncm.c @@ -80,6 +80,12 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev, * be at the end of the frame. */ drvflags |= CDC_NCM_FLAG_NDP_TO_END; + + /* Additionally, it has been reported that some Huawei E3372H devices, with + * firmware version 21.318.01.00.541, come out of reset in NTB32 format mode, hence + * needing to be set to the NTB16 one again. + */ + drvflags |= CDC_NCM_FLAG_RESET_NTB16; ret = cdc_ncm_bind_common(usbnet_dev, intf, 1, drvflags); if (ret) goto err; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index 2dfca96a63b6..340c13484e5c 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -898,6 +898,7 @@ static const struct ethtool_ops smsc95xx_ethtool_ops = { .set_wol = smsc95xx_ethtool_set_wol, .get_link_ksettings = smsc95xx_get_link_ksettings, .set_link_ksettings = smsc95xx_set_link_ksettings, + .get_ts_info = ethtool_op_get_ts_info, }; static int smsc95xx_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index ba1c9f93592b..9c51b8be0038 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -311,7 +311,7 @@ struct vmxnet3_intr { u8 num_intrs; /* # of intr vectors */ u8 event_intr_idx; /* idx of the intr vector for event */ u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */ - char event_msi_vector_name[IFNAMSIZ+11]; + char event_msi_vector_name[IFNAMSIZ+17]; #ifdef CONFIG_PCI_MSI struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT]; #endif diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index dcde596c9eb9..7e689c86d565 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -4934,6 +4934,11 @@ brcmf_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, cfg80211_mgmt_tx_status(wdev, *cookie, buf, len, true, GFP_KERNEL); } else if (ieee80211_is_action(mgmt->frame_control)) { + if (len > BRCMF_FIL_ACTION_FRAME_SIZE + DOT11_MGMT_HDR_LEN) { + brcmf_err("invalid action frame length\n"); + err = -EINVAL; + goto exit; + } af_params = kzalloc(sizeof(*af_params), GFP_KERNEL); if (af_params == NULL) { brcmf_err("unable to allocate frame\n"); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 6e2e760d98b1..0b75def39c6c 100644 --- 
a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -5704,7 +5704,7 @@ static void rt2800_init_freq_calibration(struct rt2x00_dev *rt2x00dev) static void rt2800_init_bbp_5592_glrt(struct rt2x00_dev *rt2x00dev) { - const u8 glrt_table[] = { + static const u8 glrt_table[] = { 0xE0, 0x1F, 0X38, 0x32, 0x08, 0x28, 0x19, 0x0A, 0xFF, 0x00, /* 128 ~ 137 */ 0x16, 0x10, 0x10, 0x0B, 0x36, 0x2C, 0x26, 0x24, 0x42, 0x36, /* 138 ~ 147 */ 0x30, 0x2D, 0x4C, 0x46, 0x3D, 0x40, 0x3E, 0x42, 0x3D, 0x40, /* 148 ~ 157 */ diff --git a/drivers/ntb/hw/Kconfig b/drivers/ntb/hw/Kconfig index 7116472b4625..a89243c9fdd3 100644 --- a/drivers/ntb/hw/Kconfig +++ b/drivers/ntb/hw/Kconfig @@ -1,2 +1,3 @@ source "drivers/ntb/hw/amd/Kconfig" +source "drivers/ntb/hw/idt/Kconfig" source "drivers/ntb/hw/intel/Kconfig" diff --git a/drivers/ntb/hw/Makefile b/drivers/ntb/hw/Makefile index 532e0859b4a1..87332c3905f0 100644 --- a/drivers/ntb/hw/Makefile +++ b/drivers/ntb/hw/Makefile @@ -1,2 +1,3 @@ obj-$(CONFIG_NTB_AMD) += amd/ +obj-$(CONFIG_NTB_IDT) += idt/ obj-$(CONFIG_NTB_INTEL) += intel/ diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.c b/drivers/ntb/hw/amd/ntb_hw_amd.c index 019a158e1128..f0788aae05c9 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.c +++ b/drivers/ntb/hw/amd/ntb_hw_amd.c @@ -5,6 +5,7 @@ * GPL LICENSE SUMMARY * * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved. + * Copyright (C) 2016 T-Platforms. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -13,6 +14,7 @@ * BSD LICENSE * * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved. + * Copyright (C) 2016 T-Platforms. All Rights Reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -79,40 +81,42 @@ static int ndev_mw_to_bar(struct amd_ntb_dev *ndev, int idx) return 1 << idx; } -static int amd_ntb_mw_count(struct ntb_dev *ntb) +static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx) { + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + return ntb_ndev(ntb)->mw_count; } -static int amd_ntb_mw_get_range(struct ntb_dev *ntb, int idx, - phys_addr_t *base, - resource_size_t *size, - resource_size_t *align, - resource_size_t *align_size) +static int amd_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); int bar; + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + bar = ndev_mw_to_bar(ndev, idx); if (bar < 0) return bar; - if (base) - *base = pci_resource_start(ndev->ntb.pdev, bar); - - if (size) - *size = pci_resource_len(ndev->ntb.pdev, bar); + if (addr_align) + *addr_align = SZ_4K; - if (align) - *align = SZ_4K; + if (size_align) + *size_align = 1; - if (align_size) - *align_size = 1; + if (size_max) + *size_max = pci_resource_len(ndev->ntb.pdev, bar); return 0; } -static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx, +static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, dma_addr_t addr, resource_size_t size) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); @@ -122,11 +126,14 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx, u64 base_addr, limit, reg_val; int bar; + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + bar = ndev_mw_to_bar(ndev, idx); if (bar < 0) return bar; - mw_size = pci_resource_len(ndev->ntb.pdev, bar); + mw_size = pci_resource_len(ntb->pdev, bar); /* make sure the range fits in the usable mw size */ if (size > mw_size) @@ -135,7 +142,7 @@ static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int idx, mmio = ndev->self_mmio; peer_mmio = ndev->peer_mmio; - base_addr = pci_resource_start(ndev->ntb.pdev, bar); + base_addr = pci_resource_start(ntb->pdev, bar); if (bar != 1) { xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2); @@ -212,7 +219,7 @@ static int amd_link_is_up(struct amd_ntb_dev *ndev) return 0; } -static int amd_ntb_link_is_up(struct ntb_dev *ntb, +static u64 amd_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed, enum ntb_width *width) { @@ -225,7 +232,7 @@ static int amd_ntb_link_is_up(struct ntb_dev *ntb, if (width) *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta); - dev_dbg(ndev_dev(ndev), "link is up.\n"); + dev_dbg(&ntb->pdev->dev, "link is up.\n"); ret = 1; } else { @@ -234,7 +241,7 @@ static int amd_ntb_link_is_up(struct ntb_dev *ntb, if (width) *width = NTB_WIDTH_NONE; - dev_dbg(ndev_dev(ndev), "link is down.\n"); + dev_dbg(&ntb->pdev->dev, "link is down.\n"); } return ret; @@ -254,7 +261,7 @@ static int amd_ntb_link_enable(struct ntb_dev *ntb, if (ndev->ntb.topo == NTB_TOPO_SEC) return -EINVAL; - dev_dbg(ndev_dev(ndev), "Enabling Link.\n"); + dev_dbg(&ntb->pdev->dev, "Enabling Link.\n"); ntb_ctl = readl(mmio + AMD_CNTL_OFFSET); ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL); @@ -275,7 +282,7 @@ static int amd_ntb_link_disable(struct ntb_dev *ntb) if (ndev->ntb.topo == NTB_TOPO_SEC) return -EINVAL; - dev_dbg(ndev_dev(ndev), "Enabling Link.\n"); + dev_dbg(&ntb->pdev->dev, "Disabling Link.\n"); ntb_ctl = readl(mmio + AMD_CNTL_OFFSET); ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL); @@ -284,6 +291,31 @@ static int 
amd_ntb_link_disable(struct ntb_dev *ntb) return 0; } +static int amd_ntb_peer_mw_count(struct ntb_dev *ntb) +{ + /* The same as for inbound MWs */ + return ntb_ndev(ntb)->mw_count; +} + +static int amd_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, + phys_addr_t *base, resource_size_t *size) +{ + struct amd_ntb_dev *ndev = ntb_ndev(ntb); + int bar; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + if (base) + *base = pci_resource_start(ndev->ntb.pdev, bar); + + if (size) + *size = pci_resource_len(ndev->ntb.pdev, bar); + + return 0; +} + static u64 amd_ntb_db_valid_mask(struct ntb_dev *ntb) { return ntb_ndev(ntb)->db_valid_mask; @@ -400,30 +432,30 @@ static int amd_ntb_spad_write(struct ntb_dev *ntb, return 0; } -static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int idx) +static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; u32 offset; - if (idx < 0 || idx >= ndev->spad_count) + if (sidx < 0 || sidx >= ndev->spad_count) return -EINVAL; - offset = ndev->peer_spad + (idx << 2); + offset = ndev->peer_spad + (sidx << 2); return readl(mmio + AMD_SPAD_OFFSET + offset); } -static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, - int idx, u32 val) +static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, + int sidx, u32 val) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; u32 offset; - if (idx < 0 || idx >= ndev->spad_count) + if (sidx < 0 || sidx >= ndev->spad_count) return -EINVAL; - offset = ndev->peer_spad + (idx << 2); + offset = ndev->peer_spad + (sidx << 2); writel(val, mmio + AMD_SPAD_OFFSET + offset); return 0; @@ -431,8 +463,10 @@ static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, static const struct ntb_dev_ops amd_ntb_ops = { .mw_count = amd_ntb_mw_count, - .mw_get_range = amd_ntb_mw_get_range, + .mw_get_align = amd_ntb_mw_get_align, .mw_set_trans = amd_ntb_mw_set_trans, + .peer_mw_count = amd_ntb_peer_mw_count, + .peer_mw_get_addr = amd_ntb_peer_mw_get_addr, .link_is_up = amd_ntb_link_is_up, .link_enable = amd_ntb_link_enable, .link_disable = amd_ntb_link_disable, @@ -466,18 +500,19 @@ static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit) static void amd_handle_event(struct amd_ntb_dev *ndev, int vec) { void __iomem *mmio = ndev->self_mmio; + struct device *dev = &ndev->ntb.pdev->dev; u32 status; status = readl(mmio + AMD_INTSTAT_OFFSET); if (!(status & AMD_EVENT_INTMASK)) return; - dev_dbg(ndev_dev(ndev), "status = 0x%x and vec = %d\n", status, vec); + dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec); status &= AMD_EVENT_INTMASK; switch (status) { case AMD_PEER_FLUSH_EVENT: - dev_info(ndev_dev(ndev), "Flush is done.\n"); + dev_info(dev, "Flush is done.\n"); break; case AMD_PEER_RESET_EVENT: amd_ack_smu(ndev, AMD_PEER_RESET_EVENT); @@ -503,7 +538,7 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec) status = readl(mmio + AMD_PMESTAT_OFFSET); /* check if this is WAKEUP event */ if (status & 0x1) - dev_info(ndev_dev(ndev), "Wakeup is done.\n"); + dev_info(dev, "Wakeup is done.\n"); amd_ack_smu(ndev, AMD_PEER_D0_EVENT); @@ -512,14 +547,14 @@ static void amd_handle_event(struct amd_ntb_dev *ndev, int vec) AMD_LINK_HB_TIMEOUT); break; default: - dev_info(ndev_dev(ndev), "event status = 0x%x.\n", status); + dev_info(dev, "event status = 0x%x.\n", status); break; } } static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec) { - dev_dbg(ndev_dev(ndev), "vec %d\n", vec); + 
dev_dbg(&ndev->ntb.pdev->dev, "vec %d\n", vec); if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1)) amd_handle_event(ndev, vec); @@ -541,7 +576,7 @@ static irqreturn_t ndev_irq_isr(int irq, void *dev) { struct amd_ntb_dev *ndev = dev; - return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq); + return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq); } static int ndev_init_isr(struct amd_ntb_dev *ndev, @@ -550,7 +585,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev, struct pci_dev *pdev; int rc, i, msix_count, node; - pdev = ndev_pdev(ndev); + pdev = ndev->ntb.pdev; node = dev_to_node(&pdev->dev); @@ -592,7 +627,7 @@ static int ndev_init_isr(struct amd_ntb_dev *ndev, goto err_msix_request; } - dev_dbg(ndev_dev(ndev), "Using msix interrupts\n"); + dev_dbg(&pdev->dev, "Using msix interrupts\n"); ndev->db_count = msix_min; ndev->msix_vec_count = msix_max; return 0; @@ -619,7 +654,7 @@ err_msix_vec_alloc: if (rc) goto err_msi_request; - dev_dbg(ndev_dev(ndev), "Using msi interrupts\n"); + dev_dbg(&pdev->dev, "Using msi interrupts\n"); ndev->db_count = 1; ndev->msix_vec_count = 1; return 0; @@ -636,7 +671,7 @@ err_msi_enable: if (rc) goto err_intx_request; - dev_dbg(ndev_dev(ndev), "Using intx interrupts\n"); + dev_dbg(&pdev->dev, "Using intx interrupts\n"); ndev->db_count = 1; ndev->msix_vec_count = 1; return 0; @@ -651,7 +686,7 @@ static void ndev_deinit_isr(struct amd_ntb_dev *ndev) void __iomem *mmio = ndev->self_mmio; int i; - pdev = ndev_pdev(ndev); + pdev = ndev->ntb.pdev; /* Mask all doorbell interrupts */ ndev->db_mask = ndev->db_valid_mask; @@ -777,7 +812,8 @@ static void ndev_init_debugfs(struct amd_ntb_dev *ndev) ndev->debugfs_info = NULL; } else { ndev->debugfs_dir = - debugfs_create_dir(ndev_name(ndev), debugfs_dir); + debugfs_create_dir(pci_name(ndev->ntb.pdev), + debugfs_dir); if (!ndev->debugfs_dir) ndev->debugfs_info = NULL; else @@ -812,7 +848,7 @@ static int amd_poll_link(struct amd_ntb_dev *ndev) reg = readl(mmio + AMD_SIDEINFO_OFFSET); reg &= NTB_LIN_STA_ACTIVE_BIT; - dev_dbg(ndev_dev(ndev), "%s: reg_val = 0x%x.\n", __func__, reg); + dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg); if (reg == ndev->cntl_sta) return 0; @@ -894,7 +930,8 @@ static int amd_init_ntb(struct amd_ntb_dev *ndev) break; default: - dev_err(ndev_dev(ndev), "AMD NTB does not support B2B mode.\n"); + dev_err(&ndev->ntb.pdev->dev, + "AMD NTB does not support B2B mode.\n"); return -EINVAL; } @@ -923,10 +960,10 @@ static int amd_init_dev(struct amd_ntb_dev *ndev) struct pci_dev *pdev; int rc = 0; - pdev = ndev_pdev(ndev); + pdev = ndev->ntb.pdev; ndev->ntb.topo = amd_get_topo(ndev); - dev_dbg(ndev_dev(ndev), "AMD NTB topo is %s\n", + dev_dbg(&pdev->dev, "AMD NTB topo is %s\n", ntb_topo_string(ndev->ntb.topo)); rc = amd_init_ntb(ndev); @@ -935,7 +972,7 @@ static int amd_init_dev(struct amd_ntb_dev *ndev) rc = amd_init_isr(ndev); if (rc) { - dev_err(ndev_dev(ndev), "fail to init isr.\n"); + dev_err(&pdev->dev, "fail to init isr.\n"); return rc; } @@ -973,7 +1010,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev, rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) goto err_dma_mask; - dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n"); + dev_warn(&pdev->dev, "Cannot DMA highmem\n"); } rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); @@ -981,7 +1018,7 @@ static int amd_ntb_init_pci(struct amd_ntb_dev *ndev, rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) goto err_dma_mask; - dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n"); + 
dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n"); } ndev->self_mmio = pci_iomap(pdev, 0, 0); @@ -1004,7 +1041,7 @@ err_pci_enable: static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev) { - struct pci_dev *pdev = ndev_pdev(ndev); + struct pci_dev *pdev = ndev->ntb.pdev; pci_iounmap(pdev, ndev->self_mmio); diff --git a/drivers/ntb/hw/amd/ntb_hw_amd.h b/drivers/ntb/hw/amd/ntb_hw_amd.h index 13d73ed94a52..8f3617a46292 100644 --- a/drivers/ntb/hw/amd/ntb_hw_amd.h +++ b/drivers/ntb/hw/amd/ntb_hw_amd.h @@ -211,9 +211,6 @@ struct amd_ntb_dev { struct dentry *debugfs_info; }; -#define ndev_pdev(ndev) ((ndev)->ntb.pdev) -#define ndev_name(ndev) pci_name(ndev_pdev(ndev)) -#define ndev_dev(ndev) (&ndev_pdev(ndev)->dev) #define ntb_ndev(__ntb) container_of(__ntb, struct amd_ntb_dev, ntb) #define hb_ndev(__work) container_of(__work, struct amd_ntb_dev, hb_timer.work) diff --git a/drivers/ntb/hw/idt/Kconfig b/drivers/ntb/hw/idt/Kconfig new file mode 100644 index 000000000000..b360e5613b9f --- /dev/null +++ b/drivers/ntb/hw/idt/Kconfig @@ -0,0 +1,31 @@ +config NTB_IDT + tristate "IDT PCIe-switch Non-Transparent Bridge support" + depends on PCI + help + This driver supports the NTB of capable IDT PCIe-switches. + + Some pre-initialization must be done before an IDT PCIe-switch + exposes its NT-functions correctly. It should be done either by + properly initializing the EEPROM connected to the master SMBus of + the switch, or by the BIOS changing the corresponding register + values over the slave-SMBus interface. Either way, it must happen + before the PCI bus enumeration is finished in the Linux kernel. + + First of all, partitions must be activated and properly assigned to + all the ports with NT-functions intended to be activated (see the + SWPARTxCTL and SWPORTxCTL registers). Then all NT-function BARs must + be enabled with a chosen valid aperture. For memory-window-related + BARs the aperture settings determine the maximum size of the memory + windows accepted by a BAR. Note that BAR0 must map the PCI + configuration space registers. + + It's worth noting that, since a part of this driver relies on the + BAR settings of peer NT-functions, the BAR setup can't be done via + kernel PCI fixups. That's why the alternative pre-initialization + techniques, i.e. the BIOS using the SMBus interface or an EEPROM, + should be utilized. Additionally, if one needs to have temperature + sensor information printed to the system log, the corresponding + registers must be initialized within the BIOS/EEPROM as well. + + If unsure, say N. + diff --git a/drivers/ntb/hw/idt/Makefile b/drivers/ntb/hw/idt/Makefile new file mode 100644 index 000000000000..a102cf154be0 --- /dev/null +++ b/drivers/ntb/hw/idt/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_NTB_IDT) += ntb_hw_idt.o diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.c b/drivers/ntb/hw/idt/ntb_hw_idt.c new file mode 100644 index 000000000000..d44d7ef38fe8 --- /dev/null +++ b/drivers/ntb/hw/idt/ntb_hw_idt.c @@ -0,0 +1,2712 @@ +/* + * This file is provided under a GPLv2 license. When using or + * redistributing this file, you may do so under that license. + * + * GPL LICENSE SUMMARY + * + * Copyright (C) 2016 T-Platforms All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, one can be found http://www.gnu.org/licenses/. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * IDT PCIe-switch NTB Linux driver + * + * Contact Information: + * Serge Semin <fancer.lancer@gmail.com>, <Sergey.Semin@t-platforms.ru> + */ + +#include <linux/stddef.h> +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/bitops.h> +#include <linux/sizes.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/pci.h> +#include <linux/aer.h> +#include <linux/slab.h> +#include <linux/list.h> +#include <linux/debugfs.h> +#include <linux/ntb.h> + +#include "ntb_hw_idt.h" + +#define NTB_NAME "ntb_hw_idt" +#define NTB_DESC "IDT PCI-E Non-Transparent Bridge Driver" +#define NTB_VER "2.0" +#define NTB_IRQNAME "ntb_irq_idt" + +MODULE_DESCRIPTION(NTB_DESC); +MODULE_VERSION(NTB_VER); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("T-platforms"); + +/* + * NT Endpoint registers table simplifying a loop access to the functionally + * related registers + */ +static const struct idt_ntb_regs ntdata_tbl = { + { {IDT_NT_BARSETUP0, IDT_NT_BARLIMIT0, + IDT_NT_BARLTBASE0, IDT_NT_BARUTBASE0}, + {IDT_NT_BARSETUP1, IDT_NT_BARLIMIT1, + IDT_NT_BARLTBASE1, IDT_NT_BARUTBASE1}, + {IDT_NT_BARSETUP2, IDT_NT_BARLIMIT2, + IDT_NT_BARLTBASE2, IDT_NT_BARUTBASE2}, + {IDT_NT_BARSETUP3, IDT_NT_BARLIMIT3, + IDT_NT_BARLTBASE3, IDT_NT_BARUTBASE3}, + {IDT_NT_BARSETUP4, IDT_NT_BARLIMIT4, + IDT_NT_BARLTBASE4, IDT_NT_BARUTBASE4}, + {IDT_NT_BARSETUP5, IDT_NT_BARLIMIT5, + IDT_NT_BARLTBASE5, IDT_NT_BARUTBASE5} }, + { {IDT_NT_INMSG0, IDT_NT_OUTMSG0, IDT_NT_INMSGSRC0}, + {IDT_NT_INMSG1, IDT_NT_OUTMSG1, IDT_NT_INMSGSRC1}, + {IDT_NT_INMSG2, IDT_NT_OUTMSG2, IDT_NT_INMSGSRC2}, + {IDT_NT_INMSG3, IDT_NT_OUTMSG3, IDT_NT_INMSGSRC3} } +}; + +/* + * NT Endpoint ports data table with the corresponding pcie command, link + * status, control and BAR-related registers + */ +static const struct idt_ntb_port portdata_tbl[IDT_MAX_NR_PORTS] = { +/*0*/ { IDT_SW_NTP0_PCIECMDSTS, IDT_SW_NTP0_PCIELCTLSTS, + IDT_SW_NTP0_NTCTL, + IDT_SW_SWPORT0CTL, IDT_SW_SWPORT0STS, + { {IDT_SW_NTP0_BARSETUP0, IDT_SW_NTP0_BARLIMIT0, + IDT_SW_NTP0_BARLTBASE0, IDT_SW_NTP0_BARUTBASE0}, + {IDT_SW_NTP0_BARSETUP1, IDT_SW_NTP0_BARLIMIT1, + 
IDT_SW_NTP0_BARLTBASE1, IDT_SW_NTP0_BARUTBASE1}, + {IDT_SW_NTP0_BARSETUP2, IDT_SW_NTP0_BARLIMIT2, + IDT_SW_NTP0_BARLTBASE2, IDT_SW_NTP0_BARUTBASE2}, + {IDT_SW_NTP0_BARSETUP3, IDT_SW_NTP0_BARLIMIT3, + IDT_SW_NTP0_BARLTBASE3, IDT_SW_NTP0_BARUTBASE3}, + {IDT_SW_NTP0_BARSETUP4, IDT_SW_NTP0_BARLIMIT4, + IDT_SW_NTP0_BARLTBASE4, IDT_SW_NTP0_BARUTBASE4}, + {IDT_SW_NTP0_BARSETUP5, IDT_SW_NTP0_BARLIMIT5, + IDT_SW_NTP0_BARLTBASE5, IDT_SW_NTP0_BARUTBASE5} } }, +/*1*/ {0}, +/*2*/ { IDT_SW_NTP2_PCIECMDSTS, IDT_SW_NTP2_PCIELCTLSTS, + IDT_SW_NTP2_NTCTL, + IDT_SW_SWPORT2CTL, IDT_SW_SWPORT2STS, + { {IDT_SW_NTP2_BARSETUP0, IDT_SW_NTP2_BARLIMIT0, + IDT_SW_NTP2_BARLTBASE0, IDT_SW_NTP2_BARUTBASE0}, + {IDT_SW_NTP2_BARSETUP1, IDT_SW_NTP2_BARLIMIT1, + IDT_SW_NTP2_BARLTBASE1, IDT_SW_NTP2_BARUTBASE1}, + {IDT_SW_NTP2_BARSETUP2, IDT_SW_NTP2_BARLIMIT2, + IDT_SW_NTP2_BARLTBASE2, IDT_SW_NTP2_BARUTBASE2}, + {IDT_SW_NTP2_BARSETUP3, IDT_SW_NTP2_BARLIMIT3, + IDT_SW_NTP2_BARLTBASE3, IDT_SW_NTP2_BARUTBASE3}, + {IDT_SW_NTP2_BARSETUP4, IDT_SW_NTP2_BARLIMIT4, + IDT_SW_NTP2_BARLTBASE4, IDT_SW_NTP2_BARUTBASE4}, + {IDT_SW_NTP2_BARSETUP5, IDT_SW_NTP2_BARLIMIT5, + IDT_SW_NTP2_BARLTBASE5, IDT_SW_NTP2_BARUTBASE5} } }, +/*3*/ {0}, +/*4*/ { IDT_SW_NTP4_PCIECMDSTS, IDT_SW_NTP4_PCIELCTLSTS, + IDT_SW_NTP4_NTCTL, + IDT_SW_SWPORT4CTL, IDT_SW_SWPORT4STS, + { {IDT_SW_NTP4_BARSETUP0, IDT_SW_NTP4_BARLIMIT0, + IDT_SW_NTP4_BARLTBASE0, IDT_SW_NTP4_BARUTBASE0}, + {IDT_SW_NTP4_BARSETUP1, IDT_SW_NTP4_BARLIMIT1, + IDT_SW_NTP4_BARLTBASE1, IDT_SW_NTP4_BARUTBASE1}, + {IDT_SW_NTP4_BARSETUP2, IDT_SW_NTP4_BARLIMIT2, + IDT_SW_NTP4_BARLTBASE2, IDT_SW_NTP4_BARUTBASE2}, + {IDT_SW_NTP4_BARSETUP3, IDT_SW_NTP4_BARLIMIT3, + IDT_SW_NTP4_BARLTBASE3, IDT_SW_NTP4_BARUTBASE3}, + {IDT_SW_NTP4_BARSETUP4, IDT_SW_NTP4_BARLIMIT4, + IDT_SW_NTP4_BARLTBASE4, IDT_SW_NTP4_BARUTBASE4}, + {IDT_SW_NTP4_BARSETUP5, IDT_SW_NTP4_BARLIMIT5, + IDT_SW_NTP4_BARLTBASE5, IDT_SW_NTP4_BARUTBASE5} } }, +/*5*/ {0}, +/*6*/ { IDT_SW_NTP6_PCIECMDSTS, IDT_SW_NTP6_PCIELCTLSTS, + IDT_SW_NTP6_NTCTL, + IDT_SW_SWPORT6CTL, IDT_SW_SWPORT6STS, + { {IDT_SW_NTP6_BARSETUP0, IDT_SW_NTP6_BARLIMIT0, + IDT_SW_NTP6_BARLTBASE0, IDT_SW_NTP6_BARUTBASE0}, + {IDT_SW_NTP6_BARSETUP1, IDT_SW_NTP6_BARLIMIT1, + IDT_SW_NTP6_BARLTBASE1, IDT_SW_NTP6_BARUTBASE1}, + {IDT_SW_NTP6_BARSETUP2, IDT_SW_NTP6_BARLIMIT2, + IDT_SW_NTP6_BARLTBASE2, IDT_SW_NTP6_BARUTBASE2}, + {IDT_SW_NTP6_BARSETUP3, IDT_SW_NTP6_BARLIMIT3, + IDT_SW_NTP6_BARLTBASE3, IDT_SW_NTP6_BARUTBASE3}, + {IDT_SW_NTP6_BARSETUP4, IDT_SW_NTP6_BARLIMIT4, + IDT_SW_NTP6_BARLTBASE4, IDT_SW_NTP6_BARUTBASE4}, + {IDT_SW_NTP6_BARSETUP5, IDT_SW_NTP6_BARLIMIT5, + IDT_SW_NTP6_BARLTBASE5, IDT_SW_NTP6_BARUTBASE5} } }, +/*7*/ {0}, +/*8*/ { IDT_SW_NTP8_PCIECMDSTS, IDT_SW_NTP8_PCIELCTLSTS, + IDT_SW_NTP8_NTCTL, + IDT_SW_SWPORT8CTL, IDT_SW_SWPORT8STS, + { {IDT_SW_NTP8_BARSETUP0, IDT_SW_NTP8_BARLIMIT0, + IDT_SW_NTP8_BARLTBASE0, IDT_SW_NTP8_BARUTBASE0}, + {IDT_SW_NTP8_BARSETUP1, IDT_SW_NTP8_BARLIMIT1, + IDT_SW_NTP8_BARLTBASE1, IDT_SW_NTP8_BARUTBASE1}, + {IDT_SW_NTP8_BARSETUP2, IDT_SW_NTP8_BARLIMIT2, + IDT_SW_NTP8_BARLTBASE2, IDT_SW_NTP8_BARUTBASE2}, + {IDT_SW_NTP8_BARSETUP3, IDT_SW_NTP8_BARLIMIT3, + IDT_SW_NTP8_BARLTBASE3, IDT_SW_NTP8_BARUTBASE3}, + {IDT_SW_NTP8_BARSETUP4, IDT_SW_NTP8_BARLIMIT4, + IDT_SW_NTP8_BARLTBASE4, IDT_SW_NTP8_BARUTBASE4}, + {IDT_SW_NTP8_BARSETUP5, IDT_SW_NTP8_BARLIMIT5, + IDT_SW_NTP8_BARLTBASE5, IDT_SW_NTP8_BARUTBASE5} } }, +/*9*/ {0}, +/*10*/ {0}, +/*11*/ {0}, +/*12*/ { IDT_SW_NTP12_PCIECMDSTS, IDT_SW_NTP12_PCIELCTLSTS, + IDT_SW_NTP12_NTCTL, + IDT_SW_SWPORT12CTL, 
IDT_SW_SWPORT12STS, + { {IDT_SW_NTP12_BARSETUP0, IDT_SW_NTP12_BARLIMIT0, + IDT_SW_NTP12_BARLTBASE0, IDT_SW_NTP12_BARUTBASE0}, + {IDT_SW_NTP12_BARSETUP1, IDT_SW_NTP12_BARLIMIT1, + IDT_SW_NTP12_BARLTBASE1, IDT_SW_NTP12_BARUTBASE1}, + {IDT_SW_NTP12_BARSETUP2, IDT_SW_NTP12_BARLIMIT2, + IDT_SW_NTP12_BARLTBASE2, IDT_SW_NTP12_BARUTBASE2}, + {IDT_SW_NTP12_BARSETUP3, IDT_SW_NTP12_BARLIMIT3, + IDT_SW_NTP12_BARLTBASE3, IDT_SW_NTP12_BARUTBASE3}, + {IDT_SW_NTP12_BARSETUP4, IDT_SW_NTP12_BARLIMIT4, + IDT_SW_NTP12_BARLTBASE4, IDT_SW_NTP12_BARUTBASE4}, + {IDT_SW_NTP12_BARSETUP5, IDT_SW_NTP12_BARLIMIT5, + IDT_SW_NTP12_BARLTBASE5, IDT_SW_NTP12_BARUTBASE5} } }, +/*13*/ {0}, +/*14*/ {0}, +/*15*/ {0}, +/*16*/ { IDT_SW_NTP16_PCIECMDSTS, IDT_SW_NTP16_PCIELCTLSTS, + IDT_SW_NTP16_NTCTL, + IDT_SW_SWPORT16CTL, IDT_SW_SWPORT16STS, + { {IDT_SW_NTP16_BARSETUP0, IDT_SW_NTP16_BARLIMIT0, + IDT_SW_NTP16_BARLTBASE0, IDT_SW_NTP16_BARUTBASE0}, + {IDT_SW_NTP16_BARSETUP1, IDT_SW_NTP16_BARLIMIT1, + IDT_SW_NTP16_BARLTBASE1, IDT_SW_NTP16_BARUTBASE1}, + {IDT_SW_NTP16_BARSETUP2, IDT_SW_NTP16_BARLIMIT2, + IDT_SW_NTP16_BARLTBASE2, IDT_SW_NTP16_BARUTBASE2}, + {IDT_SW_NTP16_BARSETUP3, IDT_SW_NTP16_BARLIMIT3, + IDT_SW_NTP16_BARLTBASE3, IDT_SW_NTP16_BARUTBASE3}, + {IDT_SW_NTP16_BARSETUP4, IDT_SW_NTP16_BARLIMIT4, + IDT_SW_NTP16_BARLTBASE4, IDT_SW_NTP16_BARUTBASE4}, + {IDT_SW_NTP16_BARSETUP5, IDT_SW_NTP16_BARLIMIT5, + IDT_SW_NTP16_BARLTBASE5, IDT_SW_NTP16_BARUTBASE5} } }, +/*17*/ {0}, +/*18*/ {0}, +/*19*/ {0}, +/*20*/ { IDT_SW_NTP20_PCIECMDSTS, IDT_SW_NTP20_PCIELCTLSTS, + IDT_SW_NTP20_NTCTL, + IDT_SW_SWPORT20CTL, IDT_SW_SWPORT20STS, + { {IDT_SW_NTP20_BARSETUP0, IDT_SW_NTP20_BARLIMIT0, + IDT_SW_NTP20_BARLTBASE0, IDT_SW_NTP20_BARUTBASE0}, + {IDT_SW_NTP20_BARSETUP1, IDT_SW_NTP20_BARLIMIT1, + IDT_SW_NTP20_BARLTBASE1, IDT_SW_NTP20_BARUTBASE1}, + {IDT_SW_NTP20_BARSETUP2, IDT_SW_NTP20_BARLIMIT2, + IDT_SW_NTP20_BARLTBASE2, IDT_SW_NTP20_BARUTBASE2}, + {IDT_SW_NTP20_BARSETUP3, IDT_SW_NTP20_BARLIMIT3, + IDT_SW_NTP20_BARLTBASE3, IDT_SW_NTP20_BARUTBASE3}, + {IDT_SW_NTP20_BARSETUP4, IDT_SW_NTP20_BARLIMIT4, + IDT_SW_NTP20_BARLTBASE4, IDT_SW_NTP20_BARUTBASE4}, + {IDT_SW_NTP20_BARSETUP5, IDT_SW_NTP20_BARLIMIT5, + IDT_SW_NTP20_BARLTBASE5, IDT_SW_NTP20_BARUTBASE5} } }, +/*21*/ {0}, +/*22*/ {0}, +/*23*/ {0} +}; + +/* + * IDT PCIe-switch partitions table with the corresponding control, status + * and messages control registers + */ +static const struct idt_ntb_part partdata_tbl[IDT_MAX_NR_PARTS] = { +/*0*/ { IDT_SW_SWPART0CTL, IDT_SW_SWPART0STS, + {IDT_SW_SWP0MSGCTL0, IDT_SW_SWP0MSGCTL1, + IDT_SW_SWP0MSGCTL2, IDT_SW_SWP0MSGCTL3} }, +/*1*/ { IDT_SW_SWPART1CTL, IDT_SW_SWPART1STS, + {IDT_SW_SWP1MSGCTL0, IDT_SW_SWP1MSGCTL1, + IDT_SW_SWP1MSGCTL2, IDT_SW_SWP1MSGCTL3} }, +/*2*/ { IDT_SW_SWPART2CTL, IDT_SW_SWPART2STS, + {IDT_SW_SWP2MSGCTL0, IDT_SW_SWP2MSGCTL1, + IDT_SW_SWP2MSGCTL2, IDT_SW_SWP2MSGCTL3} }, +/*3*/ { IDT_SW_SWPART3CTL, IDT_SW_SWPART3STS, + {IDT_SW_SWP3MSGCTL0, IDT_SW_SWP3MSGCTL1, + IDT_SW_SWP3MSGCTL2, IDT_SW_SWP3MSGCTL3} }, +/*4*/ { IDT_SW_SWPART4CTL, IDT_SW_SWPART4STS, + {IDT_SW_SWP4MSGCTL0, IDT_SW_SWP4MSGCTL1, + IDT_SW_SWP4MSGCTL2, IDT_SW_SWP4MSGCTL3} }, +/*5*/ { IDT_SW_SWPART5CTL, IDT_SW_SWPART5STS, + {IDT_SW_SWP5MSGCTL0, IDT_SW_SWP5MSGCTL1, + IDT_SW_SWP5MSGCTL2, IDT_SW_SWP5MSGCTL3} }, +/*6*/ { IDT_SW_SWPART6CTL, IDT_SW_SWPART6STS, + {IDT_SW_SWP6MSGCTL0, IDT_SW_SWP6MSGCTL1, + IDT_SW_SWP6MSGCTL2, IDT_SW_SWP6MSGCTL3} }, +/*7*/ { IDT_SW_SWPART7CTL, IDT_SW_SWPART7STS, + {IDT_SW_SWP7MSGCTL0, IDT_SW_SWP7MSGCTL1, + IDT_SW_SWP7MSGCTL2, IDT_SW_SWP7MSGCTL3} } +}; 
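For illustration of how the lookup tables above are meant to be used: later code indexes them by port or partition number to reach any port's registers from a loop. A minimal sketch under that assumption (the dump helper itself is hypothetical; only idt_sw_read(), portdata_tbl and IDT_MAX_NR_PORTS come from this file):

	static void idt_dump_port_states(struct idt_ntb_dev *ndev)
	{
		int port;

		for (port = 0; port < IDT_MAX_NR_PORTS; port++) {
			/* Ports without an NT-function have zeroed entries */
			if (!portdata_tbl[port].sts)
				continue;
			dev_dbg(&ndev->ntb.pdev->dev, "port %d status: %#08x\n",
				port, idt_sw_read(ndev, portdata_tbl[port].sts));
		}
	}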
+ +/* + * DebugFS directory to place the driver debug file + */ +static struct dentry *dbgfs_topdir; + +/*============================================================================= + * 1. IDT PCIe-switch registers IO-functions + * + * Besides ordinary configuration space registers, the IDT PCIe-switch exposes + * global configuration registers, which are used to determine the state of + * other device ports as well as to get notified of some switch-related + * events. + * Additionally all the configuration space registers of all the IDT + * PCIe-switch functions are mapped to the Global Address space, so each + * function can determine the configuration of any other PCI-function. + * Functions declared in this chapter are created to encapsulate access + * to configuration and global registers, so the driver code just needs to + * provide the IDT NTB hardware descriptor and a register address. +============================================================================= + */ + +/* + * idt_nt_write() - PCI configuration space registers write method + * @ndev: IDT NTB hardware driver descriptor + * @reg: Register to write data to + * @data: Value to write to the register + * + * IDT PCIe-switch registers are all Little endian. + */ +static void idt_nt_write(struct idt_ntb_dev *ndev, + const unsigned int reg, const u32 data) +{ + /* + * It's an obvious bug to request a register exceeding the maximum + * possible value, as well as to have it unaligned. + */ + if (WARN_ON(reg > IDT_REG_PCI_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN))) + return; + + /* Just write the value to the specified register */ + iowrite32(data, ndev->cfgspc + (ptrdiff_t)reg); +} + +/* + * idt_nt_read() - PCI configuration space registers read method + * @ndev: IDT NTB hardware driver descriptor + * @reg: Register to read data from + * + * IDT PCIe-switch registers are all Little endian. + * + * Return: register value + */ +static u32 idt_nt_read(struct idt_ntb_dev *ndev, const unsigned int reg) +{ + /* + * It's an obvious bug to request a register exceeding the maximum + * possible value, as well as to have it unaligned. + */ + if (WARN_ON(reg > IDT_REG_PCI_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN))) + return ~0; + + /* Just read the value from the specified register */ + return ioread32(ndev->cfgspc + (ptrdiff_t)reg); +} + +/* + * idt_sw_write() - Global registers write method + * @ndev: IDT NTB hardware driver descriptor + * @reg: Register to write data to + * @data: Value to write to the register + * + * IDT PCIe-switch Global configuration registers are all Little endian. + */ +static void idt_sw_write(struct idt_ntb_dev *ndev, + const unsigned int reg, const u32 data) +{ + unsigned long irqflags; + + /* + * It's an obvious bug to request a register exceeding the maximum + * possible value, as well as to have it unaligned.
+ */ + if (WARN_ON(reg > IDT_REG_SW_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN))) + return; + + /* Lock GASA registers operations */ + spin_lock_irqsave(&ndev->gasa_lock, irqflags); + /* Set the global register address */ + iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR); + /* Put the new value of the register */ + iowrite32(data, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA); + /* Make sure the PCIe transactions are executed */ + mmiowb(); + /* Unlock GASA registers operations */ + spin_unlock_irqrestore(&ndev->gasa_lock, irqflags); +} + +/* + * idt_sw_read() - Global registers read method + * @ndev: IDT NTB hardware driver descriptor + * @reg: Register to read data from + * + * IDT PCIe-switch Global configuration registers are all Little endian. + * + * Return: register value + */ +static u32 idt_sw_read(struct idt_ntb_dev *ndev, const unsigned int reg) +{ + unsigned long irqflags; + u32 data; + + /* + * It's an obvious bug to request a register exceeding the maximum + * possible value, as well as to have it unaligned. + */ + if (WARN_ON(reg > IDT_REG_SW_MAX || !IS_ALIGNED(reg, IDT_REG_ALIGN))) + return ~0; + + /* Lock GASA registers operations */ + spin_lock_irqsave(&ndev->gasa_lock, irqflags); + /* Set the global register address */ + iowrite32((u32)reg, ndev->cfgspc + (ptrdiff_t)IDT_NT_GASAADDR); + /* Get the data of the register (a read op acts as an MMIO barrier) */ + data = ioread32(ndev->cfgspc + (ptrdiff_t)IDT_NT_GASADATA); + /* Unlock GASA registers operations */ + spin_unlock_irqrestore(&ndev->gasa_lock, irqflags); + + return data; +} + +/* + * idt_reg_set_bits() - set bits of a passed register + * @ndev: IDT NTB hardware driver descriptor + * @reg: Register to change bits of + * @reg_lock: Register access spin lock + * @valid_mask: Mask of valid bits + * @set_bits: Bitmask to set + * + * Helper method to check whether a passed bitfield is valid and set the + * corresponding bits of a register. + * + * WARNING! Make sure the passed register isn't accessed over the plain + * idt_nt_write() method (the read method is ok to be used concurrently). + * + * Return: zero on success, negative error on invalid bitmask. + */ +static inline int idt_reg_set_bits(struct idt_ntb_dev *ndev, unsigned int reg, + spinlock_t *reg_lock, + u64 valid_mask, u64 set_bits) +{ + unsigned long irqflags; + u32 data; + + if (set_bits & ~(u64)valid_mask) + return -EINVAL; + + /* Lock access to the register until the change is written back */ + spin_lock_irqsave(reg_lock, irqflags); + data = idt_nt_read(ndev, reg) | (u32)set_bits; + idt_nt_write(ndev, reg, data); + /* Unlock the register */ + spin_unlock_irqrestore(reg_lock, irqflags); + + return 0; +} + +/* + * idt_reg_clear_bits() - clear bits of a passed register + * @ndev: IDT NTB hardware driver descriptor + * @reg: Register to change bits of + * @reg_lock: Register access spin lock + * @clear_bits: Bitmask to clear + * + * Helper method to clear the corresponding bits of a register. + * + * NOTE! Invalid bits are always considered cleared, so it's not an error + * to clear them again. + * + * WARNING! Make sure the passed register isn't accessed over the plain + * idt_nt_write() method (the read method is ok to use concurrently). 
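+ * + * For example, a caller unmasking message-status interrupts might do: + * idt_reg_clear_bits(ndev, IDT_NT_MSGSTSMSK, &ndev->msg_mask_lock, + * mask_bits); + * (the register name and the lock above are illustrative assumptions, not + * taken from this hunk). 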
+ */ +static inline void idt_reg_clear_bits(struct idt_ntb_dev *ndev, + unsigned int reg, spinlock_t *reg_lock, + u64 clear_bits) +{ + unsigned long irqflags; + u32 data; + + /* Lock access to the register until the change is written back */ + spin_lock_irqsave(reg_lock, irqflags); + data = idt_nt_read(ndev, reg) & ~(u32)clear_bits; + idt_nt_write(ndev, reg, data); + /* Unlock the register */ + spin_unlock_irqrestore(reg_lock, irqflags); +} + +/*=========================================================================== + * 2. Ports operations + * + * IDT PCIe-switches can have from 3 up to 8 ports with NT-functions + * possibly enabled. So all the possible ports need to be scanned looking + * for an activated NTB. The NTB API will enumerate only the ports with NTB + * enabled. +=========================================================================== + */ + +/* + * idt_scan_ports() - scan IDT PCIe-switch ports collecting info in the tables + * @ndev: IDT NTB hardware driver descriptor + * + * Return: zero on success, otherwise a negative error number. + */ +static int idt_scan_ports(struct idt_ntb_dev *ndev) +{ + unsigned char pidx, port, part; + u32 data, portsts, partsts; + + /* Retrieve the local port number */ + data = idt_nt_read(ndev, IDT_NT_PCIELCAP); + ndev->port = GET_FIELD(PCIELCAP_PORTNUM, data); + + /* Retrieve the local partition number */ + portsts = idt_sw_read(ndev, portdata_tbl[ndev->port].sts); + ndev->part = GET_FIELD(SWPORTxSTS_SWPART, portsts); + + /* Initialize port/partition -> index tables with invalid values */ + memset(ndev->port_idx_map, -EINVAL, sizeof(ndev->port_idx_map)); + memset(ndev->part_idx_map, -EINVAL, sizeof(ndev->part_idx_map)); + + /* + * Walk over all the possible ports checking whether any of them has an + * NT-function activated + */ + ndev->peer_cnt = 0; + for (pidx = 0; pidx < ndev->swcfg->port_cnt; pidx++) { + port = ndev->swcfg->ports[pidx]; + /* Skip local port */ + if (port == ndev->port) + continue; + + /* Read the port status register to get its partition */ + portsts = idt_sw_read(ndev, portdata_tbl[port].sts); + part = GET_FIELD(SWPORTxSTS_SWPART, portsts); + + /* Retrieve the partition status */ + partsts = idt_sw_read(ndev, partdata_tbl[part].sts); + /* Check if partition state is active and port has NTB */ + if (IS_FLD_SET(SWPARTxSTS_STATE, partsts, ACT) && + (IS_FLD_SET(SWPORTxSTS_MODE, portsts, NT) || + IS_FLD_SET(SWPORTxSTS_MODE, portsts, USNT) || + IS_FLD_SET(SWPORTxSTS_MODE, portsts, USNTDMA) || + IS_FLD_SET(SWPORTxSTS_MODE, portsts, NTDMA))) { + /* Save the port and partition numbers */ + ndev->peers[ndev->peer_cnt].port = port; + ndev->peers[ndev->peer_cnt].part = part; + /* Fill in the port/partition -> index tables */ + ndev->port_idx_map[port] = ndev->peer_cnt; + ndev->part_idx_map[part] = ndev->peer_cnt; + ndev->peer_cnt++; + } + } + + dev_dbg(&ndev->ntb.pdev->dev, "Local port: %hhu, num of peers: %hhu\n", + ndev->port, ndev->peer_cnt); + + /* It's useless to have this driver loaded if there is no peer */ + if (ndev->peer_cnt == 0) { + dev_warn(&ndev->ntb.pdev->dev, "No active peer found\n"); + return -ENODEV; + } + + return 0; +} + +/* + * idt_ntb_port_number() - get the local port number + * @ntb: NTB device context. + * + * Return: the local port number + */ +static int idt_ntb_port_number(struct ntb_dev *ntb) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + return ndev->port; +} + +/* + * idt_ntb_peer_port_count() - get the number of peer ports + * @ntb: NTB device context. 
+ * + * Return the count of detected peer NT-functions. + * + * Return: number of peer ports + */ +static int idt_ntb_peer_port_count(struct ntb_dev *ntb) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + return ndev->peer_cnt; +} + +/* + * idt_ntb_peer_port_number() - get peer port by given index + * @ntb: NTB device context. + * @pidx: Peer port index. + * + * Return: peer port or negative error + */ +static int idt_ntb_peer_port_number(struct ntb_dev *ntb, int pidx) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + if (pidx < 0 || ndev->peer_cnt <= pidx) + return -EINVAL; + + /* Return the detected NT-function port number */ + return ndev->peers[pidx].port; +} + +/* + * idt_ntb_peer_port_idx() - get peer port index by given port number + * @ntb: NTB device context. + * @port: Peer port number. + * + * The internal port -> index table is pre-initialized with -EINVAL values, + * so we just need to return its value + * + * Return: peer NT-function port index or negative error + */ +static int idt_ntb_peer_port_idx(struct ntb_dev *ntb, int port) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + if (port < 0 || IDT_MAX_NR_PORTS <= port) + return -EINVAL; + + return ndev->port_idx_map[port]; +} + +/*=========================================================================== + * 3. Link status operations + * There is no ready-to-use method to have the peer ports notified when the + * NTB link is set up or goes down. Instead, the global signal can be used. + * When any one of the ports changes its local NTB link state, it sends the + * global signal and clears the corresponding global state bit. Then all the + * ports receive a notification of that, which makes the client driver aware + * of a possible NTB link change. + * Additionally, each of the active NT-functions is subscribed to the + * PCIe-link state changes of the peer ports. +=========================================================================== + */ + +static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev); + +/* + * idt_init_link() - Initialize NTB link state notification subsystem + * @ndev: IDT NTB hardware driver descriptor + * + * Function performs the basic initialization of some global registers + * needed to enable IRQ-based notifications of PCIe Link Up/Down and + * Global Signal events. + * NOTE: Since it's not possible to determine when all the NTB peer drivers + * are unloaded, and those registers may be accessed concurrently, we must + * preinitialize them with the same values and leave them uncleared on local + * driver unload. 
+ */ +static void idt_init_link(struct idt_ntb_dev *ndev) +{ + u32 part_mask, port_mask, se_mask; + unsigned char pidx; + + /* Initialize the spin lock of the Mapping Table access registers */ + spin_lock_init(&ndev->mtbl_lock); + + /* Walk over all detected peers collecting port and partition masks */ + port_mask = ~BIT(ndev->port); + part_mask = ~BIT(ndev->part); + for (pidx = 0; pidx < ndev->peer_cnt; pidx++) { + port_mask &= ~BIT(ndev->peers[pidx].port); + part_mask &= ~BIT(ndev->peers[pidx].part); + } + + /* Clean the Link Up/Down and Global Signal status registers */ + idt_sw_write(ndev, IDT_SW_SELINKUPSTS, (u32)-1); + idt_sw_write(ndev, IDT_SW_SELINKDNSTS, (u32)-1); + idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)-1); + + /* Unmask NT-activated partitions to receive Global Switch events */ + idt_sw_write(ndev, IDT_SW_SEPMSK, part_mask); + + /* Enable PCIe Link Up events of NT-activated ports */ + idt_sw_write(ndev, IDT_SW_SELINKUPMSK, port_mask); + + /* Enable PCIe Link Down events of NT-activated ports */ + idt_sw_write(ndev, IDT_SW_SELINKDNMSK, port_mask); + + /* Unmask NT-activated partitions to receive Global Signal events */ + idt_sw_write(ndev, IDT_SW_SEGSIGMSK, part_mask); + + /* Unmask Link Up/Down and Global Switch Events */ + se_mask = ~(IDT_SEMSK_LINKUP | IDT_SEMSK_LINKDN | IDT_SEMSK_GSIGNAL); + idt_sw_write(ndev, IDT_SW_SEMSK, se_mask); + + dev_dbg(&ndev->ntb.pdev->dev, "NTB link status events initialized"); +} + +/* + * idt_deinit_link() - deinitialize link subsystem + * @ndev: IDT NTB hardware driver descriptor + * + * Just disable the local NTB link again. + */ +static void idt_deinit_link(struct idt_ntb_dev *ndev) +{ + /* Disable the link */ + idt_ntb_local_link_disable(ndev); + + dev_dbg(&ndev->ntb.pdev->dev, "NTB link status events deinitialized"); +} + +/* + * idt_se_isr() - switch events ISR + * @ndev: IDT NTB hardware driver descriptor + * @ntint_sts: NT-function interrupt status + * + * This driver doesn't support IDT PCIe-switch dynamic reconfigurations, + * the Failover capability, etc., so switch events are utilized to notify of + * PCIe and NTB link events. + * The method is called from the PCIe ISR bottom-half routine. + */ +static void idt_se_isr(struct idt_ntb_dev *ndev, u32 ntint_sts) +{ + u32 sests; + + /* Read Switch Events status */ + sests = idt_sw_read(ndev, IDT_SW_SESTS); + + /* Clean the Link Up/Down and Global Signal status registers */ + idt_sw_write(ndev, IDT_SW_SELINKUPSTS, (u32)-1); + idt_sw_write(ndev, IDT_SW_SELINKDNSTS, (u32)-1); + idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)-1); + + /* Clean the corresponding interrupt bit */ + idt_nt_write(ndev, IDT_NT_NTINTSTS, IDT_NTINTSTS_SEVENT); + + dev_dbg(&ndev->ntb.pdev->dev, "SE IRQ detected %#08x (SESTS %#08x)", + ntint_sts, sests); + + /* Notify the client driver of possible link state change */ + ntb_link_event(&ndev->ntb); +} + +/* + * idt_ntb_local_link_enable() - enable the local NTB link. 
+ * @ndev: IDT NTB hardware driver descriptor + * + * In order to enable the NTB link we need to: + * - enable Completion TLPs translation + * - initialize the mapping table to enable the Request ID translation + * - notify the peers of the NTB link state change + */ +static void idt_ntb_local_link_enable(struct idt_ntb_dev *ndev) +{ + u32 reqid, mtbldata = 0; + unsigned long irqflags; + + /* Enable the ID protection and Completion TLPs translation */ + idt_nt_write(ndev, IDT_NT_NTCTL, IDT_NTCTL_CPEN); + + /* Retrieve the current Requester ID (Bus:Device:Function) */ + reqid = idt_nt_read(ndev, IDT_NT_REQIDCAP); + + /* + * Set the corresponding NT Mapping table entry of the port partition + * index with the data to perform the Request ID translation + */ + mtbldata = SET_FIELD(NTMTBLDATA_REQID, 0, reqid) | + SET_FIELD(NTMTBLDATA_PART, 0, ndev->part) | + IDT_NTMTBLDATA_VALID; + spin_lock_irqsave(&ndev->mtbl_lock, irqflags); + idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part); + idt_nt_write(ndev, IDT_NT_NTMTBLDATA, mtbldata); + mmiowb(); + spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags); + + /* Notify the peers by setting and clearing the global signal bit */ + idt_nt_write(ndev, IDT_NT_NTGSIGNAL, IDT_NTGSIGNAL_SET); + idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)1 << ndev->part); +} + +/* + * idt_ntb_local_link_disable() - disable the local NTB link. + * @ndev: IDT NTB hardware driver descriptor + * + * In order to disable the NTB link we need to: + * - disable Completion TLPs translation + * - clear the corresponding mapping table entry + * - notify the peers of the NTB link state change + */ +static void idt_ntb_local_link_disable(struct idt_ntb_dev *ndev) +{ + unsigned long irqflags; + + /* Disable Completion TLPs translation */ + idt_nt_write(ndev, IDT_NT_NTCTL, 0); + + /* Clear the corresponding NT Mapping table entry */ + spin_lock_irqsave(&ndev->mtbl_lock, irqflags); + idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part); + idt_nt_write(ndev, IDT_NT_NTMTBLDATA, 0); + mmiowb(); + spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags); + + /* Notify the peers by setting and clearing the global signal bit */ + idt_nt_write(ndev, IDT_NT_NTGSIGNAL, IDT_NTGSIGNAL_SET); + idt_sw_write(ndev, IDT_SW_SEGSIGSTS, (u32)1 << ndev->part); +} + +/* + * idt_ntb_local_link_is_up() - test whether the local NTB link is up + * @ndev: IDT NTB hardware driver descriptor + * + * Local link is up under the following conditions: + * - Bus mastering is enabled + * - NTCTL has Completion TLPs translation enabled + * - Mapping table permits Request TLPs translation + * NOTE: We don't need to check the PCIe link state since it's obviously + * up while we are able to communicate with the IDT PCIe-switch + * + * Return: true if link is up, otherwise false + */ +static bool idt_ntb_local_link_is_up(struct idt_ntb_dev *ndev) +{ + unsigned long irqflags; + u32 data; + + /* Read the local Bus Master Enable status */ + data = idt_nt_read(ndev, IDT_NT_PCICMDSTS); + if (!(data & IDT_PCICMDSTS_BME)) + return false; + + /* Read the local Completion TLPs translation enable status */ + data = idt_nt_read(ndev, IDT_NT_NTCTL); + if (!(data & IDT_NTCTL_CPEN)) + return false; + + /* Read Mapping table entry corresponding to the local partition */ + spin_lock_irqsave(&ndev->mtbl_lock, irqflags); + idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->part); + data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA); + spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags); + + return !!(data & IDT_NTMTBLDATA_VALID); +} + +/* + * idt_ntb_peer_link_is_up() - test whether peer NTB link is up + * @ndev: IDT NTB 
hardware driver descriptor + * @pidx: Peer port index + * + * Peer link is up under the following conditions: + * - PCIe link is up + * - Bus mastering is enabled + * - NTCTL has Completion TLPs translation enabled + * - Mapping table permits Request TLPs translation + * + * Return: true if link is up, otherwise false + */ +static bool idt_ntb_peer_link_is_up(struct idt_ntb_dev *ndev, int pidx) +{ + unsigned long irqflags; + unsigned char port; + u32 data; + + /* Retrieve the device port number */ + port = ndev->peers[pidx].port; + + /* Check whether PCIe link is up */ + data = idt_sw_read(ndev, portdata_tbl[port].sts); + if (!(data & IDT_SWPORTxSTS_LINKUP)) + return false; + + /* Check whether bus mastering is enabled on the peer port */ + data = idt_sw_read(ndev, portdata_tbl[port].pcicmdsts); + if (!(data & IDT_PCICMDSTS_BME)) + return false; + + /* Check if Completion TLPs translation is enabled on the peer port */ + data = idt_sw_read(ndev, portdata_tbl[port].ntctl); + if (!(data & IDT_NTCTL_CPEN)) + return false; + + /* Read Mapping table entry corresponding to the peer partition */ + spin_lock_irqsave(&ndev->mtbl_lock, irqflags); + idt_nt_write(ndev, IDT_NT_NTMTBLADDR, ndev->peers[pidx].part); + data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA); + spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags); + + return !!(data & IDT_NTMTBLDATA_VALID); +} + +/* + * idt_ntb_link_is_up() - get the current ntb link state (NTB API callback) + * @ntb: NTB device context. + * @speed: OUT - The link speed expressed as PCIe generation number. + * @width: OUT - The link width expressed as the number of PCIe lanes. + * + * Get the bitfield of NTB link states for all peer ports + * + * Return: bitfield of indexed ports link state: bit is set/cleared if the + * link is up/down respectively. + */ +static u64 idt_ntb_link_is_up(struct ntb_dev *ntb, + enum ntb_speed *speed, enum ntb_width *width) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + unsigned char pidx; + u64 status; + u32 data; + + /* Retrieve the local link speed and width */ + if (speed != NULL || width != NULL) { + data = idt_nt_read(ndev, IDT_NT_PCIELCTLSTS); + if (speed != NULL) + *speed = GET_FIELD(PCIELCTLSTS_CLS, data); + if (width != NULL) + *width = GET_FIELD(PCIELCTLSTS_NLW, data); + } + + /* If the local NTB link isn't up then all the links are considered down */ + if (!idt_ntb_local_link_is_up(ndev)) + return 0; + + /* Collect all the peer ports link states into the bitfield */ + status = 0; + for (pidx = 0; pidx < ndev->peer_cnt; pidx++) { + if (idt_ntb_peer_link_is_up(ndev, pidx)) + status |= ((u64)1 << pidx); + } + + return status; +} + +/* + * idt_ntb_link_enable() - enable local port ntb link (NTB API callback) + * @ntb: NTB device context. + * @speed: The maximum link speed expressed as PCIe generation number. + * @width: The maximum link width expressed as the number of PCIe lanes. + * + * Enable just the local NTB link. PCIe link parameters are ignored. + * + * Return: always zero. + */ +static int idt_ntb_link_enable(struct ntb_dev *ntb, enum ntb_speed speed, + enum ntb_width width) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + /* Just enable the local NTB link */ + idt_ntb_local_link_enable(ndev); + + dev_dbg(&ndev->ntb.pdev->dev, "Local NTB link enabled"); + + return 0; +} + +/* + * idt_ntb_link_disable() - disable local port ntb link (NTB API callback) + * @ntb: NTB device context. + * + * Disable just the local NTB link. + * + * Return: always zero. 
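+ * + * Sketch of a typical client-side sequence (illustrative client code, not + * part of this patch; do_transfers() is a placeholder): + * ntb_link_enable(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO); + * (wait for the client's link_event callback, then:) + * if (ntb_link_is_up(ntb, NULL, NULL) & BIT(pidx)) + * do_transfers(); + * ntb_link_disable(ntb); 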
+ */ +static int idt_ntb_link_disable(struct ntb_dev *ntb) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + /* Just disable the local NTB link */ + idt_ntb_local_link_disable(ndev); + + dev_dbg(&ndev->ntb.pdev->dev, "Local NTB link disabled"); + + return 0; +} + +/*============================================================================= + * 4. Memory Window operations + * + * IDT PCIe-switches have two types of memory windows: MWs with direct + * address translation and MWs with LUT based translation. The first type of + * MW is a simple map of the corresponding BAR address space to a memory space + * of the specified target port, so it implements just a one-to-one mapping. + * A lookup table, in its turn, can map one BAR address space to up to 24 + * different memory spaces of different ports. + * NT-function BARs can be turned on to implement either direct or lookup + * table based address translations, so: + * BAR0 - NT configuration registers space/direct address translation + * BAR1 - direct address translation/upper address of BAR0x64 + * BAR2 - direct address translation/Lookup table with either 12 or 24 entries + * BAR3 - direct address translation/upper address of BAR2x64 + * BAR4 - direct address translation/Lookup table with either 12 or 24 entries + * BAR5 - direct address translation/upper address of BAR4x64 + * Additionally, BAR2 and BAR4 can't have a 24-entry LUT enabled at the same + * time. Since the BAR setup can be rather complicated, this driver implements + * a scanning algorithm to have all the possible memory window configurations + * covered. + * + * NOTE 1: BAR setup must be done before the Linux kernel enumerates the + * NT-function of any port, so this driver sees a fixed memory window + * configuration. Thus all initialization must be performed either by the + * platform BIOS or using an EEPROM connected to the IDT PCIe-switch master + * SMBus. + * + * NOTE 2: This driver expects BAR0 to map the NT-function configuration + * space. An easy calculation gives an upper boundary of 29 possible memory + * windows per NT-function if all the BARs are of 32-bit type. +============================================================================= + */ + +/* + * idt_get_mw_count() - get memory window count + * @mw_type: Memory window type + * + * Return: number of memory windows with respect to the BAR type + */ +static inline unsigned char idt_get_mw_count(enum idt_mw_type mw_type) +{ + switch (mw_type) { + case IDT_MW_DIR: + return 1; + case IDT_MW_LUT12: + return 12; + case IDT_MW_LUT24: + return 24; + default: + break; + } + + return 0; +} + +/* + * idt_get_mw_name() - get memory window name + * @mw_type: Memory window type + * + * Return: pointer to a string with name + */ +static inline char *idt_get_mw_name(enum idt_mw_type mw_type) +{ + switch (mw_type) { + case IDT_MW_DIR: + return "DIR "; + case IDT_MW_LUT12: + return "LUT12"; + case IDT_MW_LUT24: + return "LUT24"; + default: + break; + } + + return "unknown"; +} + +/* + * idt_scan_mws() - scan memory windows of the port + * @ndev: IDT NTB hardware driver descriptor + * @port: Port to get number of memory windows for + * @mw_cnt: Out - number of memory windows + * + * It walks over the BAR setup registers of the specified port and determines + * the memory window parameters of those that are activated. 
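+ * + * For example (derived from the size_max math below): an enabled BAR2 set + * up in LUT24 mode with a 1MB aperture is reported as 24 memory windows of + * 1MB/32 = 32KB each, while the same aperture in DIR mode gives a single + * 1MB window. 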
+ * + * Return: array of memory windows + */ +static struct idt_mw_cfg *idt_scan_mws(struct idt_ntb_dev *ndev, int port, + unsigned char *mw_cnt) +{ + struct idt_mw_cfg mws[IDT_MAX_NR_MWS], *ret_mws; + const struct idt_ntb_bar *bars; + enum idt_mw_type mw_type; + unsigned char widx, bidx, en_cnt; + bool bar_64bit = false; + u64 aprt_size; + u32 data; + + /* Retrieve the array of the BAR registers */ + bars = portdata_tbl[port].bars; + + /* Scan all the BARs belonging to the port */ + *mw_cnt = 0; + for (bidx = 0; bidx < IDT_BAR_CNT; bidx += 1 + bar_64bit) { + /* Read BARSETUP register value */ + data = idt_sw_read(ndev, bars[bidx].setup); + + /* Skip disabled BARs */ + if (!(data & IDT_BARSETUP_EN)) { + bar_64bit = false; + continue; + } + + /* Skip next BARSETUP if current one has 64bit addressing */ + bar_64bit = IS_FLD_SET(BARSETUP_TYPE, data, 64); + + /* Skip configuration space mapping BARs */ + if (data & IDT_BARSETUP_MODE_CFG) + continue; + + /* Retrieve MW type/entries count and aperture size */ + mw_type = GET_FIELD(BARSETUP_ATRAN, data); + en_cnt = idt_get_mw_count(mw_type); + aprt_size = (u64)1 << GET_FIELD(BARSETUP_SIZE, data); + + /* Save configurations of all available memory windows */ + for (widx = 0; widx < en_cnt; widx++, (*mw_cnt)++) { + /* + * IDT can expose a limited number of MWs, so it's a bug + * to have more than the driver expects + */ + if (*mw_cnt >= IDT_MAX_NR_MWS) + return ERR_PTR(-EINVAL); + + /* Save basic MW info */ + mws[*mw_cnt].type = mw_type; + mws[*mw_cnt].bar = bidx; + mws[*mw_cnt].idx = widx; + /* It's always DWORD aligned */ + mws[*mw_cnt].addr_align = IDT_TRANS_ALIGN; + /* The DIR and LUT approaches configure MWs differently */ + if (mw_type == IDT_MW_DIR) + mws[*mw_cnt].size_max = aprt_size; + else if (mw_type == IDT_MW_LUT12) + mws[*mw_cnt].size_max = aprt_size / 16; + else + mws[*mw_cnt].size_max = aprt_size / 32; + mws[*mw_cnt].size_align = (mw_type == IDT_MW_DIR) ? 
+ IDT_DIR_SIZE_ALIGN : mws[*mw_cnt].size_max; + } + } + + /* Allocate memory for the memory window descriptors */ + ret_mws = devm_kcalloc(&ndev->ntb.pdev->dev, *mw_cnt, + sizeof(*ret_mws), GFP_KERNEL); + if (IS_ERR_OR_NULL(ret_mws)) + return ERR_PTR(-ENOMEM); + + /* Copy the info of the detected memory windows */ + memcpy(ret_mws, mws, (*mw_cnt)*sizeof(*ret_mws)); + + return ret_mws; +} + +/* + * idt_init_mws() - initialize memory windows subsystem + * @ndev: IDT NTB hardware driver descriptor + * + * Scan the BAR setup registers of the local and peer ports to determine the + * outbound and inbound memory window parameters + * + * Return: zero on success, otherwise a negative error number + */ +static int idt_init_mws(struct idt_ntb_dev *ndev) +{ + struct idt_ntb_peer *peer; + unsigned char pidx; + + /* Scan memory windows of the local port */ + ndev->mws = idt_scan_mws(ndev, ndev->port, &ndev->mw_cnt); + if (IS_ERR(ndev->mws)) { + dev_err(&ndev->ntb.pdev->dev, + "Failed to scan mws of local port %hhu", ndev->port); + return PTR_ERR(ndev->mws); + } + + /* Scan memory windows of the peer ports */ + for (pidx = 0; pidx < ndev->peer_cnt; pidx++) { + peer = &ndev->peers[pidx]; + peer->mws = idt_scan_mws(ndev, peer->port, &peer->mw_cnt); + if (IS_ERR(peer->mws)) { + dev_err(&ndev->ntb.pdev->dev, + "Failed to scan mws of port %hhu", peer->port); + return PTR_ERR(peer->mws); + } + } + + /* Initialize the spin lock of the LUT registers */ + spin_lock_init(&ndev->lut_lock); + + dev_dbg(&ndev->ntb.pdev->dev, "Outbound and inbound MWs initialized"); + + return 0; +} + +/* + * idt_ntb_mw_count() - number of inbound memory windows (NTB API callback) + * @ntb: NTB device context. + * @pidx: Port index of peer device. + * + * The value is returned for the specified peer, so generally speaking it can + * be different for different ports depending on the IDT PCIe-switch + * initialization. + * + * Return: the number of memory windows. + */ +static int idt_ntb_mw_count(struct ntb_dev *ntb, int pidx) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + + if (pidx < 0 || ndev->peer_cnt <= pidx) + return -EINVAL; + + return ndev->peers[pidx].mw_cnt; +} + +/* + * idt_ntb_mw_get_align() - inbound memory window parameters (NTB API callback) + * @ntb: NTB device context. + * @pidx: Port index of peer device. + * @widx: Memory window index. + * @addr_align: OUT - the base alignment for translating the memory window + * @size_align: OUT - the size alignment for translating the memory window + * @size_max: OUT - the maximum size of the memory window + * + * The peer memory window parameters have already been determined, so just + * return the corresponding values, which mustn't change within a session. + * + * Return: Zero on success, otherwise a negative error number. + */ +static int idt_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) +{ + struct idt_ntb_dev *ndev = to_ndev_ntb(ntb); + struct idt_ntb_peer *peer; + + if (pidx < 0 || ndev->peer_cnt <= pidx) + return -EINVAL; + + peer = &ndev->peers[pidx]; + + if (widx < 0 || peer->mw_cnt <= widx) + return -EINVAL; + + if (addr_align != NULL) + *addr_align = peer->mws[widx].addr_align; + + if (size_align != NULL) + *size_align = peer->mws[widx].size_align; + + if (size_max != NULL) + *size_max = peer->mws[widx].size_max; + + return 0; +} + +/* + * idt_ntb_peer_mw_count() - number of outbound memory windows + * (NTB API callback) + * @ntb: NTB device context. 
+
+/*
+ * idt_ntb_peer_mw_count() - number of outbound memory windows
+ *			     (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * Outbound memory window parameters are determined based on the BAR setup
+ * register values, which are mostly constant within one session.
+ *
+ * Return: the number of memory windows.
+ */
+static int idt_ntb_peer_mw_count(struct ntb_dev *ntb)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	return ndev->mw_cnt;
+}
+
+/*
+ * idt_ntb_peer_mw_get_addr() - get map address of an outbound memory window
+ *				(NTB API callback)
+ * @ntb:	NTB device context.
+ * @widx:	Memory window index (within ntb_peer_mw_count() return value).
+ * @base:	OUT - the base address of mapping region.
+ * @size:	OUT - the size of mapping region.
+ *
+ * Just return the parameters of the BAR resource mapping. Size reflects
+ * just the size of the resource.
+ *
+ * Return: Zero on success, otherwise a negative error number.
+ */
+static int idt_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int widx,
+				    phys_addr_t *base, resource_size_t *size)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	if (widx < 0 || ndev->mw_cnt <= widx)
+		return -EINVAL;
+
+	/* Mapping address is just the properly shifted BAR resource start */
+	if (base != NULL)
+		*base = pci_resource_start(ntb->pdev, ndev->mws[widx].bar) +
+			ndev->mws[widx].idx * ndev->mws[widx].size_max;
+
+	/* Mapping size has already been calculated at MWs scanning */
+	if (size != NULL)
+		*size = ndev->mws[widx].size_max;
+
+	return 0;
+}
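+
+/*
+ * A sketch of the intended usage (illustrative only): a client maps the
+ * outbound window by the address and size reported here, e.g.:
+ *
+ *	phys_addr_t base;
+ *	resource_size_t size;
+ *	void __iomem *mmio;
+ *
+ *	if (!ntb_peer_mw_get_addr(ntb, 0, &base, &size))
+ *		mmio = ioremap_wc(base, size);
+ */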
+
+/*
+ * idt_ntb_peer_mw_set_trans() - set a translation address of a memory window
+ *				 (NTB API callback)
+ * @ntb:	NTB device context.
+ * @pidx:	Port index of peer device the translation address received from.
+ * @widx:	Memory window index.
+ * @addr:	The dma address of the shared memory to access.
+ * @size:	The size of the shared memory to access.
+ *
+ * The Direct address translation and the LUT base translation are
+ * initialized slightly differently, although the parameter restrictions
+ * are determined by the same code.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static int idt_ntb_peer_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
+				     u64 addr, resource_size_t size)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+	struct idt_mw_cfg *mw_cfg;
+	u32 data = 0, lutoff = 0;
+
+	if (pidx < 0 || ndev->peer_cnt <= pidx)
+		return -EINVAL;
+
+	if (widx < 0 || ndev->mw_cnt <= widx)
+		return -EINVAL;
+
+	/*
+	 * Retrieve the memory window config to make sure the passed arguments
+	 * fit its restrictions
+	 */
+	mw_cfg = &ndev->mws[widx];
+	if (!IS_ALIGNED(addr, mw_cfg->addr_align))
+		return -EINVAL;
+	if (!IS_ALIGNED(size, mw_cfg->size_align) || size > mw_cfg->size_max)
+		return -EINVAL;
+
+	/* DIR and LUT based translations are initialized differently */
+	if (mw_cfg->type == IDT_MW_DIR) {
+		const struct idt_ntb_bar *bar = &ntdata_tbl.bars[mw_cfg->bar];
+		u64 limit;
+		/* Set destination partition of translation */
+		data = idt_nt_read(ndev, bar->setup);
+		data = SET_FIELD(BARSETUP_TPART, data, ndev->peers[pidx].part);
+		idt_nt_write(ndev, bar->setup, data);
+		/* Set translation base address */
+		idt_nt_write(ndev, bar->ltbase, (u32)addr);
+		idt_nt_write(ndev, bar->utbase, (u32)(addr >> 32));
+		/* Set the custom BAR aperture limit */
+		limit = pci_resource_start(ntb->pdev, mw_cfg->bar) + size;
+		idt_nt_write(ndev, bar->limit, (u32)limit);
+		if (IS_FLD_SET(BARSETUP_TYPE, data, 64))
+			idt_nt_write(ndev, (bar + 1)->limit, (limit >> 32));
+	} else {
+		unsigned long irqflags;
+		/* Initialize corresponding LUT entry */
+		lutoff = SET_FIELD(LUTOFFSET_INDEX, 0, mw_cfg->idx) |
+			 SET_FIELD(LUTOFFSET_BAR, 0, mw_cfg->bar);
+		data = SET_FIELD(LUTUDATA_PART, 0, ndev->peers[pidx].part) |
+			IDT_LUTUDATA_VALID;
+		spin_lock_irqsave(&ndev->lut_lock, irqflags);
+		idt_nt_write(ndev, IDT_NT_LUTOFFSET, lutoff);
+		idt_nt_write(ndev, IDT_NT_LUTLDATA, (u32)addr);
+		idt_nt_write(ndev, IDT_NT_LUTMDATA, (u32)(addr >> 32));
+		idt_nt_write(ndev, IDT_NT_LUTUDATA, data);
+		mmiowb();
+		spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
+		/* Limit address isn't specified since size is fixed for LUT */
+	}
+
+	return 0;
+}
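+
+/*
+ * Typical translation lifecycle from a client's point of view (a sketch,
+ * illustrative only): the peer shares a DMA address satisfying the
+ * ntb_mw_get_align() constraints, which is then programmed and later torn
+ * down via the NTB API wrappers backed by the callbacks here:
+ *
+ *	ret = ntb_peer_mw_set_trans(ntb, pidx, 0, dma_addr, size);
+ *	...
+ *	ntb_peer_mw_clear_trans(ntb, pidx, 0);
+ */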
+
+/*
+ * idt_ntb_peer_mw_clear_trans() - clear the outbound MW translation address
+ *				   (NTB API callback)
+ * @ntb:	NTB device context.
+ * @pidx:	Port index of peer device.
+ * @widx:	Memory window index.
+ *
+ * It effectively disables the translation over the specified outbound MW.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+static int idt_ntb_peer_mw_clear_trans(struct ntb_dev *ntb, int pidx,
+					int widx)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+	struct idt_mw_cfg *mw_cfg;
+
+	if (pidx < 0 || ndev->peer_cnt <= pidx)
+		return -EINVAL;
+
+	if (widx < 0 || ndev->mw_cnt <= widx)
+		return -EINVAL;
+
+	mw_cfg = &ndev->mws[widx];
+
+	/* DIR and LUT based translations are cleared differently */
+	if (mw_cfg->type == IDT_MW_DIR) {
+		const struct idt_ntb_bar *bar = &ntdata_tbl.bars[mw_cfg->bar];
+		u32 data;
+		/* Read BARSETUP to check BAR type */
+		data = idt_nt_read(ndev, bar->setup);
+		/* Disable translation by specifying zero BAR limit */
+		idt_nt_write(ndev, bar->limit, 0);
+		if (IS_FLD_SET(BARSETUP_TYPE, data, 64))
+			idt_nt_write(ndev, (bar + 1)->limit, 0);
+	} else {
+		unsigned long irqflags;
+		u32 lutoff;
+		/* Clear the corresponding LUT entry */
+		lutoff = SET_FIELD(LUTOFFSET_INDEX, 0, mw_cfg->idx) |
+			 SET_FIELD(LUTOFFSET_BAR, 0, mw_cfg->bar);
+		spin_lock_irqsave(&ndev->lut_lock, irqflags);
+		idt_nt_write(ndev, IDT_NT_LUTOFFSET, lutoff);
+		idt_nt_write(ndev, IDT_NT_LUTLDATA, 0);
+		idt_nt_write(ndev, IDT_NT_LUTMDATA, 0);
+		idt_nt_write(ndev, IDT_NT_LUTUDATA, 0);
+		mmiowb();
+		spin_unlock_irqrestore(&ndev->lut_lock, irqflags);
+	}
+
+	return 0;
+}
+
+/*=============================================================================
+ * 5. Doorbell operations
+ *
+ * Doorbell functionality of IDT PCIe-switches is pretty unusual. First of
+ * all, there is a global doorbell register whose state can be changed by any
+ * NT-function of the IDT device in accordance with the global permissions.
+ * These permission configs are not supported by the NTB API, so they must be
+ * set up by either BIOS or EEPROM settings. In the same way the state of the
+ * global doorbell is reflected in the NT-functions' local inbound doorbell
+ * registers. It can lead to situations when a client driver sets some peer
+ * doorbell bits and gets them bounced back to its local inbound doorbell if
+ * the permissions are granted.
+ * Secondly, there is just one IRQ vector for the Doorbell, Message,
+ * Temperature and Switch events, so if a client driver has left any of the
+ * doorbell bits set and some other event occurs, the driver will be notified
+ * of a Doorbell event again.
+ *=============================================================================
+ */
+
+/*
+ * idt_db_isr() - doorbell event ISR
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @ntint_sts:	NT-function interrupt status
+ *
+ * A doorbell event happens when the DBELL bit of NTINTSTS switches from 0
+ * to 1. That happens only when unmasked doorbell bits are set to ones on a
+ * completely zeroed doorbell register.
+ * The method is called from the PCIe ISR bottom-half routine.
+ */
+static void idt_db_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
+{
+	/*
+	 * Doorbell IRQ status will be cleaned only when client
+	 * driver unsets all the doorbell bits.
+	 */
+	dev_dbg(&ndev->ntb.pdev->dev, "DB IRQ detected %#08x", ntint_sts);
+
+	/* Notify the client driver of possible doorbell state change */
+	ntb_db_event(&ndev->ntb, 0);
+}
+
+/*
+ * idt_ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
+ *			     (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * IDT PCIe-switches expose just one doorbell register of DWORD size.
+ *
+ * Return: A mask of doorbell bits supported by the ntb.
+ */
+static u64 idt_ntb_db_valid_mask(struct ntb_dev *ntb)
+{
+	return IDT_DBELL_MASK;
+}
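+
+/*
+ * Doorbell usage sketch (illustrative only): a client typically unmasks
+ * the bits it cares about, rings the peer, and clears its local doorbell
+ * from the db_event callback:
+ *
+ *	ntb_db_clear_mask(ntb, BIT(0));		(unmask doorbell bit 0)
+ *	ntb_peer_db_set(ntb, BIT(0));		(ring bit 0 on the peer)
+ *	(then, in the db_event callback:)
+ *	ntb_db_clear(ntb, ntb_db_read(ntb));
+ */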
+
+/*
+ * idt_ntb_db_read() - read the local doorbell register (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * There is just one inbound doorbell register for each NT-function, so
+ * this method returns its value.
+ *
+ * Return: The bits currently set in the local doorbell register.
+ */
+static u64 idt_ntb_db_read(struct ntb_dev *ntb)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	return idt_nt_read(ndev, IDT_NT_INDBELLSTS);
+}
+
+/*
+ * idt_ntb_db_clear() - clear bits in the local doorbell register
+ *			(NTB API callback)
+ * @ntb:	NTB device context.
+ * @db_bits:	Doorbell bits to clear.
+ *
+ * Clear bits of the inbound doorbell register by writing ones to it.
+ *
+ * NOTE! Invalid bits are always considered cleared, so it's not an error
+ * to clear them again.
+ *
+ * Return: always zero as success.
+ */
+static int idt_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	idt_nt_write(ndev, IDT_NT_INDBELLSTS, (u32)db_bits);
+
+	return 0;
+}
+
+/*
+ * idt_ntb_db_read_mask() - read the local doorbell mask (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * Each inbound doorbell bit can be masked from generating an IRQ by setting
+ * the corresponding bit in the inbound doorbell mask. So this method returns
+ * the value of the register.
+ *
+ * Return: The bits currently set in the local doorbell mask register.
+ */
+static u64 idt_ntb_db_read_mask(struct ntb_dev *ntb)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	return idt_nt_read(ndev, IDT_NT_INDBELLMSK);
+}
+
+/*
+ * idt_ntb_db_set_mask() - set bits in the local doorbell mask
+ *			   (NTB API callback)
+ * @ntb:	NTB device context.
+ * @db_bits:	Doorbell mask bits to set.
+ *
+ * The inbound doorbell register mask value must be read, then OR'ed with
+ * the passed field and only then written back.
+ *
+ * Return: zero on success, negative error if an invalid argument is passed.
+ */
+static int idt_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	return idt_reg_set_bits(ndev, IDT_NT_INDBELLMSK, &ndev->db_mask_lock,
+				IDT_DBELL_MASK, db_bits);
+}
+
+/*
+ * idt_ntb_db_clear_mask() - clear bits in the local doorbell mask
+ *			     (NTB API callback)
+ * @ntb:	NTB device context.
+ * @db_bits:	Doorbell bits to clear.
+ *
+ * The method just clears the mask bits specified by the passed bitfield.
+ * The IDT PCIe-switch will generate an interrupt upon unmasking only if no
+ * unmasked doorbell bit had been set before the current unmasking; otherwise
+ * no IRQ is generated, since there is only one IRQ vector for all doorbells.
+ *
+ * Return: always zero as success
+ */
+static int idt_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	idt_reg_clear_bits(ndev, IDT_NT_INDBELLMSK, &ndev->db_mask_lock,
+			   db_bits);
+
+	return 0;
+}
+
+/*
+ * idt_ntb_peer_db_set() - set bits in the peer doorbell register
+ *			   (NTB API callback)
+ * @ntb:	NTB device context.
+ * @db_bits:	Doorbell bits to set.
+ *
+ * IDT PCIe-switches expose a local outbound doorbell register to change the
+ * peer inbound doorbell register state.
+ *
+ * Return: zero on success, negative error if an invalid argument is passed.
+ */
+static int idt_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	if (db_bits & ~(u64)IDT_DBELL_MASK)
+		return -EINVAL;
+
+	idt_nt_write(ndev, IDT_NT_OUTDBELLSET, (u32)db_bits);
+	return 0;
+}
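+
+/*
+ * Note on mask handling (a sketch, illustrative only): a client that wants
+ * to temporarily silence all the doorbells can rely on the RMW helpers
+ * above:
+ *
+ *	ntb_db_set_mask(ntb, ntb_db_valid_mask(ntb));
+ *	(critical section)
+ *	ntb_db_clear_mask(ntb, ntb_db_valid_mask(ntb));
+ */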
+
+/*=============================================================================
+ * 6. Messaging operations
+ *
+ * Each NT-function of the IDT PCIe-switch has four inbound and four outbound
+ * message registers. Each outbound message register can be connected to one
+ * or even more than one peer inbound message registers by setting the global
+ * configurations. Since the NTB API permits only one-on-one message register
+ * mapping, the driver acts in accordance with that restriction.
+ *=============================================================================
+ */
+
+/*
+ * idt_init_msg() - initialize messaging interface
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Just initialize the spinlocks protecting the message routing tables.
+ */
+static void idt_init_msg(struct idt_ntb_dev *ndev)
+{
+	unsigned char midx;
+
+	/* Init the message routing table spinlocks */
+	for (midx = 0; midx < IDT_MSG_CNT; midx++)
+		spin_lock_init(&ndev->msg_locks[midx]);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB Messaging initialized");
+}
+
+/*
+ * idt_msg_isr() - message event ISR
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @ntint_sts:	NT-function interrupt status
+ *
+ * A message event happens when the MSG bit of NTINTSTS switches from 0 to 1.
+ * That happens only when unmasked message status bits are set to ones on a
+ * completely zeroed message status register.
+ * The method is called from the PCIe ISR bottom-half routine.
+ */
+static void idt_msg_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
+{
+	/*
+	 * Message IRQ status will be cleaned only when client
+	 * driver unsets all the message status bits.
+	 */
+	dev_dbg(&ndev->ntb.pdev->dev, "Message IRQ detected %#08x", ntint_sts);
+
+	/* Notify the client driver of possible message status change */
+	ntb_msg_event(&ndev->ntb);
+}
+
+/*
+ * idt_ntb_msg_count() - get the number of message registers (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * IDT PCIe-switches support four message registers.
+ *
+ * Return: the number of message registers.
+ */
+static int idt_ntb_msg_count(struct ntb_dev *ntb)
+{
+	return IDT_MSG_CNT;
+}
+
+/*
+ * idt_ntb_msg_inbits() - get a bitfield of inbound message registers status
+ *			  (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * The NT message status register is shared between the inbound and outbound
+ * message register statuses.
+ *
+ * Return: bitfield of inbound message registers.
+ */
+static u64 idt_ntb_msg_inbits(struct ntb_dev *ntb)
+{
+	return (u64)IDT_INMSG_MASK;
+}
+
+/*
+ * idt_ntb_msg_outbits() - get a bitfield of outbound message registers status
+ *			   (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * The NT message status register is shared between the inbound and outbound
+ * message register statuses.
+ *
+ * Return: bitfield of outbound message registers.
+ */
+static u64 idt_ntb_msg_outbits(struct ntb_dev *ntb)
+{
+	return (u64)IDT_OUTMSG_MASK;
+}
+
+/*
+ * idt_ntb_msg_read_sts() - read the message registers status (NTB API callback)
+ * @ntb:	NTB device context.
+ *
+ * IDT PCIe-switches expose message status registers to notify drivers of
+ * incoming data and of failures in case the peer message register isn't
+ * freed.
+ *
+ * Return: status bits of message registers
+ */
+static u64 idt_ntb_msg_read_sts(struct ntb_dev *ntb)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	return idt_nt_read(ndev, IDT_NT_MSGSTS);
+}
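+
+/*
+ * Status checking sketch (illustrative only): a client can tell inbound
+ * data from outbound delivery failures by intersecting the status with the
+ * bitfields reported above:
+ *
+ *	u64 sts = ntb_msg_read_sts(ntb);
+ *
+ *	if (sts & ntb_msg_inbits(ntb))
+ *		(a new message has arrived)
+ *	if (sts & ntb_msg_outbits(ntb))
+ *		(an outbound message failed to be delivered)
+ */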
+
+/*
+ * idt_ntb_msg_clear_sts() - clear status bits of message registers
+ *			     (NTB API callback)
+ * @ntb:	NTB device context.
+ * @sts_bits:	Status bits to clear.
+ *
+ * Clear bits in the status register by writing ones.
+ *
+ * NOTE! Invalid bits are always considered cleared, so it's not an error
+ * to clear them again.
+ *
+ * Return: always zero as success.
+ */
+static int idt_ntb_msg_clear_sts(struct ntb_dev *ntb, u64 sts_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	idt_nt_write(ndev, IDT_NT_MSGSTS, sts_bits);
+
+	return 0;
+}
+
+/*
+ * idt_ntb_msg_set_mask() - set mask of message register status bits
+ *			    (NTB API callback)
+ * @ntb:	NTB device context.
+ * @mask_bits:	Mask bits.
+ *
+ * Mask the message status bits from raising an IRQ.
+ *
+ * Return: zero on success, negative error if an invalid argument is passed.
+ */
+static int idt_ntb_msg_set_mask(struct ntb_dev *ntb, u64 mask_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	return idt_reg_set_bits(ndev, IDT_NT_MSGSTSMSK, &ndev->msg_mask_lock,
+				IDT_MSG_MASK, mask_bits);
+}
+
+/*
+ * idt_ntb_msg_clear_mask() - clear message registers mask
+ *			      (NTB API callback)
+ * @ntb:	NTB device context.
+ * @mask_bits:	Mask bits.
+ *
+ * Clear the mask of the message status bits IRQs.
+ *
+ * Return: always zero as success.
+ */
+static int idt_ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	idt_reg_clear_bits(ndev, IDT_NT_MSGSTSMSK, &ndev->msg_mask_lock,
+			   mask_bits);
+
+	return 0;
+}
+
+/*
+ * idt_ntb_msg_read() - read message register with specified index
+ *			(NTB API callback)
+ * @ntb:	NTB device context.
+ * @midx:	Message register index
+ * @pidx:	OUT - Port index of the peer device a message was retrieved from
+ * @msg:	OUT - Data
+ *
+ * Read data from the specified message register and its source register.
+ *
+ * Return: zero on success, negative error if an invalid argument is passed.
+ */
+static int idt_ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+
+	if (midx < 0 || IDT_MSG_CNT <= midx)
+		return -EINVAL;
+
+	/* Retrieve the source port index of the message */
+	if (pidx != NULL) {
+		u32 srcpart;
+
+		srcpart = idt_nt_read(ndev, ntdata_tbl.msgs[midx].src);
+		*pidx = ndev->part_idx_map[srcpart];
+
+		/* Sanity check the partition index (for the initial case) */
+		if (*pidx == -EINVAL)
+			*pidx = 0;
+	}
+
+	/* Retrieve the data of the corresponding message register */
+	if (msg != NULL)
+		*msg = idt_nt_read(ndev, ntdata_tbl.msgs[midx].in);
+
+	return 0;
+}
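+
+/*
+ * Receive-path sketch (illustrative only): from the ntb_msg_event()
+ * callback a client would typically drain the inbound registers like this,
+ * assuming the NTB API wrappers mirror the callbacks above:
+ *
+ *	u64 sts = ntb_msg_read_sts(ntb) & ntb_msg_inbits(ntb);
+ *	int midx, pidx;
+ *	u32 msg;
+ *
+ *	for (midx = 0; midx < ntb_msg_count(ntb); midx++) {
+ *		if (!(sts & BIT(midx)))
+ *			continue;
+ *		ntb_msg_read(ntb, midx, &pidx, &msg);
+ *	}
+ *	ntb_msg_clear_sts(ntb, sts);
+ */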
+
+/*
+ * idt_ntb_msg_write() - write data to the specified message register
+ *			 (NTB API callback)
+ * @ntb:	NTB device context.
+ * @midx:	Message register index
+ * @pidx:	Port index of the peer device the message is being sent to
+ * @msg:	Data to send
+ *
+ * Just try to send data to a peer. The message status register should be
+ * checked by the client driver.
+ *
+ * Return: zero on success, negative error if an invalid argument is passed.
+ */
+static int idt_ntb_msg_write(struct ntb_dev *ntb, int midx, int pidx, u32 msg)
+{
+	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);
+	unsigned long irqflags;
+	u32 swpmsgctl = 0;
+
+	if (midx < 0 || IDT_MSG_CNT <= midx)
+		return -EINVAL;
+
+	if (pidx < 0 || ndev->peer_cnt <= pidx)
+		return -EINVAL;
+
+	/* Collect the routing information */
+	swpmsgctl = SET_FIELD(SWPxMSGCTL_REG, 0, midx) |
+		    SET_FIELD(SWPxMSGCTL_PART, 0, ndev->peers[pidx].part);
+
+	/* Lock the messages routing table of the specified register */
+	spin_lock_irqsave(&ndev->msg_locks[midx], irqflags);
+	/* Set the route and send the data */
+	idt_sw_write(ndev, partdata_tbl[ndev->part].msgctl[midx], swpmsgctl);
+	idt_nt_write(ndev, ntdata_tbl.msgs[midx].out, msg);
+	mmiowb();
+	/* Unlock the messages routing table */
+	spin_unlock_irqrestore(&ndev->msg_locks[midx], irqflags);
+
+	/* The client driver shall check the status register */
+	return 0;
+}
+
+/*=============================================================================
+ * 7. Temperature sensor operations
+ *
+ * The IDT PCIe-switch has an embedded temperature sensor, which can be used
+ * to warn user-space of possible chip overheating. Since the workload
+ * temperature can differ between platforms, the temperature thresholds as
+ * well as the general sensor settings must be set up as part of the
+ * BIOS/EEPROM initialization. That includes the actual sensor enabling as
+ * well.
+ *=============================================================================
+ */
+
+/*
+ * idt_read_temp() - read temperature from the chip sensor
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @val:	OUT - integer value of temperature
+ * @frac:	OUT - fraction
+ */
+static void idt_read_temp(struct idt_ntb_dev *ndev, unsigned char *val,
+			  unsigned char *frac)
+{
+	u32 data;
+
+	/* Read the data from the TEMP field of the TMPSTS register */
+	data = idt_sw_read(ndev, IDT_SW_TMPSTS);
+	data = GET_FIELD(TMPSTS_TEMP, data);
+	/* The TEMP field has one fractional bit and seven integer bits */
+	*val = data >> 1;
+	*frac = ((data & 0x1) ? 5 : 0);
+}
+
+/*
+ * idt_temp_isr() - temperature sensor alarm events ISR
+ * @ndev:	IDT NTB hardware driver descriptor
+ * @ntint_sts:	NT-function interrupt status
+ *
+ * It handles the events of the temperature crossing the alarm thresholds.
+ * Since reading the TMPALARM register clears it, the function doesn't
+ * analyze the read value; instead the current temperature value is just
+ * printed to the log as a warning.
+ * The method is called from the PCIe ISR bottom-half routine.
+ */
+static void idt_temp_isr(struct idt_ntb_dev *ndev, u32 ntint_sts)
+{
+	unsigned char val, frac;
+
+	/* Read the current temperature value */
+	idt_read_temp(ndev, &val, &frac);
+
+	/* Read the temperature alarm to clean the alarm status out */
+	/*(void)idt_sw_read(ndev, IDT_SW_TMPALARM);*/
+
+	/* Clean the corresponding interrupt bit */
+	idt_nt_write(ndev, IDT_NT_NTINTSTS, IDT_NTINTSTS_TMPSENSOR);
+
+	dev_dbg(&ndev->ntb.pdev->dev,
+		"Temp sensor IRQ detected %#08x", ntint_sts);
+
+	/* Print the temperature value to the log */
+	dev_warn(&ndev->ntb.pdev->dev, "Temperature %hhu.%hhu", val, frac);
+}
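+
+/*
+ * Encoding example (illustrative note): the TEMP field packs the value as
+ * seven integer bits plus one half-degree bit, so a raw reading of 0x55
+ * (0b1010101) yields val = 0x55 >> 1 = 42 and frac = 5, i.e. 42.5 degrees.
+ */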
+
+/*=============================================================================
+ * 8. ISRs related operations
+ *
+ * The IDT PCIe-switch has a rather unusual IRQ design. There is just one
+ * interrupt vector for the doorbell and message registers, so the hardware
+ * driver can't determine the actual source of an IRQ if, for example, a
+ * message event happened while any unmasked doorbell bit is still set. A
+ * similar situation may occur when switch or temperature sensor events pop
+ * up. The difference is that the SEVENT and TMPSENSOR bits of the NT
+ * interrupt status register can be cleared by the IRQ handler, so the next
+ * interrupt request won't falsely handle the corresponding events.
+ * The hardware driver has only a bottom-half IRQ handler, since once any of
+ * the events has happened, the device won't raise the IRQ again before the
+ * last one is handled by clearing the corresponding NTINTSTS bit.
+ *=============================================================================
+ */
+
+static irqreturn_t idt_thread_isr(int irq, void *devid);
+
+/*
+ * idt_init_isr() - initialize PCIe interrupt handler
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_init_isr(struct idt_ntb_dev *ndev)
+{
+	struct pci_dev *pdev = ndev->ntb.pdev;
+	u32 ntint_mask;
+	int ret;
+
+	/* Allocate just one interrupt vector for the ISR */
+	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI | PCI_IRQ_LEGACY);
+	if (ret != 1) {
+		dev_err(&pdev->dev, "Failed to allocate IRQ vector");
+		return ret;
+	}
+
+	/* Retrieve the IRQ vector */
+	ret = pci_irq_vector(pdev, 0);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "Failed to get IRQ vector");
+		goto err_free_vectors;
+	}
+
+	/* Set the IRQ handler */
+	ret = devm_request_threaded_irq(&pdev->dev, ret, NULL, idt_thread_isr,
+					IRQF_ONESHOT, NTB_IRQNAME, ndev);
+	if (ret != 0) {
+		dev_err(&pdev->dev, "Failed to set MSI IRQ handler, %d", ret);
+		goto err_free_vectors;
+	}
+
+	/* Unmask Message/Doorbell/SE/Temperature interrupts */
+	ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) & ~IDT_NTINTMSK_ALL;
+	idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask);
+
+	/* From now on the interrupts are enabled */
+	dev_dbg(&pdev->dev, "NTB interrupts initialized");
+
+	return 0;
+
+err_free_vectors:
+	pci_free_irq_vectors(pdev);
+
+	return ret;
+}
+
+
+/*
+ * idt_deinit_isr() - deinitialize PCIe interrupt handler
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Disable the corresponding interrupts and free the allocated IRQ vectors.
+ */
+static void idt_deinit_isr(struct idt_ntb_dev *ndev)
+{
+	struct pci_dev *pdev = ndev->ntb.pdev;
+	u32 ntint_mask;
+
+	/* Mask interrupts back */
+	ntint_mask = idt_nt_read(ndev, IDT_NT_NTINTMSK) | IDT_NTINTMSK_ALL;
+	idt_nt_write(ndev, IDT_NT_NTINTMSK, ntint_mask);
+
+	/* Manually free the IRQ, otherwise pci_free_irq_vectors() will fail */
+	devm_free_irq(&pdev->dev, pci_irq_vector(pdev, 0), ndev);
+
+	/* Free the allocated IRQ vectors */
+	pci_free_irq_vectors(pdev);
+
+	dev_dbg(&pdev->dev, "NTB interrupts deinitialized");
+}
+
+/*
+ * idt_thread_isr() - NT function interrupts handler
+ * @irq:	IRQ number
+ * @devid:	Pointer to the IDT NTB hardware driver descriptor
+ *
+ * It reads the current NT interrupt status register and handles all the
+ * events it declares.
+ * The method is the bottom-half routine of the actual default PCIe IRQ
+ * handler.
+ */ +static irqreturn_t idt_thread_isr(int irq, void *devid) +{ + struct idt_ntb_dev *ndev = devid; + bool handled = false; + u32 ntint_sts; + + /* Read the NT interrupts status register */ + ntint_sts = idt_nt_read(ndev, IDT_NT_NTINTSTS); + + /* Handle messaging interrupts */ + if (ntint_sts & IDT_NTINTSTS_MSG) { + idt_msg_isr(ndev, ntint_sts); + handled = true; + } + + /* Handle doorbell interrupts */ + if (ntint_sts & IDT_NTINTSTS_DBELL) { + idt_db_isr(ndev, ntint_sts); + handled = true; + } + + /* Handle switch event interrupts */ + if (ntint_sts & IDT_NTINTSTS_SEVENT) { + idt_se_isr(ndev, ntint_sts); + handled = true; + } + + /* Handle temperature sensor interrupt */ + if (ntint_sts & IDT_NTINTSTS_TMPSENSOR) { + idt_temp_isr(ndev, ntint_sts); + handled = true; + } + + dev_dbg(&ndev->ntb.pdev->dev, "IDT IRQs 0x%08x handled", ntint_sts); + + return handled ? IRQ_HANDLED : IRQ_NONE; +} + +/*=========================================================================== + * 9. NTB hardware driver initialization + *=========================================================================== + */ + +/* + * NTB API operations + */ +static const struct ntb_dev_ops idt_ntb_ops = { + .port_number = idt_ntb_port_number, + .peer_port_count = idt_ntb_peer_port_count, + .peer_port_number = idt_ntb_peer_port_number, + .peer_port_idx = idt_ntb_peer_port_idx, + .link_is_up = idt_ntb_link_is_up, + .link_enable = idt_ntb_link_enable, + .link_disable = idt_ntb_link_disable, + .mw_count = idt_ntb_mw_count, + .mw_get_align = idt_ntb_mw_get_align, + .peer_mw_count = idt_ntb_peer_mw_count, + .peer_mw_get_addr = idt_ntb_peer_mw_get_addr, + .peer_mw_set_trans = idt_ntb_peer_mw_set_trans, + .peer_mw_clear_trans = idt_ntb_peer_mw_clear_trans, + .db_valid_mask = idt_ntb_db_valid_mask, + .db_read = idt_ntb_db_read, + .db_clear = idt_ntb_db_clear, + .db_read_mask = idt_ntb_db_read_mask, + .db_set_mask = idt_ntb_db_set_mask, + .db_clear_mask = idt_ntb_db_clear_mask, + .peer_db_set = idt_ntb_peer_db_set, + .msg_count = idt_ntb_msg_count, + .msg_inbits = idt_ntb_msg_inbits, + .msg_outbits = idt_ntb_msg_outbits, + .msg_read_sts = idt_ntb_msg_read_sts, + .msg_clear_sts = idt_ntb_msg_clear_sts, + .msg_set_mask = idt_ntb_msg_set_mask, + .msg_clear_mask = idt_ntb_msg_clear_mask, + .msg_read = idt_ntb_msg_read, + .msg_write = idt_ntb_msg_write +}; + +/* + * idt_register_device() - register IDT NTB device + * @ndev: IDT NTB hardware driver descriptor + * + * Return: zero on success, otherwise a negative error number. + */ +static int idt_register_device(struct idt_ntb_dev *ndev) +{ + int ret; + + /* Initialize the rest of NTB device structure and register it */ + ndev->ntb.ops = &idt_ntb_ops; + ndev->ntb.topo = NTB_TOPO_PRI; + + ret = ntb_register_device(&ndev->ntb); + if (ret != 0) { + dev_err(&ndev->ntb.pdev->dev, "Failed to register NTB device"); + return ret; + } + + dev_dbg(&ndev->ntb.pdev->dev, "NTB device successfully registered"); + + return 0; +} + +/* + * idt_unregister_device() - unregister IDT NTB device + * @ndev: IDT NTB hardware driver descriptor + */ +static void idt_unregister_device(struct idt_ntb_dev *ndev) +{ + /* Just unregister the NTB device */ + ntb_unregister_device(&ndev->ntb); + + dev_dbg(&ndev->ntb.pdev->dev, "NTB device unregistered"); +} + +/*============================================================================= + * 10. 
DebugFS node initialization
+ *=============================================================================
+ */
+
+static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
+				   size_t count, loff_t *offp);
+
+/*
+ * Driver DebugFS info file operations
+ */
+static const struct file_operations idt_dbgfs_info_ops = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = idt_dbgfs_info_read
+};
+
+/*
+ * idt_dbgfs_info_read() - DebugFS read info node callback
+ * @filp:	File node descriptor.
+ * @ubuf:	User-space buffer to put the data to
+ * @count:	Size of the buffer
+ * @offp:	Offset within the buffer
+ */
+static ssize_t idt_dbgfs_info_read(struct file *filp, char __user *ubuf,
+				   size_t count, loff_t *offp)
+{
+	struct idt_ntb_dev *ndev = filp->private_data;
+	unsigned char temp, frac, idx, pidx, cnt;
+	ssize_t ret = 0, off = 0;
+	unsigned long irqflags;
+	enum ntb_speed speed;
+	enum ntb_width width;
+	char *strbuf;
+	size_t size;
+	u32 data;
+
+	/* Let's limit the buffer size the way the Intel/AMD drivers do */
+	size = min_t(size_t, count, 0x1000U);
+
+	/* Allocate the memory for the buffer */
+	strbuf = kmalloc(size, GFP_KERNEL);
+	if (strbuf == NULL)
+		return -ENOMEM;
+
+	/* Put the data into the string buffer */
+	off += scnprintf(strbuf + off, size - off,
+		"\n\t\tIDT NTB device Information:\n\n");
+
+	/* General local device configurations */
+	off += scnprintf(strbuf + off, size - off,
+		"Local Port %hhu, Partition %hhu\n", ndev->port, ndev->part);
+
+	/* Peer ports information */
+	off += scnprintf(strbuf + off, size - off, "Peers:\n");
+	for (idx = 0; idx < ndev->peer_cnt; idx++) {
+		off += scnprintf(strbuf + off, size - off,
+			"\t%hhu. Port %hhu, Partition %hhu\n",
+			idx, ndev->peers[idx].port, ndev->peers[idx].part);
+	}
+
+	/* Link status */
+	data = idt_ntb_link_is_up(&ndev->ntb, &speed, &width);
+	off += scnprintf(strbuf + off, size - off,
+		"NTB link status\t- 0x%08x, ", data);
+	off += scnprintf(strbuf + off, size - off, "PCIe Gen %d x%d lanes\n",
+		speed, width);
+
+	/* Mapping table entries */
+	off += scnprintf(strbuf + off, size - off, "NTB Mapping Table:\n");
+	for (idx = 0; idx < IDT_MTBL_ENTRY_CNT; idx++) {
+		spin_lock_irqsave(&ndev->mtbl_lock, irqflags);
+		idt_nt_write(ndev, IDT_NT_NTMTBLADDR, idx);
+		data = idt_nt_read(ndev, IDT_NT_NTMTBLDATA);
+		spin_unlock_irqrestore(&ndev->mtbl_lock, irqflags);
+
+		/* Print valid entries only */
+		if (data & IDT_NTMTBLDATA_VALID) {
+			off += scnprintf(strbuf + off, size - off,
+				"\t%hhu. 
Partition %d, Requester ID 0x%04x\n", + idx, GET_FIELD(NTMTBLDATA_PART, data), + GET_FIELD(NTMTBLDATA_REQID, data)); + } + } + off += scnprintf(strbuf + off, size - off, "\n"); + + /* Outbound memory windows information */ + off += scnprintf(strbuf + off, size - off, + "Outbound Memory Windows:\n"); + for (idx = 0; idx < ndev->mw_cnt; idx += cnt) { + data = ndev->mws[idx].type; + cnt = idt_get_mw_count(data); + + /* Print Memory Window information */ + if (data == IDT_MW_DIR) + off += scnprintf(strbuf + off, size - off, + "\t%hhu.\t", idx); + else + off += scnprintf(strbuf + off, size - off, + "\t%hhu-%hhu.\t", idx, idx + cnt - 1); + + off += scnprintf(strbuf + off, size - off, "%s BAR%hhu, ", + idt_get_mw_name(data), ndev->mws[idx].bar); + + off += scnprintf(strbuf + off, size - off, + "Address align 0x%08llx, ", ndev->mws[idx].addr_align); + + off += scnprintf(strbuf + off, size - off, + "Size align 0x%08llx, Size max %llu\n", + ndev->mws[idx].size_align, ndev->mws[idx].size_max); + } + + /* Inbound memory windows information */ + for (pidx = 0; pidx < ndev->peer_cnt; pidx++) { + off += scnprintf(strbuf + off, size - off, + "Inbound Memory Windows for peer %hhu (Port %hhu):\n", + pidx, ndev->peers[pidx].port); + + /* Print Memory Windows information */ + for (idx = 0; idx < ndev->peers[pidx].mw_cnt; idx += cnt) { + data = ndev->peers[pidx].mws[idx].type; + cnt = idt_get_mw_count(data); + + if (data == IDT_MW_DIR) + off += scnprintf(strbuf + off, size - off, + "\t%hhu.\t", idx); + else + off += scnprintf(strbuf + off, size - off, + "\t%hhu-%hhu.\t", idx, idx + cnt - 1); + + off += scnprintf(strbuf + off, size - off, + "%s BAR%hhu, ", idt_get_mw_name(data), + ndev->peers[pidx].mws[idx].bar); + + off += scnprintf(strbuf + off, size - off, + "Address align 0x%08llx, ", + ndev->peers[pidx].mws[idx].addr_align); + + off += scnprintf(strbuf + off, size - off, + "Size align 0x%08llx, Size max %llu\n", + ndev->peers[pidx].mws[idx].size_align, + ndev->peers[pidx].mws[idx].size_max); + } + } + off += scnprintf(strbuf + off, size - off, "\n"); + + /* Doorbell information */ + data = idt_sw_read(ndev, IDT_SW_GDBELLSTS); + off += scnprintf(strbuf + off, size - off, + "Global Doorbell state\t- 0x%08x\n", data); + data = idt_ntb_db_read(&ndev->ntb); + off += scnprintf(strbuf + off, size - off, + "Local Doorbell state\t- 0x%08x\n", data); + data = idt_nt_read(ndev, IDT_NT_INDBELLMSK); + off += scnprintf(strbuf + off, size - off, + "Local Doorbell mask\t- 0x%08x\n", data); + off += scnprintf(strbuf + off, size - off, "\n"); + + /* Messaging information */ + off += scnprintf(strbuf + off, size - off, + "Message event valid\t- 0x%08x\n", IDT_MSG_MASK); + data = idt_ntb_msg_read_sts(&ndev->ntb); + off += scnprintf(strbuf + off, size - off, + "Message event status\t- 0x%08x\n", data); + data = idt_nt_read(ndev, IDT_NT_MSGSTSMSK); + off += scnprintf(strbuf + off, size - off, + "Message event mask\t- 0x%08x\n", data); + off += scnprintf(strbuf + off, size - off, + "Message data:\n"); + for (idx = 0; idx < IDT_MSG_CNT; idx++) { + int src; + (void)idt_ntb_msg_read(&ndev->ntb, idx, &src, &data); + off += scnprintf(strbuf + off, size - off, + "\t%hhu. 
0x%08x from peer %hhu (Port %hhu)\n",
+			idx, data, src, ndev->peers[src].port);
+	}
+	off += scnprintf(strbuf + off, size - off, "\n");
+
+	/* Current temperature */
+	idt_read_temp(ndev, &temp, &frac);
+	off += scnprintf(strbuf + off, size - off,
+		"Switch temperature\t\t- %hhu.%hhuC\n", temp, frac);
+
+	/* Copy the buffer to the User Space */
+	ret = simple_read_from_buffer(ubuf, count, offp, strbuf, off);
+	kfree(strbuf);
+
+	return ret;
+}
+
+/*
+ * idt_init_dbgfs() - initialize DebugFS node
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_init_dbgfs(struct idt_ntb_dev *ndev)
+{
+	char devname[64];
+
+	/* If the top directory is not created then do nothing */
+	if (IS_ERR_OR_NULL(dbgfs_topdir)) {
+		dev_info(&ndev->ntb.pdev->dev, "Top DebugFS directory absent");
+		return PTR_ERR(dbgfs_topdir);
+	}
+
+	/* Create the info file node */
+	snprintf(devname, 64, "info:%s", pci_name(ndev->ntb.pdev));
+	ndev->dbgfs_info = debugfs_create_file(devname, 0400, dbgfs_topdir,
+		ndev, &idt_dbgfs_info_ops);
+	if (IS_ERR(ndev->dbgfs_info)) {
+		dev_dbg(&ndev->ntb.pdev->dev, "Failed to create DebugFS node");
+		return PTR_ERR(ndev->dbgfs_info);
+	}
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB device DebugFS node created");
+
+	return 0;
+}
+
+/*
+ * idt_deinit_dbgfs() - deinitialize DebugFS node
+ * @ndev:	IDT NTB hardware driver descriptor
+ *
+ * Just discard the info node from DebugFS
+ */
+static void idt_deinit_dbgfs(struct idt_ntb_dev *ndev)
+{
+	debugfs_remove(ndev->dbgfs_info);
+
+	dev_dbg(&ndev->ntb.pdev->dev, "NTB device DebugFS node discarded");
+}
+
+/*=============================================================================
+ * 11. Basic PCIe device initialization
+ *=============================================================================
+ */
+
+/*
+ * idt_check_setup() - check whether the IDT PCIe-switch is properly
+ *		       pre-initialized
+ * @pdev:	Pointer to the PCI device descriptor
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_check_setup(struct pci_dev *pdev)
+{
+	u32 data;
+	int ret;
+
+	/* Read the BARSETUP0 */
+	ret = pci_read_config_dword(pdev, IDT_NT_BARSETUP0, &data);
+	if (ret != 0) {
+		dev_err(&pdev->dev,
+			"Failed to read BARSETUP0 config register");
+		return ret;
+	}
+
+	/* Check whether BAR0 is enabled and maps the config space */
+	if (!(data & IDT_BARSETUP_EN) || !(data & IDT_BARSETUP_MODE_CFG)) {
+		dev_err(&pdev->dev, "BAR0 doesn't map config space");
+		return -EINVAL;
+	}
+
+	/* The configuration space BAR0 must have a certain size */
+	if ((data & IDT_BARSETUP_SIZE_MASK) != IDT_BARSETUP_SIZE_CFG) {
+		dev_err(&pdev->dev, "Invalid size of config space");
+		return -EINVAL;
+	}
+
+	dev_dbg(&pdev->dev, "NTB device pre-initialized correctly");
+
+	return 0;
+}
+
+/*
+ * idt_create_dev() - create the IDT PCIe-switch driver descriptor
+ * @pdev:	Pointer to the PCI device descriptor
+ * @id:	IDT PCIe-device configuration
+ *
+ * It just allocates memory for the IDT PCIe-switch device structure and
+ * initializes some commonly used fields.
+ *
+ * No release method is needed, since managed device resources are used for
+ * the memory allocation.
+ *
+ * Return: pointer to the descriptor, otherwise a negative error number.
+ */
+static struct idt_ntb_dev *idt_create_dev(struct pci_dev *pdev,
+					  const struct pci_device_id *id)
+{
+	struct idt_ntb_dev *ndev;
+
+	/* Allocate memory for the IDT PCIe-device descriptor */
+	ndev = devm_kzalloc(&pdev->dev, sizeof(*ndev), GFP_KERNEL);
+	if (IS_ERR_OR_NULL(ndev)) {
+		dev_err(&pdev->dev, "Memory allocation failed for descriptor");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* Save the IDT PCIe-switch ports configuration */
+	ndev->swcfg = (struct idt_89hpes_cfg *)id->driver_data;
+	/* Save the PCI-device pointer inside the NTB device structure */
+	ndev->ntb.pdev = pdev;
+
+	/* Initialize the spinlocks of the Doorbell, Message and GASA registers */
+	spin_lock_init(&ndev->db_mask_lock);
+	spin_lock_init(&ndev->msg_mask_lock);
+	spin_lock_init(&ndev->gasa_lock);
+
+	dev_info(&pdev->dev, "IDT %s discovered", ndev->swcfg->name);
+
+	dev_dbg(&pdev->dev, "NTB device descriptor created");
+
+	return ndev;
+}
+
+/*
+ * idt_init_pci() - initialize the basic PCI-related subsystem
+ * @ndev:	Pointer to the IDT PCIe-switch driver descriptor
+ *
+ * Managed device resources will be freed automatically in case of failure or
+ * driver detachment.
+ *
+ * Return: zero on success, otherwise a negative error number.
+ */
+static int idt_init_pci(struct idt_ntb_dev *ndev)
+{
+	struct pci_dev *pdev = ndev->ntb.pdev;
+	int ret;
+
+	/* Initialize the DMA bit mask */
+	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (ret != 0) {
+		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (ret != 0) {
+			dev_err(&pdev->dev, "Failed to set DMA bit mask\n");
+			return ret;
+		}
+		dev_warn(&pdev->dev, "Cannot set DMA highmem bit mask\n");
+	}
+	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (ret != 0) {
+		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (ret != 0) {
+			dev_err(&pdev->dev,
+				"Failed to set consistent DMA bit mask\n");
+			return ret;
+		}
+		dev_warn(&pdev->dev,
+			"Cannot set consistent DMA highmem bit mask\n");
+	}
+
+	/*
+	 * Enable the device advanced error reporting. It's not critical to
+	 * have AER disabled in the kernel.
+ */ + ret = pci_enable_pcie_error_reporting(pdev); + if (ret != 0) + dev_warn(&pdev->dev, "PCIe AER capability disabled\n"); + else /* Cleanup uncorrectable error status before getting to init */ + pci_cleanup_aer_uncorrect_error_status(pdev); + + /* First enable the PCI device */ + ret = pcim_enable_device(pdev); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to enable PCIe device\n"); + goto err_disable_aer; + } + + /* + * Enable the bus mastering, which effectively enables MSI IRQs and + * Request TLPs translation + */ + pci_set_master(pdev); + + /* Request all BARs resources and map BAR0 only */ + ret = pcim_iomap_regions_request_all(pdev, 1, NTB_NAME); + if (ret != 0) { + dev_err(&pdev->dev, "Failed to request resources\n"); + goto err_clear_master; + } + + /* Retrieve virtual address of BAR0 - PCI configuration space */ + ndev->cfgspc = pcim_iomap_table(pdev)[0]; + + /* Put the IDT driver data pointer to the PCI-device private pointer */ + pci_set_drvdata(pdev, ndev); + + dev_dbg(&pdev->dev, "NT-function PCIe interface initialized"); + + return 0; + +err_clear_master: + pci_clear_master(pdev); +err_disable_aer: + (void)pci_disable_pcie_error_reporting(pdev); + + return ret; +} + +/* + * idt_deinit_pci() - deinitialize the basic PCI-related subsystem + * @ndev: Pointer to the IDT PCIe-switch driver descriptor + * + * Managed resources will be freed on the driver detachment + */ +static void idt_deinit_pci(struct idt_ntb_dev *ndev) +{ + struct pci_dev *pdev = ndev->ntb.pdev; + + /* Clean up the PCI-device private data pointer */ + pci_set_drvdata(pdev, NULL); + + /* Clear the bus master disabling the Request TLPs translation */ + pci_clear_master(pdev); + + /* Disable the AER capability */ + (void)pci_disable_pcie_error_reporting(pdev); + + dev_dbg(&pdev->dev, "NT-function PCIe interface cleared"); +} + +/*=========================================================================== + * 12. PCI bus callback functions + *=========================================================================== + */ + +/* + * idt_pci_probe() - PCI device probe callback + * @pdev: Pointer to PCI device structure + * @id: PCIe device custom descriptor + * + * Return: zero on success, otherwise negative error number + */ +static int idt_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct idt_ntb_dev *ndev; + int ret; + + /* Check whether IDT PCIe-switch is properly pre-initialized */ + ret = idt_check_setup(pdev); + if (ret != 0) + return ret; + + /* Allocate the memory for IDT NTB device data */ + ndev = idt_create_dev(pdev, id); + if (IS_ERR_OR_NULL(ndev)) + return PTR_ERR(ndev); + + /* Initialize the basic PCI subsystem of the device */ + ret = idt_init_pci(ndev); + if (ret != 0) + return ret; + + /* Scan ports of the IDT PCIe-switch */ + (void)idt_scan_ports(ndev); + + /* Initialize NTB link events subsystem */ + idt_init_link(ndev); + + /* Initialize MWs subsystem */ + ret = idt_init_mws(ndev); + if (ret != 0) + goto err_deinit_link; + + /* Initialize Messaging subsystem */ + idt_init_msg(ndev); + + /* Initialize IDT interrupts handler */ + ret = idt_init_isr(ndev); + if (ret != 0) + goto err_deinit_link; + + /* Register IDT NTB devices on the NTB bus */ + ret = idt_register_device(ndev); + if (ret != 0) + goto err_deinit_isr; + + /* Initialize DebugFS info node */ + (void)idt_init_dbgfs(ndev); + + /* IDT PCIe-switch NTB driver is finally initialized */ + dev_info(&pdev->dev, "IDT NTB device is ready"); + + /* May the force be with us... 
 */
+	return 0;
+
+err_deinit_isr:
+	idt_deinit_isr(ndev);
+err_deinit_link:
+	idt_deinit_link(ndev);
+	idt_deinit_pci(ndev);
+
+	return ret;
+}
+
+/*
+ * idt_pci_remove() - PCI device remove callback
+ * @pdev:	Pointer to PCI device structure
+ */
+static void idt_pci_remove(struct pci_dev *pdev)
+{
+	struct idt_ntb_dev *ndev = pci_get_drvdata(pdev);
+
+	/* Deinit the DebugFS node */
+	idt_deinit_dbgfs(ndev);
+
+	/* Unregister NTB device */
+	idt_unregister_device(ndev);
+
+	/* Stop the interrupts handling */
+	idt_deinit_isr(ndev);
+
+	/* Deinitialize link event subsystem */
+	idt_deinit_link(ndev);
+
+	/* Deinit basic PCI subsystem */
+	idt_deinit_pci(ndev);
+
+	/* IDT PCIe-switch NTB driver is finally deinitialized */
+	dev_info(&pdev->dev, "IDT NTB device is removed");
+
+	/* Sayonara... */
+}
+
+/*
+ * IDT PCIe-switch models ports configuration structures
+ */
+static struct idt_89hpes_cfg idt_89hpes24nt6ag2_config = {
+	.name = "89HPES24NT6AG2",
+	.port_cnt = 6, .ports = {0, 2, 4, 6, 8, 12}
+};
+static struct idt_89hpes_cfg idt_89hpes32nt8ag2_config = {
+	.name = "89HPES32NT8AG2",
+	.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+static struct idt_89hpes_cfg idt_89hpes32nt8bg2_config = {
+	.name = "89HPES32NT8BG2",
+	.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+static struct idt_89hpes_cfg idt_89hpes12nt12g2_config = {
+	.name = "89HPES12NT12G2",
+	.port_cnt = 3, .ports = {0, 8, 16}
+};
+static struct idt_89hpes_cfg idt_89hpes16nt16g2_config = {
+	.name = "89HPES16NT16G2",
+	.port_cnt = 4, .ports = {0, 8, 12, 16}
+};
+static struct idt_89hpes_cfg idt_89hpes24nt24g2_config = {
+	.name = "89HPES24NT24G2",
+	.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+static struct idt_89hpes_cfg idt_89hpes32nt24ag2_config = {
+	.name = "89HPES32NT24AG2",
+	.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+static struct idt_89hpes_cfg idt_89hpes32nt24bg2_config = {
+	.name = "89HPES32NT24BG2",
+	.port_cnt = 8, .ports = {0, 2, 4, 6, 8, 12, 16, 20}
+};
+
+/*
+ * PCI-ids table of the supported IDT PCIe-switch devices
+ */
+static const struct pci_device_id idt_pci_tbl[] = {
+	{IDT_PCI_DEVICE_IDS(89HPES24NT6AG2, idt_89hpes24nt6ag2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES32NT8AG2, idt_89hpes32nt8ag2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES32NT8BG2, idt_89hpes32nt8bg2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES12NT12G2, idt_89hpes12nt12g2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES16NT16G2, idt_89hpes16nt16g2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES24NT24G2, idt_89hpes24nt24g2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES32NT24AG2, idt_89hpes32nt24ag2_config)},
+	{IDT_PCI_DEVICE_IDS(89HPES32NT24BG2, idt_89hpes32nt24bg2_config)},
+	{0}
+};
+MODULE_DEVICE_TABLE(pci, idt_pci_tbl);
+
+/*
+ * IDT PCIe-switch NT-function device driver structure definition
+ */
+static struct pci_driver idt_pci_driver = {
+	.name		= KBUILD_MODNAME,
+	.probe		= idt_pci_probe,
+	.remove		= idt_pci_remove,
+	.id_table	= idt_pci_tbl,
+};
+
+static int __init idt_pci_driver_init(void)
+{
+	pr_info("%s %s\n", NTB_DESC, NTB_VER);
+
+	/* Create the top DebugFS directory if the FS is initialized */
+	if (debugfs_initialized())
+		dbgfs_topdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
+
+	/* Register the NTB hardware driver to handle the PCI device */
+	return pci_register_driver(&idt_pci_driver);
+}
+module_init(idt_pci_driver_init);
+
+static void __exit idt_pci_driver_exit(void)
+{
+	/* Unregister the NTB hardware driver */
+	pci_unregister_driver(&idt_pci_driver);
+
+	/* Discard the top DebugFS directory */
+
debugfs_remove_recursive(dbgfs_topdir); +} +module_exit(idt_pci_driver_exit); + diff --git a/drivers/ntb/hw/idt/ntb_hw_idt.h b/drivers/ntb/hw/idt/ntb_hw_idt.h new file mode 100644 index 000000000000..856fd182f6f4 --- /dev/null +++ b/drivers/ntb/hw/idt/ntb_hw_idt.h @@ -0,0 +1,1149 @@ +/* + * This file is provided under a GPLv2 license. When using or + * redistributing this file, you may do so under that license. + * + * GPL LICENSE SUMMARY + * + * Copyright (C) 2016 T-Platforms All Rights Reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General + * Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, one can be found http://www.gnu.org/licenses/. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * IDT PCIe-switch NTB Linux driver + * + * Contact Information: + * Serge Semin <fancer.lancer@gmail.com>, <Sergey.Semin@t-platforms.ru> + */ + +#ifndef NTB_HW_IDT_H +#define NTB_HW_IDT_H + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/pci_ids.h> +#include <linux/interrupt.h> +#include <linux/spinlock.h> +#include <linux/ntb.h> + + +/* + * Macro is used to create the struct pci_device_id that matches + * the supported IDT PCIe-switches + * @devname: Capitalized name of the particular device + * @data: Variable passed to the driver of the particular device + */ +#define IDT_PCI_DEVICE_IDS(devname, data) \ + .vendor = PCI_VENDOR_ID_IDT, .device = PCI_DEVICE_ID_IDT_##devname, \ + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, \ + .class = (PCI_CLASS_BRIDGE_OTHER << 8), .class_mask = (0xFFFF00), \ + .driver_data = (kernel_ulong_t)&data + +/* + * IDT PCIe-switches device IDs + */ +#define PCI_DEVICE_ID_IDT_89HPES24NT6AG2 0x8091 +#define PCI_DEVICE_ID_IDT_89HPES32NT8AG2 0x808F +#define PCI_DEVICE_ID_IDT_89HPES32NT8BG2 0x8088 +#define PCI_DEVICE_ID_IDT_89HPES12NT12G2 0x8092 +#define PCI_DEVICE_ID_IDT_89HPES16NT16G2 0x8090 +#define PCI_DEVICE_ID_IDT_89HPES24NT24G2 0x808E +#define PCI_DEVICE_ID_IDT_89HPES32NT24AG2 0x808C +#define PCI_DEVICE_ID_IDT_89HPES32NT24BG2 0x808A + +/* + * NT-function Configuration Space registers + * NOTE 1) The IDT PCIe-switch internal data is little-endian + * so it must be taken into account in the driver + * internals. 
+ * 2) Additionally the registers should be accessed either + * with byte-enables corresponding to their native size or + * the size of one DWORD + * + * So to simplify the driver code, there is only DWORD-sized read/write + * operations utilized. + */ +/* PCI Express Configuration Space */ +/* PCI Express command/status register (DWORD) */ +#define IDT_NT_PCICMDSTS 0x00004U +/* PCI Express Device Capabilities (DWORD) */ +#define IDT_NT_PCIEDCAP 0x00044U +/* PCI Express Device Control/Status (WORD+WORD) */ +#define IDT_NT_PCIEDCTLSTS 0x00048U +/* PCI Express Link Capabilities (DWORD) */ +#define IDT_NT_PCIELCAP 0x0004CU +/* PCI Express Link Control/Status (WORD+WORD) */ +#define IDT_NT_PCIELCTLSTS 0x00050U +/* PCI Express Device Capabilities 2 (DWORD) */ +#define IDT_NT_PCIEDCAP2 0x00064U +/* PCI Express Device Control 2 (WORD+WORD) */ +#define IDT_NT_PCIEDCTL2 0x00068U +/* PCI Power Management Control and Status (DWORD) */ +#define IDT_NT_PMCSR 0x000C4U +/*==========================================*/ +/* IDT Proprietary NT-port-specific registers */ +/* NT-function main control registers */ +/* NT Endpoint Control (DWORD) */ +#define IDT_NT_NTCTL 0x00400U +/* NT Endpoint Interrupt Status/Mask (DWORD) */ +#define IDT_NT_NTINTSTS 0x00404U +#define IDT_NT_NTINTMSK 0x00408U +/* NT Endpoint Signal Data (DWORD) */ +#define IDT_NT_NTSDATA 0x0040CU +/* NT Endpoint Global Signal (DWORD) */ +#define IDT_NT_NTGSIGNAL 0x00410U +/* Internal Error Reporting Mask 0/1 (DWORD) */ +#define IDT_NT_NTIERRORMSK0 0x00414U +#define IDT_NT_NTIERRORMSK1 0x00418U +/* Doorbel registers */ +/* NT Outbound Doorbell Set (DWORD) */ +#define IDT_NT_OUTDBELLSET 0x00420U +/* NT Inbound Doorbell Status/Mask (DWORD) */ +#define IDT_NT_INDBELLSTS 0x00428U +#define IDT_NT_INDBELLMSK 0x0042CU +/* Message registers */ +/* Outbound Message N (DWORD) */ +#define IDT_NT_OUTMSG0 0x00430U +#define IDT_NT_OUTMSG1 0x00434U +#define IDT_NT_OUTMSG2 0x00438U +#define IDT_NT_OUTMSG3 0x0043CU +/* Inbound Message N (DWORD) */ +#define IDT_NT_INMSG0 0x00440U +#define IDT_NT_INMSG1 0x00444U +#define IDT_NT_INMSG2 0x00448U +#define IDT_NT_INMSG3 0x0044CU +/* Inbound Message Source N (DWORD) */ +#define IDT_NT_INMSGSRC0 0x00450U +#define IDT_NT_INMSGSRC1 0x00454U +#define IDT_NT_INMSGSRC2 0x00458U +#define IDT_NT_INMSGSRC3 0x0045CU +/* Message Status (DWORD) */ +#define IDT_NT_MSGSTS 0x00460U +/* Message Status Mask (DWORD) */ +#define IDT_NT_MSGSTSMSK 0x00464U +/* BAR-setup registers */ +/* BAR N Setup/Limit Address/Lower and Upper Translated Base Address (DWORD) */ +#define IDT_NT_BARSETUP0 0x00470U +#define IDT_NT_BARLIMIT0 0x00474U +#define IDT_NT_BARLTBASE0 0x00478U +#define IDT_NT_BARUTBASE0 0x0047CU +#define IDT_NT_BARSETUP1 0x00480U +#define IDT_NT_BARLIMIT1 0x00484U +#define IDT_NT_BARLTBASE1 0x00488U +#define IDT_NT_BARUTBASE1 0x0048CU +#define IDT_NT_BARSETUP2 0x00490U +#define IDT_NT_BARLIMIT2 0x00494U +#define IDT_NT_BARLTBASE2 0x00498U +#define IDT_NT_BARUTBASE2 0x0049CU +#define IDT_NT_BARSETUP3 0x004A0U +#define IDT_NT_BARLIMIT3 0x004A4U +#define IDT_NT_BARLTBASE3 0x004A8U +#define IDT_NT_BARUTBASE3 0x004ACU +#define IDT_NT_BARSETUP4 0x004B0U +#define IDT_NT_BARLIMIT4 0x004B4U +#define IDT_NT_BARLTBASE4 0x004B8U +#define IDT_NT_BARUTBASE4 0x004BCU +#define IDT_NT_BARSETUP5 0x004C0U +#define IDT_NT_BARLIMIT5 0x004C4U +#define IDT_NT_BARLTBASE5 0x004C8U +#define IDT_NT_BARUTBASE5 0x004CCU +/* NT mapping table registers */ +/* NT Mapping Table Address/Status/Data (DWORD) */ +#define IDT_NT_NTMTBLADDR 0x004D0U +#define 
+#define IDT_NT_NTMTBLSTS		0x004D4U
+#define IDT_NT_NTMTBLDATA		0x004D8U
+/* Requester ID (Bus:Device:Function) Capture (DWORD) */
+#define IDT_NT_REQIDCAP			0x004DCU
+/* Memory Windows Lookup table registers */
+/* Lookup Table Offset/Lower, Middle and Upper data (DWORD) */
+#define IDT_NT_LUTOFFSET		0x004E0U
+#define IDT_NT_LUTLDATA			0x004E4U
+#define IDT_NT_LUTMDATA			0x004E8U
+#define IDT_NT_LUTUDATA			0x004ECU
+/* NT Endpoint Uncorrectable/Correctable Errors Emulation registers (DWORD) */
+#define IDT_NT_NTUEEM			0x004F0U
+#define IDT_NT_NTCEEM			0x004F4U
+/* Global Address Space Access/Data registers (DWORD) */
+#define IDT_NT_GASAADDR			0x00FF8U
+#define IDT_NT_GASADATA			0x00FFCU
+
+/*
+ * IDT PCIe-switch Global Configuration and Status registers
+ */
+/* Port N Configuration register in global space */
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP0_PCIECMDSTS		0x01004U
+#define IDT_SW_NTP0_PCIELCTLSTS	0x01050U
+/* NT-function control register (DWORD) */
+#define IDT_SW_NTP0_NTCTL		0x01400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP0_BARSETUP0		0x01470U
+#define IDT_SW_NTP0_BARLIMIT0		0x01474U
+#define IDT_SW_NTP0_BARLTBASE0		0x01478U
+#define IDT_SW_NTP0_BARUTBASE0		0x0147CU
+#define IDT_SW_NTP0_BARSETUP1		0x01480U
+#define IDT_SW_NTP0_BARLIMIT1		0x01484U
+#define IDT_SW_NTP0_BARLTBASE1		0x01488U
+#define IDT_SW_NTP0_BARUTBASE1		0x0148CU
+#define IDT_SW_NTP0_BARSETUP2		0x01490U
+#define IDT_SW_NTP0_BARLIMIT2		0x01494U
+#define IDT_SW_NTP0_BARLTBASE2		0x01498U
+#define IDT_SW_NTP0_BARUTBASE2		0x0149CU
+#define IDT_SW_NTP0_BARSETUP3		0x014A0U
+#define IDT_SW_NTP0_BARLIMIT3		0x014A4U
+#define IDT_SW_NTP0_BARLTBASE3		0x014A8U
+#define IDT_SW_NTP0_BARUTBASE3		0x014ACU
+#define IDT_SW_NTP0_BARSETUP4		0x014B0U
+#define IDT_SW_NTP0_BARLIMIT4		0x014B4U
+#define IDT_SW_NTP0_BARLTBASE4		0x014B8U
+#define IDT_SW_NTP0_BARUTBASE4		0x014BCU
+#define IDT_SW_NTP0_BARSETUP5		0x014C0U
+#define IDT_SW_NTP0_BARLIMIT5		0x014C4U
+#define IDT_SW_NTP0_BARLTBASE5		0x014C8U
+#define IDT_SW_NTP0_BARUTBASE5		0x014CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP2_PCIECMDSTS		0x05004U
+#define IDT_SW_NTP2_PCIELCTLSTS	0x05050U
+/* NT-function control register (DWORD) */
+#define IDT_SW_NTP2_NTCTL		0x05400U
+/* BAR setup/limit/base address registers (DWORD) */
+#define IDT_SW_NTP2_BARSETUP0		0x05470U
+#define IDT_SW_NTP2_BARLIMIT0		0x05474U
+#define IDT_SW_NTP2_BARLTBASE0		0x05478U
+#define IDT_SW_NTP2_BARUTBASE0		0x0547CU
+#define IDT_SW_NTP2_BARSETUP1		0x05480U
+#define IDT_SW_NTP2_BARLIMIT1		0x05484U
+#define IDT_SW_NTP2_BARLTBASE1		0x05488U
+#define IDT_SW_NTP2_BARUTBASE1		0x0548CU
+#define IDT_SW_NTP2_BARSETUP2		0x05490U
+#define IDT_SW_NTP2_BARLIMIT2		0x05494U
+#define IDT_SW_NTP2_BARLTBASE2		0x05498U
+#define IDT_SW_NTP2_BARUTBASE2		0x0549CU
+#define IDT_SW_NTP2_BARSETUP3		0x054A0U
+#define IDT_SW_NTP2_BARLIMIT3		0x054A4U
+#define IDT_SW_NTP2_BARLTBASE3		0x054A8U
+#define IDT_SW_NTP2_BARUTBASE3		0x054ACU
+#define IDT_SW_NTP2_BARSETUP4		0x054B0U
+#define IDT_SW_NTP2_BARLIMIT4		0x054B4U
+#define IDT_SW_NTP2_BARLTBASE4		0x054B8U
+#define IDT_SW_NTP2_BARUTBASE4		0x054BCU
+#define IDT_SW_NTP2_BARSETUP5		0x054C0U
+#define IDT_SW_NTP2_BARLIMIT5		0x054C4U
+#define IDT_SW_NTP2_BARLTBASE5		0x054C8U
+#define IDT_SW_NTP2_BARUTBASE5		0x054CCU
+/* PCI Express command/status and link control/status registers (WORD+WORD) */
+#define IDT_SW_NTP4_PCIECMDSTS		0x09004U
+#define IDT_SW_NTP4_PCIELCTLSTS	0x09050U
+/* NT-function control register (DWORD) */
+#define
IDT_SW_NTP4_NTCTL 0x09400U +/* BAR setup/limit/base address registers (DWORD) */ +#define IDT_SW_NTP4_BARSETUP0 0x09470U +#define IDT_SW_NTP4_BARLIMIT0 0x09474U +#define IDT_SW_NTP4_BARLTBASE0 0x09478U +#define IDT_SW_NTP4_BARUTBASE0 0x0947CU +#define IDT_SW_NTP4_BARSETUP1 0x09480U +#define IDT_SW_NTP4_BARLIMIT1 0x09484U +#define IDT_SW_NTP4_BARLTBASE1 0x09488U +#define IDT_SW_NTP4_BARUTBASE1 0x0948CU +#define IDT_SW_NTP4_BARSETUP2 0x09490U +#define IDT_SW_NTP4_BARLIMIT2 0x09494U +#define IDT_SW_NTP4_BARLTBASE2 0x09498U +#define IDT_SW_NTP4_BARUTBASE2 0x0949CU +#define IDT_SW_NTP4_BARSETUP3 0x094A0U +#define IDT_SW_NTP4_BARLIMIT3 0x094A4U +#define IDT_SW_NTP4_BARLTBASE3 0x094A8U +#define IDT_SW_NTP4_BARUTBASE3 0x094ACU +#define IDT_SW_NTP4_BARSETUP4 0x094B0U +#define IDT_SW_NTP4_BARLIMIT4 0x094B4U +#define IDT_SW_NTP4_BARLTBASE4 0x094B8U +#define IDT_SW_NTP4_BARUTBASE4 0x094BCU +#define IDT_SW_NTP4_BARSETUP5 0x094C0U +#define IDT_SW_NTP4_BARLIMIT5 0x094C4U +#define IDT_SW_NTP4_BARLTBASE5 0x094C8U +#define IDT_SW_NTP4_BARUTBASE5 0x094CCU +/* PCI Express command/status and link control/status registers (WORD+WORD) */ +#define IDT_SW_NTP6_PCIECMDSTS 0x0D004U +#define IDT_SW_NTP6_PCIELCTLSTS 0x0D050U +/* NT-function control register (DWORD) */ +#define IDT_SW_NTP6_NTCTL 0x0D400U +/* BAR setup/limit/base address registers (DWORD) */ +#define IDT_SW_NTP6_BARSETUP0 0x0D470U +#define IDT_SW_NTP6_BARLIMIT0 0x0D474U +#define IDT_SW_NTP6_BARLTBASE0 0x0D478U +#define IDT_SW_NTP6_BARUTBASE0 0x0D47CU +#define IDT_SW_NTP6_BARSETUP1 0x0D480U +#define IDT_SW_NTP6_BARLIMIT1 0x0D484U +#define IDT_SW_NTP6_BARLTBASE1 0x0D488U +#define IDT_SW_NTP6_BARUTBASE1 0x0D48CU +#define IDT_SW_NTP6_BARSETUP2 0x0D490U +#define IDT_SW_NTP6_BARLIMIT2 0x0D494U +#define IDT_SW_NTP6_BARLTBASE2 0x0D498U +#define IDT_SW_NTP6_BARUTBASE2 0x0D49CU +#define IDT_SW_NTP6_BARSETUP3 0x0D4A0U +#define IDT_SW_NTP6_BARLIMIT3 0x0D4A4U +#define IDT_SW_NTP6_BARLTBASE3 0x0D4A8U +#define IDT_SW_NTP6_BARUTBASE3 0x0D4ACU +#define IDT_SW_NTP6_BARSETUP4 0x0D4B0U +#define IDT_SW_NTP6_BARLIMIT4 0x0D4B4U +#define IDT_SW_NTP6_BARLTBASE4 0x0D4B8U +#define IDT_SW_NTP6_BARUTBASE4 0x0D4BCU +#define IDT_SW_NTP6_BARSETUP5 0x0D4C0U +#define IDT_SW_NTP6_BARLIMIT5 0x0D4C4U +#define IDT_SW_NTP6_BARLTBASE5 0x0D4C8U +#define IDT_SW_NTP6_BARUTBASE5 0x0D4CCU +/* PCI Express command/status and link control/status registers (WORD+WORD) */ +#define IDT_SW_NTP8_PCIECMDSTS 0x11004U +#define IDT_SW_NTP8_PCIELCTLSTS 0x11050U +/* NT-function control register (DWORD) */ +#define IDT_SW_NTP8_NTCTL 0x11400U +/* BAR setup/limit/base address registers (DWORD) */ +#define IDT_SW_NTP8_BARSETUP0 0x11470U +#define IDT_SW_NTP8_BARLIMIT0 0x11474U +#define IDT_SW_NTP8_BARLTBASE0 0x11478U +#define IDT_SW_NTP8_BARUTBASE0 0x1147CU +#define IDT_SW_NTP8_BARSETUP1 0x11480U +#define IDT_SW_NTP8_BARLIMIT1 0x11484U +#define IDT_SW_NTP8_BARLTBASE1 0x11488U +#define IDT_SW_NTP8_BARUTBASE1 0x1148CU +#define IDT_SW_NTP8_BARSETUP2 0x11490U +#define IDT_SW_NTP8_BARLIMIT2 0x11494U +#define IDT_SW_NTP8_BARLTBASE2 0x11498U +#define IDT_SW_NTP8_BARUTBASE2 0x1149CU +#define IDT_SW_NTP8_BARSETUP3 0x114A0U +#define IDT_SW_NTP8_BARLIMIT3 0x114A4U +#define IDT_SW_NTP8_BARLTBASE3 0x114A8U +#define IDT_SW_NTP8_BARUTBASE3 0x114ACU +#define IDT_SW_NTP8_BARSETUP4 0x114B0U +#define IDT_SW_NTP8_BARLIMIT4 0x114B4U +#define IDT_SW_NTP8_BARLTBASE4 0x114B8U +#define IDT_SW_NTP8_BARUTBASE4 0x114BCU +#define IDT_SW_NTP8_BARSETUP5 0x114C0U +#define IDT_SW_NTP8_BARLIMIT5 0x114C4U +#define IDT_SW_NTP8_BARLTBASE5 0x114C8U 
+#define IDT_SW_NTP8_BARUTBASE5 0x114CCU +/* PCI Express command/status and link control/status registers (WORD+WORD) */ +#define IDT_SW_NTP12_PCIECMDSTS 0x19004U +#define IDT_SW_NTP12_PCIELCTLSTS 0x19050U +/* NT-function control register (DWORD) */ +#define IDT_SW_NTP12_NTCTL 0x19400U +/* BAR setup/limit/base address registers (DWORD) */ +#define IDT_SW_NTP12_BARSETUP0 0x19470U +#define IDT_SW_NTP12_BARLIMIT0 0x19474U +#define IDT_SW_NTP12_BARLTBASE0 0x19478U +#define IDT_SW_NTP12_BARUTBASE0 0x1947CU +#define IDT_SW_NTP12_BARSETUP1 0x19480U +#define IDT_SW_NTP12_BARLIMIT1 0x19484U +#define IDT_SW_NTP12_BARLTBASE1 0x19488U +#define IDT_SW_NTP12_BARUTBASE1 0x1948CU +#define IDT_SW_NTP12_BARSETUP2 0x19490U +#define IDT_SW_NTP12_BARLIMIT2 0x19494U +#define IDT_SW_NTP12_BARLTBASE2 0x19498U +#define IDT_SW_NTP12_BARUTBASE2 0x1949CU +#define IDT_SW_NTP12_BARSETUP3 0x194A0U +#define IDT_SW_NTP12_BARLIMIT3 0x194A4U +#define IDT_SW_NTP12_BARLTBASE3 0x194A8U +#define IDT_SW_NTP12_BARUTBASE3 0x194ACU +#define IDT_SW_NTP12_BARSETUP4 0x194B0U +#define IDT_SW_NTP12_BARLIMIT4 0x194B4U +#define IDT_SW_NTP12_BARLTBASE4 0x194B8U +#define IDT_SW_NTP12_BARUTBASE4 0x194BCU +#define IDT_SW_NTP12_BARSETUP5 0x194C0U +#define IDT_SW_NTP12_BARLIMIT5 0x194C4U +#define IDT_SW_NTP12_BARLTBASE5 0x194C8U +#define IDT_SW_NTP12_BARUTBASE5 0x194CCU +/* PCI Express command/status and link control/status registers (WORD+WORD) */ +#define IDT_SW_NTP16_PCIECMDSTS 0x21004U +#define IDT_SW_NTP16_PCIELCTLSTS 0x21050U +/* NT-function control register (DWORD) */ +#define IDT_SW_NTP16_NTCTL 0x21400U +/* BAR setup/limit/base address registers (DWORD) */ +#define IDT_SW_NTP16_BARSETUP0 0x21470U +#define IDT_SW_NTP16_BARLIMIT0 0x21474U +#define IDT_SW_NTP16_BARLTBASE0 0x21478U +#define IDT_SW_NTP16_BARUTBASE0 0x2147CU +#define IDT_SW_NTP16_BARSETUP1 0x21480U +#define IDT_SW_NTP16_BARLIMIT1 0x21484U +#define IDT_SW_NTP16_BARLTBASE1 0x21488U +#define IDT_SW_NTP16_BARUTBASE1 0x2148CU +#define IDT_SW_NTP16_BARSETUP2 0x21490U +#define IDT_SW_NTP16_BARLIMIT2 0x21494U +#define IDT_SW_NTP16_BARLTBASE2 0x21498U +#define IDT_SW_NTP16_BARUTBASE2 0x2149CU +#define IDT_SW_NTP16_BARSETUP3 0x214A0U +#define IDT_SW_NTP16_BARLIMIT3 0x214A4U +#define IDT_SW_NTP16_BARLTBASE3 0x214A8U +#define IDT_SW_NTP16_BARUTBASE3 0x214ACU +#define IDT_SW_NTP16_BARSETUP4 0x214B0U +#define IDT_SW_NTP16_BARLIMIT4 0x214B4U +#define IDT_SW_NTP16_BARLTBASE4 0x214B8U +#define IDT_SW_NTP16_BARUTBASE4 0x214BCU +#define IDT_SW_NTP16_BARSETUP5 0x214C0U +#define IDT_SW_NTP16_BARLIMIT5 0x214C4U +#define IDT_SW_NTP16_BARLTBASE5 0x214C8U +#define IDT_SW_NTP16_BARUTBASE5 0x214CCU +/* PCI Express command/status and link control/status registers (WORD+WORD) */ +#define IDT_SW_NTP20_PCIECMDSTS 0x29004U +#define IDT_SW_NTP20_PCIELCTLSTS 0x29050U +/* NT-function control register (DWORD) */ +#define IDT_SW_NTP20_NTCTL 0x29400U +/* BAR setup/limit/base address registers (DWORD) */ +#define IDT_SW_NTP20_BARSETUP0 0x29470U +#define IDT_SW_NTP20_BARLIMIT0 0x29474U +#define IDT_SW_NTP20_BARLTBASE0 0x29478U +#define IDT_SW_NTP20_BARUTBASE0 0x2947CU +#define IDT_SW_NTP20_BARSETUP1 0x29480U +#define IDT_SW_NTP20_BARLIMIT1 0x29484U +#define IDT_SW_NTP20_BARLTBASE1 0x29488U +#define IDT_SW_NTP20_BARUTBASE1 0x2948CU +#define IDT_SW_NTP20_BARSETUP2 0x29490U +#define IDT_SW_NTP20_BARLIMIT2 0x29494U +#define IDT_SW_NTP20_BARLTBASE2 0x29498U +#define IDT_SW_NTP20_BARUTBASE2 0x2949CU +#define IDT_SW_NTP20_BARSETUP3 0x294A0U +#define IDT_SW_NTP20_BARLIMIT3 0x294A4U +#define IDT_SW_NTP20_BARLTBASE3 0x294A8U 
+#define IDT_SW_NTP20_BARUTBASE3 0x294ACU +#define IDT_SW_NTP20_BARSETUP4 0x294B0U +#define IDT_SW_NTP20_BARLIMIT4 0x294B4U +#define IDT_SW_NTP20_BARLTBASE4 0x294B8U +#define IDT_SW_NTP20_BARUTBASE4 0x294BCU +#define IDT_SW_NTP20_BARSETUP5 0x294C0U +#define IDT_SW_NTP20_BARLIMIT5 0x294C4U +#define IDT_SW_NTP20_BARLTBASE5 0x294C8U +#define IDT_SW_NTP20_BARUTBASE5 0x294CCU +/* IDT PCIe-switch control register (DWORD) */ +#define IDT_SW_CTL 0x3E000U +/* Boot Configuration Vector Status (DWORD) */ +#define IDT_SW_BCVSTS 0x3E004U +/* Port Clocking Mode (DWORD) */ +#define IDT_SW_PCLKMODE 0x3E008U +/* Reset Drain Delay (DWORD) */ +#define IDT_SW_RDRAINDELAY 0x3E080U +/* Port Operating Mode Change Drain Delay (DWORD) */ +#define IDT_SW_POMCDELAY 0x3E084U +/* Side Effect Delay (DWORD) */ +#define IDT_SW_SEDELAY 0x3E088U +/* Upstream Secondary Bus Reset Delay (DWORD) */ +#define IDT_SW_SSBRDELAY 0x3E08CU +/* Switch partition N Control/Status/Failover registers */ +#define IDT_SW_SWPART0CTL 0x3E100U +#define IDT_SW_SWPART0STS 0x3E104U +#define IDT_SW_SWPART0FCTL 0x3E108U +#define IDT_SW_SWPART1CTL 0x3E120U +#define IDT_SW_SWPART1STS 0x3E124U +#define IDT_SW_SWPART1FCTL 0x3E128U +#define IDT_SW_SWPART2CTL 0x3E140U +#define IDT_SW_SWPART2STS 0x3E144U +#define IDT_SW_SWPART2FCTL 0x3E148U +#define IDT_SW_SWPART3CTL 0x3E160U +#define IDT_SW_SWPART3STS 0x3E164U +#define IDT_SW_SWPART3FCTL 0x3E168U +#define IDT_SW_SWPART4CTL 0x3E180U +#define IDT_SW_SWPART4STS 0x3E184U +#define IDT_SW_SWPART4FCTL 0x3E188U +#define IDT_SW_SWPART5CTL 0x3E1A0U +#define IDT_SW_SWPART5STS 0x3E1A4U +#define IDT_SW_SWPART5FCTL 0x3E1A8U +#define IDT_SW_SWPART6CTL 0x3E1C0U +#define IDT_SW_SWPART6STS 0x3E1C4U +#define IDT_SW_SWPART6FCTL 0x3E1C8U +#define IDT_SW_SWPART7CTL 0x3E1E0U +#define IDT_SW_SWPART7STS 0x3E1E4U +#define IDT_SW_SWPART7FCTL 0x3E1E8U +/* Switch port N control and status registers */ +#define IDT_SW_SWPORT0CTL 0x3E200U +#define IDT_SW_SWPORT0STS 0x3E204U +#define IDT_SW_SWPORT0FCTL 0x3E208U +#define IDT_SW_SWPORT2CTL 0x3E240U +#define IDT_SW_SWPORT2STS 0x3E244U +#define IDT_SW_SWPORT2FCTL 0x3E248U +#define IDT_SW_SWPORT4CTL 0x3E280U +#define IDT_SW_SWPORT4STS 0x3E284U +#define IDT_SW_SWPORT4FCTL 0x3E288U +#define IDT_SW_SWPORT6CTL 0x3E2C0U +#define IDT_SW_SWPORT6STS 0x3E2C4U +#define IDT_SW_SWPORT6FCTL 0x3E2C8U +#define IDT_SW_SWPORT8CTL 0x3E300U +#define IDT_SW_SWPORT8STS 0x3E304U +#define IDT_SW_SWPORT8FCTL 0x3E308U +#define IDT_SW_SWPORT12CTL 0x3E380U +#define IDT_SW_SWPORT12STS 0x3E384U +#define IDT_SW_SWPORT12FCTL 0x3E388U +#define IDT_SW_SWPORT16CTL 0x3E400U +#define IDT_SW_SWPORT16STS 0x3E404U +#define IDT_SW_SWPORT16FCTL 0x3E408U +#define IDT_SW_SWPORT20CTL 0x3E480U +#define IDT_SW_SWPORT20STS 0x3E484U +#define IDT_SW_SWPORT20FCTL 0x3E488U +/* Switch Event registers */ +/* Switch Event Status/Mask/Partition mask (DWORD) */ +#define IDT_SW_SESTS 0x3EC00U +#define IDT_SW_SEMSK 0x3EC04U +#define IDT_SW_SEPMSK 0x3EC08U +/* Switch Event Link Up/Down Status/Mask (DWORD) */ +#define IDT_SW_SELINKUPSTS 0x3EC0CU +#define IDT_SW_SELINKUPMSK 0x3EC10U +#define IDT_SW_SELINKDNSTS 0x3EC14U +#define IDT_SW_SELINKDNMSK 0x3EC18U +/* Switch Event Fundamental Reset Status/Mask (DWORD) */ +#define IDT_SW_SEFRSTSTS 0x3EC1CU +#define IDT_SW_SEFRSTMSK 0x3EC20U +/* Switch Event Hot Reset Status/Mask (DWORD) */ +#define IDT_SW_SEHRSTSTS 0x3EC24U +#define IDT_SW_SEHRSTMSK 0x3EC28U +/* Switch Event Failover Mask (DWORD) */ +#define IDT_SW_SEFOVRMSK 0x3EC2CU +/* Switch Event Global Signal Status/Mask (DWORD) */ +#define 
IDT_SW_SEGSIGSTS 0x3EC30U +#define IDT_SW_SEGSIGMSK 0x3EC34U +/* NT Global Doorbell Status (DWORD) */ +#define IDT_SW_GDBELLSTS 0x3EC3CU +/* Switch partition N message M control (msgs routing table) (DWORD) */ +#define IDT_SW_SWP0MSGCTL0 0x3EE00U +#define IDT_SW_SWP1MSGCTL0 0x3EE04U +#define IDT_SW_SWP2MSGCTL0 0x3EE08U +#define IDT_SW_SWP3MSGCTL0 0x3EE0CU +#define IDT_SW_SWP4MSGCTL0 0x3EE10U +#define IDT_SW_SWP5MSGCTL0 0x3EE14U +#define IDT_SW_SWP6MSGCTL0 0x3EE18U +#define IDT_SW_SWP7MSGCTL0 0x3EE1CU +#define IDT_SW_SWP0MSGCTL1 0x3EE20U +#define IDT_SW_SWP1MSGCTL1 0x3EE24U +#define IDT_SW_SWP2MSGCTL1 0x3EE28U +#define IDT_SW_SWP3MSGCTL1 0x3EE2CU +#define IDT_SW_SWP4MSGCTL1 0x3EE30U +#define IDT_SW_SWP5MSGCTL1 0x3EE34U +#define IDT_SW_SWP6MSGCTL1 0x3EE38U +#define IDT_SW_SWP7MSGCTL1 0x3EE3CU +#define IDT_SW_SWP0MSGCTL2 0x3EE40U +#define IDT_SW_SWP1MSGCTL2 0x3EE44U +#define IDT_SW_SWP2MSGCTL2 0x3EE48U +#define IDT_SW_SWP3MSGCTL2 0x3EE4CU +#define IDT_SW_SWP4MSGCTL2 0x3EE50U +#define IDT_SW_SWP5MSGCTL2 0x3EE54U +#define IDT_SW_SWP6MSGCTL2 0x3EE58U +#define IDT_SW_SWP7MSGCTL2 0x3EE5CU +#define IDT_SW_SWP0MSGCTL3 0x3EE60U +#define IDT_SW_SWP1MSGCTL3 0x3EE64U +#define IDT_SW_SWP2MSGCTL3 0x3EE68U +#define IDT_SW_SWP3MSGCTL3 0x3EE6CU +#define IDT_SW_SWP4MSGCTL3 0x3EE70U +#define IDT_SW_SWP5MSGCTL3 0x3EE74U +#define IDT_SW_SWP6MSGCTL3 0x3EE78U +#define IDT_SW_SWP7MSGCTL3 0x3EE7CU +/* SMBus Status and Control registers (DWORD) */ +#define IDT_SW_SMBUSSTS 0x3F188U +#define IDT_SW_SMBUSCTL 0x3F18CU +/* Serial EEPROM Interface (DWORD) */ +#define IDT_SW_EEPROMINTF 0x3F190U +/* MBus I/O Expander Address N (DWORD) */ +#define IDT_SW_IOEXPADDR0 0x3F198U +#define IDT_SW_IOEXPADDR1 0x3F19CU +#define IDT_SW_IOEXPADDR2 0x3F1A0U +#define IDT_SW_IOEXPADDR3 0x3F1A4U +#define IDT_SW_IOEXPADDR4 0x3F1A8U +#define IDT_SW_IOEXPADDR5 0x3F1ACU +/* General Purpose Events Control and Status registers (DWORD) */ +#define IDT_SW_GPECTL 0x3F1B0U +#define IDT_SW_GPESTS 0x3F1B4U +/* Temperature sensor Control/Status/Alarm/Adjustment/Slope registers */ +#define IDT_SW_TMPCTL 0x3F1D4U +#define IDT_SW_TMPSTS 0x3F1D8U +#define IDT_SW_TMPALARM 0x3F1DCU +#define IDT_SW_TMPADJ 0x3F1E0U +#define IDT_SW_TSSLOPE 0x3F1E4U +/* SMBus Configuration Block header log (DWORD) */ +#define IDT_SW_SMBUSCBHL 0x3F1E8U + +/* + * Common registers related constants + * @IDT_REG_ALIGN: Registers alignment used in the driver + * @IDT_REG_PCI_MAX: Maximum PCI configuration space register value + * @IDT_REG_SW_MAX: Maximum global register value + */ +#define IDT_REG_ALIGN 4 +#define IDT_REG_PCI_MAX 0x00FFFU +#define IDT_REG_SW_MAX 0x3FFFFU + +/* + * PCICMDSTS register fields related constants + * @IDT_PCICMDSTS_IOAE: I/O access enable + * @IDT_PCICMDSTS_MAE: Memory access enable + * @IDT_PCICMDSTS_BME: Bus master enable + */ +#define IDT_PCICMDSTS_IOAE 0x00000001U +#define IDT_PCICMDSTS_MAE 0x00000002U +#define IDT_PCICMDSTS_BME 0x00000004U + +/* + * PCIEDCAP register fields related constants + * @IDT_PCIEDCAP_MPAYLOAD_MASK: Maximum payload size mask + * @IDT_PCIEDCAP_MPAYLOAD_FLD: Maximum payload size field offset + * @IDT_PCIEDCAP_MPAYLOAD_S128: Max supported payload size of 128 bytes + * @IDT_PCIEDCAP_MPAYLOAD_S256: Max supported payload size of 256 bytes + * @IDT_PCIEDCAP_MPAYLOAD_S512: Max supported payload size of 512 bytes + * @IDT_PCIEDCAP_MPAYLOAD_S1024: Max supported payload size of 1024 bytes + * @IDT_PCIEDCAP_MPAYLOAD_S2048: Max supported payload size of 2048 bytes + */ +#define IDT_PCIEDCAP_MPAYLOAD_MASK 0x00000007U +#define 
IDT_PCIEDCAP_MPAYLOAD_FLD 0
+#define IDT_PCIEDCAP_MPAYLOAD_S128 0x00000000U
+#define IDT_PCIEDCAP_MPAYLOAD_S256 0x00000001U
+#define IDT_PCIEDCAP_MPAYLOAD_S512 0x00000002U
+#define IDT_PCIEDCAP_MPAYLOAD_S1024 0x00000003U
+#define IDT_PCIEDCAP_MPAYLOAD_S2048 0x00000004U
+
+/*
+ * PCIEDCTLSTS register fields related constants
+ * @IDT_PCIEDCTLSTS_MPS_MASK: Maximum payload size mask
+ * @IDT_PCIEDCTLSTS_MPS_FLD: MPS field offset
+ * @IDT_PCIEDCTLSTS_MPS_S128: Max payload size of 128 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S256: Max payload size of 256 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S512: Max payload size of 512 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S1024: Max payload size of 1024 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S2048: Max payload size of 2048 bytes
+ * @IDT_PCIEDCTLSTS_MPS_S4096: Max payload size of 4096 bytes
+ */
+#define IDT_PCIEDCTLSTS_MPS_MASK 0x000000E0U
+#define IDT_PCIEDCTLSTS_MPS_FLD 5
+#define IDT_PCIEDCTLSTS_MPS_S128 0x00000000U
+#define IDT_PCIEDCTLSTS_MPS_S256 0x00000020U
+#define IDT_PCIEDCTLSTS_MPS_S512 0x00000040U
+#define IDT_PCIEDCTLSTS_MPS_S1024 0x00000060U
+#define IDT_PCIEDCTLSTS_MPS_S2048 0x00000080U
+#define IDT_PCIEDCTLSTS_MPS_S4096 0x000000A0U
+
+/*
+ * PCIELCAP register fields related constants
+ * @IDT_PCIELCAP_PORTNUM_MASK: Port number field mask
+ * @IDT_PCIELCAP_PORTNUM_FLD: Port number field offset
+ */
+#define IDT_PCIELCAP_PORTNUM_MASK 0xFF000000U
+#define IDT_PCIELCAP_PORTNUM_FLD 24
+
+/*
+ * PCIELCTLSTS register fields related constants
+ * @IDT_PCIELCTLSTS_CLS_MASK: Current link speed mask
+ * @IDT_PCIELCTLSTS_CLS_FLD: Current link speed field offset
+ * @IDT_PCIELCTLSTS_NLW_MASK: Negotiated link width mask
+ * @IDT_PCIELCTLSTS_NLW_FLD: Negotiated link width field offset
+ * @IDT_PCIELCTLSTS_SCLK_COM: Common slot clock configuration
+ */
+#define IDT_PCIELCTLSTS_CLS_MASK 0x000F0000U
+#define IDT_PCIELCTLSTS_CLS_FLD 16
+#define IDT_PCIELCTLSTS_NLW_MASK 0x03F00000U
+#define IDT_PCIELCTLSTS_NLW_FLD 20
+#define IDT_PCIELCTLSTS_SCLK_COM 0x10000000U
+
+/*
+ * NTCTL register fields related constants
+ * @IDT_NTCTL_IDPROTDIS: ID Protection check disable (disable MTBL)
+ * @IDT_NTCTL_CPEN: Completion enable
+ * @IDT_NTCTL_RNS: Request no snoop processing (if MTBL disabled)
+ * @IDT_NTCTL_ATP: Address type processing (if MTBL disabled)
+ */
+#define IDT_NTCTL_IDPROTDIS 0x00000001U
+#define IDT_NTCTL_CPEN 0x00000002U
+#define IDT_NTCTL_RNS 0x00000004U
+#define IDT_NTCTL_ATP 0x00000008U
+
+/*
+ * NTINTSTS register fields related constants
+ * @IDT_NTINTSTS_MSG: Message interrupt bit
+ * @IDT_NTINTSTS_DBELL: Doorbell interrupt bit
+ * @IDT_NTINTSTS_SEVENT: Switch Event interrupt bit
+ * @IDT_NTINTSTS_TMPSENSOR: Temperature sensor interrupt bit
+ */
+#define IDT_NTINTSTS_MSG 0x00000001U
+#define IDT_NTINTSTS_DBELL 0x00000002U
+#define IDT_NTINTSTS_SEVENT 0x00000008U
+#define IDT_NTINTSTS_TMPSENSOR 0x00000080U
+
+/*
+ * NTINTMSK register fields related constants
+ * @IDT_NTINTMSK_MSG: Message interrupt mask bit
+ * @IDT_NTINTMSK_DBELL: Doorbell interrupt mask bit
+ * @IDT_NTINTMSK_SEVENT: Switch Event interrupt mask bit
+ * @IDT_NTINTMSK_TMPSENSOR: Temperature sensor interrupt mask bit
+ * @IDT_NTINTMSK_ALL: All the useful interrupts mask
+ */
+#define IDT_NTINTMSK_MSG 0x00000001U
+#define IDT_NTINTMSK_DBELL 0x00000002U
+#define IDT_NTINTMSK_SEVENT 0x00000008U
+#define IDT_NTINTMSK_TMPSENSOR 0x00000080U
+#define IDT_NTINTMSK_ALL \
+	(IDT_NTINTMSK_MSG | IDT_NTINTMSK_DBELL | \
+	 IDT_NTINTMSK_SEVENT | IDT_NTINTMSK_TMPSENSOR)
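The NTINTMSK bits above gate each NT interrupt source individually, and IDT_NTINTMSK_ALL covers every source the driver services. A minimal sketch of the intended usage, assuming a hypothetical idt_nt_write() MMIO helper and the IDT_NT_NTINTMSK register offset defined earlier in this header (neither is shown in this hunk):

static void idt_example_unmask_irqs(struct idt_ntb_dev *ndev)
{
	/* Mask every NT interrupt source first ... */
	idt_nt_write(ndev, IDT_NT_NTINTMSK, IDT_NTINTMSK_ALL);

	/* ... then clear only the mask bits for the sources we handle */
	idt_nt_write(ndev, IDT_NT_NTINTMSK,
		     IDT_NTINTMSK_ALL & ~(IDT_NTINTMSK_MSG | IDT_NTINTMSK_DBELL));
}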
+
+/*
+ * NTGSIGNAL register fields related constants
+ * @IDT_NTGSIGNAL_SET: Set global signal of the local partition
+ */
+#define IDT_NTGSIGNAL_SET 0x00000001U
+
+/*
+ * BARSETUP register fields related constants
+ * @IDT_BARSETUP_TYPE_MASK: Mask of the TYPE field
+ * @IDT_BARSETUP_TYPE_FLD: TYPE field offset
+ * @IDT_BARSETUP_TYPE_32: 32-bit addressing BAR
+ * @IDT_BARSETUP_TYPE_64: 64-bit addressing BAR
+ * @IDT_BARSETUP_PREF: Value of the BAR prefetchable field
+ * @IDT_BARSETUP_SIZE_MASK: Mask of the SIZE field
+ * @IDT_BARSETUP_SIZE_FLD: SIZE field offset
+ * @IDT_BARSETUP_SIZE_CFG: SIZE field value in case of config space MODE
+ * @IDT_BARSETUP_MODE_CFG: Configuration space BAR mode
+ * @IDT_BARSETUP_ATRAN_MASK: ATRAN field mask
+ * @IDT_BARSETUP_ATRAN_FLD: ATRAN field offset
+ * @IDT_BARSETUP_ATRAN_DIR: Direct address translation memory window
+ * @IDT_BARSETUP_ATRAN_LUT12: 12-entry lookup table
+ * @IDT_BARSETUP_ATRAN_LUT24: 24-entry lookup table
+ * @IDT_BARSETUP_TPART_MASK: TPART field mask
+ * @IDT_BARSETUP_TPART_FLD: TPART field offset
+ * @IDT_BARSETUP_EN: BAR enable bit
+ */
+#define IDT_BARSETUP_TYPE_MASK 0x00000006U
+#define IDT_BARSETUP_TYPE_FLD 0
+#define IDT_BARSETUP_TYPE_32 0x00000000U
+#define IDT_BARSETUP_TYPE_64 0x00000004U
+#define IDT_BARSETUP_PREF 0x00000008U
+#define IDT_BARSETUP_SIZE_MASK 0x000003F0U
+#define IDT_BARSETUP_SIZE_FLD 4
+#define IDT_BARSETUP_SIZE_CFG 0x000000C0U
+#define IDT_BARSETUP_MODE_CFG 0x00000400U
+#define IDT_BARSETUP_ATRAN_MASK 0x00001800U
+#define IDT_BARSETUP_ATRAN_FLD 11
+#define IDT_BARSETUP_ATRAN_DIR 0x00000000U
+#define IDT_BARSETUP_ATRAN_LUT12 0x00000800U
+#define IDT_BARSETUP_ATRAN_LUT24 0x00001000U
+#define IDT_BARSETUP_TPART_MASK 0x0000E000U
+#define IDT_BARSETUP_TPART_FLD 13
+#define IDT_BARSETUP_EN 0x80000000U
+
+/*
+ * NTMTBLDATA register fields related constants
+ * @IDT_NTMTBLDATA_VALID: Set the MTBL entry being valid
+ * @IDT_NTMTBLDATA_REQID_MASK: Bus:Device:Function field mask
+ * @IDT_NTMTBLDATA_REQID_FLD: Bus:Device:Function field offset
+ * @IDT_NTMTBLDATA_PART_MASK: Partition field mask
+ * @IDT_NTMTBLDATA_PART_FLD: Partition field offset
+ * @IDT_NTMTBLDATA_ATP_TRANS: Enable AT field translation on request TLPs
+ * @IDT_NTMTBLDATA_CNS_INV: Enable No Snoop attribute inversion of
+ *                          Completion TLPs
+ * @IDT_NTMTBLDATA_RNS_INV: Enable No Snoop attribute inversion of
+ *                          Request TLPs
+ */
+#define IDT_NTMTBLDATA_VALID 0x00000001U
+#define IDT_NTMTBLDATA_REQID_MASK 0x0001FFFEU
+#define IDT_NTMTBLDATA_REQID_FLD 1
+#define IDT_NTMTBLDATA_PART_MASK 0x000E0000U
+#define IDT_NTMTBLDATA_PART_FLD 17
+#define IDT_NTMTBLDATA_ATP_TRANS 0x20000000U
+#define IDT_NTMTBLDATA_CNS_INV 0x40000000U
+#define IDT_NTMTBLDATA_RNS_INV 0x80000000U
+
+/*
+ * REQIDCAP register fields related constants
+ * @IDT_REQIDCAP_REQID_MASK: Request ID field mask
+ * @IDT_REQIDCAP_REQID_FLD: Request ID field offset
+ */
+#define IDT_REQIDCAP_REQID_MASK 0x0000FFFFU
+#define IDT_REQIDCAP_REQID_FLD 0
+
+/*
+ * LUTOFFSET register fields related constants
+ * @IDT_LUTOFFSET_INDEX_MASK: Lookup table index field mask
+ * @IDT_LUTOFFSET_INDEX_FLD: Lookup table index field offset
+ * @IDT_LUTOFFSET_BAR_MASK: Lookup table BAR select field mask
+ * @IDT_LUTOFFSET_BAR_FLD: Lookup table BAR select field offset
+ */
+#define IDT_LUTOFFSET_INDEX_MASK 0x0000001FU
+#define IDT_LUTOFFSET_INDEX_FLD 0
+#define IDT_LUTOFFSET_BAR_MASK 0x00000700U
+#define IDT_LUTOFFSET_BAR_FLD 8
+
+/*
+ * LUTUDATA register fields related constants
+ * @IDT_LUTUDATA_PART_MASK: Partition field mask
+ * @IDT_LUTUDATA_PART_FLD: Partition field offset
+ * @IDT_LUTUDATA_VALID: Lookup table entry valid bit
+ */
+#define IDT_LUTUDATA_PART_MASK 0x0000000FU
+#define IDT_LUTUDATA_PART_FLD 0
+#define IDT_LUTUDATA_VALID 0x80000000U
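Taken together, LUTOFFSET selects a lookup-table entry (index plus BAR select), the LUTLDATA/LUTMDATA registers carry the lower and middle words of the translated base address, and LUTUDATA holds the target partition and the valid bit. A hedged sketch of programming one entry under these definitions; idt_nt_write() is assumed, and the real driver would additionally serialize the sequence with its lut_lock:

static void idt_example_set_lut(struct idt_ntb_dev *ndev, int bar, int idx,
				u64 addr, int part)
{
	/* Select the entry: table index in the low bits, BAR select at bit 8 */
	idt_nt_write(ndev, IDT_NT_LUTOFFSET,
		     ((idx << IDT_LUTOFFSET_INDEX_FLD) & IDT_LUTOFFSET_INDEX_MASK) |
		     ((bar << IDT_LUTOFFSET_BAR_FLD) & IDT_LUTOFFSET_BAR_MASK));

	/* The 64-bit translated base is split across the lower/middle words */
	idt_nt_write(ndev, IDT_NT_LUTLDATA, (u32)addr);
	idt_nt_write(ndev, IDT_NT_LUTMDATA, (u32)(addr >> 32));

	/* The upper word routes the window to a partition and validates it */
	idt_nt_write(ndev, IDT_NT_LUTUDATA,
		     ((part << IDT_LUTUDATA_PART_FLD) & IDT_LUTUDATA_PART_MASK) |
		     IDT_LUTUDATA_VALID);
}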
+
+/*
+ * SWPARTxSTS register fields related constants
+ * @IDT_SWPARTxSTS_SCI: Switch partition state change initiated
+ * @IDT_SWPARTxSTS_SCC: Switch partition state change completed
+ * @IDT_SWPARTxSTS_STATE_MASK: Switch partition state mask
+ * @IDT_SWPARTxSTS_STATE_FLD: Switch partition state field offset
+ * @IDT_SWPARTxSTS_STATE_DIS: Switch partition disabled
+ * @IDT_SWPARTxSTS_STATE_ACT: Switch partition enabled
+ * @IDT_SWPARTxSTS_STATE_RES: Switch partition in reset
+ * @IDT_SWPARTxSTS_US: Switch partition has upstream port
+ * @IDT_SWPARTxSTS_USID_MASK: Switch partition upstream port ID mask
+ * @IDT_SWPARTxSTS_USID_FLD: Switch partition upstream port ID field offset
+ * @IDT_SWPARTxSTS_NT: Upstream port has NT function
+ * @IDT_SWPARTxSTS_DMA: Upstream port has DMA function
+ */
+#define IDT_SWPARTxSTS_SCI 0x00000001U
+#define IDT_SWPARTxSTS_SCC 0x00000002U
+#define IDT_SWPARTxSTS_STATE_MASK 0x00000060U
+#define IDT_SWPARTxSTS_STATE_FLD 5
+#define IDT_SWPARTxSTS_STATE_DIS 0x00000000U
+#define IDT_SWPARTxSTS_STATE_ACT 0x00000020U
+#define IDT_SWPARTxSTS_STATE_RES 0x00000060U
+#define IDT_SWPARTxSTS_US 0x00000100U
+#define IDT_SWPARTxSTS_USID_MASK 0x00003E00U
+#define IDT_SWPARTxSTS_USID_FLD 9
+#define IDT_SWPARTxSTS_NT 0x00004000U
+#define IDT_SWPARTxSTS_DMA 0x00008000U
+
+/*
+ * SWPORTxSTS register fields related constants
+ * @IDT_SWPORTxSTS_OMCI: Operation mode change initiated
+ * @IDT_SWPORTxSTS_OMCC: Operation mode change completed
+ * @IDT_SWPORTxSTS_LINKUP: Link up status
+ * @IDT_SWPORTxSTS_DS: Port lanes behave as downstream lanes
+ * @IDT_SWPORTxSTS_MODE_MASK: Port mode field mask
+ * @IDT_SWPORTxSTS_MODE_FLD: Port mode field offset
+ * @IDT_SWPORTxSTS_MODE_DIS: Port mode - disabled
+ * @IDT_SWPORTxSTS_MODE_DS: Port mode - downstream switch port
+ * @IDT_SWPORTxSTS_MODE_US: Port mode - upstream switch port
+ * @IDT_SWPORTxSTS_MODE_NT: Port mode - NT function
+ * @IDT_SWPORTxSTS_MODE_USNT: Port mode - upstream switch port with NTB
+ * @IDT_SWPORTxSTS_MODE_UNAT: Port mode - unattached
+ * @IDT_SWPORTxSTS_MODE_USDMA: Port mode - upstream switch port with DMA
+ * @IDT_SWPORTxSTS_MODE_USNTDMA: Port mode - upstream port with NTB and DMA
+ * @IDT_SWPORTxSTS_MODE_NTDMA: Port mode - NT function with DMA
+ * @IDT_SWPORTxSTS_SWPART_MASK: Port partition field mask
+ * @IDT_SWPORTxSTS_SWPART_FLD: Port partition field offset
+ * @IDT_SWPORTxSTS_DEVNUM_MASK: Port device number field mask
+ * @IDT_SWPORTxSTS_DEVNUM_FLD: Port device number field offset
+ */
+#define IDT_SWPORTxSTS_OMCI 0x00000001U
+#define IDT_SWPORTxSTS_OMCC 0x00000002U
+#define IDT_SWPORTxSTS_LINKUP 0x00000010U
+#define IDT_SWPORTxSTS_DS 0x00000020U
+#define IDT_SWPORTxSTS_MODE_MASK 0x000003C0U
+#define IDT_SWPORTxSTS_MODE_FLD 6
+#define IDT_SWPORTxSTS_MODE_DIS 0x00000000U
+#define IDT_SWPORTxSTS_MODE_DS 0x00000040U
+#define IDT_SWPORTxSTS_MODE_US 0x00000080U
+#define IDT_SWPORTxSTS_MODE_NT 0x000000C0U
+#define IDT_SWPORTxSTS_MODE_USNT 0x00000100U
+#define IDT_SWPORTxSTS_MODE_UNAT 0x00000140U
+#define IDT_SWPORTxSTS_MODE_USDMA 0x00000180U
+#define IDT_SWPORTxSTS_MODE_USNTDMA 0x000001C0U
+#define IDT_SWPORTxSTS_MODE_NTDMA 0x00000200U
+#define IDT_SWPORTxSTS_SWPART_MASK 0x00001C00U
+#define IDT_SWPORTxSTS_SWPART_FLD 10
+#define IDT_SWPORTxSTS_DEVNUM_MASK 0x001F0000U
+#define IDT_SWPORTxSTS_DEVNUM_FLD 16
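The MODE_* encodings above make it easy to classify a port from its SWPORTxSTS value. A sketch using plain masks and shifts (the driver itself prefers the GET_FIELD()/IS_FLD_SET() helpers defined a few sections below); the helper name is illustrative, not from the patch:

static bool idt_example_port_has_nt(u32 sts, int *part)
{
	u32 mode = sts & IDT_SWPORTxSTS_MODE_MASK;

	/* Only the NT, USNT, USNTDMA and NTDMA modes expose an NT function */
	if (mode != IDT_SWPORTxSTS_MODE_NT &&
	    mode != IDT_SWPORTxSTS_MODE_USNT &&
	    mode != IDT_SWPORTxSTS_MODE_USNTDMA &&
	    mode != IDT_SWPORTxSTS_MODE_NTDMA)
		return false;

	/* Report which switch partition the port belongs to */
	*part = (sts & IDT_SWPORTxSTS_SWPART_MASK) >> IDT_SWPORTxSTS_SWPART_FLD;
	return true;
}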
+
+/*
+ * SEMSK register fields related constants
+ * @IDT_SEMSK_LINKUP: Link Up event mask bit
+ * @IDT_SEMSK_LINKDN: Link Down event mask bit
+ * @IDT_SEMSK_GSIGNAL: Global Signal event mask bit
+ */
+#define IDT_SEMSK_LINKUP 0x00000001U
+#define IDT_SEMSK_LINKDN 0x00000002U
+#define IDT_SEMSK_GSIGNAL 0x00000020U
+
+/*
+ * SWPxMSGCTL register fields related constants
+ * @IDT_SWPxMSGCTL_REG_MASK: Register select field mask
+ * @IDT_SWPxMSGCTL_REG_FLD: Register select field offset
+ * @IDT_SWPxMSGCTL_PART_MASK: Partition select field mask
+ * @IDT_SWPxMSGCTL_PART_FLD: Partition select field offset
+ */
+#define IDT_SWPxMSGCTL_REG_MASK 0x00000003U
+#define IDT_SWPxMSGCTL_REG_FLD 0
+#define IDT_SWPxMSGCTL_PART_MASK 0x00000070U
+#define IDT_SWPxMSGCTL_PART_FLD 4
+
+/*
+ * TMPSTS register fields related constants
+ * @IDT_TMPSTS_TEMP_MASK: Current temperature field mask
+ * @IDT_TMPSTS_TEMP_FLD: Current temperature field offset
+ */
+#define IDT_TMPSTS_TEMP_MASK 0x000000FFU
+#define IDT_TMPSTS_TEMP_FLD 0
+
+/*
+ * Helper macros to get/set the corresponding field value
+ * @GET_FIELD: Retrieve the value of the corresponding field
+ * @SET_FIELD: Set the specified field up
+ * @IS_FLD_SET: Check whether a field is set with value
+ */
+#define GET_FIELD(field, data) \
+	(((u32)(data) & IDT_ ##field## _MASK) >> IDT_ ##field## _FLD)
+#define SET_FIELD(field, data, value) \
+	(((u32)(data) & ~IDT_ ##field## _MASK) | \
+	 ((u32)(value) << IDT_ ##field## _FLD))
+#define IS_FLD_SET(field, data, value) \
+	(((u32)(data) & IDT_ ##field## _MASK) == IDT_ ##field## _ ##value)
+
+/*
+ * Useful register masks:
+ * @IDT_DBELL_MASK: Doorbell bits mask
+ * @IDT_OUTMSG_MASK: Out messages status bits mask
+ * @IDT_INMSG_MASK: In messages status bits mask
+ * @IDT_MSG_MASK: Any message status bits mask
+ */
+#define IDT_DBELL_MASK ((u32)0xFFFFFFFFU)
+#define IDT_OUTMSG_MASK ((u32)0x0000000FU)
+#define IDT_INMSG_MASK ((u32)0x000F0000U)
+#define IDT_MSG_MASK (IDT_INMSG_MASK | IDT_OUTMSG_MASK)
+
+/*
+ * Number of IDT NTB resources:
+ * @IDT_MSG_CNT: Number of Message registers
+ * @IDT_BAR_CNT: Number of BARs of each port
+ * @IDT_MTBL_ENTRY_CNT: Number of mapping table entries
+ */
+#define IDT_MSG_CNT 4
+#define IDT_BAR_CNT 6
+#define IDT_MTBL_ENTRY_CNT 64
+
+/*
+ * General IDT PCIe-switch constants
+ * @IDT_MAX_NR_PORTS: Maximum number of ports per IDT PCIe-switch
+ * @IDT_MAX_NR_PARTS: Maximum number of partitions per IDT PCIe-switch
+ * @IDT_MAX_NR_PEERS: Maximum number of NT-peers per IDT PCIe-switch
+ * @IDT_MAX_NR_MWS: Maximum number of Memory Windows
+ * @IDT_PCIE_REGSIZE: Size of the registers in bytes
+ * @IDT_TRANS_ALIGN: Alignment of translated base address
+ * @IDT_DIR_SIZE_ALIGN: Alignment of the size setting for direct translated
+ *                      MWs. Even though the lower 10 bits are reserved, IDT
+ *                      treats them as ones, so effectively there is no size
+ *                      alignment limit for DIR address translation.
+ */
+#define IDT_MAX_NR_PORTS 24
+#define IDT_MAX_NR_PARTS 8
+#define IDT_MAX_NR_PEERS 8
+#define IDT_MAX_NR_MWS 29
+#define IDT_PCIE_REGSIZE 4
+#define IDT_TRANS_ALIGN 4
+#define IDT_DIR_SIZE_ALIGN 1
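The token-pasting helpers above expand the field name into the matching IDT_*_MASK/_FLD (or value) macro. A worked example against the BARSETUP fields defined earlier, illustrative only:

static u64 idt_example_barsetup_size(void)
{
	u32 barsetup = IDT_BARSETUP_TYPE_64 | IDT_BARSETUP_ATRAN_DIR |
		       IDT_BARSETUP_EN;

	/* SET_FIELD(BARSETUP_SIZE, ...) shifts 20 into bits 9:4 - a 1MB BAR */
	barsetup = SET_FIELD(BARSETUP_SIZE, barsetup, 20);

	/* IS_FLD_SET pastes the value name, comparing the TYPE field
	 * against IDT_BARSETUP_TYPE_64 here */
	WARN_ON(!IS_FLD_SET(BARSETUP_TYPE, barsetup, 64));

	/* GET_FIELD recovers the aperture order, so this returns 1 << 20 */
	return 1ULL << GET_FIELD(BARSETUP_SIZE, barsetup);
}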
+
+/*
+ * IDT Memory Window types. Depending on the device settings, IDT supports
+ * Direct Address Translation MW registers and Lookup Table registers
+ * @IDT_MW_DIR: Direct address translation
+ * @IDT_MW_LUT12: 12-entry lookup table entry
+ * @IDT_MW_LUT24: 24-entry lookup table entry
+ *
+ * NOTE: These values exactly match the encoding of the BARSETUP ATRAN field
+ */
+enum idt_mw_type {
+	IDT_MW_DIR = 0x0,
+	IDT_MW_LUT12 = 0x1,
+	IDT_MW_LUT24 = 0x2
+};
+
+/*
+ * IDT PCIe-switch model private data
+ * @name: Device name
+ * @port_cnt: Total number of NT endpoint ports
+ * @ports: Port ids
+ */
+struct idt_89hpes_cfg {
+	char *name;
+	unsigned char port_cnt;
+	unsigned char ports[];
+};
+
+/*
+ * Memory window configuration structure
+ * @type: Type of the memory window (direct address translation or lookup
+ *        table)
+ *
+ * @bar: PCIe BAR the memory window is assigned to
+ * @idx: Index of the memory window within the BAR
+ *
+ * @addr_align: Alignment of translated address
+ * @size_align: Alignment of memory window size
+ * @size_max: Maximum size of memory window
+ */
+struct idt_mw_cfg {
+	enum idt_mw_type type;
+
+	unsigned char bar;
+	unsigned char idx;
+
+	u64 addr_align;
+	u64 size_align;
+	u64 size_max;
+};
+
+/*
+ * Description structure of peer IDT NT-functions:
+ * @port: NT-function port
+ * @part: NT-function partition
+ *
+ * @mw_cnt: Number of memory windows supported by NT-function
+ * @mws: Array of memory window descriptors
+ */
+struct idt_ntb_peer {
+	unsigned char port;
+	unsigned char part;
+
+	unsigned char mw_cnt;
+	struct idt_mw_cfg *mws;
+};
+
+/*
+ * Description structure of local IDT NT-function:
+ * @ntb: Linux NTB-device description structure
+ * @swcfg: Pointer to the structure of local IDT PCIe-switch
+ *         specific configurations
+ *
+ * @port: Local NT-function port
+ * @part: Local NT-function partition
+ *
+ * @peer_cnt: Number of peers with activated NTB-function
+ * @peers: Array of peer descriptor structures
+ * @port_idx_map: Map of port number -> peer index
+ * @part_idx_map: Map of partition number -> peer index
+ *
+ * @mtbl_lock: Mapping table access lock
+ *
+ * @mw_cnt: Number of memory windows supported by NT-function
+ * @mws: Array of memory window descriptors
+ * @lut_lock: Lookup table access lock
+ *
+ * @msg_locks: Message registers mapping table locks
+ *
+ * @cfgspc: Virtual address of the memory mapped configuration
+ *          space of the NT-function
+ * @db_mask_lock: Doorbell mask register lock
+ * @msg_mask_lock: Message mask register lock
+ * @gasa_lock: GASA registers access lock
+ *
+ * @dbgfs_info: DebugFS info node
+ */
+struct idt_ntb_dev {
+	struct ntb_dev ntb;
+	struct idt_89hpes_cfg *swcfg;
+
+	unsigned char port;
+	unsigned char part;
+
+	unsigned char peer_cnt;
+	struct idt_ntb_peer peers[IDT_MAX_NR_PEERS];
+	char port_idx_map[IDT_MAX_NR_PORTS];
+	char part_idx_map[IDT_MAX_NR_PARTS];
+
+	spinlock_t mtbl_lock;
+
+	unsigned char mw_cnt;
+	struct idt_mw_cfg *mws;
+	spinlock_t lut_lock;
+
+	spinlock_t msg_locks[IDT_MSG_CNT];
+
+	void __iomem *cfgspc;
+	spinlock_t db_mask_lock;
+	spinlock_t msg_mask_lock;
+	spinlock_t gasa_lock;
+
+	struct dentry *dbgfs_info;
+};
+#define to_ndev_ntb(__ntb) container_of(__ntb, struct idt_ntb_dev, ntb)
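The to_ndev_ntb() accessor is how NTB callbacks get from the generic struct ntb_dev handed in by the NTB core back to the enclosing driver context. A simplified sketch of a callback in that shape; the function is hypothetical and the real driver's callbacks do more than this:

static int idt_example_peer_mw_count(struct ntb_dev *ntb, int pidx)
{
	/* container_of() walk from the embedded ntb member */
	struct idt_ntb_dev *ndev = to_ndev_ntb(ntb);

	if (pidx < 0 || pidx >= ndev->peer_cnt)
		return -EINVAL;

	return ndev->peers[pidx].mw_cnt;
}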
+
+/*
+ * Descriptor of the IDT PCIe-switch BAR resources
+ * @setup: BAR setup register
+ * @limit: BAR limit register
+ * @ltbase: Lower translated base address
+ * @utbase: Upper translated base address
+ */
+struct idt_ntb_bar {
+	unsigned int setup;
+	unsigned int limit;
+	unsigned int ltbase;
+	unsigned int utbase;
+};
+
+/*
+ * Descriptor of the IDT PCIe-switch message resources
+ * @in: Inbound message register
+ * @out: Outbound message register
+ * @src: Source of inbound message register
+ */
+struct idt_ntb_msg {
+	unsigned int in;
+	unsigned int out;
+	unsigned int src;
+};
+
+/*
+ * Descriptor of the IDT PCIe-switch NT-function specific parameters in the
+ * PCI Configuration Space
+ * @bars: BARs related registers
+ * @msgs: Messaging related registers
+ */
+struct idt_ntb_regs {
+	struct idt_ntb_bar bars[IDT_BAR_CNT];
+	struct idt_ntb_msg msgs[IDT_MSG_CNT];
+};
+
+/*
+ * Descriptor of the IDT PCIe-switch port specific parameters in the
+ * Global Configuration Space
+ * @pcicmdsts: PCI command/status register
+ * @pcielctlsts: PCIe link control/status
+ * @ntctl: NT-function control register
+ *
+ * @ctl: Port control register
+ * @sts: Port status register
+ *
+ * @bars: BARs related registers
+ */
+struct idt_ntb_port {
+	unsigned int pcicmdsts;
+	unsigned int pcielctlsts;
+	unsigned int ntctl;
+
+	unsigned int ctl;
+	unsigned int sts;
+
+	struct idt_ntb_bar bars[IDT_BAR_CNT];
+};
+
+/*
+ * Descriptor of the IDT PCIe-switch partition specific parameters.
+ * @ctl: Partition control register in the Global Address Space
+ * @sts: Partition status register in the Global Address Space
+ * @msgctl: Messages control registers
+ */
+struct idt_ntb_part {
+	unsigned int ctl;
+	unsigned int sts;
+	unsigned int msgctl[IDT_MSG_CNT];
+};
+
+#endif /* NTB_HW_IDT_H */
diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.c b/drivers/ntb/hw/intel/ntb_hw_intel.c
index 7b3b6fd63d7d..2557e2c05b90 100644
--- a/drivers/ntb/hw/intel/ntb_hw_intel.c
+++ b/drivers/ntb/hw/intel/ntb_hw_intel.c
@@ -6,6 +6,7 @@
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
@@ -15,6 +16,7 @@
 *
 * Copyright(c) 2012 Intel Corporation. All rights reserved.
 * Copyright (C) 2015 EMC Corporation. All Rights Reserved.
+ * Copyright (C) 2016 T-Platforms. All Rights Reserved.
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -270,12 +272,12 @@ static inline int ndev_db_addr(struct intel_ntb_dev *ndev, if (db_addr) { *db_addr = reg_addr + reg; - dev_dbg(ndev_dev(ndev), "Peer db addr %llx\n", *db_addr); + dev_dbg(&ndev->ntb.pdev->dev, "Peer db addr %llx\n", *db_addr); } if (db_size) { *db_size = ndev->reg->db_size; - dev_dbg(ndev_dev(ndev), "Peer db size %llx\n", *db_size); + dev_dbg(&ndev->ntb.pdev->dev, "Peer db size %llx\n", *db_size); } return 0; @@ -368,7 +370,8 @@ static inline int ndev_spad_addr(struct intel_ntb_dev *ndev, int idx, if (spad_addr) { *spad_addr = reg_addr + reg + (idx << 2); - dev_dbg(ndev_dev(ndev), "Peer spad addr %llx\n", *spad_addr); + dev_dbg(&ndev->ntb.pdev->dev, "Peer spad addr %llx\n", + *spad_addr); } return 0; @@ -409,7 +412,7 @@ static irqreturn_t ndev_interrupt(struct intel_ntb_dev *ndev, int vec) if ((ndev->hwerr_flags & NTB_HWERR_MSIX_VECTOR32_BAD) && (vec == 31)) vec_mask |= ndev->db_link_mask; - dev_dbg(ndev_dev(ndev), "vec %d vec_mask %llx\n", vec, vec_mask); + dev_dbg(&ndev->ntb.pdev->dev, "vec %d vec_mask %llx\n", vec, vec_mask); ndev->last_ts = jiffies; @@ -428,7 +431,7 @@ static irqreturn_t ndev_vec_isr(int irq, void *dev) { struct intel_ntb_vec *nvec = dev; - dev_dbg(ndev_dev(nvec->ndev), "irq: %d nvec->num: %d\n", + dev_dbg(&nvec->ndev->ntb.pdev->dev, "irq: %d nvec->num: %d\n", irq, nvec->num); return ndev_interrupt(nvec->ndev, nvec->num); @@ -438,7 +441,7 @@ static irqreturn_t ndev_irq_isr(int irq, void *dev) { struct intel_ntb_dev *ndev = dev; - return ndev_interrupt(ndev, irq - ndev_pdev(ndev)->irq); + return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq); } static int ndev_init_isr(struct intel_ntb_dev *ndev, @@ -448,7 +451,7 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev, struct pci_dev *pdev; int rc, i, msix_count, node; - pdev = ndev_pdev(ndev); + pdev = ndev->ntb.pdev; node = dev_to_node(&pdev->dev); @@ -487,7 +490,7 @@ static int ndev_init_isr(struct intel_ntb_dev *ndev, goto err_msix_request; } - dev_dbg(ndev_dev(ndev), "Using %d msix interrupts\n", msix_count); + dev_dbg(&pdev->dev, "Using %d msix interrupts\n", msix_count); ndev->db_vec_count = msix_count; ndev->db_vec_shift = msix_shift; return 0; @@ -515,7 +518,7 @@ err_msix_vec_alloc: if (rc) goto err_msi_request; - dev_dbg(ndev_dev(ndev), "Using msi interrupts\n"); + dev_dbg(&pdev->dev, "Using msi interrupts\n"); ndev->db_vec_count = 1; ndev->db_vec_shift = total_shift; return 0; @@ -533,7 +536,7 @@ err_msi_enable: if (rc) goto err_intx_request; - dev_dbg(ndev_dev(ndev), "Using intx interrupts\n"); + dev_dbg(&pdev->dev, "Using intx interrupts\n"); ndev->db_vec_count = 1; ndev->db_vec_shift = total_shift; return 0; @@ -547,7 +550,7 @@ static void ndev_deinit_isr(struct intel_ntb_dev *ndev) struct pci_dev *pdev; int i; - pdev = ndev_pdev(ndev); + pdev = ndev->ntb.pdev; /* Mask all doorbell interrupts */ ndev->db_mask = ndev->db_valid_mask; @@ -744,7 +747,7 @@ static ssize_t ndev_ntb_debugfs_read(struct file *filp, char __user *ubuf, union { u64 v64; u32 v32; u16 v16; u8 v8; } u; ndev = filp->private_data; - pdev = ndev_pdev(ndev); + pdev = ndev->ntb.pdev; mmio = ndev->self_mmio; buf_size = min(count, 0x800ul); @@ -1019,7 +1022,8 @@ static void ndev_init_debugfs(struct intel_ntb_dev *ndev) ndev->debugfs_info = NULL; } else { ndev->debugfs_dir = - debugfs_create_dir(ndev_name(ndev), debugfs_dir); + debugfs_create_dir(pci_name(ndev->ntb.pdev), + 
debugfs_dir); if (!ndev->debugfs_dir) ndev->debugfs_info = NULL; else @@ -1035,20 +1039,26 @@ static void ndev_deinit_debugfs(struct intel_ntb_dev *ndev) debugfs_remove_recursive(ndev->debugfs_dir); } -static int intel_ntb_mw_count(struct ntb_dev *ntb) +static int intel_ntb_mw_count(struct ntb_dev *ntb, int pidx) { + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + return ntb_ndev(ntb)->mw_count; } -static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx, - phys_addr_t *base, - resource_size_t *size, - resource_size_t *align, - resource_size_t *align_size) +static int intel_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, + resource_size_t *addr_align, + resource_size_t *size_align, + resource_size_t *size_max) { struct intel_ntb_dev *ndev = ntb_ndev(ntb); + resource_size_t bar_size, mw_size; int bar; + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + if (idx >= ndev->b2b_idx && !ndev->b2b_off) idx += 1; @@ -1056,24 +1066,26 @@ static int intel_ntb_mw_get_range(struct ntb_dev *ntb, int idx, if (bar < 0) return bar; - if (base) - *base = pci_resource_start(ndev->ntb.pdev, bar) + - (idx == ndev->b2b_idx ? ndev->b2b_off : 0); + bar_size = pci_resource_len(ndev->ntb.pdev, bar); - if (size) - *size = pci_resource_len(ndev->ntb.pdev, bar) - - (idx == ndev->b2b_idx ? ndev->b2b_off : 0); + if (idx == ndev->b2b_idx) + mw_size = bar_size - ndev->b2b_off; + else + mw_size = bar_size; + + if (addr_align) + *addr_align = pci_resource_len(ndev->ntb.pdev, bar); - if (align) - *align = pci_resource_len(ndev->ntb.pdev, bar); + if (size_align) + *size_align = 1; - if (align_size) - *align_size = 1; + if (size_max) + *size_max = mw_size; return 0; } -static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx, +static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, dma_addr_t addr, resource_size_t size) { struct intel_ntb_dev *ndev = ntb_ndev(ntb); @@ -1083,6 +1095,9 @@ static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx, u64 base, limit, reg_val; int bar; + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + if (idx >= ndev->b2b_idx && !ndev->b2b_off) idx += 1; @@ -1171,7 +1186,7 @@ static int intel_ntb_mw_set_trans(struct ntb_dev *ntb, int idx, return 0; } -static int intel_ntb_link_is_up(struct ntb_dev *ntb, +static u64 intel_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed, enum ntb_width *width) { @@ -1206,13 +1221,13 @@ static int intel_ntb_link_enable(struct ntb_dev *ntb, if (ndev->ntb.topo == NTB_TOPO_SEC) return -EINVAL; - dev_dbg(ndev_dev(ndev), + dev_dbg(&ntb->pdev->dev, "Enabling link with max_speed %d max_width %d\n", max_speed, max_width); if (max_speed != NTB_SPEED_AUTO) - dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed); + dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed); if (max_width != NTB_WIDTH_AUTO) - dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width); + dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width); ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK); @@ -1235,7 +1250,7 @@ static int intel_ntb_link_disable(struct ntb_dev *ntb) if (ndev->ntb.topo == NTB_TOPO_SEC) return -EINVAL; - dev_dbg(ndev_dev(ndev), "Disabling link\n"); + dev_dbg(&ntb->pdev->dev, "Disabling link\n"); /* Bring NTB link down */ ntb_cntl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); @@ -1249,6 +1264,36 @@ static int intel_ntb_link_disable(struct ntb_dev *ntb) return 0; } +static int intel_ntb_peer_mw_count(struct ntb_dev *ntb) +{ + /* Numbers of 
inbound and outbound memory windows match */ + return ntb_ndev(ntb)->mw_count; +} + +static int intel_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, + phys_addr_t *base, resource_size_t *size) +{ + struct intel_ntb_dev *ndev = ntb_ndev(ntb); + int bar; + + if (idx >= ndev->b2b_idx && !ndev->b2b_off) + idx += 1; + + bar = ndev_mw_to_bar(ndev, idx); + if (bar < 0) + return bar; + + if (base) + *base = pci_resource_start(ndev->ntb.pdev, bar) + + (idx == ndev->b2b_idx ? ndev->b2b_off : 0); + + if (size) + *size = pci_resource_len(ndev->ntb.pdev, bar) - + (idx == ndev->b2b_idx ? ndev->b2b_off : 0); + + return 0; +} + static int intel_ntb_db_is_unsafe(struct ntb_dev *ntb) { return ndev_ignore_unsafe(ntb_ndev(ntb), NTB_UNSAFE_DB); @@ -1366,30 +1411,30 @@ static int intel_ntb_spad_write(struct ntb_dev *ntb, ndev->self_reg->spad); } -static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int idx, +static int intel_ntb_peer_spad_addr(struct ntb_dev *ntb, int pidx, int sidx, phys_addr_t *spad_addr) { struct intel_ntb_dev *ndev = ntb_ndev(ntb); - return ndev_spad_addr(ndev, idx, spad_addr, ndev->peer_addr, + return ndev_spad_addr(ndev, sidx, spad_addr, ndev->peer_addr, ndev->peer_reg->spad); } -static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int idx) +static u32 intel_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx) { struct intel_ntb_dev *ndev = ntb_ndev(ntb); - return ndev_spad_read(ndev, idx, + return ndev_spad_read(ndev, sidx, ndev->peer_mmio + ndev->peer_reg->spad); } -static int intel_ntb_peer_spad_write(struct ntb_dev *ntb, - int idx, u32 val) +static int intel_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, + int sidx, u32 val) { struct intel_ntb_dev *ndev = ntb_ndev(ntb); - return ndev_spad_write(ndev, idx, val, + return ndev_spad_write(ndev, sidx, val, ndev->peer_mmio + ndev->peer_reg->spad); } @@ -1442,30 +1487,33 @@ static int atom_link_is_err(struct intel_ntb_dev *ndev) static inline enum ntb_topo atom_ppd_topo(struct intel_ntb_dev *ndev, u32 ppd) { + struct device *dev = &ndev->ntb.pdev->dev; + switch (ppd & ATOM_PPD_TOPO_MASK) { case ATOM_PPD_TOPO_B2B_USD: - dev_dbg(ndev_dev(ndev), "PPD %d B2B USD\n", ppd); + dev_dbg(dev, "PPD %d B2B USD\n", ppd); return NTB_TOPO_B2B_USD; case ATOM_PPD_TOPO_B2B_DSD: - dev_dbg(ndev_dev(ndev), "PPD %d B2B DSD\n", ppd); + dev_dbg(dev, "PPD %d B2B DSD\n", ppd); return NTB_TOPO_B2B_DSD; case ATOM_PPD_TOPO_PRI_USD: case ATOM_PPD_TOPO_PRI_DSD: /* accept bogus PRI_DSD */ case ATOM_PPD_TOPO_SEC_USD: case ATOM_PPD_TOPO_SEC_DSD: /* accept bogus SEC_DSD */ - dev_dbg(ndev_dev(ndev), "PPD %d non B2B disabled\n", ppd); + dev_dbg(dev, "PPD %d non B2B disabled\n", ppd); return NTB_TOPO_NONE; } - dev_dbg(ndev_dev(ndev), "PPD %d invalid\n", ppd); + dev_dbg(dev, "PPD %d invalid\n", ppd); return NTB_TOPO_NONE; } static void atom_link_hb(struct work_struct *work) { struct intel_ntb_dev *ndev = hb_ndev(work); + struct device *dev = &ndev->ntb.pdev->dev; unsigned long poll_ts; void __iomem *mmio; u32 status32; @@ -1503,30 +1551,30 @@ static void atom_link_hb(struct work_struct *work) /* Clear AER Errors, write to clear */ status32 = ioread32(mmio + ATOM_ERRCORSTS_OFFSET); - dev_dbg(ndev_dev(ndev), "ERRCORSTS = %x\n", status32); + dev_dbg(dev, "ERRCORSTS = %x\n", status32); status32 &= PCI_ERR_COR_REP_ROLL; iowrite32(status32, mmio + ATOM_ERRCORSTS_OFFSET); /* Clear unexpected electrical idle event in LTSSM, write to clear */ status32 = ioread32(mmio + ATOM_LTSSMERRSTS0_OFFSET); - dev_dbg(ndev_dev(ndev), "LTSSMERRSTS0 = %x\n", status32); + 
dev_dbg(dev, "LTSSMERRSTS0 = %x\n", status32); status32 |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI; iowrite32(status32, mmio + ATOM_LTSSMERRSTS0_OFFSET); /* Clear DeSkew Buffer error, write to clear */ status32 = ioread32(mmio + ATOM_DESKEWSTS_OFFSET); - dev_dbg(ndev_dev(ndev), "DESKEWSTS = %x\n", status32); + dev_dbg(dev, "DESKEWSTS = %x\n", status32); status32 |= ATOM_DESKEWSTS_DBERR; iowrite32(status32, mmio + ATOM_DESKEWSTS_OFFSET); status32 = ioread32(mmio + ATOM_IBSTERRRCRVSTS0_OFFSET); - dev_dbg(ndev_dev(ndev), "IBSTERRRCRVSTS0 = %x\n", status32); + dev_dbg(dev, "IBSTERRRCRVSTS0 = %x\n", status32); status32 &= ATOM_IBIST_ERR_OFLOW; iowrite32(status32, mmio + ATOM_IBSTERRRCRVSTS0_OFFSET); /* Releases the NTB state machine to allow the link to retrain */ status32 = ioread32(mmio + ATOM_LTSSMSTATEJMP_OFFSET); - dev_dbg(ndev_dev(ndev), "LTSSMSTATEJMP = %x\n", status32); + dev_dbg(dev, "LTSSMSTATEJMP = %x\n", status32); status32 &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT; iowrite32(status32, mmio + ATOM_LTSSMSTATEJMP_OFFSET); @@ -1699,11 +1747,11 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, int b2b_bar; u8 bar_sz; - pdev = ndev_pdev(ndev); + pdev = ndev->ntb.pdev; mmio = ndev->self_mmio; if (ndev->b2b_idx == UINT_MAX) { - dev_dbg(ndev_dev(ndev), "not using b2b mw\n"); + dev_dbg(&pdev->dev, "not using b2b mw\n"); b2b_bar = 0; ndev->b2b_off = 0; } else { @@ -1711,24 +1759,21 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, if (b2b_bar < 0) return -EIO; - dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar); + dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar); bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar); - dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size); + dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size); if (b2b_mw_share && ((bar_size >> 1) >= XEON_B2B_MIN_SIZE)) { - dev_dbg(ndev_dev(ndev), - "b2b using first half of bar\n"); + dev_dbg(&pdev->dev, "b2b using first half of bar\n"); ndev->b2b_off = bar_size >> 1; } else if (bar_size >= XEON_B2B_MIN_SIZE) { - dev_dbg(ndev_dev(ndev), - "b2b using whole bar\n"); + dev_dbg(&pdev->dev, "b2b using whole bar\n"); ndev->b2b_off = 0; --ndev->mw_count; } else { - dev_dbg(ndev_dev(ndev), - "b2b bar size is too small\n"); + dev_dbg(&pdev->dev, "b2b bar size is too small\n"); return -EIO; } } @@ -1738,7 +1783,7 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, * except disable or halve the size of the b2b secondary bar. 
*/ pci_read_config_byte(pdev, SKX_IMBAR1SZ_OFFSET, &bar_sz); - dev_dbg(ndev_dev(ndev), "IMBAR1SZ %#x\n", bar_sz); + dev_dbg(&pdev->dev, "IMBAR1SZ %#x\n", bar_sz); if (b2b_bar == 1) { if (ndev->b2b_off) bar_sz -= 1; @@ -1748,10 +1793,10 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, pci_write_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, bar_sz); pci_read_config_byte(pdev, SKX_EMBAR1SZ_OFFSET, &bar_sz); - dev_dbg(ndev_dev(ndev), "EMBAR1SZ %#x\n", bar_sz); + dev_dbg(&pdev->dev, "EMBAR1SZ %#x\n", bar_sz); pci_read_config_byte(pdev, SKX_IMBAR2SZ_OFFSET, &bar_sz); - dev_dbg(ndev_dev(ndev), "IMBAR2SZ %#x\n", bar_sz); + dev_dbg(&pdev->dev, "IMBAR2SZ %#x\n", bar_sz); if (b2b_bar == 2) { if (ndev->b2b_off) bar_sz -= 1; @@ -1761,7 +1806,7 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, pci_write_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, bar_sz); pci_read_config_byte(pdev, SKX_EMBAR2SZ_OFFSET, &bar_sz); - dev_dbg(ndev_dev(ndev), "EMBAR2SZ %#x\n", bar_sz); + dev_dbg(&pdev->dev, "EMBAR2SZ %#x\n", bar_sz); /* SBAR01 hit by first part of the b2b bar */ if (b2b_bar == 0) @@ -1777,12 +1822,12 @@ static int skx_setup_b2b_mw(struct intel_ntb_dev *ndev, bar_addr = addr->bar2_addr64 + (b2b_bar == 1 ? ndev->b2b_off : 0); iowrite64(bar_addr, mmio + SKX_IMBAR1XLMT_OFFSET); bar_addr = ioread64(mmio + SKX_IMBAR1XLMT_OFFSET); - dev_dbg(ndev_dev(ndev), "IMBAR1XLMT %#018llx\n", bar_addr); + dev_dbg(&pdev->dev, "IMBAR1XLMT %#018llx\n", bar_addr); bar_addr = addr->bar4_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); iowrite64(bar_addr, mmio + SKX_IMBAR2XLMT_OFFSET); bar_addr = ioread64(mmio + SKX_IMBAR2XLMT_OFFSET); - dev_dbg(ndev_dev(ndev), "IMBAR2XLMT %#018llx\n", bar_addr); + dev_dbg(&pdev->dev, "IMBAR2XLMT %#018llx\n", bar_addr); /* zero incoming translation addrs */ iowrite64(0, mmio + SKX_IMBAR1XBASE_OFFSET); @@ -1852,7 +1897,7 @@ static int skx_init_dev(struct intel_ntb_dev *ndev) u8 ppd; int rc; - pdev = ndev_pdev(ndev); + pdev = ndev->ntb.pdev; ndev->reg = &skx_reg; @@ -1861,7 +1906,7 @@ static int skx_init_dev(struct intel_ntb_dev *ndev) return -EIO; ndev->ntb.topo = xeon_ppd_topo(ndev, ppd); - dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd, + dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd, ntb_topo_string(ndev->ntb.topo)); if (ndev->ntb.topo == NTB_TOPO_NONE) return -EINVAL; @@ -1885,14 +1930,14 @@ static int intel_ntb3_link_enable(struct ntb_dev *ntb, ndev = container_of(ntb, struct intel_ntb_dev, ntb); - dev_dbg(ndev_dev(ndev), + dev_dbg(&ntb->pdev->dev, "Enabling link with max_speed %d max_width %d\n", max_speed, max_width); if (max_speed != NTB_SPEED_AUTO) - dev_dbg(ndev_dev(ndev), "ignoring max_speed %d\n", max_speed); + dev_dbg(&ntb->pdev->dev, "ignoring max_speed %d\n", max_speed); if (max_width != NTB_WIDTH_AUTO) - dev_dbg(ndev_dev(ndev), "ignoring max_width %d\n", max_width); + dev_dbg(&ntb->pdev->dev, "ignoring max_width %d\n", max_width); ntb_ctl = ioread32(ndev->self_mmio + ndev->reg->ntb_ctl); ntb_ctl &= ~(NTB_CTL_DISABLE | NTB_CTL_CFG_LOCK); @@ -1902,7 +1947,7 @@ static int intel_ntb3_link_enable(struct ntb_dev *ntb, return 0; } -static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx, +static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, dma_addr_t addr, resource_size_t size) { struct intel_ntb_dev *ndev = ntb_ndev(ntb); @@ -1912,6 +1957,9 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx, u64 base, limit, reg_val; int bar; + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + if (idx >= ndev->b2b_idx && !ndev->b2b_off) idx += 1; @@ -1953,7 
+2001,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx, return -EIO; } - dev_dbg(ndev_dev(ndev), "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val); + dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXBASE: %#Lx\n", bar, reg_val); /* set and verify setting the limit */ iowrite64(limit, mmio + limit_reg); @@ -1964,7 +2012,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx, return -EIO; } - dev_dbg(ndev_dev(ndev), "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val); + dev_dbg(&ntb->pdev->dev, "BAR %d IMBARXLMT: %#Lx\n", bar, reg_val); /* setup the EP */ limit_reg = ndev->xlat_reg->bar2_limit + (idx * 0x10) + 0x4000; @@ -1985,7 +2033,7 @@ static int intel_ntb3_mw_set_trans(struct ntb_dev *ntb, int idx, return -EIO; } - dev_dbg(ndev_dev(ndev), "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val); + dev_dbg(&ntb->pdev->dev, "BAR %d EMBARXLMT: %#Lx\n", bar, reg_val); return 0; } @@ -2092,7 +2140,7 @@ static inline enum ntb_topo xeon_ppd_topo(struct intel_ntb_dev *ndev, u8 ppd) static inline int xeon_ppd_bar4_split(struct intel_ntb_dev *ndev, u8 ppd) { if (ppd & XEON_PPD_SPLIT_BAR_MASK) { - dev_dbg(ndev_dev(ndev), "PPD %d split bar\n", ppd); + dev_dbg(&ndev->ntb.pdev->dev, "PPD %d split bar\n", ppd); return 1; } return 0; @@ -2122,11 +2170,11 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, int b2b_bar; u8 bar_sz; - pdev = ndev_pdev(ndev); + pdev = ndev->ntb.pdev; mmio = ndev->self_mmio; if (ndev->b2b_idx == UINT_MAX) { - dev_dbg(ndev_dev(ndev), "not using b2b mw\n"); + dev_dbg(&pdev->dev, "not using b2b mw\n"); b2b_bar = 0; ndev->b2b_off = 0; } else { @@ -2134,24 +2182,21 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, if (b2b_bar < 0) return -EIO; - dev_dbg(ndev_dev(ndev), "using b2b mw bar %d\n", b2b_bar); + dev_dbg(&pdev->dev, "using b2b mw bar %d\n", b2b_bar); bar_size = pci_resource_len(ndev->ntb.pdev, b2b_bar); - dev_dbg(ndev_dev(ndev), "b2b bar size %#llx\n", bar_size); + dev_dbg(&pdev->dev, "b2b bar size %#llx\n", bar_size); if (b2b_mw_share && XEON_B2B_MIN_SIZE <= bar_size >> 1) { - dev_dbg(ndev_dev(ndev), - "b2b using first half of bar\n"); + dev_dbg(&pdev->dev, "b2b using first half of bar\n"); ndev->b2b_off = bar_size >> 1; } else if (XEON_B2B_MIN_SIZE <= bar_size) { - dev_dbg(ndev_dev(ndev), - "b2b using whole bar\n"); + dev_dbg(&pdev->dev, "b2b using whole bar\n"); ndev->b2b_off = 0; --ndev->mw_count; } else { - dev_dbg(ndev_dev(ndev), - "b2b bar size is too small\n"); + dev_dbg(&pdev->dev, "b2b bar size is too small\n"); return -EIO; } } @@ -2163,7 +2208,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, * offsets are not in a consistent order (bar5sz comes after ppd, odd). 
*/ pci_read_config_byte(pdev, XEON_PBAR23SZ_OFFSET, &bar_sz); - dev_dbg(ndev_dev(ndev), "PBAR23SZ %#x\n", bar_sz); + dev_dbg(&pdev->dev, "PBAR23SZ %#x\n", bar_sz); if (b2b_bar == 2) { if (ndev->b2b_off) bar_sz -= 1; @@ -2172,11 +2217,11 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, } pci_write_config_byte(pdev, XEON_SBAR23SZ_OFFSET, bar_sz); pci_read_config_byte(pdev, XEON_SBAR23SZ_OFFSET, &bar_sz); - dev_dbg(ndev_dev(ndev), "SBAR23SZ %#x\n", bar_sz); + dev_dbg(&pdev->dev, "SBAR23SZ %#x\n", bar_sz); if (!ndev->bar4_split) { pci_read_config_byte(pdev, XEON_PBAR45SZ_OFFSET, &bar_sz); - dev_dbg(ndev_dev(ndev), "PBAR45SZ %#x\n", bar_sz); + dev_dbg(&pdev->dev, "PBAR45SZ %#x\n", bar_sz); if (b2b_bar == 4) { if (ndev->b2b_off) bar_sz -= 1; @@ -2185,10 +2230,10 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, } pci_write_config_byte(pdev, XEON_SBAR45SZ_OFFSET, bar_sz); pci_read_config_byte(pdev, XEON_SBAR45SZ_OFFSET, &bar_sz); - dev_dbg(ndev_dev(ndev), "SBAR45SZ %#x\n", bar_sz); + dev_dbg(&pdev->dev, "SBAR45SZ %#x\n", bar_sz); } else { pci_read_config_byte(pdev, XEON_PBAR4SZ_OFFSET, &bar_sz); - dev_dbg(ndev_dev(ndev), "PBAR4SZ %#x\n", bar_sz); + dev_dbg(&pdev->dev, "PBAR4SZ %#x\n", bar_sz); if (b2b_bar == 4) { if (ndev->b2b_off) bar_sz -= 1; @@ -2197,10 +2242,10 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, } pci_write_config_byte(pdev, XEON_SBAR4SZ_OFFSET, bar_sz); pci_read_config_byte(pdev, XEON_SBAR4SZ_OFFSET, &bar_sz); - dev_dbg(ndev_dev(ndev), "SBAR4SZ %#x\n", bar_sz); + dev_dbg(&pdev->dev, "SBAR4SZ %#x\n", bar_sz); pci_read_config_byte(pdev, XEON_PBAR5SZ_OFFSET, &bar_sz); - dev_dbg(ndev_dev(ndev), "PBAR5SZ %#x\n", bar_sz); + dev_dbg(&pdev->dev, "PBAR5SZ %#x\n", bar_sz); if (b2b_bar == 5) { if (ndev->b2b_off) bar_sz -= 1; @@ -2209,7 +2254,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, } pci_write_config_byte(pdev, XEON_SBAR5SZ_OFFSET, bar_sz); pci_read_config_byte(pdev, XEON_SBAR5SZ_OFFSET, &bar_sz); - dev_dbg(ndev_dev(ndev), "SBAR5SZ %#x\n", bar_sz); + dev_dbg(&pdev->dev, "SBAR5SZ %#x\n", bar_sz); } /* SBAR01 hit by first part of the b2b bar */ @@ -2226,7 +2271,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, else return -EIO; - dev_dbg(ndev_dev(ndev), "SBAR01 %#018llx\n", bar_addr); + dev_dbg(&pdev->dev, "SBAR01 %#018llx\n", bar_addr); iowrite64(bar_addr, mmio + XEON_SBAR0BASE_OFFSET); /* Other SBAR are normally hit by the PBAR xlat, except for b2b bar. @@ -2237,26 +2282,26 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); iowrite64(bar_addr, mmio + XEON_SBAR23BASE_OFFSET); bar_addr = ioread64(mmio + XEON_SBAR23BASE_OFFSET); - dev_dbg(ndev_dev(ndev), "SBAR23 %#018llx\n", bar_addr); + dev_dbg(&pdev->dev, "SBAR23 %#018llx\n", bar_addr); if (!ndev->bar4_split) { bar_addr = addr->bar4_addr64 + (b2b_bar == 4 ? ndev->b2b_off : 0); iowrite64(bar_addr, mmio + XEON_SBAR45BASE_OFFSET); bar_addr = ioread64(mmio + XEON_SBAR45BASE_OFFSET); - dev_dbg(ndev_dev(ndev), "SBAR45 %#018llx\n", bar_addr); + dev_dbg(&pdev->dev, "SBAR45 %#018llx\n", bar_addr); } else { bar_addr = addr->bar4_addr32 + (b2b_bar == 4 ? ndev->b2b_off : 0); iowrite32(bar_addr, mmio + XEON_SBAR4BASE_OFFSET); bar_addr = ioread32(mmio + XEON_SBAR4BASE_OFFSET); - dev_dbg(ndev_dev(ndev), "SBAR4 %#010llx\n", bar_addr); + dev_dbg(&pdev->dev, "SBAR4 %#010llx\n", bar_addr); bar_addr = addr->bar5_addr32 + (b2b_bar == 5 ? 
ndev->b2b_off : 0); iowrite32(bar_addr, mmio + XEON_SBAR5BASE_OFFSET); bar_addr = ioread32(mmio + XEON_SBAR5BASE_OFFSET); - dev_dbg(ndev_dev(ndev), "SBAR5 %#010llx\n", bar_addr); + dev_dbg(&pdev->dev, "SBAR5 %#010llx\n", bar_addr); } /* setup incoming bar limits == base addrs (zero length windows) */ @@ -2264,26 +2309,26 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, bar_addr = addr->bar2_addr64 + (b2b_bar == 2 ? ndev->b2b_off : 0); iowrite64(bar_addr, mmio + XEON_SBAR23LMT_OFFSET); bar_addr = ioread64(mmio + XEON_SBAR23LMT_OFFSET); - dev_dbg(ndev_dev(ndev), "SBAR23LMT %#018llx\n", bar_addr); + dev_dbg(&pdev->dev, "SBAR23LMT %#018llx\n", bar_addr); if (!ndev->bar4_split) { bar_addr = addr->bar4_addr64 + (b2b_bar == 4 ? ndev->b2b_off : 0); iowrite64(bar_addr, mmio + XEON_SBAR45LMT_OFFSET); bar_addr = ioread64(mmio + XEON_SBAR45LMT_OFFSET); - dev_dbg(ndev_dev(ndev), "SBAR45LMT %#018llx\n", bar_addr); + dev_dbg(&pdev->dev, "SBAR45LMT %#018llx\n", bar_addr); } else { bar_addr = addr->bar4_addr32 + (b2b_bar == 4 ? ndev->b2b_off : 0); iowrite32(bar_addr, mmio + XEON_SBAR4LMT_OFFSET); bar_addr = ioread32(mmio + XEON_SBAR4LMT_OFFSET); - dev_dbg(ndev_dev(ndev), "SBAR4LMT %#010llx\n", bar_addr); + dev_dbg(&pdev->dev, "SBAR4LMT %#010llx\n", bar_addr); bar_addr = addr->bar5_addr32 + (b2b_bar == 5 ? ndev->b2b_off : 0); iowrite32(bar_addr, mmio + XEON_SBAR5LMT_OFFSET); bar_addr = ioread32(mmio + XEON_SBAR5LMT_OFFSET); - dev_dbg(ndev_dev(ndev), "SBAR5LMT %#05llx\n", bar_addr); + dev_dbg(&pdev->dev, "SBAR5LMT %#05llx\n", bar_addr); } /* zero incoming translation addrs */ @@ -2309,23 +2354,23 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, bar_addr = peer_addr->bar2_addr64; iowrite64(bar_addr, mmio + XEON_PBAR23XLAT_OFFSET); bar_addr = ioread64(mmio + XEON_PBAR23XLAT_OFFSET); - dev_dbg(ndev_dev(ndev), "PBAR23XLAT %#018llx\n", bar_addr); + dev_dbg(&pdev->dev, "PBAR23XLAT %#018llx\n", bar_addr); if (!ndev->bar4_split) { bar_addr = peer_addr->bar4_addr64; iowrite64(bar_addr, mmio + XEON_PBAR45XLAT_OFFSET); bar_addr = ioread64(mmio + XEON_PBAR45XLAT_OFFSET); - dev_dbg(ndev_dev(ndev), "PBAR45XLAT %#018llx\n", bar_addr); + dev_dbg(&pdev->dev, "PBAR45XLAT %#018llx\n", bar_addr); } else { bar_addr = peer_addr->bar4_addr32; iowrite32(bar_addr, mmio + XEON_PBAR4XLAT_OFFSET); bar_addr = ioread32(mmio + XEON_PBAR4XLAT_OFFSET); - dev_dbg(ndev_dev(ndev), "PBAR4XLAT %#010llx\n", bar_addr); + dev_dbg(&pdev->dev, "PBAR4XLAT %#010llx\n", bar_addr); bar_addr = peer_addr->bar5_addr32; iowrite32(bar_addr, mmio + XEON_PBAR5XLAT_OFFSET); bar_addr = ioread32(mmio + XEON_PBAR5XLAT_OFFSET); - dev_dbg(ndev_dev(ndev), "PBAR5XLAT %#010llx\n", bar_addr); + dev_dbg(&pdev->dev, "PBAR5XLAT %#010llx\n", bar_addr); } /* set the translation offset for b2b registers */ @@ -2343,7 +2388,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, return -EIO; /* B2B_XLAT_OFFSET is 64bit, but can only take 32bit writes */ - dev_dbg(ndev_dev(ndev), "B2BXLAT %#018llx\n", bar_addr); + dev_dbg(&pdev->dev, "B2BXLAT %#018llx\n", bar_addr); iowrite32(bar_addr, mmio + XEON_B2B_XLAT_OFFSETL); iowrite32(bar_addr >> 32, mmio + XEON_B2B_XLAT_OFFSETU); @@ -2362,6 +2407,7 @@ static int xeon_setup_b2b_mw(struct intel_ntb_dev *ndev, static int xeon_init_ntb(struct intel_ntb_dev *ndev) { + struct device *dev = &ndev->ntb.pdev->dev; int rc; u32 ntb_ctl; @@ -2377,7 +2423,7 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev) switch (ndev->ntb.topo) { case NTB_TOPO_PRI: if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { - 
dev_err(ndev_dev(ndev), "NTB Primary config disabled\n"); + dev_err(dev, "NTB Primary config disabled\n"); return -EINVAL; } @@ -2395,7 +2441,7 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev) case NTB_TOPO_SEC: if (ndev->hwerr_flags & NTB_HWERR_SDOORBELL_LOCKUP) { - dev_err(ndev_dev(ndev), "NTB Secondary config disabled\n"); + dev_err(dev, "NTB Secondary config disabled\n"); return -EINVAL; } /* use half the spads for the peer */ @@ -2420,18 +2466,17 @@ static int xeon_init_ntb(struct intel_ntb_dev *ndev) ndev->b2b_idx = b2b_mw_idx; if (ndev->b2b_idx >= ndev->mw_count) { - dev_dbg(ndev_dev(ndev), + dev_dbg(dev, "b2b_mw_idx %d invalid for mw_count %u\n", b2b_mw_idx, ndev->mw_count); return -EINVAL; } - dev_dbg(ndev_dev(ndev), - "setting up b2b mw idx %d means %d\n", + dev_dbg(dev, "setting up b2b mw idx %d means %d\n", b2b_mw_idx, ndev->b2b_idx); } else if (ndev->hwerr_flags & NTB_HWERR_B2BDOORBELL_BIT14) { - dev_warn(ndev_dev(ndev), "Reduce doorbell count by 1\n"); + dev_warn(dev, "Reduce doorbell count by 1\n"); ndev->db_count -= 1; } @@ -2472,7 +2517,7 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev) u8 ppd; int rc, mem; - pdev = ndev_pdev(ndev); + pdev = ndev->ntb.pdev; switch (pdev->device) { /* There is a Xeon hardware errata related to writes to SDOORBELL or @@ -2548,14 +2593,14 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev) return -EIO; ndev->ntb.topo = xeon_ppd_topo(ndev, ppd); - dev_dbg(ndev_dev(ndev), "ppd %#x topo %s\n", ppd, + dev_dbg(&pdev->dev, "ppd %#x topo %s\n", ppd, ntb_topo_string(ndev->ntb.topo)); if (ndev->ntb.topo == NTB_TOPO_NONE) return -EINVAL; if (ndev->ntb.topo != NTB_TOPO_SEC) { ndev->bar4_split = xeon_ppd_bar4_split(ndev, ppd); - dev_dbg(ndev_dev(ndev), "ppd %#x bar4_split %d\n", + dev_dbg(&pdev->dev, "ppd %#x bar4_split %d\n", ppd, ndev->bar4_split); } else { /* This is a way for transparent BAR to figure out if we are @@ -2565,7 +2610,7 @@ static int xeon_init_dev(struct intel_ntb_dev *ndev) mem = pci_select_bars(pdev, IORESOURCE_MEM); ndev->bar4_split = hweight32(mem) == HSX_SPLIT_BAR_MW_COUNT + 1; - dev_dbg(ndev_dev(ndev), "mem %#x bar4_split %d\n", + dev_dbg(&pdev->dev, "mem %#x bar4_split %d\n", mem, ndev->bar4_split); } @@ -2602,7 +2647,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev) rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) goto err_dma_mask; - dev_warn(ndev_dev(ndev), "Cannot DMA highmem\n"); + dev_warn(&pdev->dev, "Cannot DMA highmem\n"); } rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); @@ -2610,7 +2655,7 @@ static int intel_ntb_init_pci(struct intel_ntb_dev *ndev, struct pci_dev *pdev) rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc) goto err_dma_mask; - dev_warn(ndev_dev(ndev), "Cannot DMA consistent highmem\n"); + dev_warn(&pdev->dev, "Cannot DMA consistent highmem\n"); } ndev->self_mmio = pci_iomap(pdev, 0, 0); @@ -2636,7 +2681,7 @@ err_pci_enable: static void intel_ntb_deinit_pci(struct intel_ntb_dev *ndev) { - struct pci_dev *pdev = ndev_pdev(ndev); + struct pci_dev *pdev = ndev->ntb.pdev; if (ndev->peer_mmio && ndev->peer_mmio != ndev->self_mmio) pci_iounmap(pdev, ndev->peer_mmio); @@ -2906,8 +2951,10 @@ static const struct intel_ntb_xlat_reg skx_sec_xlat = { /* operations for primary side of local ntb */ static const struct ntb_dev_ops intel_ntb_ops = { .mw_count = intel_ntb_mw_count, - .mw_get_range = intel_ntb_mw_get_range, + .mw_get_align = intel_ntb_mw_get_align, .mw_set_trans = intel_ntb_mw_set_trans, + .peer_mw_count = 
intel_ntb_peer_mw_count, + .peer_mw_get_addr = intel_ntb_peer_mw_get_addr, .link_is_up = intel_ntb_link_is_up, .link_enable = intel_ntb_link_enable, .link_disable = intel_ntb_link_disable, @@ -2932,8 +2979,10 @@ static const struct ntb_dev_ops intel_ntb_ops = { static const struct ntb_dev_ops intel_ntb3_ops = { .mw_count = intel_ntb_mw_count, - .mw_get_range = intel_ntb_mw_get_range, + .mw_get_align = intel_ntb_mw_get_align, .mw_set_trans = intel_ntb3_mw_set_trans, + .peer_mw_count = intel_ntb_peer_mw_count, + .peer_mw_get_addr = intel_ntb_peer_mw_get_addr, .link_is_up = intel_ntb_link_is_up, .link_enable = intel_ntb3_link_enable, .link_disable = intel_ntb_link_disable, @@ -3008,4 +3057,3 @@ static void __exit intel_ntb_pci_driver_exit(void) debugfs_remove_recursive(debugfs_dir); } module_exit(intel_ntb_pci_driver_exit); - diff --git a/drivers/ntb/hw/intel/ntb_hw_intel.h b/drivers/ntb/hw/intel/ntb_hw_intel.h index f2cf8a783f1e..2d6c38afb128 100644 --- a/drivers/ntb/hw/intel/ntb_hw_intel.h +++ b/drivers/ntb/hw/intel/ntb_hw_intel.h @@ -382,9 +382,6 @@ struct intel_ntb_dev { struct dentry *debugfs_info; }; -#define ndev_pdev(ndev) ((ndev)->ntb.pdev) -#define ndev_name(ndev) pci_name(ndev_pdev(ndev)) -#define ndev_dev(ndev) (&ndev_pdev(ndev)->dev) #define ntb_ndev(__ntb) container_of(__ntb, struct intel_ntb_dev, ntb) #define hb_ndev(__work) container_of(__work, struct intel_ntb_dev, \ hb_timer.work) diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c index 2e2530743831..03b80d89b980 100644 --- a/drivers/ntb/ntb.c +++ b/drivers/ntb/ntb.c @@ -5,6 +5,7 @@ * GPL LICENSE SUMMARY * * Copyright (C) 2015 EMC Corporation. All Rights Reserved. + * Copyright (C) 2016 T-Platforms. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,6 +19,7 @@ * BSD LICENSE * * Copyright (C) 2015 EMC Corporation. All Rights Reserved. + * Copyright (C) 2016 T-Platforms. All Rights Reserved. 
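With the ndev_pdev()/ndev_dev() wrappers deleted, ntb_ndev() remains as the only accessor macro in ntb_hw_intel.h, built on container_of(). A minimal userspace sketch of that pattern, with made-up structure fields standing in for the driver's real ones:

#include <stddef.h>
#include <stdio.h>

struct ntb_dev { int id; };                         /* hypothetical fields */
struct intel_ntb_dev { int hwerr_flags; struct ntb_dev ntb; };

/* Recover the containing object from a pointer to an embedded member,
 * the same idiom as the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define ntb_ndev(__ntb) container_of(__ntb, struct intel_ntb_dev, ntb)

int main(void)
{
	struct intel_ntb_dev dev = { .hwerr_flags = 3, .ntb = { .id = 7 } };
	struct ntb_dev *ntb = &dev.ntb;     /* what a callback would receive */

	/* Walk back from the embedded member to the containing object. */
	printf("hwerr_flags=%d id=%d\n", ntb_ndev(ntb)->hwerr_flags, ntb->id);
	return 0;
}

Because the member offset is known at compile time, the accessor costs a single pointer subtraction, which is why the extra ndev_dev()-style wrappers bought nothing over dereferencing ndev->ntb.pdev directly.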
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -191,6 +193,73 @@ void ntb_db_event(struct ntb_dev *ntb, int vector) } EXPORT_SYMBOL(ntb_db_event); +void ntb_msg_event(struct ntb_dev *ntb) +{ + unsigned long irqflags; + + spin_lock_irqsave(&ntb->ctx_lock, irqflags); + { + if (ntb->ctx_ops && ntb->ctx_ops->msg_event) + ntb->ctx_ops->msg_event(ntb->ctx); + } + spin_unlock_irqrestore(&ntb->ctx_lock, irqflags); +} +EXPORT_SYMBOL(ntb_msg_event); + +int ntb_default_port_number(struct ntb_dev *ntb) +{ + switch (ntb->topo) { + case NTB_TOPO_PRI: + case NTB_TOPO_B2B_USD: + return NTB_PORT_PRI_USD; + case NTB_TOPO_SEC: + case NTB_TOPO_B2B_DSD: + return NTB_PORT_SEC_DSD; + default: + break; + } + + return -EINVAL; +} +EXPORT_SYMBOL(ntb_default_port_number); + +int ntb_default_peer_port_count(struct ntb_dev *ntb) +{ + return NTB_DEF_PEER_CNT; +} +EXPORT_SYMBOL(ntb_default_peer_port_count); + +int ntb_default_peer_port_number(struct ntb_dev *ntb, int pidx) +{ + if (pidx != NTB_DEF_PEER_IDX) + return -EINVAL; + + switch (ntb->topo) { + case NTB_TOPO_PRI: + case NTB_TOPO_B2B_USD: + return NTB_PORT_SEC_DSD; + case NTB_TOPO_SEC: + case NTB_TOPO_B2B_DSD: + return NTB_PORT_PRI_USD; + default: + break; + } + + return -EINVAL; +} +EXPORT_SYMBOL(ntb_default_peer_port_number); + +int ntb_default_peer_port_idx(struct ntb_dev *ntb, int port) +{ + int peer_port = ntb_default_peer_port_number(ntb, NTB_DEF_PEER_IDX); + + if (peer_port == -EINVAL || port != peer_port) + return -EINVAL; + + return 0; +} +EXPORT_SYMBOL(ntb_default_peer_port_idx); + static int ntb_probe(struct device *dev) { struct ntb_dev *ntb; diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c index 10e5bf460139..9a03c5871efe 100644 --- a/drivers/ntb/ntb_transport.c +++ b/drivers/ntb/ntb_transport.c @@ -95,6 +95,9 @@ MODULE_PARM_DESC(use_dma, "Use DMA engine to perform large data copy"); static struct dentry *nt_debugfs_dir; +/* Only two-port NTB devices are supported */ +#define PIDX NTB_DEF_PEER_IDX + struct ntb_queue_entry { /* ntb_queue list reference */ struct list_head entry; @@ -670,7 +673,7 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw) if (!mw->virt_addr) return; - ntb_mw_clear_trans(nt->ndev, num_mw); + ntb_mw_clear_trans(nt->ndev, PIDX, num_mw); dma_free_coherent(&pdev->dev, mw->buff_size, mw->virt_addr, mw->dma_addr); mw->xlat_size = 0; @@ -727,7 +730,8 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, } /* Notify HW the memory location of the receive buffer */ - rc = ntb_mw_set_trans(nt->ndev, num_mw, mw->dma_addr, mw->xlat_size); + rc = ntb_mw_set_trans(nt->ndev, PIDX, num_mw, mw->dma_addr, + mw->xlat_size); if (rc) { dev_err(&pdev->dev, "Unable to set mw%d translation", num_mw); ntb_free_mw(nt, num_mw); @@ -858,17 +862,17 @@ static void ntb_transport_link_work(struct work_struct *work) size = max_mw_size; spad = MW0_SZ_HIGH + (i * 2); - ntb_peer_spad_write(ndev, spad, upper_32_bits(size)); + ntb_peer_spad_write(ndev, PIDX, spad, upper_32_bits(size)); spad = MW0_SZ_LOW + (i * 2); - ntb_peer_spad_write(ndev, spad, lower_32_bits(size)); + ntb_peer_spad_write(ndev, PIDX, spad, lower_32_bits(size)); } - ntb_peer_spad_write(ndev, NUM_MWS, nt->mw_count); + ntb_peer_spad_write(ndev, PIDX, NUM_MWS, nt->mw_count); - ntb_peer_spad_write(ndev, NUM_QPS, nt->qp_count); + ntb_peer_spad_write(ndev, PIDX, NUM_QPS, nt->qp_count); - ntb_peer_spad_write(ndev, VERSION, NTB_TRANSPORT_VERSION); +
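ntb_transport_link_work() above publishes each 64-bit memory-window size to the peer through a pair of 32-bit scratchpad registers. A self-contained sketch of the split-and-reassemble arithmetic; the register variables and the example size are invented:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Equivalents of the kernel's upper_32_bits()/lower_32_bits() helpers. */
static uint32_t upper_32_bits(uint64_t v) { return (uint32_t)(v >> 32); }
static uint32_t lower_32_bits(uint64_t v) { return (uint32_t)v; }

/* Two 32-bit "scratchpads" standing in for MW0_SZ_HIGH/MW0_SZ_LOW. */
static uint32_t spad_high, spad_low;

int main(void)
{
	uint64_t size = 0x1ffff0000ULL;   /* example memory-window size */

	/* Publish the 64-bit size through the two 32-bit registers... */
	spad_high = upper_32_bits(size);
	spad_low = lower_32_bits(size);

	/* ...and reassemble it on the peer side. */
	uint64_t peer_size = ((uint64_t)spad_high << 32) | spad_low;

	printf("peer sees %#" PRIx64 "\n", peer_size);
	return peer_size == size ? 0 : 1;
}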
ntb_peer_spad_write(ndev, PIDX, VERSION, NTB_TRANSPORT_VERSION); /* Query the remote side for its info */ val = ntb_spad_read(ndev, VERSION); @@ -944,7 +948,7 @@ static void ntb_qp_link_work(struct work_struct *work) val = ntb_spad_read(nt->ndev, QP_LINKS); - ntb_peer_spad_write(nt->ndev, QP_LINKS, val | BIT(qp->qp_num)); + ntb_peer_spad_write(nt->ndev, PIDX, QP_LINKS, val | BIT(qp->qp_num)); /* query remote spad for qp ready bits */ dev_dbg_ratelimited(&pdev->dev, "Remote QP link status = %x\n", val); @@ -1055,7 +1059,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) int node; int rc, i; - mw_count = ntb_mw_count(ndev); + mw_count = ntb_mw_count(ndev, PIDX); + + if (!ndev->ops->mw_set_trans) { + dev_err(&ndev->dev, "Inbound MW based NTB API is required\n"); + return -EINVAL; + } if (ntb_db_is_unsafe(ndev)) dev_dbg(&ndev->dev, @@ -1064,6 +1073,9 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) dev_dbg(&ndev->dev, "scratchpad is unsafe, proceed anyway...\n"); + if (ntb_peer_port_count(ndev) != NTB_DEF_PEER_CNT) + dev_warn(&ndev->dev, "Multi-port NTB devices unsupported\n"); + node = dev_to_node(&ndev->dev); nt = kzalloc_node(sizeof(*nt), GFP_KERNEL, node); @@ -1094,8 +1106,13 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev) for (i = 0; i < mw_count; i++) { mw = &nt->mw_vec[i]; - rc = ntb_mw_get_range(ndev, i, &mw->phys_addr, &mw->phys_size, - &mw->xlat_align, &mw->xlat_align_size); + rc = ntb_mw_get_align(ndev, PIDX, i, &mw->xlat_align, + &mw->xlat_align_size, NULL); + if (rc) + goto err1; + + rc = ntb_peer_mw_get_addr(ndev, i, &mw->phys_addr, + &mw->phys_size); if (rc) goto err1; @@ -2091,8 +2108,7 @@ void ntb_transport_link_down(struct ntb_transport_qp *qp) val = ntb_spad_read(qp->ndev, QP_LINKS); - ntb_peer_spad_write(qp->ndev, QP_LINKS, - val & ~BIT(qp->qp_num)); + ntb_peer_spad_write(qp->ndev, PIDX, QP_LINKS, val & ~BIT(qp->qp_num)); if (qp->link_is_up) ntb_send_link_down(qp); diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c index 5cab2831ce99..759f772fa00c 100644 --- a/drivers/ntb/test/ntb_perf.c +++ b/drivers/ntb/test/ntb_perf.c @@ -76,6 +76,7 @@ #define DMA_RETRIES 20 #define SZ_4G (1ULL << 32) #define MAX_SEG_ORDER 20 /* no larger than 1M for kmalloc buffer */ +#define PIDX NTB_DEF_PEER_IDX MODULE_LICENSE(DRIVER_LICENSE); MODULE_VERSION(DRIVER_VERSION); @@ -100,6 +101,10 @@ static bool use_dma; /* default to 0 */ module_param(use_dma, bool, 0644); MODULE_PARM_DESC(use_dma, "Using DMA engine to measure performance"); +static bool on_node = true; /* default to 1 */ +module_param(on_node, bool, 0644); +MODULE_PARM_DESC(on_node, "Run threads only on NTB device node (default: true)"); + struct perf_mw { phys_addr_t phys_addr; resource_size_t phys_size; @@ -135,9 +140,6 @@ struct perf_ctx { bool link_is_up; struct delayed_work link_work; wait_queue_head_t link_wq; - struct dentry *debugfs_node_dir; - struct dentry *debugfs_run; - struct dentry *debugfs_threads; u8 perf_threads; /* mutex ensures only one set of threads run at once */ struct mutex run_mutex; @@ -344,6 +346,10 @@ static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src, static bool perf_dma_filter_fn(struct dma_chan *chan, void *node) { + /* Is the channel required to be on the same node as the device? 
*/ + if (!on_node) + return true; + return dev_to_node(&chan->dev->device) == (int)(unsigned long)node; } @@ -361,7 +367,7 @@ static int ntb_perf_thread(void *data) pr_debug("kthread %s starting...\n", current->comm); - node = dev_to_node(&pdev->dev); + node = on_node ? dev_to_node(&pdev->dev) : NUMA_NO_NODE; if (use_dma && !pctx->dma_chan) { dma_cap_mask_t dma_mask; @@ -454,7 +460,7 @@ static void perf_free_mw(struct perf_ctx *perf) if (!mw->virt_addr) return; - ntb_mw_clear_trans(perf->ntb, 0); + ntb_mw_clear_trans(perf->ntb, PIDX, 0); dma_free_coherent(&pdev->dev, mw->buf_size, mw->virt_addr, mw->dma_addr); mw->xlat_size = 0; @@ -490,7 +496,7 @@ static int perf_set_mw(struct perf_ctx *perf, resource_size_t size) mw->buf_size = 0; } - rc = ntb_mw_set_trans(perf->ntb, 0, mw->dma_addr, mw->xlat_size); + rc = ntb_mw_set_trans(perf->ntb, PIDX, 0, mw->dma_addr, mw->xlat_size); if (rc) { dev_err(&perf->ntb->dev, "Unable to set mw0 translation\n"); perf_free_mw(perf); @@ -517,9 +523,9 @@ static void perf_link_work(struct work_struct *work) if (max_mw_size && size > max_mw_size) size = max_mw_size; - ntb_peer_spad_write(ndev, MW_SZ_HIGH, upper_32_bits(size)); - ntb_peer_spad_write(ndev, MW_SZ_LOW, lower_32_bits(size)); - ntb_peer_spad_write(ndev, VERSION, PERF_VERSION); + ntb_peer_spad_write(ndev, PIDX, MW_SZ_HIGH, upper_32_bits(size)); + ntb_peer_spad_write(ndev, PIDX, MW_SZ_LOW, lower_32_bits(size)); + ntb_peer_spad_write(ndev, PIDX, VERSION, PERF_VERSION); /* now read what peer wrote */ val = ntb_spad_read(ndev, VERSION); @@ -561,8 +567,12 @@ static int perf_setup_mw(struct ntb_dev *ntb, struct perf_ctx *perf) mw = &perf->mw; - rc = ntb_mw_get_range(ntb, 0, &mw->phys_addr, &mw->phys_size, - &mw->xlat_align, &mw->xlat_align_size); + rc = ntb_mw_get_align(ntb, PIDX, 0, &mw->xlat_align, + &mw->xlat_align_size, NULL); + if (rc) + return rc; + + rc = ntb_peer_mw_get_addr(ntb, 0, &mw->phys_addr, &mw->phys_size); if (rc) return rc; @@ -677,7 +687,8 @@ static ssize_t debugfs_run_write(struct file *filp, const char __user *ubuf, pr_info("Fix run_order to %u\n", run_order); } - node = dev_to_node(&perf->ntb->pdev->dev); + node = on_node ? 
dev_to_node(&perf->ntb->pdev->dev) + : NUMA_NO_NODE; atomic_set(&perf->tdone, 0); /* launch kernel thread */ @@ -723,34 +734,71 @@ static const struct file_operations ntb_perf_debugfs_run = { static int perf_debugfs_setup(struct perf_ctx *perf) { struct pci_dev *pdev = perf->ntb->pdev; + struct dentry *debugfs_node_dir; + struct dentry *debugfs_run; + struct dentry *debugfs_threads; + struct dentry *debugfs_seg_order; + struct dentry *debugfs_run_order; + struct dentry *debugfs_use_dma; + struct dentry *debugfs_on_node; if (!debugfs_initialized()) return -ENODEV; + /* Assumption: only one NTB device in the system */ if (!perf_debugfs_dir) { perf_debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); if (!perf_debugfs_dir) return -ENODEV; } - perf->debugfs_node_dir = debugfs_create_dir(pci_name(pdev), - perf_debugfs_dir); - if (!perf->debugfs_node_dir) - return -ENODEV; + debugfs_node_dir = debugfs_create_dir(pci_name(pdev), + perf_debugfs_dir); + if (!debugfs_node_dir) + goto err; - perf->debugfs_run = debugfs_create_file("run", S_IRUSR | S_IWUSR, - perf->debugfs_node_dir, perf, - &ntb_perf_debugfs_run); - if (!perf->debugfs_run) - return -ENODEV; + debugfs_run = debugfs_create_file("run", S_IRUSR | S_IWUSR, + debugfs_node_dir, perf, + &ntb_perf_debugfs_run); + if (!debugfs_run) + goto err; - perf->debugfs_threads = debugfs_create_u8("threads", S_IRUSR | S_IWUSR, - perf->debugfs_node_dir, - &perf->perf_threads); - if (!perf->debugfs_threads) - return -ENODEV; + debugfs_threads = debugfs_create_u8("threads", S_IRUSR | S_IWUSR, + debugfs_node_dir, + &perf->perf_threads); + if (!debugfs_threads) + goto err; + + debugfs_seg_order = debugfs_create_u32("seg_order", 0600, + debugfs_node_dir, + &seg_order); + if (!debugfs_seg_order) + goto err; + + debugfs_run_order = debugfs_create_u32("run_order", 0600, + debugfs_node_dir, + &run_order); + if (!debugfs_run_order) + goto err; + + debugfs_use_dma = debugfs_create_bool("use_dma", 0600, + debugfs_node_dir, + &use_dma); + if (!debugfs_use_dma) + goto err; + + debugfs_on_node = debugfs_create_bool("on_node", 0600, + debugfs_node_dir, + &on_node); + if (!debugfs_on_node) + goto err; return 0; + +err: + debugfs_remove_recursive(perf_debugfs_dir); + perf_debugfs_dir = NULL; + return -ENODEV; } static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb) @@ -766,8 +814,15 @@ static int perf_probe(struct ntb_client *client, struct ntb_dev *ntb) return -EIO; } - node = dev_to_node(&pdev->dev); + if (!ntb->ops->mw_set_trans) { + dev_err(&ntb->dev, "Need inbound MW based NTB API\n"); + return -EINVAL; + } + + if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT) + dev_warn(&ntb->dev, "Multi-port NTB devices unsupported\n"); + node = on_node ?
dev_to_node(&pdev->dev) : NUMA_NO_NODE; perf = kzalloc_node(sizeof(*perf), GFP_KERNEL, node); if (!perf) { rc = -ENOMEM; diff --git a/drivers/ntb/test/ntb_pingpong.c b/drivers/ntb/test/ntb_pingpong.c index 435861189d97..938a18bcfc3f 100644 --- a/drivers/ntb/test/ntb_pingpong.c +++ b/drivers/ntb/test/ntb_pingpong.c @@ -90,6 +90,9 @@ static unsigned long db_init = 0x7; module_param(db_init, ulong, 0644); MODULE_PARM_DESC(db_init, "Initial doorbell bits to ring on the peer"); +/* Only two-port NTB devices are supported */ +#define PIDX NTB_DEF_PEER_IDX + struct pp_ctx { struct ntb_dev *ntb; u64 db_bits; @@ -135,7 +138,7 @@ static void pp_ping(unsigned long ctx) "Ping bits %#llx read %#x write %#x\n", db_bits, spad_rd, spad_wr); - ntb_peer_spad_write(pp->ntb, 0, spad_wr); + ntb_peer_spad_write(pp->ntb, PIDX, 0, spad_wr); ntb_peer_db_set(pp->ntb, db_bits); ntb_db_clear_mask(pp->ntb, db_mask); @@ -222,6 +225,12 @@ static int pp_probe(struct ntb_client *client, } } + if (ntb_spad_count(ntb) < 1) { + dev_dbg(&ntb->dev, "not enough scratchpads\n"); + rc = -EINVAL; + goto err_pp; + } + if (ntb_spad_is_unsafe(ntb)) { dev_dbg(&ntb->dev, "scratchpad is unsafe\n"); if (!unsafe) { @@ -230,6 +239,9 @@ static int pp_probe(struct ntb_client *client, } } + if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT) + dev_warn(&ntb->dev, "multi-port NTB is unsupported\n"); + pp = kmalloc(sizeof(*pp), GFP_KERNEL); if (!pp) { rc = -ENOMEM; diff --git a/drivers/ntb/test/ntb_tool.c b/drivers/ntb/test/ntb_tool.c index 61bf2ef87e0e..f002bf48a08d 100644 --- a/drivers/ntb/test/ntb_tool.c +++ b/drivers/ntb/test/ntb_tool.c @@ -119,7 +119,10 @@ MODULE_VERSION(DRIVER_VERSION); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESCRIPTION); -#define MAX_MWS 16 +/* It is rare to have hardware with greater than six MWs */ +#define MAX_MWS 6 +/* Only two-port devices are supported */ +#define PIDX NTB_DEF_PEER_IDX static struct dentry *tool_dbgfs; @@ -459,13 +462,22 @@ static TOOL_FOPS_RDWR(tool_spad_fops, tool_spad_read, tool_spad_write); +static u32 ntb_tool_peer_spad_read(struct ntb_dev *ntb, int sidx) +{ + return ntb_peer_spad_read(ntb, PIDX, sidx); +} + static ssize_t tool_peer_spad_read(struct file *filep, char __user *ubuf, size_t size, loff_t *offp) { struct tool_ctx *tc = filep->private_data; - return tool_spadfn_read(tc, ubuf, size, offp, - tc->ntb->ops->peer_spad_read); + return tool_spadfn_read(tc, ubuf, size, offp, ntb_tool_peer_spad_read); +} + +static int ntb_tool_peer_spad_write(struct ntb_dev *ntb, int sidx, u32 val) +{ + return ntb_peer_spad_write(ntb, PIDX, sidx, val); } static ssize_t tool_peer_spad_write(struct file *filep, const char __user *ubuf, @@ -474,7 +486,7 @@ static ssize_t tool_peer_spad_write(struct file *filep, const char __user *ubuf, struct tool_ctx *tc = filep->private_data; return tool_spadfn_write(tc, ubuf, size, offp, - tc->ntb->ops->peer_spad_write); + ntb_tool_peer_spad_write); } static TOOL_FOPS_RDWR(tool_peer_spad_fops, @@ -668,28 +680,27 @@ static int tool_setup_mw(struct tool_ctx *tc, int idx, size_t req_size) { int rc; struct tool_mw *mw = &tc->mws[idx]; - phys_addr_t base; - resource_size_t size, align, align_size; + resource_size_t size, align_addr, align_size; char buf[16]; if (mw->peer) return 0; - rc = ntb_mw_get_range(tc->ntb, idx, &base, &size, &align, - &align_size); + rc = ntb_mw_get_align(tc->ntb, PIDX, idx, &align_addr, + &align_size, &size); if (rc) return rc; mw->size = min_t(resource_size_t, req_size, size); - mw->size = round_up(mw->size, align); + mw->size =
round_up(mw->size, align_addr); mw->size = round_up(mw->size, align_size); mw->peer = dma_alloc_coherent(&tc->ntb->pdev->dev, mw->size, &mw->peer_dma, GFP_KERNEL); - if (!mw->peer) + if (!mw->peer || !IS_ALIGNED(mw->peer_dma, align_addr)) return -ENOMEM; - rc = ntb_mw_set_trans(tc->ntb, idx, mw->peer_dma, mw->size); + rc = ntb_mw_set_trans(tc->ntb, PIDX, idx, mw->peer_dma, mw->size); if (rc) goto err_free_dma; @@ -716,7 +727,7 @@ static void tool_free_mw(struct tool_ctx *tc, int idx) struct tool_mw *mw = &tc->mws[idx]; if (mw->peer) { - ntb_mw_clear_trans(tc->ntb, idx); + ntb_mw_clear_trans(tc->ntb, PIDX, idx); dma_free_coherent(&tc->ntb->pdev->dev, mw->size, mw->peer, mw->peer_dma); @@ -742,8 +753,9 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep, phys_addr_t base; resource_size_t mw_size; - resource_size_t align; + resource_size_t align_addr; resource_size_t align_size; + resource_size_t max_size; buf_size = min_t(size_t, size, 512); @@ -751,8 +763,9 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep, if (!buf) return -ENOMEM; - ntb_mw_get_range(mw->tc->ntb, mw->idx, - &base, &mw_size, &align, &align_size); + ntb_mw_get_align(mw->tc->ntb, PIDX, mw->idx, + &align_addr, &align_size, &max_size); + ntb_peer_mw_get_addr(mw->tc->ntb, mw->idx, &base, &mw_size); off += scnprintf(buf + off, buf_size - off, "Peer MW %d Information:\n", mw->idx); @@ -767,13 +780,17 @@ static ssize_t tool_peer_mw_trans_read(struct file *filep, off += scnprintf(buf + off, buf_size - off, "Alignment \t%lld\n", - (unsigned long long)align); + (unsigned long long)align_addr); off += scnprintf(buf + off, buf_size - off, "Size Alignment \t%lld\n", (unsigned long long)align_size); off += scnprintf(buf + off, buf_size - off, + "Size Max \t%lld\n", + (unsigned long long)max_size); + + off += scnprintf(buf + off, buf_size - off, "Ready \t%c\n", (mw->peer) ? 
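tool_setup_mw() above rounds the requested buffer size up to both the address alignment and the size granularity reported by ntb_mw_get_align(), then double-checks the DMA address with IS_ALIGNED(). A small demonstration of that arithmetic, assuming power-of-two alignments as the kernel macros do; the example values are arbitrary:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Power-of-two round_up()/IS_ALIGNED(), as in include/linux/kernel.h. */
#define round_up(x, a)		(((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))
#define IS_ALIGNED(x, a)	(((x) & ((uint64_t)(a) - 1)) == 0)

int main(void)
{
	uint64_t req_size = 0x11000;	/* hypothetical requested size */
	uint64_t align_addr = 0x10000;	/* address alignment from hardware */
	uint64_t align_size = 0x1000;	/* size granularity from hardware */

	/* The same two-step rounding tool_setup_mw() performs. */
	uint64_t size = round_up(req_size, align_addr);
	size = round_up(size, align_size);

	printf("size=%#" PRIx64 " addr aligned: %d\n",
	       size, (int)IS_ALIGNED((uint64_t)0x20000, align_addr));
	return 0;	/* prints size=0x20000 addr aligned: 1 */
}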
'Y' : 'N'); @@ -827,8 +844,7 @@ static int tool_init_mw(struct tool_ctx *tc, int idx) phys_addr_t base; int rc; - rc = ntb_mw_get_range(tc->ntb, idx, &base, &mw->win_size, - NULL, NULL); + rc = ntb_peer_mw_get_addr(tc->ntb, idx, &base, &mw->win_size); if (rc) return rc; @@ -913,12 +929,27 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb) int rc; int i; + if (!ntb->ops->mw_set_trans) { + dev_dbg(&ntb->dev, "need inbound MW based NTB API\n"); + rc = -EINVAL; + goto err_tc; + } + + if (ntb_spad_count(ntb) < 1) { + dev_dbg(&ntb->dev, "not enough scratchpads\n"); + rc = -EINVAL; + goto err_tc; + } + if (ntb_db_is_unsafe(ntb)) dev_dbg(&ntb->dev, "doorbell is unsafe\n"); if (ntb_spad_is_unsafe(ntb)) dev_dbg(&ntb->dev, "scratchpad is unsafe\n"); + if (ntb_peer_port_count(ntb) != NTB_DEF_PEER_CNT) + dev_warn(&ntb->dev, "multi-port NTB is unsupported\n"); + tc = kzalloc(sizeof(*tc), GFP_KERNEL); if (!tc) { rc = -ENOMEM; @@ -928,7 +959,7 @@ static int tool_probe(struct ntb_client *self, struct ntb_dev *ntb) tc->ntb = ntb; init_waitqueue_head(&tc->link_wq); - tc->mw_count = min(ntb_mw_count(tc->ntb), MAX_MWS); + tc->mw_count = min(ntb_mw_count(tc->ntb, PIDX), MAX_MWS); for (i = 0; i < tc->mw_count; i++) { rc = tool_init_mw(tc, i); if (rc) diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c index f12d23c49771..345acca576b3 100644 --- a/drivers/nvdimm/blk.c +++ b/drivers/nvdimm/blk.c @@ -106,7 +106,8 @@ static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk, len -= cur_len; dev_offset += cur_len; - bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len); + if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len)) + return -EIO; } return err; @@ -179,16 +180,8 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio) int err = 0, rw; bool do_acct; - /* - * bio_integrity_enabled also checks if the bio already has an - * integrity payload attached. If it does, we *don't* do a - * bio_integrity_prep here - the payload has been generated by - * another kernel subsystem, and we just pass it through. - */ - if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { - bio->bi_status = BLK_STS_IOERR; - goto out; - } + if (!bio_integrity_prep(bio)) + return BLK_QC_T_NONE; bip = bio_integrity(bio); nsblk = q->queuedata; @@ -212,7 +205,6 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio) if (do_acct) nd_iostat_end(bio, start); - out: bio_endio(bio); return BLK_QC_T_NONE; } diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c index 64216dea5278..14323faf8bd9 100644 --- a/drivers/nvdimm/btt.c +++ b/drivers/nvdimm/btt.c @@ -985,7 +985,8 @@ static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip, len -= cur_len; meta_nsoff += cur_len; - bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len); + if (!bvec_iter_advance(bip->bip_vec, &bip->bip_iter, cur_len)) + return -EIO; } return ret; @@ -1203,16 +1204,8 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) int err = 0; bool do_acct; - /* - * bio_integrity_enabled also checks if the bio already has an - * integrity payload attached. If it does, we *don't* do a - * bio_integrity_prep here - the payload has been generated by - * another kernel subsystem, and we just pass it through.
- */ - if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) { - bio->bi_status = BLK_STS_IOERR; - goto out; - } + if (!bio_integrity_prep(bio)) + return BLK_QC_T_NONE; do_acct = nd_iostat_start(bio, &start); bio_for_each_segment(bvec, bio, iter) { @@ -1239,7 +1232,6 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio) if (do_acct) nd_iostat_end(bio, start); -out: bio_endio(bio); return BLK_QC_T_NONE; } diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c index 7cd99b1f8596..75bc08c6838c 100644 --- a/drivers/nvdimm/core.c +++ b/drivers/nvdimm/core.c @@ -421,14 +421,15 @@ static void set_badblock(struct badblocks *bb, sector_t s, int num) static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len) { const unsigned int sector_size = 512; - sector_t start_sector; + sector_t start_sector, end_sector; u64 num_sectors; u32 rem; start_sector = div_u64(ns_offset, sector_size); - num_sectors = div_u64_rem(len, sector_size, &rem); + end_sector = div_u64_rem(ns_offset + len, sector_size, &rem); if (rem) - num_sectors++; + end_sector++; + num_sectors = end_sector - start_sector; if (unlikely(num_sectors > (u64)INT_MAX)) { u64 remaining = num_sectors; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index d70df1d0072d..3b77cfe5aa1e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -131,7 +131,7 @@ void nvme_complete_rq(struct request *req) { if (unlikely(nvme_req(req)->status && nvme_req_needs_retry(req))) { nvme_req(req)->retries++; - blk_mq_requeue_request(req, !blk_mq_queue_stopped(req->q)); + blk_mq_requeue_request(req, true); return; } @@ -336,7 +336,7 @@ static int nvme_get_stream_params(struct nvme_ctrl *ctrl, c.directive.opcode = nvme_admin_directive_recv; c.directive.nsid = cpu_to_le32(nsid); - c.directive.numd = sizeof(*s); + c.directive.numd = cpu_to_le32(sizeof(*s)); c.directive.doper = NVME_DIR_RCV_ST_OP_PARAM; c.directive.dtype = NVME_DIR_STREAMS; @@ -2591,12 +2591,29 @@ static void nvme_release_instance(struct nvme_ctrl *ctrl) spin_unlock(&dev_list_lock); } -void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) +void nvme_stop_ctrl(struct nvme_ctrl *ctrl) { + nvme_stop_keep_alive(ctrl); flush_work(&ctrl->async_event_work); flush_work(&ctrl->scan_work); - nvme_remove_namespaces(ctrl); +} +EXPORT_SYMBOL_GPL(nvme_stop_ctrl); +void nvme_start_ctrl(struct nvme_ctrl *ctrl) +{ + if (ctrl->kato) + nvme_start_keep_alive(ctrl); + + if (ctrl->queue_count > 1) { + nvme_queue_scan(ctrl); + nvme_queue_async_events(ctrl); + nvme_start_queues(ctrl); + } +} +EXPORT_SYMBOL_GPL(nvme_start_ctrl); + +void nvme_uninit_ctrl(struct nvme_ctrl *ctrl) +{ device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance)); spin_lock(&dev_list_lock); @@ -2694,9 +2711,6 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) /* Forcibly unquiesce queues to avoid blocking dispatch */ blk_mq_unquiesce_queue(ctrl->admin_q); - /* Forcibly start all queues to avoid having stuck requests */ - blk_mq_start_hw_queues(ctrl->admin_q); - list_for_each_entry(ns, &ctrl->namespaces, list) { /* * Revalidating a dead namespace sets capacity to 0. This will @@ -2709,16 +2723,6 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) /* Forcibly unquiesce queues to avoid blocking dispatch */ blk_mq_unquiesce_queue(ns->queue); - - /* - * Forcibly start all queues to avoid having stuck requests. - * Note that we must ensure the queues are not stopped - * when the final removal happens. 
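The __add_badblock_range() change above fixes an undercount: rounding up the length alone misses a sector when the range starts mid-sector. Computing the rounded end sector first and subtracting the start sector counts every sector the range touches. A standalone check of the two formulas, with values chosen to show the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t sector_size = 512;
	uint64_t ns_offset = 256, len = 512;	/* straddles two sectors */

	uint64_t start_sector = ns_offset / sector_size;

	/* Old computation: round up the length only. */
	uint64_t old_num = (len + sector_size - 1) / sector_size;

	/* New computation: round up the end offset, then subtract. */
	uint64_t end_sector = (ns_offset + len + sector_size - 1) / sector_size;
	uint64_t new_num = end_sector - start_sector;

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old_num, (unsigned long long)new_num);
	return 0;	/* prints old=1 new=2 */
}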
- */ - blk_mq_start_hw_queues(ns->queue); - - /* draining requests in requeue list */ - blk_mq_kick_requeue_list(ns->queue); } mutex_unlock(&ctrl->namespaces_mutex); } @@ -2787,10 +2791,8 @@ void nvme_start_queues(struct nvme_ctrl *ctrl) struct nvme_ns *ns; mutex_lock(&ctrl->namespaces_mutex); - list_for_each_entry(ns, &ctrl->namespaces, list) { + list_for_each_entry(ns, &ctrl->namespaces, list) blk_mq_unquiesce_queue(ns->queue); - blk_mq_kick_requeue_list(ns->queue); - } mutex_unlock(&ctrl->namespaces_mutex); } EXPORT_SYMBOL_GPL(nvme_start_queues); diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index ed87214fdc0e..d666ada39a9b 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -148,13 +148,10 @@ struct nvme_fc_ctrl { struct device *dev; struct nvme_fc_lport *lport; struct nvme_fc_rport *rport; - u32 queue_count; u32 cnum; u64 association_id; - u64 cap; - struct list_head ctrl_list; /* rport->ctrl_list */ struct blk_mq_tag_set admin_tag_set; @@ -1614,7 +1611,7 @@ nvme_fc_free_io_queues(struct nvme_fc_ctrl *ctrl) { int i; - for (i = 1; i < ctrl->queue_count; i++) + for (i = 1; i < ctrl->ctrl.queue_count; i++) nvme_fc_free_queue(&ctrl->queues[i]); } @@ -1635,10 +1632,10 @@ __nvme_fc_create_hw_queue(struct nvme_fc_ctrl *ctrl, static void nvme_fc_delete_hw_io_queues(struct nvme_fc_ctrl *ctrl) { - struct nvme_fc_queue *queue = &ctrl->queues[ctrl->queue_count - 1]; + struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; int i; - for (i = ctrl->queue_count - 1; i >= 1; i--, queue--) + for (i = ctrl->ctrl.queue_count - 1; i >= 1; i--, queue--) __nvme_fc_delete_hw_queue(ctrl, queue, i); } @@ -1648,7 +1645,7 @@ nvme_fc_create_hw_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) struct nvme_fc_queue *queue = &ctrl->queues[1]; int i, ret; - for (i = 1; i < ctrl->queue_count; i++, queue++) { + for (i = 1; i < ctrl->ctrl.queue_count; i++, queue++) { ret = __nvme_fc_create_hw_queue(ctrl, queue, i, qsize); if (ret) goto delete_queues; @@ -1667,7 +1664,7 @@ nvme_fc_connect_io_queues(struct nvme_fc_ctrl *ctrl, u16 qsize) { int i, ret = 0; - for (i = 1; i < ctrl->queue_count; i++) { + for (i = 1; i < ctrl->ctrl.queue_count; i++) { ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, (qsize / 5)); if (ret) @@ -1685,7 +1682,7 @@ nvme_fc_init_io_queues(struct nvme_fc_ctrl *ctrl) { int i; - for (i = 1; i < ctrl->queue_count; i++) + for (i = 1; i < ctrl->ctrl.queue_count; i++) nvme_fc_init_queue(ctrl, i, ctrl->ctrl.sqsize); } @@ -1706,6 +1703,7 @@ nvme_fc_ctrl_free(struct kref *ref) list_del(&ctrl->ctrl_list); spin_unlock_irqrestore(&ctrl->rport->lock, flags); + blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); blk_cleanup_queue(ctrl->ctrl.admin_q); blk_mq_free_tag_set(&ctrl->admin_tag_set); @@ -1969,10 +1967,9 @@ nvme_fc_start_fcp_op(struct nvme_fc_ctrl *ctrl, struct nvme_fc_queue *queue, if (ret != -EBUSY) return BLK_STS_IOERR; - if (op->rq) { - blk_mq_stop_hw_queues(op->rq->q); - blk_mq_delay_queue(queue->hctx, NVMEFC_QUEUE_DELAY); - } + if (op->rq) + blk_mq_delay_run_hw_queue(queue->hctx, NVMEFC_QUEUE_DELAY); + return BLK_STS_RESOURCE; } @@ -2178,17 +2175,20 @@ static int nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) { struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; + unsigned int nr_io_queues; int ret; - ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); + nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), + ctrl->lport->ops->max_hw_queues); + ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); if (ret) { 
dev_info(ctrl->ctrl.device, "set_queue_count failed: %d\n", ret); return ret; } - ctrl->queue_count = opts->nr_io_queues + 1; - if (!opts->nr_io_queues) + ctrl->ctrl.queue_count = nr_io_queues + 1; + if (!nr_io_queues) return 0; nvme_fc_init_io_queues(ctrl); @@ -2204,7 +2204,7 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) sizeof(struct scatterlist)) + ctrl->lport->ops->fcprqst_priv_sz; ctrl->tag_set.driver_data = ctrl; - ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1; + ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; ctrl->tag_set.timeout = NVME_IO_TIMEOUT; ret = blk_mq_alloc_tag_set(&ctrl->tag_set); @@ -2232,7 +2232,6 @@ nvme_fc_create_io_queues(struct nvme_fc_ctrl *ctrl) out_delete_hw_queues: nvme_fc_delete_hw_io_queues(ctrl); out_cleanup_blk_queue: - nvme_stop_keep_alive(&ctrl->ctrl); blk_cleanup_queue(ctrl->ctrl.connect_q); out_free_tag_set: blk_mq_free_tag_set(&ctrl->tag_set); @@ -2248,17 +2247,21 @@ static int nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl) { struct nvmf_ctrl_options *opts = ctrl->ctrl.opts; + unsigned int nr_io_queues; int ret; - ret = nvme_set_queue_count(&ctrl->ctrl, &opts->nr_io_queues); + nr_io_queues = min(min(opts->nr_io_queues, num_online_cpus()), + ctrl->lport->ops->max_hw_queues); + ret = nvme_set_queue_count(&ctrl->ctrl, &nr_io_queues); if (ret) { dev_info(ctrl->ctrl.device, "set_queue_count failed: %d\n", ret); return ret; } + ctrl->ctrl.queue_count = nr_io_queues + 1; /* check for io queues existing */ - if (ctrl->queue_count == 1) + if (ctrl->ctrl.queue_count == 1) return 0; nvme_fc_init_io_queues(ctrl); @@ -2275,6 +2278,8 @@ nvme_fc_reinit_io_queues(struct nvme_fc_ctrl *ctrl) if (ret) goto out_delete_hw_queues; + blk_mq_update_nr_hw_queues(&ctrl->tag_set, nr_io_queues); + return 0; out_delete_hw_queues: @@ -2316,7 +2321,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) goto out_delete_hw_queue; if (ctrl->ctrl.state != NVME_CTRL_NEW) - blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true); + blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); ret = nvmf_connect_admin_queue(&ctrl->ctrl); if (ret) @@ -2329,7 +2334,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) * prior connection values */ - ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); + ret = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap); if (ret) { dev_err(ctrl->ctrl.device, "prop_get NVME_REG_CAP failed\n"); @@ -2337,9 +2342,9 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) } ctrl->ctrl.sqsize = - min_t(int, NVME_CAP_MQES(ctrl->cap) + 1, ctrl->ctrl.sqsize); + min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap) + 1, ctrl->ctrl.sqsize); - ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); + ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); if (ret) goto out_disconnect_admin_queue; @@ -2360,8 +2365,6 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) goto out_disconnect_admin_queue; } - nvme_start_keep_alive(&ctrl->ctrl); - /* FC-NVME supports normal SGL Data Block Descriptors */ if (opts->queue_size > ctrl->ctrl.maxcmd) { @@ -2381,7 +2384,7 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) * Create the io queues */ - if (ctrl->queue_count > 1) { + if (ctrl->ctrl.queue_count > 1) { if (ctrl->ctrl.state == NVME_CTRL_NEW) ret = nvme_fc_create_io_queues(ctrl); else @@ -2395,17 +2398,12 @@ nvme_fc_create_association(struct nvme_fc_ctrl *ctrl) ctrl->ctrl.nr_reconnects = 0; - if (ctrl->queue_count > 1) { - nvme_start_queues(&ctrl->ctrl); - nvme_queue_scan(&ctrl->ctrl); - nvme_queue_async_events(&ctrl->ctrl); - } + 
nvme_start_ctrl(&ctrl->ctrl); return 0; /* Success */ out_term_aen_ops: nvme_fc_term_aen_ops(ctrl); - nvme_stop_keep_alive(&ctrl->ctrl); out_disconnect_admin_queue: /* send a Disconnect(association) LS to fc-nvme target */ nvme_fc_xmt_disconnect_assoc(ctrl); @@ -2428,8 +2426,6 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) { unsigned long flags; - nvme_stop_keep_alive(&ctrl->ctrl); - spin_lock_irqsave(&ctrl->lock, flags); ctrl->flags |= FCCTRL_TERMIO; ctrl->iocnt = 0; @@ -2447,7 +2443,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) * io requests back to the block layer as part of normal completions * (but with error status). */ - if (ctrl->queue_count > 1) { + if (ctrl->ctrl.queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_fc_terminate_exchange, &ctrl->ctrl); @@ -2470,7 +2466,7 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl) * use blk_mq_tagset_busy_itr() and the transport routine to * terminate the exchanges. */ - blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_fc_terminate_exchange, &ctrl->ctrl); @@ -2511,7 +2507,8 @@ nvme_fc_delete_ctrl_work(struct work_struct *work) cancel_work_sync(&ctrl->ctrl.reset_work); cancel_delayed_work_sync(&ctrl->connect_work); - + nvme_stop_ctrl(&ctrl->ctrl); + nvme_remove_namespaces(&ctrl->ctrl); /* * kill the association on the link side. this will block * waiting for io to terminate @@ -2606,6 +2603,7 @@ nvme_fc_reset_ctrl_work(struct work_struct *work) container_of(work, struct nvme_fc_ctrl, ctrl.reset_work); int ret; + nvme_stop_ctrl(&ctrl->ctrl); /* will block will waiting for io to terminate */ nvme_fc_delete_association(ctrl); @@ -2702,18 +2700,17 @@ nvme_fc_init_ctrl(struct device *dev, struct nvmf_ctrl_options *opts, spin_lock_init(&ctrl->lock); /* io queue count */ - ctrl->queue_count = min_t(unsigned int, + ctrl->ctrl.queue_count = min_t(unsigned int, opts->nr_io_queues, lport->ops->max_hw_queues); - opts->nr_io_queues = ctrl->queue_count; /* so opts has valid value */ - ctrl->queue_count++; /* +1 for admin queue */ + ctrl->ctrl.queue_count++; /* +1 for admin queue */ ctrl->ctrl.sqsize = opts->queue_size - 1; ctrl->ctrl.kato = opts->kato; ret = -ENOMEM; - ctrl->queues = kcalloc(ctrl->queue_count, sizeof(struct nvme_fc_queue), - GFP_KERNEL); + ctrl->queues = kcalloc(ctrl->ctrl.queue_count, + sizeof(struct nvme_fc_queue), GFP_KERNEL); if (!ctrl->queues) goto out_free_ida; diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index d70ff0fdd36b..8f2a168ddc01 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -142,7 +142,9 @@ struct nvme_ctrl { u16 cntlid; u32 ctrl_config; + u32 queue_count; + u64 cap; u32 page_size; u32 max_hw_sectors; u16 oncs; @@ -278,6 +280,8 @@ int nvme_shutdown_ctrl(struct nvme_ctrl *ctrl); int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev, const struct nvme_ctrl_ops *ops, unsigned long quirks); void nvme_uninit_ctrl(struct nvme_ctrl *ctrl); +void nvme_start_ctrl(struct nvme_ctrl *ctrl); +void nvme_stop_ctrl(struct nvme_ctrl *ctrl); void nvme_put_ctrl(struct nvme_ctrl *ctrl); int nvme_init_identify(struct nvme_ctrl *ctrl); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index b7a84c523475..8569ee771269 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -35,7 +35,6 @@ #include "nvme.h" -#define NVME_Q_DEPTH 1024 #define SQ_SIZE(depth) (depth * sizeof(struct nvme_command)) #define 
CQ_SIZE(depth) (depth * sizeof(struct nvme_completion)) @@ -57,6 +56,16 @@ module_param(max_host_mem_size_mb, uint, 0444); MODULE_PARM_DESC(max_host_mem_size_mb, "Maximum Host Memory Buffer (HMB) size per controller (in MiB)"); +static int io_queue_depth_set(const char *val, const struct kernel_param *kp); +static const struct kernel_param_ops io_queue_depth_ops = { + .set = io_queue_depth_set, + .get = param_get_int, +}; + +static int io_queue_depth = 1024; +module_param_cb(io_queue_depth, &io_queue_depth_ops, &io_queue_depth, 0644); +MODULE_PARM_DESC(io_queue_depth, "set io queue depth, should >= 2"); + struct nvme_dev; struct nvme_queue; @@ -74,7 +83,6 @@ struct nvme_dev { struct device *dev; struct dma_pool *prp_page_pool; struct dma_pool *prp_small_pool; - unsigned queue_count; unsigned online_queues; unsigned max_qid; int q_depth; @@ -105,6 +113,17 @@ struct nvme_dev { void **host_mem_desc_bufs; }; +static int io_queue_depth_set(const char *val, const struct kernel_param *kp) +{ + int n = 0, ret; + + ret = kstrtoint(val, 10, &n); + if (ret != 0 || n < 2) + return -EINVAL; + + return param_set_int(val, kp); +} + static inline unsigned int sq_idx(unsigned int qid, u32 stride) { return qid * 2 * stride; @@ -520,7 +539,7 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi) } #endif -static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req) +static blk_status_t nvme_setup_prps(struct nvme_dev *dev, struct request *req) { struct nvme_iod *iod = blk_mq_rq_to_pdu(req); struct dma_pool *pool; @@ -537,7 +556,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req) length -= (page_size - offset); if (length <= 0) - return true; + return BLK_STS_OK; dma_len -= (page_size - offset); if (dma_len) { @@ -550,7 +569,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req) if (length <= page_size) { iod->first_dma = dma_addr; - return true; + return BLK_STS_OK; } nprps = DIV_ROUND_UP(length, page_size); @@ -566,7 +585,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req) if (!prp_list) { iod->first_dma = dma_addr; iod->npages = -1; - return false; + return BLK_STS_RESOURCE; } list[0] = prp_list; iod->first_dma = prp_dma; @@ -576,7 +595,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req) __le64 *old_prp_list = prp_list; prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); if (!prp_list) - return false; + return BLK_STS_RESOURCE; list[iod->npages++] = prp_list; prp_list[0] = old_prp_list[i - 1]; old_prp_list[i - 1] = cpu_to_le64(prp_dma); @@ -590,13 +609,29 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req) break; if (dma_len > 0) continue; - BUG_ON(dma_len < 0); + if (unlikely(dma_len < 0)) + goto bad_sgl; sg = sg_next(sg); dma_addr = sg_dma_address(sg); dma_len = sg_dma_len(sg); } - return true; + return BLK_STS_OK; + + bad_sgl: + if (WARN_ONCE(1, "Invalid SGL for payload:%d nents:%d\n", + blk_rq_payload_bytes(req), iod->nents)) { + for_each_sg(iod->sg, sg, iod->nents, i) { + dma_addr_t phys = sg_phys(sg); + pr_warn("sg[%d] phys_addr:%pad offset:%d length:%d " + "dma_address:%pad dma_length:%d\n", i, &phys, + sg->offset, sg->length, + &sg_dma_address(sg), + sg_dma_len(sg)); + } + } + return BLK_STS_IOERR; + } static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, @@ -618,7 +653,8 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, DMA_ATTR_NO_WARN)) goto out; - if (!nvme_setup_prps(dev, req)) + ret = 
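The new io_queue_depth parameter validates its input before committing it, rejecting anything below 2 (a queue needs at least one submission slot plus the full-detection gap). A userspace sketch of the same validate-then-store shape, with strtol() standing in for kstrtoint() and the kernel_param_ops plumbing simplified away:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int io_queue_depth = 1024;	/* module parameter default */

/* Parse first, validate, and only then commit the new value, mirroring
 * io_queue_depth_set() above. */
static int io_queue_depth_set(const char *val)
{
	char *end;
	long n = strtol(val, &end, 10);

	if (end == val || *end != '\0' || n < 2 || n > INT_MAX)
		return -EINVAL;

	io_queue_depth = (int)n;
	return 0;
}

int main(void)
{
	printf("\"1\" -> %d (depth %d)\n",
	       io_queue_depth_set("1"), io_queue_depth);	/* rejected */
	printf("\"4096\" -> %d (depth %d)\n",
	       io_queue_depth_set("4096"), io_queue_depth);	/* accepted */
	return 0;
}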
nvme_setup_prps(dev, req); + if (ret != BLK_STS_OK) goto out_unmap; ret = BLK_STS_IOERR; @@ -1099,9 +1135,9 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest) { int i; - for (i = dev->queue_count - 1; i >= lowest; i--) { + for (i = dev->ctrl.queue_count - 1; i >= lowest; i--) { struct nvme_queue *nvmeq = dev->queues[i]; - dev->queue_count--; + dev->ctrl.queue_count--; dev->queues[i] = NULL; nvme_free_queue(nvmeq); } @@ -1126,7 +1162,7 @@ static int nvme_suspend_queue(struct nvme_queue *nvmeq) spin_unlock_irq(&nvmeq->q_lock); if (!nvmeq->qid && nvmeq->dev->ctrl.admin_q) - blk_mq_stop_hw_queues(nvmeq->dev->ctrl.admin_q); + blk_mq_quiesce_queue(nvmeq->dev->ctrl.admin_q); pci_free_irq(to_pci_dev(nvmeq->dev->dev), vector, nvmeq); @@ -1145,8 +1181,7 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown) if (shutdown) nvme_shutdown_ctrl(&dev->ctrl); else - nvme_disable_ctrl(&dev->ctrl, lo_hi_readq( - dev->bar + NVME_REG_CAP)); + nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap); spin_lock_irq(&nvmeq->q_lock); nvme_process_cq(nvmeq); @@ -1221,7 +1256,7 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, nvmeq->qid = qid; nvmeq->cq_vector = -1; dev->queues[qid] = nvmeq; - dev->queue_count++; + dev->ctrl.queue_count++; return nvmeq; @@ -1317,7 +1352,7 @@ static void nvme_dev_remove_admin(struct nvme_dev *dev) * user requests may be waiting on a stopped queue. Start the * queue to flush these to completion. */ - blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true); + blk_mq_unquiesce_queue(dev->ctrl.admin_q); blk_cleanup_queue(dev->ctrl.admin_q); blk_mq_free_tag_set(&dev->admin_tagset); } @@ -1354,7 +1389,7 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) return -ENODEV; } } else - blk_mq_start_stopped_hw_queues(dev->ctrl.admin_q, true); + blk_mq_unquiesce_queue(dev->ctrl.admin_q); return 0; } @@ -1385,11 +1420,10 @@ static int nvme_remap_bar(struct nvme_dev *dev, unsigned long size) return 0; } -static int nvme_configure_admin_queue(struct nvme_dev *dev) +static int nvme_pci_configure_admin_queue(struct nvme_dev *dev) { int result; u32 aqa; - u64 cap = lo_hi_readq(dev->bar + NVME_REG_CAP); struct nvme_queue *nvmeq; result = nvme_remap_bar(dev, db_bar_size(dev, 0)); @@ -1397,13 +1431,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) return result; dev->subsystem = readl(dev->bar + NVME_REG_VS) >= NVME_VS(1, 1, 0) ? 
- NVME_CAP_NSSRC(cap) : 0; + NVME_CAP_NSSRC(dev->ctrl.cap) : 0; if (dev->subsystem && (readl(dev->bar + NVME_REG_CSTS) & NVME_CSTS_NSSRO)) writel(NVME_CSTS_NSSRO, dev->bar + NVME_REG_CSTS); - result = nvme_disable_ctrl(&dev->ctrl, cap); + result = nvme_disable_ctrl(&dev->ctrl, dev->ctrl.cap); if (result < 0) return result; @@ -1422,7 +1456,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) lo_hi_writeq(nvmeq->sq_dma_addr, dev->bar + NVME_REG_ASQ); lo_hi_writeq(nvmeq->cq_dma_addr, dev->bar + NVME_REG_ACQ); - result = nvme_enable_ctrl(&dev->ctrl, cap); + result = nvme_enable_ctrl(&dev->ctrl, dev->ctrl.cap); if (result) return result; @@ -1441,7 +1475,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev) unsigned i, max; int ret = 0; - for (i = dev->queue_count; i <= dev->max_qid; i++) { + for (i = dev->ctrl.queue_count; i <= dev->max_qid; i++) { /* vector == qid - 1, match nvme_create_queue */ if (!nvme_alloc_queue(dev, i, dev->q_depth, pci_irq_get_node(to_pci_dev(dev->dev), i - 1))) { @@ -1450,7 +1484,7 @@ static int nvme_create_io_queues(struct nvme_dev *dev) } } - max = min(dev->max_qid, dev->queue_count - 1); + max = min(dev->max_qid, dev->ctrl.queue_count - 1); for (i = dev->online_queues; i <= max; i++) { ret = nvme_create_queue(dev->queues[i], i); if (ret) @@ -1585,9 +1619,10 @@ static void nvme_free_host_mem(struct nvme_dev *dev) static int nvme_alloc_host_mem(struct nvme_dev *dev, u64 min, u64 preferred) { struct nvme_host_mem_buf_desc *descs; - u32 chunk_size, max_entries, i = 0; + u32 chunk_size, max_entries; + int i = 0; void **bufs; - u64 size, tmp; + u64 size = 0, tmp; /* start big and work our way down */ chunk_size = min(preferred, (u64)PAGE_SIZE << MAX_ORDER); @@ -1866,7 +1901,6 @@ static int nvme_dev_add(struct nvme_dev *dev) static int nvme_pci_enable(struct nvme_dev *dev) { - u64 cap; int result = -ENOMEM; struct pci_dev *pdev = to_pci_dev(dev->dev); @@ -1893,10 +1927,11 @@ static int nvme_pci_enable(struct nvme_dev *dev) if (result < 0) return result; - cap = lo_hi_readq(dev->bar + NVME_REG_CAP); + dev->ctrl.cap = lo_hi_readq(dev->bar + NVME_REG_CAP); - dev->q_depth = min_t(int, NVME_CAP_MQES(cap) + 1, NVME_Q_DEPTH); - dev->db_stride = 1 << NVME_CAP_STRIDE(cap); + dev->q_depth = min_t(int, NVME_CAP_MQES(dev->ctrl.cap) + 1, + io_queue_depth); + dev->db_stride = 1 << NVME_CAP_STRIDE(dev->ctrl.cap); dev->dbs = dev->bar + 4096; /* @@ -1908,6 +1943,12 @@ static int nvme_pci_enable(struct nvme_dev *dev) dev_warn(dev->ctrl.device, "detected Apple NVMe controller, " "set queue depth=%u to work around controller resets\n", dev->q_depth); + } else if (pdev->vendor == PCI_VENDOR_ID_SAMSUNG && + (pdev->device == 0xa821 || pdev->device == 0xa822) && + NVME_CAP_MQES(dev->ctrl.cap) == 0) { + dev->q_depth = 64; + dev_err(dev->ctrl.device, "detected PM1725 NVMe controller, " + "set queue depth=%u\n", dev->q_depth); } /* @@ -1996,7 +2037,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) nvme_stop_queues(&dev->ctrl); queues = dev->online_queues - 1; - for (i = dev->queue_count - 1; i > 0; i--) + for (i = dev->ctrl.queue_count - 1; i > 0; i--) nvme_suspend_queue(dev->queues[i]); if (dead) { @@ -2004,7 +2045,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown) * probe, before the admin queue is configured. Thus, * queue_count can be 0 here. 
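nvme_pci_enable() above now derives the queue depth from CAP.MQES clamped by the new module parameter. Per the NVMe specification, MQES occupies CAP bits 15:0 as a zero-based entry count and DSTRD occupies bits 35:32; the CAP value in this sketch is made up:

#include <stdint.h>
#include <stdio.h>

/* Field extraction as in the kernel's NVME_CAP_MQES()/NVME_CAP_STRIDE(). */
#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)

int main(void)
{
	uint64_t cap = 0x000000020000ffffULL;	/* hypothetical controller CAP */
	int io_queue_depth = 1024;		/* module parameter */

	/* MQES is zero-based, hence the +1; clamp to the requested depth. */
	int q_depth = (int)NVME_CAP_MQES(cap) + 1;
	if (q_depth > io_queue_depth)
		q_depth = io_queue_depth;

	/* A controller reporting MQES == 0, like the PM1725 quirk above,
	 * would otherwise be left with a depth of 1. */
	printf("q_depth=%d db_stride=%d\n",
	       q_depth, 1 << NVME_CAP_STRIDE(cap));
	return 0;
}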
*/ - if (dev->queue_count) + if (dev->ctrl.queue_count) nvme_suspend_queue(dev->queues[0]); } else { nvme_disable_io_queues(dev, queues); @@ -2094,7 +2135,7 @@ static void nvme_reset_work(struct work_struct *work) if (result) goto out; - result = nvme_configure_admin_queue(dev); + result = nvme_pci_configure_admin_queue(dev); if (result) goto out; @@ -2133,15 +2174,6 @@ static void nvme_reset_work(struct work_struct *work) goto out; /* - * A controller that can not execute IO typically requires user - * intervention to correct. For such degraded controllers, the driver - * should not submit commands the user did not request, so skip - * registering for asynchronous event notification on this condition. - */ - if (dev->online_queues > 1) - nvme_queue_async_events(&dev->ctrl); - - /* * Keep the controller around but remove all namespaces if we don't have * any working I/O queue. */ @@ -2161,8 +2193,7 @@ static void nvme_reset_work(struct work_struct *work) goto out; } - if (dev->online_queues > 1) - nvme_queue_scan(&dev->ctrl); + nvme_start_ctrl(&dev->ctrl); return; out: @@ -2268,7 +2299,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) result = nvme_dev_map(dev); if (result) - goto free; + goto put_pci; INIT_WORK(&dev->ctrl.reset_work, nvme_reset_work); INIT_WORK(&dev->remove_work, nvme_remove_dead_ctrl_work); @@ -2277,7 +2308,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) result = nvme_setup_prp_pools(dev); if (result) - goto put_pci; + goto unmap; quirks |= check_dell_samsung_bug(pdev); @@ -2294,9 +2325,10 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id) release_pools: nvme_release_prp_pools(dev); + unmap: + nvme_dev_unmap(dev); put_pci: put_device(dev->dev); - nvme_dev_unmap(dev); free: kfree(dev->queues); kfree(dev); @@ -2341,11 +2373,13 @@ static void nvme_remove(struct pci_dev *pdev) } flush_work(&dev->ctrl.reset_work); - nvme_uninit_ctrl(&dev->ctrl); + nvme_stop_ctrl(&dev->ctrl); + nvme_remove_namespaces(&dev->ctrl); nvme_dev_disable(dev, true); nvme_free_host_mem(dev); nvme_dev_remove_admin(dev); nvme_free_queues(dev, 0); + nvme_uninit_ctrl(&dev->ctrl); nvme_release_prp_pools(dev); nvme_dev_unmap(dev); nvme_put_ctrl(&dev->ctrl); @@ -2450,6 +2484,9 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(INTEL, 0x0a54), .driver_data = NVME_QUIRK_STRIPE_SIZE | NVME_QUIRK_DEALLOCATE_ZEROES, }, + { PCI_VDEVICE(INTEL, 0x0a55), + .driver_data = NVME_QUIRK_STRIPE_SIZE | + NVME_QUIRK_DEALLOCATE_ZEROES, }, { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ @@ -2458,6 +2495,10 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE(0x1c5f, 0x0540), /* Memblaze Pblaze4 adapter */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + { PCI_DEVICE(0x144d, 0xa821), /* Samsung PM1725 */ + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, + { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ + .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 6d4119dfbdaa..da04df1af231 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -86,7 +86,7 @@ enum nvme_rdma_queue_flags { struct nvme_rdma_queue { 
struct nvme_rdma_qe *rsp_ring; - u8 sig_count; + atomic_t sig_count; int queue_size; size_t cmnd_capsule_len; struct nvme_rdma_ctrl *ctrl; @@ -103,7 +103,6 @@ struct nvme_rdma_queue { struct nvme_rdma_ctrl { /* read only in the hot path */ struct nvme_rdma_queue *queues; - u32 queue_count; /* other member variables */ struct blk_mq_tag_set tag_set; @@ -119,7 +118,6 @@ struct nvme_rdma_ctrl { struct blk_mq_tag_set admin_tag_set; struct nvme_rdma_device *device; - u64 cap; u32 max_fr_pages; struct sockaddr_storage addr; @@ -274,9 +272,6 @@ static int nvme_rdma_reinit_request(void *data, struct request *rq) struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq); int ret = 0; - if (!req->mr->need_inval) - goto out; - ib_dereg_mr(req->mr); req->mr = ib_alloc_mr(dev->pd, IB_MR_TYPE_MEM_REG, @@ -349,7 +344,7 @@ static int nvme_rdma_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, struct nvme_rdma_ctrl *ctrl = data; struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; - BUG_ON(hctx_idx >= ctrl->queue_count); + BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); hctx->driver_data = queue; return 0; @@ -525,6 +520,7 @@ static int nvme_rdma_init_queue(struct nvme_rdma_ctrl *ctrl, queue->cmnd_capsule_len = sizeof(struct nvme_command); queue->queue_size = queue_size; + atomic_set(&queue->sig_count, 0); queue->cm_id = rdma_create_id(&init_net, nvme_rdma_cm_handler, queue, RDMA_PS_TCP, IB_QPT_RC); @@ -587,7 +583,7 @@ static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl) { int i; - for (i = 1; i < ctrl->queue_count; i++) + for (i = 1; i < ctrl->ctrl.queue_count; i++) nvme_rdma_stop_and_free_queue(&ctrl->queues[i]); } @@ -595,7 +591,7 @@ static int nvme_rdma_connect_io_queues(struct nvme_rdma_ctrl *ctrl) { int i, ret = 0; - for (i = 1; i < ctrl->queue_count; i++) { + for (i = 1; i < ctrl->ctrl.queue_count; i++) { ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) { dev_info(ctrl->ctrl.device, @@ -623,14 +619,14 @@ static int nvme_rdma_init_io_queues(struct nvme_rdma_ctrl *ctrl) if (ret) return ret; - ctrl->queue_count = nr_io_queues + 1; - if (ctrl->queue_count < 2) + ctrl->ctrl.queue_count = nr_io_queues + 1; + if (ctrl->ctrl.queue_count < 2) return 0; dev_info(ctrl->ctrl.device, "creating %d I/O queues.\n", nr_io_queues); - for (i = 1; i < ctrl->queue_count; i++) { + for (i = 1; i < ctrl->ctrl.queue_count; i++) { ret = nvme_rdma_init_queue(ctrl, i, ctrl->ctrl.opts->queue_size); if (ret) { @@ -705,7 +701,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) ++ctrl->ctrl.nr_reconnects; - if (ctrl->queue_count > 1) { + if (ctrl->ctrl.queue_count > 1) { nvme_rdma_free_io_queues(ctrl); ret = blk_mq_reinit_tagset(&ctrl->tag_set); @@ -729,13 +725,11 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); - ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); + ret = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); if (ret) goto requeue; - nvme_start_keep_alive(&ctrl->ctrl); - - if (ctrl->queue_count > 1) { + if (ctrl->ctrl.queue_count > 1) { ret = nvme_rdma_init_io_queues(ctrl); if (ret) goto requeue; @@ -743,16 +737,16 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work) ret = nvme_rdma_connect_io_queues(ctrl); if (ret) goto requeue; + + blk_mq_update_nr_hw_queues(&ctrl->tag_set, + ctrl->ctrl.queue_count - 1); } changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); WARN_ON_ONCE(!changed); ctrl->ctrl.nr_reconnects = 0; - if (ctrl->queue_count > 1) { - nvme_queue_scan(&ctrl->ctrl); - 
nvme_queue_async_events(&ctrl->ctrl); - } + nvme_start_ctrl(&ctrl->ctrl); dev_info(ctrl->ctrl.device, "Successfully reconnected\n"); @@ -770,17 +764,17 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) struct nvme_rdma_ctrl, err_work); int i; - nvme_stop_keep_alive(&ctrl->ctrl); + nvme_stop_ctrl(&ctrl->ctrl); - for (i = 0; i < ctrl->queue_count; i++) + for (i = 0; i < ctrl->ctrl.queue_count; i++) clear_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[i].flags); - if (ctrl->queue_count > 1) + if (ctrl->ctrl.queue_count > 1) nvme_stop_queues(&ctrl->ctrl); - blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); /* We must take care of fastfail/requeue all our inflight requests */ - if (ctrl->queue_count > 1) + if (ctrl->ctrl.queue_count > 1) blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request, &ctrl->ctrl); blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, @@ -790,7 +784,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) * queues are not a live anymore, so restart the queues to fail fast * new IO */ - blk_mq_start_stopped_hw_queues(ctrl->ctrl.admin_q, true); + blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); nvme_start_queues(&ctrl->ctrl); nvme_rdma_reconnect_or_remove(ctrl); @@ -1008,17 +1002,16 @@ static void nvme_rdma_send_done(struct ib_cq *cq, struct ib_wc *wc) nvme_rdma_wr_error(cq, wc, "SEND"); } -static inline int nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue) +/* + * We want to signal completion at least every queue depth/2. This returns the + * largest power of two that is not above half of (queue size + 1) to optimize + * (avoid divisions). + */ +static inline bool nvme_rdma_queue_sig_limit(struct nvme_rdma_queue *queue) { - int sig_limit; + int limit = 1 << ilog2((queue->queue_size + 1) / 2); - /* - * We signal completion every queue depth/2 and also handle the - * degenerated case of a device with queue_depth=1, where we - * would need to signal every message. 
- */ - sig_limit = max(queue->queue_size / 2, 1); - return (++queue->sig_count % sig_limit) == 0; + return (atomic_inc_return(&queue->sig_count) & (limit - 1)) == 0; } static int nvme_rdma_post_send(struct nvme_rdma_queue *queue, @@ -1574,7 +1567,8 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags); - error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); + error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, + &ctrl->ctrl.cap); if (error) { dev_err(ctrl->ctrl.device, "prop_get NVME_REG_CAP failed\n"); @@ -1582,9 +1576,9 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) } ctrl->ctrl.sqsize = - min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize); + min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize); - error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); + error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); if (error) goto out_cleanup_queue; @@ -1601,8 +1595,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl) if (error) goto out_cleanup_queue; - nvme_start_keep_alive(&ctrl->ctrl); - return 0; out_cleanup_queue: @@ -1620,11 +1612,10 @@ out_free_queue: static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl) { - nvme_stop_keep_alive(&ctrl->ctrl); cancel_work_sync(&ctrl->err_work); cancel_delayed_work_sync(&ctrl->reconnect_work); - if (ctrl->queue_count > 1) { + if (ctrl->ctrl.queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request, &ctrl->ctrl); @@ -1634,18 +1625,21 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl) if (test_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[0].flags)) nvme_shutdown_ctrl(&ctrl->ctrl); - blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request, &ctrl->ctrl); + blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); nvme_rdma_destroy_admin_queue(ctrl); } static void __nvme_rdma_remove_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown) { - nvme_uninit_ctrl(&ctrl->ctrl); + nvme_stop_ctrl(&ctrl->ctrl); + nvme_remove_namespaces(&ctrl->ctrl); if (shutdown) nvme_rdma_shutdown_ctrl(ctrl); + nvme_uninit_ctrl(&ctrl->ctrl); if (ctrl->ctrl.tagset) { blk_cleanup_queue(ctrl->ctrl.connect_q); blk_mq_free_tag_set(&ctrl->tag_set); @@ -1707,6 +1701,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) int ret; bool changed; + nvme_stop_ctrl(&ctrl->ctrl); nvme_rdma_shutdown_ctrl(ctrl); ret = nvme_rdma_configure_admin_queue(ctrl); @@ -1716,7 +1711,7 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) goto del_dead_ctrl; } - if (ctrl->queue_count > 1) { + if (ctrl->ctrl.queue_count > 1) { ret = blk_mq_reinit_tagset(&ctrl->tag_set); if (ret) goto del_dead_ctrl; @@ -1728,16 +1723,15 @@ static void nvme_rdma_reset_ctrl_work(struct work_struct *work) ret = nvme_rdma_connect_io_queues(ctrl); if (ret) goto del_dead_ctrl; + + blk_mq_update_nr_hw_queues(&ctrl->tag_set, + ctrl->ctrl.queue_count - 1); } changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); WARN_ON_ONCE(!changed); - if (ctrl->queue_count > 1) { - nvme_start_queues(&ctrl->ctrl); - nvme_queue_scan(&ctrl->ctrl); - nvme_queue_async_events(&ctrl->ctrl); - } + nvme_start_ctrl(&ctrl->ctrl); return; @@ -1785,7 +1779,7 @@ static int nvme_rdma_create_io_queues(struct nvme_rdma_ctrl *ctrl) ctrl->tag_set.cmd_size = sizeof(struct nvme_rdma_request) + SG_CHUNK_SIZE * sizeof(struct scatterlist); 
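The reworked signalling heuristic above replaces a modulo over unserialized state with an atomic counter masked by a power of two no larger than half of (queue size + 1). A compilable sketch of the arithmetic, with ilog2() approximated by a GCC/Clang builtin; atomic_fetch_add() + 1 mimics the kernel's atomic_inc_return():

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int sig_count;

static int ilog2_u32(unsigned int v)	/* floor(log2(v)), v >= 1 */
{
	return 31 - __builtin_clz(v);
}

static bool sig_limit(int queue_size)
{
	int limit = 1 << ilog2_u32((queue_size + 1) / 2);

	/* Masking works for any power of two; for queue_size == 1 the
	 * limit is 1, so every completion is signalled. */
	return ((atomic_fetch_add(&sig_count, 1) + 1) & (limit - 1)) == 0;
}

int main(void)
{
	int signalled = 0;

	for (int i = 0; i < 128; i++)
		signalled += sig_limit(32);	/* limit = 16 */
	printf("signalled %d of 128\n", signalled);	/* prints 8 */
	return 0;
}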
ctrl->tag_set.driver_data = ctrl; - ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1; + ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; ctrl->tag_set.timeout = NVME_IO_TIMEOUT; ret = blk_mq_alloc_tag_set(&ctrl->tag_set); @@ -1863,12 +1857,12 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, INIT_WORK(&ctrl->delete_work, nvme_rdma_del_ctrl_work); INIT_WORK(&ctrl->ctrl.reset_work, nvme_rdma_reset_ctrl_work); - ctrl->queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */ + ctrl->ctrl.queue_count = opts->nr_io_queues + 1; /* +1 for admin queue */ ctrl->ctrl.sqsize = opts->queue_size - 1; ctrl->ctrl.kato = opts->kato; ret = -ENOMEM; - ctrl->queues = kcalloc(ctrl->queue_count, sizeof(*ctrl->queues), + ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues), GFP_KERNEL); if (!ctrl->queues) goto out_uninit_ctrl; @@ -1925,15 +1919,11 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev, list_add_tail(&ctrl->list, &nvme_rdma_ctrl_list); mutex_unlock(&nvme_rdma_ctrl_mutex); - if (opts->nr_io_queues) { - nvme_queue_scan(&ctrl->ctrl); - nvme_queue_async_events(&ctrl->ctrl); - } + nvme_start_ctrl(&ctrl->ctrl); return &ctrl->ctrl; out_remove_admin_queue: - nvme_stop_keep_alive(&ctrl->ctrl); nvme_rdma_destroy_admin_queue(ctrl); out_kfree_queues: kfree(ctrl->queues); diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 35f930db3c02..2d7a98ab53fb 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -168,11 +168,21 @@ out: nvmet_req_complete(req, status); } +static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len) +{ + int len = min(src_len, dst_len); + + memcpy(dst, src, len); + if (dst_len > len) + memset(dst + len, ' ', dst_len - len); +} + static void nvmet_execute_identify_ctrl(struct nvmet_req *req) { struct nvmet_ctrl *ctrl = req->sq->ctrl; struct nvme_id_ctrl *id; u16 status = 0; + const char model[] = "Linux"; id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) { @@ -184,8 +194,10 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req) id->vid = 0; id->ssvid = 0; - memset(id->sn, ' ', sizeof(id->sn)); - snprintf(id->sn, sizeof(id->sn), "%llx", ctrl->serial); + bin2hex(id->sn, &ctrl->subsys->serial, + min(sizeof(ctrl->subsys->serial), sizeof(id->sn) / 2)); + copy_and_pad(id->mn, sizeof(id->mn), model, sizeof(model) - 1); + copy_and_pad(id->fr, sizeof(id->fr), UTS_RELEASE, strlen(UTS_RELEASE)); memset(id->mn, ' ', sizeof(id->mn)); strncpy((char *)id->mn, "Linux", sizeof(id->mn)); diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index a358ecd93e11..0a0067e771f5 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -650,7 +650,7 @@ out_unlock: CONFIGFS_ATTR(nvmet_subsys_, attr_allow_any_host); -static ssize_t nvmet_subsys_version_show(struct config_item *item, +static ssize_t nvmet_subsys_attr_version_show(struct config_item *item, char *page) { struct nvmet_subsys *subsys = to_subsys(item); @@ -666,7 +666,7 @@ static ssize_t nvmet_subsys_version_show(struct config_item *item, (int)NVME_MINOR(subsys->ver)); } -static ssize_t nvmet_subsys_version_store(struct config_item *item, +static ssize_t nvmet_subsys_attr_version_store(struct config_item *item, const char *page, size_t count) { struct nvmet_subsys *subsys = to_subsys(item); @@ -684,11 +684,33 @@ static ssize_t nvmet_subsys_version_store(struct config_item *item, return count; } -CONFIGFS_ATTR(nvmet_subsys_, version); 
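The copy_and_pad() helper introduced above exists because the Identify Controller string fields (sn, mn, fr) are fixed-width ASCII that the NVMe spec requires to be space-padded rather than NUL-terminated; the snprintf()/strncpy() calls it replaces both get that subtly wrong. A minimal userspace sketch of the helper's behaviour (the 40-byte width matches the spec's model-number field; names are illustrative):

#include <stdio.h>
#include <string.h>

static void copy_and_pad(char *dst, int dst_len, const char *src, int src_len)
{
	int len = src_len < dst_len ? src_len : dst_len;

	memcpy(dst, src, len);
	if (dst_len > len)
		memset(dst + len, ' ', dst_len - len);	/* pad, don't terminate */
}

int main(void)
{
	char mn[40];	/* Identify Controller model number field */

	copy_and_pad(mn, sizeof(mn), "Linux", strlen("Linux"));
	printf("[%.*s]\n", (int)sizeof(mn), mn);	/* "Linux" plus 35 spaces */
	return 0;
}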
+CONFIGFS_ATTR(nvmet_subsys_, attr_version); + +static ssize_t nvmet_subsys_attr_serial_show(struct config_item *item, + char *page) +{ + struct nvmet_subsys *subsys = to_subsys(item); + + return snprintf(page, PAGE_SIZE, "%llx\n", subsys->serial); +} + +static ssize_t nvmet_subsys_attr_serial_store(struct config_item *item, + const char *page, size_t count) +{ + struct nvmet_subsys *subsys = to_subsys(item); + + down_write(&nvmet_config_sem); + sscanf(page, "%llx\n", &subsys->serial); + up_write(&nvmet_config_sem); + + return count; +} +CONFIGFS_ATTR(nvmet_subsys_, attr_serial); static struct configfs_attribute *nvmet_subsys_attrs[] = { &nvmet_subsys_attr_attr_allow_any_host, - &nvmet_subsys_attr_version, + &nvmet_subsys_attr_attr_version, + &nvmet_subsys_attr_attr_serial, NULL, }; diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index b5b4ac103748..f4b02bb4a1a8 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -767,9 +767,6 @@ u16 nvmet_alloc_ctrl(const char *subsysnqn, const char *hostnqn, memcpy(ctrl->subsysnqn, subsysnqn, NVMF_NQN_SIZE); memcpy(ctrl->hostnqn, hostnqn, NVMF_NQN_SIZE); - /* generate a random serial number as our controllers are ephemeral: */ - get_random_bytes(&ctrl->serial, sizeof(ctrl->serial)); - kref_init(&ctrl->ref); ctrl->subsys = subsys; @@ -928,6 +925,8 @@ struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn, return NULL; subsys->ver = NVME_VS(1, 3, 0); /* NVMe 1.3.0 */ + /* generate a random serial number as our controllers are ephemeral: */ + get_random_bytes(&subsys->serial, sizeof(subsys->serial)); switch (type) { case NVME_NQN_NVME: diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c index 7692a96c9065..d5801c150b1c 100644 --- a/drivers/nvme/target/fc.c +++ b/drivers/nvme/target/fc.c @@ -1164,18 +1164,24 @@ nvmet_fc_ls_create_association(struct nvmet_fc_tgtport *tgtport, memset(acc, 0, sizeof(*acc)); - if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_assoc_rqst)) + /* + * The FC-NVME spec changed: the padding size for the Create + * Association Cmd descriptor was specified incorrectly, so initiators + * send requests of differing lengths. + * Accept anything of at least the minimum length. Assume the format + * follows the 1.15 spec (with HOSTID reduced to 16 bytes) and ignore + * how long the trailing pad is.
+ */ + if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) ret = VERR_CR_ASSOC_LEN; - else if (rqst->desc_list_len != - fcnvme_lsdesc_len( - sizeof(struct fcnvme_ls_cr_assoc_rqst))) + else if (be32_to_cpu(rqst->desc_list_len) < + FCNVME_LSDESC_CRA_RQST_MIN_LISTLEN) ret = VERR_CR_ASSOC_RQST_LEN; else if (rqst->assoc_cmd.desc_tag != cpu_to_be32(FCNVME_LSDESC_CREATE_ASSOC_CMD)) ret = VERR_CR_ASSOC_CMD; - else if (rqst->assoc_cmd.desc_len != - fcnvme_lsdesc_len( - sizeof(struct fcnvme_lsdesc_cr_assoc_cmd))) + else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < + FCNVME_LSDESC_CRA_CMD_DESC_MIN_DESCLEN) ret = VERR_CR_ASSOC_CMD_LEN; else if (!rqst->assoc_cmd.ersp_ratio || (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c index 40128793e613..3b4d47a6abdb 100644 --- a/drivers/nvme/target/io-cmd.c +++ b/drivers/nvme/target/io-cmd.c @@ -85,7 +85,7 @@ static void nvmet_execute_rw(struct nvmet_req *req) bio_set_op_attrs(bio, op, op_flags); bio_chain(bio, prev); - cookie = submit_bio(prev); + submit_bio(prev); } sector += sg->length >> 9; diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c index 5f55c683b338..717ed7ddb2f6 100644 --- a/drivers/nvme/target/loop.c +++ b/drivers/nvme/target/loop.c @@ -44,12 +44,10 @@ struct nvme_loop_iod { struct nvme_loop_ctrl { struct nvme_loop_queue *queues; - u32 queue_count; struct blk_mq_tag_set admin_tag_set; struct list_head list; - u64 cap; struct blk_mq_tag_set tag_set; struct nvme_loop_iod async_event_iod; struct nvme_ctrl ctrl; @@ -241,7 +239,7 @@ static int nvme_loop_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, struct nvme_loop_ctrl *ctrl = data; struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1]; - BUG_ON(hctx_idx >= ctrl->queue_count); + BUG_ON(hctx_idx >= ctrl->ctrl.queue_count); hctx->driver_data = queue; return 0; @@ -307,7 +305,7 @@ static void nvme_loop_destroy_io_queues(struct nvme_loop_ctrl *ctrl) { int i; - for (i = 1; i < ctrl->queue_count; i++) + for (i = 1; i < ctrl->ctrl.queue_count; i++) nvmet_sq_destroy(&ctrl->queues[i].nvme_sq); } @@ -330,7 +328,7 @@ static int nvme_loop_init_io_queues(struct nvme_loop_ctrl *ctrl) if (ret) goto out_destroy_queues; - ctrl->queue_count++; + ctrl->ctrl.queue_count++; } return 0; @@ -344,7 +342,7 @@ static int nvme_loop_connect_io_queues(struct nvme_loop_ctrl *ctrl) { int i, ret; - for (i = 1; i < ctrl->queue_count; i++) { + for (i = 1; i < ctrl->ctrl.queue_count; i++) { ret = nvmf_connect_io_queue(&ctrl->ctrl, i); if (ret) return ret; @@ -372,7 +370,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) error = nvmet_sq_init(&ctrl->queues[0].nvme_sq); if (error) return error; - ctrl->queue_count = 1; + ctrl->ctrl.queue_count = 1; error = blk_mq_alloc_tag_set(&ctrl->admin_tag_set); if (error) @@ -388,7 +386,7 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) if (error) goto out_cleanup_queue; - error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->cap); + error = nvmf_reg_read64(&ctrl->ctrl, NVME_REG_CAP, &ctrl->ctrl.cap); if (error) { dev_err(ctrl->ctrl.device, "prop_get NVME_REG_CAP failed\n"); @@ -396,9 +394,9 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) } ctrl->ctrl.sqsize = - min_t(int, NVME_CAP_MQES(ctrl->cap), ctrl->ctrl.sqsize); + min_t(int, NVME_CAP_MQES(ctrl->ctrl.cap), ctrl->ctrl.sqsize); - error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->cap); + error = nvme_enable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap); if (error) goto out_cleanup_queue; 
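Both the RDMA and loop transports above move the cached CAP register into the shared struct nvme_ctrl and keep clamping the requested queue depth to the controller's MQES field (bits 15:0 of CAP, a zero-based maximum). A hedged sketch of that clamping; the mask mirrors the kernel's NVME_CAP_MQES() definition, and the CAP value here is invented for illustration:

#include <stdint.h>
#include <stdio.h>

#define NVME_CAP_MQES(cap)	((cap) & 0xffff)

int main(void)
{
	uint64_t cap = 0x201e0103ff;	/* example: MQES = 0x3ff (1024 entries) */
	int sqsize = 128 - 1;		/* requested queue_size, zero-based */

	/* Never ask for a deeper queue than the controller advertises. */
	if (sqsize > (int)NVME_CAP_MQES(cap))
		sqsize = (int)NVME_CAP_MQES(cap);

	printf("sqsize = %d\n", sqsize);	/* stays 127, well under MQES */
	return 0;
}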
@@ -409,8 +407,6 @@ static int nvme_loop_configure_admin_queue(struct nvme_loop_ctrl *ctrl) if (error) goto out_cleanup_queue; - nvme_start_keep_alive(&ctrl->ctrl); - return 0; out_cleanup_queue: @@ -424,9 +420,7 @@ out_free_sq: static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) { - nvme_stop_keep_alive(&ctrl->ctrl); - - if (ctrl->queue_count > 1) { + if (ctrl->ctrl.queue_count > 1) { nvme_stop_queues(&ctrl->ctrl); blk_mq_tagset_busy_iter(&ctrl->tag_set, nvme_cancel_request, &ctrl->ctrl); @@ -436,9 +430,10 @@ static void nvme_loop_shutdown_ctrl(struct nvme_loop_ctrl *ctrl) if (ctrl->ctrl.state == NVME_CTRL_LIVE) nvme_shutdown_ctrl(&ctrl->ctrl); - blk_mq_stop_hw_queues(ctrl->ctrl.admin_q); + blk_mq_quiesce_queue(ctrl->ctrl.admin_q); blk_mq_tagset_busy_iter(&ctrl->admin_tag_set, nvme_cancel_request, &ctrl->ctrl); + blk_mq_unquiesce_queue(ctrl->ctrl.admin_q); nvme_loop_destroy_admin_queue(ctrl); } @@ -447,8 +442,10 @@ static void nvme_loop_del_ctrl_work(struct work_struct *work) struct nvme_loop_ctrl *ctrl = container_of(work, struct nvme_loop_ctrl, delete_work); - nvme_uninit_ctrl(&ctrl->ctrl); + nvme_stop_ctrl(&ctrl->ctrl); + nvme_remove_namespaces(&ctrl->ctrl); nvme_loop_shutdown_ctrl(ctrl); + nvme_uninit_ctrl(&ctrl->ctrl); nvme_put_ctrl(&ctrl->ctrl); } @@ -496,6 +493,7 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) bool changed; int ret; + nvme_stop_ctrl(&ctrl->ctrl); nvme_loop_shutdown_ctrl(ctrl); ret = nvme_loop_configure_admin_queue(ctrl); @@ -510,13 +508,13 @@ static void nvme_loop_reset_ctrl_work(struct work_struct *work) if (ret) goto out_destroy_io; + blk_mq_update_nr_hw_queues(&ctrl->tag_set, + ctrl->ctrl.queue_count - 1); + changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_LIVE); WARN_ON_ONCE(!changed); - nvme_queue_scan(&ctrl->ctrl); - nvme_queue_async_events(&ctrl->ctrl); - - nvme_start_queues(&ctrl->ctrl); + nvme_start_ctrl(&ctrl->ctrl); return; @@ -559,7 +557,7 @@ static int nvme_loop_create_io_queues(struct nvme_loop_ctrl *ctrl) ctrl->tag_set.cmd_size = sizeof(struct nvme_loop_iod) + SG_CHUNK_SIZE * sizeof(struct scatterlist); ctrl->tag_set.driver_data = ctrl; - ctrl->tag_set.nr_hw_queues = ctrl->queue_count - 1; + ctrl->tag_set.nr_hw_queues = ctrl->ctrl.queue_count - 1; ctrl->tag_set.timeout = NVME_IO_TIMEOUT; ctrl->ctrl.tagset = &ctrl->tag_set; @@ -651,10 +649,7 @@ static struct nvme_ctrl *nvme_loop_create_ctrl(struct device *dev, list_add_tail(&ctrl->list, &nvme_loop_ctrl_list); mutex_unlock(&nvme_loop_ctrl_mutex); - if (opts->nr_io_queues) { - nvme_queue_scan(&ctrl->ctrl); - nvme_queue_async_events(&ctrl->ctrl); - } + nvme_start_ctrl(&ctrl->ctrl); return &ctrl->ctrl; diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 747bbdb4f9c6..e3b244c7e443 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -112,7 +112,6 @@ struct nvmet_ctrl { struct mutex lock; u64 cap; - u64 serial; u32 cc; u32 csts; @@ -152,6 +151,7 @@ struct nvmet_subsys { u16 max_qid; u64 ver; + u64 serial; char *subsysnqn; struct config_group group; diff --git a/drivers/nvmem/rockchip-efuse.c b/drivers/nvmem/rockchip-efuse.c index a0d4ede9b8fc..63e3eb55f3ac 100644 --- a/drivers/nvmem/rockchip-efuse.c +++ b/drivers/nvmem/rockchip-efuse.c @@ -170,7 +170,7 @@ static const struct of_device_id rockchip_efuse_match[] = { .data = (void *)&rockchip_rk3288_efuse_read, }, { - .compatible = "rockchip,rk322x-efuse", + .compatible = "rockchip,rk3228-efuse", .data = (void *)&rockchip_rk3288_efuse_read, }, { diff --git 
a/drivers/of/property.c b/drivers/of/property.c index 07c7c36c5ca8..eda50b4be934 100644 --- a/drivers/of/property.c +++ b/drivers/of/property.c @@ -804,3 +804,151 @@ struct device_node *of_graph_get_remote_node(const struct device_node *node, return remote; } EXPORT_SYMBOL(of_graph_get_remote_node); + +static void of_fwnode_get(struct fwnode_handle *fwnode) +{ + of_node_get(to_of_node(fwnode)); +} + +static void of_fwnode_put(struct fwnode_handle *fwnode) +{ + of_node_put(to_of_node(fwnode)); +} + +static bool of_fwnode_device_is_available(struct fwnode_handle *fwnode) +{ + return of_device_is_available(to_of_node(fwnode)); +} + +static bool of_fwnode_property_present(struct fwnode_handle *fwnode, + const char *propname) +{ + return of_property_read_bool(to_of_node(fwnode), propname); +} + +static int of_fwnode_property_read_int_array(struct fwnode_handle *fwnode, + const char *propname, + unsigned int elem_size, void *val, + size_t nval) +{ + struct device_node *node = to_of_node(fwnode); + + if (!val) + return of_property_count_elems_of_size(node, propname, + elem_size); + + switch (elem_size) { + case sizeof(u8): + return of_property_read_u8_array(node, propname, val, nval); + case sizeof(u16): + return of_property_read_u16_array(node, propname, val, nval); + case sizeof(u32): + return of_property_read_u32_array(node, propname, val, nval); + case sizeof(u64): + return of_property_read_u64_array(node, propname, val, nval); + } + + return -ENXIO; +} + +static int of_fwnode_property_read_string_array(struct fwnode_handle *fwnode, + const char *propname, + const char **val, size_t nval) +{ + struct device_node *node = to_of_node(fwnode); + + return val ? + of_property_read_string_array(node, propname, val, nval) : + of_property_count_strings(node, propname); +} + +static struct fwnode_handle *of_fwnode_get_parent(struct fwnode_handle *fwnode) +{ + return of_fwnode_handle(of_get_parent(to_of_node(fwnode))); +} + +static struct fwnode_handle * +of_fwnode_get_next_child_node(struct fwnode_handle *fwnode, + struct fwnode_handle *child) +{ + return of_fwnode_handle(of_get_next_available_child(to_of_node(fwnode), + to_of_node(child))); +} + +static struct fwnode_handle * +of_fwnode_get_named_child_node(struct fwnode_handle *fwnode, + const char *childname) +{ + struct device_node *node = to_of_node(fwnode); + struct device_node *child; + + for_each_available_child_of_node(node, child) + if (!of_node_cmp(child->name, childname)) + return of_fwnode_handle(child); + + return NULL; +} + +static struct fwnode_handle * +of_fwnode_graph_get_next_endpoint(struct fwnode_handle *fwnode, + struct fwnode_handle *prev) +{ + return of_fwnode_handle(of_graph_get_next_endpoint(to_of_node(fwnode), + to_of_node(prev))); +} + +static struct fwnode_handle * +of_fwnode_graph_get_remote_endpoint(struct fwnode_handle *fwnode) +{ + return of_fwnode_handle(of_parse_phandle(to_of_node(fwnode), + "remote-endpoint", 0)); +} + +static struct fwnode_handle * +of_fwnode_graph_get_port_parent(struct fwnode_handle *fwnode) +{ + struct device_node *np; + + /* Get the parent of the port */ + np = of_get_next_parent(to_of_node(fwnode)); + if (!np) + return NULL; + + /* Is this the "ports" node? If not, it's the port parent. 
*/ + if (of_node_cmp(np->name, "ports")) + return of_fwnode_handle(np); + + return of_fwnode_handle(of_get_next_parent(np)); +} + +static int of_fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode, + struct fwnode_endpoint *endpoint) +{ + struct device_node *node = to_of_node(fwnode); + struct device_node *port_node = of_get_parent(node); + + endpoint->local_fwnode = fwnode; + + of_property_read_u32(port_node, "reg", &endpoint->port); + of_property_read_u32(node, "reg", &endpoint->id); + + of_node_put(port_node); + + return 0; +} + +const struct fwnode_operations of_fwnode_ops = { + .get = of_fwnode_get, + .put = of_fwnode_put, + .device_is_available = of_fwnode_device_is_available, + .property_present = of_fwnode_property_present, + .property_read_int_array = of_fwnode_property_read_int_array, + .property_read_string_array = of_fwnode_property_read_string_array, + .get_parent = of_fwnode_get_parent, + .get_next_child_node = of_fwnode_get_next_child_node, + .get_named_child_node = of_fwnode_get_named_child_node, + .graph_get_next_endpoint = of_fwnode_graph_get_next_endpoint, + .graph_get_remote_endpoint = of_fwnode_graph_get_remote_endpoint, + .graph_get_port_parent = of_fwnode_graph_get_port_parent, + .graph_parse_endpoint = of_fwnode_graph_parse_endpoint, +}; diff --git a/drivers/pci/host/pcie-rockchip.c b/drivers/pci/host/pcie-rockchip.c index 5acf8694fb23..7bb9870f6d8c 100644 --- a/drivers/pci/host/pcie-rockchip.c +++ b/drivers/pci/host/pcie-rockchip.c @@ -1483,7 +1483,7 @@ static int rockchip_pcie_probe(struct platform_device *pdev) bridge->swizzle_irq = pci_common_swizzle; err = pci_scan_root_bus_bridge(bridge); - if (!err) + if (err < 0) goto err_free_res; bus = bridge->bus; diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 607f677f48d2..d51e8738f9c2 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -511,6 +511,7 @@ static int pci_restore_standard_config(struct pci_dev *pci_dev) } pci_restore_state(pci_dev); + pci_pme_restore(pci_dev); return 0; } @@ -522,6 +523,7 @@ static void pci_pm_default_resume_early(struct pci_dev *pci_dev) { pci_power_up(pci_dev); pci_restore_state(pci_dev); + pci_pme_restore(pci_dev); pci_fixup_device(pci_fixup_resume_early, pci_dev); } diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index d88edf5c563b..af0cc3456dc1 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1801,7 +1801,11 @@ static void __pci_pme_active(struct pci_dev *dev, bool enable) pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr); } -static void pci_pme_restore(struct pci_dev *dev) +/** + * pci_pme_restore - Restore PME configuration after config space restore. + * @dev: PCI device to update. + */ +void pci_pme_restore(struct pci_dev *dev) { u16 pmcsr; @@ -1811,6 +1815,7 @@ static void pci_pme_restore(struct pci_dev *dev) pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr); if (dev->wakeup_prepared) { pmcsr |= PCI_PM_CTRL_PME_ENABLE; + pmcsr &= ~PCI_PM_CTRL_PME_STATUS; } else { pmcsr &= ~PCI_PM_CTRL_PME_ENABLE; pmcsr |= PCI_PM_CTRL_PME_STATUS; @@ -1907,14 +1912,9 @@ int pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable) { int ret = 0; - /* - * Don't do the same thing twice in a row for one device, but restore - * PME Enable in case it has been updated by config space restoration. - */ - if (!!enable == !!dev->wakeup_prepared) { - pci_pme_restore(dev); + /* Don't do the same thing twice in a row for one device. 
*/ + if (!!enable == !!dev->wakeup_prepared) return 0; - } /* * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 03e3d0285aea..22e061738c6f 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -71,6 +71,7 @@ void pci_power_up(struct pci_dev *dev); void pci_disable_enabled_device(struct pci_dev *dev); int pci_finish_runtime_suspend(struct pci_dev *dev); int __pci_pme_wakeup(struct pci_dev *dev, void *ign); +void pci_pme_restore(struct pci_dev *dev); bool pci_dev_keep_suspended(struct pci_dev *dev); void pci_dev_complete_resume(struct pci_dev *pci_dev); void pci_config_pm_runtime_get(struct pci_dev *dev); diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c index 80e58d25006d..fafdb165dd2e 100644 --- a/drivers/pci/pcie/pme.c +++ b/drivers/pci/pcie/pme.c @@ -40,17 +40,11 @@ static int __init pcie_pme_setup(char *str) } __setup("pcie_pme=", pcie_pme_setup); -enum pme_suspend_level { - PME_SUSPEND_NONE = 0, - PME_SUSPEND_WAKEUP, - PME_SUSPEND_NOIRQ, -}; - struct pcie_pme_service_data { spinlock_t lock; struct pcie_device *srv; struct work_struct work; - enum pme_suspend_level suspend_level; + bool noirq; /* If set, keep the PME interrupt disabled. */ }; /** @@ -228,7 +222,7 @@ static void pcie_pme_work_fn(struct work_struct *work) spin_lock_irq(&data->lock); for (;;) { - if (data->suspend_level != PME_SUSPEND_NONE) + if (data->noirq) break; pcie_capability_read_dword(port, PCI_EXP_RTSTA, &rtsta); @@ -255,7 +249,7 @@ static void pcie_pme_work_fn(struct work_struct *work) spin_lock_irq(&data->lock); } - if (data->suspend_level == PME_SUSPEND_NONE) + if (!data->noirq) pcie_pme_interrupt_enable(port, true); spin_unlock_irq(&data->lock); @@ -378,7 +372,7 @@ static int pcie_pme_suspend(struct pcie_device *srv) { struct pcie_pme_service_data *data = get_service_data(srv); struct pci_dev *port = srv->port; - bool wakeup, wake_irq_enabled = false; + bool wakeup; int ret; if (device_may_wakeup(&port->dev)) { @@ -388,19 +382,16 @@ static int pcie_pme_suspend(struct pcie_device *srv) wakeup = pcie_pme_check_wakeup(port->subordinate); up_read(&pci_bus_sem); } - spin_lock_irq(&data->lock); if (wakeup) { ret = enable_irq_wake(srv->irq); - if (ret == 0) { - data->suspend_level = PME_SUSPEND_WAKEUP; - wake_irq_enabled = true; - } - } - if (!wake_irq_enabled) { - pcie_pme_interrupt_enable(port, false); - pcie_clear_root_pme_status(port); - data->suspend_level = PME_SUSPEND_NOIRQ; + if (!ret) + return 0; } + + spin_lock_irq(&data->lock); + pcie_pme_interrupt_enable(port, false); + pcie_clear_root_pme_status(port); + data->noirq = true; spin_unlock_irq(&data->lock); synchronize_irq(srv->irq); @@ -417,15 +408,15 @@ static int pcie_pme_resume(struct pcie_device *srv) struct pcie_pme_service_data *data = get_service_data(srv); spin_lock_irq(&data->lock); - if (data->suspend_level == PME_SUSPEND_NOIRQ) { + if (data->noirq) { struct pci_dev *port = srv->port; pcie_clear_root_pme_status(port); pcie_pme_interrupt_enable(port, true); + data->noirq = false; } else { disable_irq_wake(srv->irq); } - data->suspend_level = PME_SUSPEND_NONE; spin_unlock_irq(&data->lock); return 0; diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig index 76bdae1a93bb..0ad6e290bbda 100644 --- a/drivers/platform/chrome/Kconfig +++ b/drivers/platform/chrome/Kconfig @@ -49,7 +49,7 @@ config CROS_EC_CHARDEV config CROS_EC_LPC tristate "ChromeOS Embedded Controller (LPC)" - depends on MFD_CROS_EC && (X86 || COMPILE_TEST) + depends on 
MFD_CROS_EC && ACPI && (X86 || COMPILE_TEST) help If you say Y here, you get support for talking to the ChromeOS EC over an LPC bus. This uses a simple byte-level protocol with a @@ -59,6 +59,18 @@ config CROS_EC_LPC To compile this driver as a module, choose M here: the module will be called cros_ec_lpc. +config CROS_EC_LPC_MEC + bool "ChromeOS Embedded Controller LPC Microchip EC (MEC) variant" + depends on CROS_EC_LPC + default n + help + If you say Y here, a variant LPC protocol for the Microchip EC + will be used. Note that this variant is not backward compatible + with non-Microchip ECs. + + If you have a ChromeOS Embedded Controller Microchip EC variant + choose Y here. + config CROS_EC_PROTO bool help diff --git a/drivers/platform/chrome/Makefile b/drivers/platform/chrome/Makefile index 4f3462783a3c..66c345ca35fc 100644 --- a/drivers/platform/chrome/Makefile +++ b/drivers/platform/chrome/Makefile @@ -2,8 +2,11 @@ obj-$(CONFIG_CHROMEOS_LAPTOP) += chromeos_laptop.o obj-$(CONFIG_CHROMEOS_PSTORE) += chromeos_pstore.o cros_ec_devs-objs := cros_ec_dev.o cros_ec_sysfs.o \ - cros_ec_lightbar.o cros_ec_vbc.o + cros_ec_lightbar.o cros_ec_vbc.o \ + cros_ec_debugfs.o obj-$(CONFIG_CROS_EC_CHARDEV) += cros_ec_devs.o -obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpc.o +cros_ec_lpcs-objs := cros_ec_lpc.o cros_ec_lpc_reg.o +cros_ec_lpcs-$(CONFIG_CROS_EC_LPC_MEC) += cros_ec_lpc_mec.o +obj-$(CONFIG_CROS_EC_LPC) += cros_ec_lpcs.o obj-$(CONFIG_CROS_EC_PROTO) += cros_ec_proto.o obj-$(CONFIG_CROS_KBD_LED_BACKLIGHT) += cros_kbd_led_backlight.o diff --git a/drivers/platform/chrome/cros_ec_debugfs.c b/drivers/platform/chrome/cros_ec_debugfs.c new file mode 100644 index 000000000000..4cc66f405760 --- /dev/null +++ b/drivers/platform/chrome/cros_ec_debugfs.c @@ -0,0 +1,401 @@ +/* + * cros_ec_debugfs - debug logs for Chrome OS EC + * + * Copyright 2015 Google, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/circ_buf.h> +#include <linux/debugfs.h> +#include <linux/delay.h> +#include <linux/fs.h> +#include <linux/mfd/cros_ec.h> +#include <linux/mfd/cros_ec_commands.h> +#include <linux/mutex.h> +#include <linux/poll.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/wait.h> + +#include "cros_ec_dev.h" +#include "cros_ec_debugfs.h" + +#define LOG_SHIFT 14 +#define LOG_SIZE (1 << LOG_SHIFT) +#define LOG_POLL_SEC 10 + +#define CIRC_ADD(idx, size, value) (((idx) + (value)) & ((size) - 1)) + +/* struct cros_ec_debugfs - ChromeOS EC debugging information + * + * @ec: EC device this debugfs information belongs to + * @dir: dentry for debugfs files + * @log_buffer: circular buffer for console log information + * @read_msg: preallocated EC command and buffer to read console log + * @log_mutex: mutex to protect circular buffer + * @log_wq: waitqueue for log readers + * @log_poll_work: recurring task to poll EC for new console log data + * @panicinfo_blob: panicinfo debugfs blob + */ +struct cros_ec_debugfs { + struct cros_ec_dev *ec; + struct dentry *dir; + /* EC log */ + struct circ_buf log_buffer; + struct cros_ec_command *read_msg; + struct mutex log_mutex; + wait_queue_head_t log_wq; + struct delayed_work log_poll_work; + /* EC panicinfo */ + struct debugfs_blob_wrapper panicinfo_blob; +}; + +/* + * We need to make sure that the EC log buffer on the UART is large enough, + * so that it is unlikely to overflow within LOG_POLL_SEC. + */ +static void cros_ec_console_log_work(struct work_struct *__work) +{ + struct cros_ec_debugfs *debug_info = + container_of(to_delayed_work(__work), + struct cros_ec_debugfs, + log_poll_work); + struct cros_ec_dev *ec = debug_info->ec; + struct circ_buf *cb = &debug_info->log_buffer; + struct cros_ec_command snapshot_msg = { + .command = EC_CMD_CONSOLE_SNAPSHOT + ec->cmd_offset, + }; + + struct ec_params_console_read_v1 *read_params = + (struct ec_params_console_read_v1 *)debug_info->read_msg->data; + uint8_t *ec_buffer = (uint8_t *)debug_info->read_msg->data; + int idx; + int buf_space; + int ret; + + ret = cros_ec_cmd_xfer(ec->ec_dev, &snapshot_msg); + if (ret < 0) { + dev_err(ec->dev, "EC communication failed\n"); + goto resched; + } + if (snapshot_msg.result != EC_RES_SUCCESS) { + dev_err(ec->dev, "EC failed to snapshot the console log\n"); + goto resched; + } + + /* Loop until we have read everything, or there's an error. */ + mutex_lock(&debug_info->log_mutex); + buf_space = CIRC_SPACE(cb->head, cb->tail, LOG_SIZE); + + while (1) { + if (!buf_space) { + dev_info_once(ec->dev, + "Some logs may have been dropped...\n"); + break; + } + + memset(read_params, '\0', sizeof(*read_params)); + read_params->subcmd = CONSOLE_READ_RECENT; + ret = cros_ec_cmd_xfer(ec->ec_dev, debug_info->read_msg); + if (ret < 0) { + dev_err(ec->dev, "EC communication failed\n"); + break; + } + if (debug_info->read_msg->result != EC_RES_SUCCESS) { + dev_err(ec->dev, + "EC failed to read the console log\n"); + break; + } + + /* If the buffer is empty, we're done here.
*/ + if (ret == 0 || ec_buffer[0] == '\0') + break; + + idx = 0; + while (idx < ret && ec_buffer[idx] != '\0' && buf_space > 0) { + cb->buf[cb->head] = ec_buffer[idx]; + cb->head = CIRC_ADD(cb->head, LOG_SIZE, 1); + idx++; + buf_space--; + } + + wake_up(&debug_info->log_wq); + } + + mutex_unlock(&debug_info->log_mutex); + +resched: + schedule_delayed_work(&debug_info->log_poll_work, + msecs_to_jiffies(LOG_POLL_SEC * 1000)); +} + +static int cros_ec_console_log_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + + return nonseekable_open(inode, file); +} + +static ssize_t cros_ec_console_log_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct cros_ec_debugfs *debug_info = file->private_data; + struct circ_buf *cb = &debug_info->log_buffer; + ssize_t ret; + + mutex_lock(&debug_info->log_mutex); + + while (!CIRC_CNT(cb->head, cb->tail, LOG_SIZE)) { + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + goto error; + } + + mutex_unlock(&debug_info->log_mutex); + + ret = wait_event_interruptible(debug_info->log_wq, + CIRC_CNT(cb->head, cb->tail, LOG_SIZE)); + if (ret < 0) + return ret; + + mutex_lock(&debug_info->log_mutex); + } + + /* Only copy until the end of the circular buffer, and let userspace + * retry to get the rest of the data. + */ + ret = min_t(size_t, CIRC_CNT_TO_END(cb->head, cb->tail, LOG_SIZE), + count); + + if (copy_to_user(buf, cb->buf + cb->tail, ret)) { + ret = -EFAULT; + goto error; + } + + cb->tail = CIRC_ADD(cb->tail, LOG_SIZE, ret); + +error: + mutex_unlock(&debug_info->log_mutex); + return ret; +} + +static unsigned int cros_ec_console_log_poll(struct file *file, + poll_table *wait) +{ + struct cros_ec_debugfs *debug_info = file->private_data; + unsigned int mask = 0; + + poll_wait(file, &debug_info->log_wq, wait); + + mutex_lock(&debug_info->log_mutex); + if (CIRC_CNT(debug_info->log_buffer.head, + debug_info->log_buffer.tail, + LOG_SIZE)) + mask |= POLLIN | POLLRDNORM; + mutex_unlock(&debug_info->log_mutex); + + return mask; +} + +static int cros_ec_console_log_release(struct inode *inode, struct file *file) +{ + return 0; +} + +const struct file_operations cros_ec_console_log_fops = { + .owner = THIS_MODULE, + .open = cros_ec_console_log_open, + .read = cros_ec_console_log_read, + .llseek = no_llseek, + .poll = cros_ec_console_log_poll, + .release = cros_ec_console_log_release, +}; + +static int ec_read_version_supported(struct cros_ec_dev *ec) +{ + struct ec_params_get_cmd_versions_v1 *params; + struct ec_response_get_cmd_versions *response; + int ret; + + struct cros_ec_command *msg; + + msg = kzalloc(sizeof(*msg) + max(sizeof(*params), sizeof(*response)), + GFP_KERNEL); + if (!msg) + return 0; + + msg->command = EC_CMD_GET_CMD_VERSIONS + ec->cmd_offset; + msg->outsize = sizeof(*params); + msg->insize = sizeof(*response); + + params = (struct ec_params_get_cmd_versions_v1 *)msg->data; + params->cmd = EC_CMD_CONSOLE_READ; + response = (struct ec_response_get_cmd_versions *)msg->data; + + ret = cros_ec_cmd_xfer(ec->ec_dev, msg) >= 0 && + msg->result == EC_RES_SUCCESS && + (response->version_mask & EC_VER_MASK(1)); + + kfree(msg); + + return ret; +} + +static int cros_ec_create_console_log(struct cros_ec_debugfs *debug_info) +{ + struct cros_ec_dev *ec = debug_info->ec; + char *buf; + int read_params_size; + int read_response_size; + + if (!ec_read_version_supported(ec)) { + dev_warn(ec->dev, + "device does not support reading the console log\n"); + return 0; + } + + buf = devm_kzalloc(ec->dev, 
LOG_SIZE, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + read_params_size = sizeof(struct ec_params_console_read_v1); + read_response_size = ec->ec_dev->max_response; + debug_info->read_msg = devm_kzalloc(ec->dev, + sizeof(*debug_info->read_msg) + + max(read_params_size, read_response_size), GFP_KERNEL); + if (!debug_info->read_msg) + return -ENOMEM; + + debug_info->read_msg->version = 1; + debug_info->read_msg->command = EC_CMD_CONSOLE_READ + ec->cmd_offset; + debug_info->read_msg->outsize = read_params_size; + debug_info->read_msg->insize = read_response_size; + + debug_info->log_buffer.buf = buf; + debug_info->log_buffer.head = 0; + debug_info->log_buffer.tail = 0; + + mutex_init(&debug_info->log_mutex); + init_waitqueue_head(&debug_info->log_wq); + + if (!debugfs_create_file("console_log", + S_IFREG | S_IRUGO, + debug_info->dir, + debug_info, + &cros_ec_console_log_fops)) + return -ENOMEM; + + INIT_DELAYED_WORK(&debug_info->log_poll_work, + cros_ec_console_log_work); + schedule_delayed_work(&debug_info->log_poll_work, 0); + + return 0; +} + +static void cros_ec_cleanup_console_log(struct cros_ec_debugfs *debug_info) +{ + if (debug_info->log_buffer.buf) { + cancel_delayed_work_sync(&debug_info->log_poll_work); + mutex_destroy(&debug_info->log_mutex); + } +} + +static int cros_ec_create_panicinfo(struct cros_ec_debugfs *debug_info) +{ + struct cros_ec_device *ec_dev = debug_info->ec->ec_dev; + int ret; + struct cros_ec_command *msg; + int insize; + + insize = ec_dev->max_response; + + msg = devm_kzalloc(debug_info->ec->dev, + sizeof(*msg) + insize, GFP_KERNEL); + if (!msg) + return -ENOMEM; + + msg->command = EC_CMD_GET_PANIC_INFO; + msg->insize = insize; + + ret = cros_ec_cmd_xfer(ec_dev, msg); + if (ret < 0) { + dev_warn(debug_info->ec->dev, "Cannot read panicinfo.\n"); + ret = 0; + goto free; + } + + /* No panic data */ + if (ret == 0) + goto free; + + debug_info->panicinfo_blob.data = msg->data; + debug_info->panicinfo_blob.size = ret; + + if (!debugfs_create_blob("panicinfo", + S_IFREG | S_IRUGO, + debug_info->dir, + &debug_info->panicinfo_blob)) { + ret = -ENOMEM; + goto free; + } + + return 0; + +free: + devm_kfree(debug_info->ec->dev, msg); + return ret; +} + +int cros_ec_debugfs_init(struct cros_ec_dev *ec) +{ + struct cros_ec_platform *ec_platform = dev_get_platdata(ec->dev); + const char *name = ec_platform->ec_name; + struct cros_ec_debugfs *debug_info; + int ret; + + debug_info = devm_kzalloc(ec->dev, sizeof(*debug_info), GFP_KERNEL); + if (!debug_info) + return -ENOMEM; + + debug_info->ec = ec; + debug_info->dir = debugfs_create_dir(name, NULL); + if (!debug_info->dir) + return -ENOMEM; + + ret = cros_ec_create_panicinfo(debug_info); + if (ret) + goto remove_debugfs; + + ret = cros_ec_create_console_log(debug_info); + if (ret) + goto remove_debugfs; + + ec->debug_info = debug_info; + + return 0; + +remove_debugfs: + debugfs_remove_recursive(debug_info->dir); + return ret; +} + +void cros_ec_debugfs_remove(struct cros_ec_dev *ec) +{ + if (!ec->debug_info) + return; + + debugfs_remove_recursive(ec->debug_info->dir); + cros_ec_cleanup_console_log(ec->debug_info); +} diff --git a/drivers/platform/chrome/cros_ec_debugfs.h b/drivers/platform/chrome/cros_ec_debugfs.h new file mode 100644 index 000000000000..1ff3a50aa1b8 --- /dev/null +++ b/drivers/platform/chrome/cros_ec_debugfs.h @@ -0,0 +1,27 @@ +/* + * Copyright 2015 Google, Inc. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#ifndef _DRV_CROS_EC_DEBUGFS_H_ +#define _DRV_CROS_EC_DEBUGFS_H_ + +#include "cros_ec_dev.h" + +/* debugfs stuff */ +int cros_ec_debugfs_init(struct cros_ec_dev *ec); +void cros_ec_debugfs_remove(struct cros_ec_dev *ec); + +#endif /* _DRV_CROS_EC_DEBUGFS_H_ */ diff --git a/drivers/platform/chrome/cros_ec_dev.c b/drivers/platform/chrome/cros_ec_dev.c index 6aa120cd0574..cf6c4f0846b8 100644 --- a/drivers/platform/chrome/cros_ec_dev.c +++ b/drivers/platform/chrome/cros_ec_dev.c @@ -21,9 +21,11 @@ #include <linux/mfd/core.h> #include <linux/module.h> #include <linux/platform_device.h> +#include <linux/pm.h> #include <linux/slab.h> #include <linux/uaccess.h> +#include "cros_ec_debugfs.h" #include "cros_ec_dev.h" /* Device variables */ @@ -427,10 +429,16 @@ static int ec_device_probe(struct platform_device *pdev) goto failed; } + if (cros_ec_debugfs_init(ec)) + dev_warn(dev, "failed to create debugfs directory\n"); + /* check whether this EC is a sensor hub. */ if (cros_ec_check_features(ec, EC_FEATURE_MOTION_SENSE)) cros_ec_sensors_register(ec); + /* Take control of the lightbar from the EC. */ + lb_manual_suspend_ctrl(ec, 1); + return 0; failed: @@ -441,6 +449,12 @@ failed: static int ec_device_remove(struct platform_device *pdev) { struct cros_ec_dev *ec = dev_get_drvdata(&pdev->dev); + + /* Let the EC take over the lightbar again. 
*/ + lb_manual_suspend_ctrl(ec, 0); + + cros_ec_debugfs_remove(ec); + cdev_del(&ec->cdev); device_unregister(&ec->class_dev); return 0; @@ -452,9 +466,35 @@ static const struct platform_device_id cros_ec_id[] = { }; MODULE_DEVICE_TABLE(platform, cros_ec_id); +static __maybe_unused int ec_device_suspend(struct device *dev) +{ + struct cros_ec_dev *ec = dev_get_drvdata(dev); + + lb_suspend(ec); + + return 0; +} + +static __maybe_unused int ec_device_resume(struct device *dev) +{ + struct cros_ec_dev *ec = dev_get_drvdata(dev); + + lb_resume(ec); + + return 0; +} + +static const struct dev_pm_ops cros_ec_dev_pm_ops = { +#ifdef CONFIG_PM_SLEEP + .suspend = ec_device_suspend, + .resume = ec_device_resume, +#endif +}; + static struct platform_driver cros_ec_dev_driver = { .driver = { .name = "cros-ec-ctl", + .pm = &cros_ec_dev_pm_ops, }, .probe = ec_device_probe, .remove = ec_device_remove, diff --git a/drivers/platform/chrome/cros_ec_dev.h b/drivers/platform/chrome/cros_ec_dev.h index bfd2c84c3571..45e9453608c5 100644 --- a/drivers/platform/chrome/cros_ec_dev.h +++ b/drivers/platform/chrome/cros_ec_dev.h @@ -43,4 +43,10 @@ struct cros_ec_readmem { #define CROS_EC_DEV_IOCXCMD _IOWR(CROS_EC_DEV_IOC, 0, struct cros_ec_command) #define CROS_EC_DEV_IOCRDMEM _IOWR(CROS_EC_DEV_IOC, 1, struct cros_ec_readmem) +/* Lightbar utilities */ +extern bool ec_has_lightbar(struct cros_ec_dev *ec); +extern int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable); +extern int lb_suspend(struct cros_ec_dev *ec); +extern int lb_resume(struct cros_ec_dev *ec); + #endif /* _CROS_EC_DEV_H_ */ diff --git a/drivers/platform/chrome/cros_ec_lightbar.c b/drivers/platform/chrome/cros_ec_lightbar.c index 8df3d447cacf..fd2b047a2748 100644 --- a/drivers/platform/chrome/cros_ec_lightbar.c +++ b/drivers/platform/chrome/cros_ec_lightbar.c @@ -38,6 +38,13 @@ /* Rate-limit the lightbar interface to prevent DoS. */ static unsigned long lb_interval_jiffies = 50 * HZ / 1000; +/* + * Whether or not we have given userspace control of the lightbar. + * If this is true, we won't do anything during suspend/resume. 
+ */ +static bool userspace_control; +static struct cros_ec_dev *ec_with_lightbar; + static ssize_t interval_msec_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -295,7 +302,8 @@ exit: static char const *seqname[] = { "ERROR", "S5", "S3", "S0", "S5S3", "S3S0", - "S0S3", "S3S5", "STOP", "RUN", "PULSE", "TEST", "KONAMI", + "S0S3", "S3S5", "STOP", "RUN", "KONAMI", + "TAP", "PROGRAM", }; static ssize_t sequence_show(struct device *dev, @@ -340,6 +348,89 @@ exit: return ret; } +static int lb_send_empty_cmd(struct cros_ec_dev *ec, uint8_t cmd) +{ + struct ec_params_lightbar *param; + struct cros_ec_command *msg; + int ret; + + msg = alloc_lightbar_cmd_msg(ec); + if (!msg) + return -ENOMEM; + + param = (struct ec_params_lightbar *)msg->data; + param->cmd = cmd; + + ret = lb_throttle(); + if (ret) + goto error; + + ret = cros_ec_cmd_xfer(ec->ec_dev, msg); + if (ret < 0) + goto error; + if (msg->result != EC_RES_SUCCESS) { + ret = -EINVAL; + goto error; + } + ret = 0; +error: + kfree(msg); + + return ret; +} + +int lb_manual_suspend_ctrl(struct cros_ec_dev *ec, uint8_t enable) +{ + struct ec_params_lightbar *param; + struct cros_ec_command *msg; + int ret; + + if (ec != ec_with_lightbar) + return 0; + + msg = alloc_lightbar_cmd_msg(ec); + if (!msg) + return -ENOMEM; + + param = (struct ec_params_lightbar *)msg->data; + + param->cmd = LIGHTBAR_CMD_MANUAL_SUSPEND_CTRL; + param->manual_suspend_ctrl.enable = enable; + + ret = lb_throttle(); + if (ret) + goto error; + + ret = cros_ec_cmd_xfer(ec->ec_dev, msg); + if (ret < 0) + goto error; + if (msg->result != EC_RES_SUCCESS) { + ret = -EINVAL; + goto error; + } + ret = 0; +error: + kfree(msg); + + return ret; +} + +int lb_suspend(struct cros_ec_dev *ec) +{ + if (userspace_control || ec != ec_with_lightbar) + return 0; + + return lb_send_empty_cmd(ec, LIGHTBAR_CMD_SUSPEND); +} + +int lb_resume(struct cros_ec_dev *ec) +{ + if (userspace_control || ec != ec_with_lightbar) + return 0; + + return lb_send_empty_cmd(ec, LIGHTBAR_CMD_RESUME); +} + static ssize_t sequence_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { @@ -390,6 +481,93 @@ exit: return ret; } +static ssize_t program_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t count) +{ + int extra_bytes, max_size, ret; + struct ec_params_lightbar *param; + struct cros_ec_command *msg; + struct cros_ec_dev *ec = container_of(dev, struct cros_ec_dev, + class_dev); + + /* + * We might need to reject the program for size reasons. The EC + * enforces a maximum program size, but we also don't want to try + * and send a program that is too big for the protocol. In order + * to ensure the latter, we also need to ensure we have extra bytes + * to represent the rest of the packet. + */ + extra_bytes = sizeof(*param) - sizeof(param->set_program.data); + max_size = min(EC_LB_PROG_LEN, ec->ec_dev->max_request - extra_bytes); + if (count > max_size) { + dev_err(dev, "Program is %u bytes, too long to send (max: %u)", + (unsigned int)count, max_size); + + return -EINVAL; + } + + msg = alloc_lightbar_cmd_msg(ec); + if (!msg) + return -ENOMEM; + + ret = lb_throttle(); + if (ret) + goto exit; + + dev_info(dev, "Copying %zu byte program to EC", count); + + param = (struct ec_params_lightbar *)msg->data; + param->cmd = LIGHTBAR_CMD_SET_PROGRAM; + + param->set_program.size = count; + memcpy(param->set_program.data, buf, count); + + /* + * We need to set the message size manually or else it will use + * EC_LB_PROG_LEN. 
This might be too long, and the program + * is unlikely to use all of the space. + */ + msg->outsize = count + extra_bytes; + + ret = cros_ec_cmd_xfer(ec->ec_dev, msg); + if (ret < 0) + goto exit; + if (msg->result != EC_RES_SUCCESS) { + ret = -EINVAL; + goto exit; + } + + ret = count; +exit: + kfree(msg); + + return ret; +} + +static ssize_t userspace_control_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return scnprintf(buf, PAGE_SIZE, "%d\n", userspace_control); +} + +static ssize_t userspace_control_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + bool enable; + int ret; + + ret = strtobool(buf, &enable); + if (ret < 0) + return ret; + + userspace_control = enable; + + return count; +} + /* Module initialization */ static DEVICE_ATTR_RW(interval_msec); @@ -397,15 +575,25 @@ static DEVICE_ATTR_RO(version); static DEVICE_ATTR_WO(brightness); static DEVICE_ATTR_WO(led_rgb); static DEVICE_ATTR_RW(sequence); +static DEVICE_ATTR_WO(program); +static DEVICE_ATTR_RW(userspace_control); + static struct attribute *__lb_cmds_attrs[] = { &dev_attr_interval_msec.attr, &dev_attr_version.attr, &dev_attr_brightness.attr, &dev_attr_led_rgb.attr, &dev_attr_sequence.attr, + &dev_attr_program.attr, + &dev_attr_userspace_control.attr, NULL, }; +bool ec_has_lightbar(struct cros_ec_dev *ec) +{ + return !!get_lightbar_version(ec, NULL, NULL); +} + static umode_t cros_ec_lightbar_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { @@ -422,10 +610,11 @@ static umode_t cros_ec_lightbar_attrs_are_visible(struct kobject *kobj, return 0; /* Only instantiate this stuff if the EC has a lightbar */ - if (get_lightbar_version(ec, NULL, NULL)) + if (ec_has_lightbar(ec)) { + ec_with_lightbar = ec; return a->mode; - else - return 0; + } + return 0; } struct attribute_group cros_ec_lightbar_attr_group = { diff --git a/drivers/platform/chrome/cros_ec_lpc.c b/drivers/platform/chrome/cros_ec_lpc.c index f9a245465fd0..2b6436d1b6a4 100644 --- a/drivers/platform/chrome/cros_ec_lpc.c +++ b/drivers/platform/chrome/cros_ec_lpc.c @@ -21,24 +21,29 @@ * expensive. 
*/ +#include <linux/acpi.h> #include <linux/dmi.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/mfd/cros_ec.h> #include <linux/mfd/cros_ec_commands.h> +#include <linux/mfd/cros_ec_lpc_reg.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/printk.h> -#define DRV_NAME "cros_ec_lpc" +#define DRV_NAME "cros_ec_lpcs" +#define ACPI_DRV_NAME "GOOG0004" static int ec_response_timed_out(void) { unsigned long one_second = jiffies + HZ; + u8 data; usleep_range(200, 300); do { - if (!(inb(EC_LPC_ADDR_HOST_CMD) & EC_LPC_STATUS_BUSY_MASK)) + if (!(cros_ec_lpc_read_bytes(EC_LPC_ADDR_HOST_CMD, 1, &data) & + EC_LPC_STATUS_BUSY_MASK)) return 0; usleep_range(100, 200); } while (time_before(jiffies, one_second)); @@ -51,21 +56,20 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, { struct ec_host_request *request; struct ec_host_response response; - u8 sum = 0; - int i; + u8 sum; int ret = 0; u8 *dout; ret = cros_ec_prepare_tx(ec, msg); /* Write buffer */ - for (i = 0; i < ret; i++) - outb(ec->dout[i], EC_LPC_ADDR_HOST_PACKET + i); + cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_PACKET, ret, ec->dout); request = (struct ec_host_request *)ec->dout; /* Here we go */ - outb(EC_COMMAND_PROTOCOL_3, EC_LPC_ADDR_HOST_CMD); + sum = EC_COMMAND_PROTOCOL_3; + cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_CMD, 1, &sum); if (ec_response_timed_out()) { dev_warn(ec->dev, "EC response timed out\n"); @@ -74,17 +78,15 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, } /* Check result */ - msg->result = inb(EC_LPC_ADDR_HOST_DATA); + msg->result = cros_ec_lpc_read_bytes(EC_LPC_ADDR_HOST_DATA, 1, &sum); ret = cros_ec_check_result(ec, msg); if (ret) goto done; /* Read back response */ dout = (u8 *)&response; - for (i = 0; i < sizeof(response); i++) { - dout[i] = inb(EC_LPC_ADDR_HOST_PACKET + i); - sum += dout[i]; - } + sum = cros_ec_lpc_read_bytes(EC_LPC_ADDR_HOST_PACKET, sizeof(response), + dout); msg->result = response.result; @@ -97,11 +99,9 @@ static int cros_ec_pkt_xfer_lpc(struct cros_ec_device *ec, } /* Read response and process checksum */ - for (i = 0; i < response.data_len; i++) { - msg->data[i] = - inb(EC_LPC_ADDR_HOST_PACKET + sizeof(response) + i); - sum += msg->data[i]; - } + sum += cros_ec_lpc_read_bytes(EC_LPC_ADDR_HOST_PACKET + + sizeof(response), response.data_len, + msg->data); if (sum) { dev_err(ec->dev, @@ -121,8 +121,7 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec, struct cros_ec_command *msg) { struct ec_lpc_host_args args; - int csum; - int i; + u8 sum; int ret = 0; if (msg->outsize > EC_PROTO2_MAX_PARAM_SIZE || @@ -139,24 +138,20 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec, args.data_size = msg->outsize; /* Initialize checksum */ - csum = msg->command + args.flags + - args.command_version + args.data_size; + sum = msg->command + args.flags + args.command_version + args.data_size; /* Copy data and update checksum */ - for (i = 0; i < msg->outsize; i++) { - outb(msg->data[i], EC_LPC_ADDR_HOST_PARAM + i); - csum += msg->data[i]; - } + sum += cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_PARAM, msg->outsize, + msg->data); /* Finalize checksum and write args */ - args.checksum = csum & 0xFF; - outb(args.flags, EC_LPC_ADDR_HOST_ARGS); - outb(args.command_version, EC_LPC_ADDR_HOST_ARGS + 1); - outb(args.data_size, EC_LPC_ADDR_HOST_ARGS + 2); - outb(args.checksum, EC_LPC_ADDR_HOST_ARGS + 3); + args.checksum = sum; + cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_ARGS, sizeof(args), + (u8 *)&args); /* Here we go */ - 
outb(msg->command, EC_LPC_ADDR_HOST_CMD); + sum = msg->command; + cros_ec_lpc_write_bytes(EC_LPC_ADDR_HOST_CMD, 1, &sum); if (ec_response_timed_out()) { dev_warn(ec->dev, "EC response timed out\n"); @@ -165,16 +160,14 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec, } /* Check result */ - msg->result = inb(EC_LPC_ADDR_HOST_DATA); + msg->result = cros_ec_lpc_read_bytes(EC_LPC_ADDR_HOST_DATA, 1, &sum); ret = cros_ec_check_result(ec, msg); if (ret) goto done; /* Read back args */ - args.flags = inb(EC_LPC_ADDR_HOST_ARGS); - args.command_version = inb(EC_LPC_ADDR_HOST_ARGS + 1); - args.data_size = inb(EC_LPC_ADDR_HOST_ARGS + 2); - args.checksum = inb(EC_LPC_ADDR_HOST_ARGS + 3); + cros_ec_lpc_read_bytes(EC_LPC_ADDR_HOST_ARGS, sizeof(args), + (u8 *)&args); if (args.data_size > msg->insize) { dev_err(ec->dev, @@ -185,20 +178,17 @@ static int cros_ec_cmd_xfer_lpc(struct cros_ec_device *ec, } /* Start calculating response checksum */ - csum = msg->command + args.flags + - args.command_version + args.data_size; + sum = msg->command + args.flags + args.command_version + args.data_size; /* Read response and update checksum */ - for (i = 0; i < args.data_size; i++) { - msg->data[i] = inb(EC_LPC_ADDR_HOST_PARAM + i); - csum += msg->data[i]; - } + sum += cros_ec_lpc_read_bytes(EC_LPC_ADDR_HOST_PARAM, args.data_size, + msg->data); /* Verify checksum */ - if (args.checksum != (csum & 0xFF)) { + if (args.checksum != sum) { dev_err(ec->dev, "bad packet checksum, expected %02x, got %02x\n", - args.checksum, csum & 0xFF); + args.checksum, sum); ret = -EBADMSG; goto done; } @@ -222,14 +212,13 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset, /* fixed length */ if (bytes) { - for (; cnt < bytes; i++, s++, cnt++) - *s = inb(EC_LPC_ADDR_MEMMAP + i); - return cnt; + cros_ec_lpc_read_bytes(EC_LPC_ADDR_MEMMAP + offset, bytes, s); + return bytes; } /* string */ for (; i < EC_MEMMAP_SIZE; i++, s++) { - *s = inb(EC_LPC_ADDR_MEMMAP + i); + cros_ec_lpc_read_bytes(EC_LPC_ADDR_MEMMAP + i, 1, s); cnt++; if (!*s) break; @@ -238,10 +227,23 @@ static int cros_ec_lpc_readmem(struct cros_ec_device *ec, unsigned int offset, return cnt; } +static void cros_ec_lpc_acpi_notify(acpi_handle device, u32 value, void *data) +{ + struct cros_ec_device *ec_dev = data; + + if (ec_dev->mkbp_event_supported && + cros_ec_get_next_event(ec_dev, NULL) > 0) + blocking_notifier_call_chain(&ec_dev->event_notifier, 0, + ec_dev); +} + static int cros_ec_lpc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct acpi_device *adev; + acpi_status status; struct cros_ec_device *ec_dev; + u8 buf[2]; int ret; if (!devm_request_region(dev, EC_LPC_ADDR_MEMMAP, EC_MEMMAP_SIZE, @@ -250,8 +252,8 @@ static int cros_ec_lpc_probe(struct platform_device *pdev) return -EBUSY; } - if ((inb(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID) != 'E') || - (inb(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID + 1) != 'C')) { + cros_ec_lpc_read_bytes(EC_LPC_ADDR_MEMMAP + EC_MEMMAP_ID, 2, buf); + if (buf[0] != 'E' || buf[1] != 'C') { dev_err(dev, "EC ID not detected\n"); return -ENODEV; } @@ -287,12 +289,33 @@ static int cros_ec_lpc_probe(struct platform_device *pdev) return ret; } + /* + * Connect a notify handler to process MKBP messages if we have a + * companion ACPI device.
+ */ + adev = ACPI_COMPANION(dev); + if (adev) { + status = acpi_install_notify_handler(adev->handle, + ACPI_ALL_NOTIFY, + cros_ec_lpc_acpi_notify, + ec_dev); + if (ACPI_FAILURE(status)) + dev_warn(dev, "Failed to register notifier %08x\n", + status); + } + return 0; } static int cros_ec_lpc_remove(struct platform_device *pdev) { struct cros_ec_device *ec_dev; + struct acpi_device *adev; + + adev = ACPI_COMPANION(&pdev->dev); + if (adev) + acpi_remove_notify_handler(adev->handle, ACPI_ALL_NOTIFY, + cros_ec_lpc_acpi_notify); ec_dev = platform_get_drvdata(pdev); cros_ec_remove(ec_dev); @@ -300,6 +323,12 @@ static int cros_ec_lpc_remove(struct platform_device *pdev) return 0; } +static const struct acpi_device_id cros_ec_lpc_acpi_device_ids[] = { + { ACPI_DRV_NAME, 0 }, + { } +}; +MODULE_DEVICE_TABLE(acpi, cros_ec_lpc_acpi_device_ids); + static struct dmi_system_id cros_ec_lpc_dmi_table[] __initdata = { { /* @@ -337,18 +366,36 @@ static struct dmi_system_id cros_ec_lpc_dmi_table[] __initdata = { }; MODULE_DEVICE_TABLE(dmi, cros_ec_lpc_dmi_table); +#ifdef CONFIG_PM_SLEEP +static int cros_ec_lpc_suspend(struct device *dev) +{ + struct cros_ec_device *ec_dev = dev_get_drvdata(dev); + + return cros_ec_suspend(ec_dev); +} + +static int cros_ec_lpc_resume(struct device *dev) +{ + struct cros_ec_device *ec_dev = dev_get_drvdata(dev); + + return cros_ec_resume(ec_dev); +} +#endif + +const struct dev_pm_ops cros_ec_lpc_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(cros_ec_lpc_suspend, cros_ec_lpc_resume) +}; + static struct platform_driver cros_ec_lpc_driver = { .driver = { .name = DRV_NAME, + .acpi_match_table = cros_ec_lpc_acpi_device_ids, + .pm = &cros_ec_lpc_pm_ops, }, .probe = cros_ec_lpc_probe, .remove = cros_ec_lpc_remove, }; -static struct platform_device cros_ec_lpc_device = { - .name = DRV_NAME -}; - static int __init cros_ec_lpc_init(void) { int ret; @@ -358,18 +405,13 @@ static int __init cros_ec_lpc_init(void) return -ENODEV; } + cros_ec_lpc_reg_init(); + /* Register the driver */ ret = platform_driver_register(&cros_ec_lpc_driver); if (ret) { pr_err(DRV_NAME ": can't register driver: %d\n", ret); - return ret; - } - - /* Register the device, and it'll get hooked up automatically */ - ret = platform_device_register(&cros_ec_lpc_device); - if (ret) { - pr_err(DRV_NAME ": can't register device: %d\n", ret); - platform_driver_unregister(&cros_ec_lpc_driver); + cros_ec_lpc_reg_destroy(); return ret; } @@ -378,8 +420,8 @@ static int __init cros_ec_lpc_init(void) static void __exit cros_ec_lpc_exit(void) { - platform_device_unregister(&cros_ec_lpc_device); platform_driver_unregister(&cros_ec_lpc_driver); + cros_ec_lpc_reg_destroy(); } module_init(cros_ec_lpc_init); diff --git a/drivers/platform/chrome/cros_ec_lpc_mec.c b/drivers/platform/chrome/cros_ec_lpc_mec.c new file mode 100644 index 000000000000..2eda2c2fc210 --- /dev/null +++ b/drivers/platform/chrome/cros_ec_lpc_mec.c @@ -0,0 +1,140 @@ +/* + * cros_ec_lpc_mec - LPC variant I/O for Microchip EC + * + * Copyright (C) 2016 Google, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * This driver uses the Chrome OS EC byte-level message-based protocol for + * communicating the keyboard state (which keys are pressed) from a keyboard EC + * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing, + * but everything else (including deghosting) is done here. The main + * motivation for this is to keep the EC firmware as simple as possible, since + * it cannot be easily upgraded and EC flash/IRAM space is relatively + * expensive. + */ + +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/mfd/cros_ec_commands.h> +#include <linux/mfd/cros_ec_lpc_mec.h> +#include <linux/mutex.h> +#include <linux/types.h> + +/* + * This mutex must be held while accessing the EMI unit. We can't rely on the + * EC mutex because memmap data may be accessed without it being held. + */ +static struct mutex io_mutex; + +/* + * cros_ec_lpc_mec_emi_write_address + * + * Initialize EMI read / write at a given address. + * + * @addr: Starting read / write address + * @access_type: Type of access, typically 32-bit auto-increment + */ +static void cros_ec_lpc_mec_emi_write_address(u16 addr, + enum cros_ec_lpc_mec_emi_access_mode access_type) +{ + /* Address relative to start of EMI range */ + addr -= MEC_EMI_RANGE_START; + outb((addr & 0xfc) | access_type, MEC_EMI_EC_ADDRESS_B0); + outb((addr >> 8) & 0x7f, MEC_EMI_EC_ADDRESS_B1); +} + +/* + * cros_ec_lpc_io_bytes_mec - Read / write bytes to MEC EMI port + * + * @io_type: MEC_IO_READ or MEC_IO_WRITE, depending on request + * @offset: Base read / write address + * @length: Number of bytes to read / write + * @buf: Destination / source buffer + * + * @return 8-bit checksum of all bytes read / written + */ +u8 cros_ec_lpc_io_bytes_mec(enum cros_ec_lpc_mec_io_type io_type, + unsigned int offset, unsigned int length, + u8 *buf) +{ + int i = 0; + int io_addr; + u8 sum = 0; + enum cros_ec_lpc_mec_emi_access_mode access, new_access; + + /* + * Long access cannot be used on misaligned data since reading B0 loads + * the data register and writing B3 flushes. + */ + if (offset & 0x3 || length < 4) + access = ACCESS_TYPE_BYTE; + else + access = ACCESS_TYPE_LONG_AUTO_INCREMENT; + + mutex_lock(&io_mutex); + + /* Initialize I/O at desired address */ + cros_ec_lpc_mec_emi_write_address(offset, access); + + /* Skip bytes in case of misaligned offset */ + io_addr = MEC_EMI_EC_DATA_B0 + (offset & 0x3); + while (i < length) { + while (io_addr <= MEC_EMI_EC_DATA_B3) { + if (io_type == MEC_IO_READ) + buf[i] = inb(io_addr++); + else + outb(buf[i], io_addr++); + + sum += buf[i++]; + offset++; + + /* Extra bounds check in case of misaligned length */ + if (i == length) + goto done; + } + + /* + * Use long auto-increment access except for misaligned write, + * since writing B3 triggers the flush. 
+ */ + if (length - i < 4 && io_type == MEC_IO_WRITE) + new_access = ACCESS_TYPE_BYTE; + else + new_access = ACCESS_TYPE_LONG_AUTO_INCREMENT; + + if (new_access != access || + access != ACCESS_TYPE_LONG_AUTO_INCREMENT) { + access = new_access; + cros_ec_lpc_mec_emi_write_address(offset, access); + } + + /* Access [B0, B3] on each loop pass */ + io_addr = MEC_EMI_EC_DATA_B0; + } + +done: + mutex_unlock(&io_mutex); + + return sum; +} +EXPORT_SYMBOL(cros_ec_lpc_io_bytes_mec); + +void cros_ec_lpc_mec_init(void) +{ + mutex_init(&io_mutex); +} +EXPORT_SYMBOL(cros_ec_lpc_mec_init); + +void cros_ec_lpc_mec_destroy(void) +{ + mutex_destroy(&io_mutex); +} +EXPORT_SYMBOL(cros_ec_lpc_mec_destroy); diff --git a/drivers/platform/chrome/cros_ec_lpc_reg.c b/drivers/platform/chrome/cros_ec_lpc_reg.c new file mode 100644 index 000000000000..dcc7a3e30604 --- /dev/null +++ b/drivers/platform/chrome/cros_ec_lpc_reg.c @@ -0,0 +1,133 @@ +/* + * cros_ec_lpc_reg - LPC access to the Chrome OS Embedded Controller + * + * Copyright (C) 2016 Google, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * This driver uses the Chrome OS EC byte-level message-based protocol for + * communicating the keyboard state (which keys are pressed) from a keyboard EC + * to the AP over some bus (such as i2c, lpc, spi). The EC does debouncing, + * but everything else (including deghosting) is done here. The main + * motivation for this is to keep the EC firmware as simple as possible, since + * it cannot be easily upgraded and EC flash/IRAM space is relatively + * expensive. 
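+ *
+ * Within that stack, this file only routes byte-level reads and
+ * writes either through plain LPC port I/O or, on Microchip parts,
+ * through the EMI window code in cros_ec_lpc_mec.c.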
+ */ + +#include <linux/io.h> +#include <linux/mfd/cros_ec.h> +#include <linux/mfd/cros_ec_commands.h> +#include <linux/mfd/cros_ec_lpc_mec.h> + +static u8 lpc_read_bytes(unsigned int offset, unsigned int length, u8 *dest) +{ + int i; + int sum = 0; + + for (i = 0; i < length; ++i) { + dest[i] = inb(offset + i); + sum += dest[i]; + } + + /* Return checksum of all bytes read */ + return sum; +} + +static u8 lpc_write_bytes(unsigned int offset, unsigned int length, u8 *msg) +{ + int i; + int sum = 0; + + for (i = 0; i < length; ++i) { + outb(msg[i], offset + i); + sum += msg[i]; + } + + /* Return checksum of all bytes written */ + return sum; +} + +#ifdef CONFIG_CROS_EC_LPC_MEC + +u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length, u8 *dest) +{ + if (length == 0) + return 0; + + /* Access desired range through EMI interface */ + if (offset >= MEC_EMI_RANGE_START && offset <= MEC_EMI_RANGE_END) { + /* Ensure we don't straddle EMI region */ + if (WARN_ON(offset + length - 1 > MEC_EMI_RANGE_END)) + return 0; + + return cros_ec_lpc_io_bytes_mec(MEC_IO_READ, offset, length, + dest); + } + + if (WARN_ON(offset + length > MEC_EMI_RANGE_START && + offset < MEC_EMI_RANGE_START)) + return 0; + + return lpc_read_bytes(offset, length, dest); +} + +u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length, u8 *msg) +{ + if (length == 0) + return 0; + + /* Access desired range through EMI interface */ + if (offset >= MEC_EMI_RANGE_START && offset <= MEC_EMI_RANGE_END) { + /* Ensure we don't straddle EMI region */ + if (WARN_ON(offset + length - 1 > MEC_EMI_RANGE_END)) + return 0; + + return cros_ec_lpc_io_bytes_mec(MEC_IO_WRITE, offset, length, + msg); + } + + if (WARN_ON(offset + length > MEC_EMI_RANGE_START && + offset < MEC_EMI_RANGE_START)) + return 0; + + return lpc_write_bytes(offset, length, msg); +} + +void cros_ec_lpc_reg_init(void) +{ + cros_ec_lpc_mec_init(); +} + +void cros_ec_lpc_reg_destroy(void) +{ + cros_ec_lpc_mec_destroy(); +} + +#else /* CONFIG_CROS_EC_LPC_MEC */ + +u8 cros_ec_lpc_read_bytes(unsigned int offset, unsigned int length, u8 *dest) +{ + return lpc_read_bytes(offset, length, dest); +} + +u8 cros_ec_lpc_write_bytes(unsigned int offset, unsigned int length, u8 *msg) +{ + return lpc_write_bytes(offset, length, msg); +} + +void cros_ec_lpc_reg_init(void) +{ +} + +void cros_ec_lpc_reg_destroy(void) +{ +} + +#endif /* CONFIG_CROS_EC_LPC_MEC */ diff --git a/drivers/platform/chrome/cros_ec_proto.c b/drivers/platform/chrome/cros_ec_proto.c index ed5dee744c74..8dfa7fcb1248 100644 --- a/drivers/platform/chrome/cros_ec_proto.c +++ b/drivers/platform/chrome/cros_ec_proto.c @@ -150,6 +150,40 @@ int cros_ec_check_result(struct cros_ec_device *ec_dev, } EXPORT_SYMBOL(cros_ec_check_result); +/* + * cros_ec_get_host_event_wake_mask + * + * Get the mask of host events that cause wake from suspend. + * + * @ec_dev: EC device to call + * @msg: message structure to use + * @mask: result when function returns >=0. + * + * LOCKING: + * the caller has ec_dev->lock mutex, or the caller knows there is + * no other command in progress. 
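+ *
+ * @return as send_command(): the number of response bytes (> 0) on
+ * success, a negative error code on failure.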
+ */ +static int cros_ec_get_host_event_wake_mask(struct cros_ec_device *ec_dev, + struct cros_ec_command *msg, + uint32_t *mask) +{ + struct ec_response_host_event_mask *r; + int ret; + + msg->command = EC_CMD_HOST_EVENT_GET_WAKE_MASK; + msg->version = 0; + msg->outsize = 0; + msg->insize = sizeof(*r); + + ret = send_command(ec_dev, msg); + if (ret > 0) { + r = (struct ec_response_host_event_mask *)msg->data; + *mask = r->mask; + } + + return ret; +} + static int cros_ec_host_command_proto_query(struct cros_ec_device *ec_dev, int devidx, struct cros_ec_command *msg) @@ -235,6 +269,22 @@ static int cros_ec_host_command_proto_query_v2(struct cros_ec_device *ec_dev) return ret; } +/* + * cros_ec_get_host_command_version_mask + * + * Get the version mask of a given command. + * + * @ec_dev: EC device to call + * @msg: message structure to use + * @cmd: command to get the version of. + * @mask: result when function returns 0. + * + * @return 0 on success, error code otherwise + * + * LOCKING: + * the caller has ec_dev->lock mutex or the caller knows there is + * no other command in progress. + */ static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev, u16 cmd, u32 *mask) { @@ -256,7 +306,7 @@ static int cros_ec_get_host_command_version_mask(struct cros_ec_device *ec_dev, pver = (struct ec_params_get_cmd_versions *)msg->data; pver->cmd = cmd; - ret = cros_ec_cmd_xfer(ec_dev, msg); + ret = send_command(ec_dev, msg); if (ret > 0) { rver = (struct ec_response_get_cmd_versions *)msg->data; *mask = rver->version_mask; @@ -371,6 +421,17 @@ int cros_ec_query_all(struct cros_ec_device *ec_dev) else ec_dev->mkbp_event_supported = 1; + /* + * Get host event wake mask, assume all events are wake events + * if unavailable. + */ + ret = cros_ec_get_host_event_wake_mask(ec_dev, proto_msg, + &ec_dev->host_event_wake_mask); + if (ret < 0) + ec_dev->host_event_wake_mask = U32_MAX; + + ret = 0; + exit: kfree(proto_msg); return ret; @@ -486,11 +547,54 @@ static int get_keyboard_state_event(struct cros_ec_device *ec_dev) return ec_dev->event_size; } -int cros_ec_get_next_event(struct cros_ec_device *ec_dev) +int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event) { - if (ec_dev->mkbp_event_supported) - return get_next_event(ec_dev); - else - return get_keyboard_state_event(ec_dev); + u32 host_event; + int ret; + + if (!ec_dev->mkbp_event_supported) { + ret = get_keyboard_state_event(ec_dev); + if (ret < 0) + return ret; + + if (wake_event) + *wake_event = true; + + return ret; + } + + ret = get_next_event(ec_dev); + if (ret < 0) + return ret; + + if (wake_event) { + host_event = cros_ec_get_host_event(ec_dev); + + /* Consider non-host_event as wake event */ + *wake_event = !host_event || + !!(host_event & ec_dev->host_event_wake_mask); + } + + return ret; } EXPORT_SYMBOL(cros_ec_get_next_event); + +u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev) +{ + u32 host_event; + + BUG_ON(!ec_dev->mkbp_event_supported); + + if (ec_dev->event_data.event_type != EC_MKBP_EVENT_HOST_EVENT) + return 0; + + if (ec_dev->event_size != sizeof(host_event)) { + dev_warn(ec_dev->dev, "Invalid host event size\n"); + return 0; + } + + host_event = get_unaligned_le32(&ec_dev->event_data.data.host_event); + + return host_event; +} +EXPORT_SYMBOL(cros_ec_get_host_event); diff --git a/drivers/platform/mips/cpu_hwmon.c b/drivers/platform/mips/cpu_hwmon.c index 4300a558d0f3..322de58eebaf 100644 --- a/drivers/platform/mips/cpu_hwmon.c +++ b/drivers/platform/mips/cpu_hwmon.c @@ -17,17 +17,27 @@ 
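Stepping back to the cros_ec_proto.c hunk above: cros_ec_get_next_event() now takes an optional wake_event out-parameter so a transport can tell wake-worthy events apart from the rest. A minimal sketch of a threaded IRQ handler consuming it (the handler name is hypothetical; cros_ec_get_next_event() is from the patch above, and the dev/event_notifier fields are assumed from the usual struct cros_ec_device layout):

#include <linux/interrupt.h>
#include <linux/mfd/cros_ec.h>
#include <linux/notifier.h>
#include <linux/pm_wakeup.h>

static irqreturn_t my_ec_irq_thread(int irq, void *data)
{
	struct cros_ec_device *ec_dev = data;
	bool wake_event;
	int ret;

	/* Fetch one pending event; wake_event reflects the EC wake mask */
	ret = cros_ec_get_next_event(ec_dev, &wake_event);
	if (ret <= 0)
		return IRQ_NONE;

	/* Attribute the wakeup so an in-flight suspend aborts cleanly */
	if (wake_event)
		pm_wakeup_event(ec_dev->dev, 0);

	/* Hand the event to whoever registered (keyboard, sensors, ...) */
	blocking_notifier_call_chain(&ec_dev->event_notifier, 0, ec_dev);

	return IRQ_HANDLED;
}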
*/ int loongson3_cpu_temp(int cpu) { - u32 reg; + u32 reg, prid_rev; reg = LOONGSON_CHIPTEMP(cpu); - if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) + prid_rev = read_c0_prid() & PRID_REV_MASK; + switch (prid_rev) { + case PRID_REV_LOONGSON3A_R1: reg = (reg >> 8) & 0xff; - else + break; + case PRID_REV_LOONGSON3A_R2: + case PRID_REV_LOONGSON3B_R1: + case PRID_REV_LOONGSON3B_R2: reg = ((reg >> 8) & 0xff) - 100; - + break; + case PRID_REV_LOONGSON3A_R3: + reg = (reg & 0xffff)*731/0x4000 - 273; + break; + } return (int)reg * 1000; } +static int nr_packages; static struct device *cpu_hwmon_dev; static ssize_t get_hwmon_name(struct device *dev, @@ -51,88 +61,74 @@ static ssize_t get_hwmon_name(struct device *dev, return sprintf(buf, "cpu-hwmon\n"); } -static ssize_t get_cpu0_temp(struct device *dev, - struct device_attribute *attr, char *buf); -static ssize_t get_cpu1_temp(struct device *dev, +static ssize_t get_cpu_temp(struct device *dev, struct device_attribute *attr, char *buf); -static ssize_t cpu0_temp_label(struct device *dev, +static ssize_t cpu_temp_label(struct device *dev, struct device_attribute *attr, char *buf); -static ssize_t cpu1_temp_label(struct device *dev, - struct device_attribute *attr, char *buf); - -static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, get_cpu0_temp, NULL, 1); -static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, cpu0_temp_label, NULL, 1); -static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_cpu1_temp, NULL, 2); -static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, cpu1_temp_label, NULL, 2); -static const struct attribute *hwmon_cputemp1[] = { - &sensor_dev_attr_temp1_input.dev_attr.attr, - &sensor_dev_attr_temp1_label.dev_attr.attr, - NULL -}; - -static const struct attribute *hwmon_cputemp2[] = { - &sensor_dev_attr_temp2_input.dev_attr.attr, - &sensor_dev_attr_temp2_label.dev_attr.attr, - NULL +static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, get_cpu_temp, NULL, 1); +static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, cpu_temp_label, NULL, 1); +static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, get_cpu_temp, NULL, 2); +static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, cpu_temp_label, NULL, 2); +static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, get_cpu_temp, NULL, 3); +static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, cpu_temp_label, NULL, 3); +static SENSOR_DEVICE_ATTR(temp4_input, S_IRUGO, get_cpu_temp, NULL, 4); +static SENSOR_DEVICE_ATTR(temp4_label, S_IRUGO, cpu_temp_label, NULL, 4); + +static const struct attribute *hwmon_cputemp[4][3] = { + { + &sensor_dev_attr_temp1_input.dev_attr.attr, + &sensor_dev_attr_temp1_label.dev_attr.attr, + NULL + }, + { + &sensor_dev_attr_temp2_input.dev_attr.attr, + &sensor_dev_attr_temp2_label.dev_attr.attr, + NULL + }, + { + &sensor_dev_attr_temp3_input.dev_attr.attr, + &sensor_dev_attr_temp3_label.dev_attr.attr, + NULL + }, + { + &sensor_dev_attr_temp4_input.dev_attr.attr, + &sensor_dev_attr_temp4_label.dev_attr.attr, + NULL + } }; -static ssize_t cpu0_temp_label(struct device *dev, - struct device_attribute *attr, char *buf) -{ - return sprintf(buf, "CPU 0 Temperature\n"); -} - -static ssize_t cpu1_temp_label(struct device *dev, +static ssize_t cpu_temp_label(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "CPU 1 Temperature\n"); + int id = (to_sensor_dev_attr(attr))->index - 1; + return sprintf(buf, "CPU %d Temperature\n", id); } -static ssize_t get_cpu0_temp(struct device *dev, +static ssize_t get_cpu_temp(struct device *dev, struct device_attribute *attr, char *buf) { - int value = 
loongson3_cpu_temp(0); - return sprintf(buf, "%d\n", value); -} - -static ssize_t get_cpu1_temp(struct device *dev, - struct device_attribute *attr, char *buf) -{ - int value = loongson3_cpu_temp(1); + int id = (to_sensor_dev_attr(attr))->index - 1; + int value = loongson3_cpu_temp(id); return sprintf(buf, "%d\n", value); } static int create_sysfs_cputemp_files(struct kobject *kobj) { - int ret; - - ret = sysfs_create_files(kobj, hwmon_cputemp1); - if (ret) - goto sysfs_create_temp1_fail; - - if (loongson_sysconf.nr_cpus <= loongson_sysconf.cores_per_package) - return 0; + int i, ret = 0; - ret = sysfs_create_files(kobj, hwmon_cputemp2); - if (ret) - goto sysfs_create_temp2_fail; + for (i=0; i<nr_packages; i++) + ret = sysfs_create_files(kobj, hwmon_cputemp[i]); - return 0; - -sysfs_create_temp2_fail: - sysfs_remove_files(kobj, hwmon_cputemp1); - -sysfs_create_temp1_fail: - return -1; + return ret; } static void remove_sysfs_cputemp_files(struct kobject *kobj) { - sysfs_remove_files(&cpu_hwmon_dev->kobj, hwmon_cputemp1); + int i; - if (loongson_sysconf.nr_cpus > loongson_sysconf.cores_per_package) - sysfs_remove_files(&cpu_hwmon_dev->kobj, hwmon_cputemp2); + for (i=0; i<nr_packages; i++) + sysfs_remove_files(kobj, hwmon_cputemp[i]); } #define CPU_THERMAL_THRESHOLD 90000 @@ -140,8 +136,15 @@ static struct delayed_work thermal_work; static void do_thermal_timer(struct work_struct *work) { - int value = loongson3_cpu_temp(0); - if (value <= CPU_THERMAL_THRESHOLD) + int i, value, temp_max = 0; + + for (i=0; i<nr_packages; i++) { + value = loongson3_cpu_temp(i); + if (value > temp_max) + temp_max = value; + } + + if (temp_max <= CPU_THERMAL_THRESHOLD) schedule_delayed_work(&thermal_work, msecs_to_jiffies(5000)); else orderly_poweroff(true); @@ -160,6 +163,9 @@ static int __init loongson_hwmon_init(void) goto fail_hwmon_device_register; } + nr_packages = loongson_sysconf.nr_cpus / + loongson_sysconf.cores_per_package; + ret = sysfs_create_group(&cpu_hwmon_dev->kobj, &cpu_hwmon_attribute_group); if (ret) { diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c index 9866fec78c1c..0831b428c217 100644 --- a/drivers/platform/x86/alienware-wmi.c +++ b/drivers/platform/x86/alienware-wmi.c @@ -604,7 +604,7 @@ static struct attribute *hdmi_attrs[] = { NULL, }; -static struct attribute_group hdmi_attribute_group = { +static const struct attribute_group hdmi_attribute_group = { .name = "hdmi", .attrs = hdmi_attrs, }; @@ -660,7 +660,7 @@ static struct attribute *amplifier_attrs[] = { NULL, }; -static struct attribute_group amplifier_attribute_group = { +static const struct attribute_group amplifier_attribute_group = { .name = "amplifier", .attrs = amplifier_attrs, }; @@ -741,7 +741,7 @@ static struct attribute *deepsleep_attrs[] = { NULL, }; -static struct attribute_group deepsleep_attribute_group = { +static const struct attribute_group deepsleep_attribute_group = { .name = "deepsleep", .attrs = deepsleep_attrs, }; diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 6c7d86074b38..709e3a67391a 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -1433,7 +1433,7 @@ static umode_t asus_hwmon_sysfs_is_visible(struct kobject *kobj, return ok ? 
attr->mode : 0; } -static struct attribute_group hwmon_attribute_group = { +static const struct attribute_group hwmon_attribute_group = { .is_visible = asus_hwmon_sysfs_is_visible, .attrs = hwmon_attributes }; @@ -1821,7 +1821,7 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, return ok ? attr->mode : 0; } -static struct attribute_group platform_attribute_group = { +static const struct attribute_group platform_attribute_group = { .is_visible = asus_sysfs_is_visible, .attrs = platform_attributes }; diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c index e1c2b6d4b24a..a8e4a539e704 100644 --- a/drivers/platform/x86/compal-laptop.c +++ b/drivers/platform/x86/compal-laptop.c @@ -718,7 +718,7 @@ static struct attribute *compal_platform_attrs[] = { &dev_attr_wake_up_mouse.attr, NULL }; -static struct attribute_group compal_platform_attr_group = { +static const struct attribute_group compal_platform_attr_group = { .attrs = compal_platform_attrs }; diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c index c1a852847d02..85de30f93a9c 100644 --- a/drivers/platform/x86/fujitsu-laptop.c +++ b/drivers/platform/x86/fujitsu-laptop.c @@ -317,7 +317,7 @@ static struct attribute *fujitsu_pf_attributes[] = { NULL }; -static struct attribute_group fujitsu_pf_attribute_group = { +static const struct attribute_group fujitsu_pf_attribute_group = { .attrs = fujitsu_pf_attributes }; @@ -695,6 +695,9 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device) if (call_fext_func(device, FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) { led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL); + if (!led) + return -ENOMEM; + led->name = "fujitsu::logolamp"; led->brightness_set_blocking = logolamp_set; led->brightness_get = logolamp_get; @@ -707,6 +710,9 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device) FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) && (call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) { led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL); + if (!led) + return -ENOMEM; + led->name = "fujitsu::kblamps"; led->brightness_set_blocking = kblamps_set; led->brightness_get = kblamps_get; @@ -723,6 +729,9 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device) */ if (call_fext_func(device, FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) { led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL); + if (!led) + return -ENOMEM; + led->name = "fujitsu::radio_led"; led->brightness_set_blocking = radio_led_set; led->brightness_get = radio_led_get; @@ -741,6 +750,9 @@ static int acpi_fujitsu_laptop_leds_register(struct acpi_device *device) (call_fext_func(device, FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) { led = devm_kzalloc(&device->dev, sizeof(*led), GFP_KERNEL); + if (!led) + return -ENOMEM; + led->name = "fujitsu::eco_led"; led->brightness_set_blocking = eco_led_set; led->brightness_get = eco_led_get; diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 527e5d9ab9bf..603fc6050971 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -909,17 +909,94 @@ static const struct dmi_system_id no_hw_rfkill_list[] = { }, }, { + .ident = "Lenovo V310-14IKB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-14IKB"), + }, + }, + { + .ident = "Lenovo V310-14ISK", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + 
DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-14ISK"), + }, + }, + { + .ident = "Lenovo V310-15IKB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15IKB"), + }, + }, + { .ident = "Lenovo V310-15ISK", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15ISK"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V310-15ISK"), + }, + }, + { + .ident = "Lenovo V510-15IKB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo V510-15IKB"), + }, + }, + { + .ident = "Lenovo ideapad 300-15IBR", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300-15IBR"), + }, + }, + { + .ident = "Lenovo ideapad 300-15IKB", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300-15IKB"), + }, + }, + { + .ident = "Lenovo ideapad 300S-11IBR", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 300S-11BR"), + }, + }, + { + .ident = "Lenovo ideapad 310-15ABR", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15ABR"), + }, + }, + { + .ident = "Lenovo ideapad 310-15IAP", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IAP"), }, }, { .ident = "Lenovo ideapad 310-15IKB", .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), - DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IKB"), + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15IKB"), + }, + }, + { + .ident = "Lenovo ideapad 310-15ISK", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad 310-15ISK"), + }, + }, + { + .ident = "Lenovo ideapad Y700-14ISK", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo ideapad Y700-14ISK"), }, }, { diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c b/drivers/platform/x86/intel_telemetry_debugfs.c index 4cc2f4ea0a25..cd21df982abd 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c @@ -710,6 +710,24 @@ static const struct file_operations telem_socstate_ops = { .release = single_release, }; +static int telem_s0ix_res_get(void *data, u64 *val) +{ + u64 s0ix_total_res; + int ret; + + ret = intel_pmc_s0ix_counter_read(&s0ix_total_res); + if (ret) { + pr_err("Failed to read S0ix residency"); + return ret; + } + + *val = s0ix_total_res; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(telem_s0ix_fops, telem_s0ix_res_get, NULL, "%llu\n"); + static int telem_pss_trc_verb_show(struct seq_file *s, void *unused) { u32 verbosity; @@ -938,7 +956,7 @@ static struct notifier_block pm_notifier = { static int __init telemetry_debugfs_init(void) { const struct x86_cpu_id *id; - int err = -ENOMEM; + int err; struct dentry *f; /* Only APL supported for now */ @@ -958,11 +976,10 @@ static int __init telemetry_debugfs_init(void) register_pm_notifier(&pm_notifier); + err = -ENOMEM; debugfs_conf->telemetry_dbg_dir = debugfs_create_dir("telemetry", NULL); - if (!debugfs_conf->telemetry_dbg_dir) { - err = -ENOMEM; + if (!debugfs_conf->telemetry_dbg_dir) goto out_pm; - } f = debugfs_create_file("pss_info", S_IFREG | S_IRUGO, debugfs_conf->telemetry_dbg_dir, NULL, @@ -988,6 +1005,14 @@ static int __init 
telemetry_debugfs_init(void) goto out; } + f = debugfs_create_file("s0ix_residency_usec", S_IFREG | S_IRUGO, + debugfs_conf->telemetry_dbg_dir, + NULL, &telem_s0ix_fops); + if (!f) { + pr_err("s0ix_residency_usec debugfs register failed\n"); + goto out; + } + f = debugfs_create_file("pss_trace_verbosity", S_IFREG | S_IRUGO, debugfs_conf->telemetry_dbg_dir, NULL, &telem_pss_trc_verb_ops); diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index 76b0a58e205b..5c39b3211709 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -437,7 +437,7 @@ static struct attribute *pcc_sysfs_entries[] = { NULL, }; -static struct attribute_group pcc_attr_group = { +static const struct attribute_group pcc_attr_group = { .name = NULL, /* put in device directory */ .attrs = pcc_sysfs_entries, }; diff --git a/drivers/platform/x86/peaq-wmi.c b/drivers/platform/x86/peaq-wmi.c index ca75b4dc437e..77d1f90b0794 100644 --- a/drivers/platform/x86/peaq-wmi.c +++ b/drivers/platform/x86/peaq-wmi.c @@ -51,7 +51,7 @@ static void peaq_wmi_poll(struct input_polled_dev *dev) return; } - if (peaq_ignore_events_counter && --peaq_ignore_events_counter > 0) + if (peaq_ignore_events_counter && --peaq_ignore_events_counter >= 0) return; if (obj.integer.value) { diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c index 5c4dfe48f03d..0c703feaeb88 100644 --- a/drivers/platform/x86/samsung-laptop.c +++ b/drivers/platform/x86/samsung-laptop.c @@ -1232,7 +1232,7 @@ static umode_t samsung_sysfs_is_visible(struct kobject *kobj, return ok ? attr->mode : 0; } -static struct attribute_group platform_attribute_group = { +static const struct attribute_group platform_attribute_group = { .is_visible = samsung_sysfs_is_visible, .attrs = platform_attributes }; diff --git a/drivers/platform/x86/silead_dmi.c b/drivers/platform/x86/silead_dmi.c index 3cd3bdfe51df..1157a7b646d6 100644 --- a/drivers/platform/x86/silead_dmi.c +++ b/drivers/platform/x86/silead_dmi.c @@ -122,6 +122,20 @@ static const struct silead_ts_dmi_data pov_mobii_wintab_p800w_data = { .properties = pov_mobii_wintab_p800w_props, }; +static const struct property_entry itworks_tw891_props[] = { + PROPERTY_ENTRY_U32("touchscreen-size-x", 1600), + PROPERTY_ENTRY_U32("touchscreen-size-y", 890), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-itworks-tw891.fw"), + { } +}; + +static const struct silead_ts_dmi_data itworks_tw891_data = { + .acpi_name = "MSSL1680:00", + .properties = itworks_tw891_props, +}; + static const struct dmi_system_id silead_ts_dmi_table[] = { { /* CUBE iwork8 Air */ @@ -160,6 +174,16 @@ static const struct dmi_system_id silead_ts_dmi_table[] = { }, }, { + /* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */ + .driver_data = (void *)&surftab_wintron70_st70416_6_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Shenzhen PLOYER"), + DMI_MATCH(DMI_PRODUCT_NAME, "MOMO7W"), + /* Exact match, different versions need different fw */ + DMI_MATCH(DMI_BIOS_VERSION, "MOMO.G.WI71C.MABMRBA02"), + }, + }, + { /* GP-electronic T701 */ .driver_data = (void *)&gp_electronic_t701_data, .matches = { @@ -187,6 +211,14 @@ static const struct dmi_system_id silead_ts_dmi_table[] = { DMI_MATCH(DMI_BIOS_DATE, "08/22/2014"), }, }, + { + /* I.T.Works TW891 */ + .driver_data = (void *)&itworks_tw891_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "To be 
filled by O.E.M."), + DMI_MATCH(DMI_PRODUCT_NAME, "TW891"), + }, + }, { }, }; diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c index 88f9f79a7cf6..bb1dcd7fbdeb 100644 --- a/drivers/platform/x86/toshiba_acpi.c +++ b/drivers/platform/x86/toshiba_acpi.c @@ -2419,7 +2419,7 @@ static umode_t toshiba_sysfs_is_visible(struct kobject *kobj, return exists ? attr->mode : 0; } -static struct attribute_group toshiba_attr_group = { +static const struct attribute_group toshiba_attr_group = { .is_visible = toshiba_sysfs_is_visible, .attrs = toshiba_attributes, }; diff --git a/drivers/power/supply/twl4030_charger.c b/drivers/power/supply/twl4030_charger.c index 3de802f169a1..9dff1b4b85fc 100644 --- a/drivers/power/supply/twl4030_charger.c +++ b/drivers/power/supply/twl4030_charger.c @@ -980,10 +980,37 @@ static int twl4030_bci_probe(struct platform_device *pdev) platform_set_drvdata(pdev, bci); + INIT_WORK(&bci->work, twl4030_bci_usb_work); + INIT_DELAYED_WORK(&bci->current_worker, twl4030_current_worker); + bci->channel_vac = devm_iio_channel_get(&pdev->dev, "vac"); if (IS_ERR(bci->channel_vac)) { + ret = PTR_ERR(bci->channel_vac); + if (ret == -EPROBE_DEFER) + return ret; /* iio not ready */ + dev_warn(&pdev->dev, "could not request vac iio channel (%d)", + ret); bci->channel_vac = NULL; - dev_warn(&pdev->dev, "could not request vac iio channel"); + } + + if (bci->dev->of_node) { + struct device_node *phynode; + + phynode = of_find_compatible_node(bci->dev->of_node->parent, + NULL, "ti,twl4030-usb"); + if (phynode) { + bci->usb_nb.notifier_call = twl4030_bci_usb_ncb; + bci->transceiver = devm_usb_get_phy_by_node( + bci->dev, phynode, &bci->usb_nb); + if (IS_ERR(bci->transceiver)) { + ret = PTR_ERR(bci->transceiver); + if (ret == -EPROBE_DEFER) + return ret; /* phy not ready */ + dev_warn(&pdev->dev, "could not request transceiver (%d)", + ret); + bci->transceiver = NULL; + } + } } bci->ac = devm_power_supply_register(&pdev->dev, &twl4030_bci_ac_desc, @@ -1019,20 +1046,6 @@ static int twl4030_bci_probe(struct platform_device *pdev) return ret; } - INIT_WORK(&bci->work, twl4030_bci_usb_work); - INIT_DELAYED_WORK(&bci->current_worker, twl4030_current_worker); - - bci->usb_nb.notifier_call = twl4030_bci_usb_ncb; - if (bci->dev->of_node) { - struct device_node *phynode; - - phynode = of_find_compatible_node(bci->dev->of_node->parent, - NULL, "ti,twl4030-usb"); - if (phynode) - bci->transceiver = devm_usb_get_phy_by_node( - bci->dev, phynode, &bci->usb_nb); - } - /* Enable interrupts now. 
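+ * The work items and the USB notifier are initialised earlier in
+ * probe, so an interrupt firing from here on cannot schedule
+ * uninitialised work.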
*/ reg = ~(u32)(TWL4030_ICHGLOW | TWL4030_ICHGEOC | TWL4030_TBATOR2 | TWL4030_TBATOR1 | TWL4030_BATSTS); diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index a0860b30bd93..1581f6ab1b1f 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -678,7 +678,9 @@ struct pwm_device *of_pwm_get(struct device_node *np, const char *con_id) pc = of_node_to_pwmchip(args.np); if (IS_ERR(pc)) { - pr_err("%s(): PWM chip not found\n", __func__); + if (PTR_ERR(pc) != -EPROBE_DEFER) + pr_err("%s(): PWM chip not found\n", __func__); + pwm = ERR_CAST(pc); goto put; } diff --git a/drivers/pwm/pwm-bfin.c b/drivers/pwm/pwm-bfin.c index d2ed0a2a18e8..a9a88137f2cb 100644 --- a/drivers/pwm/pwm-bfin.c +++ b/drivers/pwm/pwm-bfin.c @@ -118,10 +118,8 @@ static int bfin_pwm_probe(struct platform_device *pdev) int ret; pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL); - if (!pwm) { - dev_err(&pdev->dev, "failed to allocate memory\n"); + if (!pwm) return -ENOMEM; - } platform_set_drvdata(pdev, pwm); diff --git a/drivers/pwm/pwm-cros-ec.c b/drivers/pwm/pwm-cros-ec.c index f6ca4e8c6253..9c13694eaa24 100644 --- a/drivers/pwm/pwm-cros-ec.c +++ b/drivers/pwm/pwm-cros-ec.c @@ -75,8 +75,8 @@ static int __cros_ec_pwm_get_duty(struct cros_ec_device *ec, u8 index, msg->version = 0; msg->command = EC_CMD_PWM_GET_DUTY; - msg->insize = sizeof(*params); - msg->outsize = sizeof(*resp); + msg->insize = sizeof(*resp); + msg->outsize = sizeof(*params); params->pwm_type = EC_PWM_TYPE_GENERIC; params->index = index; diff --git a/drivers/pwm/pwm-hibvt.c b/drivers/pwm/pwm-hibvt.c index d0e8f8542626..8dadc58d6cdf 100644 --- a/drivers/pwm/pwm-hibvt.c +++ b/drivers/pwm/pwm-hibvt.c @@ -165,7 +165,7 @@ static int hibvt_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } -static struct pwm_ops hibvt_pwm_ops = { +static const struct pwm_ops hibvt_pwm_ops = { .get_state = hibvt_pwm_get_state, .apply = hibvt_pwm_apply, diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c index 045ef9fa6fe3..cb845edfe2b4 100644 --- a/drivers/pwm/pwm-meson.c +++ b/drivers/pwm/pwm-meson.c @@ -103,6 +103,7 @@ struct meson_pwm_channel { struct meson_pwm_data { const char * const *parent_names; + unsigned int num_parents; }; struct meson_pwm { @@ -162,7 +163,8 @@ static int meson_pwm_calc(struct meson_pwm *meson, unsigned int duty, unsigned int period) { unsigned int pre_div, cnt, duty_cnt; - unsigned long fin_freq = -1, fin_ns; + unsigned long fin_freq = -1; + u64 fin_ps; if (~(meson->inverter_mask >> id) & 0x1) duty = period - duty; @@ -178,13 +180,15 @@ static int meson_pwm_calc(struct meson_pwm *meson, } dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq); - fin_ns = NSEC_PER_SEC / fin_freq; + fin_ps = (u64)NSEC_PER_SEC * 1000; + do_div(fin_ps, fin_freq); /* Calc pre_div with the period */ for (pre_div = 0; pre_div < MISC_CLK_DIV_MASK; pre_div++) { - cnt = DIV_ROUND_CLOSEST(period, fin_ns * (pre_div + 1)); - dev_dbg(meson->chip.dev, "fin_ns=%lu pre_div=%u cnt=%u\n", - fin_ns, pre_div, cnt); + cnt = DIV_ROUND_CLOSEST_ULL((u64)period * 1000, + fin_ps * (pre_div + 1)); + dev_dbg(meson->chip.dev, "fin_ps=%llu pre_div=%u cnt=%u\n", + fin_ps, pre_div, cnt); if (cnt <= 0xffff) break; } @@ -207,7 +211,8 @@ static int meson_pwm_calc(struct meson_pwm *meson, channel->lo = cnt; } else { /* Then check is we can have the duty with the same pre_div */ - duty_cnt = DIV_ROUND_CLOSEST(duty, fin_ns * (pre_div + 1)); + duty_cnt = DIV_ROUND_CLOSEST_ULL((u64)duty * 1000, + fin_ps * (pre_div + 1)); if (duty_cnt > 0xffff) { 
dev_err(meson->chip.dev, "unable to get duty cycle\n"); return -EINVAL; @@ -381,6 +386,7 @@ static const char * const pwm_meson8b_parent_names[] = { static const struct meson_pwm_data pwm_meson8b_data = { .parent_names = pwm_meson8b_parent_names, + .num_parents = ARRAY_SIZE(pwm_meson8b_parent_names), }; static const char * const pwm_gxbb_parent_names[] = { @@ -389,11 +395,35 @@ static const char * const pwm_gxbb_parent_names[] = { static const struct meson_pwm_data pwm_gxbb_data = { .parent_names = pwm_gxbb_parent_names, + .num_parents = ARRAY_SIZE(pwm_gxbb_parent_names), +}; + +/* + * Only the 2 first inputs of the GXBB AO PWMs are valid + * The last 2 are grounded + */ +static const char * const pwm_gxbb_ao_parent_names[] = { + "xtal", "clk81" +}; + +static const struct meson_pwm_data pwm_gxbb_ao_data = { + .parent_names = pwm_gxbb_ao_parent_names, + .num_parents = ARRAY_SIZE(pwm_gxbb_ao_parent_names), }; static const struct of_device_id meson_pwm_matches[] = { - { .compatible = "amlogic,meson8b-pwm", .data = &pwm_meson8b_data }, - { .compatible = "amlogic,meson-gxbb-pwm", .data = &pwm_gxbb_data }, + { + .compatible = "amlogic,meson8b-pwm", + .data = &pwm_meson8b_data + }, + { + .compatible = "amlogic,meson-gxbb-pwm", + .data = &pwm_gxbb_data + }, + { + .compatible = "amlogic,meson-gxbb-ao-pwm", + .data = &pwm_gxbb_ao_data + }, {}, }; MODULE_DEVICE_TABLE(of, meson_pwm_matches); @@ -417,7 +447,7 @@ static int meson_pwm_init_channels(struct meson_pwm *meson, init.ops = &clk_mux_ops; init.flags = CLK_IS_BASIC; init.parent_names = meson->data->parent_names; - init.num_parents = 1 << MISC_CLK_SEL_WIDTH; + init.num_parents = meson->data->num_parents; channel->mux.reg = meson->base + REG_MISC_AB; channel->mux.shift = mux_reg_shifts[i]; diff --git a/drivers/pwm/pwm-sun4i.c b/drivers/pwm/pwm-sun4i.c index 1284ffa05921..6d23f1d1c9b7 100644 --- a/drivers/pwm/pwm-sun4i.c +++ b/drivers/pwm/pwm-sun4i.c @@ -8,8 +8,10 @@ #include <linux/bitops.h> #include <linux/clk.h> +#include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> +#include <linux/jiffies.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> @@ -44,6 +46,10 @@ #define PWM_DTY_MASK GENMASK(15, 0) +#define PWM_REG_PRD(reg) ((((reg) >> 16) & PWM_PRD_MASK) + 1) +#define PWM_REG_DTY(reg) ((reg) & PWM_DTY_MASK) +#define PWM_REG_PRESCAL(reg, chan) (((reg) >> ((chan) * PWMCH_OFFSET)) & PWM_PRESCAL_MASK) + #define BIT_CH(bit, chan) ((bit) << ((chan) * PWMCH_OFFSET)) static const u32 prescaler_table[] = { @@ -77,6 +83,8 @@ struct sun4i_pwm_chip { void __iomem *base; spinlock_t ctrl_lock; const struct sun4i_pwm_data *data; + unsigned long next_period[2]; + bool needs_delay[2]; }; static inline struct sun4i_pwm_chip *to_sun4i_pwm_chip(struct pwm_chip *chip) @@ -96,26 +104,65 @@ static inline void sun4i_pwm_writel(struct sun4i_pwm_chip *chip, writel(val, chip->base + offset); } -static int sun4i_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, - int duty_ns, int period_ns) +static void sun4i_pwm_get_state(struct pwm_chip *chip, + struct pwm_device *pwm, + struct pwm_state *state) { struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip); - u32 prd, dty, val, clk_gate; + u64 clk_rate, tmp; + u32 val; + unsigned int prescaler; + + clk_rate = clk_get_rate(sun4i_pwm->clk); + + val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); + + if ((val == PWM_PRESCAL_MASK) && sun4i_pwm->data->has_prescaler_bypass) + prescaler = 1; + else + prescaler = prescaler_table[PWM_REG_PRESCAL(val, pwm->hwpwm)]; + + if (prescaler == 
0) + return; + + if (val & BIT_CH(PWM_ACT_STATE, pwm->hwpwm)) + state->polarity = PWM_POLARITY_NORMAL; + else + state->polarity = PWM_POLARITY_INVERSED; + + if (val & BIT_CH(PWM_CLK_GATING | PWM_EN, pwm->hwpwm)) + state->enabled = true; + else + state->enabled = false; + + val = sun4i_pwm_readl(sun4i_pwm, PWM_CH_PRD(pwm->hwpwm)); + + tmp = prescaler * NSEC_PER_SEC * PWM_REG_DTY(val); + state->duty_cycle = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate); + + tmp = prescaler * NSEC_PER_SEC * PWM_REG_PRD(val); + state->period = DIV_ROUND_CLOSEST_ULL(tmp, clk_rate); +} + +static int sun4i_pwm_calculate(struct sun4i_pwm_chip *sun4i_pwm, + struct pwm_state *state, + u32 *dty, u32 *prd, unsigned int *prsclr) +{ u64 clk_rate, div = 0; - unsigned int prescaler = 0; - int err; + unsigned int pval, prescaler = 0; clk_rate = clk_get_rate(sun4i_pwm->clk); if (sun4i_pwm->data->has_prescaler_bypass) { /* First, test without any prescaler when available */ prescaler = PWM_PRESCAL_MASK; + pval = 1; /* * When not using any prescaler, the clock period in nanoseconds * is not an integer so round it half up instead of * truncating to get less surprising values. */ - div = clk_rate * period_ns + NSEC_PER_SEC / 2; + div = clk_rate * state->period + NSEC_PER_SEC / 2; do_div(div, NSEC_PER_SEC); if (div - 1 > PWM_PRD_MASK) prescaler = 0; @@ -126,137 +173,141 @@ static int sun4i_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm, for (prescaler = 0; prescaler < PWM_PRESCAL_MASK; prescaler++) { if (!prescaler_table[prescaler]) continue; + pval = prescaler_table[prescaler]; div = clk_rate; - do_div(div, prescaler_table[prescaler]); - div = div * period_ns; + do_div(div, pval); + div = div * state->period; do_div(div, NSEC_PER_SEC); if (div - 1 <= PWM_PRD_MASK) break; } - if (div - 1 > PWM_PRD_MASK) { - dev_err(chip->dev, "period exceeds the maximum value\n"); + if (div - 1 > PWM_PRD_MASK) return -EINVAL; - } - } - - prd = div; - div *= duty_ns; - do_div(div, period_ns); - dty = div; - - err = clk_prepare_enable(sun4i_pwm->clk); - if (err) { - dev_err(chip->dev, "failed to enable PWM clock\n"); - return err; - } - - spin_lock(&sun4i_pwm->ctrl_lock); - val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); - - if (sun4i_pwm->data->has_rdy && (val & PWM_RDY(pwm->hwpwm))) { - spin_unlock(&sun4i_pwm->ctrl_lock); - clk_disable_unprepare(sun4i_pwm->clk); - return -EBUSY; - } - - clk_gate = val & BIT_CH(PWM_CLK_GATING, pwm->hwpwm); - if (clk_gate) { - val &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm); - sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG); } - val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); - val &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm); - val |= BIT_CH(prescaler, pwm->hwpwm); - sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG); - - val = (dty & PWM_DTY_MASK) | PWM_PRD(prd); - sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm)); + *prd = div; + div *= state->duty_cycle; + do_div(div, state->period); + *dty = div; + *prsclr = prescaler; - if (clk_gate) { - val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); - val |= clk_gate; - sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG); - } + div = (u64)pval * NSEC_PER_SEC * *prd; + state->period = DIV_ROUND_CLOSEST_ULL(div, clk_rate); - spin_unlock(&sun4i_pwm->ctrl_lock); - clk_disable_unprepare(sun4i_pwm->clk); + div = (u64)pval * NSEC_PER_SEC * *dty; + state->duty_cycle = DIV_ROUND_CLOSEST_ULL(div, clk_rate); return 0; } -static int sun4i_pwm_set_polarity(struct pwm_chip *chip, struct pwm_device *pwm, - enum pwm_polarity polarity) +static int sun4i_pwm_apply(struct pwm_chip *chip, struct 
pwm_device *pwm, + struct pwm_state *state) { struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip); - u32 val; + struct pwm_state cstate; + u32 ctrl; int ret; + unsigned int delay_us; + unsigned long now; - ret = clk_prepare_enable(sun4i_pwm->clk); - if (ret) { - dev_err(chip->dev, "failed to enable PWM clock\n"); - return ret; + pwm_get_state(pwm, &cstate); + + if (!cstate.enabled) { + ret = clk_prepare_enable(sun4i_pwm->clk); + if (ret) { + dev_err(chip->dev, "failed to enable PWM clock\n"); + return ret; + } } spin_lock(&sun4i_pwm->ctrl_lock); - val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); + ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); - if (polarity != PWM_POLARITY_NORMAL) - val &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm); - else - val |= BIT_CH(PWM_ACT_STATE, pwm->hwpwm); + if ((cstate.period != state->period) || + (cstate.duty_cycle != state->duty_cycle)) { + u32 period, duty, val; + unsigned int prescaler; - sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG); + ret = sun4i_pwm_calculate(sun4i_pwm, state, + &duty, &period, &prescaler); + if (ret) { + dev_err(chip->dev, "period exceeds the maximum value\n"); + spin_unlock(&sun4i_pwm->ctrl_lock); + if (!cstate.enabled) + clk_disable_unprepare(sun4i_pwm->clk); + return ret; + } - spin_unlock(&sun4i_pwm->ctrl_lock); - clk_disable_unprepare(sun4i_pwm->clk); + if (PWM_REG_PRESCAL(ctrl, pwm->hwpwm) != prescaler) { + /* Prescaler changed, the clock has to be gated */ + ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm); + sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG); - return 0; -} + ctrl &= ~BIT_CH(PWM_PRESCAL_MASK, pwm->hwpwm); + ctrl |= BIT_CH(prescaler, pwm->hwpwm); + } -static int sun4i_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip); - u32 val; - int ret; + val = (duty & PWM_DTY_MASK) | PWM_PRD(period); + sun4i_pwm_writel(sun4i_pwm, val, PWM_CH_PRD(pwm->hwpwm)); + sun4i_pwm->next_period[pwm->hwpwm] = jiffies + + usecs_to_jiffies(cstate.period / 1000 + 1); + sun4i_pwm->needs_delay[pwm->hwpwm] = true; + } - ret = clk_prepare_enable(sun4i_pwm->clk); - if (ret) { - dev_err(chip->dev, "failed to enable PWM clock\n"); - return ret; + if (state->polarity != PWM_POLARITY_NORMAL) + ctrl &= ~BIT_CH(PWM_ACT_STATE, pwm->hwpwm); + else + ctrl |= BIT_CH(PWM_ACT_STATE, pwm->hwpwm); + + ctrl |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm); + if (state->enabled) { + ctrl |= BIT_CH(PWM_EN, pwm->hwpwm); + } else if (!sun4i_pwm->needs_delay[pwm->hwpwm]) { + ctrl &= ~BIT_CH(PWM_EN, pwm->hwpwm); + ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm); } - spin_lock(&sun4i_pwm->ctrl_lock); - val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); - val |= BIT_CH(PWM_EN, pwm->hwpwm); - val |= BIT_CH(PWM_CLK_GATING, pwm->hwpwm); - sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG); + sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG); + spin_unlock(&sun4i_pwm->ctrl_lock); - return 0; -} + if (state->enabled) + return 0; -static void sun4i_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm) -{ - struct sun4i_pwm_chip *sun4i_pwm = to_sun4i_pwm_chip(chip); - u32 val; + if (!sun4i_pwm->needs_delay[pwm->hwpwm]) { + clk_disable_unprepare(sun4i_pwm->clk); + return 0; + } + + /* We need a full period to elapse before disabling the channel. 
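+ * Gating the clock before next_period would cut off a PRD/DTY update
+ * that has not yet taken effect in hardware.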
*/ + now = jiffies; + if (sun4i_pwm->needs_delay[pwm->hwpwm] && + time_before(now, sun4i_pwm->next_period[pwm->hwpwm])) { + delay_us = jiffies_to_usecs(sun4i_pwm->next_period[pwm->hwpwm] - + now); + if ((delay_us / 500) > MAX_UDELAY_MS) + msleep(delay_us / 1000 + 1); + else + usleep_range(delay_us, delay_us * 2); + } + sun4i_pwm->needs_delay[pwm->hwpwm] = false; spin_lock(&sun4i_pwm->ctrl_lock); - val = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); - val &= ~BIT_CH(PWM_EN, pwm->hwpwm); - val &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm); - sun4i_pwm_writel(sun4i_pwm, val, PWM_CTRL_REG); + ctrl = sun4i_pwm_readl(sun4i_pwm, PWM_CTRL_REG); + ctrl &= ~BIT_CH(PWM_CLK_GATING, pwm->hwpwm); + ctrl &= ~BIT_CH(PWM_EN, pwm->hwpwm); + sun4i_pwm_writel(sun4i_pwm, ctrl, PWM_CTRL_REG); spin_unlock(&sun4i_pwm->ctrl_lock); clk_disable_unprepare(sun4i_pwm->clk); + + return 0; } static const struct pwm_ops sun4i_pwm_ops = { - .config = sun4i_pwm_config, - .set_polarity = sun4i_pwm_set_polarity, - .enable = sun4i_pwm_enable, - .disable = sun4i_pwm_disable, + .apply = sun4i_pwm_apply, + .get_state = sun4i_pwm_get_state, .owner = THIS_MODULE, }; @@ -316,8 +367,7 @@ static int sun4i_pwm_probe(struct platform_device *pdev) { struct sun4i_pwm_chip *pwm; struct resource *res; - u32 val; - int i, ret; + int ret; const struct of_device_id *match; match = of_match_device(sun4i_pwm_dt_ids, &pdev->dev); @@ -353,24 +403,7 @@ static int sun4i_pwm_probe(struct platform_device *pdev) platform_set_drvdata(pdev, pwm); - ret = clk_prepare_enable(pwm->clk); - if (ret) { - dev_err(&pdev->dev, "failed to enable PWM clock\n"); - goto clk_error; - } - - val = sun4i_pwm_readl(pwm, PWM_CTRL_REG); - for (i = 0; i < pwm->chip.npwm; i++) - if (!(val & BIT_CH(PWM_ACT_STATE, i))) - pwm_set_polarity(&pwm->chip.pwms[i], - PWM_POLARITY_INVERSED); - clk_disable_unprepare(pwm->clk); - return 0; - -clk_error: - pwmchip_remove(&pwm->chip); - return ret; } static int sun4i_pwm_remove(struct platform_device *pdev) diff --git a/drivers/pwm/pwm-tegra.c b/drivers/pwm/pwm-tegra.c index 8c6ed556db28..e9b33f09ff09 100644 --- a/drivers/pwm/pwm-tegra.c +++ b/drivers/pwm/pwm-tegra.c @@ -41,6 +41,9 @@ struct tegra_pwm_soc { unsigned int num_channels; + + /* Maximum IP frequency for given SoCs */ + unsigned long max_frequency; }; struct tegra_pwm_chip { @@ -201,7 +204,18 @@ static int tegra_pwm_probe(struct platform_device *pdev) if (IS_ERR(pwm->clk)) return PTR_ERR(pwm->clk); - /* Read PWM clock rate from source */ + /* Set maximum frequency of the IP */ + ret = clk_set_rate(pwm->clk, pwm->soc->max_frequency); + if (ret < 0) { + dev_err(&pdev->dev, "Failed to set max frequency: %d\n", ret); + return ret; + } + + /* + * The requested and configured frequency may differ due to + * clock register resolutions. Get the configured frequency + * so that PWM period can be calculated more accurately. 
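+ * (For instance, the 48 MHz rate requested for Tegra20 below may come
+ * back slightly rounded from the clock tree.)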
+ */ pwm->clk_rate = clk_get_rate(pwm->clk); pwm->rst = devm_reset_control_get(&pdev->dev, "pwm"); @@ -273,10 +287,12 @@ static int tegra_pwm_resume(struct device *dev) static const struct tegra_pwm_soc tegra20_pwm_soc = { .num_channels = 4, + .max_frequency = 48000000UL, }; static const struct tegra_pwm_soc tegra186_pwm_soc = { .num_channels = 1, + .max_frequency = 102000000UL, }; static const struct of_device_id tegra_pwm_of_match[] = { diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index 8d3b95728326..72419ac2c52a 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -77,6 +77,14 @@ config RTC_DEBUG Say yes here to enable debugging support in the RTC framework and individual RTC drivers. +config RTC_NVMEM + bool "RTC non-volatile storage support" + select NVMEM + default RTC_CLASS + help + Say yes here to add support for the non-volatile (often + battery-backed) storage present on RTCs. + comment "RTC interfaces" config RTC_INTF_SYSFS @@ -197,6 +205,17 @@ config RTC_DRV_AC100 This driver can also be built as a module. If so, the module will be called rtc-ac100. +config RTC_DRV_BRCMSTB + tristate "Broadcom STB wake-timer" + depends on ARCH_BRCMSTB || BMIPS_GENERIC || COMPILE_TEST + default ARCH_BRCMSTB || BMIPS_GENERIC + help + If you say yes here you get support for the wake-timer found on + Broadcom STB SoCs (BCM7xxx). + + This driver can also be built as a module. If so, the module will + be called rtc-brcmstb-waketimer. + config RTC_DRV_AS3722 tristate "ams AS3722 RTC driver" depends on MFD_AS3722 @@ -791,6 +810,14 @@ config RTC_DRV_DS3232 This driver can also be built as a module. If so, the module will be called rtc-ds3232. +config RTC_DRV_DS3232_HWMON + bool "HWMON support for Dallas/Maxim DS3232/DS3234" + depends on RTC_DRV_DS3232 && HWMON && !(RTC_DRV_DS3232=y && HWMON=m) + default y + help + Say Y here if you want to expose temperature sensor data on + rtc-ds3232. + config RTC_DRV_PCF2127 tristate "NXP PCF2127" depends on RTC_I2C_AND_SPI @@ -1484,16 +1511,16 @@ config RTC_DRV_ARMADA38X This driver can also be built as a module. If so, the module will be called armada38x-rtc. -config RTC_DRV_GEMINI - tristate "Gemini SoC RTC" - depends on ARCH_GEMINI || COMPILE_TEST +config RTC_DRV_FTRTC010 + tristate "Faraday Technology FTRTC010 RTC" depends on HAS_IOMEM + default ARCH_GEMINI help If you say Y here you will get support for the - RTC found on Gemini SoC's. + Faraday Technology FTRTC010 found on e.g. Gemini SoCs. This driver can also be built as a module. If so, the module - will be called rtc-gemini. + will be called rtc-ftrtc010.
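Tying the new RTC_NVMEM symbol to the code it enables: it builds drivers/rtc/nvmem.c (added later in this patch), which registers an nvmem device from fields a driver fills in before registering. A condensed, hypothetical driver fragment showing the two-step registration it pairs with (the my_* names and storage callbacks are invented; devm_rtc_allocate_device() and rtc_register_device() are introduced by this series, and the nvmem_config callback signatures are assumed from linux/nvmem-provider.h):

#include <linux/nvmem-provider.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>

static int my_nvram_read(void *priv, unsigned int off, void *val, size_t bytes)
{
	/* copy 'bytes' bytes of battery-backed storage into 'val' */
	return 0;
}

static int my_nvram_write(void *priv, unsigned int off, void *val, size_t bytes)
{
	/* mirror of the read path */
	return 0;
}

static struct nvmem_config my_nvmem_config = {
	.name = "my_rtc_nvram",
	.size = 64,
	.reg_read = my_nvram_read,
	.reg_write = my_nvram_write,
};

static int my_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;

	rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc))
		return PTR_ERR(rtc);

	rtc->ops = &my_rtc_ops;		/* driver's rtc_class_ops, assumed */
	/* rtc_register_device() will call rtc_nvmem_register() for us */
	rtc->nvmem_config = &my_nvmem_config;
	rtc->nvram_old_abi = true;	/* keep the deprecated sysfs "nvram" file */

	return rtc_register_device(rtc);
}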
config RTC_DRV_PS3 tristate "PS3 RTC" diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 13857d2fce09..acd366b41c85 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile @@ -15,6 +15,7 @@ ifdef CONFIG_RTC_DRV_EFI rtc-core-y += rtc-efi-platform.o endif +rtc-core-$(CONFIG_RTC_NVMEM) += nvmem.o rtc-core-$(CONFIG_RTC_INTF_DEV) += rtc-dev.o rtc-core-$(CONFIG_RTC_INTF_PROC) += rtc-proc.o rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o @@ -36,6 +37,7 @@ obj-$(CONFIG_RTC_DRV_AT91RM9200)+= rtc-at91rm9200.o obj-$(CONFIG_RTC_DRV_AT91SAM9) += rtc-at91sam9.o obj-$(CONFIG_RTC_DRV_AU1XXX) += rtc-au1xxx.o obj-$(CONFIG_RTC_DRV_BFIN) += rtc-bfin.o +obj-$(CONFIG_RTC_DRV_BRCMSTB) += rtc-brcmstb-waketimer.o obj-$(CONFIG_RTC_DRV_BQ32K) += rtc-bq32k.o obj-$(CONFIG_RTC_DRV_BQ4802) += rtc-bq4802.o obj-$(CONFIG_RTC_DRV_CMOS) += rtc-cmos.o @@ -67,7 +69,7 @@ obj-$(CONFIG_RTC_DRV_EFI) += rtc-efi.o obj-$(CONFIG_RTC_DRV_EM3027) += rtc-em3027.o obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o -obj-$(CONFIG_RTC_DRV_GEMINI) += rtc-gemini.o +obj-$(CONFIG_RTC_DRV_FTRTC010) += rtc-ftrtc010.o obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o obj-$(CONFIG_RTC_DRV_HID_SENSOR_TIME) += rtc-hid-sensor-time.o obj-$(CONFIG_RTC_DRV_HYM8563) += rtc-hym8563.o diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 5fb439897fe1..2ed970d61da1 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -150,59 +150,19 @@ static SIMPLE_DEV_PM_OPS(rtc_class_dev_pm_ops, rtc_suspend, rtc_resume); #define RTC_CLASS_DEV_PM_OPS NULL #endif - -/** - * rtc_device_register - register w/ RTC class - * @dev: the device to register - * - * rtc_device_unregister() must be called when the class device is no - * longer needed. - * - * Returns the pointer to the new struct class device. 
- */ -struct rtc_device *rtc_device_register(const char *name, struct device *dev, - const struct rtc_class_ops *ops, - struct module *owner) +/* Ensure the caller will set the id before releasing the device */ +static struct rtc_device *rtc_allocate_device(void) { struct rtc_device *rtc; - struct rtc_wkalrm alrm; - int of_id = -1, id = -1, err; - - if (dev->of_node) - of_id = of_alias_get_id(dev->of_node, "rtc"); - else if (dev->parent && dev->parent->of_node) - of_id = of_alias_get_id(dev->parent->of_node, "rtc"); - if (of_id >= 0) { - id = ida_simple_get(&rtc_ida, of_id, of_id + 1, - GFP_KERNEL); - if (id < 0) - dev_warn(dev, "/aliases ID %d not available\n", - of_id); - } - - if (id < 0) { - id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL); - if (id < 0) { - err = id; - goto exit; - } - } - - rtc = kzalloc(sizeof(struct rtc_device), GFP_KERNEL); - if (rtc == NULL) { - err = -ENOMEM; - goto exit_ida; - } + rtc = kzalloc(sizeof(*rtc), GFP_KERNEL); + if (!rtc) + return NULL; device_initialize(&rtc->dev); - rtc->id = id; - rtc->ops = ops; - rtc->owner = owner; rtc->irq_freq = 1; rtc->max_user_freq = 64; - rtc->dev.parent = dev; rtc->dev.class = rtc_class; rtc->dev.groups = rtc_get_dev_attribute_groups(); rtc->dev.release = rtc_device_release; @@ -224,7 +184,64 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev, rtc->pie_timer.function = rtc_pie_update_irq; rtc->pie_enabled = 0; - strlcpy(rtc->name, name, RTC_DEVICE_NAME_SIZE); + return rtc; +} + +static int rtc_device_get_id(struct device *dev) +{ + int of_id = -1, id = -1; + + if (dev->of_node) + of_id = of_alias_get_id(dev->of_node, "rtc"); + else if (dev->parent && dev->parent->of_node) + of_id = of_alias_get_id(dev->parent->of_node, "rtc"); + + if (of_id >= 0) { + id = ida_simple_get(&rtc_ida, of_id, of_id + 1, GFP_KERNEL); + if (id < 0) + dev_warn(dev, "/aliases ID %d not available\n", of_id); + } + + if (id < 0) + id = ida_simple_get(&rtc_ida, 0, 0, GFP_KERNEL); + + return id; +} + +/** + * rtc_device_register - register w/ RTC class + * @dev: the device to register + * + * rtc_device_unregister() must be called when the class device is no + * longer needed. + * + * Returns the pointer to the new struct class device. 
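+ *
+ * Drivers can instead use the two-step interface added below:
+ * allocate with devm_rtc_allocate_device(), set rtc->ops, then call
+ * rtc_register_device().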
+ */ +struct rtc_device *rtc_device_register(const char *name, struct device *dev, + const struct rtc_class_ops *ops, + struct module *owner) +{ + struct rtc_device *rtc; + struct rtc_wkalrm alrm; + int id, err; + + id = rtc_device_get_id(dev); + if (id < 0) { + err = id; + goto exit; + } + + rtc = rtc_allocate_device(); + if (!rtc) { + err = -ENOMEM; + goto exit_ida; + } + + rtc->id = id; + rtc->ops = ops; + rtc->owner = owner; + rtc->dev.parent = dev; + dev_set_name(&rtc->dev, "rtc%d", id); /* Check to see if there is an ALARM already set in hw */ @@ -238,20 +255,20 @@ struct rtc_device *rtc_device_register(const char *name, struct device *dev, err = cdev_device_add(&rtc->char_dev, &rtc->dev); if (err) { dev_warn(&rtc->dev, "%s: failed to add char device %d:%d\n", - rtc->name, MAJOR(rtc->dev.devt), rtc->id); + name, MAJOR(rtc->dev.devt), rtc->id); /* This will free both memory and the ID */ put_device(&rtc->dev); goto exit; } else { - dev_dbg(&rtc->dev, "%s: dev (%d:%d)\n", rtc->name, + dev_dbg(&rtc->dev, "%s: dev (%d:%d)\n", name, MAJOR(rtc->dev.devt), rtc->id); } rtc_proc_add_device(rtc); dev_info(dev, "rtc core: registered %s as %s\n", - rtc->name, dev_name(&rtc->dev)); + name, dev_name(&rtc->dev)); return rtc; @@ -273,6 +290,8 @@ EXPORT_SYMBOL_GPL(rtc_device_register); */ void rtc_device_unregister(struct rtc_device *rtc) { + rtc_nvmem_unregister(rtc); + mutex_lock(&rtc->ops_lock); /* * Remove innards of this RTC, then disable it, before @@ -356,6 +375,91 @@ void devm_rtc_device_unregister(struct device *dev, struct rtc_device *rtc) } EXPORT_SYMBOL_GPL(devm_rtc_device_unregister); +static void devm_rtc_release_device(struct device *dev, void *res) +{ + struct rtc_device *rtc = *(struct rtc_device **)res; + + if (rtc->registered) + rtc_device_unregister(rtc); + else + put_device(&rtc->dev); +} + +struct rtc_device *devm_rtc_allocate_device(struct device *dev) +{ + struct rtc_device **ptr, *rtc; + int id, err; + + id = rtc_device_get_id(dev); + if (id < 0) + return ERR_PTR(id); + + ptr = devres_alloc(devm_rtc_release_device, sizeof(*ptr), GFP_KERNEL); + if (!ptr) { + err = -ENOMEM; + goto exit_ida; + } + + rtc = rtc_allocate_device(); + if (!rtc) { + err = -ENOMEM; + goto exit_devres; + } + + *ptr = rtc; + devres_add(dev, ptr); + + rtc->id = id; + rtc->dev.parent = dev; + dev_set_name(&rtc->dev, "rtc%d", id); + + return rtc; + +exit_devres: + devres_free(ptr); +exit_ida: + ida_simple_remove(&rtc_ida, id); + return ERR_PTR(err); +} +EXPORT_SYMBOL_GPL(devm_rtc_allocate_device); + +int __rtc_register_device(struct module *owner, struct rtc_device *rtc) +{ + struct rtc_wkalrm alrm; + int err; + + if (!rtc->ops) + return -EINVAL; + + rtc->owner = owner; + + /* Check to see if there is an ALARM already set in hw */ + err = __rtc_read_alarm(rtc, &alrm); + if (!err && !rtc_valid_tm(&alrm.time)) + rtc_initialize_alarm(rtc, &alrm); + + rtc_dev_prepare(rtc); + + err = cdev_device_add(&rtc->char_dev, &rtc->dev); + if (err) + dev_warn(rtc->dev.parent, "failed to add char device %d:%d\n", + MAJOR(rtc->dev.devt), rtc->id); + else + dev_dbg(rtc->dev.parent, "char device (%d:%d)\n", + MAJOR(rtc->dev.devt), rtc->id); + + rtc_proc_add_device(rtc); + + rtc_nvmem_register(rtc); + + rtc->registered = true; + dev_info(rtc->dev.parent, "registered as %s\n", + dev_name(&rtc->dev)); + + return 0; +} +EXPORT_SYMBOL_GPL(__rtc_register_device); + static int __init rtc_init(void) { rtc_class = class_create(THIS_MODULE, "rtc"); diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c index 
fc0fa7577636..8cec9a02c0b8 100644 --- a/drivers/rtc/interface.c +++ b/drivers/rtc/interface.c @@ -227,6 +227,13 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) missing = year; } + /* Can't proceed if alarm is still invalid after replacing + * missing fields. + */ + err = rtc_valid_tm(&alarm->time); + if (err) + goto done; + /* with luck, no rollover is needed */ t_now = rtc_tm_to_time64(&now); t_alm = rtc_tm_to_time64(&alarm->time); @@ -278,9 +285,9 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm) dev_warn(&rtc->dev, "alarm rollover not handled\n"); } -done: err = rtc_valid_tm(&alarm->time); +done: if (err) { dev_warn(&rtc->dev, "invalid alarm value: %d-%d-%d %d:%d:%d\n", alarm->time.tm_year + 1900, alarm->time.tm_mon + 1, diff --git a/drivers/rtc/nvmem.c b/drivers/rtc/nvmem.c new file mode 100644 index 000000000000..8567b4ed9ac6 --- /dev/null +++ b/drivers/rtc/nvmem.c @@ -0,0 +1,113 @@ +/* + * RTC subsystem, nvmem interface + * + * Copyright (C) 2017 Alexandre Belloni + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/err.h> +#include <linux/types.h> +#include <linux/nvmem-consumer.h> +#include <linux/rtc.h> +#include <linux/sysfs.h> + +#include "rtc-core.h" + +/* + * Deprecated ABI compatibility, this should be removed at some point + */ + +static const char nvram_warning[] = "Deprecated ABI, please use nvmem"; + +static ssize_t +rtc_nvram_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct rtc_device *rtc = attr->private; + + dev_warn_once(kobj_to_dev(kobj), nvram_warning); + + return nvmem_device_read(rtc->nvmem, off, count, buf); +} + +static ssize_t +rtc_nvram_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct rtc_device *rtc = attr->private; + + dev_warn_once(kobj_to_dev(kobj), nvram_warning); + + return nvmem_device_write(rtc->nvmem, off, count, buf); +} + +static int rtc_nvram_register(struct rtc_device *rtc) +{ + int err; + + rtc->nvram = devm_kzalloc(rtc->dev.parent, + sizeof(struct bin_attribute), + GFP_KERNEL); + if (!rtc->nvram) + return -ENOMEM; + + rtc->nvram->attr.name = "nvram"; + rtc->nvram->attr.mode = 0644; + rtc->nvram->private = rtc; + + sysfs_bin_attr_init(rtc->nvram); + + rtc->nvram->read = rtc_nvram_read; + rtc->nvram->write = rtc_nvram_write; + rtc->nvram->size = rtc->nvmem_config->size; + + err = sysfs_create_bin_file(&rtc->dev.parent->kobj, + rtc->nvram); + if (err) { + devm_kfree(rtc->dev.parent, rtc->nvram); + rtc->nvram = NULL; + } + + return err; +} + +static void rtc_nvram_unregister(struct rtc_device *rtc) +{ + sysfs_remove_bin_file(&rtc->dev.parent->kobj, rtc->nvram); +} + +/* + * New ABI, uses nvmem + */ +void rtc_nvmem_register(struct rtc_device *rtc) +{ + if (!rtc->nvmem_config) + return; + + rtc->nvmem_config->dev = &rtc->dev; + rtc->nvmem_config->owner = rtc->owner; + rtc->nvmem = nvmem_register(rtc->nvmem_config); + if (IS_ERR_OR_NULL(rtc->nvmem)) + return; + + /* Register the old ABI */ + if (rtc->nvram_old_abi) + rtc_nvram_register(rtc); +} + +void rtc_nvmem_unregister(struct rtc_device *rtc) +{ + if (IS_ERR_OR_NULL(rtc->nvmem)) + return; + + /* unregister the old ABI */ + if (rtc->nvram) + rtc_nvram_unregister(rtc); + + nvmem_unregister(rtc->nvmem); +} diff --git 
a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c index b60fd477778f..e221b78b6f10 100644 --- a/drivers/rtc/rtc-at91rm9200.c +++ b/drivers/rtc/rtc-at91rm9200.c @@ -409,6 +409,11 @@ static int __init at91_rtc_probe(struct platform_device *pdev) return -ENOMEM; } + rtc = devm_rtc_allocate_device(&pdev->dev); + if (IS_ERR(rtc)) + return PTR_ERR(rtc); + platform_set_drvdata(pdev, rtc); + sclk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(sclk)) return PTR_ERR(sclk); @@ -441,13 +446,10 @@ static int __init at91_rtc_probe(struct platform_device *pdev) if (!device_can_wakeup(&pdev->dev)) device_init_wakeup(&pdev->dev, 1); - rtc = devm_rtc_device_register(&pdev->dev, pdev->name, - &at91_rtc_ops, THIS_MODULE); - if (IS_ERR(rtc)) { - ret = PTR_ERR(rtc); + rtc->ops = &at91_rtc_ops; + ret = rtc_register_device(rtc); + if (ret) goto err_clk; - } - platform_set_drvdata(pdev, rtc); /* enable SECEV interrupt in order to initialize at91_rtc_upd_rdy * completion. diff --git a/drivers/rtc/rtc-brcmstb-waketimer.c b/drivers/rtc/rtc-brcmstb-waketimer.c new file mode 100644 index 000000000000..796ac792a381 --- /dev/null +++ b/drivers/rtc/rtc-brcmstb-waketimer.c @@ -0,0 +1,330 @@ +/* + * Copyright © 2014-2017 Broadcom + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/clk.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irqreturn.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm.h> +#include <linux/pm_wakeup.h> +#include <linux/reboot.h> +#include <linux/rtc.h> +#include <linux/stat.h> +#include <linux/suspend.h> + +struct brcmstb_waketmr { + struct rtc_device *rtc; + struct device *dev; + void __iomem *base; + int irq; + struct notifier_block reboot_notifier; + struct clk *clk; + u32 rate; +}; + +#define BRCMSTB_WKTMR_EVENT 0x00 +#define BRCMSTB_WKTMR_COUNTER 0x04 +#define BRCMSTB_WKTMR_ALARM 0x08 +#define BRCMSTB_WKTMR_PRESCALER 0x0C +#define BRCMSTB_WKTMR_PRESCALER_VAL 0x10 + +#define BRCMSTB_WKTMR_DEFAULT_FREQ 27000000 + +static inline void brcmstb_waketmr_clear_alarm(struct brcmstb_waketmr *timer) +{ + writel_relaxed(1, timer->base + BRCMSTB_WKTMR_EVENT); + (void)readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT); +} + +static void brcmstb_waketmr_set_alarm(struct brcmstb_waketmr *timer, + unsigned int secs) +{ + brcmstb_waketmr_clear_alarm(timer); + + writel_relaxed(secs + 1, timer->base + BRCMSTB_WKTMR_ALARM); +} + +static irqreturn_t brcmstb_waketmr_irq(int irq, void *data) +{ + struct brcmstb_waketmr *timer = data; + + pm_wakeup_event(timer->dev, 0); + + return IRQ_HANDLED; +} + +struct wktmr_time { + u32 sec; + u32 pre; +}; + +static void wktmr_read(struct brcmstb_waketmr *timer, + struct wktmr_time *t) +{ + u32 tmp; + + do { + t->sec = readl_relaxed(timer->base + BRCMSTB_WKTMR_COUNTER); + tmp = readl_relaxed(timer->base + BRCMSTB_WKTMR_PRESCALER_VAL); + } while (tmp >= timer->rate); + + t->pre = timer->rate - tmp; +} + +static int 
brcmstb_waketmr_prepare_suspend(struct brcmstb_waketmr *timer) +{ + struct device *dev = timer->dev; + int ret = 0; + + if (device_may_wakeup(dev)) { + ret = enable_irq_wake(timer->irq); + if (ret) { + dev_err(dev, "failed to enable wake-up interrupt\n"); + return ret; + } + } + + return ret; +} + +/* If enabled as a wakeup-source, arm the timer when powering off */ +static int brcmstb_waketmr_reboot(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct brcmstb_waketmr *timer; + + timer = container_of(nb, struct brcmstb_waketmr, reboot_notifier); + + /* Set timer for cold boot */ + if (action == SYS_POWER_OFF) + brcmstb_waketmr_prepare_suspend(timer); + + return NOTIFY_DONE; +} + +static int brcmstb_waketmr_gettime(struct device *dev, + struct rtc_time *tm) +{ + struct brcmstb_waketmr *timer = dev_get_drvdata(dev); + struct wktmr_time now; + + wktmr_read(timer, &now); + + rtc_time_to_tm(now.sec, tm); + + return 0; +} + +static int brcmstb_waketmr_settime(struct device *dev, + struct rtc_time *tm) +{ + struct brcmstb_waketmr *timer = dev_get_drvdata(dev); + time64_t sec; + + sec = rtc_tm_to_time64(tm); + + if (sec > U32_MAX || sec < 0) + return -EINVAL; + + writel_relaxed(sec, timer->base + BRCMSTB_WKTMR_COUNTER); + + return 0; +} + +static int brcmstb_waketmr_getalarm(struct device *dev, + struct rtc_wkalrm *alarm) +{ + struct brcmstb_waketmr *timer = dev_get_drvdata(dev); + time64_t sec; + u32 reg; + + sec = readl_relaxed(timer->base + BRCMSTB_WKTMR_ALARM); + if (sec != 0) { + /* Alarm is enabled */ + alarm->enabled = 1; + rtc_time64_to_tm(sec, &alarm->time); + } + + reg = readl_relaxed(timer->base + BRCMSTB_WKTMR_EVENT); + alarm->pending = !!(reg & 1); + + return 0; +} + +static int brcmstb_waketmr_setalarm(struct device *dev, + struct rtc_wkalrm *alarm) +{ + struct brcmstb_waketmr *timer = dev_get_drvdata(dev); + time64_t sec; + + if (alarm->enabled) + sec = rtc_tm_to_time64(&alarm->time); + else + sec = 0; + + if (sec > U32_MAX || sec < 0) + return -EINVAL; + + brcmstb_waketmr_set_alarm(timer, sec); + + return 0; +} + +/* + * Does not do much but keep the RTC class happy. We always support + * alarms. 
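A side note on the timekeeping in this waketimer: the hardware clock is only a free-running seconds counter plus a sub-second prescaler, and wktmr_read() above re-reads both registers until the prescaler sample is in range for the clock rate, so a seconds tick cannot tear the pair. A rough standalone illustration of that retry pattern, assuming hypothetical accessor callbacks standing in for the driver's readl_relaxed() calls:

    #include <stdint.h>

    struct sample {
            uint32_t sec;   /* whole seconds */
            uint32_t pre;   /* ticks elapsed into the current second */
    };

    /* Keep sampling until the raw prescaler value is below the clock
     * rate, mirroring the retry loop in wktmr_read(): an out-of-range
     * snapshot means the pair was torn by a counter update. */
    static struct sample read_consistent(uint32_t (*read_sec)(void),
                                         uint32_t (*read_pre)(void),
                                         uint32_t rate)
    {
            struct sample s;
            uint32_t raw;

            do {
                    s.sec = read_sec();
                    raw = read_pre();
            } while (raw >= rate);  /* inconsistent snapshot: retry */

            s.pre = rate - raw;
            return s;
    }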
+ */ +static int brcmstb_waketmr_alarm_enable(struct device *dev, + unsigned int enabled) +{ + return 0; +} + +static const struct rtc_class_ops brcmstb_waketmr_ops = { + .read_time = brcmstb_waketmr_gettime, + .set_time = brcmstb_waketmr_settime, + .read_alarm = brcmstb_waketmr_getalarm, + .set_alarm = brcmstb_waketmr_setalarm, + .alarm_irq_enable = brcmstb_waketmr_alarm_enable, +}; + +static int brcmstb_waketmr_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct brcmstb_waketmr *timer; + struct resource *res; + int ret; + + timer = devm_kzalloc(dev, sizeof(*timer), GFP_KERNEL); + if (!timer) + return -ENOMEM; + + platform_set_drvdata(pdev, timer); + timer->dev = dev; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + timer->base = devm_ioremap_resource(dev, res); + if (IS_ERR(timer->base)) + return PTR_ERR(timer->base); + + /* + * Set wakeup capability before requesting wakeup interrupt, so we can + * process boot-time "wakeups" (e.g., from S5 soft-off) + */ + device_set_wakeup_capable(dev, true); + device_wakeup_enable(dev); + + timer->irq = platform_get_irq(pdev, 0); + if (timer->irq < 0) + return -ENODEV; + + timer->clk = devm_clk_get(dev, NULL); + if (!IS_ERR(timer->clk)) { + ret = clk_prepare_enable(timer->clk); + if (ret) + return ret; + timer->rate = clk_get_rate(timer->clk); + if (!timer->rate) + timer->rate = BRCMSTB_WKTMR_DEFAULT_FREQ; + } else { + timer->rate = BRCMSTB_WKTMR_DEFAULT_FREQ; + timer->clk = NULL; + } + + ret = devm_request_irq(dev, timer->irq, brcmstb_waketmr_irq, 0, + "brcmstb-waketimer", timer); + if (ret < 0) + return ret; + + timer->reboot_notifier.notifier_call = brcmstb_waketmr_reboot; + register_reboot_notifier(&timer->reboot_notifier); + + timer->rtc = rtc_device_register("brcmstb-waketmr", dev, + &brcmstb_waketmr_ops, THIS_MODULE); + if (IS_ERR(timer->rtc)) { + dev_err(dev, "unable to register device\n"); + unregister_reboot_notifier(&timer->reboot_notifier); + return PTR_ERR(timer->rtc); + } + + dev_info(dev, "registered, with irq %d\n", timer->irq); + + return ret; +} + +static int brcmstb_waketmr_remove(struct platform_device *pdev) +{ + struct brcmstb_waketmr *timer = dev_get_drvdata(&pdev->dev); + + unregister_reboot_notifier(&timer->reboot_notifier); + rtc_device_unregister(timer->rtc); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int brcmstb_waketmr_suspend(struct device *dev) +{ + struct brcmstb_waketmr *timer = dev_get_drvdata(dev); + + return brcmstb_waketmr_prepare_suspend(timer); +} + +static int brcmstb_waketmr_resume(struct device *dev) +{ + struct brcmstb_waketmr *timer = dev_get_drvdata(dev); + int ret; + + if (!device_may_wakeup(dev)) + return 0; + + ret = disable_irq_wake(timer->irq); + + brcmstb_waketmr_clear_alarm(timer); + + return ret; +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(brcmstb_waketmr_pm_ops, + brcmstb_waketmr_suspend, brcmstb_waketmr_resume); + +static const struct of_device_id brcmstb_waketmr_of_match[] = { + { .compatible = "brcm,brcmstb-waketimer" }, + { /* sentinel */ }, +}; + +static struct platform_driver brcmstb_waketmr_driver = { + .probe = brcmstb_waketmr_probe, + .remove = brcmstb_waketmr_remove, + .driver = { + .name = "brcmstb-waketimer", + .pm = &brcmstb_waketmr_pm_ops, + .of_match_table = of_match_ptr(brcmstb_waketmr_of_match), + } +}; +module_platform_driver(brcmstb_waketmr_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Brian Norris"); +MODULE_AUTHOR("Markus Mayer"); +MODULE_DESCRIPTION("Wake-up timer driver for STB chips"); diff --git 
a/drivers/rtc/rtc-core.h b/drivers/rtc/rtc-core.h index 7a4ed2f7c7d7..ecab76a3207c 100644 --- a/drivers/rtc/rtc-core.h +++ b/drivers/rtc/rtc-core.h @@ -45,3 +45,11 @@ static inline const struct attribute_group **rtc_get_dev_attribute_groups(void) return NULL; } #endif + +#ifdef CONFIG_RTC_NVMEM +void rtc_nvmem_register(struct rtc_device *rtc); +void rtc_nvmem_unregister(struct rtc_device *rtc); +#else +static inline void rtc_nvmem_register(struct rtc_device *rtc) {} +static inline void rtc_nvmem_unregister(struct rtc_device *rtc) {} +#endif diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index e81a8711fea7..794bc4fa4937 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c @@ -464,7 +464,7 @@ void rtc_dev_prepare(struct rtc_device *rtc) return; if (rtc->id >= RTC_DEV_MAX) { - dev_dbg(&rtc->dev, "%s: too many RTC devices\n", rtc->name); + dev_dbg(&rtc->dev, "too many RTC devices\n"); return; } diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index 77339b3d50a1..4fac49e55d47 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -24,6 +24,7 @@ #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/clk-provider.h> +#include <linux/regmap.h> /* * We can't determine type by probing, but if we expect pre-Linux code @@ -33,6 +34,7 @@ */ enum ds_type { ds_1307, + ds_1308, ds_1337, ds_1338, ds_1339, @@ -43,6 +45,7 @@ enum ds_type { m41t00, mcp794xx, rx_8025, + rx_8130, last_ds_type /* always last */ /* rs5c372 too? different address... */ }; @@ -115,17 +118,16 @@ struct ds1307 { u8 offset; /* register's offset */ u8 regs[11]; u16 nvram_offset; - struct bin_attribute *nvram; + struct nvmem_config nvmem_cfg; enum ds_type type; unsigned long flags; #define HAS_NVRAM 0 /* bit 0 == sysfs file active */ #define HAS_ALARM 1 /* bit 1 == irq claimed */ - struct i2c_client *client; + struct device *dev; + struct regmap *regmap; + const char *name; + int irq; struct rtc_device *rtc; - s32 (*read_block_data)(const struct i2c_client *client, u8 command, - u8 length, u8 *values); - s32 (*write_block_data)(const struct i2c_client *client, u8 command, - u8 length, const u8 *values); #ifdef CONFIG_COMMON_CLK struct clk_hw clks[2]; #endif @@ -135,21 +137,30 @@ struct chip_desc { unsigned alarm:1; u16 nvram_offset; u16 nvram_size; + u8 century_reg; + u8 century_enable_bit; + u8 century_bit; u16 trickle_charger_reg; u8 trickle_charger_setup; - u8 (*do_trickle_setup)(struct i2c_client *, uint32_t, bool); + u8 (*do_trickle_setup)(struct ds1307 *, uint32_t, + bool); }; -static u8 do_trickle_setup_ds1339(struct i2c_client *, - uint32_t ohms, bool diode); +static u8 do_trickle_setup_ds1339(struct ds1307 *, uint32_t ohms, bool diode); static struct chip_desc chips[last_ds_type] = { [ds_1307] = { .nvram_offset = 8, .nvram_size = 56, }, + [ds_1308] = { + .nvram_offset = 8, + .nvram_size = 56, + }, [ds_1337] = { .alarm = 1, + .century_reg = DS1307_REG_MONTH, + .century_bit = DS1337_BIT_CENTURY, }, [ds_1338] = { .nvram_offset = 8, @@ -157,10 +168,15 @@ static struct chip_desc chips[last_ds_type] = { }, [ds_1339] = { .alarm = 1, + .century_reg = DS1307_REG_MONTH, + .century_bit = DS1337_BIT_CENTURY, .trickle_charger_reg = 0x10, .do_trickle_setup = &do_trickle_setup_ds1339, }, [ds_1340] = { + .century_reg = DS1307_REG_HOUR, + .century_enable_bit = DS1340_BIT_CENTURY_EN, + .century_bit = DS1340_BIT_CENTURY, .trickle_charger_reg = 0x08, }, [ds_1388] = { @@ -168,6 +184,14 @@ static struct chip_desc chips[last_ds_type] = { }, [ds_3231] = { .alarm = 1, + 
.century_reg = DS1307_REG_MONTH, + .century_bit = DS1337_BIT_CENTURY, + }, + [rx_8130] = { + .alarm = 1, + /* this is battery backed SRAM */ + .nvram_offset = 0x20, + .nvram_size = 4, /* 32bit (4 word x 8 bit) */ }, [mcp794xx] = { .alarm = 1, @@ -179,6 +203,7 @@ static struct chip_desc chips[last_ds_type] = { static const struct i2c_device_id ds1307_id[] = { { "ds1307", ds_1307 }, + { "ds1308", ds_1308 }, { "ds1337", ds_1337 }, { "ds1338", ds_1338 }, { "ds1339", ds_1339 }, @@ -192,6 +217,7 @@ static const struct i2c_device_id ds1307_id[] = { { "pt7c4338", ds_1307 }, { "rx8025", rx_8025 }, { "isl12057", ds_1337 }, + { "rx8130", rx_8130 }, { } }; MODULE_DEVICE_TABLE(i2c, ds1307_id); @@ -203,6 +229,10 @@ static const struct of_device_id ds1307_of_match[] = { .data = (void *)ds_1307 }, { + .compatible = "dallas,ds1308", + .data = (void *)ds_1308 + }, + { .compatible = "dallas,ds1337", .data = (void *)ds_1337 }, @@ -262,6 +292,7 @@ MODULE_DEVICE_TABLE(of, ds1307_of_match); #ifdef CONFIG_ACPI static const struct acpi_device_id ds1307_acpi_ids[] = { { .id = "DS1307", .driver_data = ds_1307 }, + { .id = "DS1308", .driver_data = ds_1308 }, { .id = "DS1337", .driver_data = ds_1337 }, { .id = "DS1338", .driver_data = ds_1338 }, { .id = "DS1339", .driver_data = ds_1339 }, @@ -280,136 +311,6 @@ static const struct acpi_device_id ds1307_acpi_ids[] = { MODULE_DEVICE_TABLE(acpi, ds1307_acpi_ids); #endif -/*----------------------------------------------------------------------*/ - -#define BLOCK_DATA_MAX_TRIES 10 - -static s32 ds1307_read_block_data_once(const struct i2c_client *client, - u8 command, u8 length, u8 *values) -{ - s32 i, data; - - for (i = 0; i < length; i++) { - data = i2c_smbus_read_byte_data(client, command + i); - if (data < 0) - return data; - values[i] = data; - } - return i; -} - -static s32 ds1307_read_block_data(const struct i2c_client *client, u8 command, - u8 length, u8 *values) -{ - u8 oldvalues[255]; - s32 ret; - int tries = 0; - - dev_dbg(&client->dev, "ds1307_read_block_data (length=%d)\n", length); - ret = ds1307_read_block_data_once(client, command, length, values); - if (ret < 0) - return ret; - do { - if (++tries > BLOCK_DATA_MAX_TRIES) { - dev_err(&client->dev, - "ds1307_read_block_data failed\n"); - return -EIO; - } - memcpy(oldvalues, values, length); - ret = ds1307_read_block_data_once(client, command, length, - values); - if (ret < 0) - return ret; - } while (memcmp(oldvalues, values, length)); - return length; -} - -static s32 ds1307_write_block_data(const struct i2c_client *client, u8 command, - u8 length, const u8 *values) -{ - u8 currvalues[255]; - int tries = 0; - - dev_dbg(&client->dev, "ds1307_write_block_data (length=%d)\n", length); - do { - s32 i, ret; - - if (++tries > BLOCK_DATA_MAX_TRIES) { - dev_err(&client->dev, - "ds1307_write_block_data failed\n"); - return -EIO; - } - for (i = 0; i < length; i++) { - ret = i2c_smbus_write_byte_data(client, command + i, - values[i]); - if (ret < 0) - return ret; - } - ret = ds1307_read_block_data_once(client, command, length, - currvalues); - if (ret < 0) - return ret; - } while (memcmp(currvalues, values, length)); - return length; -} - -/*----------------------------------------------------------------------*/ - -/* These RTC devices are not designed to be connected to a SMbus adapter. - SMbus limits block operations length to 32 bytes, whereas it's not - limited on I2C buses. 
As a result, accesses may exceed 32 bytes; - in that case, split them into smaller blocks */ - -static s32 ds1307_native_smbus_write_block_data(const struct i2c_client *client, - u8 command, u8 length, const u8 *values) -{ - u8 suboffset = 0; - - if (length <= I2C_SMBUS_BLOCK_MAX) { - s32 retval = i2c_smbus_write_i2c_block_data(client, - command, length, values); - if (retval < 0) - return retval; - return length; - } - - while (suboffset < length) { - s32 retval = i2c_smbus_write_i2c_block_data(client, - command + suboffset, - min(I2C_SMBUS_BLOCK_MAX, length - suboffset), - values + suboffset); - if (retval < 0) - return retval; - - suboffset += I2C_SMBUS_BLOCK_MAX; - } - return length; -} - -static s32 ds1307_native_smbus_read_block_data(const struct i2c_client *client, - u8 command, u8 length, u8 *values) -{ - u8 suboffset = 0; - - if (length <= I2C_SMBUS_BLOCK_MAX) - return i2c_smbus_read_i2c_block_data(client, - command, length, values); - - while (suboffset < length) { - s32 retval = i2c_smbus_read_i2c_block_data(client, - command + suboffset, - min(I2C_SMBUS_BLOCK_MAX, length - suboffset), - values + suboffset); - if (retval < 0) - return retval; - - suboffset += I2C_SMBUS_BLOCK_MAX; - } - return length; -} - -/*----------------------------------------------------------------------*/ - /* * The ds1337 and ds1339 both have two alarms, but we only use the first * one (with a "seconds" field). For ds1337 we expect nINTA is our alarm @@ -417,27 +318,24 @@ static s32 ds1307_native_smbus_read_block_data(const struct i2c_client *client, */ static irqreturn_t ds1307_irq(int irq, void *dev_id) { - struct i2c_client *client = dev_id; - struct ds1307 *ds1307 = i2c_get_clientdata(client); + struct ds1307 *ds1307 = dev_id; struct mutex *lock = &ds1307->rtc->ops_lock; - int stat, control; + int stat, ret; mutex_lock(lock); - stat = i2c_smbus_read_byte_data(client, DS1337_REG_STATUS); - if (stat < 0) + ret = regmap_read(ds1307->regmap, DS1337_REG_STATUS, &stat); + if (ret) goto out; if (stat & DS1337_BIT_A1I) { stat &= ~DS1337_BIT_A1I; - i2c_smbus_write_byte_data(client, DS1337_REG_STATUS, stat); + regmap_write(ds1307->regmap, DS1337_REG_STATUS, stat); - control = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); - if (control < 0) + ret = regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL, + DS1337_BIT_A1IE, 0); + if (ret) goto out; - control &= ~DS1337_BIT_A1IE; - i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, control); - rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF); } @@ -452,14 +350,14 @@ out: static int ds1307_get_time(struct device *dev, struct rtc_time *t) { struct ds1307 *ds1307 = dev_get_drvdata(dev); - int tmp; + int tmp, ret; + const struct chip_desc *chip = &chips[ds1307->type]; /* read the RTC date and time registers all at once */ - tmp = ds1307->read_block_data(ds1307->client, - ds1307->offset, 7, ds1307->regs); - if (tmp != 7) { - dev_err(dev, "%s error %d\n", "read", tmp); - return -EIO; + ret = regmap_bulk_read(ds1307->regmap, ds1307->offset, ds1307->regs, 7); + if (ret) { + dev_err(dev, "%s error %d\n", "read", ret); + return ret; } dev_dbg(dev, "%s: %7ph\n", "read", ds1307->regs); @@ -481,22 +379,9 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t) t->tm_mon = bcd2bin(tmp) - 1; t->tm_year = bcd2bin(ds1307->regs[DS1307_REG_YEAR]) + 100; -#ifdef CONFIG_RTC_DRV_DS1307_CENTURY - switch (ds1307->type) { - case ds_1337: - case ds_1339: - case ds_3231: - if (ds1307->regs[DS1307_REG_MONTH] & DS1337_BIT_CENTURY) - t->tm_year += 100; - break; - case 
ds_1340: - if (ds1307->regs[DS1307_REG_HOUR] & DS1340_BIT_CENTURY) - t->tm_year += 100; - break; - default: - break; - } -#endif + if (ds1307->regs[chip->century_reg] & chip->century_bit && + IS_ENABLED(CONFIG_RTC_DRV_DS1307_CENTURY)) + t->tm_year += 100; dev_dbg(dev, "%s secs=%d, mins=%d, " "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n", @@ -511,6 +396,7 @@ static int ds1307_get_time(struct device *dev, struct rtc_time *t) static int ds1307_set_time(struct device *dev, struct rtc_time *t) { struct ds1307 *ds1307 = dev_get_drvdata(dev); + const struct chip_desc *chip = &chips[ds1307->type]; int result; int tmp; u8 *buf = ds1307->regs; @@ -521,24 +407,14 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) t->tm_hour, t->tm_mday, t->tm_mon, t->tm_year, t->tm_wday); -#ifdef CONFIG_RTC_DRV_DS1307_CENTURY if (t->tm_year < 100) return -EINVAL; - switch (ds1307->type) { - case ds_1337: - case ds_1339: - case ds_3231: - case ds_1340: - if (t->tm_year > 299) - return -EINVAL; - default: - if (t->tm_year > 199) - return -EINVAL; - break; - } +#ifdef CONFIG_RTC_DRV_DS1307_CENTURY + if (t->tm_year > (chip->century_bit ? 299 : 199)) + return -EINVAL; #else - if (t->tm_year < 100 || t->tm_year > 199) + if (t->tm_year > 199) return -EINVAL; #endif @@ -553,19 +429,12 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) tmp = t->tm_year - 100; buf[DS1307_REG_YEAR] = bin2bcd(tmp); - switch (ds1307->type) { - case ds_1337: - case ds_1339: - case ds_3231: - if (t->tm_year > 199) - buf[DS1307_REG_MONTH] |= DS1337_BIT_CENTURY; - break; - case ds_1340: - buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY_EN; - if (t->tm_year > 199) - buf[DS1307_REG_HOUR] |= DS1340_BIT_CENTURY; - break; - case mcp794xx: + if (chip->century_enable_bit) + buf[chip->century_reg] |= chip->century_enable_bit; + if (t->tm_year > 199 && chip->century_bit) + buf[chip->century_reg] |= chip->century_bit; + + if (ds1307->type == mcp794xx) { /* * these bits were cleared when preparing the date/time * values and need to be set again before writing the @@ -573,16 +442,12 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) */ buf[DS1307_REG_SECS] |= MCP794XX_BIT_ST; buf[DS1307_REG_WDAY] |= MCP794XX_BIT_VBATEN; - break; - default: - break; } dev_dbg(dev, "%s: %7ph\n", "write", buf); - result = ds1307->write_block_data(ds1307->client, - ds1307->offset, 7, buf); - if (result < 0) { + result = regmap_bulk_write(ds1307->regmap, ds1307->offset, buf, 7); + if (result) { dev_err(dev, "%s error %d\n", "write", result); return result; } @@ -591,19 +456,18 @@ static int ds1307_set_time(struct device *dev, struct rtc_time *t) static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t) { - struct i2c_client *client = to_i2c_client(dev); - struct ds1307 *ds1307 = i2c_get_clientdata(client); + struct ds1307 *ds1307 = dev_get_drvdata(dev); int ret; if (!test_bit(HAS_ALARM, &ds1307->flags)) return -EINVAL; /* read all ALARM1, ALARM2, and status registers at once */ - ret = ds1307->read_block_data(client, - DS1339_REG_ALARM1_SECS, 9, ds1307->regs); - if (ret != 9) { + ret = regmap_bulk_read(ds1307->regmap, DS1339_REG_ALARM1_SECS, + ds1307->regs, 9); + if (ret) { dev_err(dev, "%s error %d\n", "alarm read", ret); - return -EIO; + return ret; } dev_dbg(dev, "%s: %4ph, %3ph, %2ph\n", "alarm read", @@ -633,8 +497,7 @@ static int ds1337_read_alarm(struct device *dev, struct rtc_wkalrm *t) static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t) { - struct i2c_client *client = to_i2c_client(dev); - 
struct ds1307 *ds1307 = i2c_get_clientdata(client); + struct ds1307 *ds1307 = dev_get_drvdata(dev); unsigned char *buf = ds1307->regs; u8 control, status; int ret; @@ -649,11 +512,10 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t) t->enabled, t->pending); /* read current status of both alarms and the chip */ - ret = ds1307->read_block_data(client, - DS1339_REG_ALARM1_SECS, 9, buf); - if (ret != 9) { + ret = regmap_bulk_read(ds1307->regmap, DS1339_REG_ALARM1_SECS, buf, 9); + if (ret) { dev_err(dev, "%s error %d\n", "alarm write", ret); - return -EIO; + return ret; } control = ds1307->regs[7]; status = ds1307->regs[8]; @@ -676,9 +538,8 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t) buf[7] = control & ~(DS1337_BIT_A1IE | DS1337_BIT_A2IE); buf[8] = status & ~(DS1337_BIT_A1I | DS1337_BIT_A2I); - ret = ds1307->write_block_data(client, - DS1339_REG_ALARM1_SECS, 9, buf); - if (ret < 0) { + ret = regmap_bulk_write(ds1307->regmap, DS1339_REG_ALARM1_SECS, buf, 9); + if (ret) { dev_err(dev, "can't set alarm time\n"); return ret; } @@ -687,7 +548,7 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t) if (t->enabled) { dev_dbg(dev, "alarm IRQ armed\n"); buf[7] |= DS1337_BIT_A1IE; /* only ALARM1 is used */ - i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, buf[7]); + regmap_write(ds1307->regmap, DS1337_REG_CONTROL, buf[7]); } return 0; @@ -695,35 +556,181 @@ static int ds1337_set_alarm(struct device *dev, struct rtc_wkalrm *t) static int ds1307_alarm_irq_enable(struct device *dev, unsigned int enabled) { - struct i2c_client *client = to_i2c_client(dev); - struct ds1307 *ds1307 = i2c_get_clientdata(client); - int ret; + struct ds1307 *ds1307 = dev_get_drvdata(dev); if (!test_bit(HAS_ALARM, &ds1307->flags)) return -ENOTTY; - ret = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); + return regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL, + DS1337_BIT_A1IE, + enabled ? DS1337_BIT_A1IE : 0); +} + +static const struct rtc_class_ops ds13xx_rtc_ops = { + .read_time = ds1307_get_time, + .set_time = ds1307_set_time, + .read_alarm = ds1337_read_alarm, + .set_alarm = ds1337_set_alarm, + .alarm_irq_enable = ds1307_alarm_irq_enable, +}; + +/*----------------------------------------------------------------------*/ + +/* + * Alarm support for rx8130 devices. + */ + +#define RX8130_REG_ALARM_MIN 0x07 +#define RX8130_REG_ALARM_HOUR 0x08 +#define RX8130_REG_ALARM_WEEK_OR_DAY 0x09 +#define RX8130_REG_EXTENSION 0x0c +#define RX8130_REG_EXTENSION_WADA (1 << 3) +#define RX8130_REG_FLAG 0x0d +#define RX8130_REG_FLAG_AF (1 << 3) +#define RX8130_REG_CONTROL0 0x0e +#define RX8130_REG_CONTROL0_AIE (1 << 3) + +static irqreturn_t rx8130_irq(int irq, void *dev_id) +{ + struct ds1307 *ds1307 = dev_id; + struct mutex *lock = &ds1307->rtc->ops_lock; + u8 ctl[3]; + int ret; + + mutex_lock(lock); + + /* Read control registers. 
*/ + ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3); if (ret < 0) - return ret; + goto out; + if (!(ctl[1] & RX8130_REG_FLAG_AF)) + goto out; + ctl[1] &= ~RX8130_REG_FLAG_AF; + ctl[2] &= ~RX8130_REG_CONTROL0_AIE; - if (enabled) - ret |= DS1337_BIT_A1IE; - else - ret &= ~DS1337_BIT_A1IE; + ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3); + if (ret < 0) + goto out; + + rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF); + +out: + mutex_unlock(lock); + + return IRQ_HANDLED; +} - ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, ret); +static int rx8130_read_alarm(struct device *dev, struct rtc_wkalrm *t) +{ + struct ds1307 *ds1307 = dev_get_drvdata(dev); + u8 ald[3], ctl[3]; + int ret; + + if (!test_bit(HAS_ALARM, &ds1307->flags)) + return -EINVAL; + + /* Read alarm registers. */ + ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_ALARM_MIN, ald, 3); if (ret < 0) return ret; + /* Read control registers. */ + ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3); + if (ret < 0) + return ret; + + t->enabled = !!(ctl[2] & RX8130_REG_CONTROL0_AIE); + t->pending = !!(ctl[1] & RX8130_REG_FLAG_AF); + + /* Report alarm 0 time assuming 24-hour and day-of-month modes. */ + t->time.tm_sec = -1; + t->time.tm_min = bcd2bin(ald[0] & 0x7f); + t->time.tm_hour = bcd2bin(ald[1] & 0x7f); + t->time.tm_wday = -1; + t->time.tm_mday = bcd2bin(ald[2] & 0x7f); + t->time.tm_mon = -1; + t->time.tm_year = -1; + t->time.tm_yday = -1; + t->time.tm_isdst = -1; + + dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d enabled=%d\n", + __func__, t->time.tm_sec, t->time.tm_min, t->time.tm_hour, + t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, t->enabled); + return 0; } -static const struct rtc_class_ops ds13xx_rtc_ops = { +static int rx8130_set_alarm(struct device *dev, struct rtc_wkalrm *t) +{ + struct ds1307 *ds1307 = dev_get_drvdata(dev); + u8 ald[3], ctl[3]; + int ret; + + if (!test_bit(HAS_ALARM, &ds1307->flags)) + return -EINVAL; + + dev_dbg(dev, "%s, sec=%d min=%d hour=%d wday=%d mday=%d mon=%d " + "enabled=%d pending=%d\n", __func__, + t->time.tm_sec, t->time.tm_min, t->time.tm_hour, + t->time.tm_wday, t->time.tm_mday, t->time.tm_mon, + t->enabled, t->pending); + + /* Read control registers. */ + ret = regmap_bulk_read(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3); + if (ret < 0) + return ret; + + ctl[0] &= ~RX8130_REG_EXTENSION_WADA; + ctl[1] |= RX8130_REG_FLAG_AF; + ctl[2] &= ~RX8130_REG_CONTROL0_AIE; + + ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3); + if (ret < 0) + return ret; + + /* Hardware alarm precision is 1 minute! 
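Because the rx8130 alarm ignores the seconds field, a caller that wants "no earlier than" semantics has to round the target time up to the next whole minute before handing it to set_alarm. A one-line helper sketch (hypothetical, not part of the driver; the userspace typedef stands in for the kernel's time64_t):

    #include <stdint.h>

    typedef int64_t time64_t;

    /* Round an absolute time in seconds up to the next minute boundary,
     * so a minute-granular alarm never fires early. */
    static time64_t round_up_to_minute(time64_t t)
    {
            return ((t + 59) / 60) * 60;
    }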
*/ + ald[0] = bin2bcd(t->time.tm_min); + ald[1] = bin2bcd(t->time.tm_hour); + ald[2] = bin2bcd(t->time.tm_mday); + + ret = regmap_bulk_write(ds1307->regmap, RX8130_REG_ALARM_MIN, ald, 3); + if (ret < 0) + return ret; + + if (!t->enabled) + return 0; + + ctl[2] |= RX8130_REG_CONTROL0_AIE; + + return regmap_bulk_write(ds1307->regmap, RX8130_REG_EXTENSION, ctl, 3); +} + +static int rx8130_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct ds1307 *ds1307 = dev_get_drvdata(dev); + int ret, reg; + + if (!test_bit(HAS_ALARM, &ds1307->flags)) + return -EINVAL; + + ret = regmap_read(ds1307->regmap, RX8130_REG_CONTROL0, &reg); + if (ret < 0) + return ret; + + if (enabled) + reg |= RX8130_REG_CONTROL0_AIE; + else + reg &= ~RX8130_REG_CONTROL0_AIE; + + return regmap_write(ds1307->regmap, RX8130_REG_CONTROL0, reg); +} + +static const struct rtc_class_ops rx8130_rtc_ops = { .read_time = ds1307_get_time, .set_time = ds1307_set_time, - .read_alarm = ds1337_read_alarm, - .set_alarm = ds1337_set_alarm, - .alarm_irq_enable = ds1307_alarm_irq_enable, + .read_alarm = rx8130_read_alarm, + .set_alarm = rx8130_set_alarm, + .alarm_irq_enable = rx8130_alarm_irq_enable, }; /*----------------------------------------------------------------------*/ @@ -752,31 +759,27 @@ static const struct rtc_class_ops ds13xx_rtc_ops = { static irqreturn_t mcp794xx_irq(int irq, void *dev_id) { - struct i2c_client *client = dev_id; - struct ds1307 *ds1307 = i2c_get_clientdata(client); + struct ds1307 *ds1307 = dev_id; struct mutex *lock = &ds1307->rtc->ops_lock; int reg, ret; mutex_lock(lock); /* Check and clear alarm 0 interrupt flag. */ - reg = i2c_smbus_read_byte_data(client, MCP794XX_REG_ALARM0_CTRL); - if (reg < 0) + ret = regmap_read(ds1307->regmap, MCP794XX_REG_ALARM0_CTRL, &reg); + if (ret) goto out; if (!(reg & MCP794XX_BIT_ALMX_IF)) goto out; reg &= ~MCP794XX_BIT_ALMX_IF; - ret = i2c_smbus_write_byte_data(client, MCP794XX_REG_ALARM0_CTRL, reg); - if (ret < 0) + ret = regmap_write(ds1307->regmap, MCP794XX_REG_ALARM0_CTRL, reg); + if (ret) goto out; /* Disable alarm 0. */ - reg = i2c_smbus_read_byte_data(client, MCP794XX_REG_CONTROL); - if (reg < 0) - goto out; - reg &= ~MCP794XX_BIT_ALM0_EN; - ret = i2c_smbus_write_byte_data(client, MCP794XX_REG_CONTROL, reg); - if (ret < 0) + ret = regmap_update_bits(ds1307->regmap, MCP794XX_REG_CONTROL, + MCP794XX_BIT_ALM0_EN, 0); + if (ret) goto out; rtc_update_irq(ds1307->rtc, 1, RTC_AF | RTC_IRQF); @@ -789,8 +792,7 @@ out: static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t) { - struct i2c_client *client = to_i2c_client(dev); - struct ds1307 *ds1307 = i2c_get_clientdata(client); + struct ds1307 *ds1307 = dev_get_drvdata(dev); u8 *regs = ds1307->regs; int ret; @@ -798,8 +800,8 @@ static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t) return -EINVAL; /* Read control and alarm 0 registers.
*/ - ret = ds1307->read_block_data(client, MCP794XX_REG_CONTROL, 10, regs); - if (ret < 0) + ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs, 10); + if (ret) return ret; t->enabled = !!(regs[0] & MCP794XX_BIT_ALM0_EN); @@ -828,8 +830,7 @@ static int mcp794xx_read_alarm(struct device *dev, struct rtc_wkalrm *t) static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t) { - struct i2c_client *client = to_i2c_client(dev); - struct ds1307 *ds1307 = i2c_get_clientdata(client); + struct ds1307 *ds1307 = dev_get_drvdata(dev); unsigned char *regs = ds1307->regs; int ret; @@ -843,8 +844,8 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t) t->enabled, t->pending); /* Read control and alarm 0 registers. */ - ret = ds1307->read_block_data(client, MCP794XX_REG_CONTROL, 10, regs); - if (ret < 0) + ret = regmap_bulk_read(ds1307->regmap, MCP794XX_REG_CONTROL, regs, 10); + if (ret) return ret; /* Set alarm 0, using 24-hour and day-of-month modes. */ @@ -862,35 +863,26 @@ static int mcp794xx_set_alarm(struct device *dev, struct rtc_wkalrm *t) /* Disable interrupt. We will not enable until completely programmed */ regs[0] &= ~MCP794XX_BIT_ALM0_EN; - ret = ds1307->write_block_data(client, MCP794XX_REG_CONTROL, 10, regs); - if (ret < 0) + ret = regmap_bulk_write(ds1307->regmap, MCP794XX_REG_CONTROL, regs, 10); + if (ret) return ret; if (!t->enabled) return 0; regs[0] |= MCP794XX_BIT_ALM0_EN; - return i2c_smbus_write_byte_data(client, MCP794XX_REG_CONTROL, regs[0]); + return regmap_write(ds1307->regmap, MCP794XX_REG_CONTROL, regs[0]); } static int mcp794xx_alarm_irq_enable(struct device *dev, unsigned int enabled) { - struct i2c_client *client = to_i2c_client(dev); - struct ds1307 *ds1307 = i2c_get_clientdata(client); - int reg; + struct ds1307 *ds1307 = dev_get_drvdata(dev); if (!test_bit(HAS_ALARM, &ds1307->flags)) return -EINVAL; - reg = i2c_smbus_read_byte_data(client, MCP794XX_REG_CONTROL); - if (reg < 0) - return reg; - - if (enabled) - reg |= MCP794XX_BIT_ALM0_EN; - else - reg &= ~MCP794XX_BIT_ALM0_EN; - - return i2c_smbus_write_byte_data(client, MCP794XX_REG_CONTROL, reg); + return regmap_update_bits(ds1307->regmap, MCP794XX_REG_CONTROL, + MCP794XX_BIT_ALM0_EN, + enabled ? 
MCP794XX_BIT_ALM0_EN : 0); } static const struct rtc_class_ops mcp794xx_rtc_ops = { @@ -903,50 +895,27 @@ static const struct rtc_class_ops mcp794xx_rtc_ops = { /*----------------------------------------------------------------------*/ -static ssize_t -ds1307_nvram_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) +static int ds1307_nvram_read(void *priv, unsigned int offset, void *val, + size_t bytes) { - struct i2c_client *client; - struct ds1307 *ds1307; - int result; + struct ds1307 *ds1307 = priv; - client = kobj_to_i2c_client(kobj); - ds1307 = i2c_get_clientdata(client); - - result = ds1307->read_block_data(client, ds1307->nvram_offset + off, - count, buf); - if (result < 0) - dev_err(&client->dev, "%s error %d\n", "nvram read", result); - return result; + return regmap_bulk_read(ds1307->regmap, ds1307->nvram_offset + offset, + val, bytes); } -static ssize_t -ds1307_nvram_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) +static int ds1307_nvram_write(void *priv, unsigned int offset, void *val, + size_t bytes) { - struct i2c_client *client; - struct ds1307 *ds1307; - int result; + struct ds1307 *ds1307 = priv; - client = kobj_to_i2c_client(kobj); - ds1307 = i2c_get_clientdata(client); - - result = ds1307->write_block_data(client, ds1307->nvram_offset + off, - count, buf); - if (result < 0) { - dev_err(&client->dev, "%s error %d\n", "nvram write", result); - return result; - } - return count; + return regmap_bulk_write(ds1307->regmap, ds1307->nvram_offset + offset, + val, bytes); } - /*----------------------------------------------------------------------*/ -static u8 do_trickle_setup_ds1339(struct i2c_client *client, +static u8 do_trickle_setup_ds1339(struct ds1307 *ds1307, uint32_t ohms, bool diode) { u8 setup = (diode) ? DS1307_TRICKLE_CHARGER_DIODE : @@ -963,14 +932,14 @@ static u8 do_trickle_setup_ds1339(struct i2c_client *client, setup |= DS1307_TRICKLE_CHARGER_4K_OHM; break; default: - dev_warn(&client->dev, + dev_warn(ds1307->dev, "Unsupported ohm value %u in dt\n", ohms); return 0; } return setup; } -static void ds1307_trickle_init(struct i2c_client *client, +static void ds1307_trickle_init(struct ds1307 *ds1307, struct chip_desc *chip) { uint32_t ohms = 0; @@ -978,11 +947,12 @@ static void ds1307_trickle_init(struct i2c_client *client, if (!chip->do_trickle_setup) goto out; - if (device_property_read_u32(&client->dev, "trickle-resistor-ohms", &ohms)) + if (device_property_read_u32(ds1307->dev, "trickle-resistor-ohms", + &ohms)) goto out; - if (device_property_read_bool(&client->dev, "trickle-diode-disable")) + if (device_property_read_bool(ds1307->dev, "trickle-diode-disable")) diode = false; - chip->trickle_charger_setup = chip->do_trickle_setup(client, + chip->trickle_charger_setup = chip->do_trickle_setup(ds1307, ohms, diode); out: return; @@ -1009,13 +979,10 @@ static int ds3231_hwmon_read_temp(struct device *dev, s32 *mC) s16 temp; int ret; - ret = ds1307->read_block_data(ds1307->client, DS3231_REG_TEMPERATURE, - sizeof(temp_buf), temp_buf); - if (ret < 0) + ret = regmap_bulk_read(ds1307->regmap, DS3231_REG_TEMPERATURE, + temp_buf, sizeof(temp_buf)); + if (ret) return ret; - if (ret != sizeof(temp_buf)) - return -EIO; - /* * Temperature is represented as a 10-bit code with a resolution of * 0.25 degree celsius and encoded in two's complement format. 
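The two's-complement encoding described in the comment above packs the signed integer part of the temperature in the MSB and the quarter-degree fraction in the top two bits of the LSB; shifting the combined 16-bit value right by six keeps the ten significant bits with the sign extended. A minimal standalone sketch of the same conversion, with sample register values assumed rather than read from hardware:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode a DS3231-style temperature register pair into millidegrees
     * Celsius: buf[0] is the signed MSB, buf[1] carries the 0.25 degC
     * fraction in bits 7:6. */
    static int32_t ds3231_decode_temp(const uint8_t buf[2])
    {
            int16_t temp = (int16_t)((buf[0] << 8) | buf[1]);

            temp >>= 6;                  /* keep 10 bits, sign-extended */
            return (int32_t)temp * 250;  /* 0.25 degC per LSB */
    }

    int main(void)
    {
            const uint8_t sample[2] = { 0x19, 0x40 };       /* 25.25 degC */

            printf("%d mC\n", ds3231_decode_temp(sample));  /* prints 25250 */
            return 0;
    }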
@@ -1055,12 +1022,11 @@ static void ds1307_hwmon_register(struct ds1307 *ds1307) if (ds1307->type != ds_3231) return; - dev = devm_hwmon_device_register_with_groups(&ds1307->client->dev, - ds1307->client->name, + dev = devm_hwmon_device_register_with_groups(ds1307->dev, ds1307->name, ds1307, ds3231_hwmon_groups); if (IS_ERR(dev)) { - dev_warn(&ds1307->client->dev, - "unable to register hwmon device %ld\n", PTR_ERR(dev)); + dev_warn(ds1307->dev, "unable to register hwmon device %ld\n", + PTR_ERR(dev)); } } @@ -1099,24 +1065,12 @@ static int ds3231_clk_sqw_rates[] = { static int ds1337_write_control(struct ds1307 *ds1307, u8 mask, u8 value) { - struct i2c_client *client = ds1307->client; struct mutex *lock = &ds1307->rtc->ops_lock; - int control; int ret; mutex_lock(lock); - - control = i2c_smbus_read_byte_data(client, DS1337_REG_CONTROL); - if (control < 0) { - ret = control; - goto out; - } - - control &= ~mask; - control |= value; - - ret = i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, control); -out: + ret = regmap_update_bits(ds1307->regmap, DS1337_REG_CONTROL, + mask, value); mutex_unlock(lock); return ret; @@ -1126,12 +1080,12 @@ static unsigned long ds3231_clk_sqw_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw); - int control; + int control, ret; int rate_sel = 0; - control = i2c_smbus_read_byte_data(ds1307->client, DS1337_REG_CONTROL); - if (control < 0) - return control; + ret = regmap_read(ds1307->regmap, DS1337_REG_CONTROL, &control); + if (ret) + return ret; if (control & DS1337_BIT_RS1) rate_sel += 1; if (control & DS1337_BIT_RS2) @@ -1195,11 +1149,11 @@ static void ds3231_clk_sqw_unprepare(struct clk_hw *hw) static int ds3231_clk_sqw_is_prepared(struct clk_hw *hw) { struct ds1307 *ds1307 = clk_sqw_to_ds1307(hw); - int control; + int control, ret; - control = i2c_smbus_read_byte_data(ds1307->client, DS1337_REG_CONTROL); - if (control < 0) - return control; + ret = regmap_read(ds1307->regmap, DS1337_REG_CONTROL, &control); + if (ret) + return ret; return !(control & DS1337_BIT_INTCN); } @@ -1221,26 +1175,13 @@ static unsigned long ds3231_clk_32khz_recalc_rate(struct clk_hw *hw, static int ds3231_clk_32khz_control(struct ds1307 *ds1307, bool enable) { - struct i2c_client *client = ds1307->client; struct mutex *lock = &ds1307->rtc->ops_lock; - int status; int ret; mutex_lock(lock); - - status = i2c_smbus_read_byte_data(client, DS1337_REG_STATUS); - if (status < 0) { - ret = status; - goto out; - } - - if (enable) - status |= DS3231_BIT_EN32KHZ; - else - status &= ~DS3231_BIT_EN32KHZ; - - ret = i2c_smbus_write_byte_data(client, DS1337_REG_STATUS, status); -out: + ret = regmap_update_bits(ds1307->regmap, DS1337_REG_STATUS, + DS3231_BIT_EN32KHZ, + enable ? 
DS3231_BIT_EN32KHZ : 0); mutex_unlock(lock); return ret; @@ -1263,11 +1204,11 @@ static void ds3231_clk_32khz_unprepare(struct clk_hw *hw) static int ds3231_clk_32khz_is_prepared(struct clk_hw *hw) { struct ds1307 *ds1307 = clk_32khz_to_ds1307(hw); - int status; + int status, ret; - status = i2c_smbus_read_byte_data(ds1307->client, DS1337_REG_STATUS); - if (status < 0) - return status; + ret = regmap_read(ds1307->regmap, DS1337_REG_STATUS, &status); + if (ret) + return ret; return !!(status & DS3231_BIT_EN32KHZ); } @@ -1292,18 +1233,17 @@ static struct clk_init_data ds3231_clks_init[] = { static int ds3231_clks_register(struct ds1307 *ds1307) { - struct i2c_client *client = ds1307->client; - struct device_node *node = client->dev.of_node; + struct device_node *node = ds1307->dev->of_node; struct clk_onecell_data *onecell; int i; - onecell = devm_kzalloc(&client->dev, sizeof(*onecell), GFP_KERNEL); + onecell = devm_kzalloc(ds1307->dev, sizeof(*onecell), GFP_KERNEL); if (!onecell) return -ENOMEM; onecell->clk_num = ARRAY_SIZE(ds3231_clks_init); - onecell->clks = devm_kcalloc(&client->dev, onecell->clk_num, - sizeof(onecell->clks[0]), GFP_KERNEL); + onecell->clks = devm_kcalloc(ds1307->dev, onecell->clk_num, + sizeof(onecell->clks[0]), GFP_KERNEL); if (!onecell->clks) return -ENOMEM; @@ -1322,8 +1262,8 @@ static int ds3231_clks_register(struct ds1307 *ds1307) &init.name); ds1307->clks[i].init = &init; - onecell->clks[i] = devm_clk_register(&client->dev, - &ds1307->clks[i]); + onecell->clks[i] = devm_clk_register(ds1307->dev, + &ds1307->clks[i]); if (IS_ERR(onecell->clks[i])) return PTR_ERR(onecell->clks[i]); } @@ -1345,8 +1285,8 @@ static void ds1307_clks_register(struct ds1307 *ds1307) ret = ds3231_clks_register(ds1307); if (ret) { - dev_warn(&ds1307->client->dev, - "unable to register clock device %d\n", ret); + dev_warn(ds1307->dev, "unable to register clock device %d\n", + ret); } } @@ -1358,6 +1298,12 @@ static void ds1307_clks_register(struct ds1307 *ds1307) #endif /* CONFIG_COMMON_CLK */ +static const struct regmap_config regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x12, +}; + static int ds1307_probe(struct i2c_client *client, const struct i2c_device_id *id) { @@ -1365,7 +1311,6 @@ static int ds1307_probe(struct i2c_client *client, int err = -ENODEV; int tmp, wday; struct chip_desc *chip; - struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); bool want_irq = false; bool ds1307_can_wakeup_device = false; unsigned char *buf; @@ -1382,17 +1327,22 @@ static int ds1307_probe(struct i2c_client *client, }; const struct rtc_class_ops *rtc_ops = &ds13xx_rtc_ops; - if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA) - && !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) - return -EIO; - ds1307 = devm_kzalloc(&client->dev, sizeof(struct ds1307), GFP_KERNEL); if (!ds1307) return -ENOMEM; - i2c_set_clientdata(client, ds1307); + dev_set_drvdata(&client->dev, ds1307); + ds1307->dev = &client->dev; + ds1307->name = client->name; + ds1307->irq = client->irq; - ds1307->client = client; + ds1307->regmap = devm_regmap_init_i2c(client, ®map_config); + if (IS_ERR(ds1307->regmap)) { + dev_err(ds1307->dev, "regmap allocation failed\n"); + return PTR_ERR(ds1307->regmap); + } + + i2c_set_clientdata(client, ds1307); if (client->dev.of_node) { ds1307->type = (enum ds_type) @@ -1405,7 +1355,7 @@ static int ds1307_probe(struct i2c_client *client, const struct acpi_device_id *acpi_id; acpi_id = acpi_match_device(ACPI_PTR(ds1307_acpi_ids), - &client->dev); 
+ ds1307->dev); if (!acpi_id) return -ENODEV; chip = &chips[acpi_id->driver_data]; @@ -1413,27 +1363,21 @@ static int ds1307_probe(struct i2c_client *client, } if (!pdata) - ds1307_trickle_init(client, chip); + ds1307_trickle_init(ds1307, chip); else if (pdata->trickle_charger_setup) chip->trickle_charger_setup = pdata->trickle_charger_setup; if (chip->trickle_charger_setup && chip->trickle_charger_reg) { - dev_dbg(&client->dev, "writing trickle charger info 0x%x to 0x%x\n", + dev_dbg(ds1307->dev, + "writing trickle charger info 0x%x to 0x%x\n", DS13XX_TRICKLE_CHARGER_MAGIC | chip->trickle_charger_setup, chip->trickle_charger_reg); - i2c_smbus_write_byte_data(client, chip->trickle_charger_reg, + regmap_write(ds1307->regmap, chip->trickle_charger_reg, DS13XX_TRICKLE_CHARGER_MAGIC | chip->trickle_charger_setup); } buf = ds1307->regs; - if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { - ds1307->read_block_data = ds1307_native_smbus_read_block_data; - ds1307->write_block_data = ds1307_native_smbus_write_block_data; - } else { - ds1307->read_block_data = ds1307_read_block_data; - ds1307->write_block_data = ds1307_write_block_data; - } #ifdef CONFIG_OF /* @@ -1459,11 +1403,10 @@ static int ds1307_probe(struct i2c_client *client, case ds_1339: case ds_3231: /* get registers that the "rtc" read below won't read... */ - tmp = ds1307->read_block_data(ds1307->client, - DS1337_REG_CONTROL, 2, buf); - if (tmp != 2) { - dev_dbg(&client->dev, "read error %d\n", tmp); - err = -EIO; + err = regmap_bulk_read(ds1307->regmap, DS1337_REG_CONTROL, + buf, 2); + if (err) { + dev_dbg(ds1307->dev, "read error %d\n", err); goto exit; } @@ -1477,8 +1420,8 @@ static int ds1307_probe(struct i2c_client *client, * For some variants, be sure alarms can trigger when we're * running on Vbackup (BBSQI/BBSQW) */ - if (chip->alarm && (ds1307->client->irq > 0 || - ds1307_can_wakeup_device)) { + if (chip->alarm && (ds1307->irq > 0 || + ds1307_can_wakeup_device)) { ds1307->regs[0] |= DS1337_BIT_INTCN | bbsqi_bitpos[ds1307->type]; ds1307->regs[0] &= ~(DS1337_BIT_A2IE | DS1337_BIT_A1IE); @@ -1486,50 +1429,49 @@ static int ds1307_probe(struct i2c_client *client, want_irq = true; } - i2c_smbus_write_byte_data(client, DS1337_REG_CONTROL, - ds1307->regs[0]); + regmap_write(ds1307->regmap, DS1337_REG_CONTROL, + ds1307->regs[0]); /* oscillator fault? clear flag, and warn */ if (ds1307->regs[1] & DS1337_BIT_OSF) { - i2c_smbus_write_byte_data(client, DS1337_REG_STATUS, - ds1307->regs[1] & ~DS1337_BIT_OSF); - dev_warn(&client->dev, "SET TIME!\n"); + regmap_write(ds1307->regmap, DS1337_REG_STATUS, + ds1307->regs[1] & ~DS1337_BIT_OSF); + dev_warn(ds1307->dev, "SET TIME!\n"); } break; case rx_8025: - tmp = i2c_smbus_read_i2c_block_data(ds1307->client, - RX8025_REG_CTRL1 << 4 | 0x08, 2, buf); - if (tmp != 2) { - dev_dbg(&client->dev, "read error %d\n", tmp); - err = -EIO; + err = regmap_bulk_read(ds1307->regmap, + RX8025_REG_CTRL1 << 4 | 0x08, buf, 2); + if (err) { + dev_dbg(ds1307->dev, "read error %d\n", err); goto exit; } /* oscillator off? turn it on, so clock can tick. 
*/ if (!(ds1307->regs[1] & RX8025_BIT_XST)) { ds1307->regs[1] |= RX8025_BIT_XST; - i2c_smbus_write_byte_data(client, - RX8025_REG_CTRL2 << 4 | 0x08, - ds1307->regs[1]); - dev_warn(&client->dev, + regmap_write(ds1307->regmap, + RX8025_REG_CTRL2 << 4 | 0x08, + ds1307->regs[1]); + dev_warn(ds1307->dev, "oscillator stop detected - SET TIME!\n"); } if (ds1307->regs[1] & RX8025_BIT_PON) { ds1307->regs[1] &= ~RX8025_BIT_PON; - i2c_smbus_write_byte_data(client, - RX8025_REG_CTRL2 << 4 | 0x08, - ds1307->regs[1]); - dev_warn(&client->dev, "power-on detected\n"); + regmap_write(ds1307->regmap, + RX8025_REG_CTRL2 << 4 | 0x08, + ds1307->regs[1]); + dev_warn(ds1307->dev, "power-on detected\n"); } if (ds1307->regs[1] & RX8025_BIT_VDET) { ds1307->regs[1] &= ~RX8025_BIT_VDET; - i2c_smbus_write_byte_data(client, - RX8025_REG_CTRL2 << 4 | 0x08, - ds1307->regs[1]); - dev_warn(&client->dev, "voltage drop detected\n"); + regmap_write(ds1307->regmap, + RX8025_REG_CTRL2 << 4 | 0x08, + ds1307->regs[1]); + dev_warn(ds1307->dev, "voltage drop detected\n"); } /* make sure we are running in 24hour mode */ @@ -1537,16 +1479,15 @@ static int ds1307_probe(struct i2c_client *client, u8 hour; /* switch to 24 hour mode */ - i2c_smbus_write_byte_data(client, - RX8025_REG_CTRL1 << 4 | 0x08, - ds1307->regs[0] | - RX8025_BIT_2412); - - tmp = i2c_smbus_read_i2c_block_data(ds1307->client, - RX8025_REG_CTRL1 << 4 | 0x08, 2, buf); - if (tmp != 2) { - dev_dbg(&client->dev, "read error %d\n", tmp); - err = -EIO; + regmap_write(ds1307->regmap, + RX8025_REG_CTRL1 << 4 | 0x08, + ds1307->regs[0] | RX8025_BIT_2412); + + err = regmap_bulk_read(ds1307->regmap, + RX8025_REG_CTRL1 << 4 | 0x08, + buf, 2); + if (err) { + dev_dbg(ds1307->dev, "read error %d\n", err); goto exit; } @@ -1557,9 +1498,16 @@ static int ds1307_probe(struct i2c_client *client, if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM) hour += 12; - i2c_smbus_write_byte_data(client, - DS1307_REG_HOUR << 4 | 0x08, - hour); + regmap_write(ds1307->regmap, + DS1307_REG_HOUR << 4 | 0x08, hour); + } + break; + case rx_8130: + ds1307->offset = 0x10; /* Seconds starts at 0x10 */ + rtc_ops = &rx8130_rtc_ops; + if (chip->alarm && ds1307->irq > 0) { + irq_handler = rx8130_irq; + want_irq = true; } break; case ds_1388: @@ -1567,7 +1515,8 @@ static int ds1307_probe(struct i2c_client *client, break; case mcp794xx: rtc_ops = &mcp794xx_rtc_ops; - if (ds1307->client->irq > 0 && chip->alarm) { + if (chip->alarm && (ds1307->irq > 0 || + ds1307_can_wakeup_device)) { irq_handler = mcp794xx_irq; want_irq = true; } @@ -1578,10 +1527,9 @@ static int ds1307_probe(struct i2c_client *client, read_rtc: /* read RTC registers */ - tmp = ds1307->read_block_data(ds1307->client, ds1307->offset, 8, buf); - if (tmp != 8) { - dev_dbg(&client->dev, "read error %d\n", tmp); - err = -EIO; + err = regmap_bulk_read(ds1307->regmap, ds1307->offset, buf, 8); + if (err) { + dev_dbg(ds1307->dev, "read error %d\n", err); goto exit; } @@ -1597,56 +1545,56 @@ read_rtc: case m41t00: /* clock halted? turn it on, so clock can tick. */ if (tmp & DS1307_BIT_CH) { - i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0); - dev_warn(&client->dev, "SET TIME!\n"); + regmap_write(ds1307->regmap, DS1307_REG_SECS, 0); + dev_warn(ds1307->dev, "SET TIME!\n"); goto read_rtc; } break; + case ds_1308: case ds_1338: /* clock halted? turn it on, so clock can tick. */ if (tmp & DS1307_BIT_CH) - i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0); + regmap_write(ds1307->regmap, DS1307_REG_SECS, 0); /* oscillator fault? 
clear flag, and warn */ if (ds1307->regs[DS1307_REG_CONTROL] & DS1338_BIT_OSF) { - i2c_smbus_write_byte_data(client, DS1307_REG_CONTROL, - ds1307->regs[DS1307_REG_CONTROL] - & ~DS1338_BIT_OSF); - dev_warn(&client->dev, "SET TIME!\n"); + regmap_write(ds1307->regmap, DS1307_REG_CONTROL, + ds1307->regs[DS1307_REG_CONTROL] & + ~DS1338_BIT_OSF); + dev_warn(ds1307->dev, "SET TIME!\n"); goto read_rtc; } break; case ds_1340: /* clock halted? turn it on, so clock can tick. */ if (tmp & DS1340_BIT_nEOSC) - i2c_smbus_write_byte_data(client, DS1307_REG_SECS, 0); + regmap_write(ds1307->regmap, DS1307_REG_SECS, 0); - tmp = i2c_smbus_read_byte_data(client, DS1340_REG_FLAG); - if (tmp < 0) { - dev_dbg(&client->dev, "read error %d\n", tmp); - err = -EIO; + err = regmap_read(ds1307->regmap, DS1340_REG_FLAG, &tmp); + if (err) { + dev_dbg(ds1307->dev, "read error %d\n", err); goto exit; } /* oscillator fault? clear flag, and warn */ if (tmp & DS1340_BIT_OSF) { - i2c_smbus_write_byte_data(client, DS1340_REG_FLAG, 0); - dev_warn(&client->dev, "SET TIME!\n"); + regmap_write(ds1307->regmap, DS1340_REG_FLAG, 0); + dev_warn(ds1307->dev, "SET TIME!\n"); } break; case mcp794xx: /* make sure that the backup battery is enabled */ if (!(ds1307->regs[DS1307_REG_WDAY] & MCP794XX_BIT_VBATEN)) { - i2c_smbus_write_byte_data(client, DS1307_REG_WDAY, - ds1307->regs[DS1307_REG_WDAY] - | MCP794XX_BIT_VBATEN); + regmap_write(ds1307->regmap, DS1307_REG_WDAY, + ds1307->regs[DS1307_REG_WDAY] | + MCP794XX_BIT_VBATEN); } /* clock halted? turn it on, so clock can tick. */ if (!(tmp & MCP794XX_BIT_ST)) { - i2c_smbus_write_byte_data(client, DS1307_REG_SECS, - MCP794XX_BIT_ST); - dev_warn(&client->dev, "SET TIME!\n"); + regmap_write(ds1307->regmap, DS1307_REG_SECS, + MCP794XX_BIT_ST); + dev_warn(ds1307->dev, "SET TIME!\n"); goto read_rtc; } @@ -1680,16 +1628,15 @@ read_rtc: tmp = 0; if (ds1307->regs[DS1307_REG_HOUR] & DS1307_BIT_PM) tmp += 12; - i2c_smbus_write_byte_data(client, - ds1307->offset + DS1307_REG_HOUR, - bin2bcd(tmp)); + regmap_write(ds1307->regmap, ds1307->offset + DS1307_REG_HOUR, + bin2bcd(tmp)); } /* * Some IPs have weekday reset value = 0x1 which might not correct * hence compute the wday using the current date/month/year values */ - ds1307_get_time(&client->dev, &tm); + ds1307_get_time(ds1307->dev, &tm); wday = tm.tm_wday; timestamp = rtc_tm_to_time64(&tm); rtc_time64_to_tm(timestamp, &tm); @@ -1699,78 +1646,63 @@ read_rtc: * If different then set the wday which we computed using * timestamp */ - if (wday != tm.tm_wday) { - wday = i2c_smbus_read_byte_data(client, MCP794XX_REG_WEEKDAY); - wday = wday & ~MCP794XX_REG_WEEKDAY_WDAY_MASK; - wday = wday | (tm.tm_wday + 1); - i2c_smbus_write_byte_data(client, MCP794XX_REG_WEEKDAY, wday); - } + if (wday != tm.tm_wday) + regmap_update_bits(ds1307->regmap, MCP794XX_REG_WEEKDAY, + MCP794XX_REG_WEEKDAY_WDAY_MASK, + tm.tm_wday + 1); if (want_irq) { - device_set_wakeup_capable(&client->dev, true); + device_set_wakeup_capable(ds1307->dev, true); set_bit(HAS_ALARM, &ds1307->flags); } - ds1307->rtc = devm_rtc_device_register(&client->dev, client->name, - rtc_ops, THIS_MODULE); + + ds1307->rtc = devm_rtc_allocate_device(ds1307->dev); if (IS_ERR(ds1307->rtc)) { return PTR_ERR(ds1307->rtc); } - if (ds1307_can_wakeup_device && ds1307->client->irq <= 0) { + if (ds1307_can_wakeup_device && ds1307->irq <= 0) { /* Disable request for an IRQ */ want_irq = false; - dev_info(&client->dev, "'wakeup-source' is set, request for an IRQ is disabled!\n"); + dev_info(ds1307->dev, + "'wakeup-source' is 
set, request for an IRQ is disabled!\n"); /* We cannot support UIE mode if we do not have an IRQ line */ ds1307->rtc->uie_unsupported = 1; } if (want_irq) { - err = devm_request_threaded_irq(&client->dev, - client->irq, NULL, irq_handler, + err = devm_request_threaded_irq(ds1307->dev, + ds1307->irq, NULL, irq_handler, IRQF_SHARED | IRQF_ONESHOT, - ds1307->rtc->name, client); + ds1307->name, ds1307); if (err) { client->irq = 0; - device_set_wakeup_capable(&client->dev, false); + device_set_wakeup_capable(ds1307->dev, false); clear_bit(HAS_ALARM, &ds1307->flags); - dev_err(&client->dev, "unable to request IRQ!\n"); + dev_err(ds1307->dev, "unable to request IRQ!\n"); } else - dev_dbg(&client->dev, "got IRQ %d\n", client->irq); + dev_dbg(ds1307->dev, "got IRQ %d\n", client->irq); } if (chip->nvram_size) { - - ds1307->nvram = devm_kzalloc(&client->dev, - sizeof(struct bin_attribute), - GFP_KERNEL); - if (!ds1307->nvram) { - dev_err(&client->dev, "cannot allocate memory for nvram sysfs\n"); - } else { - - ds1307->nvram->attr.name = "nvram"; - ds1307->nvram->attr.mode = S_IRUGO | S_IWUSR; - - sysfs_bin_attr_init(ds1307->nvram); - - ds1307->nvram->read = ds1307_nvram_read; - ds1307->nvram->write = ds1307_nvram_write; - ds1307->nvram->size = chip->nvram_size; - ds1307->nvram_offset = chip->nvram_offset; - - err = sysfs_create_bin_file(&client->dev.kobj, - ds1307->nvram); - if (err) { - dev_err(&client->dev, - "unable to create sysfs file: %s\n", - ds1307->nvram->attr.name); - } else { - set_bit(HAS_NVRAM, &ds1307->flags); - dev_info(&client->dev, "%zu bytes nvram\n", - ds1307->nvram->size); - } - } + ds1307->nvmem_cfg.name = "ds1307_nvram"; + ds1307->nvmem_cfg.word_size = 1; + ds1307->nvmem_cfg.stride = 1; + ds1307->nvmem_cfg.size = chip->nvram_size; + ds1307->nvmem_cfg.reg_read = ds1307_nvram_read; + ds1307->nvmem_cfg.reg_write = ds1307_nvram_write; + ds1307->nvmem_cfg.priv = ds1307; + ds1307->nvram_offset = chip->nvram_offset; + + ds1307->rtc->nvmem_config = &ds1307->nvmem_cfg; + ds1307->rtc->nvram_old_abi = true; } + ds1307->rtc->ops = rtc_ops; + err = rtc_register_device(ds1307->rtc); + if (err) + return err; + ds1307_hwmon_register(ds1307); ds1307_clks_register(ds1307); @@ -1780,16 +1712,6 @@ exit: return err; } -static int ds1307_remove(struct i2c_client *client) -{ - struct ds1307 *ds1307 = i2c_get_clientdata(client); - - if (test_and_clear_bit(HAS_NVRAM, &ds1307->flags)) - sysfs_remove_bin_file(&client->dev.kobj, ds1307->nvram); - - return 0; -} - static struct i2c_driver ds1307_driver = { .driver = { .name = "rtc-ds1307", @@ -1797,7 +1719,6 @@ static struct i2c_driver ds1307_driver = { .acpi_match_table = ACPI_PTR(ds1307_acpi_ids), }, .probe = ds1307_probe, - .remove = ds1307_remove, .id_table = ds1307_id, }; diff --git a/drivers/rtc/rtc-ds3232.c b/drivers/rtc/rtc-ds3232.c index deff431a37c4..0550f7ba464f 100644 --- a/drivers/rtc/rtc-ds3232.c +++ b/drivers/rtc/rtc-ds3232.c @@ -22,6 +22,7 @@ #include <linux/bcd.h> #include <linux/slab.h> #include <linux/regmap.h> +#include <linux/hwmon.h> #define DS3232_REG_SECONDS 0x00 #define DS3232_REG_MINUTES 0x01 @@ -46,6 +47,8 @@ # define DS3232_REG_SR_A2F 0x02 # define DS3232_REG_SR_A1F 0x01 +#define DS3232_REG_TEMPERATURE 0x11 + struct ds3232 { struct device *dev; struct regmap *regmap; @@ -275,6 +278,120 @@ static int ds3232_update_alarm(struct device *dev, unsigned int enabled) return ret; } +/* + * Temperature sensor support for ds3232/ds3234 devices. 
+ * A user-initiated temperature conversion is not started by this function, + * so the temperature is updated once every 64 seconds. + */ +static int ds3232_hwmon_read_temp(struct device *dev, long int *mC) +{ + struct ds3232 *ds3232 = dev_get_drvdata(dev); + u8 temp_buf[2]; + s16 temp; + int ret; + + ret = regmap_bulk_read(ds3232->regmap, DS3232_REG_TEMPERATURE, temp_buf, + sizeof(temp_buf)); + if (ret < 0) + return ret; + + /* + * Temperature is represented as a 10-bit code with a resolution of + * 0.25 degree celsius and encoded in two's complement format. + */ + temp = (temp_buf[0] << 8) | temp_buf[1]; + temp >>= 6; + *mC = temp * 250; + + return 0; +} + +static umode_t ds3232_hwmon_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + if (type != hwmon_temp) + return 0; + + switch (attr) { + case hwmon_temp_input: + return 0444; + default: + return 0; + } +} + +static int ds3232_hwmon_read(struct device *dev, + enum hwmon_sensor_types type, + u32 attr, int channel, long *temp) +{ + int err; + + switch (attr) { + case hwmon_temp_input: + err = ds3232_hwmon_read_temp(dev, temp); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static u32 ds3232_hwmon_chip_config[] = { + HWMON_C_REGISTER_TZ, + 0 +}; + +static const struct hwmon_channel_info ds3232_hwmon_chip = { + .type = hwmon_chip, + .config = ds3232_hwmon_chip_config, +}; + +static u32 ds3232_hwmon_temp_config[] = { + HWMON_T_INPUT, + 0 +}; + +static const struct hwmon_channel_info ds3232_hwmon_temp = { + .type = hwmon_temp, + .config = ds3232_hwmon_temp_config, +}; + +static const struct hwmon_channel_info *ds3232_hwmon_info[] = { + &ds3232_hwmon_chip, + &ds3232_hwmon_temp, + NULL +}; + +static const struct hwmon_ops ds3232_hwmon_hwmon_ops = { + .is_visible = ds3232_hwmon_is_visible, + .read = ds3232_hwmon_read, +}; + +static const struct hwmon_chip_info ds3232_hwmon_chip_info = { + .ops = &ds3232_hwmon_hwmon_ops, + .info = ds3232_hwmon_info, +}; + +static void ds3232_hwmon_register(struct device *dev, const char *name) +{ + struct ds3232 *ds3232 = dev_get_drvdata(dev); + struct device *hwmon_dev; + + if (!IS_ENABLED(CONFIG_RTC_DRV_DS3232_HWMON)) + return; + + hwmon_dev = devm_hwmon_device_register_with_info(dev, name, ds3232, + &ds3232_hwmon_chip_info, + NULL); + if (IS_ERR(hwmon_dev)) { + dev_err(dev, "unable to register hwmon device %ld\n", + PTR_ERR(hwmon_dev)); + } +} + static int ds3232_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct ds3232 *ds3232 = dev_get_drvdata(dev); @@ -366,6 +483,8 @@ static int ds3232_probe(struct device *dev, struct regmap *regmap, int irq, if (ds3232->irq > 0) device_init_wakeup(dev, 1); + ds3232_hwmon_register(dev, name); + ds3232->rtc = devm_rtc_device_register(dev, name, &ds3232_rtc_ops, THIS_MODULE); if (IS_ERR(ds3232->rtc)) diff --git a/drivers/rtc/rtc-gemini.c b/drivers/rtc/rtc-ftrtc010.c index 5279390bb42d..af8d6beae20c 100644 --- a/drivers/rtc/rtc-gemini.c +++ b/drivers/rtc/rtc-ftrtc010.c @@ -1,5 +1,5 @@ /* - * Gemini OnChip RTC + * Faraday Technology FTRTC010 driver * * Copyright (C) 2009 Janos Laube <janos.dev@gmail.com> * @@ -26,33 +26,36 @@ #include <linux/platform_device.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/clk.h> -#define DRV_NAME "rtc-gemini" +#define DRV_NAME "rtc-ftrtc010" MODULE_AUTHOR("Hans Ulli Kroll <ulli.kroll@googlemail.com>"); MODULE_DESCRIPTION("RTC driver for Gemini SoC"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRV_NAME); -struct gemini_rtc { 
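The shift pair in ds3232_hwmon_read_temp() above gets sign extension for free: the 10-bit two's-complement code is loaded into the top of a signed 16-bit value, and the arithmetic right shift drags the sign bit down with it. Spelled out, with a worked value:

    s16 raw = (temp_buf[0] << 8) | temp_buf[1]; /* sign lands in bit 15 */
    raw >>= 6;          /* arithmetic shift preserves the sign */
    *mC = raw * 250;    /* 1 LSB = 0.25 degC = 250 millidegrees */

    /* e.g. temp_buf = { 0xE6, 0x80 }: raw = 0xE680 as s16 = -6528,
     * -6528 >> 6 = -102, and -102 * 250 = -25500 mdegC, i.e. -25.5 degC */

Strictly, right-shifting a negative signed value is implementation-defined in C, but the kernel assumes arithmetic shifts throughout.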
+struct ftrtc010_rtc { struct rtc_device *rtc_dev; void __iomem *rtc_base; int rtc_irq; + struct clk *pclk; + struct clk *extclk; }; -enum gemini_rtc_offsets { - GEMINI_RTC_SECOND = 0x00, - GEMINI_RTC_MINUTE = 0x04, - GEMINI_RTC_HOUR = 0x08, - GEMINI_RTC_DAYS = 0x0C, - GEMINI_RTC_ALARM_SECOND = 0x10, - GEMINI_RTC_ALARM_MINUTE = 0x14, - GEMINI_RTC_ALARM_HOUR = 0x18, - GEMINI_RTC_RECORD = 0x1C, - GEMINI_RTC_CR = 0x20 +enum ftrtc010_rtc_offsets { + FTRTC010_RTC_SECOND = 0x00, + FTRTC010_RTC_MINUTE = 0x04, + FTRTC010_RTC_HOUR = 0x08, + FTRTC010_RTC_DAYS = 0x0C, + FTRTC010_RTC_ALARM_SECOND = 0x10, + FTRTC010_RTC_ALARM_MINUTE = 0x14, + FTRTC010_RTC_ALARM_HOUR = 0x18, + FTRTC010_RTC_RECORD = 0x1C, + FTRTC010_RTC_CR = 0x20, }; -static irqreturn_t gemini_rtc_interrupt(int irq, void *dev) +static irqreturn_t ftrtc010_rtc_interrupt(int irq, void *dev) { return IRQ_HANDLED; } @@ -66,18 +69,18 @@ static irqreturn_t gemini_rtc_interrupt(int irq, void *dev) * the same thing, without the rtc-lib.c calls. */ -static int gemini_rtc_read_time(struct device *dev, struct rtc_time *tm) +static int ftrtc010_rtc_read_time(struct device *dev, struct rtc_time *tm) { - struct gemini_rtc *rtc = dev_get_drvdata(dev); + struct ftrtc010_rtc *rtc = dev_get_drvdata(dev); unsigned int days, hour, min, sec; unsigned long offset, time; - sec = readl(rtc->rtc_base + GEMINI_RTC_SECOND); - min = readl(rtc->rtc_base + GEMINI_RTC_MINUTE); - hour = readl(rtc->rtc_base + GEMINI_RTC_HOUR); - days = readl(rtc->rtc_base + GEMINI_RTC_DAYS); - offset = readl(rtc->rtc_base + GEMINI_RTC_RECORD); + sec = readl(rtc->rtc_base + FTRTC010_RTC_SECOND); + min = readl(rtc->rtc_base + FTRTC010_RTC_MINUTE); + hour = readl(rtc->rtc_base + FTRTC010_RTC_HOUR); + days = readl(rtc->rtc_base + FTRTC010_RTC_DAYS); + offset = readl(rtc->rtc_base + FTRTC010_RTC_RECORD); time = offset + days * 86400 + hour * 3600 + min * 60 + sec; @@ -86,9 +89,9 @@ static int gemini_rtc_read_time(struct device *dev, struct rtc_time *tm) return 0; } -static int gemini_rtc_set_time(struct device *dev, struct rtc_time *tm) +static int ftrtc010_rtc_set_time(struct device *dev, struct rtc_time *tm) { - struct gemini_rtc *rtc = dev_get_drvdata(dev); + struct ftrtc010_rtc *rtc = dev_get_drvdata(dev); unsigned int sec, min, hour, day; unsigned long offset, time; @@ -97,27 +100,27 @@ static int gemini_rtc_set_time(struct device *dev, struct rtc_time *tm) rtc_tm_to_time(tm, &time); - sec = readl(rtc->rtc_base + GEMINI_RTC_SECOND); - min = readl(rtc->rtc_base + GEMINI_RTC_MINUTE); - hour = readl(rtc->rtc_base + GEMINI_RTC_HOUR); - day = readl(rtc->rtc_base + GEMINI_RTC_DAYS); + sec = readl(rtc->rtc_base + FTRTC010_RTC_SECOND); + min = readl(rtc->rtc_base + FTRTC010_RTC_MINUTE); + hour = readl(rtc->rtc_base + FTRTC010_RTC_HOUR); + day = readl(rtc->rtc_base + FTRTC010_RTC_DAYS); offset = time - (day * 86400 + hour * 3600 + min * 60 + sec); - writel(offset, rtc->rtc_base + GEMINI_RTC_RECORD); - writel(0x01, rtc->rtc_base + GEMINI_RTC_CR); + writel(offset, rtc->rtc_base + FTRTC010_RTC_RECORD); + writel(0x01, rtc->rtc_base + FTRTC010_RTC_CR); return 0; } -static const struct rtc_class_ops gemini_rtc_ops = { - .read_time = gemini_rtc_read_time, - .set_time = gemini_rtc_set_time, +static const struct rtc_class_ops ftrtc010_rtc_ops = { + .read_time = ftrtc010_rtc_read_time, + .set_time = ftrtc010_rtc_set_time, }; -static int gemini_rtc_probe(struct platform_device *pdev) +static int ftrtc010_rtc_probe(struct platform_device *pdev) { - struct gemini_rtc *rtc; + struct ftrtc010_rtc *rtc; struct 
device *dev = &pdev->dev; struct resource *res; int ret; @@ -127,6 +130,27 @@ static int gemini_rtc_probe(struct platform_device *pdev) return -ENOMEM; platform_set_drvdata(pdev, rtc); + rtc->pclk = devm_clk_get(dev, "PCLK"); + if (IS_ERR(rtc->pclk)) { + dev_err(dev, "could not get PCLK\n"); + } else { + ret = clk_prepare_enable(rtc->pclk); + if (ret) { + dev_err(dev, "failed to enable PCLK\n"); + return ret; + } + } + rtc->extclk = devm_clk_get(dev, "EXTCLK"); + if (IS_ERR(rtc->extclk)) { + dev_err(dev, "could not get EXTCLK\n"); + } else { + ret = clk_prepare_enable(rtc->extclk); + if (ret) { + dev_err(dev, "failed to enable EXTCLK\n"); + return ret; + } + } + res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res) return -ENODEV; @@ -142,38 +166,43 @@ static int gemini_rtc_probe(struct platform_device *pdev) if (!rtc->rtc_base) return -ENOMEM; - ret = devm_request_irq(dev, rtc->rtc_irq, gemini_rtc_interrupt, + ret = devm_request_irq(dev, rtc->rtc_irq, ftrtc010_rtc_interrupt, IRQF_SHARED, pdev->name, dev); if (unlikely(ret)) return ret; rtc->rtc_dev = rtc_device_register(pdev->name, dev, - &gemini_rtc_ops, THIS_MODULE); + &ftrtc010_rtc_ops, THIS_MODULE); return PTR_ERR_OR_ZERO(rtc->rtc_dev); } -static int gemini_rtc_remove(struct platform_device *pdev) +static int ftrtc010_rtc_remove(struct platform_device *pdev) { - struct gemini_rtc *rtc = platform_get_drvdata(pdev); + struct ftrtc010_rtc *rtc = platform_get_drvdata(pdev); + if (!IS_ERR(rtc->extclk)) + clk_disable_unprepare(rtc->extclk); + if (!IS_ERR(rtc->pclk)) + clk_disable_unprepare(rtc->pclk); rtc_device_unregister(rtc->rtc_dev); return 0; } -static const struct of_device_id gemini_rtc_dt_match[] = { +static const struct of_device_id ftrtc010_rtc_dt_match[] = { { .compatible = "cortina,gemini-rtc" }, + { .compatible = "faraday,ftrtc010" }, { } }; -MODULE_DEVICE_TABLE(of, gemini_rtc_dt_match); +MODULE_DEVICE_TABLE(of, ftrtc010_rtc_dt_match); -static struct platform_driver gemini_rtc_driver = { +static struct platform_driver ftrtc010_rtc_driver = { .driver = { .name = DRV_NAME, - .of_match_table = gemini_rtc_dt_match, + .of_match_table = ftrtc010_rtc_dt_match, }, - .probe = gemini_rtc_probe, - .remove = gemini_rtc_remove, + .probe = ftrtc010_rtc_probe, + .remove = ftrtc010_rtc_remove, }; -module_platform_driver_probe(gemini_rtc_driver, gemini_rtc_probe); +module_platform_driver_probe(ftrtc010_rtc_driver, ftrtc010_rtc_probe); diff --git a/drivers/rtc/rtc-m41t80.c b/drivers/rtc/rtc-m41t80.c index 5ec4653022ff..8940e9e43ea0 100644 --- a/drivers/rtc/rtc-m41t80.c +++ b/drivers/rtc/rtc-m41t80.c @@ -16,6 +16,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bcd.h> +#include <linux/clk-provider.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/kernel.h> @@ -53,6 +54,8 @@ #define M41T80_ALARM_REG_SIZE \ (M41T80_REG_ALARM_SEC + 1 - M41T80_REG_ALARM_MON) +#define M41T80_SQW_MAX_FREQ 32768 + #define M41T80_SEC_ST BIT(7) /* ST: Stop Bit */ #define M41T80_ALMON_AFE BIT(7) /* AFE: AF Enable Bit */ #define M41T80_ALMON_SQWE BIT(6) /* SQWE: SQW Enable Bit */ @@ -147,7 +150,11 @@ MODULE_DEVICE_TABLE(of, m41t80_of_match); struct m41t80_data { unsigned long features; + struct i2c_client *client; struct rtc_device *rtc; +#ifdef CONFIG_COMMON_CLK + struct clk_hw sqw; +#endif }; static irqreturn_t m41t80_handle_irq(int irq, void *dev_id) @@ -227,6 +234,7 @@ static int m41t80_get_datetime(struct i2c_client *client, /* Sets the given date and time to the real time clock. 
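Both clocks in the ftrtc010 probe above are treated as optional: a failed devm_clk_get() is only logged, clk_prepare_enable() runs just for the clocks that were actually found, and ftrtc010_rtc_remove() mirrors the IS_ERR() tests on teardown. Condensed, the idiom is (devm_clk_get_optional() did not exist yet at this point in time):

    rtc->pclk = devm_clk_get(dev, "PCLK");
    if (IS_ERR(rtc->pclk)) {
        dev_err(dev, "could not get PCLK\n");    /* tolerated */
    } else {
        ret = clk_prepare_enable(rtc->pclk);
        if (ret)
            return ret;    /* a clock we do have must enable cleanly */
    }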
*/ static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm) { + struct m41t80_data *clientdata = i2c_get_clientdata(client); unsigned char buf[8]; int err, flags; @@ -242,6 +250,17 @@ static int m41t80_set_datetime(struct i2c_client *client, struct rtc_time *tm) buf[M41T80_REG_YEAR] = bin2bcd(tm->tm_year - 100); buf[M41T80_REG_WDAY] = tm->tm_wday; + /* If the square wave output is controlled in the weekday register */ + if (clientdata->features & M41T80_FEATURE_SQ_ALT) { + int val; + + val = i2c_smbus_read_byte_data(client, M41T80_REG_WDAY); + if (val < 0) + return val; + + buf[M41T80_REG_WDAY] |= (val & 0xf0); + } + err = i2c_smbus_write_i2c_block_data(client, M41T80_REG_SSEC, sizeof(buf), buf); if (err < 0) { @@ -332,6 +351,9 @@ static int m41t80_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) return err; } + /* Keep SQWE bit value */ + alarmvals[0] |= (ret & M41T80_ALMON_SQWE); + ret = i2c_smbus_read_byte_data(client, M41T80_REG_FLAGS); if (ret < 0) return ret; @@ -431,103 +453,175 @@ static ssize_t flags_show(struct device *dev, } static DEVICE_ATTR_RO(flags); -static ssize_t sqwfreq_show(struct device *dev, - struct device_attribute *attr, char *buf) +static struct attribute *attrs[] = { + &dev_attr_flags.attr, + NULL, +}; + +static struct attribute_group attr_group = { + .attrs = attrs, +}; + +#ifdef CONFIG_COMMON_CLK +#define sqw_to_m41t80_data(_hw) container_of(_hw, struct m41t80_data, sqw) + +static unsigned long m41t80_sqw_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) { - struct i2c_client *client = to_i2c_client(dev); - struct m41t80_data *clientdata = i2c_get_clientdata(client); - int val, reg_sqw; + struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw); + struct i2c_client *client = m41t80->client; + int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ? + M41T80_REG_WDAY : M41T80_REG_SQW; + int ret = i2c_smbus_read_byte_data(client, reg_sqw); + unsigned long val = M41T80_SQW_MAX_FREQ; - if (!(clientdata->features & M41T80_FEATURE_SQ)) - return -EINVAL; + if (ret < 0) + return 0; - reg_sqw = M41T80_REG_SQW; - if (clientdata->features & M41T80_FEATURE_SQ_ALT) - reg_sqw = M41T80_REG_WDAY; - val = i2c_smbus_read_byte_data(client, reg_sqw); - if (val < 0) - return val; - val = (val >> 4) & 0xf; - switch (val) { - case 0: - break; - case 1: - val = 32768; - break; - default: - val = 32768 >> val; - } - return sprintf(buf, "%d\n", val); + ret >>= 4; + if (ret == 0) + val = 0; + else if (ret > 1) + val = val / (1 << ret); + + return val; } -static ssize_t sqwfreq_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t count) +static long m41t80_sqw_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate) { - struct i2c_client *client = to_i2c_client(dev); - struct m41t80_data *clientdata = i2c_get_clientdata(client); - int almon, sqw, reg_sqw, rc; - unsigned long val; + int i, freq = M41T80_SQW_MAX_FREQ; - rc = kstrtoul(buf, 0, &val); - if (rc < 0) - return rc; + if (freq <= rate) + return freq; - if (!(clientdata->features & M41T80_FEATURE_SQ)) - return -EINVAL; + for (i = 2; i <= ilog2(M41T80_SQW_MAX_FREQ); i++) { + freq /= 1 << i; + if (freq <= rate) + return freq; + } - if (val) { - if (!is_power_of_2(val)) + return 0; +} + +static int m41t80_sqw_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) +{ + struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw); + struct i2c_client *client = m41t80->client; + int reg_sqw = (m41t80->features & M41T80_FEATURE_SQ_ALT) ? 
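m41t80_sqw_recalc_rate() above decodes the chip's 4-bit RS field: 0 turns the square wave off, 1 selects the full 32.768 kHz, and any larger value divides that by 2^RS. As a standalone helper (the function name is hypothetical; the mapping is taken from the code above):

    static unsigned long m41t80_rs_to_hz(unsigned int rs)
    {
        if (rs == 0)
            return 0;            /* output disabled */
        if (rs == 1)
            return 32768;        /* M41T80_SQW_MAX_FREQ */
        return 32768 >> rs;      /* 8192, 4096, ... down to 1 Hz */
    }

m41t80_sqw_set_rate() performs the inverse of this mapping via ilog2().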
+ M41T80_REG_WDAY : M41T80_REG_SQW; + int reg, ret, val = 0; + + if (rate) { + if (!is_power_of_2(rate)) return -EINVAL; - val = ilog2(val); - if (val == 15) + val = ilog2(rate); + if (val == ilog2(M41T80_SQW_MAX_FREQ)) val = 1; - else if (val < 14) - val = 15 - val; + else if (val < (ilog2(M41T80_SQW_MAX_FREQ) - 1)) + val = ilog2(M41T80_SQW_MAX_FREQ) - val; else return -EINVAL; } - /* disable SQW, set SQW frequency & re-enable */ - almon = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); - if (almon < 0) - return almon; - reg_sqw = M41T80_REG_SQW; - if (clientdata->features & M41T80_FEATURE_SQ_ALT) - reg_sqw = M41T80_REG_WDAY; - sqw = i2c_smbus_read_byte_data(client, reg_sqw); - if (sqw < 0) - return sqw; - sqw = (sqw & 0x0f) | (val << 4); - - rc = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, - almon & ~M41T80_ALMON_SQWE); - if (rc < 0) - return rc; - if (val) { - rc = i2c_smbus_write_byte_data(client, reg_sqw, sqw); - if (rc < 0) - return rc; + reg = i2c_smbus_read_byte_data(client, reg_sqw); + if (reg < 0) + return reg; - rc = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, - almon | M41T80_ALMON_SQWE); - if (rc < 0) - return rc; - } - return count; + reg = (reg & 0x0f) | (val << 4); + + ret = i2c_smbus_write_byte_data(client, reg_sqw, reg); + if (ret < 0) + return ret; + + return -EINVAL; } -static DEVICE_ATTR_RW(sqwfreq); -static struct attribute *attrs[] = { - &dev_attr_flags.attr, - &dev_attr_sqwfreq.attr, - NULL, -}; +static int m41t80_sqw_control(struct clk_hw *hw, bool enable) +{ + struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw); + struct i2c_client *client = m41t80->client; + int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); -static struct attribute_group attr_group = { - .attrs = attrs, + if (ret < 0) + return ret; + + if (enable) + ret |= M41T80_ALMON_SQWE; + else + ret &= ~M41T80_ALMON_SQWE; + + return i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, ret); +} + +static int m41t80_sqw_prepare(struct clk_hw *hw) +{ + return m41t80_sqw_control(hw, 1); +} + +static void m41t80_sqw_unprepare(struct clk_hw *hw) +{ + m41t80_sqw_control(hw, 0); +} + +static int m41t80_sqw_is_prepared(struct clk_hw *hw) +{ + struct m41t80_data *m41t80 = sqw_to_m41t80_data(hw); + struct i2c_client *client = m41t80->client; + int ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); + + if (ret < 0) + return ret; + + return !!(ret & M41T80_ALMON_SQWE); +} + +static const struct clk_ops m41t80_sqw_ops = { + .prepare = m41t80_sqw_prepare, + .unprepare = m41t80_sqw_unprepare, + .is_prepared = m41t80_sqw_is_prepared, + .recalc_rate = m41t80_sqw_recalc_rate, + .round_rate = m41t80_sqw_round_rate, + .set_rate = m41t80_sqw_set_rate, }; +static struct clk *m41t80_sqw_register_clk(struct m41t80_data *m41t80) +{ + struct i2c_client *client = m41t80->client; + struct device_node *node = client->dev.of_node; + struct clk *clk; + struct clk_init_data init; + int ret; + + /* First disable the clock */ + ret = i2c_smbus_read_byte_data(client, M41T80_REG_ALARM_MON); + if (ret < 0) + return ERR_PTR(ret); + ret = i2c_smbus_write_byte_data(client, M41T80_REG_ALARM_MON, + ret & ~(M41T80_ALMON_SQWE)); + if (ret < 0) + return ERR_PTR(ret); + + init.name = "m41t80-sqw"; + init.ops = &m41t80_sqw_ops; + init.flags = 0; + init.parent_names = NULL; + init.num_parents = 0; + m41t80->sqw.init = &init; + + /* optional override of the clockname */ + of_property_read_string(node, "clock-output-names", &init.name); + + /* register the clock */ + clk = 
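One wrinkle worth flagging in m41t80_sqw_set_rate() as merged here: after a successful i2c_smbus_write_byte_data() the function still falls through to return -EINVAL, so every rate change reports failure to the clk core. If memory serves, a follow-up fix later changed this to report success; the presumably intended tail is:

    ret = i2c_smbus_write_byte_data(client, reg_sqw, reg);
    if (ret < 0)
        return ret;

    return 0;    /* success, not -EINVAL */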
clk_register(&client->dev, &m41t80->sqw); + if (!IS_ERR(clk)) + of_clk_add_provider(node, of_clk_src_simple_get, clk); + + return clk; +} +#endif + #ifdef CONFIG_RTC_DRV_M41T80_WDT /* ***************************************************************************** @@ -845,6 +939,7 @@ static int m41t80_probe(struct i2c_client *client, if (!m41t80_data) return -ENOMEM; + m41t80_data->client = client; if (client->dev.of_node) m41t80_data->features = (unsigned long) of_device_get_match_data(&client->dev); @@ -937,6 +1032,10 @@ static int m41t80_probe(struct i2c_client *client, } } #endif +#ifdef CONFIG_COMMON_CLK + if (m41t80_data->features & M41T80_FEATURE_SQ) + m41t80_sqw_register_clk(m41t80_data); +#endif return 0; } diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index 77319122642a..401f46d8f21b 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c @@ -43,17 +43,6 @@ #define MAX_PIE_NUM 9 #define MAX_PIE_FREQ 512 -static const u32 PIE_BIT_DEF[MAX_PIE_NUM][2] = { - { 2, RTC_2HZ_BIT }, - { 4, RTC_SAM0_BIT }, - { 8, RTC_SAM1_BIT }, - { 16, RTC_SAM2_BIT }, - { 32, RTC_SAM3_BIT }, - { 64, RTC_SAM4_BIT }, - { 128, RTC_SAM5_BIT }, - { 256, RTC_SAM6_BIT }, - { MAX_PIE_FREQ, RTC_SAM7_BIT }, -}; #define MXC_RTC_TIME 0 #define MXC_RTC_ALARM 1 diff --git a/drivers/rtc/rtc-nuc900.c b/drivers/rtc/rtc-nuc900.c index b1b6b3041bfb..4ed81117cf5f 100644 --- a/drivers/rtc/rtc-nuc900.c +++ b/drivers/rtc/rtc-nuc900.c @@ -93,7 +93,7 @@ static int *check_rtc_access_enable(struct nuc900_rtc *nuc900_rtc) __raw_writel(AERPOWERON, nuc900_rtc->rtc_reg + REG_RTC_AER); while (!(__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB) - && timeout--) + && --timeout) mdelay(1); if (!timeout) diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c index ea20f627dabe..e2a946c0e667 100644 --- a/drivers/rtc/rtc-opal.c +++ b/drivers/rtc/rtc-opal.c @@ -142,6 +142,16 @@ static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm) y_m_d = be32_to_cpu(__y_m_d); h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32); + + /* check if no alarm is set */ + if (y_m_d == 0 && h_m_s_ms == 0) { + pr_debug("No alarm is set\n"); + rc = -ENOENT; + goto exit; + } else { + pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms); + } + opal_to_tm(y_m_d, h_m_s_ms, &alarm->time); exit: @@ -157,7 +167,14 @@ static int opal_set_tpo_time(struct device *dev, struct rtc_wkalrm *alarm) u32 y_m_d = 0; int token, rc; - tm_to_opal(&alarm->time, &y_m_d, &h_m_s_ms); + /* if alarm is enabled */ + if (alarm->enabled) { + tm_to_opal(&alarm->time, &y_m_d, &h_m_s_ms); + pr_debug("Alarm set to %x %llx\n", y_m_d, h_m_s_ms); + + } else { + pr_debug("Alarm getting disabled\n"); + } token = opal_async_get_token_interruptible(); if (token < 0) { @@ -190,6 +207,18 @@ exit: return rc; } +int opal_tpo_alarm_irq_enable(struct device *dev, unsigned int enabled) +{ + struct rtc_wkalrm alarm = { .enabled = 0 }; + + /* + * TPO is automatically enabled when opal_set_tpo_time() is called with + * non-zero rtc-time. We only handle disable case which needs to be + * explicitly told to opal. + */ + return enabled ? 
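The one-character rtc-nuc900 change above ('timeout--' to '--timeout') fixes the timeout detection itself. With post-decrement, the loop only exits after the condition reads an old value of 0, leaving timeout at -1, so the following 'if (!timeout)' can never fire; with pre-decrement the counter is exactly 0 when it expires:

    /* post-decrement leaves timeout == -1 on expiry (check never fires);
     * pre-decrement leaves timeout == 0 on expiry (check works) */
    while (!(__raw_readl(nuc900_rtc->rtc_reg + REG_RTC_AER) & AERRWENB)
            && --timeout)
        mdelay(1);

    if (!timeout)
        /* ... report the timeout, as the surrounding driver does ... */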
0 : opal_set_tpo_time(dev, &alarm); +} + static struct rtc_class_ops opal_rtc_ops = { .read_time = opal_get_rtc_time, .set_time = opal_set_rtc_time, @@ -205,6 +234,7 @@ static int opal_rtc_probe(struct platform_device *pdev) device_set_wakeup_capable(&pdev->dev, true); opal_rtc_ops.read_alarm = opal_get_tpo_time; opal_rtc_ops.set_alarm = opal_set_tpo_time; + opal_rtc_ops.alarm_irq_enable = opal_tpo_alarm_irq_enable; } rtc = devm_rtc_device_register(&pdev->dev, DRVNAME, &opal_rtc_ops, diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c index 1227ceab61ee..cea6ea4df970 100644 --- a/drivers/rtc/rtc-pcf8563.c +++ b/drivers/rtc/rtc-pcf8563.c @@ -606,7 +606,7 @@ static int pcf8563_probe(struct i2c_client *client, err = devm_request_threaded_irq(&client->dev, client->irq, NULL, pcf8563_irq, IRQF_SHARED|IRQF_ONESHOT|IRQF_TRIGGER_FALLING, - pcf8563->rtc->name, client); + pcf8563_driver.driver.name, client); if (err) { dev_err(&client->dev, "unable to request IRQ %d\n", client->irq); diff --git a/drivers/rtc/rtc-rv8803.c b/drivers/rtc/rtc-rv8803.c index 9ad97ab29866..aae2576741a6 100644 --- a/drivers/rtc/rtc-rv8803.c +++ b/drivers/rtc/rtc-rv8803.c @@ -68,6 +68,7 @@ struct rv8803_data { struct mutex flags_lock; u8 ctrl; enum rv8803_type type; + struct nvmem_config nvmem_cfg; }; static int rv8803_read_reg(const struct i2c_client *client, u8 reg) @@ -460,48 +461,32 @@ static int rv8803_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) } } -static ssize_t rv8803_nvram_write(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) +static int rv8803_nvram_write(void *priv, unsigned int offset, void *val, + size_t bytes) { - struct device *dev = kobj_to_dev(kobj); - struct i2c_client *client = to_i2c_client(dev); int ret; - ret = rv8803_write_reg(client, RV8803_RAM, buf[0]); + ret = rv8803_write_reg(priv, RV8803_RAM, *(u8 *)val); if (ret) return ret; - return 1; + return 0; } -static ssize_t rv8803_nvram_read(struct file *filp, struct kobject *kobj, - struct bin_attribute *attr, - char *buf, loff_t off, size_t count) +static int rv8803_nvram_read(void *priv, unsigned int offset, + void *val, size_t bytes) { - struct device *dev = kobj_to_dev(kobj); - struct i2c_client *client = to_i2c_client(dev); int ret; - ret = rv8803_read_reg(client, RV8803_RAM); + ret = rv8803_read_reg(priv, RV8803_RAM); if (ret < 0) return ret; - buf[0] = ret; + *(u8 *)val = ret; - return 1; + return 0; } -static struct bin_attribute rv8803_nvram_attr = { - .attr = { - .name = "nvram", - .mode = S_IRUGO | S_IWUSR, - }, - .size = 1, - .read = rv8803_nvram_read, - .write = rv8803_nvram_write, -}; - static struct rtc_class_ops rv8803_rtc_ops = { .read_time = rv8803_get_time, .set_time = rv8803_set_time, @@ -577,6 +562,11 @@ static int rv8803_probe(struct i2c_client *client, if (flags & RV8803_FLAG_AF) dev_warn(&client->dev, "An alarm maybe have been missed.\n"); + rv8803->rtc = devm_rtc_allocate_device(&client->dev); + if (IS_ERR(rv8803->rtc)) { + return PTR_ERR(rv8803->rtc); + } + if (client->irq > 0) { err = devm_request_threaded_irq(&client->dev, client->irq, NULL, rv8803_handle_irq, @@ -592,12 +582,20 @@ static int rv8803_probe(struct i2c_client *client, } } - rv8803->rtc = devm_rtc_device_register(&client->dev, client->name, - &rv8803_rtc_ops, THIS_MODULE); - if (IS_ERR(rv8803->rtc)) { - dev_err(&client->dev, "unable to register the class device\n"); - return PTR_ERR(rv8803->rtc); - } + rv8803->nvmem_cfg.name = "rv8803_nvram", + 
rv8803->nvmem_cfg.word_size = 1, + rv8803->nvmem_cfg.stride = 1, + rv8803->nvmem_cfg.size = 1, + rv8803->nvmem_cfg.reg_read = rv8803_nvram_read, + rv8803->nvmem_cfg.reg_write = rv8803_nvram_write, + rv8803->nvmem_cfg.priv = client; + + rv8803->rtc->ops = &rv8803_rtc_ops; + rv8803->rtc->nvmem_config = &rv8803->nvmem_cfg; + rv8803->rtc->nvram_old_abi = true; + err = rtc_register_device(rv8803->rtc); + if (err) + return err; err = rv8803_write_reg(rv8803->client, RV8803_EXT, RV8803_EXT_WADA); if (err) @@ -609,22 +607,11 @@ static int rv8803_probe(struct i2c_client *client, return err; } - err = device_create_bin_file(&client->dev, &rv8803_nvram_attr); - if (err) - return err; - rv8803->rtc->max_user_freq = 1; return 0; } -static int rv8803_remove(struct i2c_client *client) -{ - device_remove_bin_file(&client->dev, &rv8803_nvram_attr); - - return 0; -} - static const struct i2c_device_id rv8803_id[] = { { "rv8803", rv_8803 }, { "rx8900", rx_8900 }, @@ -651,7 +638,6 @@ static struct i2c_driver rv8803_driver = { .of_match_table = of_match_ptr(rv8803_of_match), }, .probe = rv8803_probe, - .remove = rv8803_remove, .id_table = rv8803_id, }; module_i2c_driver(rv8803_driver); diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index d44fb34df8fe..a8992c227f61 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -41,7 +41,7 @@ struct s3c_rtc { struct clk *rtc_src_clk; bool clk_disabled; - struct s3c_rtc_data *data; + const struct s3c_rtc_data *data; int irq_alarm; int irq_tick; @@ -49,7 +49,8 @@ struct s3c_rtc { spinlock_t pie_lock; spinlock_t alarm_clk_lock; - int ticnt_save, ticnt_en_save; + int ticnt_save; + int ticnt_en_save; bool wake_en; }; @@ -67,18 +68,32 @@ struct s3c_rtc_data { void (*disable) (struct s3c_rtc *info); }; -static void s3c_rtc_enable_clk(struct s3c_rtc *info) +static int s3c_rtc_enable_clk(struct s3c_rtc *info) { unsigned long irq_flags; + int ret = 0; spin_lock_irqsave(&info->alarm_clk_lock, irq_flags); + if (info->clk_disabled) { - clk_enable(info->rtc_clk); - if (info->data->needs_src_clk) - clk_enable(info->rtc_src_clk); + ret = clk_enable(info->rtc_clk); + if (ret) + goto out; + + if (info->data->needs_src_clk) { + ret = clk_enable(info->rtc_src_clk); + if (ret) { + clk_disable(info->rtc_clk); + goto out; + } + } info->clk_disabled = false; } + +out: spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags); + + return ret; } static void s3c_rtc_disable_clk(struct s3c_rtc *info) @@ -121,10 +136,13 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) { struct s3c_rtc *info = dev_get_drvdata(dev); unsigned int tmp; + int ret; dev_dbg(info->dev, "%s: aie=%d\n", __func__, enabled); - s3c_rtc_enable_clk(info); + ret = s3c_rtc_enable_clk(info); + if (ret) + return ret; tmp = readb(info->base + S3C2410_RTCALM) & ~S3C2410_RTCALM_ALMEN; @@ -135,10 +153,13 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) s3c_rtc_disable_clk(info); - if (enabled) - s3c_rtc_enable_clk(info); - else + if (enabled) { + ret = s3c_rtc_enable_clk(info); + if (ret) + return ret; + } else { s3c_rtc_disable_clk(info); + } return 0; } @@ -146,10 +167,14 @@ static int s3c_rtc_setaie(struct device *dev, unsigned int enabled) /* Set RTC frequency */ static int s3c_rtc_setfreq(struct s3c_rtc *info, int freq) { + int ret; + if (!is_power_of_2(freq)) return -EINVAL; - s3c_rtc_enable_clk(info); + ret = s3c_rtc_enable_clk(info); + if (ret) + return ret; spin_lock_irq(&info->pie_lock); if (info->data->set_freq) @@ -166,10 +191,13 @@ static int 
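Note the return-convention change in the rv8803 hunks above: the old bin_attribute read/write returned a byte count (an ssize_t of 1), whereas nvmem reg_read/reg_write callbacks return 0 for success or a negative errno, passing the data through the void *val pointer instead. For the single-byte RAM register that becomes:

    static int rv8803_nvram_read(void *priv, unsigned int offset,
                                 void *val, size_t bytes)
    {
        int ret = rv8803_read_reg(priv, RV8803_RAM);

        if (ret < 0)
            return ret;    /* propagate the I2C error */

        *(u8 *)val = ret;
        return 0;          /* success is 0, not a byte count */
    }

(The comma-separated nvmem_cfg assignments in probe are valid C comma expressions, though semicolons would be more conventional.)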
s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) { struct s3c_rtc *info = dev_get_drvdata(dev); unsigned int have_retried = 0; + int ret; - s3c_rtc_enable_clk(info); + ret = s3c_rtc_enable_clk(info); + if (ret) + return ret; - retry_get_time: +retry_get_time: rtc_tm->tm_min = readb(info->base + S3C2410_RTCMIN); rtc_tm->tm_hour = readb(info->base + S3C2410_RTCHOUR); rtc_tm->tm_mday = readb(info->base + S3C2410_RTCDATE); @@ -199,8 +227,8 @@ static int s3c_rtc_gettime(struct device *dev, struct rtc_time *rtc_tm) rtc_tm->tm_year += 100; dev_dbg(dev, "read time %04d.%02d.%02d %02d:%02d:%02d\n", - 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, - rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); + 1900 + rtc_tm->tm_year, rtc_tm->tm_mon, rtc_tm->tm_mday, + rtc_tm->tm_hour, rtc_tm->tm_min, rtc_tm->tm_sec); rtc_tm->tm_mon -= 1; @@ -211,10 +239,11 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) { struct s3c_rtc *info = dev_get_drvdata(dev); int year = tm->tm_year - 100; + int ret; dev_dbg(dev, "set time %04d.%02d.%02d %02d:%02d:%02d\n", - 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec); + 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, + tm->tm_hour, tm->tm_min, tm->tm_sec); /* we get around y2k by simply not supporting it */ @@ -223,7 +252,9 @@ static int s3c_rtc_settime(struct device *dev, struct rtc_time *tm) return -EINVAL; } - s3c_rtc_enable_clk(info); + ret = s3c_rtc_enable_clk(info); + if (ret) + return ret; writeb(bin2bcd(tm->tm_sec), info->base + S3C2410_RTCSEC); writeb(bin2bcd(tm->tm_min), info->base + S3C2410_RTCMIN); @@ -242,8 +273,11 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) struct s3c_rtc *info = dev_get_drvdata(dev); struct rtc_time *alm_tm = &alrm->time; unsigned int alm_en; + int ret; - s3c_rtc_enable_clk(info); + ret = s3c_rtc_enable_clk(info); + if (ret) + return ret; alm_tm->tm_sec = readb(info->base + S3C2410_ALMSEC); alm_tm->tm_min = readb(info->base + S3C2410_ALMMIN); @@ -259,9 +293,9 @@ static int s3c_rtc_getalarm(struct device *dev, struct rtc_wkalrm *alrm) alrm->enabled = (alm_en & S3C2410_RTCALM_ALMEN) ? 
1 : 0; dev_dbg(dev, "read alarm %d, %04d.%02d.%02d %02d:%02d:%02d\n", - alm_en, - 1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday, - alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec); + alm_en, + 1900 + alm_tm->tm_year, alm_tm->tm_mon, alm_tm->tm_mday, + alm_tm->tm_hour, alm_tm->tm_min, alm_tm->tm_sec); /* decode the alarm enable field */ if (alm_en & S3C2410_RTCALM_SECEN) @@ -292,14 +326,17 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) struct s3c_rtc *info = dev_get_drvdata(dev); struct rtc_time *tm = &alrm->time; unsigned int alrm_en; + int ret; int year = tm->tm_year - 100; dev_dbg(dev, "s3c_rtc_setalarm: %d, %04d.%02d.%02d %02d:%02d:%02d\n", - alrm->enabled, - 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, - tm->tm_hour, tm->tm_min, tm->tm_sec); + alrm->enabled, + 1900 + tm->tm_year, tm->tm_mon + 1, tm->tm_mday, + tm->tm_hour, tm->tm_min, tm->tm_sec); - s3c_rtc_enable_clk(info); + ret = s3c_rtc_enable_clk(info); + if (ret) + return ret; alrm_en = readb(info->base + S3C2410_RTCALM) & S3C2410_RTCALM_ALMEN; writeb(0x00, info->base + S3C2410_RTCALM); @@ -348,8 +385,11 @@ static int s3c_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) static int s3c_rtc_proc(struct device *dev, struct seq_file *seq) { struct s3c_rtc *info = dev_get_drvdata(dev); + int ret; - s3c_rtc_enable_clk(info); + ret = s3c_rtc_enable_clk(info); + if (ret) + return ret; if (info->data->enable_tick) info->data->enable_tick(info, seq); @@ -378,8 +418,7 @@ static void s3c24xx_rtc_enable(struct s3c_rtc *info) dev_info(info->dev, "rtc disabled, re-enabling\n"); tmp = readw(info->base + S3C2410_RTCCON); - writew(tmp | S3C2410_RTCCON_RTCEN, - info->base + S3C2410_RTCCON); + writew(tmp | S3C2410_RTCCON_RTCEN, info->base + S3C2410_RTCCON); } if (con & S3C2410_RTCCON_CNTSEL) { @@ -387,7 +426,7 @@ static void s3c24xx_rtc_enable(struct s3c_rtc *info) tmp = readw(info->base + S3C2410_RTCCON); writew(tmp & ~S3C2410_RTCCON_CNTSEL, - info->base + S3C2410_RTCCON); + info->base + S3C2410_RTCCON); } if (con & S3C2410_RTCCON_CLKRST) { @@ -395,7 +434,7 @@ static void s3c24xx_rtc_enable(struct s3c_rtc *info) tmp = readw(info->base + S3C2410_RTCCON); writew(tmp & ~S3C2410_RTCCON_CLKRST, - info->base + S3C2410_RTCCON); + info->base + S3C2410_RTCCON); } } @@ -437,12 +476,12 @@ static int s3c_rtc_remove(struct platform_device *pdev) static const struct of_device_id s3c_rtc_dt_match[]; -static struct s3c_rtc_data *s3c_rtc_get_data(struct platform_device *pdev) +static const struct s3c_rtc_data *s3c_rtc_get_data(struct platform_device *pdev) { const struct of_device_id *match; match = of_match_node(s3c_rtc_dt_match, pdev->dev.of_node); - return (struct s3c_rtc_data *)match->data; + return match->data; } static int s3c_rtc_probe(struct platform_device *pdev) @@ -481,7 +520,7 @@ static int s3c_rtc_probe(struct platform_device *pdev) } dev_dbg(&pdev->dev, "s3c2410_rtc: tick irq %d, alarm irq %d\n", - info->irq_tick, info->irq_alarm); + info->irq_tick, info->irq_alarm); /* get the memory region */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -498,7 +537,9 @@ static int s3c_rtc_probe(struct platform_device *pdev) dev_dbg(&pdev->dev, "probe deferred due to missing rtc clk\n"); return ret; } - clk_prepare_enable(info->rtc_clk); + ret = clk_prepare_enable(info->rtc_clk); + if (ret) + return ret; if (info->data->needs_src_clk) { info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src"); @@ -510,10 +551,11 @@ static int s3c_rtc_probe(struct platform_device *pdev) else dev_dbg(&pdev->dev, "probe 
deferred due to missing rtc src clk\n"); - clk_disable_unprepare(info->rtc_clk); - return ret; + goto err_src_clk; } - clk_prepare_enable(info->rtc_src_clk); + ret = clk_prepare_enable(info->rtc_src_clk); + if (ret) + goto err_src_clk; } /* check to see if everything is setup correctly */ @@ -521,7 +563,7 @@ static int s3c_rtc_probe(struct platform_device *pdev) info->data->enable(info); dev_dbg(&pdev->dev, "s3c2410_rtc: RTCCON=%02x\n", - readw(info->base + S3C2410_RTCCON)); + readw(info->base + S3C2410_RTCCON)); device_init_wakeup(&pdev->dev, 1); @@ -541,7 +583,7 @@ static int s3c_rtc_probe(struct platform_device *pdev) /* register RTC and exit */ info->rtc = devm_rtc_device_register(&pdev->dev, "s3c", &s3c_rtcops, - THIS_MODULE); + THIS_MODULE); if (IS_ERR(info->rtc)) { dev_err(&pdev->dev, "cannot attach rtc\n"); ret = PTR_ERR(info->rtc); @@ -549,14 +591,14 @@ static int s3c_rtc_probe(struct platform_device *pdev) } ret = devm_request_irq(&pdev->dev, info->irq_alarm, s3c_rtc_alarmirq, - 0, "s3c2410-rtc alarm", info); + 0, "s3c2410-rtc alarm", info); if (ret) { dev_err(&pdev->dev, "IRQ%d error %d\n", info->irq_alarm, ret); goto err_nortc; } ret = devm_request_irq(&pdev->dev, info->irq_tick, s3c_rtc_tickirq, - 0, "s3c2410-rtc tick", info); + 0, "s3c2410-rtc tick", info); if (ret) { dev_err(&pdev->dev, "IRQ%d error %d\n", info->irq_tick, ret); goto err_nortc; @@ -569,12 +611,13 @@ static int s3c_rtc_probe(struct platform_device *pdev) return 0; - err_nortc: +err_nortc: if (info->data->disable) info->data->disable(info); if (info->data->needs_src_clk) clk_disable_unprepare(info->rtc_src_clk); +err_src_clk: clk_disable_unprepare(info->rtc_clk); return ret; @@ -585,8 +628,11 @@ static int s3c_rtc_probe(struct platform_device *pdev) static int s3c_rtc_suspend(struct device *dev) { struct s3c_rtc *info = dev_get_drvdata(dev); + int ret; - s3c_rtc_enable_clk(info); + ret = s3c_rtc_enable_clk(info); + if (ret) + return ret; /* save TICNT for anyone using periodic interrupts */ if (info->data->save_tick_cnt) @@ -747,8 +793,7 @@ static void s3c6410_rtc_restore_tick_cnt(struct s3c_rtc *info) writel(info->ticnt_save, info->base + S3C2410_TICNT); if (info->ticnt_en_save) { con = readw(info->base + S3C2410_RTCCON); - writew(con | info->ticnt_en_save, - info->base + S3C2410_RTCCON); + writew(con | info->ticnt_en_save, info->base + S3C2410_RTCCON); } } @@ -802,19 +847,19 @@ static struct s3c_rtc_data const s3c6410_rtc_data = { static const struct of_device_id s3c_rtc_dt_match[] = { { .compatible = "samsung,s3c2410-rtc", - .data = (void *)&s3c2410_rtc_data, + .data = &s3c2410_rtc_data, }, { .compatible = "samsung,s3c2416-rtc", - .data = (void *)&s3c2416_rtc_data, + .data = &s3c2416_rtc_data, }, { .compatible = "samsung,s3c2443-rtc", - .data = (void *)&s3c2443_rtc_data, + .data = &s3c2443_rtc_data, }, { .compatible = "samsung,s3c6410-rtc", - .data = (void *)&s3c6410_rtc_data, + .data = &s3c6410_rtc_data, }, { .compatible = "samsung,exynos3250-rtc", - .data = (void *)&s3c6410_rtc_data, + .data = &s3c6410_rtc_data, }, { /* sentinel */ }, }; diff --git a/drivers/rtc/rtc-st-lpc.c b/drivers/rtc/rtc-st-lpc.c index 74c0a336ceea..82b0af159a28 100644 --- a/drivers/rtc/rtc-st-lpc.c +++ b/drivers/rtc/rtc-st-lpc.c @@ -99,7 +99,7 @@ static int st_rtc_read_time(struct device *dev, struct rtc_time *tm) lpt = ((unsigned long long)lpt_msb << 32) | lpt_lsb; do_div(lpt, rtc->clkrate); - rtc_time_to_tm(lpt, tm); + rtc_time64_to_tm(lpt, tm); return 0; } @@ -107,13 +107,10 @@ static int st_rtc_read_time(struct device *dev, 
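The s3c changes above are dominated by one theme: s3c_rtc_enable_clk() can now fail, so every caller gained a 'ret' and an early return, and probe gained an err_src_clk label so a source-clock failure unwinds the already-enabled bus clock. The core unwind shape, from s3c_rtc_enable_clk() itself:

    ret = clk_enable(info->rtc_clk);
    if (ret)
        goto out;

    if (info->data->needs_src_clk) {
        ret = clk_enable(info->rtc_src_clk);
        if (ret) {
            clk_disable(info->rtc_clk);    /* undo the partial enable */
            goto out;
        }
    }
    info->clk_disabled = false;
out:
    spin_unlock_irqrestore(&info->alarm_clk_lock, irq_flags);
    return ret;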
struct rtc_time *tm) static int st_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct st_rtc *rtc = dev_get_drvdata(dev); - unsigned long long lpt; - unsigned long secs, flags; - int ret; + unsigned long long lpt, secs; + unsigned long flags; - ret = rtc_tm_to_time(tm, &secs); - if (ret) - return ret; + secs = rtc_tm_to_time64(tm); lpt = (unsigned long long)secs * rtc->clkrate; @@ -161,13 +158,13 @@ static int st_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *t) { struct st_rtc *rtc = dev_get_drvdata(dev); struct rtc_time now; - unsigned long now_secs; - unsigned long alarm_secs; + unsigned long long now_secs; + unsigned long long alarm_secs; unsigned long long lpa; st_rtc_read_time(dev, &now); - rtc_tm_to_time(&now, &now_secs); - rtc_tm_to_time(&t->time, &alarm_secs); + now_secs = rtc_tm_to_time64(&now); + alarm_secs = rtc_tm_to_time64(&t->time); /* Invalid alarm time */ if (now_secs > alarm_secs) diff --git a/drivers/rtc/rtc-stm32.c b/drivers/rtc/rtc-stm32.c index bd57eb1029e1..3a5c3d7d0c77 100644 --- a/drivers/rtc/rtc-stm32.c +++ b/drivers/rtc/rtc-stm32.c @@ -94,11 +94,17 @@ /* STM32_PWR_CR bit field */ #define PWR_CR_DBP BIT(8) +struct stm32_rtc_data { + bool has_pclk; +}; + struct stm32_rtc { struct rtc_device *rtc_dev; void __iomem *base; struct regmap *dbp; - struct clk *ck_rtc; + struct stm32_rtc_data *data; + struct clk *pclk; + struct clk *rtc_ck; int irq_alarm; }; @@ -122,9 +128,9 @@ static int stm32_rtc_enter_init_mode(struct stm32_rtc *rtc) writel_relaxed(isr, rtc->base + STM32_RTC_ISR); /* - * It takes around 2 ck_rtc clock cycles to enter in + * It takes around 2 rtc_ck clock cycles to enter in * initialization phase mode (and have INITF flag set). As - * slowest ck_rtc frequency may be 32kHz and highest should be + * slowest rtc_ck frequency may be 32kHz and highest should be * 1MHz, we poll every 10 us with a timeout of 100ms. 
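rtc-st-lpc above is a straight y2038 conversion: rtc_tm_to_time64() and rtc_time64_to_tm() carry seconds as 64-bit values, and unlike the 32-bit rtc_tm_to_time() the forward conversion cannot fail, which is why set_time loses its error check and now_secs/alarm_secs widen to unsigned long long. A round-trip sketch (assuming a struct rtc_time *tm and a clock rate, as in the driver):

    unsigned long long ticks = (unsigned long long)rtc_tm_to_time64(tm)
                               * clkrate;    /* seconds -> LPT ticks */

    do_div(ticks, clkrate);       /* ticks -> seconds again */
    rtc_time64_to_tm(ticks, tm);  /* seconds -> broken-down time */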
*/ return readl_relaxed_poll_timeout_atomic( @@ -153,7 +159,7 @@ static int stm32_rtc_wait_sync(struct stm32_rtc *rtc) /* * Wait for RSF to be set to ensure the calendar registers are - * synchronised, it takes around 2 ck_rtc clock cycles + * synchronised, it takes around 2 rtc_ck clock cycles */ return readl_relaxed_poll_timeout_atomic(rtc->base + STM32_RTC_ISR, isr, @@ -456,7 +462,7 @@ static int stm32_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) /* * Poll Alarm write flag to be sure that Alarm update is allowed: it - * takes around 2 ck_rtc clock cycles + * takes around 2 rtc_ck clock cycles */ ret = readl_relaxed_poll_timeout_atomic(rtc->base + STM32_RTC_ISR, isr, @@ -490,8 +496,17 @@ static const struct rtc_class_ops stm32_rtc_ops = { .alarm_irq_enable = stm32_rtc_alarm_irq_enable, }; +static const struct stm32_rtc_data stm32_rtc_data = { + .has_pclk = false, +}; + +static const struct stm32_rtc_data stm32h7_rtc_data = { + .has_pclk = true, +}; + static const struct of_device_id stm32_rtc_of_match[] = { - { .compatible = "st,stm32-rtc" }, + { .compatible = "st,stm32-rtc", .data = &stm32_rtc_data }, + { .compatible = "st,stm32h7-rtc", .data = &stm32h7_rtc_data }, {} }; MODULE_DEVICE_TABLE(of, stm32_rtc_of_match); @@ -503,7 +518,7 @@ static int stm32_rtc_init(struct platform_device *pdev, unsigned int rate; int ret = 0; - rate = clk_get_rate(rtc->ck_rtc); + rate = clk_get_rate(rtc->rtc_ck); /* Find prediv_a and prediv_s to obtain the 1Hz calendar clock */ pred_a_max = STM32_RTC_PRER_PRED_A >> STM32_RTC_PRER_PRED_A_SHIFT; @@ -524,7 +539,7 @@ static int stm32_rtc_init(struct platform_device *pdev, pred_a = pred_a_max; pred_s = (rate / (pred_a + 1)) - 1; - dev_warn(&pdev->dev, "ck_rtc is %s\n", + dev_warn(&pdev->dev, "rtc_ck is %s\n", (rate < ((pred_a + 1) * (pred_s + 1))) ? "fast" : "slow"); } @@ -561,6 +576,7 @@ static int stm32_rtc_probe(struct platform_device *pdev) { struct stm32_rtc *rtc; struct resource *res; + const struct of_device_id *match; int ret; rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); @@ -579,15 +595,34 @@ static int stm32_rtc_probe(struct platform_device *pdev) return PTR_ERR(rtc->dbp); } - rtc->ck_rtc = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(rtc->ck_rtc)) { - dev_err(&pdev->dev, "no ck_rtc clock"); - return PTR_ERR(rtc->ck_rtc); + match = of_match_device(stm32_rtc_of_match, &pdev->dev); + rtc->data = (struct stm32_rtc_data *)match->data; + + if (!rtc->data->has_pclk) { + rtc->pclk = NULL; + rtc->rtc_ck = devm_clk_get(&pdev->dev, NULL); + } else { + rtc->pclk = devm_clk_get(&pdev->dev, "pclk"); + if (IS_ERR(rtc->pclk)) { + dev_err(&pdev->dev, "no pclk clock"); + return PTR_ERR(rtc->pclk); + } + rtc->rtc_ck = devm_clk_get(&pdev->dev, "rtc_ck"); + } + if (IS_ERR(rtc->rtc_ck)) { + dev_err(&pdev->dev, "no rtc_ck clock"); + return PTR_ERR(rtc->rtc_ck); + } + + if (rtc->data->has_pclk) { + ret = clk_prepare_enable(rtc->pclk); + if (ret) + return ret; } - ret = clk_prepare_enable(rtc->ck_rtc); + ret = clk_prepare_enable(rtc->rtc_ck); if (ret) - return ret; + goto err; regmap_update_bits(rtc->dbp, PWR_CR, PWR_CR_DBP, PWR_CR_DBP); @@ -595,7 +630,7 @@ static int stm32_rtc_probe(struct platform_device *pdev) * After a system reset, RTC_ISR.INITS flag can be read to check if * the calendar has been initalized or not. INITS flag is reset by a * power-on reset (no vbat, no power-supply). It is not reset if - * ck_rtc parent clock has changed (so RTC prescalers need to be + * rtc_ck parent clock has changed (so RTC prescalers need to be * changed). 
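All three "2 rtc_ck cycles" comments above end in the same iopoll helper: readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) re-reads the register into val until cond becomes true or timeout_us expires, returning 0 or -ETIMEDOUT. With the bounds from the comment (10 us poll, 100 ms timeout; the flag macro name is assumed from the driver's STM32_RTC_ISR_* convention):

    #include <linux/iopoll.h>

    ret = readl_relaxed_poll_timeout_atomic(rtc->base + STM32_RTC_ISR,
                                            isr,
                                            (isr & STM32_RTC_ISR_INITF),
                                            10, 100000);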
That's why we cannot rely on this flag to know if RTC * init has to be done. */ @@ -646,7 +681,9 @@ static int stm32_rtc_probe(struct platform_device *pdev) return 0; err: - clk_disable_unprepare(rtc->ck_rtc); + if (rtc->data->has_pclk) + clk_disable_unprepare(rtc->pclk); + clk_disable_unprepare(rtc->rtc_ck); regmap_update_bits(rtc->dbp, PWR_CR, PWR_CR_DBP, 0); @@ -667,7 +704,9 @@ static int stm32_rtc_remove(struct platform_device *pdev) writel_relaxed(cr, rtc->base + STM32_RTC_CR); stm32_rtc_wpr_lock(rtc); - clk_disable_unprepare(rtc->ck_rtc); + clk_disable_unprepare(rtc->rtc_ck); + if (rtc->data->has_pclk) + clk_disable_unprepare(rtc->pclk); /* Enable backup domain write protection */ regmap_update_bits(rtc->dbp, PWR_CR, PWR_CR_DBP, 0); @@ -682,6 +721,9 @@ static int stm32_rtc_suspend(struct device *dev) { struct stm32_rtc *rtc = dev_get_drvdata(dev); + if (rtc->data->has_pclk) + clk_disable_unprepare(rtc->pclk); + if (device_may_wakeup(dev)) return enable_irq_wake(rtc->irq_alarm); @@ -693,6 +735,12 @@ static int stm32_rtc_resume(struct device *dev) struct stm32_rtc *rtc = dev_get_drvdata(dev); int ret = 0; + if (rtc->data->has_pclk) { + ret = clk_prepare_enable(rtc->pclk); + if (ret) + return ret; + } + ret = stm32_rtc_wait_sync(rtc); if (ret < 0) return ret; diff --git a/drivers/rtc/rtc-sysfs.c b/drivers/rtc/rtc-sysfs.c index 1218d5d4224d..e364550eb9a7 100644 --- a/drivers/rtc/rtc-sysfs.c +++ b/drivers/rtc/rtc-sysfs.c @@ -27,7 +27,8 @@ static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { - return sprintf(buf, "%s\n", to_rtc_device(dev)->name); + return sprintf(buf, "%s %s\n", dev_driver_string(dev->parent), + dev_name(dev->parent)); } static DEVICE_ATTR_RO(name); diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 65f5a794f26d..98749fa817da 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c @@ -98,7 +98,7 @@ vmcp_write(struct file *file, const char __user *buff, size_t count, } if (!session->response) session->response = (char *)__get_free_pages(GFP_KERNEL - | __GFP_REPEAT | GFP_DMA, + | __GFP_RETRY_MAYFAIL | GFP_DMA, get_order(session->bufsize)); if (!session->response) { mutex_unlock(&session->mutex); diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 7e0d4f724dda..432fc40990bd 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c @@ -559,6 +559,7 @@ static void chp_process_crw(struct crw *crw0, struct crw *crw1, chpid.id = crw0->rsid; switch (crw0->erc) { case CRW_ERC_IPARM: /* Path has come. 
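The vmcp hunk above is part of this cycle's tree-wide rename of __GFP_REPEAT to __GFP_RETRY_MAYFAIL: the allocator retries hard but is still allowed to fail, and the new name says so, which is why the existing NULL check stays load-bearing. The call shape is unchanged:

    session->response = (char *)__get_free_pages(GFP_KERNEL
                            | __GFP_RETRY_MAYFAIL | GFP_DMA,
                            get_order(session->bufsize));
    if (!session->response)    /* failure remains possible: handle it */
        /* ... unlock and bail out, as vmcp_write() does above ... */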
*/ + case CRW_ERC_INIT: if (!chp_is_registered(chpid)) chp_new(chpid); chsc_chp_online(chpid); diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 1563b1458e44..2ade6131a89f 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1115,7 +1115,7 @@ static const struct net_device_ops ctcm_mpc_netdev_ops = { .ndo_start_xmit = ctcmpc_tx, }; -void static ctcm_dev_setup(struct net_device *dev) +static void ctcm_dev_setup(struct net_device *dev) { dev->type = ARPHRD_SLIP; dev->tx_queue_len = 100; diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 3062cde33a3d..8975cd321390 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2408,7 +2408,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return rc; } -int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) +inline int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { int cast_type = RTN_UNSPEC; struct neighbour *n = NULL; diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c index 077f62e208aa..6a4367cc9caa 100644 --- a/drivers/scsi/cxlflash/main.c +++ b/drivers/scsi/cxlflash/main.c @@ -3401,9 +3401,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, if (is_write) { req_flags |= SISL_REQ_FLAGS_HOST_WRITE; - rc = copy_from_user(kbuf, ubuf, ulen); - if (unlikely(rc)) + if (copy_from_user(kbuf, ubuf, ulen)) { + rc = -EFAULT; goto out; + } } } @@ -3431,8 +3432,10 @@ static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, goto out; } - if (ulen && !is_write) - rc = copy_to_user(ubuf, kbuf, ulen); + if (ulen && !is_write) { + if (copy_to_user(ubuf, kbuf, ulen)) + rc = -EFAULT; + } out: kfree(buf); dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 551d103c27f1..2bfea7082e3a 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -1693,7 +1693,7 @@ static int prep_ssp_v2_hw(struct hisi_hba *hisi_hba, static int parse_trans_tx_err_code_v2_hw(u32 err_msk) { - const u8 trans_tx_err_code_prio[] = { + static const u8 trans_tx_err_code_prio[] = { TRANS_TX_OPEN_FAIL_WITH_IT_NEXUS_LOSS, TRANS_TX_ERR_PHY_NOT_ENABLE, TRANS_TX_OPEN_CNX_ERR_WRONG_DESTINATION, @@ -1738,7 +1738,7 @@ static int parse_trans_tx_err_code_v2_hw(u32 err_msk) static int parse_trans_rx_err_code_v2_hw(u32 err_msk) { - const u8 trans_rx_err_code_prio[] = { + static const u8 trans_rx_err_code_prio[] = { TRANS_RX_ERR_WITH_RXFRAME_CRC_ERR, TRANS_RX_ERR_WITH_RXFIS_8B10B_DISP_ERR, TRANS_RX_ERR_WITH_RXFRAME_HAVE_ERRPRM, @@ -1784,7 +1784,7 @@ static int parse_trans_rx_err_code_v2_hw(u32 err_msk) static int parse_dma_tx_err_code_v2_hw(u32 err_msk) { - const u8 dma_tx_err_code_prio[] = { + static const u8 dma_tx_err_code_prio[] = { DMA_TX_UNEXP_XFER_ERR, DMA_TX_UNEXP_RETRANS_ERR, DMA_TX_XFER_LEN_OVERFLOW, @@ -1810,7 +1810,7 @@ static int parse_dma_tx_err_code_v2_hw(u32 err_msk) static int parse_sipc_rx_err_code_v2_hw(u32 err_msk) { - const u8 sipc_rx_err_code_prio[] = { + static const u8 sipc_rx_err_code_prio[] = { SIPC_RX_FIS_STATUS_ERR_BIT_VLD, SIPC_RX_PIO_WRSETUP_STATUS_DRQ_ERR, SIPC_RX_FIS_STATUS_BSY_BIT_ERR, @@ -1836,7 +1836,7 @@ static int parse_sipc_rx_err_code_v2_hw(u32 err_msk) static int parse_dma_rx_err_code_v2_hw(u32 err_msk) { - const u8 dma_rx_err_code_prio[] = { + static const u8 dma_rx_err_code_prio[] = { 
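The cxlflash hunks above fix a classic misuse: copy_from_user() and copy_to_user() return the number of bytes left uncopied, not a negative errno, so assigning the result to rc could leak a positive byte count to the caller as a "return code". The idiomatic shape is:

    if (copy_from_user(kbuf, ubuf, ulen)) {
        rc = -EFAULT;    /* any nonzero result means a faulted copy */
        goto out;
    }

The hisi_sas hunks in the same range are a different micro-fix: marking the lookup tables static const moves them off the kernel stack and into rodata.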
DMA_RX_UNKNOWN_FRM_ERR, DMA_RX_DATA_LEN_OVERFLOW, DMA_RX_DATA_LEN_UNDERFLOW, diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c index 659ab483d716..1f75d0380516 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c @@ -155,6 +155,9 @@ static long ibmvscsis_unregister_command_q(struct scsi_info *vscsi) qrc = h_free_crq(vscsi->dds.unit_id); switch (qrc) { case H_SUCCESS: + spin_lock_bh(&vscsi->intr_lock); + vscsi->flags &= ~PREP_FOR_SUSPEND_FLAGS; + spin_unlock_bh(&vscsi->intr_lock); break; case H_HARDWARE: @@ -422,6 +425,9 @@ static void ibmvscsis_disconnect(struct work_struct *work) new_state = vscsi->new_state; vscsi->new_state = 0; + vscsi->flags |= DISCONNECT_SCHEDULED; + vscsi->flags &= ~SCHEDULE_DISCONNECT; + pr_debug("disconnect: flags 0x%x, state 0x%hx\n", vscsi->flags, vscsi->state); @@ -802,6 +808,13 @@ static long ibmvscsis_establish_new_q(struct scsi_info *vscsi) long rc = ADAPT_SUCCESS; uint format; + rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND, 30000, + 0, 0, 0, 0); + if (rc == H_SUCCESS) + vscsi->flags |= PREP_FOR_SUSPEND_ENABLED; + else if (rc != H_NOT_FOUND) + pr_err("Error from Enable Prepare for Suspend: %ld\n", rc); + vscsi->flags &= PRESERVE_FLAG_FIELDS; vscsi->rsp_q_timer.timer_pops = 0; vscsi->debit = 0; @@ -951,6 +964,63 @@ static void ibmvscsis_free_cmd_resources(struct scsi_info *vscsi, } /** + * ibmvscsis_ready_for_suspend() - Helper function to call VIOCTL + * @vscsi: Pointer to our adapter structure + * @idle: Indicates whether we were called from adapter_idle. This + * is important to know if we need to do a disconnect, since if + * we're called from adapter_idle, we're still processing the + * current disconnect, so we can't just call post_disconnect. + * + * This function is called when the adapter is idle when phyp has sent + * us a Prepare for Suspend Transport Event. 
+ * + * EXECUTION ENVIRONMENT: + * Process or interrupt environment called with interrupt lock held + */ +static long ibmvscsis_ready_for_suspend(struct scsi_info *vscsi, bool idle) +{ + long rc = 0; + struct viosrp_crq *crq; + + /* See if there is a Resume event in the queue */ + crq = vscsi->cmd_q.base_addr + vscsi->cmd_q.index; + + pr_debug("ready_suspend: flags 0x%x, state 0x%hx crq_valid:%x\n", + vscsi->flags, vscsi->state, (int)crq->valid); + + if (!(vscsi->flags & PREP_FOR_SUSPEND_ABORTED) && !(crq->valid)) { + rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND, 0, 0, 0, + 0, 0); + if (rc) { + pr_err("Ready for Suspend Vioctl failed: %ld\n", rc); + rc = 0; + } + } else if (((vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE) && + (vscsi->flags & PREP_FOR_SUSPEND_ABORTED)) || + ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) || + (crq->format != RESUME_FROM_SUSP)))) { + if (idle) { + vscsi->state = ERR_DISCONNECT_RECONNECT; + ibmvscsis_reset_queue(vscsi); + rc = -1; + } else if (vscsi->state == CONNECTED) { + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, 0); + } + + vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE; + + if ((crq->valid) && ((crq->valid != VALID_TRANS_EVENT) || + (crq->format != RESUME_FROM_SUSP))) + pr_err("Invalid element in CRQ after Prepare for Suspend"); + } + + vscsi->flags &= ~(PREP_FOR_SUSPEND_PENDING | PREP_FOR_SUSPEND_ABORTED); + + return rc; +} + +/** * ibmvscsis_trans_event() - Handle a Transport Event * @vscsi: Pointer to our adapter structure * @crq: Pointer to CRQ entry containing the Transport Event @@ -974,18 +1044,8 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi, case PARTNER_FAILED: case PARTNER_DEREGISTER: ibmvscsis_delete_client_info(vscsi, true); - break; - - default: - rc = ERROR; - dev_err(&vscsi->dev, "trans_event: invalid format %d\n", - (uint)crq->format); - ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, - RESPONSE_Q_DOWN); - break; - } - - if (rc == ADAPT_SUCCESS) { + if (crq->format == MIGRATED) + vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE; switch (vscsi->state) { case NO_QUEUE: case ERR_DISCONNECTED: @@ -1034,6 +1094,60 @@ static long ibmvscsis_trans_event(struct scsi_info *vscsi, vscsi->flags |= (RESPONSE_Q_DOWN | TRANS_EVENT); break; } + break; + + case PREPARE_FOR_SUSPEND: + pr_debug("Prep for Suspend, crq status = 0x%x\n", + (int)crq->status); + switch (vscsi->state) { + case ERR_DISCONNECTED: + case WAIT_CONNECTION: + case CONNECTED: + ibmvscsis_ready_for_suspend(vscsi, false); + break; + case SRP_PROCESSING: + vscsi->resume_state = vscsi->state; + vscsi->flags |= PREP_FOR_SUSPEND_PENDING; + if (crq->status == CRQ_ENTRY_OVERWRITTEN) + vscsi->flags |= PREP_FOR_SUSPEND_OVERWRITE; + ibmvscsis_post_disconnect(vscsi, WAIT_IDLE, 0); + break; + case NO_QUEUE: + case UNDEFINED: + case UNCONFIGURING: + case WAIT_ENABLED: + case ERR_DISCONNECT: + case ERR_DISCONNECT_RECONNECT: + case WAIT_IDLE: + pr_err("Invalid state for Prepare for Suspend Trans Event: 0x%x\n", + vscsi->state); + break; + } + break; + + case RESUME_FROM_SUSP: + pr_debug("Resume from Suspend, crq status = 0x%x\n", + (int)crq->status); + if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) { + vscsi->flags |= PREP_FOR_SUSPEND_ABORTED; + } else { + if ((crq->status == CRQ_ENTRY_OVERWRITTEN) || + (vscsi->flags & PREP_FOR_SUSPEND_OVERWRITE)) { + ibmvscsis_post_disconnect(vscsi, + ERR_DISCONNECT_RECONNECT, + 0); + vscsi->flags &= ~PREP_FOR_SUSPEND_OVERWRITE; + } + } + break; + + default: + rc = ERROR; + dev_err(&vscsi->dev, "trans_event: invalid format %d\n", + 
(uint)crq->format); + ibmvscsis_post_disconnect(vscsi, ERR_DISCONNECT, + RESPONSE_Q_DOWN); + break; } rc = vscsi->flags & SCHEDULE_DISCONNECT; @@ -1201,6 +1315,7 @@ static struct ibmvscsis_cmd *ibmvscsis_get_free_cmd(struct scsi_info *vscsi) static void ibmvscsis_adapter_idle(struct scsi_info *vscsi) { int free_qs = false; + long rc = 0; pr_debug("adapter_idle: flags 0x%x, state 0x%hx\n", vscsi->flags, vscsi->state); @@ -1240,7 +1355,14 @@ static void ibmvscsis_adapter_idle(struct scsi_info *vscsi) vscsi->rsp_q_timer.timer_pops = 0; vscsi->debit = 0; vscsi->credit = 0; - if (vscsi->flags & TRANS_EVENT) { + if (vscsi->flags & PREP_FOR_SUSPEND_PENDING) { + vscsi->state = vscsi->resume_state; + vscsi->resume_state = 0; + rc = ibmvscsis_ready_for_suspend(vscsi, true); + vscsi->flags &= ~DISCONNECT_SCHEDULED; + if (rc) + break; + } else if (vscsi->flags & TRANS_EVENT) { vscsi->state = WAIT_CONNECTION; vscsi->flags &= PRESERVE_FLAG_FIELDS; } else { @@ -3792,8 +3914,16 @@ static struct se_portal_group *ibmvscsis_make_tpg(struct se_wwn *wwn, { struct ibmvscsis_tport *tport = container_of(wwn, struct ibmvscsis_tport, tport_wwn); + u16 tpgt; int rc; + if (strstr(name, "tpgt_") != name) + return ERR_PTR(-EINVAL); + rc = kstrtou16(name + 5, 0, &tpgt); + if (rc) + return ERR_PTR(rc); + tport->tport_tpgt = tpgt; + tport->releasing = false; rc = core_tpg_register(&tport->tport_wwn, &tport->se_tpg, diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h index b4391a8de456..cc96c2731134 100644 --- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h +++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.h @@ -262,6 +262,14 @@ struct scsi_info { #define DISCONNECT_SCHEDULED 0x00800 /* remove function is sleeping */ #define CFG_SLEEPING 0x01000 + /* Register for Prepare for Suspend Transport Events */ +#define PREP_FOR_SUSPEND_ENABLED 0x02000 + /* Prepare for Suspend event sent */ +#define PREP_FOR_SUSPEND_PENDING 0x04000 + /* Resume from Suspend event sent */ +#define PREP_FOR_SUSPEND_ABORTED 0x08000 + /* Prepare for Suspend event overwrote another CRQ entry */ +#define PREP_FOR_SUSPEND_OVERWRITE 0x10000 u32 flags; /* adapter lock */ spinlock_t intr_lock; @@ -272,6 +280,7 @@ struct scsi_info { /* used in crq, to tag what iu the response is for */ u64 empty_iu_tag; uint new_state; + uint resume_state; /* control block for the response queue timer */ struct timer_cb rsp_q_timer; /* keep last client to enable proper accounting */ @@ -324,8 +333,13 @@ struct scsi_info { #define TARGET_STOP(VSCSI) (long)(((VSCSI)->state & DONT_PROCESS_STATE) | \ ((VSCSI)->flags & BLOCK)) +#define PREP_FOR_SUSPEND_FLAGS (PREP_FOR_SUSPEND_ENABLED | \ + PREP_FOR_SUSPEND_PENDING | \ + PREP_FOR_SUSPEND_ABORTED | \ + PREP_FOR_SUSPEND_OVERWRITE) + /* flag bit that are not reset during disconnect */ -#define PRESERVE_FLAG_FIELDS 0 +#define PRESERVE_FLAG_FIELDS (PREP_FOR_SUSPEND_FLAGS) #define vio_iu(IUE) ((union viosrp_iu *)((IUE)->sbuf->buf)) @@ -333,8 +347,15 @@ struct scsi_info { #define WRITE_CMD(cdb) (((cdb)[0] & 0x1F) == 0xA) #ifndef H_GET_PARTNER_INFO -#define H_GET_PARTNER_INFO 0x0000000000000008LL +#define H_GET_PARTNER_INFO 0x0000000000000008LL +#endif +#ifndef H_ENABLE_PREPARE_FOR_SUSPEND +#define H_ENABLE_PREPARE_FOR_SUSPEND 0x000000000000001DLL #endif +#ifndef H_READY_FOR_SUSPEND +#define H_READY_FOR_SUSPEND 0x000000000000001ELL +#endif + #define h_copy_rdma(l, sa, sb, da, db) \ plpar_hcall_norets(H_COPY_RDMA, l, sa, sb, da, db) diff --git a/drivers/scsi/ibmvscsi_tgt/libsrp.h 
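Taken together, the ibmvscsi_tgt changes above implement a three-step handshake with phyp firmware; a condensed sketch of the flow using the hcalls and flag bits introduced in this series (H_ENABLE_PREPARE_FOR_SUSPEND and H_READY_FOR_SUSPEND are defined locally in case the platform headers lack them):

    /* 1. While establishing the CRQ, opt in; older firmware answers
     *    H_NOT_FOUND and the driver simply carries on without it. */
    rc = h_vioctl(vscsi->dds.unit_id, H_ENABLE_PREPARE_FOR_SUSPEND,
                  30000, 0, 0, 0, 0);

    /* 2. On a PREPARE_FOR_SUSPEND transport event, quiesce outstanding
     *    work (WAIT_IDLE while SRP processing), then signal readiness. */
    rc = h_vioctl(vscsi->dds.unit_id, H_READY_FOR_SUSPEND,
                  0, 0, 0, 0, 0);

    /* 3. RESUME_FROM_SUSP clears PREP_FOR_SUSPEND_PENDING; an
     *    overwritten CRQ entry instead forces a reconnect. */

PRESERVE_FLAG_FIELDS growing from 0 to the PREP_FOR_SUSPEND_* mask is what keeps this state alive across a disconnect.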
b/drivers/scsi/ibmvscsi_tgt/libsrp.h index 4696f331453e..9fec55b36322 100644 --- a/drivers/scsi/ibmvscsi_tgt/libsrp.h +++ b/drivers/scsi/ibmvscsi_tgt/libsrp.h @@ -30,10 +30,13 @@ enum srp_trans_event { UNUSED_FORMAT = 0, PARTNER_FAILED = 1, PARTNER_DEREGISTER = 2, - MIGRATED = 6 + MIGRATED = 6, + PREPARE_FOR_SUSPEND = 9, + RESUME_FROM_SUSP = 0xA }; enum srp_status { + CRQ_ENTRY_OVERWRITTEN = 0x20, HEADER_DESCRIPTOR = 0xF1, PING = 0xF5, PING_RESPONSE = 0xF6 diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c index 47f66e949745..ed197bc8e801 100644 --- a/drivers/scsi/isci/request.c +++ b/drivers/scsi/isci/request.c @@ -213,7 +213,7 @@ static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq) * @task_context: * */ -static void scu_ssp_reqeust_construct_task_context( +static void scu_ssp_request_construct_task_context( struct isci_request *ireq, struct scu_task_context *task_context) { @@ -425,7 +425,7 @@ static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq, u8 prot_type = scsi_get_prot_type(scmd); u8 prot_op = scsi_get_prot_op(scmd); - scu_ssp_reqeust_construct_task_context(ireq, task_context); + scu_ssp_request_construct_task_context(ireq, task_context); task_context->ssp_command_iu_length = sizeof(struct ssp_cmd_iu) / sizeof(u32); @@ -472,7 +472,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire { struct scu_task_context *task_context = ireq->tc; - scu_ssp_reqeust_construct_task_context(ireq, task_context); + scu_ssp_request_construct_task_context(ireq, task_context); task_context->control_frame = 1; task_context->priority = SCU_TASK_PRIORITY_HIGH; @@ -495,7 +495,7 @@ static void scu_ssp_task_request_construct_task_context(struct isci_request *ire * the command buffer is complete. none Revisit task context construction to * determine what is common for SSP/SMP/STP task context structures. */ -static void scu_sata_reqeust_construct_task_context( +static void scu_sata_request_construct_task_context( struct isci_request *ireq, struct scu_task_context *task_context) { @@ -562,7 +562,7 @@ static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq { struct scu_task_context *task_context = ireq->tc; - scu_sata_reqeust_construct_task_context(ireq, task_context); + scu_sata_request_construct_task_context(ireq, task_context); task_context->control_frame = 0; task_context->priority = SCU_TASK_PRIORITY_NORMAL; @@ -613,7 +613,7 @@ static void sci_stp_optimized_request_construct(struct isci_request *ireq, struct scu_task_context *task_context = ireq->tc; /* Build the STP task context structure */ - scu_sata_reqeust_construct_task_context(ireq, task_context); + scu_sata_request_construct_task_context(ireq, task_context); /* Copy over the SGL elements */ sci_request_build_sgl(ireq); @@ -1401,7 +1401,7 @@ static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_re * @data_buffer: The buffer of data to be copied. * @length: The length of the data transfer. * - * Copy the data from the buffer for the length specified to the IO reqeust SGL + * Copy the data from the buffer for the length specified to the IO request SGL * specified data region. 
enum sci_status */ static enum sci_status diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c index fd501f8dbb11..8660f923ace0 100644 --- a/drivers/scsi/libfc/fc_disc.c +++ b/drivers/scsi/libfc/fc_disc.c @@ -573,7 +573,7 @@ static void fc_disc_gpn_ft_resp(struct fc_seq *sp, struct fc_frame *fp, event = DISC_EV_FAILED; } if (error) - fc_disc_error(disc, fp); + fc_disc_error(disc, ERR_PTR(error)); else if (event != DISC_EV_NONE) fc_disc_done(disc, event); fc_frame_free(fp); diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index cfe1d01eb73f..adc784539061 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -26,6 +26,7 @@ #include <linux/export.h> #include <linux/delay.h> #include <asm/unaligned.h> +#include <linux/t10-pi.h> #include <linux/crc-t10dif.h> #include <net/checksum.h> @@ -2934,8 +2935,8 @@ lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd) * First check to see if a protection data * check is valid */ - if ((src->ref_tag == 0xffffffff) || - (src->app_tag == 0xffff)) { + if ((src->ref_tag == T10_PI_REF_ESCAPE) || + (src->app_tag == T10_PI_APP_ESCAPE)) { start_ref_tag++; goto skipit; } diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index b58bba4604e8..7786c97e033f 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -1227,7 +1227,7 @@ static void qedf_rport_event_handler(struct fc_lport *lport, if (rdata->spp_type != FC_TYPE_FCP) { QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_DISC, - "Not offlading since since spp type isn't FCP\n"); + "Not offloading since spp type isn't FCP\n"); break; } if (!(rdata->ids.roles & FC_RPORT_ROLE_FCP_TARGET)) { diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h index 32632c9b2276..91d2f51c351b 100644 --- a/drivers/scsi/qedi/qedi.h +++ b/drivers/scsi/qedi/qedi.h @@ -23,11 +23,17 @@ #include <linux/qed/qed_iscsi_if.h> #include <linux/qed/qed_ll2_if.h> #include "qedi_version.h" +#include "qedi_nvm_iscsi_cfg.h" #define QEDI_MODULE_NAME "qedi" struct qedi_endpoint; +#ifndef GET_FIELD2 +#define GET_FIELD2(value, name) \ + (((value) & (name ## _MASK)) >> (name ## _OFFSET)) +#endif + /* * PCI function probe defines */ @@ -66,6 +72,11 @@ struct qedi_endpoint; #define QEDI_HW_DMA_BOUNDARY 0xfff #define QEDI_PATH_HANDLE 0xFE0000000UL +enum qedi_nvm_tgts { + QEDI_NVM_TGT_PRI, + QEDI_NVM_TGT_SEC, +}; + struct qedi_uio_ctrl { /* meta data */ u32 uio_hsi_version; @@ -283,6 +294,8 @@ struct qedi_ctx { void *bdq_pbl_list; dma_addr_t bdq_pbl_list_dma; u8 bdq_pbl_list_num_entries; + struct nvm_iscsi_cfg *iscsi_cfg; + dma_addr_t nvm_buf_dma; void __iomem *bdq_primary_prod; void __iomem *bdq_secondary_prod; u16 bdq_prod_idx; @@ -337,6 +350,10 @@ struct qedi_ctx { bool use_fast_sge; atomic_t num_offloads; +#define SYSFS_FLAG_FW_SEL_BOOT 2 +#define IPV6_LEN 41 +#define IPV4_LEN 17 + struct iscsi_boot_kset *boot_kset; }; struct qedi_work { diff --git a/drivers/scsi/qedi/qedi_fw.c b/drivers/scsi/qedi/qedi_fw.c index 19254bd739d9..93d54acd4a22 100644 --- a/drivers/scsi/qedi/qedi_fw.c +++ b/drivers/scsi/qedi/qedi_fw.c @@ -1411,7 +1411,7 @@ static void qedi_tmf_work(struct work_struct *work) list_work = kzalloc(sizeof(*list_work), GFP_ATOMIC); if (!list_work) { - QEDI_ERR(&qedi->dbg_ctx, "Memory alloction failed\n"); + QEDI_ERR(&qedi->dbg_ctx, "Memory allocation failed\n"); goto abort_ret; } diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index 5f5a4ef2e529..2c3783684815 100644 --- 
a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -19,6 +19,7 @@ #include <linux/mm.h> #include <linux/if_vlan.h> #include <linux/cpu.h> +#include <linux/iscsi_boot_sysfs.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> @@ -1143,6 +1144,30 @@ exit_setup_int: return rc; } +static void qedi_free_nvm_iscsi_cfg(struct qedi_ctx *qedi) +{ + if (qedi->iscsi_cfg) + dma_free_coherent(&qedi->pdev->dev, + sizeof(struct nvm_iscsi_cfg), + qedi->iscsi_cfg, qedi->nvm_buf_dma); +} + +static int qedi_alloc_nvm_iscsi_cfg(struct qedi_ctx *qedi) +{ + qedi->iscsi_cfg = dma_zalloc_coherent(&qedi->pdev->dev, + sizeof(struct nvm_iscsi_cfg), + &qedi->nvm_buf_dma, GFP_KERNEL); + if (!qedi->iscsi_cfg) { + QEDI_ERR(&qedi->dbg_ctx, "Could not allocate NVM BUF.\n"); + return -ENOMEM; + } + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "NVM BUF addr=0x%p dma=0x%llx.\n", qedi->iscsi_cfg, + qedi->nvm_buf_dma); + + return 0; +} + static void qedi_free_bdq(struct qedi_ctx *qedi) { int i; @@ -1183,6 +1208,7 @@ static void qedi_free_global_queues(struct qedi_ctx *qedi) kfree(gl[i]); } qedi_free_bdq(qedi); + qedi_free_nvm_iscsi_cfg(qedi); } static int qedi_alloc_bdq(struct qedi_ctx *qedi) @@ -1309,6 +1335,11 @@ static int qedi_alloc_global_queues(struct qedi_ctx *qedi) if (rc) goto mem_alloc_failure; + /* Allocate DMA coherent buffers for NVM_ISCSI_CFG */ + rc = qedi_alloc_nvm_iscsi_cfg(qedi); + if (rc) + goto mem_alloc_failure; + /* Allocate a CQ and an associated PBL for each MSI-X * vector. */ @@ -1671,6 +1702,387 @@ void qedi_reset_host_mtu(struct qedi_ctx *qedi, u16 mtu) qedi_ops->ll2->start(qedi->cdev, ¶ms); } +/** + * qedi_get_nvram_block: - Scan through the iSCSI NVRAM block (while accounting + * for gaps) for the matching absolute-pf-id of the QEDI device. + */ +static struct nvm_iscsi_block * +qedi_get_nvram_block(struct qedi_ctx *qedi) +{ + int i; + u8 pf; + u32 flags; + struct nvm_iscsi_block *block; + + pf = qedi->dev_info.common.abs_pf_id; + block = &qedi->iscsi_cfg->block[0]; + for (i = 0; i < NUM_OF_ISCSI_PF_SUPPORTED; i++, block++) { + flags = ((block->id) & NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK) >> + NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET; + if (flags & (NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY | + NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED) && + (pf == (block->id & NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK) + >> NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET)) + return block; + } + return NULL; +} + +static ssize_t qedi_show_boot_eth_info(void *data, int type, char *buf) +{ + struct qedi_ctx *qedi = data; + struct nvm_iscsi_initiator *initiator; + char *str = buf; + int rc = 1; + u32 ipv6_en, dhcp_en, ip_len; + struct nvm_iscsi_block *block; + char *fmt, *ip, *sub, *gw; + + block = qedi_get_nvram_block(qedi); + if (!block) + return 0; + + initiator = &block->initiator; + ipv6_en = block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_IPV6_ENABLED; + dhcp_en = block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED; + /* Static IP assignments. */ + fmt = ipv6_en ? "%pI6\n" : "%pI4\n"; + ip = ipv6_en ? initiator->ipv6.addr.byte : initiator->ipv4.addr.byte; + ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN; + sub = ipv6_en ? initiator->ipv6.subnet_mask.byte : + initiator->ipv4.subnet_mask.byte; + gw = ipv6_en ? initiator->ipv6.gateway.byte : + initiator->ipv4.gateway.byte; + /* DHCP IP adjustments. */ + fmt = dhcp_en ? "%s\n" : fmt; + if (dhcp_en) { + ip = ipv6_en ? "0::0" : "0.0.0.0"; + sub = ip; + gw = ip; + ip_len = ipv6_en ? 
5 : 8; + } + + switch (type) { + case ISCSI_BOOT_ETH_IP_ADDR: + rc = snprintf(str, ip_len, fmt, ip); + break; + case ISCSI_BOOT_ETH_SUBNET_MASK: + rc = snprintf(str, ip_len, fmt, sub); + break; + case ISCSI_BOOT_ETH_GATEWAY: + rc = snprintf(str, ip_len, fmt, gw); + break; + case ISCSI_BOOT_ETH_FLAGS: + rc = snprintf(str, 3, "%hhd\n", + SYSFS_FLAG_FW_SEL_BOOT); + break; + case ISCSI_BOOT_ETH_INDEX: + rc = snprintf(str, 3, "0\n"); + break; + case ISCSI_BOOT_ETH_MAC: + rc = sysfs_format_mac(str, qedi->mac, ETH_ALEN); + break; + case ISCSI_BOOT_ETH_VLAN: + rc = snprintf(str, 12, "%d\n", + GET_FIELD2(initiator->generic_cont0, + NVM_ISCSI_CFG_INITIATOR_VLAN)); + break; + case ISCSI_BOOT_ETH_ORIGIN: + if (dhcp_en) + rc = snprintf(str, 3, "3\n"); + break; + default: + rc = 0; + break; + } + + return rc; +} + +static umode_t qedi_eth_get_attr_visibility(void *data, int type) +{ + int rc = 1; + + switch (type) { + case ISCSI_BOOT_ETH_FLAGS: + case ISCSI_BOOT_ETH_MAC: + case ISCSI_BOOT_ETH_INDEX: + case ISCSI_BOOT_ETH_IP_ADDR: + case ISCSI_BOOT_ETH_SUBNET_MASK: + case ISCSI_BOOT_ETH_GATEWAY: + case ISCSI_BOOT_ETH_ORIGIN: + case ISCSI_BOOT_ETH_VLAN: + rc = 0444; + break; + default: + rc = 0; + break; + } + return rc; +} + +static ssize_t qedi_show_boot_ini_info(void *data, int type, char *buf) +{ + struct qedi_ctx *qedi = data; + struct nvm_iscsi_initiator *initiator; + char *str = buf; + int rc; + struct nvm_iscsi_block *block; + + block = qedi_get_nvram_block(qedi); + if (!block) + return 0; + + initiator = &block->initiator; + + switch (type) { + case ISCSI_BOOT_INI_INITIATOR_NAME: + rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", + initiator->initiator_name.byte); + break; + default: + rc = 0; + break; + } + return rc; +} + +static umode_t qedi_ini_get_attr_visibility(void *data, int type) +{ + int rc; + + switch (type) { + case ISCSI_BOOT_INI_INITIATOR_NAME: + rc = 0444; + break; + default: + rc = 0; + break; + } + return rc; +} + +static ssize_t +qedi_show_boot_tgt_info(struct qedi_ctx *qedi, int type, + char *buf, enum qedi_nvm_tgts idx) +{ + char *str = buf; + int rc = 1; + u32 ctrl_flags, ipv6_en, chap_en, mchap_en, ip_len; + struct nvm_iscsi_block *block; + char *chap_name, *chap_secret; + char *mchap_name, *mchap_secret; + + block = qedi_get_nvram_block(qedi); + if (!block) + goto exit_show_tgt_info; + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT, + "Port:%d, tgt_idx:%d\n", + GET_FIELD2(block->id, NVM_ISCSI_CFG_BLK_MAPPED_PF_ID), idx); + + ctrl_flags = block->target[idx].ctrl_flags & + NVM_ISCSI_CFG_TARGET_ENABLED; + + if (!ctrl_flags) { + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_EVT, + "Target disabled\n"); + goto exit_show_tgt_info; + } + + ipv6_en = block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_IPV6_ENABLED; + ip_len = ipv6_en ? IPV6_LEN : IPV4_LEN; + chap_en = block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_CHAP_ENABLED; + chap_name = chap_en ? block->initiator.chap_name.byte : NULL; + chap_secret = chap_en ? block->initiator.chap_password.byte : NULL; + + mchap_en = block->generic.ctrl_flags & + NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED; + mchap_name = mchap_en ? block->target[idx].chap_name.byte : NULL; + mchap_secret = mchap_en ? 
block->target[idx].chap_password.byte : NULL; + + switch (type) { + case ISCSI_BOOT_TGT_NAME: + rc = snprintf(str, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n", + block->target[idx].target_name.byte); + break; + case ISCSI_BOOT_TGT_IP_ADDR: + if (ipv6_en) + rc = snprintf(str, ip_len, "%pI6\n", + block->target[idx].ipv6_addr.byte); + else + rc = snprintf(str, ip_len, "%pI4\n", + block->target[idx].ipv4_addr.byte); + break; + case ISCSI_BOOT_TGT_PORT: + rc = snprintf(str, 12, "%d\n", + GET_FIELD2(block->target[idx].generic_cont0, + NVM_ISCSI_CFG_TARGET_TCP_PORT)); + break; + case ISCSI_BOOT_TGT_LUN: + rc = snprintf(str, 22, "%.*d\n", + block->target[idx].lun.value[1], + block->target[idx].lun.value[0]); + break; + case ISCSI_BOOT_TGT_CHAP_NAME: + rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", + chap_name); + break; + case ISCSI_BOOT_TGT_CHAP_SECRET: + rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", + chap_secret); + break; + case ISCSI_BOOT_TGT_REV_CHAP_NAME: + rc = snprintf(str, NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN, "%s\n", + mchap_name); + break; + case ISCSI_BOOT_TGT_REV_CHAP_SECRET: + rc = snprintf(str, NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN, "%s\n", + mchap_secret); + break; + case ISCSI_BOOT_TGT_FLAGS: + rc = snprintf(str, 3, "%hhd\n", SYSFS_FLAG_FW_SEL_BOOT); + break; + case ISCSI_BOOT_TGT_NIC_ASSOC: + rc = snprintf(str, 3, "0\n"); + break; + default: + rc = 0; + break; + } + +exit_show_tgt_info: + return rc; +} + +static ssize_t qedi_show_boot_tgt_pri_info(void *data, int type, char *buf) +{ + struct qedi_ctx *qedi = data; + + return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_PRI); +} + +static ssize_t qedi_show_boot_tgt_sec_info(void *data, int type, char *buf) +{ + struct qedi_ctx *qedi = data; + + return qedi_show_boot_tgt_info(qedi, type, buf, QEDI_NVM_TGT_SEC); +} + +static umode_t qedi_tgt_get_attr_visibility(void *data, int type) +{ + int rc; + + switch (type) { + case ISCSI_BOOT_TGT_NAME: + case ISCSI_BOOT_TGT_IP_ADDR: + case ISCSI_BOOT_TGT_PORT: + case ISCSI_BOOT_TGT_LUN: + case ISCSI_BOOT_TGT_CHAP_NAME: + case ISCSI_BOOT_TGT_CHAP_SECRET: + case ISCSI_BOOT_TGT_REV_CHAP_NAME: + case ISCSI_BOOT_TGT_REV_CHAP_SECRET: + case ISCSI_BOOT_TGT_NIC_ASSOC: + case ISCSI_BOOT_TGT_FLAGS: + rc = 0444; + break; + default: + rc = 0; + break; + } + return rc; +} + +static void qedi_boot_release(void *data) +{ + struct qedi_ctx *qedi = data; + + scsi_host_put(qedi->shost); +} + +static int qedi_get_boot_info(struct qedi_ctx *qedi) +{ + int ret = 1; + u16 len; + + len = sizeof(struct nvm_iscsi_cfg); + + QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO, + "Get NVM iSCSI CFG image\n"); + ret = qedi_ops->common->nvm_get_image(qedi->cdev, + QED_NVM_IMAGE_ISCSI_CFG, + (char *)qedi->iscsi_cfg, len); + if (ret) + QEDI_ERR(&qedi->dbg_ctx, + "Could not get NVM image. 
ret = %d\n", ret); + + return ret; +} + +static int qedi_setup_boot_info(struct qedi_ctx *qedi) +{ + struct iscsi_boot_kobj *boot_kobj; + + if (qedi_get_boot_info(qedi)) + return -EPERM; + + qedi->boot_kset = iscsi_boot_create_host_kset(qedi->shost->host_no); + if (!qedi->boot_kset) + goto kset_free; + + if (!scsi_host_get(qedi->shost)) + goto kset_free; + + boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 0, qedi, + qedi_show_boot_tgt_pri_info, + qedi_tgt_get_attr_visibility, + qedi_boot_release); + if (!boot_kobj) + goto put_host; + + if (!scsi_host_get(qedi->shost)) + goto kset_free; + + boot_kobj = iscsi_boot_create_target(qedi->boot_kset, 1, qedi, + qedi_show_boot_tgt_sec_info, + qedi_tgt_get_attr_visibility, + qedi_boot_release); + if (!boot_kobj) + goto put_host; + + if (!scsi_host_get(qedi->shost)) + goto kset_free; + + boot_kobj = iscsi_boot_create_initiator(qedi->boot_kset, 0, qedi, + qedi_show_boot_ini_info, + qedi_ini_get_attr_visibility, + qedi_boot_release); + if (!boot_kobj) + goto put_host; + + if (!scsi_host_get(qedi->shost)) + goto kset_free; + + boot_kobj = iscsi_boot_create_ethernet(qedi->boot_kset, 0, qedi, + qedi_show_boot_eth_info, + qedi_eth_get_attr_visibility, + qedi_boot_release); + if (!boot_kobj) + goto put_host; + + return 0; + +put_host: + scsi_host_put(qedi->shost); +kset_free: + iscsi_boot_destroy_kset(qedi->boot_kset); + return -ENOMEM; +} + static void __qedi_remove(struct pci_dev *pdev, int mode) { struct qedi_ctx *qedi = pci_get_drvdata(pdev); @@ -1724,6 +2136,9 @@ static void __qedi_remove(struct pci_dev *pdev, int mode) qedi->ll2_recv_thread = NULL; } qedi_ll2_free_skbs(qedi); + + if (qedi->boot_kset) + iscsi_boot_destroy_kset(qedi->boot_kset); } } @@ -1967,6 +2382,10 @@ static int __qedi_probe(struct pci_dev *pdev, int mode) /* F/w needs 1st task context memory entry for performance */ set_bit(QEDI_RESERVE_TASK_ID, qedi->task_idx_map); atomic_set(&qedi->num_offloads, 0); + + if (qedi_setup_boot_info(qedi)) + QEDI_ERR(&qedi->dbg_ctx, + "No iSCSI boot target configured\n"); } return 0; diff --git a/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h new file mode 100644 index 000000000000..df39b69b366d --- /dev/null +++ b/drivers/scsi/qedi/qedi_nvm_iscsi_cfg.h @@ -0,0 +1,210 @@ +/* + * QLogic iSCSI Offload Driver + * Copyright (c) 2016 Cavium Inc. + * + * This software is available under the terms of the GNU General Public License + * (GPL) Version 2, available from the file COPYING in the main directory of + * this source tree. 
+ */ + +#ifndef NVM_ISCSI_CFG_H +#define NVM_ISCSI_CFG_H + +#define NUM_OF_ISCSI_TARGET_PER_PF 4 /* Defined as per the + * ISCSI IBFT constraint + */ +#define NUM_OF_ISCSI_PF_SUPPORTED 4 /* One PF per Port - + * assuming 4 port card + */ + +#define NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN 256 + +union nvm_iscsi_dhcp_vendor_id { + u32 value[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN / 4]; + u8 byte[NVM_ISCSI_CFG_DHCP_NAME_MAX_LEN]; +}; + +#define NVM_ISCSI_IPV4_ADDR_BYTE_LEN 4 +union nvm_iscsi_ipv4_addr { + u32 addr; + u8 byte[NVM_ISCSI_IPV4_ADDR_BYTE_LEN]; +}; + +#define NVM_ISCSI_IPV6_ADDR_BYTE_LEN 16 +union nvm_iscsi_ipv6_addr { + u32 addr[4]; + u8 byte[NVM_ISCSI_IPV6_ADDR_BYTE_LEN]; +}; + +struct nvm_iscsi_initiator_ipv4 { + union nvm_iscsi_ipv4_addr addr; /* 0x0 */ + union nvm_iscsi_ipv4_addr subnet_mask; /* 0x4 */ + union nvm_iscsi_ipv4_addr gateway; /* 0x8 */ + union nvm_iscsi_ipv4_addr primary_dns; /* 0xC */ + union nvm_iscsi_ipv4_addr secondary_dns; /* 0x10 */ + union nvm_iscsi_ipv4_addr dhcp_addr; /* 0x14 */ + + union nvm_iscsi_ipv4_addr isns_server; /* 0x18 */ + union nvm_iscsi_ipv4_addr slp_server; /* 0x1C */ + union nvm_iscsi_ipv4_addr primay_radius_server; /* 0x20 */ + union nvm_iscsi_ipv4_addr secondary_radius_server; /* 0x24 */ + + union nvm_iscsi_ipv4_addr rsvd[4]; /* 0x28 */ +}; + +struct nvm_iscsi_initiator_ipv6 { + union nvm_iscsi_ipv6_addr addr; /* 0x0 */ + union nvm_iscsi_ipv6_addr subnet_mask; /* 0x10 */ + union nvm_iscsi_ipv6_addr gateway; /* 0x20 */ + union nvm_iscsi_ipv6_addr primary_dns; /* 0x30 */ + union nvm_iscsi_ipv6_addr secondary_dns; /* 0x40 */ + union nvm_iscsi_ipv6_addr dhcp_addr; /* 0x50 */ + + union nvm_iscsi_ipv6_addr isns_server; /* 0x60 */ + union nvm_iscsi_ipv6_addr slp_server; /* 0x70 */ + union nvm_iscsi_ipv6_addr primay_radius_server; /* 0x80 */ + union nvm_iscsi_ipv6_addr secondary_radius_server; /* 0x90 */ + + union nvm_iscsi_ipv6_addr rsvd[3]; /* 0xA0 */ + + u32 config; /* 0xD0 */ +#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_MASK 0x000000FF +#define NVM_ISCSI_CFG_INITIATOR_IPV6_SUBNET_MASK_PREFIX_OFFSET 0 + + u32 rsvd_1[3]; +}; + +#define NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN 256 +union nvm_iscsi_name { + u32 value[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN / 4]; + u8 byte[NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN]; +}; + +#define NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN 256 +union nvm_iscsi_chap_name { + u32 value[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN / 4]; + u8 byte[NVM_ISCSI_CFG_CHAP_NAME_MAX_LEN]; +}; + +#define NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN 16 /* md5 need per RFC1996 + * is 16 octets + */ +union nvm_iscsi_chap_password { + u32 value[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN / 4]; + u8 byte[NVM_ISCSI_CFG_CHAP_PWD_MAX_LEN]; +}; + +union nvm_iscsi_lun { + u8 byte[8]; + u32 value[2]; +}; + +struct nvm_iscsi_generic { + u32 ctrl_flags; /* 0x0 */ +#define NVM_ISCSI_CFG_GEN_CHAP_ENABLED BIT(0) +#define NVM_ISCSI_CFG_GEN_DHCP_TCPIP_CONFIG_ENABLED BIT(1) +#define NVM_ISCSI_CFG_GEN_DHCP_ISCSI_CONFIG_ENABLED BIT(2) +#define NVM_ISCSI_CFG_GEN_IPV6_ENABLED BIT(3) +#define NVM_ISCSI_CFG_GEN_IPV4_FALLBACK_ENABLED BIT(4) +#define NVM_ISCSI_CFG_GEN_ISNS_WORLD_LOGIN BIT(5) +#define NVM_ISCSI_CFG_GEN_ISNS_SELECTIVE_LOGIN BIT(6) +#define NVM_ISCSI_CFG_GEN_ADDR_REDIRECT_ENABLED BIT(7) +#define NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED BIT(8) + + u32 timeout; /* 0x4 */ +#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_MASK 0x0000FFFF +#define NVM_ISCSI_CFG_GEN_DHCP_REQUEST_TIMEOUT_OFFSET 0 +#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_MASK 0xFFFF0000 +#define NVM_ISCSI_CFG_GEN_PORT_LOGIN_TIMEOUT_OFFSET 16 + + union 
nvm_iscsi_dhcp_vendor_id dhcp_vendor_id; /* 0x8 */ + u32 rsvd[62]; /* 0x108 */ +}; + +struct nvm_iscsi_initiator { + struct nvm_iscsi_initiator_ipv4 ipv4; /* 0x0 */ + struct nvm_iscsi_initiator_ipv6 ipv6; /* 0x38 */ + + union nvm_iscsi_name initiator_name; /* 0x118 */ + union nvm_iscsi_chap_name chap_name; /* 0x218 */ + union nvm_iscsi_chap_password chap_password; /* 0x318 */ + + u32 generic_cont0; /* 0x398 */ +#define NVM_ISCSI_CFG_INITIATOR_VLAN_MASK 0x0000FFFF +#define NVM_ISCSI_CFG_INITIATOR_VLAN_OFFSET 0 +#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_MASK 0x00030000 +#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_OFFSET 16 +#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4 1 +#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_6 2 +#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_4_AND_6 3 + + u32 ctrl_flags; +#define NVM_ISCSI_CFG_INITIATOR_IP_VERSION_PRIORITY_V6 BIT(0) +#define NVM_ISCSI_CFG_INITIATOR_VLAN_ENABLED BIT(1) + + u32 rsvd[116]; /* 0x32C */ +}; + +struct nvm_iscsi_target { + u32 ctrl_flags; /* 0x0 */ +#define NVM_ISCSI_CFG_TARGET_ENABLED BIT(0) +#define NVM_ISCSI_CFG_BOOT_TIME_LOGIN_STATUS BIT(1) + + u32 generic_cont0; /* 0x4 */ +#define NVM_ISCSI_CFG_TARGET_TCP_PORT_MASK 0x0000FFFF +#define NVM_ISCSI_CFG_TARGET_TCP_PORT_OFFSET 0 + + u32 ip_ver; +#define NVM_ISCSI_CFG_IPv4 4 +#define NVM_ISCSI_CFG_IPv6 6 + + u32 rsvd_1[7]; /* 0x24 */ + union nvm_iscsi_ipv4_addr ipv4_addr; /* 0x28 */ + union nvm_iscsi_ipv6_addr ipv6_addr; /* 0x2C */ + union nvm_iscsi_lun lun; /* 0x3C */ + + union nvm_iscsi_name target_name; /* 0x44 */ + union nvm_iscsi_chap_name chap_name; /* 0x144 */ + union nvm_iscsi_chap_password chap_password; /* 0x244 */ + + u32 rsvd_2[107]; /* 0x2C4 */ +}; + +struct nvm_iscsi_block { + u32 id; /* 0x0 */ +#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_MASK 0x0000000F +#define NVM_ISCSI_CFG_BLK_MAPPED_PF_ID_OFFSET 0 +#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_MASK 0x00000FF0 +#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_OFFSET 4 +#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_IS_NOT_EMPTY BIT(0) +#define NVM_ISCSI_CFG_BLK_CTRL_FLAG_PF_MAPPED BIT(1) + + u32 rsvd_1[5]; /* 0x4 */ + + struct nvm_iscsi_generic generic; /* 0x18 */ + struct nvm_iscsi_initiator initiator; /* 0x218 */ + struct nvm_iscsi_target target[NUM_OF_ISCSI_TARGET_PER_PF]; + /* 0x718 */ + + u32 rsvd_2[58]; /* 0x1718 */ + /* total size - 0x1800 - 6K block */ +}; + +struct nvm_iscsi_cfg { + u32 id; /* 0x0 */ +#define NVM_ISCSI_CFG_BLK_VERSION_MINOR_MASK 0x000000FF +#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR_MASK 0x0000FF00 +#define NVM_ISCSI_CFG_BLK_SIGNATURE_MASK 0xFFFF0000 +#define NVM_ISCSI_CFG_BLK_SIGNATURE 0x49430000 /* IC - Iscsi + * Config + */ + +#define NVM_ISCSI_CFG_BLK_VERSION_MAJOR 0 +#define NVM_ISCSI_CFG_BLK_VERSION_MINOR 10 +#define NVM_ISCSI_CFG_BLK_VERSION ((NVM_ISCSI_CFG_BLK_VERSION_MAJOR << 8) | \ + NVM_ISCSI_CFG_BLK_VERSION_MINOR) + + struct nvm_iscsi_block block[NUM_OF_ISCSI_PF_SUPPORTED]; /* 0x4 */ +}; + +#endif diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 6c6e624a5aa6..7b3b702ef622 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -2040,9 +2040,9 @@ qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) * For type 3: ref & app tag is all 'f's * For type 0,1,2: app tag is all 'f's */ - if ((a_app_tag == 0xffff) && + if ((a_app_tag == T10_PI_APP_ESCAPE) && ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) || - (a_ref_tag == 0xffffffff))) { + (a_ref_tag == T10_PI_REF_ESCAPE))) { uint32_t blocks_done, resid; sector_t lba_s = scsi_get_lba(cmd); @@ -2084,9 +2084,9 @@ 
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24) spt = page_address(sg_page(sg)) + sg->offset; spt += j; - spt->app_tag = 0xffff; + spt->app_tag = T10_PI_APP_ESCAPE; if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3) - spt->ref_tag = 0xffffffff; + spt->ref_tag = T10_PI_REF_ESCAPE; } return 0; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index 2a0173e5d10e..e101cd3043b9 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -1874,36 +1874,13 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, struct abts_recv_from_24xx *abts, struct fc_port *sess) { struct qla_hw_data *ha = vha->hw; - struct se_session *se_sess = sess->se_sess; struct qla_tgt_mgmt_cmd *mcmd; - struct qla_tgt_cmd *cmd; - struct se_cmd *se_cmd; int rc; - bool found_lun = false; - unsigned long flags; - - spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); - list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { - if (se_cmd->tag == abts->exchange_addr_to_abort) { - found_lun = true; - break; - } - } - spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); - /* cmd not in LIO lists, look in qla list */ - if (!found_lun) { - if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) { - /* send TASK_ABORT response immediately */ - qlt_24xx_send_abts_resp(ha->base_qpair, abts, - FCP_TMF_CMPL, false); - return 0; - } else { - ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081, - "unable to find cmd in driver or LIO for tag 0x%x\n", - abts->exchange_addr_to_abort); - return -ENOENT; - } + if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) { + /* send TASK_ABORT response immediately */ + qlt_24xx_send_abts_resp(ha->base_qpair, abts, FCP_TMF_CMPL, false); + return 0; } ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, @@ -1919,14 +1896,17 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, } memset(mcmd, 0, sizeof(*mcmd)); - cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd); mcmd->sess = sess; memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts)); mcmd->reset_count = ha->base_qpair->chip_reset; mcmd->tmr_func = QLA_TGT_ABTS; mcmd->qpair = ha->base_qpair; - rc = ha->tgt.tgt_ops->handle_tmr(mcmd, cmd->unpacked_lun, mcmd->tmr_func, + /* + * LUN is looked up by target-core internally based on the passed + * abts->exchange_addr_to_abort tag. 
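+ * (tcm_qla2xxx_handle_tmr() below sets TARGET_SCF_LOOKUP_LUN_FROM_TAG for + * QLA_TGT_ABTS, so target_submit_tmr() resolves the LUN from that tag + * instead of requiring a driver-side command lookup.)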
+ */ + rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, mcmd->tmr_func, abts->exchange_addr_to_abort); if (rc != 0) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052, @@ -3747,7 +3727,7 @@ static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha, h &= QLA_CMD_HANDLE_MASK; if (h != QLA_TGT_NULL_HANDLE) { - if (unlikely(h > req->num_outstanding_cmds)) { + if (unlikely(h >= req->num_outstanding_cmds)) { ql_dbg(ql_dbg_tgt, vha, 0xe052, "qla_target(%d): Wrong handle %x received\n", vha->vp_idx, handle); diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c index c4b414833b86..b20da0d27ad7 100644 --- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c +++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c @@ -600,11 +600,13 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun, struct fc_port *sess = mcmd->sess; struct se_cmd *se_cmd = &mcmd->se_cmd; int transl_tmr_func = 0; + int flags = TARGET_SCF_ACK_KREF; switch (tmr_func) { case QLA_TGT_ABTS: pr_debug("%ld: ABTS received\n", sess->vha->host_no); transl_tmr_func = TMR_ABORT_TASK; + flags |= TARGET_SCF_LOOKUP_LUN_FROM_TAG; break; case QLA_TGT_2G_ABORT_TASK: pr_debug("%ld: 2G Abort Task received\n", sess->vha->host_no); @@ -637,7 +639,7 @@ static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, u64 lun, } return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd, - transl_tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF); + transl_tmr_func, GFP_ATOMIC, tag, flags); } static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd) diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c index 21225d62b0c1..1e82d4128a84 100644 --- a/drivers/scsi/sg.c +++ b/drivers/scsi/sg.c @@ -758,8 +758,11 @@ static bool sg_is_valid_dxfer(sg_io_hdr_t *hp) if (hp->dxferp || hp->dxfer_len > 0) return false; return true; - case SG_DXFER_TO_DEV: case SG_DXFER_FROM_DEV: + if (hp->dxfer_len < 0) + return false; + return true; + case SG_DXFER_TO_DEV: case SG_DXFER_TO_FROM_DEV: if (!hp->dxferp || hp->dxfer_len == 0) return false; diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c index 8b93197daefe..9be211d68b15 100644 --- a/drivers/scsi/virtio_scsi.c +++ b/drivers/scsi/virtio_scsi.c @@ -837,6 +837,7 @@ static struct scsi_host_template virtscsi_host_template_multi = { .eh_abort_handler = virtscsi_abort, .eh_device_reset_handler = virtscsi_device_reset, .eh_timed_out = virtscsi_eh_timed_out, + .slave_alloc = virtscsi_device_alloc, .can_queue = 1024, .dma_boundary = UINT_MAX, diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index 2afe3597982e..f4b7a98a7913 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -134,7 +134,6 @@ struct apid_data { * @spmic: SPMI controller object * @ver_ops: version dependent operations. * @ppid_to_apid in-memory copy of PPID -> channel (APID) mapping table. - * v2 only. 
*/ struct spmi_pmic_arb { void __iomem *rd_base; @@ -1016,6 +1015,13 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) goto err_put_ctrl; } + pa->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID, + sizeof(*pa->ppid_to_apid), GFP_KERNEL); + if (!pa->ppid_to_apid) { + err = -ENOMEM; + goto err_put_ctrl; + } + hw_ver = readl_relaxed(core + PMIC_ARB_VERSION); if (hw_ver < PMIC_ARB_VERSION_V2_MIN) { @@ -1048,15 +1054,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) err = PTR_ERR(pa->wr_base); goto err_put_ctrl; } - - pa->ppid_to_apid = devm_kcalloc(&ctrl->dev, - PMIC_ARB_MAX_PPID, - sizeof(*pa->ppid_to_apid), - GFP_KERNEL); - if (!pa->ppid_to_apid) { - err = -ENOMEM; - goto err_put_ctrl; - } } dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n", diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c index 2b9b0941d9eb..6d23226e5f69 100644 --- a/drivers/spmi/spmi.c +++ b/drivers/spmi/spmi.c @@ -365,11 +365,23 @@ static int spmi_drv_remove(struct device *dev) return 0; } +static int spmi_drv_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + int ret; + + ret = of_device_uevent_modalias(dev, env); + if (ret != -ENODEV) + return ret; + + return 0; +} + static struct bus_type spmi_bus_type = { .name = "spmi", .match = spmi_device_match, .probe = spmi_drv_probe, .remove = spmi_drv_remove, + .uevent = spmi_drv_uevent, }; /** diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig index 268d4e6ef48a..ef28a1cb64ae 100644 --- a/drivers/staging/Kconfig +++ b/drivers/staging/Kconfig @@ -110,4 +110,6 @@ source "drivers/staging/ccree/Kconfig" source "drivers/staging/typec/Kconfig" +source "drivers/staging/vboxvideo/Kconfig" + endif # STAGING diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile index b93e6f5f0f6e..2918580bdb9e 100644 --- a/drivers/staging/Makefile +++ b/drivers/staging/Makefile @@ -44,3 +44,4 @@ obj-$(CONFIG_KS7010) += ks7010/ obj-$(CONFIG_GREYBUS) += greybus/ obj-$(CONFIG_BCM2835_VCHIQ) += vc04_services/ obj-$(CONFIG_CRYPTO_DEV_CCREE) += ccree/ +obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c index b2e382888981..2f7bfc1c59e5 100644 --- a/drivers/staging/comedi/drivers/ni_mio_common.c +++ b/drivers/staging/comedi/drivers/ni_mio_common.c @@ -3116,8 +3116,7 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev, /* following line: 2-1 per STC */ ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG); ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG); - /* following line: N-1 per STC */ - ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG); + ni_stc_writel(dev, trigvar, NISTC_AO_UI_LOADA_REG); } else { /* TRIG_EXT */ /* FIXME: assert scan_begin_arg != 0, ret failure otherwise */ devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA; diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c index 85b242ec5f9b..8fc191d99927 100644 --- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c +++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c @@ -1640,8 +1640,13 @@ kiblnd_send(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg) ibmsg = tx->tx_msg; ibmsg->ibm_u.immediate.ibim_hdr = *hdr; - copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, IBLND_MSG_SIZE, - &from); + rc = copy_from_iter(&ibmsg->ibm_u.immediate.ibim_payload, payload_nob, + &from); + if (rc != payload_nob) { + kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list); + 
return -EFAULT; + } + nob = offsetof(struct kib_immediate_msg, ibim_payload[payload_nob]); kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob); @@ -1741,8 +1746,14 @@ kiblnd_recv(struct lnet_ni *ni, void *private, struct lnet_msg *lntmsg, break; } - copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, - IBLND_MSG_SIZE, to); + rc = copy_to_iter(&rxmsg->ibm_u.immediate.ibim_payload, rlen, + to); + if (rc != rlen) { + rc = -EFAULT; + break; + } + + rc = 0; lnet_finalize(ni, lntmsg, 0); break; diff --git a/drivers/staging/mt29f_spinand/mt29f_spinand.c b/drivers/staging/mt29f_spinand/mt29f_spinand.c index e389009fca42..a4e3ae8f0c85 100644 --- a/drivers/staging/mt29f_spinand/mt29f_spinand.c +++ b/drivers/staging/mt29f_spinand/mt29f_spinand.c @@ -915,6 +915,8 @@ static int spinand_probe(struct spi_device *spi_nand) chip->waitfunc = spinand_wait; chip->options |= NAND_CACHEPRG; chip->select_chip = spinand_select_chip; + chip->onfi_set_features = nand_onfi_get_set_features_notsupp; + chip->onfi_get_features = nand_onfi_get_set_features_notsupp; mtd = nand_to_mtd(chip); diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c index 002d09159896..a69007ef77bf 100644 --- a/drivers/staging/rtl8188eu/core/rtw_cmd.c +++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c @@ -132,7 +132,7 @@ void rtw_free_cmd_obj(struct cmd_obj *pcmd) kfree(pcmd->parmbuf); } - if (!pcmd->rsp) { + if (pcmd->rsp) { if (pcmd->rspsz != 0) { /* free rsp in cmd_obj */ kfree(pcmd->rsp); diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c index 963235fd7292..d283341cfe43 100644 --- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c +++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c @@ -43,6 +43,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = { {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */ + {USB_DEVICE(0x2357, 0x010c)}, /* TP-Link TL-WN722N v2 */ {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ {} /* Terminating entry */ }; diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c index 944dd25924be..4754f7a20684 100644 --- a/drivers/staging/sm750fb/ddk750_chip.c +++ b/drivers/staging/sm750fb/ddk750_chip.c @@ -40,7 +40,7 @@ static unsigned int get_mxclk_freq(void) pll_reg = peek32(MXCLK_PLL_CTRL); M = (pll_reg & PLL_CTRL_M_MASK) >> PLL_CTRL_M_SHIFT; - N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_M_SHIFT; + N = (pll_reg & PLL_CTRL_N_MASK) >> PLL_CTRL_N_SHIFT; OD = (pll_reg & PLL_CTRL_OD_MASK) >> PLL_CTRL_OD_SHIFT; POD = (pll_reg & PLL_CTRL_POD_MASK) >> PLL_CTRL_POD_SHIFT; diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c index 3aa4128703d5..67207b0554cd 100644 --- a/drivers/staging/sm750fb/sm750.c +++ b/drivers/staging/sm750fb/sm750.c @@ -1053,6 +1053,26 @@ release_fb: return err; } +static int lynxfb_kick_out_firmware_fb(struct pci_dev *pdev) +{ + struct apertures_struct *ap; + bool primary = false; + + ap = alloc_apertures(1); + if (!ap) + return -ENOMEM; + + ap->ranges[0].base = pci_resource_start(pdev, 0); + ap->ranges[0].size = pci_resource_len(pdev, 0); +#ifdef CONFIG_X86 + primary = pdev->resource[PCI_ROM_RESOURCE].flags & + IORESOURCE_ROM_SHADOW; +#endif + remove_conflicting_framebuffers(ap, "sm750_fb1", primary); + kfree(ap); + return 0; +} + static int lynxfb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1061,6 +1081,10 @@ static 
int lynxfb_pci_probe(struct pci_dev *pdev, int fbidx; int err; + err = lynxfb_kick_out_firmware_fb(pdev); + if (err) + return err; + /* enable device */ err = pcim_enable_device(pdev); if (err) diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c index 82e5de248947..67956e24779c 100644 --- a/drivers/staging/speakup/main.c +++ b/drivers/staging/speakup/main.c @@ -2314,6 +2314,7 @@ static void __exit speakup_exit(void) mutex_lock(&spk_mutex); synth_release(); mutex_unlock(&spk_mutex); + spk_ttyio_unregister_ldisc(); speakup_kobj_exit(); @@ -2376,6 +2377,7 @@ static int __init speakup_init(void) if (err) goto error_kobjects; + spk_ttyio_register_ldisc(); synth_init(synth_name); speakup_register_devsynth(); /* diff --git a/drivers/staging/speakup/spk_priv.h b/drivers/staging/speakup/spk_priv.h index 87b6a0a4c54d..046040ac074c 100644 --- a/drivers/staging/speakup/spk_priv.h +++ b/drivers/staging/speakup/spk_priv.h @@ -48,6 +48,8 @@ void spk_stop_serial_interrupt(void); int spk_wait_for_xmitr(struct spk_synth *in_synth); void spk_serial_release(void); void spk_ttyio_release(void); +void spk_ttyio_register_ldisc(void); +void spk_ttyio_unregister_ldisc(void); void synth_buffer_skip_nonlatin1(void); u16 synth_buffer_getc(void); diff --git a/drivers/staging/speakup/spk_ttyio.c b/drivers/staging/speakup/spk_ttyio.c index ed8e96b06ead..fe340b07c482 100644 --- a/drivers/staging/speakup/spk_ttyio.c +++ b/drivers/staging/speakup/spk_ttyio.c @@ -154,12 +154,6 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth) struct ktermios tmp_termios; dev_t dev; - ret = tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops); - if (ret) { - pr_err("Error registering line discipline.\n"); - return ret; - } - ret = get_dev_to_use(synth, &dev); if (ret) return ret; @@ -196,10 +190,24 @@ static int spk_ttyio_initialise_ldisc(struct spk_synth *synth) tty_unlock(tty); ret = tty_set_ldisc(tty, N_SPEAKUP); + if (ret) + pr_err("speakup: Failed to set N_SPEAKUP on tty\n"); return ret; } +void spk_ttyio_register_ldisc(void) +{ + if (tty_register_ldisc(N_SPEAKUP, &spk_ttyio_ldisc_ops)) + pr_warn("speakup: Error registering line discipline. Most synths won't work.\n"); +} + +void spk_ttyio_unregister_ldisc(void) +{ + if (tty_unregister_ldisc(N_SPEAKUP)) + pr_warn("speakup: Couldn't unregister ldisc\n"); +} + static int spk_ttyio_out(struct spk_synth *in_synth, const char ch) { if (in_synth->alive && speakup_tty && speakup_tty->ops->write) { @@ -300,7 +308,7 @@ void spk_ttyio_release(void) tty_ldisc_flush(speakup_tty); tty_unlock(speakup_tty); - tty_ldisc_release(speakup_tty); + tty_release_struct(speakup_tty, speakup_tty->index); } EXPORT_SYMBOL_GPL(spk_ttyio_release); diff --git a/drivers/staging/vboxvideo/Kconfig b/drivers/staging/vboxvideo/Kconfig new file mode 100644 index 000000000000..a52746f9a670 --- /dev/null +++ b/drivers/staging/vboxvideo/Kconfig @@ -0,0 +1,12 @@ +config DRM_VBOXVIDEO + tristate "Virtual Box Graphics Card" + depends on DRM && X86 && PCI + select DRM_KMS_HELPER + help + This is a KMS driver for the virtual Graphics Card used in + Virtual Box virtual machines. + + Although it is possible to build this driver into the kernel, it is + advised to build it as a module, so that it can be updated + independently of the kernel. Select M to build this driver as a + module and add support for these devices via drm/kms interfaces.
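For reference, a minimal sketch of enabling this driver in a kernel configuration, assuming a tree with the staging area enabled (the symbol names follow the Kconfig above and the Makefile that follows):

CONFIG_STAGING=y
CONFIG_DRM_VBOXVIDEO=m

The resulting module can then be loaded as usual, e.g. "modprobe vboxvideo".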
diff --git a/drivers/staging/vboxvideo/Makefile b/drivers/staging/vboxvideo/Makefile new file mode 100644 index 000000000000..2d0b3bc7ad73 --- /dev/null +++ b/drivers/staging/vboxvideo/Makefile @@ -0,0 +1,7 @@ +ccflags-y := -Iinclude/drm + +vboxvideo-y := hgsmi_base.o modesetting.o vbva_base.o \ + vbox_drv.o vbox_fb.o vbox_hgsmi.o vbox_irq.o vbox_main.o \ + vbox_mode.o vbox_prime.o vbox_ttm.o + +obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo.o diff --git a/drivers/staging/vboxvideo/TODO b/drivers/staging/vboxvideo/TODO new file mode 100644 index 000000000000..ce764309b079 --- /dev/null +++ b/drivers/staging/vboxvideo/TODO @@ -0,0 +1,9 @@ +TODO: +-Move the driver over to the atomic API +-Stop using old load / unload drm_driver hooks +-Get a full review from the drm-maintainers on dri-devel done on this driver +-Extend this TODO with the results of that review + +Please send any patches to Greg Kroah-Hartman <gregkh@linuxfoundation.org>, +Hans de Goede <hdegoede@redhat.com> and +Michael Thayer <michael.thayer@oracle.com>. diff --git a/drivers/staging/vboxvideo/hgsmi_base.c b/drivers/staging/vboxvideo/hgsmi_base.c new file mode 100644 index 000000000000..15ff5f42e2cd --- /dev/null +++ b/drivers/staging/vboxvideo/hgsmi_base.c @@ -0,0 +1,246 @@ +/* + * Copyright (C) 2006-2017 Oracle Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "vbox_drv.h" +#include "vbox_err.h" +#include "vboxvideo_guest.h" +#include "vboxvideo_vbe.h" +#include "hgsmi_channels.h" +#include "hgsmi_ch_setup.h" + +/** + * Inform the host of the location of the host flags in VRAM via an HGSMI cmd. + * @param ctx the context of the guest heap to use. + * @param location the offset chosen for the flags within guest VRAM. + * @returns 0 on success, -errno on failure + */ +int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location) +{ + struct hgsmi_buffer_location *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_HGSMI, + HGSMI_CC_HOST_FLAGS_LOCATION); + if (!p) + return -ENOMEM; + + p->buf_location = location; + p->buf_len = sizeof(struct hgsmi_host_flags); + + hgsmi_buffer_submit(ctx, p); + hgsmi_buffer_free(ctx, p); + + return 0; +} + +/** + * Notify the host of HGSMI-related guest capabilities via an HGSMI command. + * @param ctx the context of the guest heap to use. + * @param caps the capabilities to report, see vbva_caps. 
+ * @returns 0 on success, -errno on failure + */ +int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps) +{ + struct vbva_caps *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_INFO_CAPS); + if (!p) + return -ENOMEM; + + p->rc = VERR_NOT_IMPLEMENTED; + p->caps = caps; + + hgsmi_buffer_submit(ctx, p); + + WARN_ON_ONCE(RT_FAILURE(p->rc)); + + hgsmi_buffer_free(ctx, p); + + return 0; +} + +int hgsmi_test_query_conf(struct gen_pool *ctx) +{ + u32 value = 0; + int ret; + + ret = hgsmi_query_conf(ctx, U32_MAX, &value); + if (ret) + return ret; + + return value == U32_MAX ? 0 : -EIO; +} + +/** + * Query the host for an HGSMI configuration parameter via an HGSMI command. + * @param ctx the context containing the heap used + * @param index the index of the parameter to query, + * @see vbva_conf32::index + * @param value_ret where to store the value of the parameter on success + * @returns 0 on success, -errno on failure + */ +int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret) +{ + struct vbva_conf32 *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, + VBVA_QUERY_CONF32); + if (!p) + return -ENOMEM; + + p->index = index; + p->value = U32_MAX; + + hgsmi_buffer_submit(ctx, p); + + *value_ret = p->value; + + hgsmi_buffer_free(ctx, p); + + return 0; +} + +/** + * Pass the host a new mouse pointer shape via an HGSMI command. + * + * @param ctx the context containing the heap to be used + * @param flags cursor flags, @see VMMDevReqMousePointer::flags + * @param hot_x horizontal position of the hot spot + * @param hot_y vertical position of the hot spot + * @param width width in pixels of the cursor + * @param height height in pixels of the cursor + * @param pixels pixel data, @see VMMDevReqMousePointer for the format + * @param len size in bytes of the pixel data + * @returns 0 on success, -errno on failure + */ +int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags, + u32 hot_x, u32 hot_y, u32 width, u32 height, + u8 *pixels, u32 len) +{ + struct vbva_mouse_pointer_shape *p; + u32 pixel_len = 0; + int rc; + + if (flags & VBOX_MOUSE_POINTER_SHAPE) { + /* + * Size of the pointer data: + * sizeof (AND mask) + sizeof (XOR_MASK) + */ + pixel_len = ((((width + 7) / 8) * height + 3) & ~3) + + width * 4 * height; + if (pixel_len > len) + return -EINVAL; + + /* + * If shape is supplied, then always create the pointer visible. + * See comments in 'vboxUpdatePointerShape' + */ + flags |= VBOX_MOUSE_POINTER_VISIBLE; + } + + p = hgsmi_buffer_alloc(ctx, sizeof(*p) + pixel_len, HGSMI_CH_VBVA, + VBVA_MOUSE_POINTER_SHAPE); + if (!p) + return -ENOMEM; + + p->result = VINF_SUCCESS; + p->flags = flags; + p->hot_X = hot_x; + p->hot_y = hot_y; + p->width = width; + p->height = height; + if (pixel_len) + memcpy(p->data, pixels, pixel_len); + + hgsmi_buffer_submit(ctx, p); + + switch (p->result) { + case VINF_SUCCESS: + rc = 0; + break; + case VERR_NO_MEMORY: + rc = -ENOMEM; + break; + case VERR_NOT_SUPPORTED: + rc = -EBUSY; + break; + default: + rc = -EINVAL; + } + + hgsmi_buffer_free(ctx, p); + + return rc; +} + +/** + * Report the guest cursor position. The host may wish to use this information + * to re-position its own cursor (though this is currently unlikely). The + * current host cursor position is returned. + * @param ctx The context containing the heap used. + * @param report_position Are we reporting a position? + * @param x Guest cursor X position. + * @param y Guest cursor Y position. + * @param x_host Host cursor X position is stored here. Optional. 
+ * @param y_host Host cursor Y position is stored here. Optional. + * @returns 0 on success, -errno on failure + */ +int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position, + u32 x, u32 y, u32 *x_host, u32 *y_host) +{ + struct vbva_cursor_position *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, + VBVA_CURSOR_POSITION); + if (!p) + return -ENOMEM; + + p->report_position = report_position; + p->x = x; + p->y = y; + + hgsmi_buffer_submit(ctx, p); + + *x_host = p->x; + *y_host = p->y; + + hgsmi_buffer_free(ctx, p); + + return 0; +} + +/** + * @todo Mouse pointer position to be read from VMMDev memory, address of the + * memory region can be queried from VMMDev via an IOCTL. This VMMDev memory + * region will contain host information which is needed by the guest. + * + * Reading will not cause a switch to the host. + * + * Have to take into account: + * * synchronization: host must write to the memory only from EMT, + * large structures must be read under flag, which tells the host + * that the guest is currently reading the memory (OWNER flag?). + * * guest writes: may be allocate a page for the host info and make + * the page readonly for the guest. + * * the information should be available only for additions drivers. + * * VMMDev additions driver will inform the host which version of the info + * it expects, host must support all versions. + */ diff --git a/drivers/staging/vboxvideo/hgsmi_ch_setup.h b/drivers/staging/vboxvideo/hgsmi_ch_setup.h new file mode 100644 index 000000000000..8e6d9e11a69c --- /dev/null +++ b/drivers/staging/vboxvideo/hgsmi_ch_setup.h @@ -0,0 +1,66 @@ +/* + * Copyright (C) 2006-2017 Oracle Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __HGSMI_CH_SETUP_H__ +#define __HGSMI_CH_SETUP_H__ + +/* + * Tell the host the location of hgsmi_host_flags structure, where the host + * can write information about pending buffers, etc, and which can be quickly + * polled by the guest without a need to port IO. + */ +#define HGSMI_CC_HOST_FLAGS_LOCATION 0 + +struct hgsmi_buffer_location { + u32 buf_location; + u32 buf_len; +} __packed; + +/* HGSMI setup and configuration data structures. 
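+ * The HGSMIHOSTFLAGS_* values below are bit flags stored in the + * hgsmi_host_flags::host_flags word defined further down in this file.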
*/ +/* host->guest commands pending, should be accessed under FIFO lock only */ +#define HGSMIHOSTFLAGS_COMMANDS_PENDING 0x01u +/* IRQ is fired, should be accessed under VGAState::lock only */ +#define HGSMIHOSTFLAGS_IRQ 0x02u +/* vsync interrupt flag, should be accessed under VGAState::lock only */ +#define HGSMIHOSTFLAGS_VSYNC 0x10u +/** monitor hotplug flag, should be accessed under VGAState::lock only */ +#define HGSMIHOSTFLAGS_HOTPLUG 0x20u +/** + * Cursor capability state change flag, should be accessed under + * VGAState::lock only. @see vbva_conf32. + */ +#define HGSMIHOSTFLAGS_CURSOR_CAPABILITIES 0x40u + +struct hgsmi_host_flags { + /* + * Host flags can be accessed and modified in multiple threads + * concurrently, e.g. CrOpenGL HGCM and GUI threads when completing + * HGSMI 3D and Video Accel respectively, EMT thread when dealing with + * HGSMI command processing, etc. + * Besides settings/cleaning flags atomically, some flags have their + * own special sync restrictions, see comments for flags above. + */ + u32 host_flags; + u32 reserved[3]; +} __packed; + +#endif diff --git a/drivers/staging/vboxvideo/hgsmi_channels.h b/drivers/staging/vboxvideo/hgsmi_channels.h new file mode 100644 index 000000000000..a2a34b2167b4 --- /dev/null +++ b/drivers/staging/vboxvideo/hgsmi_channels.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2006-2017 Oracle Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __HGSMI_CHANNELS_H__ +#define __HGSMI_CHANNELS_H__ + +/* + * Each channel has an 8 bit identifier. There are a number of predefined + * (hardcoded) channels. + * + * HGSMI_CH_HGSMI channel can be used to map a string channel identifier + * to a free 16 bit numerical value. values are allocated in range + * [HGSMI_CH_STRING_FIRST;HGSMI_CH_STRING_LAST]. 
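+ * (e.g. the first dynamically mapped string channel would typically be + * assigned HGSMI_CH_STRING_FIRST, i.e. 0x20).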
+ */ + +/* A reserved channel value */ +#define HGSMI_CH_RESERVED 0x00 +/* HGCMI: setup and configuration */ +#define HGSMI_CH_HGSMI 0x01 +/* Graphics: VBVA */ +#define HGSMI_CH_VBVA 0x02 +/* Graphics: Seamless with a single guest region */ +#define HGSMI_CH_SEAMLESS 0x03 +/* Graphics: Seamless with separate host windows */ +#define HGSMI_CH_SEAMLESS2 0x04 +/* Graphics: OpenGL HW acceleration */ +#define HGSMI_CH_OPENGL 0x05 + +/* The first channel index to be used for string mappings (inclusive) */ +#define HGSMI_CH_STRING_FIRST 0x20 +/* The last channel index for string mappings (inclusive) */ +#define HGSMI_CH_STRING_LAST 0xff + +#endif diff --git a/drivers/staging/vboxvideo/hgsmi_defs.h b/drivers/staging/vboxvideo/hgsmi_defs.h new file mode 100644 index 000000000000..5b21fb974d20 --- /dev/null +++ b/drivers/staging/vboxvideo/hgsmi_defs.h @@ -0,0 +1,92 @@ +/* + * Copyright (C) 2006-2017 Oracle Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef __HGSMI_DEFS_H__ +#define __HGSMI_DEFS_H__ + +/* Buffer sequence type mask. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_MASK 0x03 +/* Single buffer, not a part of a sequence. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_SINGLE 0x00 +/* The first buffer in a sequence. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_START 0x01 +/* A middle buffer in a sequence. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE 0x02 +/* The last buffer in a sequence. */ +#define HGSMI_BUFFER_HEADER_F_SEQ_END 0x03 + +/* 16 bytes buffer header. */ +struct hgsmi_buffer_header { + u32 data_size; /* Size of data that follows the header. */ + u8 flags; /* HGSMI_BUFFER_HEADER_F_* */ + u8 channel; /* The channel the data must be routed to. */ + u16 channel_info; /* Opaque to the HGSMI, used by the channel. */ + + union { + /* Opaque placeholder to make the union 8 bytes. */ + u8 header_data[8]; + + /* HGSMI_BUFFER_HEADER_F_SEQ_SINGLE */ + struct { + u32 reserved1; /* A reserved field, initialize to 0. */ + u32 reserved2; /* A reserved field, initialize to 0. */ + } buffer; + + /* HGSMI_BUFFER_HEADER_F_SEQ_START */ + struct { + /* Must be the same for all buffers in the sequence. */ + u32 sequence_number; + /* The total size of the sequence. */ + u32 sequence_size; + } sequence_start; + + /* + * HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE and + * HGSMI_BUFFER_HEADER_F_SEQ_END + */ + struct { + /* Must be the same for all buffers in the sequence. */ + u32 sequence_number; + /* Data offset in the entire sequence. 
+			u32 sequence_offset;
+		} sequence_continue;
+	} u;
+} __packed;
+
+/* 8-byte buffer tail. */
+struct hgsmi_buffer_tail {
+	/* Reserved, must be initialized to 0. */
+	u32 reserved;
+	/*
+	 * One-at-a-Time Hash: http://www.burtleburtle.net/bob/hash/doobs.html
+	 * Computed over the buffer offset, the header and the first 4 bytes
+	 * of the tail.
+	 */
+	u32 checksum;
+} __packed;
+
+/*
+ * The size of the array of channels. Array indexes are u8.
+ * Note: the value must not be changed.
+ */
+#define HGSMI_NUMBER_OF_CHANNELS 0x100
+
+#endif
diff --git a/drivers/staging/vboxvideo/modesetting.c b/drivers/staging/vboxvideo/modesetting.c
new file mode 100644
index 000000000000..7616b8aab23a
--- /dev/null
+++ b/drivers/staging/vboxvideo/modesetting.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "vboxvideo_vbe.h"
+#include "hgsmi_channels.h"
+
+/**
+ * Set a video mode via an HGSMI request. The views must have been
+ * initialised first using @a VBoxHGSMISendViewInfo and, if the mode is being
+ * set on the first display, it must first be set using the VBE registers.
+ * @param ctx           The context containing the heap to use
+ * @param display       The screen number
+ * @param origin_x      The horizontal displacement relative to the first
+ *                      screen
+ * @param origin_y      The vertical displacement relative to the first screen
+ * @param start_offset  The offset of the visible area of the framebuffer
+ *                      relative to the framebuffer start
+ * @param pitch         The offset in bytes between the starts of two adjacent
+ *                      scan lines in video RAM
+ * @param width         The mode width
+ * @param height        The mode height
+ * @param bpp           The colour depth of the mode
+ * @param flags         Flags
+ */
+void hgsmi_process_display_info(struct gen_pool *ctx, u32 display,
+				s32 origin_x, s32 origin_y, u32 start_offset,
+				u32 pitch, u32 width, u32 height,
+				u16 bpp, u16 flags)
+{
+	struct vbva_infoscreen *p;
+
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA,
+			       VBVA_INFO_SCREEN);
+	if (!p)
+		return;
+
+	p->view_index = display;
+	p->origin_x = origin_x;
+	p->origin_y = origin_y;
+	p->start_offset = start_offset;
+	p->line_size = pitch;
+	p->width = width;
+	p->height = height;
+	p->bits_per_pixel = bpp;
+	p->flags = flags;
+
+	hgsmi_buffer_submit(ctx, p);
+	hgsmi_buffer_free(ctx, p);
+}
+
+/**
+ * Report the rectangle relative to which absolute pointer events should be
+ * expressed.
This information remains valid until the next VBVA resize event + * for any screen, at which time it is reset to the bounding rectangle of all + * virtual screens. + * @param ctx The context containing the heap to use. + * @param origin_x Upper left X co-ordinate relative to the first screen. + * @param origin_y Upper left Y co-ordinate relative to the first screen. + * @param width Rectangle width. + * @param height Rectangle height. + * @returns 0 on success, -errno on failure + */ +int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y, + u32 width, u32 height) +{ + struct vbva_report_input_mapping *p; + + p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, + VBVA_REPORT_INPUT_MAPPING); + if (!p) + return -ENOMEM; + + p->x = origin_x; + p->y = origin_y; + p->cx = width; + p->cy = height; + + hgsmi_buffer_submit(ctx, p); + hgsmi_buffer_free(ctx, p); + + return 0; +} + +/** + * Get most recent video mode hints. + * @param ctx The context containing the heap to use. + * @param screens The number of screens to query hints for, starting at 0. + * @param hints Array of vbva_modehint structures for receiving the hints. + * @returns 0 on success, -errno on failure + */ +int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens, + struct vbva_modehint *hints) +{ + struct vbva_query_mode_hints *p; + size_t size; + + if (WARN_ON(!hints)) + return -EINVAL; + + size = screens * sizeof(struct vbva_modehint); + p = hgsmi_buffer_alloc(ctx, sizeof(*p) + size, HGSMI_CH_VBVA, + VBVA_QUERY_MODE_HINTS); + if (!p) + return -ENOMEM; + + p->hints_queried_count = screens; + p->hint_structure_guest_size = sizeof(struct vbva_modehint); + p->rc = VERR_NOT_SUPPORTED; + + hgsmi_buffer_submit(ctx, p); + + if (RT_FAILURE(p->rc)) { + hgsmi_buffer_free(ctx, p); + return -EIO; + } + + memcpy(hints, ((u8 *)p) + sizeof(struct vbva_query_mode_hints), size); + hgsmi_buffer_free(ctx, p); + + return 0; +} diff --git a/drivers/staging/vboxvideo/vbox_drv.c b/drivers/staging/vboxvideo/vbox_drv.c new file mode 100644 index 000000000000..92ae1560a16d --- /dev/null +++ b/drivers/staging/vboxvideo/vbox_drv.c @@ -0,0 +1,286 @@ +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_drv.c + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ +#include <linux/module.h> +#include <linux/console.h> +#include <linux/vt_kern.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc_helper.h> + +#include "vbox_drv.h" + +int vbox_modeset = -1; + +MODULE_PARM_DESC(modeset, "Disable/Enable modesetting"); +module_param_named(modeset, vbox_modeset, int, 0400); + +static struct drm_driver driver; + +static const struct pci_device_id pciidlist[] = { + { 0x80ee, 0xbeef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { 0, 0, 0}, +}; +MODULE_DEVICE_TABLE(pci, pciidlist); + +static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + return drm_get_pci_dev(pdev, ent, &driver); +} + +static void vbox_pci_remove(struct pci_dev *pdev) +{ + struct drm_device *dev = pci_get_drvdata(pdev); + + drm_put_dev(dev); +} + +static int vbox_drm_freeze(struct drm_device *dev) +{ + struct vbox_private *vbox = dev->dev_private; + + drm_kms_helper_poll_disable(dev); + + pci_save_state(dev->pdev); + + drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, true); + + return 0; +} + +static int vbox_drm_thaw(struct drm_device *dev) +{ + struct vbox_private *vbox = dev->dev_private; + + drm_mode_config_reset(dev); + drm_helper_resume_force_mode(dev); + drm_fb_helper_set_suspend_unlocked(&vbox->fbdev->helper, false); + + return 0; +} + +static int vbox_drm_resume(struct drm_device *dev) +{ + int ret; + + if (pci_enable_device(dev->pdev)) + return -EIO; + + ret = vbox_drm_thaw(dev); + if (ret) + return ret; + + drm_kms_helper_poll_enable(dev); + + return 0; +} + +static int vbox_pm_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + int error; + + error = vbox_drm_freeze(ddev); + if (error) + return error; + + pci_disable_device(pdev); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int vbox_pm_resume(struct device *dev) +{ + struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + + return vbox_drm_resume(ddev); +} + +static int vbox_pm_freeze(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct drm_device *ddev = pci_get_drvdata(pdev); + + if (!ddev || !ddev->dev_private) + return -ENODEV; + + return vbox_drm_freeze(ddev); +} + +static int vbox_pm_thaw(struct device *dev) +{ + struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + + return vbox_drm_thaw(ddev); +} + +static int vbox_pm_poweroff(struct device *dev) +{ + struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev)); + + return vbox_drm_freeze(ddev); +} + +static const struct dev_pm_ops vbox_pm_ops = { + .suspend = vbox_pm_suspend, + .resume = vbox_pm_resume, + .freeze = vbox_pm_freeze, + .thaw = vbox_pm_thaw, + .poweroff = vbox_pm_poweroff, + .restore = vbox_pm_resume, +}; + +static struct pci_driver vbox_pci_driver = { + .name = DRIVER_NAME, + .id_table = pciidlist, + .probe = vbox_pci_probe, + .remove = vbox_pci_remove, + .driver.pm = &vbox_pm_ops, +}; + +static const struct file_operations vbox_fops = { + .owner = THIS_MODULE, + .open = drm_open, + .release = drm_release, + .unlocked_ioctl = drm_ioctl, + .mmap = vbox_mmap, + .poll = drm_poll, +#ifdef CONFIG_COMPAT + .compat_ioctl = drm_compat_ioctl, +#endif + .read = drm_read, +}; + +static int vbox_master_set(struct drm_device *dev, + struct drm_file *file_priv, bool from_open) +{ + struct vbox_private *vbox = dev->dev_private; + + /* + * We do not yet know whether the new 
owner can handle hotplug, so we
+	 * do not advertise dynamic modes on the first query and send a
+	 * tentative hotplug notification after that to see if they query again.
+	 */
+	vbox->initial_mode_queried = false;
+
+	mutex_lock(&vbox->hw_mutex);
+	/*
+	 * Disable VBVA when someone releases master in case the next person
+	 * tries to do VESA.
+	 */
+	/** @todo work out if anyone is likely to and whether it will work. */
+	/*
+	 * Update: we also disable it because if the new master does not do
+	 * dirty rectangle reporting (e.g. old versions of Plymouth) then at
+	 * least the first screen will still be updated. We enable it as soon
+	 * as we receive a dirty rectangle report.
+	 */
+	vbox_disable_accel(vbox);
+	mutex_unlock(&vbox->hw_mutex);
+
+	return 0;
+}
+
+static void vbox_master_drop(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct vbox_private *vbox = dev->dev_private;
+
+	/* See vbox_master_set() */
+	vbox->initial_mode_queried = false;
+
+	mutex_lock(&vbox->hw_mutex);
+	vbox_disable_accel(vbox);
+	mutex_unlock(&vbox->hw_mutex);
+}
+
+static struct drm_driver driver = {
+	.driver_features =
+	    DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
+	    DRIVER_PRIME,
+	.dev_priv_size = 0,
+
+	.load = vbox_driver_load,
+	.unload = vbox_driver_unload,
+	.lastclose = vbox_driver_lastclose,
+	.master_set = vbox_master_set,
+	.master_drop = vbox_master_drop,
+	.set_busid = drm_pci_set_busid,
+
+	.fops = &vbox_fops,
+	.irq_handler = vbox_irq_handler,
+	.name = DRIVER_NAME,
+	.desc = DRIVER_DESC,
+	.date = DRIVER_DATE,
+	.major = DRIVER_MAJOR,
+	.minor = DRIVER_MINOR,
+	.patchlevel = DRIVER_PATCHLEVEL,
+
+	.gem_free_object = vbox_gem_free_object,
+	.dumb_create = vbox_dumb_create,
+	.dumb_map_offset = vbox_dumb_mmap_offset,
+	.dumb_destroy = drm_gem_dumb_destroy,
+	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+	.gem_prime_export = drm_gem_prime_export,
+	.gem_prime_import = drm_gem_prime_import,
+	.gem_prime_pin = vbox_gem_prime_pin,
+	.gem_prime_unpin = vbox_gem_prime_unpin,
+	.gem_prime_get_sg_table = vbox_gem_prime_get_sg_table,
+	.gem_prime_import_sg_table = vbox_gem_prime_import_sg_table,
+	.gem_prime_vmap = vbox_gem_prime_vmap,
+	.gem_prime_vunmap = vbox_gem_prime_vunmap,
+	.gem_prime_mmap = vbox_gem_prime_mmap,
+};
+
+static int __init vbox_init(void)
+{
+#ifdef CONFIG_VGA_CONSOLE
+	if (vgacon_text_force() && vbox_modeset == -1)
+		return -EINVAL;
+#endif
+
+	if (vbox_modeset == 0)
+		return -EINVAL;
+
+	return drm_pci_init(&driver, &vbox_pci_driver);
+}
+
+static void __exit vbox_exit(void)
+{
+	drm_pci_exit(&driver, &vbox_pci_driver);
+}
+
+module_init(vbox_init);
+module_exit(vbox_exit);
+
+MODULE_AUTHOR("Oracle Corporation");
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/staging/vboxvideo/vbox_drv.h b/drivers/staging/vboxvideo/vbox_drv.h
new file mode 100644
index 000000000000..4b9302703b36
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbox_drv.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2013-2017 Oracle Corporation
+ * This file is based on ast_drv.h
+ * Copyright 2012 Red Hat Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ +#ifndef __VBOX_DRV_H__ +#define __VBOX_DRV_H__ + +#include <linux/genalloc.h> +#include <linux/io.h> +#include <linux/string.h> +#include <linux/version.h> + +#include <drm/drmP.h> +#include <drm/drm_encoder.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_gem.h> + +#include <drm/ttm/ttm_bo_api.h> +#include <drm/ttm/ttm_bo_driver.h> +#include <drm/ttm/ttm_placement.h> +#include <drm/ttm/ttm_memory.h> +#include <drm/ttm/ttm_module.h> + +#include "vboxvideo_guest.h" +#include "vboxvideo_vbe.h" +#include "hgsmi_ch_setup.h" + +#define DRIVER_NAME "vboxvideo" +#define DRIVER_DESC "Oracle VM VirtualBox Graphics Card" +#define DRIVER_DATE "20130823" + +#define DRIVER_MAJOR 1 +#define DRIVER_MINOR 0 +#define DRIVER_PATCHLEVEL 0 + +#define VBOX_MAX_CURSOR_WIDTH 64 +#define VBOX_MAX_CURSOR_HEIGHT 64 +#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT) +#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8) + +#define VBOX_MAX_SCREENS 32 + +#define GUEST_HEAP_OFFSET(vbox) ((vbox)->full_vram_size - \ + VBVA_ADAPTER_INFORMATION_SIZE) +#define GUEST_HEAP_SIZE VBVA_ADAPTER_INFORMATION_SIZE +#define GUEST_HEAP_USABLE_SIZE (VBVA_ADAPTER_INFORMATION_SIZE - \ + sizeof(struct hgsmi_host_flags)) +#define HOST_FLAGS_OFFSET GUEST_HEAP_USABLE_SIZE + +struct vbox_fbdev; + +struct vbox_private { + struct drm_device *dev; + + u8 __iomem *guest_heap; + u8 __iomem *vbva_buffers; + struct gen_pool *guest_pool; + struct vbva_buf_ctx *vbva_info; + bool any_pitch; + u32 num_crtcs; + /** Amount of available VRAM, including space used for buffers. */ + u32 full_vram_size; + /** Amount of available VRAM, not including space used for buffers. */ + u32 available_vram_size; + /** Array of structures for receiving mode hints. */ + struct vbva_modehint *last_mode_hints; + + struct vbox_fbdev *fbdev; + + int fb_mtrr; + + struct { + struct drm_global_reference mem_global_ref; + struct ttm_bo_global_ref bo_global_ref; + struct ttm_bo_device bdev; + } ttm; + + struct mutex hw_mutex; /* protects modeset and accel/vbva accesses */ + /** + * We decide whether or not user-space supports display hot-plug + * depending on whether they react to a hot-plug event after the initial + * mode query. 
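An editorial aside, not part of this commit, on the guest-heap macros defined earlier in this header: a sketch of the VRAM carve-up they describe, with every concrete number hypothetical and pr_info() used only to make the arithmetic visible.

static void example_vram_layout(u32 full_vram_size)
{
	/* The guest heap is the final VBVA_ADAPTER_INFORMATION_SIZE bytes. */
	u32 heap_start = full_vram_size - VBVA_ADAPTER_INFORMATION_SIZE;

	/*
	 * The struct hgsmi_host_flags at HOST_FLAGS_OFFSET within the heap
	 * is reserved for the host, so only GUEST_HEAP_USABLE_SIZE bytes
	 * are handed to the allocator.
	 */
	pr_info("guest heap at %#x, usable %#x of %#x bytes\n",
		heap_start, (u32)GUEST_HEAP_USABLE_SIZE,
		(u32)GUEST_HEAP_SIZE);
}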
+ */ + bool initial_mode_queried; + struct work_struct hotplug_work; + u32 input_mapping_width; + u32 input_mapping_height; + /** + * Is user-space using an X.Org-style layout of one large frame-buffer + * encompassing all screen ones or is the fbdev console active? + */ + bool single_framebuffer; + u32 cursor_width; + u32 cursor_height; + u32 cursor_hot_x; + u32 cursor_hot_y; + size_t cursor_data_size; + u8 cursor_data[CURSOR_DATA_SIZE]; +}; + +#undef CURSOR_PIXEL_COUNT +#undef CURSOR_DATA_SIZE + +int vbox_driver_load(struct drm_device *dev, unsigned long flags); +void vbox_driver_unload(struct drm_device *dev); +void vbox_driver_lastclose(struct drm_device *dev); + +struct vbox_gem_object; + +struct vbox_connector { + struct drm_connector base; + char name[32]; + struct vbox_crtc *vbox_crtc; + struct { + u16 width; + u16 height; + bool disconnected; + } mode_hint; +}; + +struct vbox_crtc { + struct drm_crtc base; + bool blanked; + bool disconnected; + unsigned int crtc_id; + u32 fb_offset; + bool cursor_enabled; + u16 x_hint; + u16 y_hint; +}; + +struct vbox_encoder { + struct drm_encoder base; +}; + +struct vbox_framebuffer { + struct drm_framebuffer base; + struct drm_gem_object *obj; +}; + +struct vbox_fbdev { + struct drm_fb_helper helper; + struct vbox_framebuffer afb; + int size; + struct ttm_bo_kmap_obj mapping; + int x1, y1, x2, y2; /* dirty rect */ + spinlock_t dirty_lock; +}; + +#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base) +#define to_vbox_connector(x) container_of(x, struct vbox_connector, base) +#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base) +#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base) + +int vbox_mode_init(struct drm_device *dev); +void vbox_mode_fini(struct drm_device *dev); + +#define DRM_MODE_FB_CMD drm_mode_fb_cmd2 +#define CRTC_FB(crtc) ((crtc)->primary->fb) + +void vbox_enable_accel(struct vbox_private *vbox); +void vbox_disable_accel(struct vbox_private *vbox); +void vbox_report_caps(struct vbox_private *vbox); + +void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb, + struct drm_clip_rect *rects, + unsigned int num_rects); + +int vbox_framebuffer_init(struct drm_device *dev, + struct vbox_framebuffer *vbox_fb, + const struct DRM_MODE_FB_CMD *mode_cmd, + struct drm_gem_object *obj); + +int vbox_fbdev_init(struct drm_device *dev); +void vbox_fbdev_fini(struct drm_device *dev); +void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr); + +struct vbox_bo { + struct ttm_buffer_object bo; + struct ttm_placement placement; + struct ttm_bo_kmap_obj kmap; + struct drm_gem_object gem; + struct ttm_place placements[3]; + int pin_count; +}; + +#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem) + +static inline struct vbox_bo *vbox_bo(struct ttm_buffer_object *bo) +{ + return container_of(bo, struct vbox_bo, bo); +} + +#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base) + +int vbox_dumb_create(struct drm_file *file, + struct drm_device *dev, + struct drm_mode_create_dumb *args); + +void vbox_gem_free_object(struct drm_gem_object *obj); +int vbox_dumb_mmap_offset(struct drm_file *file, + struct drm_device *dev, + u32 handle, u64 *offset); + +#define DRM_FILE_PAGE_OFFSET (0x10000000ULL >> PAGE_SHIFT) + +int vbox_mm_init(struct vbox_private *vbox); +void vbox_mm_fini(struct vbox_private *vbox); + +int vbox_bo_create(struct drm_device *dev, int size, int align, + u32 flags, struct vbox_bo **pvboxbo); + +int vbox_gem_create(struct drm_device 
*dev, + u32 size, bool iskernel, struct drm_gem_object **obj); + +int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr); +int vbox_bo_unpin(struct vbox_bo *bo); + +static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait) +{ + int ret; + + ret = ttm_bo_reserve(&bo->bo, true, no_wait, NULL); + if (ret) { + if (ret != -ERESTARTSYS && ret != -EBUSY) + DRM_ERROR("reserve failed %p\n", bo); + return ret; + } + return 0; +} + +static inline void vbox_bo_unreserve(struct vbox_bo *bo) +{ + ttm_bo_unreserve(&bo->bo); +} + +void vbox_ttm_placement(struct vbox_bo *bo, int domain); +int vbox_bo_push_sysram(struct vbox_bo *bo); +int vbox_mmap(struct file *filp, struct vm_area_struct *vma); + +/* vbox_prime.c */ +int vbox_gem_prime_pin(struct drm_gem_object *obj); +void vbox_gem_prime_unpin(struct drm_gem_object *obj); +struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj); +struct drm_gem_object *vbox_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *table); +void *vbox_gem_prime_vmap(struct drm_gem_object *obj); +void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); +int vbox_gem_prime_mmap(struct drm_gem_object *obj, + struct vm_area_struct *area); + +/* vbox_irq.c */ +int vbox_irq_init(struct vbox_private *vbox); +void vbox_irq_fini(struct vbox_private *vbox); +void vbox_report_hotplug(struct vbox_private *vbox); +irqreturn_t vbox_irq_handler(int irq, void *arg); + +/* vbox_hgsmi.c */ +void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size, + u8 channel, u16 channel_info); +void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf); +int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf); + +static inline void vbox_write_ioport(u16 index, u16 data) +{ + outw(index, VBE_DISPI_IOPORT_INDEX); + outw(data, VBE_DISPI_IOPORT_DATA); +} + +#endif diff --git a/drivers/staging/vboxvideo/vbox_err.h b/drivers/staging/vboxvideo/vbox_err.h new file mode 100644 index 000000000000..562db8630eb0 --- /dev/null +++ b/drivers/staging/vboxvideo/vbox_err.h @@ -0,0 +1,50 @@ +/* + * Copyright (C) 2017 Oracle Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
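Editorial sketch, not part of the commit: how driver code typically folds the VirtualBox status codes defined just below back into Linux errno values; compare hgsmi_get_mode_hints() in modesetting.c, which collapses any VERR_* in p->rc to -EIO. The mapping chosen here is illustrative only.

static int example_vbox_status_to_errno(int rc)
{
	if (RT_SUCCESS(rc))
		return 0;

	switch (rc) {
	case VERR_NO_MEMORY:
		return -ENOMEM;
	case VERR_NOT_SUPPORTED:
		return -ENOTSUPP;
	default:
		/* Anything else is reported as a generic I/O error. */
		return -EIO;
	}
}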
+ */ + +#ifndef __VBOX_ERR_H__ +#define __VBOX_ERR_H__ + +/** + * @name VirtualBox virtual-hardware error macros + * @{ + */ + +#define VINF_SUCCESS 0 +#define VERR_INVALID_PARAMETER (-2) +#define VERR_INVALID_POINTER (-6) +#define VERR_NO_MEMORY (-8) +#define VERR_NOT_IMPLEMENTED (-12) +#define VERR_INVALID_FUNCTION (-36) +#define VERR_NOT_SUPPORTED (-37) +#define VERR_TOO_MUCH_DATA (-42) +#define VERR_INVALID_STATE (-79) +#define VERR_OUT_OF_RESOURCES (-80) +#define VERR_ALREADY_EXISTS (-105) +#define VERR_INTERNAL_ERROR (-225) + +#define RT_SUCCESS_NP(rc) ((int)(rc) >= VINF_SUCCESS) +#define RT_SUCCESS(rc) (likely(RT_SUCCESS_NP(rc))) +#define RT_FAILURE(rc) (unlikely(!RT_SUCCESS_NP(rc))) + +/** @} */ + +#endif diff --git a/drivers/staging/vboxvideo/vbox_fb.c b/drivers/staging/vboxvideo/vbox_fb.c new file mode 100644 index 000000000000..35f6d9f8c203 --- /dev/null +++ b/drivers/staging/vboxvideo/vbox_fb.c @@ -0,0 +1,412 @@ +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_fb.c + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com, + */ +#include <linux/module.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/string.h> +#include <linux/mm.h> +#include <linux/tty.h> +#include <linux/sysrq.h> +#include <linux/delay.h> +#include <linux/fb.h> +#include <linux/init.h> + +#include <drm/drmP.h> +#include <drm/drm_crtc.h> +#include <drm/drm_fb_helper.h> +#include <drm/drm_crtc_helper.h> + +#include "vbox_drv.h" +#include "vboxvideo.h" + +#define VBOX_DIRTY_DELAY (HZ / 30) +/** + * Tell the host about dirty rectangles to update. + */ +static void vbox_dirty_update(struct vbox_fbdev *fbdev, + int x, int y, int width, int height) +{ + struct drm_gem_object *obj; + struct vbox_bo *bo; + int ret = -EBUSY; + bool store_for_later = false; + int x2, y2; + unsigned long flags; + struct drm_clip_rect rect; + + obj = fbdev->afb.obj; + bo = gem_to_vbox_bo(obj); + + /* + * try and reserve the BO, if we fail with busy + * then the BO is being moved and we should + * store up the damage until later. 
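As an editorial illustration of the store-for-later path described in this comment (not part of the commit): accumulated damage is kept as one bounding rectangle, so deferring is just a min/max union; min() and max() come from linux/kernel.h, and the caller is assumed to hold fbdev->dirty_lock, as vbox_dirty_update() below does.

static void example_union_damage(struct vbox_fbdev *fbdev,
				 int x1, int y1, int x2, int y2)
{
	/* Grow the stored bounding box to cover the new rectangle. */
	fbdev->x1 = min(fbdev->x1, x1);
	fbdev->y1 = min(fbdev->y1, y1);
	fbdev->x2 = max(fbdev->x2, x2);
	fbdev->y2 = max(fbdev->y2, y2);
}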
+ */ + if (drm_can_sleep()) + ret = vbox_bo_reserve(bo, true); + if (ret) { + if (ret != -EBUSY) + return; + + store_for_later = true; + } + + x2 = x + width - 1; + y2 = y + height - 1; + spin_lock_irqsave(&fbdev->dirty_lock, flags); + + if (fbdev->y1 < y) + y = fbdev->y1; + if (fbdev->y2 > y2) + y2 = fbdev->y2; + if (fbdev->x1 < x) + x = fbdev->x1; + if (fbdev->x2 > x2) + x2 = fbdev->x2; + + if (store_for_later) { + fbdev->x1 = x; + fbdev->x2 = x2; + fbdev->y1 = y; + fbdev->y2 = y2; + spin_unlock_irqrestore(&fbdev->dirty_lock, flags); + return; + } + + fbdev->x1 = INT_MAX; + fbdev->y1 = INT_MAX; + fbdev->x2 = 0; + fbdev->y2 = 0; + + spin_unlock_irqrestore(&fbdev->dirty_lock, flags); + + /* + * Not sure why the original code subtracted 1 here, but I will keep + * it that way to avoid unnecessary differences. + */ + rect.x1 = x; + rect.x2 = x2 + 1; + rect.y1 = y; + rect.y2 = y2 + 1; + vbox_framebuffer_dirty_rectangles(&fbdev->afb.base, &rect, 1); + + vbox_bo_unreserve(bo); +} + +#ifdef CONFIG_FB_DEFERRED_IO +static void vbox_deferred_io(struct fb_info *info, struct list_head *pagelist) +{ + struct vbox_fbdev *fbdev = info->par; + unsigned long start, end, min, max; + struct page *page; + int y1, y2; + + min = ULONG_MAX; + max = 0; + list_for_each_entry(page, pagelist, lru) { + start = page->index << PAGE_SHIFT; + end = start + PAGE_SIZE - 1; + min = min(min, start); + max = max(max, end); + } + + if (min < max) { + y1 = min / info->fix.line_length; + y2 = (max / info->fix.line_length) + 1; + DRM_INFO("%s: Calling dirty update: 0, %d, %d, %d\n", + __func__, y1, info->var.xres, y2 - y1 - 1); + vbox_dirty_update(fbdev, 0, y1, info->var.xres, y2 - y1 - 1); + } +} + +static struct fb_deferred_io vbox_defio = { + .delay = VBOX_DIRTY_DELAY, + .deferred_io = vbox_deferred_io, +}; +#endif + +static void vbox_fillrect(struct fb_info *info, const struct fb_fillrect *rect) +{ + struct vbox_fbdev *fbdev = info->par; + + sys_fillrect(info, rect); + vbox_dirty_update(fbdev, rect->dx, rect->dy, rect->width, rect->height); +} + +static void vbox_copyarea(struct fb_info *info, const struct fb_copyarea *area) +{ + struct vbox_fbdev *fbdev = info->par; + + sys_copyarea(info, area); + vbox_dirty_update(fbdev, area->dx, area->dy, area->width, area->height); +} + +static void vbox_imageblit(struct fb_info *info, const struct fb_image *image) +{ + struct vbox_fbdev *fbdev = info->par; + + sys_imageblit(info, image); + vbox_dirty_update(fbdev, image->dx, image->dy, image->width, + image->height); +} + +static struct fb_ops vboxfb_ops = { + .owner = THIS_MODULE, + .fb_check_var = drm_fb_helper_check_var, + .fb_set_par = drm_fb_helper_set_par, + .fb_fillrect = vbox_fillrect, + .fb_copyarea = vbox_copyarea, + .fb_imageblit = vbox_imageblit, + .fb_pan_display = drm_fb_helper_pan_display, + .fb_blank = drm_fb_helper_blank, + .fb_setcmap = drm_fb_helper_setcmap, + .fb_debug_enter = drm_fb_helper_debug_enter, + .fb_debug_leave = drm_fb_helper_debug_leave, +}; + +static int vboxfb_create_object(struct vbox_fbdev *fbdev, + struct DRM_MODE_FB_CMD *mode_cmd, + struct drm_gem_object **gobj_p) +{ + struct drm_device *dev = fbdev->helper.dev; + u32 size; + struct drm_gem_object *gobj; + u32 pitch = mode_cmd->pitches[0]; + int ret; + + size = pitch * mode_cmd->height; + ret = vbox_gem_create(dev, size, true, &gobj); + if (ret) + return ret; + + *gobj_p = gobj; + + return 0; +} + +static int vboxfb_create(struct drm_fb_helper *helper, + struct drm_fb_helper_surface_size *sizes) +{ + struct vbox_fbdev *fbdev = + 
container_of(helper, struct vbox_fbdev, helper); + struct drm_device *dev = fbdev->helper.dev; + struct DRM_MODE_FB_CMD mode_cmd; + struct drm_framebuffer *fb; + struct fb_info *info; + struct device *device = &dev->pdev->dev; + struct drm_gem_object *gobj; + struct vbox_bo *bo; + int size, ret; + u32 pitch; + + mode_cmd.width = sizes->surface_width; + mode_cmd.height = sizes->surface_height; + pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8); + mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp, + sizes->surface_depth); + mode_cmd.pitches[0] = pitch; + + size = pitch * mode_cmd.height; + + ret = vboxfb_create_object(fbdev, &mode_cmd, &gobj); + if (ret) { + DRM_ERROR("failed to create fbcon backing object %d\n", ret); + return ret; + } + + ret = vbox_framebuffer_init(dev, &fbdev->afb, &mode_cmd, gobj); + if (ret) + return ret; + + bo = gem_to_vbox_bo(gobj); + + ret = vbox_bo_reserve(bo, false); + if (ret) + return ret; + + ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, NULL); + if (ret) { + vbox_bo_unreserve(bo); + return ret; + } + + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap); + vbox_bo_unreserve(bo); + if (ret) { + DRM_ERROR("failed to kmap fbcon\n"); + return ret; + } + + info = framebuffer_alloc(0, device); + if (!info) + return -ENOMEM; + info->par = fbdev; + + fbdev->size = size; + + fb = &fbdev->afb.base; + fbdev->helper.fb = fb; + fbdev->helper.fbdev = info; + + strcpy(info->fix.id, "vboxdrmfb"); + + /* + * The last flag forces a mode set on VT switches even if the kernel + * does not think it is needed. + */ + info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT | + FBINFO_MISC_ALWAYS_SETPAR; + info->fbops = &vboxfb_ops; + + ret = fb_alloc_cmap(&info->cmap, 256, 0); + if (ret) + return -ENOMEM; + + /* + * This seems to be done for safety checking that the framebuffer + * is not registered twice by different drivers. + */ + info->apertures = alloc_apertures(1); + if (!info->apertures) + return -ENOMEM; + info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0); + info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0); + + drm_fb_helper_fill_fix(info, fb->pitches[0], fb->format->depth); + drm_fb_helper_fill_var(info, &fbdev->helper, sizes->fb_width, + sizes->fb_height); + + info->screen_base = bo->kmap.virtual; + info->screen_size = size; + +#ifdef CONFIG_FB_DEFERRED_IO + info->fbdefio = &vbox_defio; + fb_deferred_io_init(info); +#endif + + info->pixmap.flags = FB_PIXMAP_SYSTEM; + + DRM_DEBUG_KMS("allocated %dx%d\n", fb->width, fb->height); + + return 0; +} + +static void vbox_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, + u16 blue, int regno) +{ +} + +static void vbox_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, + u16 *blue, int regno) +{ + *red = regno; + *green = regno; + *blue = regno; +} + +static struct drm_fb_helper_funcs vbox_fb_helper_funcs = { + .gamma_set = vbox_fb_gamma_set, + .gamma_get = vbox_fb_gamma_get, + .fb_probe = vboxfb_create, +}; + +void vbox_fbdev_fini(struct drm_device *dev) +{ + struct vbox_private *vbox = dev->dev_private; + struct vbox_fbdev *fbdev = vbox->fbdev; + struct vbox_framebuffer *afb = &fbdev->afb; + + drm_fb_helper_unregister_fbi(&fbdev->helper); + + if (afb->obj) { + struct vbox_bo *bo = gem_to_vbox_bo(afb->obj); + + if (!vbox_bo_reserve(bo, false)) { + if (bo->kmap.virtual) + ttm_bo_kunmap(&bo->kmap); + /* + * QXL does this, but is it really needed before + * freeing? 
+ */ + if (bo->pin_count) + vbox_bo_unpin(bo); + vbox_bo_unreserve(bo); + } + drm_gem_object_unreference_unlocked(afb->obj); + afb->obj = NULL; + } + drm_fb_helper_fini(&fbdev->helper); + + drm_framebuffer_unregister_private(&afb->base); + drm_framebuffer_cleanup(&afb->base); +} + +int vbox_fbdev_init(struct drm_device *dev) +{ + struct vbox_private *vbox = dev->dev_private; + struct vbox_fbdev *fbdev; + int ret; + + fbdev = devm_kzalloc(dev->dev, sizeof(*fbdev), GFP_KERNEL); + if (!fbdev) + return -ENOMEM; + + vbox->fbdev = fbdev; + spin_lock_init(&fbdev->dirty_lock); + + drm_fb_helper_prepare(dev, &fbdev->helper, &vbox_fb_helper_funcs); + ret = drm_fb_helper_init(dev, &fbdev->helper, vbox->num_crtcs); + if (ret) + return ret; + + ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper); + if (ret) + goto err_fini; + + /* disable all the possible outputs/crtcs before entering KMS mode */ + drm_helper_disable_unused_functions(dev); + + ret = drm_fb_helper_initial_config(&fbdev->helper, 32); + if (ret) + goto err_fini; + + return 0; + +err_fini: + drm_fb_helper_fini(&fbdev->helper); + return ret; +} + +void vbox_fbdev_set_base(struct vbox_private *vbox, unsigned long gpu_addr) +{ + struct fb_info *fbdev = vbox->fbdev->helper.fbdev; + + fbdev->fix.smem_start = fbdev->apertures->ranges[0].base + gpu_addr; + fbdev->fix.smem_len = vbox->available_vram_size - gpu_addr; +} diff --git a/drivers/staging/vboxvideo/vbox_hgsmi.c b/drivers/staging/vboxvideo/vbox_hgsmi.c new file mode 100644 index 000000000000..822fd31121cb --- /dev/null +++ b/drivers/staging/vboxvideo/vbox_hgsmi.c @@ -0,0 +1,115 @@ +/* + * Copyright (C) 2017 Oracle Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. 
+ * + * Authors: Hans de Goede <hdegoede@redhat.com> + */ + +#include "vbox_drv.h" +#include "vboxvideo_vbe.h" +#include "hgsmi_defs.h" + +/* One-at-a-Time Hash from http://www.burtleburtle.net/bob/hash/doobs.html */ +static u32 hgsmi_hash_process(u32 hash, const u8 *data, int size) +{ + while (size--) { + hash += *data++; + hash += (hash << 10); + hash ^= (hash >> 6); + } + + return hash; +} + +static u32 hgsmi_hash_end(u32 hash) +{ + hash += (hash << 3); + hash ^= (hash >> 11); + hash += (hash << 15); + + return hash; +} + +/* Not really a checksum but that is the naming used in all vbox code */ +static u32 hgsmi_checksum(u32 offset, + const struct hgsmi_buffer_header *header, + const struct hgsmi_buffer_tail *tail) +{ + u32 checksum; + + checksum = hgsmi_hash_process(0, (u8 *)&offset, sizeof(offset)); + checksum = hgsmi_hash_process(checksum, (u8 *)header, sizeof(*header)); + /* 4 -> Do not checksum the checksum itself */ + checksum = hgsmi_hash_process(checksum, (u8 *)tail, 4); + + return hgsmi_hash_end(checksum); +} + +void *hgsmi_buffer_alloc(struct gen_pool *guest_pool, size_t size, + u8 channel, u16 channel_info) +{ + struct hgsmi_buffer_header *h; + struct hgsmi_buffer_tail *t; + size_t total_size; + dma_addr_t offset; + + total_size = size + sizeof(*h) + sizeof(*t); + h = gen_pool_dma_alloc(guest_pool, total_size, &offset); + if (!h) + return NULL; + + t = (struct hgsmi_buffer_tail *)((u8 *)h + sizeof(*h) + size); + + h->flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE; + h->data_size = size; + h->channel = channel; + h->channel_info = channel_info; + memset(&h->u.header_data, 0, sizeof(h->u.header_data)); + + t->reserved = 0; + t->checksum = hgsmi_checksum(offset, h, t); + + return (u8 *)h + sizeof(*h); +} + +void hgsmi_buffer_free(struct gen_pool *guest_pool, void *buf) +{ + struct hgsmi_buffer_header *h = + (struct hgsmi_buffer_header *)((u8 *)buf - sizeof(*h)); + size_t total_size = h->data_size + sizeof(*h) + + sizeof(struct hgsmi_buffer_tail); + + gen_pool_free(guest_pool, (unsigned long)h, total_size); +} + +int hgsmi_buffer_submit(struct gen_pool *guest_pool, void *buf) +{ + phys_addr_t offset; + + offset = gen_pool_virt_to_phys(guest_pool, (unsigned long)buf - + sizeof(struct hgsmi_buffer_header)); + outl(offset, VGA_PORT_HGSMI_GUEST); + /* Make the compiler aware that the host has changed memory. */ + mb(); + + return 0; +} diff --git a/drivers/staging/vboxvideo/vbox_irq.c b/drivers/staging/vboxvideo/vbox_irq.c new file mode 100644 index 000000000000..3ca8bec62ac4 --- /dev/null +++ b/drivers/staging/vboxvideo/vbox_irq.c @@ -0,0 +1,197 @@ +/* + * Copyright (C) 2016-2017 Oracle Corporation + * This file is based on qxl_irq.c + * Copyright 2013 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
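An editorial note on vbox_hgsmi.c just above, not part of the commit: the checksum is Bob Jenkins' one-at-a-time hash split into a process step and a finalisation step so it can be chained over several buffers; applied to a single buffer the two steps reduce to the classic single-pass form sketched here.

static u32 example_oat_hash(const u8 *data, size_t size)
{
	u32 hash = 0;

	/* Mixing step, identical to hgsmi_hash_process() above. */
	while (size--) {
		hash += *data++;
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	/* Finalisation, identical to hgsmi_hash_end() above. */
	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash;
}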
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Dave Airlie + * Alon Levy + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ + +#include <drm/drm_crtc_helper.h> + +#include "vbox_drv.h" +#include "vboxvideo.h" + +static void vbox_clear_irq(void) +{ + outl((u32)~0, VGA_PORT_HGSMI_HOST); +} + +static u32 vbox_get_flags(struct vbox_private *vbox) +{ + return readl(vbox->guest_heap + HOST_FLAGS_OFFSET); +} + +void vbox_report_hotplug(struct vbox_private *vbox) +{ + schedule_work(&vbox->hotplug_work); +} + +irqreturn_t vbox_irq_handler(int irq, void *arg) +{ + struct drm_device *dev = (struct drm_device *)arg; + struct vbox_private *vbox = (struct vbox_private *)dev->dev_private; + u32 host_flags = vbox_get_flags(vbox); + + if (!(host_flags & HGSMIHOSTFLAGS_IRQ)) + return IRQ_NONE; + + /* + * Due to a bug in the initial host implementation of hot-plug irqs, + * the hot-plug and cursor capability flags were never cleared. + * Fortunately we can tell when they would have been set by checking + * that the VSYNC flag is not set. + */ + if (host_flags & + (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES) && + !(host_flags & HGSMIHOSTFLAGS_VSYNC)) + vbox_report_hotplug(vbox); + + vbox_clear_irq(); + + return IRQ_HANDLED; +} + +/** + * Check that the position hints provided by the host are suitable for GNOME + * shell (i.e. all screens disjoint and hints for all enabled screens) and if + * not replace them with default ones. Providing valid hints improves the + * chances that we will get a known screen layout for pointer mapping. + */ +static void validate_or_set_position_hints(struct vbox_private *vbox) +{ + struct vbva_modehint *hintsi, *hintsj; + bool valid = true; + u16 currentx = 0; + int i, j; + + for (i = 0; i < vbox->num_crtcs; ++i) { + for (j = 0; j < i; ++j) { + hintsi = &vbox->last_mode_hints[i]; + hintsj = &vbox->last_mode_hints[j]; + + if (hintsi->enabled && hintsj->enabled) { + if (hintsi->dx >= 0xffff || + hintsi->dy >= 0xffff || + hintsj->dx >= 0xffff || + hintsj->dy >= 0xffff || + (hintsi->dx < + hintsj->dx + (hintsj->cx & 0x8fff) && + hintsi->dx + (hintsi->cx & 0x8fff) > + hintsj->dx) || + (hintsi->dy < + hintsj->dy + (hintsj->cy & 0x8fff) && + hintsi->dy + (hintsi->cy & 0x8fff) > + hintsj->dy)) + valid = false; + } + } + } + if (!valid) + for (i = 0; i < vbox->num_crtcs; ++i) { + if (vbox->last_mode_hints[i].enabled) { + vbox->last_mode_hints[i].dx = currentx; + vbox->last_mode_hints[i].dy = 0; + currentx += + vbox->last_mode_hints[i].cx & 0x8fff; + } + } +} + +/** + * Query the host for the most recent video mode hints. 
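An editorial sketch, not part of the commit, of the disjointness test validate_or_set_position_hints() above applies pairwise: two enabled hints collide exactly when their rectangles intersect on both axes. The real code additionally rejects any hint whose dx or dy is 0xffff or more, the host's "unset" marker.

static bool example_hints_overlap(const struct vbva_modehint *a,
				  const struct vbva_modehint *b)
{
	/* Widths and heights are masked the same way as in the code above. */
	return a->dx < b->dx + (b->cx & 0x8fff) &&
	       a->dx + (a->cx & 0x8fff) > b->dx &&
	       a->dy < b->dy + (b->cy & 0x8fff) &&
	       a->dy + (a->cy & 0x8fff) > b->dy;
}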
+ */ +static void vbox_update_mode_hints(struct vbox_private *vbox) +{ + struct drm_device *dev = vbox->dev; + struct drm_connector *connector; + struct vbox_connector *vbox_conn; + struct vbva_modehint *hints; + u16 flags; + bool disconnected; + unsigned int crtc_id; + int ret; + + ret = hgsmi_get_mode_hints(vbox->guest_pool, vbox->num_crtcs, + vbox->last_mode_hints); + if (ret) { + DRM_ERROR("vboxvideo: hgsmi_get_mode_hints failed: %d\n", ret); + return; + } + + validate_or_set_position_hints(vbox); + drm_modeset_lock_all(dev); + list_for_each_entry(connector, &dev->mode_config.connector_list, head) { + vbox_conn = to_vbox_connector(connector); + + hints = &vbox->last_mode_hints[vbox_conn->vbox_crtc->crtc_id]; + if (hints->magic != VBVAMODEHINT_MAGIC) + continue; + + disconnected = !(hints->enabled); + crtc_id = vbox_conn->vbox_crtc->crtc_id; + vbox_conn->mode_hint.width = hints->cx & 0x8fff; + vbox_conn->mode_hint.height = hints->cy & 0x8fff; + vbox_conn->vbox_crtc->x_hint = hints->dx; + vbox_conn->vbox_crtc->y_hint = hints->dy; + vbox_conn->mode_hint.disconnected = disconnected; + + if (vbox_conn->vbox_crtc->disconnected == disconnected) + continue; + + if (disconnected) + flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_DISABLED; + else + flags = VBVA_SCREEN_F_ACTIVE | VBVA_SCREEN_F_BLANK; + + hgsmi_process_display_info(vbox->guest_pool, crtc_id, 0, 0, 0, + hints->cx * 4, hints->cx, + hints->cy, 0, flags); + + vbox_conn->vbox_crtc->disconnected = disconnected; + } + drm_modeset_unlock_all(dev); +} + +static void vbox_hotplug_worker(struct work_struct *work) +{ + struct vbox_private *vbox = container_of(work, struct vbox_private, + hotplug_work); + + vbox_update_mode_hints(vbox); + drm_kms_helper_hotplug_event(vbox->dev); +} + +int vbox_irq_init(struct vbox_private *vbox) +{ + INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker); + vbox_update_mode_hints(vbox); + + return drm_irq_install(vbox->dev, vbox->dev->pdev->irq); +} + +void vbox_irq_fini(struct vbox_private *vbox) +{ + drm_irq_uninstall(vbox->dev); + flush_work(&vbox->hotplug_work); +} diff --git a/drivers/staging/vboxvideo/vbox_main.c b/drivers/staging/vboxvideo/vbox_main.c new file mode 100644 index 000000000000..d0c6ec75a3c7 --- /dev/null +++ b/drivers/staging/vboxvideo/vbox_main.c @@ -0,0 +1,534 @@ +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_main.c + * Copyright 2012 Red Hat Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * Authors: Dave Airlie <airlied@redhat.com>, + * Michael Thayer <michael.thayer@oracle.com, + * Hans de Goede <hdegoede@redhat.com> + */ +#include <drm/drm_fb_helper.h> +#include <drm/drm_crtc_helper.h> + +#include "vbox_drv.h" +#include "vbox_err.h" +#include "vboxvideo_guest.h" +#include "vboxvideo_vbe.h" + +static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb) +{ + struct vbox_framebuffer *vbox_fb = to_vbox_framebuffer(fb); + + if (vbox_fb->obj) + drm_gem_object_unreference_unlocked(vbox_fb->obj); + + drm_framebuffer_cleanup(fb); + kfree(fb); +} + +void vbox_enable_accel(struct vbox_private *vbox) +{ + unsigned int i; + struct vbva_buffer *vbva; + + if (!vbox->vbva_info || !vbox->vbva_buffers) { + /* Should never happen... */ + DRM_ERROR("vboxvideo: failed to set up VBVA.\n"); + return; + } + + for (i = 0; i < vbox->num_crtcs; ++i) { + if (vbox->vbva_info[i].vbva) + continue; + + vbva = (void *)vbox->vbva_buffers + i * VBVA_MIN_BUFFER_SIZE; + if (!vbva_enable(&vbox->vbva_info[i], + vbox->guest_pool, vbva, i)) { + /* very old host or driver error. */ + DRM_ERROR("vboxvideo: vbva_enable failed\n"); + return; + } + } +} + +void vbox_disable_accel(struct vbox_private *vbox) +{ + unsigned int i; + + for (i = 0; i < vbox->num_crtcs; ++i) + vbva_disable(&vbox->vbva_info[i], vbox->guest_pool, i); +} + +void vbox_report_caps(struct vbox_private *vbox) +{ + u32 caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION | + VBVACAPS_IRQ | VBVACAPS_USE_VBVA_ONLY; + + if (vbox->initial_mode_queried) + caps |= VBVACAPS_VIDEO_MODE_HINTS; + + hgsmi_send_caps_info(vbox->guest_pool, caps); +} + +/** + * Send information about dirty rectangles to VBVA. If necessary we enable + * VBVA first, as this is normally disabled after a change of master in case + * the new master does not send dirty rectangle information (is this even + * allowed?) 
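As an editorial usage sketch, not part of the commit: the fbdev code in vbox_fb.c drives the function below with coalesced dirty rectangles; reporting the entire framebuffer is the degenerate case.

static void example_report_whole_fb(struct drm_framebuffer *fb)
{
	/* One clip rectangle covering every pixel of the framebuffer. */
	struct drm_clip_rect rect = {
		.x1 = 0,
		.y1 = 0,
		.x2 = fb->width,
		.y2 = fb->height,
	};

	vbox_framebuffer_dirty_rectangles(fb, &rect, 1);
}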
+ */ +void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb, + struct drm_clip_rect *rects, + unsigned int num_rects) +{ + struct vbox_private *vbox = fb->dev->dev_private; + struct drm_crtc *crtc; + unsigned int i; + + mutex_lock(&vbox->hw_mutex); + list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head) { + if (CRTC_FB(crtc) != fb) + continue; + + vbox_enable_accel(vbox); + + for (i = 0; i < num_rects; ++i) { + struct vbva_cmd_hdr cmd_hdr; + unsigned int crtc_id = to_vbox_crtc(crtc)->crtc_id; + + if ((rects[i].x1 > crtc->x + crtc->hwmode.hdisplay) || + (rects[i].y1 > crtc->y + crtc->hwmode.vdisplay) || + (rects[i].x2 < crtc->x) || + (rects[i].y2 < crtc->y)) + continue; + + cmd_hdr.x = (s16)rects[i].x1; + cmd_hdr.y = (s16)rects[i].y1; + cmd_hdr.w = (u16)rects[i].x2 - rects[i].x1; + cmd_hdr.h = (u16)rects[i].y2 - rects[i].y1; + + if (!vbva_buffer_begin_update(&vbox->vbva_info[crtc_id], + vbox->guest_pool)) + continue; + + vbva_write(&vbox->vbva_info[crtc_id], vbox->guest_pool, + &cmd_hdr, sizeof(cmd_hdr)); + vbva_buffer_end_update(&vbox->vbva_info[crtc_id]); + } + } + mutex_unlock(&vbox->hw_mutex); +} + +static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb, + struct drm_file *file_priv, + unsigned int flags, unsigned int color, + struct drm_clip_rect *rects, + unsigned int num_rects) +{ + vbox_framebuffer_dirty_rectangles(fb, rects, num_rects); + + return 0; +} + +static const struct drm_framebuffer_funcs vbox_fb_funcs = { + .destroy = vbox_user_framebuffer_destroy, + .dirty = vbox_user_framebuffer_dirty, +}; + +int vbox_framebuffer_init(struct drm_device *dev, + struct vbox_framebuffer *vbox_fb, + const struct DRM_MODE_FB_CMD *mode_cmd, + struct drm_gem_object *obj) +{ + int ret; + + drm_helper_mode_fill_fb_struct(dev, &vbox_fb->base, mode_cmd); + vbox_fb->obj = obj; + ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs); + if (ret) { + DRM_ERROR("framebuffer init failed %d\n", ret); + return ret; + } + + return 0; +} + +static struct drm_framebuffer *vbox_user_framebuffer_create( + struct drm_device *dev, + struct drm_file *filp, + const struct drm_mode_fb_cmd2 *mode_cmd) +{ + struct drm_gem_object *obj; + struct vbox_framebuffer *vbox_fb; + int ret = -ENOMEM; + + obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]); + if (!obj) + return ERR_PTR(-ENOENT); + + vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL); + if (!vbox_fb) + goto err_unref_obj; + + ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj); + if (ret) + goto err_free_vbox_fb; + + return &vbox_fb->base; + +err_free_vbox_fb: + kfree(vbox_fb); +err_unref_obj: + drm_gem_object_unreference_unlocked(obj); + return ERR_PTR(ret); +} + +static const struct drm_mode_config_funcs vbox_mode_funcs = { + .fb_create = vbox_user_framebuffer_create, +}; + +static int vbox_accel_init(struct vbox_private *vbox) +{ + unsigned int i; + + vbox->vbva_info = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs, + sizeof(*vbox->vbva_info), GFP_KERNEL); + if (!vbox->vbva_info) + return -ENOMEM; + + /* Take a command buffer for each screen from the end of usable VRAM. 
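Editorial sketch, not part of the commit: once the subtraction just below has reclaimed the tail of usable VRAM, screen i's VBVA command buffer sits at a fixed offset from the start of VRAM, which is all the address arithmetic in vbox_accel_init() amounts to.

static u32 example_vbva_buffer_offset(struct vbox_private *vbox,
				      unsigned int screen)
{
	/*
	 * available_vram_size has already been reduced, so it now marks
	 * the start of the per-screen command buffers.
	 */
	return vbox->available_vram_size + screen * VBVA_MIN_BUFFER_SIZE;
}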
*/ + vbox->available_vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE; + + vbox->vbva_buffers = pci_iomap_range(vbox->dev->pdev, 0, + vbox->available_vram_size, + vbox->num_crtcs * + VBVA_MIN_BUFFER_SIZE); + if (!vbox->vbva_buffers) + return -ENOMEM; + + for (i = 0; i < vbox->num_crtcs; ++i) + vbva_setup_buffer_context(&vbox->vbva_info[i], + vbox->available_vram_size + + i * VBVA_MIN_BUFFER_SIZE, + VBVA_MIN_BUFFER_SIZE); + + return 0; +} + +static void vbox_accel_fini(struct vbox_private *vbox) +{ + vbox_disable_accel(vbox); + pci_iounmap(vbox->dev->pdev, vbox->vbva_buffers); +} + +/** Do we support the 4.3 plus mode hint reporting interface? */ +static bool have_hgsmi_mode_hints(struct vbox_private *vbox) +{ + u32 have_hints, have_cursor; + int ret; + + ret = hgsmi_query_conf(vbox->guest_pool, + VBOX_VBVA_CONF32_MODE_HINT_REPORTING, + &have_hints); + if (ret) + return false; + + ret = hgsmi_query_conf(vbox->guest_pool, + VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING, + &have_cursor); + if (ret) + return false; + + return have_hints == VINF_SUCCESS && have_cursor == VINF_SUCCESS; +} + +static bool vbox_check_supported(u16 id) +{ + u16 dispi_id; + + vbox_write_ioport(VBE_DISPI_INDEX_ID, id); + dispi_id = inw(VBE_DISPI_IOPORT_DATA); + + return dispi_id == id; +} + +/** + * Set up our heaps and data exchange buffers in VRAM before handing the rest + * to the memory manager. + */ +static int vbox_hw_init(struct vbox_private *vbox) +{ + int ret = -ENOMEM; + + vbox->full_vram_size = inl(VBE_DISPI_IOPORT_DATA); + vbox->any_pitch = vbox_check_supported(VBE_DISPI_ID_ANYX); + + DRM_INFO("VRAM %08x\n", vbox->full_vram_size); + + /* Map guest-heap at end of vram */ + vbox->guest_heap = + pci_iomap_range(vbox->dev->pdev, 0, GUEST_HEAP_OFFSET(vbox), + GUEST_HEAP_SIZE); + if (!vbox->guest_heap) + return -ENOMEM; + + /* Create guest-heap mem-pool use 2^4 = 16 byte chunks */ + vbox->guest_pool = gen_pool_create(4, -1); + if (!vbox->guest_pool) + goto err_unmap_guest_heap; + + ret = gen_pool_add_virt(vbox->guest_pool, + (unsigned long)vbox->guest_heap, + GUEST_HEAP_OFFSET(vbox), + GUEST_HEAP_USABLE_SIZE, -1); + if (ret) + goto err_destroy_guest_pool; + + ret = hgsmi_test_query_conf(vbox->guest_pool); + if (ret) { + DRM_ERROR("vboxvideo: hgsmi_test_query_conf failed\n"); + goto err_destroy_guest_pool; + } + + /* Reduce available VRAM size to reflect the guest heap. */ + vbox->available_vram_size = GUEST_HEAP_OFFSET(vbox); + /* Linux drm represents monitors as a 32-bit array. 
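An editorial aside on the guest pool created above, not part of the commit: every HGSMI allocation carries a 16-byte header and an 8-byte tail (see hgsmi_defs.h), and the order-4 pool hands out 2^4 = 16 byte chunks, so the effective footprint of a request rounds up as sketched here. ALIGN() is from linux/kernel.h.

static size_t example_hgsmi_alloc_size(size_t payload)
{
	size_t total = sizeof(struct hgsmi_buffer_header) + payload +
		       sizeof(struct hgsmi_buffer_tail);

	/* gen_pool_create(4, -1) above allocates in 16-byte chunks. */
	return ALIGN(total, 16);
}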
*/ + hgsmi_query_conf(vbox->guest_pool, VBOX_VBVA_CONF32_MONITOR_COUNT, + &vbox->num_crtcs); + vbox->num_crtcs = clamp_t(u32, vbox->num_crtcs, 1, VBOX_MAX_SCREENS); + + if (!have_hgsmi_mode_hints(vbox)) { + ret = -ENOTSUPP; + goto err_destroy_guest_pool; + } + + vbox->last_mode_hints = devm_kcalloc(vbox->dev->dev, vbox->num_crtcs, + sizeof(struct vbva_modehint), + GFP_KERNEL); + if (!vbox->last_mode_hints) { + ret = -ENOMEM; + goto err_destroy_guest_pool; + } + + ret = vbox_accel_init(vbox); + if (ret) + goto err_destroy_guest_pool; + + return 0; + +err_destroy_guest_pool: + gen_pool_destroy(vbox->guest_pool); +err_unmap_guest_heap: + pci_iounmap(vbox->dev->pdev, vbox->guest_heap); + return ret; +} + +static void vbox_hw_fini(struct vbox_private *vbox) +{ + vbox_accel_fini(vbox); + gen_pool_destroy(vbox->guest_pool); + pci_iounmap(vbox->dev->pdev, vbox->guest_heap); +} + +int vbox_driver_load(struct drm_device *dev, unsigned long flags) +{ + struct vbox_private *vbox; + int ret = 0; + + if (!vbox_check_supported(VBE_DISPI_ID_HGSMI)) + return -ENODEV; + + vbox = devm_kzalloc(dev->dev, sizeof(*vbox), GFP_KERNEL); + if (!vbox) + return -ENOMEM; + + dev->dev_private = vbox; + vbox->dev = dev; + + mutex_init(&vbox->hw_mutex); + + ret = vbox_hw_init(vbox); + if (ret) + return ret; + + ret = vbox_mm_init(vbox); + if (ret) + goto err_hw_fini; + + drm_mode_config_init(dev); + + dev->mode_config.funcs = (void *)&vbox_mode_funcs; + dev->mode_config.min_width = 64; + dev->mode_config.min_height = 64; + dev->mode_config.preferred_depth = 24; + dev->mode_config.max_width = VBE_DISPI_MAX_XRES; + dev->mode_config.max_height = VBE_DISPI_MAX_YRES; + + ret = vbox_mode_init(dev); + if (ret) + goto err_drm_mode_cleanup; + + ret = vbox_irq_init(vbox); + if (ret) + goto err_mode_fini; + + ret = vbox_fbdev_init(dev); + if (ret) + goto err_irq_fini; + + return 0; + +err_irq_fini: + vbox_irq_fini(vbox); +err_mode_fini: + vbox_mode_fini(dev); +err_drm_mode_cleanup: + drm_mode_config_cleanup(dev); + vbox_mm_fini(vbox); +err_hw_fini: + vbox_hw_fini(vbox); + return ret; +} + +void vbox_driver_unload(struct drm_device *dev) +{ + struct vbox_private *vbox = dev->dev_private; + + vbox_fbdev_fini(dev); + vbox_irq_fini(vbox); + vbox_mode_fini(dev); + drm_mode_config_cleanup(dev); + vbox_mm_fini(vbox); + vbox_hw_fini(vbox); +} + +/** + * @note this is described in the DRM framework documentation. AST does not + * have it, but we get an oops on driver unload if it is not present. 
+ */ +void vbox_driver_lastclose(struct drm_device *dev) +{ + struct vbox_private *vbox = dev->dev_private; + + if (vbox->fbdev) + drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper); +} + +int vbox_gem_create(struct drm_device *dev, + u32 size, bool iskernel, struct drm_gem_object **obj) +{ + struct vbox_bo *vboxbo; + int ret; + + *obj = NULL; + + size = roundup(size, PAGE_SIZE); + if (size == 0) + return -EINVAL; + + ret = vbox_bo_create(dev, size, 0, 0, &vboxbo); + if (ret) { + if (ret != -ERESTARTSYS) + DRM_ERROR("failed to allocate GEM object\n"); + return ret; + } + + *obj = &vboxbo->gem; + + return 0; +} + +int vbox_dumb_create(struct drm_file *file, + struct drm_device *dev, struct drm_mode_create_dumb *args) +{ + int ret; + struct drm_gem_object *gobj; + u32 handle; + + args->pitch = args->width * ((args->bpp + 7) / 8); + args->size = args->pitch * args->height; + + ret = vbox_gem_create(dev, args->size, false, &gobj); + if (ret) + return ret; + + ret = drm_gem_handle_create(file, gobj, &handle); + drm_gem_object_unreference_unlocked(gobj); + if (ret) + return ret; + + args->handle = handle; + + return 0; +} + +static void vbox_bo_unref(struct vbox_bo **bo) +{ + struct ttm_buffer_object *tbo; + + if ((*bo) == NULL) + return; + + tbo = &((*bo)->bo); + ttm_bo_unref(&tbo); + if (!tbo) + *bo = NULL; +} + +void vbox_gem_free_object(struct drm_gem_object *obj) +{ + struct vbox_bo *vbox_bo = gem_to_vbox_bo(obj); + + vbox_bo_unref(&vbox_bo); +} + +static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo) +{ + return drm_vma_node_offset_addr(&bo->bo.vma_node); +} + +int +vbox_dumb_mmap_offset(struct drm_file *file, + struct drm_device *dev, + u32 handle, u64 *offset) +{ + struct drm_gem_object *obj; + int ret; + struct vbox_bo *bo; + + mutex_lock(&dev->struct_mutex); + obj = drm_gem_object_lookup(file, handle); + if (!obj) { + ret = -ENOENT; + goto out_unlock; + } + + bo = gem_to_vbox_bo(obj); + *offset = vbox_bo_mmap_offset(bo); + + drm_gem_object_unreference(obj); + ret = 0; + +out_unlock: + mutex_unlock(&dev->struct_mutex); + return ret; +} diff --git a/drivers/staging/vboxvideo/vbox_mode.c b/drivers/staging/vboxvideo/vbox_mode.c new file mode 100644 index 000000000000..f2b85f3256fa --- /dev/null +++ b/drivers/staging/vboxvideo/vbox_mode.c @@ -0,0 +1,877 @@ +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_mode.c + * Copyright 2012 Red Hat Inc. + * Parts based on xf86-video-ast + * Copyright (c) 2005 ASPEED Technology Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ +/* + * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com>, + * Hans de Goede <hdegoede@redhat.com> + */ +#include <linux/export.h> +#include <drm/drm_crtc_helper.h> +#include <drm/drm_plane_helper.h> + +#include "vbox_drv.h" +#include "vboxvideo.h" +#include "hgsmi_channels.h" + +static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, + u32 handle, u32 width, u32 height, + s32 hot_x, s32 hot_y); +static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y); + +/** + * Set a graphics mode. Poke any required values into registers, do an HGSMI + * mode set and tell the host we support advanced graphics functions. + */ +static void vbox_do_modeset(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); + struct vbox_private *vbox; + int width, height, bpp, pitch; + unsigned int crtc_id; + u16 flags; + s32 x_offset, y_offset; + + vbox = crtc->dev->dev_private; + width = mode->hdisplay ? mode->hdisplay : 640; + height = mode->vdisplay ? mode->vdisplay : 480; + crtc_id = vbox_crtc->crtc_id; + bpp = crtc->enabled ? CRTC_FB(crtc)->format->cpp[0] * 8 : 32; + pitch = crtc->enabled ? CRTC_FB(crtc)->pitches[0] : width * bpp / 8; + x_offset = vbox->single_framebuffer ? crtc->x : vbox_crtc->x_hint; + y_offset = vbox->single_framebuffer ? crtc->y : vbox_crtc->y_hint; + + /* + * This is the old way of setting graphics modes. It assumed one screen + * and a frame-buffer at the start of video RAM. On older versions of + * VirtualBox, certain parts of the code still assume that the first + * screen is programmed this way, so try to fake it. + */ + if (vbox_crtc->crtc_id == 0 && crtc->enabled && + vbox_crtc->fb_offset / pitch < 0xffff - crtc->y && + vbox_crtc->fb_offset % (bpp / 8) == 0) { + vbox_write_ioport(VBE_DISPI_INDEX_XRES, width); + vbox_write_ioport(VBE_DISPI_INDEX_YRES, height); + vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp); + vbox_write_ioport(VBE_DISPI_INDEX_BPP, + CRTC_FB(crtc)->format->cpp[0] * 8); + vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED); + vbox_write_ioport( + VBE_DISPI_INDEX_X_OFFSET, + vbox_crtc->fb_offset % pitch / bpp * 8 + crtc->x); + vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET, + vbox_crtc->fb_offset / pitch + crtc->y); + } + + flags = VBVA_SCREEN_F_ACTIVE; + flags |= (crtc->enabled && !vbox_crtc->blanked) ? + 0 : VBVA_SCREEN_F_BLANK; + flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0; + hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id, + x_offset, y_offset, + crtc->x * bpp / 8 + crtc->y * pitch, + pitch, width, height, + vbox_crtc->blanked ? 0 : bpp, flags); +} + +static int vbox_set_view(struct drm_crtc *crtc) +{ + struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); + struct vbox_private *vbox = crtc->dev->dev_private; + struct vbva_infoview *p; + + /* + * Tell the host about the view. This design originally targeted the + * Windows XP driver architecture and assumed that each screen would + * have a dedicated frame buffer with the command buffer following it, + * the whole being a "view". The host works out which screen a command + * buffer belongs to by checking whether it is in the first view, then + * whether it is in the second and so on. The first match wins.
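+ * (Overlapping views are therefore resolved in favour of the lowest + * view index.)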
We + * cheat around this by making the first view be the managed memory + * plus the first command buffer, the second the same plus the second + * buffer and so on. + */ + p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p), + HGSMI_CH_VBVA, VBVA_INFO_VIEW); + if (!p) + return -ENOMEM; + + p->view_index = vbox_crtc->crtc_id; + p->view_offset = vbox_crtc->fb_offset; + p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset + + vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE; + p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset; + + hgsmi_buffer_submit(vbox->guest_pool, p); + hgsmi_buffer_free(vbox->guest_pool, p); + + return 0; +} + +static void vbox_crtc_load_lut(struct drm_crtc *crtc) +{ +} + +static void vbox_crtc_dpms(struct drm_crtc *crtc, int mode) +{ + struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); + struct vbox_private *vbox = crtc->dev->dev_private; + + switch (mode) { + case DRM_MODE_DPMS_ON: + vbox_crtc->blanked = false; + break; + case DRM_MODE_DPMS_STANDBY: + case DRM_MODE_DPMS_SUSPEND: + case DRM_MODE_DPMS_OFF: + vbox_crtc->blanked = true; + break; + } + + mutex_lock(&vbox->hw_mutex); + vbox_do_modeset(crtc, &crtc->hwmode); + mutex_unlock(&vbox->hw_mutex); +} + +static bool vbox_crtc_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +/* + * Try to map the layout of virtual screens to the range of the input device. + * Return true if we need to re-set the crtc modes due to screen offset + * changes. + */ +static bool vbox_set_up_input_mapping(struct vbox_private *vbox) +{ + struct drm_crtc *crtci; + struct drm_connector *connectori; + struct drm_framebuffer *fb1 = NULL; + bool single_framebuffer = true; + bool old_single_framebuffer = vbox->single_framebuffer; + u16 width = 0, height = 0; + + /* + * Are we using an X.Org-style single large frame-buffer for all crtcs? + * If so then screen layout can be deduced from the crtc offsets. + * Same fall-back if this is the fbdev frame-buffer. + */ + list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head) { + if (!fb1) { + fb1 = CRTC_FB(crtci); + if (to_vbox_framebuffer(fb1) == &vbox->fbdev->afb) + break; + } else if (CRTC_FB(crtci) && fb1 != CRTC_FB(crtci)) { + single_framebuffer = false; + } + } + if (single_framebuffer) { + list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, + head) { + if (to_vbox_crtc(crtci)->crtc_id != 0) + continue; + + vbox->single_framebuffer = true; + vbox->input_mapping_width = CRTC_FB(crtci)->width; + vbox->input_mapping_height = CRTC_FB(crtci)->height; + return old_single_framebuffer != + vbox->single_framebuffer; + } + } + /* Otherwise calculate the total span of all screens. 
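+ * The resulting span is the bounding rectangle of every connector's + * position hint plus its hinted mode size.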
*/ + list_for_each_entry(connectori, &vbox->dev->mode_config.connector_list, + head) { + struct vbox_connector *vbox_connector = + to_vbox_connector(connectori); + struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc; + + width = max_t(u16, width, vbox_crtc->x_hint + + vbox_connector->mode_hint.width); + height = max_t(u16, height, vbox_crtc->y_hint + + vbox_connector->mode_hint.height); + } + + vbox->single_framebuffer = false; + vbox->input_mapping_width = width; + vbox->input_mapping_height = height; + + return old_single_framebuffer != vbox->single_framebuffer; +} + +static int vbox_crtc_do_set_base(struct drm_crtc *crtc, + struct drm_framebuffer *old_fb, int x, int y) +{ + struct vbox_private *vbox = crtc->dev->dev_private; + struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); + struct drm_gem_object *obj; + struct vbox_framebuffer *vbox_fb; + struct vbox_bo *bo; + int ret; + u64 gpu_addr; + + /* Unpin the previous fb. */ + if (old_fb) { + vbox_fb = to_vbox_framebuffer(old_fb); + obj = vbox_fb->obj; + bo = gem_to_vbox_bo(obj); + ret = vbox_bo_reserve(bo, false); + if (ret) + return ret; + + vbox_bo_unpin(bo); + vbox_bo_unreserve(bo); + } + + vbox_fb = to_vbox_framebuffer(CRTC_FB(crtc)); + obj = vbox_fb->obj; + bo = gem_to_vbox_bo(obj); + + ret = vbox_bo_reserve(bo, false); + if (ret) + return ret; + + ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr); + if (ret) { + vbox_bo_unreserve(bo); + return ret; + } + + if (&vbox->fbdev->afb == vbox_fb) + vbox_fbdev_set_base(vbox, gpu_addr); + vbox_bo_unreserve(bo); + + /* vbox_set_start_address_crt1(crtc, (u32)gpu_addr); */ + vbox_crtc->fb_offset = gpu_addr; + if (vbox_set_up_input_mapping(vbox)) { + struct drm_crtc *crtci; + + list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, + head) { + vbox_set_view(crtc); + vbox_do_modeset(crtci, &crtci->mode); + } + } + + return 0; +} + +static int vbox_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, + struct drm_framebuffer *old_fb) +{ + return vbox_crtc_do_set_base(crtc, old_fb, x, y); +} + +static int vbox_crtc_mode_set(struct drm_crtc *crtc, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode, + int x, int y, struct drm_framebuffer *old_fb) +{ + struct vbox_private *vbox = crtc->dev->dev_private; + int ret; + + vbox_crtc_mode_set_base(crtc, x, y, old_fb); + + mutex_lock(&vbox->hw_mutex); + ret = vbox_set_view(crtc); + if (!ret) + vbox_do_modeset(crtc, mode); + hgsmi_update_input_mapping(vbox->guest_pool, 0, 0, + vbox->input_mapping_width, + vbox->input_mapping_height); + mutex_unlock(&vbox->hw_mutex); + + return ret; +} + +static void vbox_crtc_disable(struct drm_crtc *crtc) +{ +} + +static void vbox_crtc_prepare(struct drm_crtc *crtc) +{ +} + +static void vbox_crtc_commit(struct drm_crtc *crtc) +{ +} + +static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = { + .dpms = vbox_crtc_dpms, + .mode_fixup = vbox_crtc_mode_fixup, + .mode_set = vbox_crtc_mode_set, + /* .mode_set_base = vbox_crtc_mode_set_base, */ + .disable = vbox_crtc_disable, + .load_lut = vbox_crtc_load_lut, + .prepare = vbox_crtc_prepare, + .commit = vbox_crtc_commit, +}; + +static void vbox_crtc_reset(struct drm_crtc *crtc) +{ +} + +static void vbox_crtc_destroy(struct drm_crtc *crtc) +{ + drm_crtc_cleanup(crtc); + kfree(crtc); +} + +static const struct drm_crtc_funcs vbox_crtc_funcs = { + .cursor_move = vbox_cursor_move, + .cursor_set2 = vbox_cursor_set2, + .reset = vbox_crtc_reset, + .set_config = drm_crtc_helper_set_config, + /* .gamma_set = vbox_crtc_gamma_set, */ + .destroy = 
vbox_crtc_destroy, +}; + +static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i) +{ + struct vbox_crtc *vbox_crtc; + + vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL); + if (!vbox_crtc) + return NULL; + + vbox_crtc->crtc_id = i; + + drm_crtc_init(dev, &vbox_crtc->base, &vbox_crtc_funcs); + drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256); + drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs); + + return vbox_crtc; +} + +static void vbox_encoder_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static struct drm_encoder *vbox_best_single_encoder(struct drm_connector + *connector) +{ + int enc_id = connector->encoder_ids[0]; + + /* pick the encoder ids */ + if (enc_id) + return drm_encoder_find(connector->dev, enc_id); + + return NULL; +} + +static const struct drm_encoder_funcs vbox_enc_funcs = { + .destroy = vbox_encoder_destroy, +}; + +static void vbox_encoder_dpms(struct drm_encoder *encoder, int mode) +{ +} + +static bool vbox_mode_fixup(struct drm_encoder *encoder, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static void vbox_encoder_mode_set(struct drm_encoder *encoder, + struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ +} + +static void vbox_encoder_prepare(struct drm_encoder *encoder) +{ +} + +static void vbox_encoder_commit(struct drm_encoder *encoder) +{ +} + +static const struct drm_encoder_helper_funcs vbox_enc_helper_funcs = { + .dpms = vbox_encoder_dpms, + .mode_fixup = vbox_mode_fixup, + .prepare = vbox_encoder_prepare, + .commit = vbox_encoder_commit, + .mode_set = vbox_encoder_mode_set, +}; + +static struct drm_encoder *vbox_encoder_init(struct drm_device *dev, + unsigned int i) +{ + struct vbox_encoder *vbox_encoder; + + vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL); + if (!vbox_encoder) + return NULL; + + drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs, + DRM_MODE_ENCODER_DAC, NULL); + drm_encoder_helper_add(&vbox_encoder->base, &vbox_enc_helper_funcs); + + vbox_encoder->base.possible_crtcs = 1 << i; + return &vbox_encoder->base; +} + +/** + * Generate EDID data with a mode-unique serial number for the virtual + * monitor to try to persuade Unity that different modes correspond to + * different monitors and it should not try to force the same resolution on + * them. + */ +static void vbox_set_edid(struct drm_connector *connector, int width, + int height) +{ + enum { EDID_SIZE = 128 }; + unsigned char edid[EDID_SIZE] = { + 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */ + 0x58, 0x58, /* manufacturer (VBX) */ + 0x00, 0x00, /* product code */ + 0x00, 0x00, 0x00, 0x00, /* serial number goes here */ + 0x01, /* week of manufacture */ + 0x00, /* year of manufacture */ + 0x01, 0x03, /* EDID version */ + 0x80, /* capabilities - digital */ + 0x00, /* horiz. res in cm, zero for projectors */ + 0x00, /* vert. res in cm */ + 0x78, /* display gamma (120 == 2.2). */ + 0xEE, /* features (standby, suspend, off, RGB, std */ + /* colour space, preferred timing mode) */ + 0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54, + /* chromaticity for standard colour space. 
*/ + 0x00, 0x00, 0x00, /* no default timings */ + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, /* no standard timings */ + 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02, + 0x02, 0x02, + /* descriptor block 1 goes below */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + /* descriptor block 2, monitor ranges */ + 0x00, 0x00, 0x00, 0xFD, 0x00, + 0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20, + 0x20, 0x20, + /* 0-200Hz vertical, 0-200KHz horizontal, 1000MHz pixel clock */ + 0x20, + /* descriptor block 3, monitor name */ + 0x00, 0x00, 0x00, 0xFC, 0x00, + 'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r', + '\n', + /* descriptor block 4: dummy data */ + 0x00, 0x00, 0x00, 0x10, 0x00, + 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, + 0x20, + 0x00, /* number of extensions */ + 0x00 /* checksum goes here */ + }; + int clock = (width + 6) * (height + 6) * 60 / 10000; + unsigned int i, sum = 0; + + edid[12] = width & 0xff; + edid[13] = width >> 8; + edid[14] = height & 0xff; + edid[15] = height >> 8; + edid[54] = clock & 0xff; + edid[55] = clock >> 8; + edid[56] = width & 0xff; + edid[58] = (width >> 4) & 0xf0; + edid[59] = height & 0xff; + edid[61] = (height >> 4) & 0xf0; + for (i = 0; i < EDID_SIZE - 1; ++i) + sum += edid[i]; + edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF; + drm_mode_connector_update_edid_property(connector, (struct edid *)edid); +} + +static int vbox_get_modes(struct drm_connector *connector) +{ + struct vbox_connector *vbox_connector = NULL; + struct drm_display_mode *mode = NULL; + struct vbox_private *vbox = NULL; + unsigned int num_modes = 0; + int preferred_width, preferred_height; + + vbox_connector = to_vbox_connector(connector); + vbox = connector->dev->dev_private; + /* + * Heuristic: we do not want to tell the host that we support dynamic + * resizing unless we feel confident that the user space client using + * the video driver can handle hot-plug events. So the first time modes + * are queried after a "master" switch we tell the host that we do not, + * and immediately after we send the client a hot-plug notification as + * a test to see if they will respond and query again. + * That is also the reason why capabilities are reported to the host at + * this place in the code rather than elsewhere. + * We need to report the flags location before reporting the IRQ + * capability. + */ + hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) + + HOST_FLAGS_OFFSET); + if (vbox_connector->vbox_crtc->crtc_id == 0) + vbox_report_caps(vbox); + if (!vbox->initial_mode_queried) { + if (vbox_connector->vbox_crtc->crtc_id == 0) { + vbox->initial_mode_queried = true; + vbox_report_hotplug(vbox); + } + return drm_add_modes_noedid(connector, 800, 600); + } + num_modes = drm_add_modes_noedid(connector, 2560, 1600); + preferred_width = vbox_connector->mode_hint.width ? + vbox_connector->mode_hint.width : 1024; + preferred_height = vbox_connector->mode_hint.height ? 
+ vbox_connector->mode_hint.height : 768; + mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height, + 60, false, false, false); + if (mode) { + mode->type |= DRM_MODE_TYPE_PREFERRED; + drm_mode_probed_add(connector, mode); + ++num_modes; + } + vbox_set_edid(connector, preferred_width, preferred_height); + drm_object_property_set_value( + &connector->base, vbox->dev->mode_config.suggested_x_property, + vbox_connector->vbox_crtc->x_hint); + drm_object_property_set_value( + &connector->base, vbox->dev->mode_config.suggested_y_property, + vbox_connector->vbox_crtc->y_hint); + + return num_modes; +} + +static int vbox_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) +{ + return MODE_OK; +} + +static void vbox_connector_destroy(struct drm_connector *connector) +{ + struct vbox_connector *vbox_connector; + + vbox_connector = to_vbox_connector(connector); + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + kfree(connector); +} + +static enum drm_connector_status +vbox_connector_detect(struct drm_connector *connector, bool force) +{ + struct vbox_connector *vbox_connector; + + vbox_connector = to_vbox_connector(connector); + + return vbox_connector->mode_hint.disconnected ? + connector_status_disconnected : connector_status_connected; +} + +static int vbox_fill_modes(struct drm_connector *connector, u32 max_x, + u32 max_y) +{ + struct vbox_connector *vbox_connector; + struct drm_device *dev; + struct drm_display_mode *mode, *iterator; + + vbox_connector = to_vbox_connector(connector); + dev = vbox_connector->base.dev; + list_for_each_entry_safe(mode, iterator, &connector->modes, head) { + list_del(&mode->head); + drm_mode_destroy(dev, mode); + } + + return drm_helper_probe_single_connector_modes(connector, max_x, max_y); +} + +static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = { + .mode_valid = vbox_mode_valid, + .get_modes = vbox_get_modes, + .best_encoder = vbox_best_single_encoder, +}; + +static const struct drm_connector_funcs vbox_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .detect = vbox_connector_detect, + .fill_modes = vbox_fill_modes, + .destroy = vbox_connector_destroy, +}; + +static int vbox_connector_init(struct drm_device *dev, + struct vbox_crtc *vbox_crtc, + struct drm_encoder *encoder) +{ + struct vbox_connector *vbox_connector; + struct drm_connector *connector; + + vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL); + if (!vbox_connector) + return -ENOMEM; + + connector = &vbox_connector->base; + vbox_connector->vbox_crtc = vbox_crtc; + + drm_connector_init(dev, connector, &vbox_connector_funcs, + DRM_MODE_CONNECTOR_VGA); + drm_connector_helper_add(connector, &vbox_connector_helper_funcs); + + connector->interlace_allowed = 0; + connector->doublescan_allowed = 0; + + drm_mode_create_suggested_offset_properties(dev); + drm_object_attach_property(&connector->base, + dev->mode_config.suggested_x_property, -1); + drm_object_attach_property(&connector->base, + dev->mode_config.suggested_y_property, -1); + drm_connector_register(connector); + + drm_mode_connector_attach_encoder(connector, encoder); + + return 0; +} + +int vbox_mode_init(struct drm_device *dev) +{ + struct vbox_private *vbox = dev->dev_private; + struct drm_encoder *encoder; + struct vbox_crtc *vbox_crtc; + unsigned int i; + int ret; + + /* vbox_cursor_init(dev); */ + for (i = 0; i < vbox->num_crtcs; ++i) { + vbox_crtc = vbox_crtc_init(dev, i); + if (!vbox_crtc) + return -ENOMEM; + encoder = 
vbox_encoder_init(dev, i); + if (!encoder) + return -ENOMEM; + ret = vbox_connector_init(dev, vbox_crtc, encoder); + if (ret) + return ret; + } + + return 0; +} + +void vbox_mode_fini(struct drm_device *dev) +{ + /* vbox_cursor_fini(dev); */ +} + +/** + * Copy the ARGB image and generate the mask, which is needed in case the host + * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set + * if the corresponding alpha value in the ARGB image is greater than 0xF0. + */ +static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height, + size_t mask_size) +{ + size_t line_size = (width + 7) / 8; + u32 i, j; + + memcpy(dst + mask_size, src, width * height * 4); + for (i = 0; i < height; ++i) + for (j = 0; j < width; ++j) + if (((u32 *)src)[i * width + j] > 0xf0000000) + dst[i * line_size + j / 8] |= (0x80 >> (j % 8)); +} + +static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, + u32 handle, u32 width, u32 height, + s32 hot_x, s32 hot_y) +{ + struct vbox_private *vbox = crtc->dev->dev_private; + struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); + struct ttm_bo_kmap_obj uobj_map; + size_t data_size, mask_size; + struct drm_gem_object *obj; + u32 flags, caps = 0; + struct vbox_bo *bo; + bool src_isiomem; + u8 *dst = NULL; + u8 *src; + int ret; + + /* + * Re-set this regularly as in 5.0.20 and earlier the information was + * lost on save and restore. + */ + hgsmi_update_input_mapping(vbox->guest_pool, 0, 0, + vbox->input_mapping_width, + vbox->input_mapping_height); + if (!handle) { + bool cursor_enabled = false; + struct drm_crtc *crtci; + + /* Hide cursor. */ + vbox_crtc->cursor_enabled = false; + list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, + head) { + if (to_vbox_crtc(crtci)->cursor_enabled) + cursor_enabled = true; + } + + if (!cursor_enabled) + hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0, + 0, 0, NULL, 0); + return 0; + } + + vbox_crtc->cursor_enabled = true; + + if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT || + width == 0 || height == 0) + return -EINVAL; + + ret = hgsmi_query_conf(vbox->guest_pool, + VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps); + if (ret) + return ret; + + if (!(caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) { + /* + * Returning -EINVAL would mean cursor_set2() is not supported + * and -EAGAIN would mean retry at once; return -EBUSY instead. + */ + return -EBUSY; + } + + obj = drm_gem_object_lookup(file_priv, handle); + if (!obj) { + DRM_ERROR("Cannot find cursor object %x for crtc\n", handle); + return -ENOENT; + } + + bo = gem_to_vbox_bo(obj); + ret = vbox_bo_reserve(bo, false); + if (ret) + goto out_unref_obj; + + /* + * The mask must be calculated based on the alpha + * channel, one bit per ARGB word, and must be 32-bit + * padded.
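+ * As a worked example, a hypothetical 8x2 cursor needs + * (8 + 7) / 8 * 2 = 2 mask bytes, which the expression below pads up + * to 4 bytes.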
+ */ + mask_size = ((width + 7) / 8 * height + 3) & ~3; + data_size = width * height * 4 + mask_size; + vbox->cursor_hot_x = min_t(u32, max(hot_x, 0), width); + vbox->cursor_hot_y = min_t(u32, max(hot_y, 0), height); + vbox->cursor_width = width; + vbox->cursor_height = height; + vbox->cursor_data_size = data_size; + dst = vbox->cursor_data; + + ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map); + if (ret) { + vbox->cursor_data_size = 0; + goto out_unreserve_bo; + } + + src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem); + if (src_isiomem) { + DRM_ERROR("src cursor bo not in main memory\n"); + ret = -EIO; + goto out_unmap_bo; + } + + copy_cursor_image(src, dst, width, height, mask_size); + + flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE | + VBOX_MOUSE_POINTER_ALPHA; + ret = hgsmi_update_pointer_shape(vbox->guest_pool, flags, + vbox->cursor_hot_x, vbox->cursor_hot_y, + width, height, dst, data_size); +out_unmap_bo: + ttm_bo_kunmap(&uobj_map); +out_unreserve_bo: + vbox_bo_unreserve(bo); +out_unref_obj: + drm_gem_object_unreference_unlocked(obj); + + return ret; +} + +static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y) +{ + struct vbox_private *vbox = crtc->dev->dev_private; + u32 flags = VBOX_MOUSE_POINTER_VISIBLE | + VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA; + s32 crtc_x = + vbox->single_framebuffer ? crtc->x : to_vbox_crtc(crtc)->x_hint; + s32 crtc_y = + vbox->single_framebuffer ? crtc->y : to_vbox_crtc(crtc)->y_hint; + u32 host_x, host_y; + u32 hot_x = 0; + u32 hot_y = 0; + int ret; + + /* + * We compare these to unsigned later and don't + * need to handle negative values. + */ + if (x + crtc_x < 0 || y + crtc_y < 0 || vbox->cursor_data_size == 0) + return 0; + + ret = hgsmi_cursor_position(vbox->guest_pool, true, x + crtc_x, + y + crtc_y, &host_x, &host_y); + + /* + * The only reason we have vbox_cursor_move() is that some older clients + * might use DRM_IOCTL_MODE_CURSOR instead of DRM_IOCTL_MODE_CURSOR2 and + * use DRM_MODE_CURSOR_MOVE to set the hot-spot. + * + * However VirtualBox 5.0.20 and earlier have a bug causing them to + * return 0,0 as host cursor location after a save and restore. + * + * To work around this we ignore a 0, 0 return, since missing the odd + * time when it legitimately happens is not going to hurt much.
+ */ + if (ret || (host_x == 0 && host_y == 0)) + return ret; + + if (x + crtc_x < host_x) + hot_x = min(host_x - x - crtc_x, vbox->cursor_width); + if (y + crtc_y < host_y) + hot_y = min(host_y - y - crtc_y, vbox->cursor_height); + + if (hot_x == vbox->cursor_hot_x && hot_y == vbox->cursor_hot_y) + return 0; + + vbox->cursor_hot_x = hot_x; + vbox->cursor_hot_y = hot_y; + + return hgsmi_update_pointer_shape(vbox->guest_pool, flags, + hot_x, hot_y, vbox->cursor_width, vbox->cursor_height, + vbox->cursor_data, vbox->cursor_data_size); +} diff --git a/drivers/staging/vboxvideo/vbox_prime.c b/drivers/staging/vboxvideo/vbox_prime.c new file mode 100644 index 000000000000..b7453e427a1d --- /dev/null +++ b/drivers/staging/vboxvideo/vbox_prime.c @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2017 Oracle Corporation + * Copyright 2017 Canonical + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Andreas Pokorny + */ + +#include "vbox_drv.h" + +/* + * Based on qxl_prime.c: + * Empty Implementations as there should not be any other driver for a virtual + * device that might share buffers with vboxvideo + */ + +int vbox_gem_prime_pin(struct drm_gem_object *obj) +{ + WARN_ONCE(1, "not implemented"); + return -ENOSYS; +} + +void vbox_gem_prime_unpin(struct drm_gem_object *obj) +{ + WARN_ONCE(1, "not implemented"); +} + +struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj) +{ + WARN_ONCE(1, "not implemented"); + return ERR_PTR(-ENOSYS); +} + +struct drm_gem_object *vbox_gem_prime_import_sg_table( + struct drm_device *dev, struct dma_buf_attachment *attach, + struct sg_table *table) +{ + WARN_ONCE(1, "not implemented"); + return ERR_PTR(-ENOSYS); +} + +void *vbox_gem_prime_vmap(struct drm_gem_object *obj) +{ + WARN_ONCE(1, "not implemented"); + return ERR_PTR(-ENOSYS); +} + +void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr) +{ + WARN_ONCE(1, "not implemented"); +} + +int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area) +{ + WARN_ONCE(1, "not implemented"); + return -ENOSYS; +} diff --git a/drivers/staging/vboxvideo/vbox_ttm.c b/drivers/staging/vboxvideo/vbox_ttm.c new file mode 100644 index 000000000000..34a905d40735 --- /dev/null +++ b/drivers/staging/vboxvideo/vbox_ttm.c @@ -0,0 +1,472 @@ +/* + * Copyright (C) 2013-2017 Oracle Corporation + * This file is based on ast_ttm.c + * Copyright 2012 Red Hat Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + * + * Authors: Dave Airlie <airlied@redhat.com> + * Michael Thayer <michael.thayer@oracle.com> + */ +#include "vbox_drv.h" +#include <ttm/ttm_page_alloc.h> + +static inline struct vbox_private *vbox_bdev(struct ttm_bo_device *bd) +{ + return container_of(bd, struct vbox_private, ttm.bdev); +} + +static int vbox_ttm_mem_global_init(struct drm_global_reference *ref) +{ + return ttm_mem_global_init(ref->object); +} + +static void vbox_ttm_mem_global_release(struct drm_global_reference *ref) +{ + ttm_mem_global_release(ref->object); +} + +/** + * Adds the vbox memory manager object/structures to the global memory manager. + */ +static int vbox_ttm_global_init(struct vbox_private *vbox) +{ + struct drm_global_reference *global_ref; + int ret; + + global_ref = &vbox->ttm.mem_global_ref; + global_ref->global_type = DRM_GLOBAL_TTM_MEM; + global_ref->size = sizeof(struct ttm_mem_global); + global_ref->init = &vbox_ttm_mem_global_init; + global_ref->release = &vbox_ttm_mem_global_release; + ret = drm_global_item_ref(global_ref); + if (ret) { + DRM_ERROR("Failed setting up TTM memory subsystem.\n"); + return ret; + } + + vbox->ttm.bo_global_ref.mem_glob = vbox->ttm.mem_global_ref.object; + global_ref = &vbox->ttm.bo_global_ref.ref; + global_ref->global_type = DRM_GLOBAL_TTM_BO; + global_ref->size = sizeof(struct ttm_bo_global); + global_ref->init = &ttm_bo_global_init; + global_ref->release = &ttm_bo_global_release; + + ret = drm_global_item_ref(global_ref); + if (ret) { + DRM_ERROR("Failed setting up TTM BO subsystem.\n"); + drm_global_item_unref(&vbox->ttm.mem_global_ref); + return ret; + } + + return 0; +} + +/** + * Removes the vbox memory manager object from the global memory manager. 
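+ * This drops the two references taken in vbox_ttm_global_init().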
+ */ +static void vbox_ttm_global_release(struct vbox_private *vbox) +{ + drm_global_item_unref(&vbox->ttm.bo_global_ref.ref); + drm_global_item_unref(&vbox->ttm.mem_global_ref); +} + +static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo) +{ + struct vbox_bo *bo; + + bo = container_of(tbo, struct vbox_bo, bo); + + drm_gem_object_release(&bo->gem); + kfree(bo); +} + +static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo) +{ + if (bo->destroy == &vbox_bo_ttm_destroy) + return true; + + return false; +} + +static int +vbox_bo_init_mem_type(struct ttm_bo_device *bdev, u32 type, + struct ttm_mem_type_manager *man) +{ + switch (type) { + case TTM_PL_SYSTEM: + man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_MASK_CACHING; + man->default_caching = TTM_PL_FLAG_CACHED; + break; + case TTM_PL_VRAM: + man->func = &ttm_bo_manager_func; + man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; + man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + break; + default: + DRM_ERROR("Unsupported memory type %u\n", (unsigned int)type); + return -EINVAL; + } + + return 0; +} + +static void +vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) +{ + struct vbox_bo *vboxbo = vbox_bo(bo); + + if (!vbox_ttm_bo_is_vbox_bo(bo)) + return; + + vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM); + *pl = vboxbo->placement; +} + +static int vbox_bo_verify_access(struct ttm_buffer_object *bo, + struct file *filp) +{ + return 0; +} + +static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ + struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; + struct vbox_private *vbox = vbox_bdev(bdev); + + mem->bus.addr = NULL; + mem->bus.offset = 0; + mem->bus.size = mem->num_pages << PAGE_SHIFT; + mem->bus.base = 0; + mem->bus.is_iomem = false; + if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) + return -EINVAL; + switch (mem->mem_type) { + case TTM_PL_SYSTEM: + /* system memory */ + return 0; + case TTM_PL_VRAM: + mem->bus.offset = mem->start << PAGE_SHIFT; + mem->bus.base = pci_resource_start(vbox->dev->pdev, 0); + mem->bus.is_iomem = true; + break; + default: + return -EINVAL; + } + return 0; +} + +static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev, + struct ttm_mem_reg *mem) +{ +} + +static int vbox_bo_move(struct ttm_buffer_object *bo, + bool evict, bool interruptible, + bool no_wait_gpu, struct ttm_mem_reg *new_mem) +{ + return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem); +} + +static void vbox_ttm_backend_destroy(struct ttm_tt *tt) +{ + ttm_tt_fini(tt); + kfree(tt); +} + +static struct ttm_backend_func vbox_tt_backend_func = { + .destroy = &vbox_ttm_backend_destroy, +}; + +static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev, + unsigned long size, + u32 page_flags, + struct page *dummy_read_page) +{ + struct ttm_tt *tt; + + tt = kzalloc(sizeof(*tt), GFP_KERNEL); + if (!tt) + return NULL; + + tt->func = &vbox_tt_backend_func; + if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page)) { + kfree(tt); + return NULL; + } + + return tt; +} + +static int vbox_ttm_tt_populate(struct ttm_tt *ttm) +{ + return ttm_pool_populate(ttm); +} + +static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm) +{ + ttm_pool_unpopulate(ttm); +} + +struct ttm_bo_driver vbox_bo_driver = { + .ttm_tt_create = vbox_ttm_tt_create, + .ttm_tt_populate = vbox_ttm_tt_populate, + .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate, + .init_mem_type = 
vbox_bo_init_mem_type, + .eviction_valuable = ttm_bo_eviction_valuable, + .evict_flags = vbox_bo_evict_flags, + .move = vbox_bo_move, + .verify_access = vbox_bo_verify_access, + .io_mem_reserve = &vbox_ttm_io_mem_reserve, + .io_mem_free = &vbox_ttm_io_mem_free, + .io_mem_pfn = ttm_bo_default_io_mem_pfn, +}; + +int vbox_mm_init(struct vbox_private *vbox) +{ + int ret; + struct drm_device *dev = vbox->dev; + struct ttm_bo_device *bdev = &vbox->ttm.bdev; + + ret = vbox_ttm_global_init(vbox); + if (ret) + return ret; + + ret = ttm_bo_device_init(&vbox->ttm.bdev, + vbox->ttm.bo_global_ref.ref.object, + &vbox_bo_driver, + dev->anon_inode->i_mapping, + DRM_FILE_PAGE_OFFSET, true); + if (ret) { + DRM_ERROR("Error initialising bo driver; %d\n", ret); + goto err_ttm_global_release; + } + + ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, + vbox->available_vram_size >> PAGE_SHIFT); + if (ret) { + DRM_ERROR("Failed ttm VRAM init: %d\n", ret); + goto err_device_release; + } + +#ifdef DRM_MTRR_WC + vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0), + DRM_MTRR_WC); +#else + vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0), + pci_resource_len(dev->pdev, 0)); +#endif + return 0; + +err_device_release: + ttm_bo_device_release(&vbox->ttm.bdev); +err_ttm_global_release: + vbox_ttm_global_release(vbox); + return ret; +} + +void vbox_mm_fini(struct vbox_private *vbox) +{ +#ifdef DRM_MTRR_WC + drm_mtrr_del(vbox->fb_mtrr, + pci_resource_start(vbox->dev->pdev, 0), + pci_resource_len(vbox->dev->pdev, 0), DRM_MTRR_WC); +#else + arch_phys_wc_del(vbox->fb_mtrr); +#endif + ttm_bo_device_release(&vbox->ttm.bdev); + vbox_ttm_global_release(vbox); +} + +void vbox_ttm_placement(struct vbox_bo *bo, int domain) +{ + unsigned int i; + u32 c = 0; + + bo->placement.placement = bo->placements; + bo->placement.busy_placement = bo->placements; + + if (domain & TTM_PL_FLAG_VRAM) + bo->placements[c++].flags = + TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM; + if (domain & TTM_PL_FLAG_SYSTEM) + bo->placements[c++].flags = + TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + if (!c) + bo->placements[c++].flags = + TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + + bo->placement.num_placement = c; + bo->placement.num_busy_placement = c; + + for (i = 0; i < c; ++i) { + bo->placements[i].fpfn = 0; + bo->placements[i].lpfn = 0; + } +} + +int vbox_bo_create(struct drm_device *dev, int size, int align, + u32 flags, struct vbox_bo **pvboxbo) +{ + struct vbox_private *vbox = dev->dev_private; + struct vbox_bo *vboxbo; + size_t acc_size; + int ret; + + vboxbo = kzalloc(sizeof(*vboxbo), GFP_KERNEL); + if (!vboxbo) + return -ENOMEM; + + ret = drm_gem_object_init(dev, &vboxbo->gem, size); + if (ret) + goto err_free_vboxbo; + + vboxbo->bo.bdev = &vbox->ttm.bdev; + + vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM); + + acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size, + sizeof(struct vbox_bo)); + + ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size, + ttm_bo_type_device, &vboxbo->placement, + align >> PAGE_SHIFT, false, NULL, acc_size, + NULL, NULL, vbox_bo_ttm_destroy); + if (ret) + goto err_free_vboxbo; + + *pvboxbo = vboxbo; + + return 0; + +err_free_vboxbo: + kfree(vboxbo); + return ret; +} + +static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo) +{ + return bo->bo.offset; +} + +int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr) +{ + int i, ret; + + if (bo->pin_count) { + bo->pin_count++; + if (gpu_addr) + *gpu_addr = vbox_bo_gpu_offset(bo); + + return 
0; + } + + vbox_ttm_placement(bo, pl_flag); + + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; + + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + if (ret) + return ret; + + bo->pin_count = 1; + + if (gpu_addr) + *gpu_addr = vbox_bo_gpu_offset(bo); + + return 0; +} + +int vbox_bo_unpin(struct vbox_bo *bo) +{ + int i, ret; + + if (!bo->pin_count) { + DRM_ERROR("unpin bad %p\n", bo); + return 0; + } + bo->pin_count--; + if (bo->pin_count) + return 0; + + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; + + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + if (ret) + return ret; + + return 0; +} + +/* + * Move a vbox-owned buffer object to system memory if no one else has it + * pinned. The caller must have pinned it previously, and this call will + * release the caller's pin. + */ +int vbox_bo_push_sysram(struct vbox_bo *bo) +{ + int i, ret; + + if (!bo->pin_count) { + DRM_ERROR("unpin bad %p\n", bo); + return 0; + } + bo->pin_count--; + if (bo->pin_count) + return 0; + + if (bo->kmap.virtual) + ttm_bo_kunmap(&bo->kmap); + + vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM); + + for (i = 0; i < bo->placement.num_placement; i++) + bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; + + ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false); + if (ret) { + DRM_ERROR("pushing to system memory failed\n"); + return ret; + } + + return 0; +} + +int vbox_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct drm_file *file_priv; + struct vbox_private *vbox; + + if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) + return -EINVAL; + + file_priv = filp->private_data; + vbox = file_priv->minor->dev->dev_private; + + return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev); +} diff --git a/drivers/staging/vboxvideo/vboxvideo.h b/drivers/staging/vboxvideo/vboxvideo.h new file mode 100644 index 000000000000..d835d75d761c --- /dev/null +++ b/drivers/staging/vboxvideo/vboxvideo.h @@ -0,0 +1,491 @@ +/* + * Copyright (C) 2006-2016 Oracle Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software.
+ */ + +#ifndef __VBOXVIDEO_H__ +#define __VBOXVIDEO_H__ + +/* + * This should be in sync with monitorCount <xsd:maxInclusive value="64"/> in + * src/VBox/Main/xml/VirtualBox-settings-common.xsd + */ +#define VBOX_VIDEO_MAX_SCREENS 64 + +/* + * The last 4096 bytes of the guest VRAM contain the generic info for all + * DualView chunks: sizes and offsets of chunks. This is filled by miniport. + * + * Last 4096 bytes of each chunk contain chunk specific data: framebuffer info, + * etc. This is used exclusively by the corresponding instance of a display + * driver. + * + * The VRAM layout: + * Last 4096 bytes - Adapter information area. + * 4096 bytes aligned miniport heap (value specified in the config rounded up). + * Slack - what is left after dividing the VRAM. + * 4096 bytes aligned framebuffers: + * last 4096 bytes of each framebuffer is the display information area. + * + * The Virtual Graphics Adapter information in the guest VRAM is stored by the + * guest video driver using structures prepended by VBOXVIDEOINFOHDR. + * + * When the guest driver writes dword 0 to the VBE_DISPI_INDEX_VBOX_VIDEO + * the host starts to process the info. The first element at the start of + * the 4096 bytes region should normally be a LINK that points to the + * actual information chain. That way the guest driver can have some + * fixed layout of the information memory block and just rewrite + * the link to point to the relevant memory chain. + * + * The processing stops at the END element. + * + * The host can access the memory only when the port IO is processed. + * All data that will be needed later must be copied from these 4096 bytes. + * But other VRAM can be used by the host until the mode is disabled. + * + * The guest driver writes dword 0xffffffff to the VBE_DISPI_INDEX_VBOX_VIDEO + * to disable the mode. + * + * VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information + * from the host and issue commands to the host. + * + * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, then the + * following operations with the VBE data register can be performed: + * + * Operation Result + * write 16 bit value NOP + * read 16 bit value count of monitors + * write 32 bit value set the vbox cmd value and the cmd is processed by the host + * read 32 bit value result of the last vbox command is returned + */ + +/** + * VBVA command header. + * + * @todo Where does this fit in? + */ +struct vbva_cmd_hdr { + /** Coordinates of affected rectangle. */ + s16 x; + s16 y; + u16 w; + u16 h; +} __packed; + +/** @name VBVA ring defines. + * + * The VBVA ring buffer is suitable for transferring large (< 2GB) amounts of + * data, for example big bitmaps which do not fit in the buffer. + * + * The guest starts writing to the buffer by initializing a record entry in the + * records queue. VBVA_F_RECORD_PARTIAL indicates that the record is being + * written. As data is written to the ring buffer, the guest increases + * free_offset. + * + * The host reads the records on flushes and processes all completed records. + * When the host encounters a situation where only a partial record is present + * and len_and_flags & ~VBVA_F_RECORD_PARTIAL >= VBVA_RING_BUFFER_SIZE - + * VBVA_RING_BUFFER_THRESHOLD, the host fetches all record data and updates + * data_offset. After that on each flush the host continues fetching the data + * until the record is completed.
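+ * A record counts as completed once the guest clears + * VBVA_F_RECORD_PARTIAL from its len_and_flags field.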
+ * + */ +#define VBVA_RING_BUFFER_SIZE (4194304 - 1024) +#define VBVA_RING_BUFFER_THRESHOLD (4096) + +#define VBVA_MAX_RECORDS (64) + +#define VBVA_F_MODE_ENABLED 0x00000001u +#define VBVA_F_MODE_VRDP 0x00000002u +#define VBVA_F_MODE_VRDP_RESET 0x00000004u +#define VBVA_F_MODE_VRDP_ORDER_MASK 0x00000008u + +#define VBVA_F_STATE_PROCESSING 0x00010000u + +#define VBVA_F_RECORD_PARTIAL 0x80000000u + +/** + * VBVA record. + */ +struct vbva_record { + /** The length of the record. Changed by the guest. */ + u32 len_and_flags; +} __packed; + +/* + * The minimum HGSMI heap size is PAGE_SIZE (4096 bytes) and is a restriction of + * the runtime heapsimple API. Use a minimum of 2 pages here, because the info + * area also may contain other data (for example hgsmi_host_flags structure). + */ +#define VBVA_ADAPTER_INFORMATION_SIZE 65536 +#define VBVA_MIN_BUFFER_SIZE 65536 + +/* The value for port IO to tell the adapter to stop interpreting the adapter + * memory. */ +#define VBOX_VIDEO_DISABLE_ADAPTER_MEMORY 0xFFFFFFFF + +/* The value for port IO to let the adapter interpret the adapter memory. */ +#define VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY 0x00000000 + +/* The value for port IO to let the adapter interpret the display memory. + * The display number is encoded in the low 16 bits. + */ +#define VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE 0x00010000 + +struct vbva_host_flags { + u32 host_events; + u32 supported_orders; +} __packed; + +struct vbva_buffer { + struct vbva_host_flags host_flags; + + /* The offset where the data start in the buffer. */ + u32 data_offset; + /* The offset where next data must be placed in the buffer. */ + u32 free_offset; + + /* The queue of record descriptions. */ + struct vbva_record records[VBVA_MAX_RECORDS]; + u32 record_first_index; + u32 record_free_index; + + /* Space to leave free when large partial records are transferred. */ + u32 partial_write_tresh; + + u32 data_len; + /* variable size for the rest of the vbva_buffer area in VRAM. */ + u8 data[0]; +} __packed; + +#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024) + +/* guest->host commands */ +#define VBVA_QUERY_CONF32 1 +#define VBVA_SET_CONF32 2 +#define VBVA_INFO_VIEW 3 +#define VBVA_INFO_HEAP 4 +#define VBVA_FLUSH 5 +#define VBVA_INFO_SCREEN 6 +#define VBVA_ENABLE 7 +#define VBVA_MOUSE_POINTER_SHAPE 8 +/* informs the host about HGSMI caps, see vbva_caps below */ +#define VBVA_INFO_CAPS 12 +/* configures scanline, see VBVASCANLINECFG below */ +#define VBVA_SCANLINE_CFG 13 +/* requests scanline info, see VBVASCANLINEINFO below */ +#define VBVA_SCANLINE_INFO 14 +/* inform host about VBVA command submission */ +#define VBVA_CMDVBVA_SUBMIT 16 +/* inform host about VBVA command flush */ +#define VBVA_CMDVBVA_FLUSH 17 +/* G->H DMA command */ +#define VBVA_CMDVBVA_CTL 18 +/* Query most recent mode hints sent */ +#define VBVA_QUERY_MODE_HINTS 19 +/** + * Report the guest virtual desktop position and size for mapping host and + * guest pointer positions. + */ +#define VBVA_REPORT_INPUT_MAPPING 20 +/** Report the guest cursor position and query the host position. */ +#define VBVA_CURSOR_POSITION 21 + +/* host->guest commands */ +#define VBVAHG_EVENT 1 +#define VBVAHG_DISPLAY_CUSTOM 2 + +/* vbva_conf32::index */ +#define VBOX_VBVA_CONF32_MONITOR_COUNT 0 +#define VBOX_VBVA_CONF32_HOST_HEAP_SIZE 1 +/** + * Returns VINF_SUCCESS if the host can report mode hints via VBVA. + * Set value to VERR_NOT_SUPPORTED before calling.
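+ * A host which does not understand the query leaves the value + * untouched, so a stale error code doubles as a "not supported" answer.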
+ */ +#define VBOX_VBVA_CONF32_MODE_HINT_REPORTING 2 +/** + * Returns VINF_SUCCESS if the host can report guest cursor enabled status via + * VBVA. Set value to VERR_NOT_SUPPORTED before calling. + */ +#define VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING 3 +/** + * Returns the currently available host cursor capabilities. Available if + * vbva_conf32::VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING returns success. + * @see VMMDevReqMouseStatus::mouseFeatures. + */ +#define VBOX_VBVA_CONF32_CURSOR_CAPABILITIES 4 +/** Returns the supported flags in vbva_infoscreen::flags. */ +#define VBOX_VBVA_CONF32_SCREEN_FLAGS 5 +/** Returns the max size of VBVA record. */ +#define VBOX_VBVA_CONF32_MAX_RECORD_SIZE 6 + +struct vbva_conf32 { + u32 index; + u32 value; +} __packed; + +/** Reserved for historical reasons. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED0 BIT(0) +/** + * Guest cursor capability: can the host show a hardware cursor at the host + * pointer location? + */ +#define VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE BIT(1) +/** Reserved for historical reasons. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED2 BIT(2) +/** Reserved for historical reasons. Must always be unset. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED3 BIT(3) +/** Reserved for historical reasons. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED4 BIT(4) +/** Reserved for historical reasons. */ +#define VBOX_VBVA_CURSOR_CAPABILITY_RESERVED5 BIT(5) + +struct vbva_infoview { + /* Index of the screen, assigned by the guest. */ + u32 view_index; + + /* The screen offset in VRAM, the framebuffer starts here. */ + u32 view_offset; + + /* The size of the VRAM memory that can be used for the view. */ + u32 view_size; + + /* The recommended maximum size of the VRAM memory for the screen. */ + u32 max_screen_size; +} __packed; + +struct vbva_flush { + u32 reserved; +} __packed; + +/* vbva_infoscreen::flags */ +#define VBVA_SCREEN_F_NONE 0x0000 +#define VBVA_SCREEN_F_ACTIVE 0x0001 +/** + * The virtual monitor has been disabled by the guest and should be removed + * by the host and ignored for purposes of pointer position calculation. + */ +#define VBVA_SCREEN_F_DISABLED 0x0002 +/** + * The virtual monitor has been blanked by the guest and should be blacked + * out by the host using width, height, etc. values from the vbva_infoscreen + * request. + */ +#define VBVA_SCREEN_F_BLANK 0x0004 +/** + * The virtual monitor has been blanked by the guest and should be blacked + * out by the host using the previous mode values for width, height, etc. + */ +#define VBVA_SCREEN_F_BLANK2 0x0008 + +struct vbva_infoscreen { + /* Which view contains the screen. */ + u32 view_index; + + /* Physical X origin relative to the primary screen. */ + s32 origin_x; + + /* Physical Y origin relative to the primary screen. */ + s32 origin_y; + + /* Offset of visible framebuffer relative to the framebuffer start. */ + u32 start_offset; + + /* The scan line size in bytes. */ + u32 line_size; + + /* Width of the screen. */ + u32 width; + + /* Height of the screen. */ + u32 height; + + /* Color depth.
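+ * A zero value is sent while the screen is blanked.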
*/ + u16 bits_per_pixel; + + /* VBVA_SCREEN_F_* */ + u16 flags; +} __packed; + +/* vbva_enable::flags */ +#define VBVA_F_NONE 0x00000000 +#define VBVA_F_ENABLE 0x00000001 +#define VBVA_F_DISABLE 0x00000002 +/* extended VBVA to be used with WDDM */ +#define VBVA_F_EXTENDED 0x00000004 +/* vbva offset is absolute VRAM offset */ +#define VBVA_F_ABSOFFSET 0x00000008 + +struct vbva_enable { + u32 flags; + u32 offset; + s32 result; +} __packed; + +struct vbva_enable_ex { + struct vbva_enable base; + u32 screen_id; +} __packed; + +struct vbva_mouse_pointer_shape { + /* The host result. */ + s32 result; + + /* VBOX_MOUSE_POINTER_* bit flags. */ + u32 flags; + + /* X coordinate of the hot spot. */ + u32 hot_X; + + /* Y coordinate of the hot spot. */ + u32 hot_y; + + /* Width of the pointer in pixels. */ + u32 width; + + /* Height of the pointer in scanlines. */ + u32 height; + + /* Pointer data. + * + **** + * The data consists of 1 bpp AND mask followed by 32 bpp XOR (color) + * mask. + * + * For pointers without alpha channel the XOR mask pixels are 32 bit + * values: (lsb)BGR0(msb). For pointers with alpha channel the XOR mask + * consists of (lsb)BGRA(msb) 32 bit values. + * + * The guest driver must create the AND mask for pointers with an alpha + * channel, so if the host does not support alpha, the pointer could be + * displayed as a normal color pointer. The AND mask can be constructed + * from alpha values. For example alpha value >= 0xf0 means bit 0 in the + * AND mask. + * + * The AND mask is a 1 bpp bitmap with byte aligned scanlines. Size of + * AND mask, therefore, is and_len = (width + 7) / 8 * height. The + * padding bits at the end of any scanline are undefined. + * + * The XOR mask follows the AND mask on the next 4 bytes aligned offset: + * u8 *xor = and + ((and_len + 3) & ~3) + * Bytes in the gap between the AND and the XOR mask are undefined. + * XOR mask scanlines have no gap between them and size of XOR mask is: + * xor_len = width * 4 * height. + **** + * + * Preallocate 4 bytes for accessing actual data as p->data. + */ + u8 data[4]; +} __packed; + +/** + * @name vbva_mouse_pointer_shape::flags + * @note The VBOX_MOUSE_POINTER_* flags are used in the guest video driver, + * values must be <= 0x8000 and must not be changed. (try to make more sense + * of this, please). + * @{ + */ + +/** pointer is visible */ +#define VBOX_MOUSE_POINTER_VISIBLE 0x0001 +/** pointer has alpha channel */ +#define VBOX_MOUSE_POINTER_ALPHA 0x0002 +/** pointerData contains new pointer shape */ +#define VBOX_MOUSE_POINTER_SHAPE 0x0004 + +/** @} */ + +/* + * The guest driver can handle async guest cmd completion by reading the + * command offset from the io port. + */ +#define VBVACAPS_COMPLETEGCMD_BY_IOREAD 0x00000001 +/* the guest driver can handle video adapter IRQs */ +#define VBVACAPS_IRQ 0x00000002 +/** The guest can read video mode hints sent via VBVA. */ +#define VBVACAPS_VIDEO_MODE_HINTS 0x00000004 +/** The guest can switch to a software cursor on demand. */ +#define VBVACAPS_DISABLE_CURSOR_INTEGRATION 0x00000008 +/** The guest does not depend on host handling the VBE registers. */ +#define VBVACAPS_USE_VBVA_ONLY 0x00000010 + +struct vbva_caps { + s32 rc; + u32 caps; +} __packed; + +/** Query the most recent mode hints received from the host. */ +struct vbva_query_mode_hints { + /** The maximum number of screens to return hints for. */ + u16 hints_queried_count; + /** The size of the mode hint structures directly following this one. */ + u16 hint_structure_guest_size; + /** Return code for the operation.
Initialise to VERR_NOT_SUPPORTED. */ + s32 rc; +} __packed; + +/** + * Structure in which a mode hint is returned. The guest allocates an array + * of these immediately after the vbva_query_mode_hints structure. + * To accommodate future extensions, the vbva_query_mode_hints structure + * specifies the size of the vbva_modehint structures allocated by the guest, + * and the host only fills out structure elements which fit into that size. The + * host should fill any unused members (e.g. dx, dy) or structure space on the + * end with ~0. The whole structure can legally be set to ~0 to skip a screen. + */ +struct vbva_modehint { + u32 magic; + u32 cx; + u32 cy; + u32 bpp; /* Which has never been used... */ + u32 display; + u32 dx; /**< X offset into the virtual frame-buffer. */ + u32 dy; /**< Y offset into the virtual frame-buffer. */ + u32 enabled; /* Not flags. Add new members for new flags. */ +} __packed; + +#define VBVAMODEHINT_MAGIC 0x0801add9u + +/** + * Report the rectangle relative to which absolute pointer events should be + * expressed. This information remains valid until the next VBVA resize event + * for any screen, at which time it is reset to the bounding rectangle of all + * virtual screens and must be re-set. + * @see VBVA_REPORT_INPUT_MAPPING. + */ +struct vbva_report_input_mapping { + s32 x; /**< Upper left X co-ordinate relative to the first screen. */ + s32 y; /**< Upper left Y co-ordinate relative to the first screen. */ + u32 cx; /**< Rectangle width. */ + u32 cy; /**< Rectangle height. */ +} __packed; + +/** + * Report the guest cursor position and query the host one. The host may wish + * to use the guest information to re-position its own cursor (though this is + * currently unlikely). + * @see VBVA_CURSOR_POSITION + */ +struct vbva_cursor_position { + u32 report_position; /**< Are we reporting a position? */ + u32 x; /**< Guest cursor X position */ + u32 y; /**< Guest cursor Y position */ +} __packed; + +#endif diff --git a/drivers/staging/vboxvideo/vboxvideo_guest.h b/drivers/staging/vboxvideo/vboxvideo_guest.h new file mode 100644 index 000000000000..d09da841711a --- /dev/null +++ b/drivers/staging/vboxvideo/vboxvideo_guest.h @@ -0,0 +1,95 @@ +/* + * Copyright (C) 2006-2017 Oracle Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __VBOXVIDEO_GUEST_H__ +#define __VBOXVIDEO_GUEST_H__ + +#include <linux/genalloc.h> +#include "vboxvideo.h" + +/** + * Structure grouping the context needed for sending graphics acceleration + * information to the host via VBVA. Each screen has its own VBVA buffer. + */ +struct vbva_buf_ctx { + /** Offset of the buffer in the VRAM section for the screen */ + u32 buffer_offset; + /** Length of the buffer in bytes */ + u32 buffer_length; + /** Set if we wrote to the buffer faster than the host could read it */ + bool buffer_overflow; + /** VBVA record that we are currently preparing for the host, or NULL */ + struct vbva_record *record; + /** + * Pointer to the VBVA buffer mapped into the current address space. + * Will be NULL if VBVA is not enabled. + */ + struct vbva_buffer *vbva; +}; + +/** + * @name Base HGSMI APIs + * @{ + */ +int hgsmi_report_flags_location(struct gen_pool *ctx, u32 location); +int hgsmi_send_caps_info(struct gen_pool *ctx, u32 caps); +int hgsmi_test_query_conf(struct gen_pool *ctx); +int hgsmi_query_conf(struct gen_pool *ctx, u32 index, u32 *value_ret); +int hgsmi_update_pointer_shape(struct gen_pool *ctx, u32 flags, + u32 hot_x, u32 hot_y, u32 width, u32 height, + u8 *pixels, u32 len); +int hgsmi_cursor_position(struct gen_pool *ctx, bool report_position, + u32 x, u32 y, u32 *x_host, u32 *y_host); +/** @} */ + +/** + * @name VBVA APIs + * @{ + */ +bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + struct vbva_buffer *vbva, s32 screen); +void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + s32 screen); +bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx, + struct gen_pool *ctx); +void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx); +bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx, + const void *p, u32 len); +void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx, + u32 buffer_offset, u32 buffer_length); +/** @} */ + +/** + * @name Modesetting APIs + * @{ + */ +void hgsmi_process_display_info(struct gen_pool *ctx, u32 display, + s32 origin_x, s32 origin_y, u32 start_offset, + u32 pitch, u32 width, u32 height, + u16 bpp, u16 flags); +int hgsmi_update_input_mapping(struct gen_pool *ctx, s32 origin_x, s32 origin_y, + u32 width, u32 height); +int hgsmi_get_mode_hints(struct gen_pool *ctx, unsigned int screens, + struct vbva_modehint *hints); +/** @} */ + +#endif diff --git a/drivers/staging/vboxvideo/vboxvideo_vbe.h b/drivers/staging/vboxvideo/vboxvideo_vbe.h new file mode 100644 index 000000000000..f842f4d9c80a --- /dev/null +++ b/drivers/staging/vboxvideo/vboxvideo_vbe.h @@ -0,0 +1,84 @@ +/* + * Copyright (C) 2006-2017 Oracle Corporation + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef __VBOXVIDEO_VBE_H__
+#define __VBOXVIDEO_VBE_H__
+
+/* GUEST <-> HOST Communication API */
+
+/**
+ * @todo FIXME: Either dynamically ask the host for this or put it somewhere
+ * high in physical memory like 0xE0000000.
+ */
+
+#define VBE_DISPI_BANK_ADDRESS 0xA0000
+#define VBE_DISPI_BANK_SIZE_KB 64
+
+#define VBE_DISPI_MAX_XRES 16384
+#define VBE_DISPI_MAX_YRES 16384
+#define VBE_DISPI_MAX_BPP 32
+
+#define VBE_DISPI_IOPORT_INDEX 0x01CE
+#define VBE_DISPI_IOPORT_DATA 0x01CF
+
+#define VBE_DISPI_IOPORT_DAC_WRITE_INDEX 0x03C8
+#define VBE_DISPI_IOPORT_DAC_DATA 0x03C9
+
+#define VBE_DISPI_INDEX_ID 0x0
+#define VBE_DISPI_INDEX_XRES 0x1
+#define VBE_DISPI_INDEX_YRES 0x2
+#define VBE_DISPI_INDEX_BPP 0x3
+#define VBE_DISPI_INDEX_ENABLE 0x4
+#define VBE_DISPI_INDEX_BANK 0x5
+#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
+#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
+#define VBE_DISPI_INDEX_X_OFFSET 0x8
+#define VBE_DISPI_INDEX_Y_OFFSET 0x9
+#define VBE_DISPI_INDEX_VBOX_VIDEO 0xa
+#define VBE_DISPI_INDEX_FB_BASE_HI 0xb
+
+#define VBE_DISPI_ID0 0xB0C0
+#define VBE_DISPI_ID1 0xB0C1
+#define VBE_DISPI_ID2 0xB0C2
+#define VBE_DISPI_ID3 0xB0C3
+#define VBE_DISPI_ID4 0xB0C4
+
+#define VBE_DISPI_ID_VBOX_VIDEO 0xBE00
+/* The VBOX interface id. Indicates support for VBVA shared memory interface. */
+#define VBE_DISPI_ID_HGSMI 0xBE01
+#define VBE_DISPI_ID_ANYX 0xBE02
+
+#define VBE_DISPI_DISABLED 0x00
+#define VBE_DISPI_ENABLED 0x01
+#define VBE_DISPI_GETCAPS 0x02
+#define VBE_DISPI_8BIT_DAC 0x20
+/**
+ * @note this definition is a BOCHS legacy, used only in the video BIOS
+ * code and ignored by the emulated hardware.
+ */
+#define VBE_DISPI_LFB_ENABLED 0x40
+#define VBE_DISPI_NOCLEARMEM 0x80
+
+#define VGA_PORT_HGSMI_HOST 0x3b0
+#define VGA_PORT_HGSMI_GUEST 0x3d0
+
+#endif
diff --git a/drivers/staging/vboxvideo/vbva_base.c b/drivers/staging/vboxvideo/vbva_base.c
new file mode 100644
index 000000000000..c10c782f94e1
--- /dev/null
+++ b/drivers/staging/vboxvideo/vbva_base.c
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2006-2017 Oracle Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "vbox_drv.h"
+#include "vbox_err.h"
+#include "vboxvideo_guest.h"
+#include "hgsmi_channels.h"
+
+/*
+ * There is a hardware ring buffer in the graphics device video RAM, formerly
+ * in the VBox VMMDev PCI memory space.
+ * All graphics commands go there, serialized by vbva_buffer_begin_update()
+ * and vbva_buffer_end_update().
+ *
+ * free_offset is the write position, data_offset the read position;
+ * free_offset == data_offset means the buffer is empty.
+ * There must always be a gap between data_offset and free_offset when data
+ * are in the buffer.
+ * The guest only changes free_offset, the host only changes data_offset.
+ */
+
+static u32 vbva_buffer_available(const struct vbva_buffer *vbva)
+{
+	s32 diff = vbva->data_offset - vbva->free_offset;
+
+	return diff > 0 ? diff : vbva->data_len + diff;
+}
+
+static void vbva_buffer_place_data_at(struct vbva_buf_ctx *vbva_ctx,
+				      const void *p, u32 len, u32 offset)
+{
+	struct vbva_buffer *vbva = vbva_ctx->vbva;
+	u32 bytes_till_boundary = vbva->data_len - offset;
+	u8 *dst = &vbva->data[offset];
+	s32 diff = len - bytes_till_boundary;
+
+	if (diff <= 0) {
+		/* Chunk will not cross buffer boundary. */
+		memcpy(dst, p, len);
+	} else {
+		/* Chunk crosses buffer boundary. */
+		memcpy(dst, p, bytes_till_boundary);
+		memcpy(&vbva->data[0], (u8 *)p + bytes_till_boundary, diff);
+	}
+}
+
+static void vbva_buffer_flush(struct gen_pool *ctx)
+{
+	struct vbva_flush *p;
+
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_FLUSH);
+	if (!p)
+		return;
+
+	p->reserved = 0;
+
+	hgsmi_buffer_submit(ctx, p);
+	hgsmi_buffer_free(ctx, p);
+}
+
+bool vbva_write(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+		const void *p, u32 len)
+{
+	struct vbva_record *record;
+	struct vbva_buffer *vbva;
+	u32 available;
+
+	vbva = vbva_ctx->vbva;
+	record = vbva_ctx->record;
+
+	if (!vbva || vbva_ctx->buffer_overflow ||
+	    !record || !(record->len_and_flags & VBVA_F_RECORD_PARTIAL))
+		return false;
+
+	available = vbva_buffer_available(vbva);
+
+	while (len > 0) {
+		u32 chunk = len;
+
+		if (chunk >= available) {
+			vbva_buffer_flush(ctx);
+			available = vbva_buffer_available(vbva);
+		}
+
+		if (chunk >= available) {
+			if (WARN_ON(available <= vbva->partial_write_tresh)) {
+				vbva_ctx->buffer_overflow = true;
+				return false;
+			}
+			chunk = available - vbva->partial_write_tresh;
+		}
+
+		vbva_buffer_place_data_at(vbva_ctx, p, chunk,
+					  vbva->free_offset);
+
+		vbva->free_offset = (vbva->free_offset + chunk) %
+				    vbva->data_len;
+		record->len_and_flags += chunk;
+		available -= chunk;
+		len -= chunk;
+		p += chunk;
+	}
+
+	return true;
+}
+
+static bool vbva_inform_host(struct vbva_buf_ctx *vbva_ctx,
+			     struct gen_pool *ctx, s32 screen, bool enable)
+{
+	struct vbva_enable_ex *p;
+	bool ret;
+
+	p = hgsmi_buffer_alloc(ctx, sizeof(*p), HGSMI_CH_VBVA, VBVA_ENABLE);
+	if (!p)
+		return false;
+
+	p->base.flags = enable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
+	p->base.offset = vbva_ctx->buffer_offset;
+	p->base.result = VERR_NOT_SUPPORTED;
+	if (screen >= 0) {
+		p->base.flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
+		p->screen_id = screen;
+	}
+
+	hgsmi_buffer_submit(ctx, p);
+
+	if (enable)
+		ret = RT_SUCCESS(p->base.result);
+	else
+		ret = true;
+
+	hgsmi_buffer_free(ctx, p);
+
+	return ret;
+}
+
+bool vbva_enable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+		 struct vbva_buffer *vbva, s32 screen)
+{
+	bool ret = false;
+
+	memset(vbva, 0, sizeof(*vbva));
+	vbva->partial_write_tresh = 256;
+	vbva->data_len = vbva_ctx->buffer_length - sizeof(struct vbva_buffer);
+	vbva_ctx->vbva = vbva;
+
+	ret = vbva_inform_host(vbva_ctx, ctx, screen, true);
+	if (!ret)
+		vbva_disable(vbva_ctx, ctx, screen);
+
+	return ret;
+}
+
+void vbva_disable(struct vbva_buf_ctx *vbva_ctx, struct gen_pool *ctx,
+		  s32 screen)
+{
+	vbva_ctx->buffer_overflow = false;
+	vbva_ctx->record = NULL;
+	vbva_ctx->vbva = NULL;
+
+	vbva_inform_host(vbva_ctx, ctx, screen, false);
+}
+
+bool vbva_buffer_begin_update(struct vbva_buf_ctx *vbva_ctx,
+			      struct gen_pool *ctx)
+{
+	struct vbva_record *record;
+	u32 next;
+
+	if (!vbva_ctx->vbva ||
+	    !(vbva_ctx->vbva->host_flags.host_events & VBVA_F_MODE_ENABLED))
+		return false;
+
+	WARN_ON(vbva_ctx->buffer_overflow || vbva_ctx->record);
+
+	next = (vbva_ctx->vbva->record_free_index + 1) % VBVA_MAX_RECORDS;
+
+	/* Flush if all slots in the records queue are used */
+	if (next == vbva_ctx->vbva->record_first_index)
+		vbva_buffer_flush(ctx);
+
+	/* If there is still no space after the flush, fail the request */
+	if (next == vbva_ctx->vbva->record_first_index)
+		return false;
+
+	record = &vbva_ctx->vbva->records[vbva_ctx->vbva->record_free_index];
+	record->len_and_flags = VBVA_F_RECORD_PARTIAL;
+	vbva_ctx->vbva->record_free_index = next;
+	/* Remember which record we are using. */
+	vbva_ctx->record = record;
+
+	return true;
+}
+
+void vbva_buffer_end_update(struct vbva_buf_ctx *vbva_ctx)
+{
+	struct vbva_record *record = vbva_ctx->record;
+
+	WARN_ON(!vbva_ctx->vbva || !record ||
+		!(record->len_and_flags & VBVA_F_RECORD_PARTIAL));
+
+	/* Mark the record completed. */
+	record->len_and_flags &= ~VBVA_F_RECORD_PARTIAL;
+
+	vbva_ctx->buffer_overflow = false;
+	vbva_ctx->record = NULL;
+}
+
+void vbva_setup_buffer_context(struct vbva_buf_ctx *vbva_ctx,
+			       u32 buffer_offset, u32 buffer_length)
+{
+	vbva_ctx->buffer_offset = buffer_offset;
+	vbva_ctx->buffer_length = buffer_length;
+}
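With vbva_base.c complete, a userspace-style sketch of the ring accounting described in the comment block above may help; it is illustrative only, not driver code, and the names are placeholders:

#include <assert.h>
#include <stdint.h>

/*
 * Free space seen by the writer: the distance from free_offset (guest
 * write position) to data_offset (host read position), modulo the buffer
 * size. Mirrors vbva_buffer_available() above.
 */
static uint32_t available(uint32_t data_offset, uint32_t free_offset,
			  uint32_t data_len)
{
	int32_t diff = (int32_t)(data_offset - free_offset);

	return diff > 0 ? (uint32_t)diff : data_len + (uint32_t)diff;
}

int main(void)
{
	/* Equal offsets mean an empty buffer, so the whole length is free. */
	assert(available(0, 0, 4096) == 4096);
	/* Writer is 100 bytes ahead of the reader: 3996 bytes remain. */
	assert(available(0, 100, 4096) == 3996);
	/* Writer has wrapped and trails the reader by 96 bytes. */
	assert(available(200, 104, 4096) == 96);
	return 0;
}

The partial_write_tresh guard in vbva_write() keeps free_offset from ever reaching data_offset exactly while data is queued, since that state would be indistinguishable from an empty buffer.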
diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
index 030bec855d86..314ffac50bb8 100644
--- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
+++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c
@@ -3391,7 +3391,6 @@ static int vchiq_probe(struct platform_device *pdev)
 	struct device_node *fw_node;
 	struct rpi_firmware *fw;
 	int err;
-	void *ptr_err;
 
 	fw_node = of_parse_phandle(pdev->dev.of_node, "firmware", 0);
 	if (!fw_node) {
@@ -3427,14 +3426,14 @@ static int vchiq_probe(struct platform_device *pdev)
 
 	/* create sysfs entries */
 	vchiq_class = class_create(THIS_MODULE, DEVICE_NAME);
-	ptr_err = vchiq_class;
-	if (IS_ERR(ptr_err))
+	err = PTR_ERR(vchiq_class);
+	if (IS_ERR(vchiq_class))
 		goto failed_class_create;
 
 	vchiq_dev = device_create(vchiq_class, NULL, vchiq_devid, NULL,
 				  "vchiq");
-	ptr_err = vchiq_dev;
-	if (IS_ERR(ptr_err))
+	err = PTR_ERR(vchiq_dev);
+	if (IS_ERR(vchiq_dev))
 		goto failed_device_create;
 
 	/* create debugfs entries */
@@ -3455,7 +3454,6 @@ failed_device_create:
 	class_destroy(vchiq_class);
 failed_class_create:
 	cdev_del(&vchiq_cdev);
-	err = PTR_ERR(ptr_err);
 failed_cdev_add:
 	unregister_chrdev_region(vchiq_devid, 1);
 failed_platform_init:
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 3fdca2cdd8da..74e4975dd1b1 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -488,15 +488,13 @@ EXPORT_SYMBOL(iscsit_queue_rsp);
 
 void iscsit_aborted_task(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
 {
-	bool scsi_cmd = (cmd->iscsi_opcode == ISCSI_OP_SCSI_CMD);
-
 	spin_lock_bh(&conn->cmd_lock);
 	if (!list_empty(&cmd->i_conn_node) &&
 	    !(cmd->se_cmd.transport_state & CMD_T_FABRIC_STOP))
 		list_del_init(&cmd->i_conn_node);
 	spin_unlock_bh(&conn->cmd_lock);
 
-	__iscsit_free_cmd(cmd, scsi_cmd, true);
+	__iscsit_free_cmd(cmd, true);
 }
 EXPORT_SYMBOL(iscsit_aborted_task);
 
@@ -1251,12 +1249,8 @@ int iscsit_process_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
 	 * execution. These exceptions are processed in CmdSN order using
 	 * iscsit_check_received_cmdsn() in iscsit_get_immediate_data() below.
 	 */
-	if (cmd->sense_reason) {
-		if (cmd->reject_reason)
-			return 0;
-
+	if (cmd->sense_reason)
 		return 1;
-	}
 	/*
 	 * Call directly into transport_generic_new_cmd() to perform
 	 * the backend memory allocation.
 	 */
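The vchiq_probe() hunk above drops the void *ptr_err detour and tests the typed pointers directly. A minimal sketch of the IS_ERR()/PTR_ERR() idiom it adopts; example_probe() and the "example" class name are illustrative, not part of the patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

static struct class *example_class;

static int example_probe(void)
{
	int err;

	example_class = class_create(THIS_MODULE, "example");
	/*
	 * On failure the pointer itself encodes the errno (e.g. -ENOMEM),
	 * so derive err from it and branch on IS_ERR(); the value is simply
	 * unused when the pointer turns out to be valid.
	 */
	err = PTR_ERR(example_class);
	if (IS_ERR(example_class))
		return err;

	return 0;
}

Computing err before the IS_ERR() check mirrors the patch's ordering, which lets each failure path reuse err and removes the old shared err = PTR_ERR(ptr_err) assignment at the error label.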
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 903b667f8e01..f9bc8ec6fb6b 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -47,18 +47,21 @@ static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len) } } -static void chap_gen_challenge( +static int chap_gen_challenge( struct iscsi_conn *conn, int caller, char *c_str, unsigned int *c_len) { + int ret; unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1]; struct iscsi_chap *chap = conn->auth_protocol; memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1); - get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH); + ret = get_random_bytes_wait(chap->challenge, CHAP_CHALLENGE_LENGTH); + if (unlikely(ret)) + return ret; chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge, CHAP_CHALLENGE_LENGTH); /* @@ -69,6 +72,7 @@ static void chap_gen_challenge( pr_debug("[%s] Sending CHAP_C=0x%s\n\n", (caller) ? "server" : "client", challenge_asciihex); + return 0; } static int chap_check_algorithm(const char *a_str) @@ -143,6 +147,7 @@ static struct iscsi_chap *chap_server_open( case CHAP_DIGEST_UNKNOWN: default: pr_err("Unsupported CHAP_A value\n"); + kfree(conn->auth_protocol); return NULL; } @@ -156,7 +161,10 @@ static struct iscsi_chap *chap_server_open( /* * Generate Challenge. */ - chap_gen_challenge(conn, 1, aic_str, aic_len); + if (chap_gen_challenge(conn, 1, aic_str, aic_len) < 0) { + kfree(conn->auth_protocol); + return NULL; + } return chap; } diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index 535a8e06a401..0dd4c45f7575 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -781,6 +781,7 @@ DEF_TPG_ATTRIB(default_erl); DEF_TPG_ATTRIB(t10_pi); DEF_TPG_ATTRIB(fabric_prot_type); DEF_TPG_ATTRIB(tpg_enabled_sendtargets); +DEF_TPG_ATTRIB(login_keys_workaround); static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { &iscsi_tpg_attrib_attr_authentication, @@ -796,6 +797,7 @@ static struct configfs_attribute *lio_target_tpg_attrib_attrs[] = { &iscsi_tpg_attrib_attr_t10_pi, &iscsi_tpg_attrib_attr_fabric_prot_type, &iscsi_tpg_attrib_attr_tpg_enabled_sendtargets, + &iscsi_tpg_attrib_attr_login_keys_workaround, NULL, }; diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 92b96b51d506..e9bdc8b86e7d 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -245,22 +245,26 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn) return 0; } -static void iscsi_login_set_conn_values( +static int iscsi_login_set_conn_values( struct iscsi_session *sess, struct iscsi_conn *conn, __be16 cid) { + int ret; conn->sess = sess; conn->cid = be16_to_cpu(cid); /* * Generate a random Status sequence number (statsn) for the new * iSCSI connection. 
*/ - get_random_bytes(&conn->stat_sn, sizeof(u32)); + ret = get_random_bytes_wait(&conn->stat_sn, sizeof(u32)); + if (unlikely(ret)) + return ret; mutex_lock(&auth_id_lock); conn->auth_id = iscsit_global->auth_id++; mutex_unlock(&auth_id_lock); + return 0; } __printf(2, 3) int iscsi_change_param_sprintf( @@ -306,7 +310,11 @@ static int iscsi_login_zero_tsih_s1( return -ENOMEM; } - iscsi_login_set_conn_values(sess, conn, pdu->cid); + ret = iscsi_login_set_conn_values(sess, conn, pdu->cid); + if (unlikely(ret)) { + kfree(sess); + return ret; + } sess->init_task_tag = pdu->itt; memcpy(&sess->isid, pdu->isid, 6); sess->exp_cmd_sn = be32_to_cpu(pdu->cmdsn); @@ -497,8 +505,7 @@ static int iscsi_login_non_zero_tsih_s1( { struct iscsi_login_req *pdu = (struct iscsi_login_req *)buf; - iscsi_login_set_conn_values(NULL, conn, pdu->cid); - return 0; + return iscsi_login_set_conn_values(NULL, conn, pdu->cid); } /* @@ -554,9 +561,8 @@ static int iscsi_login_non_zero_tsih_s2( atomic_set(&sess->session_continuation, 1); spin_unlock_bh(&sess->conn_lock); - iscsi_login_set_conn_values(sess, conn, pdu->cid); - - if (iscsi_copy_param_list(&conn->param_list, + if (iscsi_login_set_conn_values(sess, conn, pdu->cid) < 0 || + iscsi_copy_param_list(&conn->param_list, conn->tpg->param_list, 0) < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, ISCSI_LOGIN_STATUS_NO_RESOURCES); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 6f88b31242b0..7a6751fecd32 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -655,28 +655,6 @@ err: iscsit_deaccess_np(np, tpg, tpg_np); } -static void iscsi_target_do_cleanup(struct work_struct *work) -{ - struct iscsi_conn *conn = container_of(work, - struct iscsi_conn, login_cleanup_work.work); - struct sock *sk = conn->sock->sk; - struct iscsi_login *login = conn->login; - struct iscsi_np *np = login->np; - struct iscsi_portal_group *tpg = conn->tpg; - struct iscsi_tpg_np *tpg_np = conn->tpg_np; - - pr_debug("Entering iscsi_target_do_cleanup\n"); - - cancel_delayed_work_sync(&conn->login_work); - conn->orig_state_change(sk); - - iscsi_target_restore_sock_callbacks(conn); - iscsi_target_login_drop(conn, login); - iscsit_deaccess_np(np, tpg, tpg_np); - - pr_debug("iscsi_target_do_cleanup done()\n"); -} - static void iscsi_target_sk_state_change(struct sock *sk) { struct iscsi_conn *conn; @@ -886,7 +864,8 @@ static int iscsi_target_handle_csg_zero( SENDER_TARGET, login->rsp_buf, &login->rsp_length, - conn->param_list); + conn->param_list, + conn->tpg->tpg_attrib.login_keys_workaround); if (ret < 0) return -1; @@ -956,7 +935,8 @@ static int iscsi_target_handle_csg_one(struct iscsi_conn *conn, struct iscsi_log SENDER_TARGET, login->rsp_buf, &login->rsp_length, - conn->param_list); + conn->param_list, + conn->tpg->tpg_attrib.login_keys_workaround); if (ret < 0) { iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_INITIATOR_ERR, ISCSI_LOGIN_STATUS_INIT_ERR); @@ -1082,7 +1062,6 @@ int iscsi_target_locate_portal( int sessiontype = 0, ret = 0, tag_num, tag_size; INIT_DELAYED_WORK(&conn->login_work, iscsi_target_do_login_rx); - INIT_DELAYED_WORK(&conn->login_cleanup_work, iscsi_target_do_cleanup); iscsi_target_set_sock_callbacks(conn); login->np = np; @@ -1331,7 +1310,6 @@ int iscsi_target_start_negotiation( if (ret < 0) { cancel_delayed_work_sync(&conn->login_work); - cancel_delayed_work_sync(&conn->login_cleanup_work); iscsi_target_restore_sock_callbacks(conn); 
iscsi_remove_failed_auth_entry(conn); } diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index fce627628200..caab1045742d 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -765,7 +765,8 @@ static int iscsi_check_for_auth_key(char *key) return 0; } -static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param) +static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param, + bool keys_workaround) { if (IS_TYPE_BOOL_AND(param)) { if (!strcmp(param->value, NO)) @@ -773,19 +774,31 @@ static void iscsi_check_proposer_for_optional_reply(struct iscsi_param *param) } else if (IS_TYPE_BOOL_OR(param)) { if (!strcmp(param->value, YES)) SET_PSTATE_REPLY_OPTIONAL(param); - /* - * Required for gPXE iSCSI boot client - */ - if (!strcmp(param->name, IMMEDIATEDATA)) - SET_PSTATE_REPLY_OPTIONAL(param); + + if (keys_workaround) { + /* + * Required for gPXE iSCSI boot client + */ + if (!strcmp(param->name, IMMEDIATEDATA)) + SET_PSTATE_REPLY_OPTIONAL(param); + } } else if (IS_TYPE_NUMBER(param)) { if (!strcmp(param->name, MAXRECVDATASEGMENTLENGTH)) SET_PSTATE_REPLY_OPTIONAL(param); - /* - * Required for gPXE iSCSI boot client - */ - if (!strcmp(param->name, MAXCONNECTIONS)) - SET_PSTATE_REPLY_OPTIONAL(param); + + if (keys_workaround) { + /* + * Required for Mellanox Flexboot PXE boot ROM + */ + if (!strcmp(param->name, FIRSTBURSTLENGTH)) + SET_PSTATE_REPLY_OPTIONAL(param); + + /* + * Required for gPXE iSCSI boot client + */ + if (!strcmp(param->name, MAXCONNECTIONS)) + SET_PSTATE_REPLY_OPTIONAL(param); + } } else if (IS_PHASE_DECLARATIVE(param)) SET_PSTATE_REPLY_OPTIONAL(param); } @@ -1422,7 +1435,8 @@ int iscsi_encode_text_output( u8 sender, char *textbuf, u32 *length, - struct iscsi_param_list *param_list) + struct iscsi_param_list *param_list, + bool keys_workaround) { char *output_buf = NULL; struct iscsi_extra_response *er; @@ -1458,7 +1472,8 @@ int iscsi_encode_text_output( *length += 1; output_buf = textbuf + *length; SET_PSTATE_PROPOSER(param); - iscsi_check_proposer_for_optional_reply(param); + iscsi_check_proposer_for_optional_reply(param, + keys_workaround); pr_debug("Sending key: %s=%s\n", param->name, param->value); } diff --git a/drivers/target/iscsi/iscsi_target_parameters.h b/drivers/target/iscsi/iscsi_target_parameters.h index 9962ccf0ccd7..c47b73f57528 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.h +++ b/drivers/target/iscsi/iscsi_target_parameters.h @@ -46,7 +46,7 @@ extern int iscsi_extract_key_value(char *, char **, char **); extern int iscsi_update_param_value(struct iscsi_param *, char *); extern int iscsi_decode_text_input(u8, u8, char *, u32, struct iscsi_conn *); extern int iscsi_encode_text_output(u8, u8, char *, u32 *, - struct iscsi_param_list *); + struct iscsi_param_list *, bool); extern int iscsi_check_negotiated_keys(struct iscsi_param_list *); extern void iscsi_set_connection_parameters(struct iscsi_conn_ops *, struct iscsi_param_list *); diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 2e7e08dbda48..594d07a1e995 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -227,6 +227,7 @@ static void iscsit_set_default_tpg_attribs(struct iscsi_portal_group *tpg) a->t10_pi = TA_DEFAULT_T10_PI; a->fabric_prot_type = TA_DEFAULT_FABRIC_PROT_TYPE; a->tpg_enabled_sendtargets = TA_DEFAULT_TPG_ENABLED_SENDTARGETS; + 
a->login_keys_workaround = TA_DEFAULT_LOGIN_KEYS_WORKAROUND; } int iscsit_tpg_add_portal_group(struct iscsi_tiqn *tiqn, struct iscsi_portal_group *tpg) @@ -311,11 +312,9 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg) struct iscsi_tiqn *tiqn = tpg->tpg_tiqn; int ret; - spin_lock(&tpg->tpg_state_lock); if (tpg->tpg_state == TPG_STATE_ACTIVE) { pr_err("iSCSI target portal group: %hu is already" " active, ignoring request.\n", tpg->tpgt); - spin_unlock(&tpg->tpg_state_lock); return -EINVAL; } /* @@ -324,10 +323,8 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg) * is enforced (as per default), and remove the NONE option. */ param = iscsi_find_param_from_key(AUTHMETHOD, tpg->param_list); - if (!param) { - spin_unlock(&tpg->tpg_state_lock); + if (!param) return -EINVAL; - } if (tpg->tpg_attrib.authentication) { if (!strcmp(param->value, NONE)) { @@ -341,6 +338,7 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg) goto err; } + spin_lock(&tpg->tpg_state_lock); tpg->tpg_state = TPG_STATE_ACTIVE; spin_unlock(&tpg->tpg_state_lock); @@ -353,7 +351,6 @@ int iscsit_tpg_enable_portal_group(struct iscsi_portal_group *tpg) return 0; err: - spin_unlock(&tpg->tpg_state_lock); return ret; } @@ -899,3 +896,21 @@ int iscsit_ta_tpg_enabled_sendtargets( return 0; } + +int iscsit_ta_login_keys_workaround( + struct iscsi_portal_group *tpg, + u32 flag) +{ + struct iscsi_tpg_attrib *a = &tpg->tpg_attrib; + + if ((flag != 0) && (flag != 1)) { + pr_err("Illegal value %d\n", flag); + return -EINVAL; + } + + a->login_keys_workaround = flag; + pr_debug("iSCSI_TPG[%hu] - TPG enabled bit for login keys workaround: %s ", + tpg->tpgt, (a->login_keys_workaround) ? "ON" : "OFF"); + + return 0; +} diff --git a/drivers/target/iscsi/iscsi_target_tpg.h b/drivers/target/iscsi/iscsi_target_tpg.h index ceba29851167..59fd3cabe89d 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.h +++ b/drivers/target/iscsi/iscsi_target_tpg.h @@ -48,5 +48,6 @@ extern int iscsit_ta_default_erl(struct iscsi_portal_group *, u32); extern int iscsit_ta_t10_pi(struct iscsi_portal_group *, u32); extern int iscsit_ta_fabric_prot_type(struct iscsi_portal_group *, u32); extern int iscsit_ta_tpg_enabled_sendtargets(struct iscsi_portal_group *, u32); +extern int iscsit_ta_login_keys_workaround(struct iscsi_portal_group *, u32); #endif /* ISCSI_TARGET_TPG_H */ diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 7d3e2fcc26a0..1e36f83b5961 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -167,6 +167,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state) cmd->se_cmd.map_tag = tag; cmd->conn = conn; + cmd->data_direction = DMA_NONE; INIT_LIST_HEAD(&cmd->i_conn_node); INIT_LIST_HEAD(&cmd->datain_list); INIT_LIST_HEAD(&cmd->cmd_r2t_list); @@ -711,19 +712,16 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd) } EXPORT_SYMBOL(iscsit_release_cmd); -void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, - bool check_queues) +void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool check_queues) { struct iscsi_conn *conn = cmd->conn; - if (scsi_cmd) { - if (cmd->data_direction == DMA_TO_DEVICE) { - iscsit_stop_dataout_timer(cmd); - iscsit_free_r2ts_from_list(cmd); - } - if (cmd->data_direction == DMA_FROM_DEVICE) - iscsit_free_all_datain_reqs(cmd); + if (cmd->data_direction == DMA_TO_DEVICE) { + iscsit_stop_dataout_timer(cmd); + iscsit_free_r2ts_from_list(cmd); } + if (cmd->data_direction 
== DMA_FROM_DEVICE) + iscsit_free_all_datain_reqs(cmd); if (conn && check_queues) { iscsit_remove_cmd_from_immediate_queue(cmd, conn); @@ -736,50 +734,18 @@ void __iscsit_free_cmd(struct iscsi_cmd *cmd, bool scsi_cmd, void iscsit_free_cmd(struct iscsi_cmd *cmd, bool shutdown) { - struct se_cmd *se_cmd = NULL; + struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL; int rc; - bool op_scsi = false; - /* - * Determine if a struct se_cmd is associated with - * this struct iscsi_cmd. - */ - switch (cmd->iscsi_opcode) { - case ISCSI_OP_SCSI_CMD: - op_scsi = true; - /* - * Fallthrough - */ - case ISCSI_OP_SCSI_TMFUNC: - se_cmd = &cmd->se_cmd; - __iscsit_free_cmd(cmd, op_scsi, shutdown); + + __iscsit_free_cmd(cmd, shutdown); + if (se_cmd) { rc = transport_generic_free_cmd(se_cmd, shutdown); if (!rc && shutdown && se_cmd->se_sess) { - __iscsit_free_cmd(cmd, op_scsi, shutdown); + __iscsit_free_cmd(cmd, shutdown); target_put_sess_cmd(se_cmd); } - break; - case ISCSI_OP_REJECT: - /* - * Handle special case for REJECT when iscsi_add_reject*() has - * overwritten the original iscsi_opcode assignment, and the - * associated cmd->se_cmd needs to be released. - */ - if (cmd->se_cmd.se_tfo != NULL) { - se_cmd = &cmd->se_cmd; - __iscsit_free_cmd(cmd, true, shutdown); - - rc = transport_generic_free_cmd(&cmd->se_cmd, shutdown); - if (!rc && shutdown && se_cmd->se_sess) { - __iscsit_free_cmd(cmd, true, shutdown); - target_put_sess_cmd(se_cmd); - } - break; - } - /* Fall-through */ - default: - __iscsit_free_cmd(cmd, false, shutdown); + } else { iscsit_release_cmd(cmd); - break; } } EXPORT_SYMBOL(iscsit_free_cmd); diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 9e4197af8708..425160565d0c 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -37,7 +37,7 @@ extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_co extern bool iscsit_conn_all_queues_empty(struct iscsi_conn *); extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *); extern void iscsit_release_cmd(struct iscsi_cmd *); -extern void __iscsit_free_cmd(struct iscsi_cmd *, bool, bool); +extern void __iscsit_free_cmd(struct iscsi_cmd *, bool); extern void iscsit_free_cmd(struct iscsi_cmd *, bool); extern int iscsit_check_session_usage_count(struct iscsi_session *); extern void iscsit_dec_session_usage_count(struct iscsi_session *); diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 5091b31b3e56..b6a913e38b30 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -51,19 +51,7 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd); */ static int tcm_loop_check_stop_free(struct se_cmd *se_cmd) { - /* - * Do not release struct se_cmd's containing a valid TMR - * pointer. These will be released directly in tcm_loop_device_reset() - * with transport_generic_free_cmd(). 
- */ - if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) - return 0; - /* - * Release the struct se_cmd, which will make a callback to release - * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd() - */ - transport_generic_free_cmd(se_cmd, 0); - return 1; + return transport_generic_free_cmd(se_cmd, 0); } static void tcm_loop_release_cmd(struct se_cmd *se_cmd) @@ -218,10 +206,8 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, { struct se_cmd *se_cmd = NULL; struct se_session *se_sess; - struct se_portal_group *se_tpg; struct tcm_loop_nexus *tl_nexus; struct tcm_loop_cmd *tl_cmd = NULL; - struct tcm_loop_tmr *tl_tmr = NULL; int ret = TMR_FUNCTION_FAILED, rc; /* @@ -240,55 +226,29 @@ static int tcm_loop_issue_tmr(struct tcm_loop_tpg *tl_tpg, return ret; } - tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL); - if (!tl_tmr) { - pr_err("Unable to allocate memory for tl_tmr\n"); - goto release; - } - init_waitqueue_head(&tl_tmr->tl_tmr_wait); + init_completion(&tl_cmd->tmr_done); se_cmd = &tl_cmd->tl_se_cmd; - se_tpg = &tl_tpg->tl_se_tpg; se_sess = tl_tpg->tl_nexus->se_sess; - /* - * Initialize struct se_cmd descriptor from target_core_mod infrastructure - */ - transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, - DMA_NONE, TCM_SIMPLE_TAG, - &tl_cmd->tl_sense_buf[0]); - rc = core_tmr_alloc_req(se_cmd, tl_tmr, tmr, GFP_KERNEL); + rc = target_submit_tmr(se_cmd, se_sess, tl_cmd->tl_sense_buf, lun, + NULL, tmr, GFP_KERNEL, task, + TARGET_SCF_ACK_KREF); if (rc < 0) goto release; + wait_for_completion(&tl_cmd->tmr_done); + ret = se_cmd->se_tmr_req->response; + target_put_sess_cmd(se_cmd); - if (tmr == TMR_ABORT_TASK) - se_cmd->se_tmr_req->ref_task_tag = task; +out: + return ret; - /* - * Locate the underlying TCM struct se_lun - */ - if (transport_lookup_tmr_lun(se_cmd, lun) < 0) { - ret = TMR_LUN_DOES_NOT_EXIST; - goto release; - } - /* - * Queue the TMR to TCM Core and sleep waiting for - * tcm_loop_queue_tm_rsp() to wake us up. - */ - transport_generic_handle_tmr(se_cmd); - wait_event(tl_tmr->tl_tmr_wait, atomic_read(&tl_tmr->tmr_complete)); - /* - * The TMR LUN_RESET has completed, check the response status and - * then release allocations. - */ - ret = se_cmd->se_tmr_req->response; release: if (se_cmd) - transport_generic_free_cmd(se_cmd, 1); + transport_generic_free_cmd(se_cmd, 0); else kmem_cache_free(tcm_loop_cmd_cache, tl_cmd); - kfree(tl_tmr); - return ret; + goto out; } static int tcm_loop_abort_task(struct scsi_cmnd *sc) @@ -669,14 +629,11 @@ static int tcm_loop_queue_status(struct se_cmd *se_cmd) static void tcm_loop_queue_tm_rsp(struct se_cmd *se_cmd) { - struct se_tmr_req *se_tmr = se_cmd->se_tmr_req; - struct tcm_loop_tmr *tl_tmr = se_tmr->fabric_tmr_ptr; - /* - * The SCSI EH thread will be sleeping on se_tmr->tl_tmr_wait, go ahead - * and wake up the wait_queue_head_t in tcm_loop_device_reset() - */ - atomic_set(&tl_tmr->tmr_complete, 1); - wake_up(&tl_tmr->tl_tmr_wait); + struct tcm_loop_cmd *tl_cmd = container_of(se_cmd, + struct tcm_loop_cmd, tl_se_cmd); + + /* Wake up tcm_loop_issue_tmr(). 
*/ + complete(&tl_cmd->tmr_done); } static void tcm_loop_aborted_task(struct se_cmd *se_cmd) diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h index a8a230b4e6b5..3acc43c05117 100644 --- a/drivers/target/loopback/tcm_loop.h +++ b/drivers/target/loopback/tcm_loop.h @@ -16,15 +16,11 @@ struct tcm_loop_cmd { /* The TCM I/O descriptor that is accessed via container_of() */ struct se_cmd tl_se_cmd; struct work_struct work; + struct completion tmr_done; /* Sense buffer that will be mapped into outgoing status */ unsigned char tl_sense_buf[TRANSPORT_SENSE_BUFFER]; }; -struct tcm_loop_tmr { - atomic_t tmr_complete; - wait_queue_head_t tl_tmr_wait; -}; - struct tcm_loop_nexus { /* * Pointer to TCM session for I_T Nexus diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index fc4a9c303d55..a91b7c25ffd4 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -205,8 +205,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) /* * TARGET PORT GROUP */ - buf[off++] = ((tg_pt_gp->tg_pt_gp_id >> 8) & 0xff); - buf[off++] = (tg_pt_gp->tg_pt_gp_id & 0xff); + put_unaligned_be16(tg_pt_gp->tg_pt_gp_id, &buf[off]); + off += 2; off++; /* Skip over Reserved */ /* @@ -235,8 +235,8 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd) /* * Set RELATIVE TARGET PORT IDENTIFIER */ - buf[off++] = ((lun->lun_rtpi >> 8) & 0xff); - buf[off++] = (lun->lun_rtpi & 0xff); + put_unaligned_be16(lun->lun_rtpi, &buf[off]); + off += 2; rd_len += 4; } spin_unlock(&tg_pt_gp->tg_pt_gp_lock); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index 0326607e5ab8..7e87d952bb7a 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -1085,6 +1085,24 @@ static ssize_t block_size_store(struct config_item *item, return count; } +static ssize_t alua_support_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = to_attrib(item); + u8 flags = da->da_dev->transport->transport_flags; + + return snprintf(page, PAGE_SIZE, "%d\n", + flags & TRANSPORT_FLAG_PASSTHROUGH_ALUA ? 0 : 1); +} + +static ssize_t pgr_support_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = to_attrib(item); + u8 flags = da->da_dev->transport->transport_flags; + + return snprintf(page, PAGE_SIZE, "%d\n", + flags & TRANSPORT_FLAG_PASSTHROUGH_PGR ? 
0 : 1); +} + CONFIGFS_ATTR(, emulate_model_alias); CONFIGFS_ATTR(, emulate_dpo); CONFIGFS_ATTR(, emulate_fua_write); @@ -1116,6 +1134,8 @@ CONFIGFS_ATTR(, unmap_granularity); CONFIGFS_ATTR(, unmap_granularity_alignment); CONFIGFS_ATTR(, unmap_zeroes_data); CONFIGFS_ATTR(, max_write_same_len); +CONFIGFS_ATTR_RO(, alua_support); +CONFIGFS_ATTR_RO(, pgr_support); /* * dev_attrib attributes for devices using the target core SBC/SPC @@ -1154,6 +1174,8 @@ struct configfs_attribute *sbc_attrib_attrs[] = { &attr_unmap_granularity_alignment, &attr_unmap_zeroes_data, &attr_max_write_same_len, + &attr_alua_support, + &attr_pgr_support, NULL, }; EXPORT_SYMBOL(sbc_attrib_attrs); @@ -1168,6 +1190,8 @@ struct configfs_attribute *passthrough_attrib_attrs[] = { &attr_hw_block_size, &attr_hw_max_sectors, &attr_hw_queue_depth, + &attr_alua_support, + &attr_pgr_support, NULL, }; EXPORT_SYMBOL(passthrough_attrib_attrs); @@ -2236,7 +2260,11 @@ static void target_core_dev_release(struct config_item *item) target_free_device(dev); } -static struct configfs_item_operations target_core_dev_item_ops = { +/* + * Used in target_core_fabric_configfs.c to verify valid se_device symlink + * within target_fabric_port_link() + */ +struct configfs_item_operations target_core_dev_item_ops = { .release = target_core_dev_release, }; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 8add07f387f9..e8dd6da164b2 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -49,8 +49,9 @@ #include "target_core_pr.h" #include "target_core_ua.h" -DEFINE_MUTEX(g_device_mutex); -LIST_HEAD(g_device_list); +static DEFINE_MUTEX(device_mutex); +static LIST_HEAD(device_list); +static DEFINE_IDR(devices_idr); static struct se_hba *lun0_hba; /* not static, needed by tpg.c */ @@ -168,11 +169,20 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun) rcu_read_lock(); deve = target_nacl_find_deve(nacl, unpacked_lun); if (deve) { - se_cmd->se_lun = rcu_dereference(deve->se_lun); se_lun = rcu_dereference(deve->se_lun); + + if (!percpu_ref_tryget_live(&se_lun->lun_ref)) { + se_lun = NULL; + goto out_unlock; + } + + se_cmd->se_lun = rcu_dereference(deve->se_lun); se_cmd->pr_res_key = deve->pr_res_key; se_cmd->orig_fe_lun = unpacked_lun; + se_cmd->se_cmd_flags |= SCF_SE_LUN_CMD; + se_cmd->lun_ref_active = true; } +out_unlock: rcu_read_unlock(); if (!se_lun) { @@ -182,9 +192,6 @@ int transport_lookup_tmr_lun(struct se_cmd *se_cmd, u64 unpacked_lun) unpacked_lun); return -ENODEV; } - /* - * XXX: Add percpu se_lun->lun_ref reference count for TMR - */ se_cmd->se_dev = rcu_dereference_raw(se_lun->lun_se_dev); se_tmr->tmr_dev = rcu_dereference_raw(se_lun->lun_se_dev); @@ -756,19 +763,16 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) if (!dev) return NULL; - dev->dev_link_magic = SE_DEV_LINK_MAGIC; dev->se_hba = hba; dev->transport = hba->backend->ops; dev->prot_length = sizeof(struct t10_pi_tuple); dev->hba_index = hba->hba_index; - INIT_LIST_HEAD(&dev->dev_list); INIT_LIST_HEAD(&dev->dev_sep_list); INIT_LIST_HEAD(&dev->dev_tmr_list); INIT_LIST_HEAD(&dev->delayed_cmd_list); INIT_LIST_HEAD(&dev->state_list); INIT_LIST_HEAD(&dev->qf_cmd_list); - INIT_LIST_HEAD(&dev->g_dev_node); spin_lock_init(&dev->execute_task_lock); spin_lock_init(&dev->delayed_cmd_lock); spin_lock_init(&dev->dev_reservation_lock); @@ -851,7 +855,7 @@ bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, attrib->unmap_granularity = 
q->limits.discard_granularity / block_size; attrib->unmap_granularity_alignment = q->limits.discard_alignment / block_size; - attrib->unmap_zeroes_data = 0; + attrib->unmap_zeroes_data = (q->limits.max_write_zeroes_sectors); return true; } EXPORT_SYMBOL(target_configure_unmap_from_queue); @@ -875,10 +879,79 @@ sector_t target_to_linux_sector(struct se_device *dev, sector_t lb) } EXPORT_SYMBOL(target_to_linux_sector); +/** + * target_find_device - find a se_device by its dev_index + * @id: dev_index + * @do_depend: true if caller needs target_depend_item to be done + * + * If do_depend is true, the caller must do a target_undepend_item + * when finished using the device. + * + * If do_depend is false, the caller must be called in a configfs + * callback or during removal. + */ +struct se_device *target_find_device(int id, bool do_depend) +{ + struct se_device *dev; + + mutex_lock(&device_mutex); + dev = idr_find(&devices_idr, id); + if (dev && do_depend && target_depend_item(&dev->dev_group.cg_item)) + dev = NULL; + mutex_unlock(&device_mutex); + return dev; +} +EXPORT_SYMBOL(target_find_device); + +struct devices_idr_iter { + int (*fn)(struct se_device *dev, void *data); + void *data; +}; + +static int target_devices_idr_iter(int id, void *p, void *data) +{ + struct devices_idr_iter *iter = data; + struct se_device *dev = p; + + /* + * We add the device early to the idr, so it can be used + * by backend modules during configuration. We do not want + * to allow other callers to access partially setup devices, + * so we skip them here. + */ + if (!(dev->dev_flags & DF_CONFIGURED)) + return 0; + + return iter->fn(dev, iter->data); +} + +/** + * target_for_each_device - iterate over configured devices + * @fn: iterator function + * @data: pointer to data that will be passed to fn + * + * fn must return 0 to continue looping over devices. non-zero will break + * from the loop and return that value to the caller. + */ +int target_for_each_device(int (*fn)(struct se_device *dev, void *data), + void *data) +{ + struct devices_idr_iter iter; + int ret; + + iter.fn = fn; + iter.data = data; + + mutex_lock(&device_mutex); + ret = idr_for_each(&devices_idr, target_devices_idr_iter, &iter); + mutex_unlock(&device_mutex); + return ret; +} + int target_configure_device(struct se_device *dev) { struct se_hba *hba = dev->se_hba; - int ret; + int ret, id; if (dev->dev_flags & DF_CONFIGURED) { pr_err("se_dev->se_dev_ptr already set for storage" @@ -886,9 +959,26 @@ int target_configure_device(struct se_device *dev) return -EEXIST; } + /* + * Add early so modules like tcmu can use during its + * configuration. + */ + mutex_lock(&device_mutex); + /* + * Use cyclic to try and avoid collisions with devices + * that were recently removed. + */ + id = idr_alloc_cyclic(&devices_idr, dev, 0, INT_MAX, GFP_KERNEL); + mutex_unlock(&device_mutex); + if (id < 0) { + ret = -ENOMEM; + goto out; + } + dev->dev_index = id; + ret = dev->transport->configure_device(dev); if (ret) - goto out; + goto out_free_index; /* * XXX: there is not much point to have two different values here.. 
*/ @@ -903,12 +993,11 @@ int target_configure_device(struct se_device *dev) dev->dev_attrib.hw_block_size); dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; - dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); dev->creation_time = get_jiffies_64(); ret = core_setup_alua(dev); if (ret) - goto out; + goto out_free_index; /* * Startup the struct se_device processing thread @@ -946,16 +1035,16 @@ int target_configure_device(struct se_device *dev) hba->dev_count++; spin_unlock(&hba->device_lock); - mutex_lock(&g_device_mutex); - list_add_tail(&dev->g_dev_node, &g_device_list); - mutex_unlock(&g_device_mutex); - dev->dev_flags |= DF_CONFIGURED; return 0; out_free_alua: core_alua_free_lu_gp_mem(dev); +out_free_index: + mutex_lock(&device_mutex); + idr_remove(&devices_idr, dev->dev_index); + mutex_unlock(&device_mutex); out: se_release_vpd_for_dev(dev); return ret; @@ -970,9 +1059,11 @@ void target_free_device(struct se_device *dev) if (dev->dev_flags & DF_CONFIGURED) { destroy_workqueue(dev->tmr_wq); - mutex_lock(&g_device_mutex); - list_del(&dev->g_dev_node); - mutex_unlock(&g_device_mutex); + dev->transport->destroy_device(dev); + + mutex_lock(&device_mutex); + idr_remove(&devices_idr, dev->dev_index); + mutex_unlock(&device_mutex); spin_lock(&hba->device_lock); hba->dev_count--; @@ -1087,19 +1178,19 @@ passthrough_parse_cdb(struct se_cmd *cmd, TRANSPORT_FLAG_PASSTHROUGH_PGR)) { if (cdb[0] == PERSISTENT_RESERVE_IN) { cmd->execute_cmd = target_scsi3_emulate_pr_in; - size = (cdb[7] << 8) + cdb[8]; + size = get_unaligned_be16(&cdb[7]); return target_cmd_size_check(cmd, size); } if (cdb[0] == PERSISTENT_RESERVE_OUT) { cmd->execute_cmd = target_scsi3_emulate_pr_out; - size = (cdb[7] << 8) + cdb[8]; + size = get_unaligned_be32(&cdb[5]); return target_cmd_size_check(cmd, size); } if (cdb[0] == RELEASE || cdb[0] == RELEASE_10) { cmd->execute_cmd = target_scsi2_reservation_release; if (cdb[0] == RELEASE_10) - size = (cdb[7] << 8) | cdb[8]; + size = get_unaligned_be16(&cdb[7]); else size = cmd->data_length; return target_cmd_size_check(cmd, size); @@ -1107,7 +1198,7 @@ passthrough_parse_cdb(struct se_cmd *cmd, if (cdb[0] == RESERVE || cdb[0] == RESERVE_10) { cmd->execute_cmd = target_scsi2_reservation_reserve; if (cdb[0] == RESERVE_10) - size = (cdb[7] << 8) | cdb[8]; + size = get_unaligned_be16(&cdb[7]); else size = cmd->data_length; return target_cmd_size_check(cmd, size); @@ -1126,7 +1217,7 @@ passthrough_parse_cdb(struct se_cmd *cmd, case WRITE_16: case WRITE_VERIFY: case WRITE_VERIFY_12: - case 0x8e: /* WRITE_VERIFY_16 */ + case WRITE_VERIFY_16: case COMPARE_AND_WRITE: case XDWRITEREAD_10: cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; @@ -1135,7 +1226,7 @@ passthrough_parse_cdb(struct se_cmd *cmd, switch (get_unaligned_be16(&cdb[8])) { case READ_32: case WRITE_32: - case 0x0c: /* WRITE_VERIFY_32 */ + case WRITE_VERIFY_32: case XDWRITEREAD_32: cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB; break; diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index d1e6cab8e3d3..e9e917cc6441 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -65,6 +65,8 @@ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) pr_debug("Setup generic %s\n", __stringify(_name)); \ } +static struct configfs_item_operations target_fabric_port_item_ops; + /* Start of tfc_tpg_mappedlun_cit */ static int target_fabric_mappedlun_link( @@ -72,19 +74,20 @@ static int 
target_fabric_mappedlun_link( struct config_item *lun_ci) { struct se_dev_entry *deve; - struct se_lun *lun = container_of(to_config_group(lun_ci), - struct se_lun, lun_group); + struct se_lun *lun; struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), struct se_lun_acl, se_lun_group); struct se_portal_group *se_tpg; struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s; bool lun_access_ro; - if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) { - pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:" - " %p to struct lun: %p\n", lun_ci, lun); + if (!lun_ci->ci_type || + lun_ci->ci_type->ct_item_ops != &target_fabric_port_item_ops) { + pr_err("Bad lun_ci, not a valid lun_ci pointer: %p\n", lun_ci); return -EFAULT; } + lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); + /* * Ensure that the source port exists */ @@ -620,6 +623,8 @@ static struct configfs_attribute *target_fabric_port_attrs[] = { NULL, }; +extern struct configfs_item_operations target_core_dev_item_ops; + static int target_fabric_port_link( struct config_item *lun_ci, struct config_item *se_dev_ci) @@ -628,16 +633,16 @@ static int target_fabric_port_link( struct se_lun *lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); struct se_portal_group *se_tpg; - struct se_device *dev = - container_of(to_config_group(se_dev_ci), struct se_device, dev_group); + struct se_device *dev; struct target_fabric_configfs *tf; int ret; - if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) { - pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:" - " %p to struct se_device: %p\n", se_dev_ci, dev); + if (!se_dev_ci->ci_type || + se_dev_ci->ci_type->ct_item_ops != &target_core_dev_item_ops) { + pr_err("Bad se_dev_ci, not a valid se_dev_ci pointer: %p\n", se_dev_ci); return -EFAULT; } + dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group); if (!(dev->dev_flags & DF_CONFIGURED)) { pr_err("se_device not configured yet, cannot port link\n"); diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index cb6497ce4b61..508da345b73f 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -34,6 +34,7 @@ #include <linux/ctype.h> #include <linux/spinlock.h> #include <linux/export.h> +#include <asm/unaligned.h> #include <scsi/scsi_proto.h> @@ -216,8 +217,7 @@ static int iscsi_get_pr_transport_id( if (padding != 0) len += padding; - buf[2] = ((len >> 8) & 0xff); - buf[3] = (len & 0xff); + put_unaligned_be16(len, &buf[2]); /* * Increment value for total payload + header length for * full status descriptor @@ -306,7 +306,7 @@ static char *iscsi_parse_pr_out_transport_id( */ if (out_tid_len) { /* The shift works thanks to integer promotion rules */ - add_len = (buf[2] << 8) | buf[3]; + add_len = get_unaligned_be16(&buf[2]); tid_len = strlen(&buf[4]); tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index e921948415c7..24cf11d9e50a 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -237,13 +237,17 @@ static void fd_dev_call_rcu(struct rcu_head *p) static void fd_free_device(struct se_device *dev) { + call_rcu(&dev->rcu_head, fd_dev_call_rcu); +} + +static void fd_destroy_device(struct se_device *dev) +{ struct fd_dev *fd_dev = FD_DEV(dev); if (fd_dev->fd_file) { filp_close(fd_dev->fd_file, NULL); fd_dev->fd_file = NULL; } - call_rcu(&dev->rcu_head, 
fd_dev_call_rcu); } static int fd_do_rw(struct se_cmd *cmd, struct file *fd, @@ -826,6 +830,7 @@ static const struct target_backend_ops fileio_ops = { .detach_hba = fd_detach_hba, .alloc_device = fd_alloc_device, .configure_device = fd_configure_device, + .destroy_device = fd_destroy_device, .free_device = fd_free_device, .parse_cdb = fd_parse_cdb, .set_configfs_dev_params = fd_set_configfs_dev_params, diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index c05d38016556..ee7c7fa55dad 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -86,6 +86,7 @@ static int iblock_configure_device(struct se_device *dev) struct block_device *bd = NULL; struct blk_integrity *bi; fmode_t mode; + unsigned int max_write_zeroes_sectors; int ret = -ENOMEM; if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) { @@ -129,7 +130,11 @@ static int iblock_configure_device(struct se_device *dev) * Enable write same emulation for IBLOCK and use 0xFFFF as * the smaller WRITE_SAME(10) only has a two-byte block count. */ - dev->dev_attrib.max_write_same_len = 0xFFFF; + max_write_zeroes_sectors = bdev_write_zeroes_sectors(bd); + if (max_write_zeroes_sectors) + dev->dev_attrib.max_write_same_len = max_write_zeroes_sectors; + else + dev->dev_attrib.max_write_same_len = 0xFFFF; if (blk_queue_nonrot(q)) dev->dev_attrib.is_nonrot = 1; @@ -185,14 +190,17 @@ static void iblock_dev_call_rcu(struct rcu_head *p) static void iblock_free_device(struct se_device *dev) { + call_rcu(&dev->rcu_head, iblock_dev_call_rcu); +} + +static void iblock_destroy_device(struct se_device *dev) +{ struct iblock_dev *ib_dev = IBLOCK_DEV(dev); if (ib_dev->ibd_bd != NULL) blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); if (ib_dev->ibd_bio_set != NULL) bioset_free(ib_dev->ibd_bio_set); - - call_rcu(&dev->rcu_head, iblock_dev_call_rcu); } static unsigned long long iblock_emulate_read_cap_with_block_size( @@ -415,28 +423,31 @@ iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb) } static sense_reason_t -iblock_execute_write_same_direct(struct block_device *bdev, struct se_cmd *cmd) +iblock_execute_zero_out(struct block_device *bdev, struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; struct scatterlist *sg = &cmd->t_data_sg[0]; - struct page *page = NULL; - int ret; + unsigned char *buf, zero = 0x00, *p = &zero; + int rc, ret; - if (sg->offset) { - page = alloc_page(GFP_KERNEL); - if (!page) - return TCM_OUT_OF_RESOURCES; - sg_copy_to_buffer(sg, cmd->t_data_nents, page_address(page), - dev->dev_attrib.block_size); - } + buf = kmap(sg_page(sg)) + sg->offset; + if (!buf) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + /* + * Fall back to block_execute_write_same() slow-path if + * incoming WRITE_SAME payload does not contain zeros. + */ + rc = memcmp(buf, p, cmd->data_length); + kunmap(sg_page(sg)); - ret = blkdev_issue_write_same(bdev, + if (rc) + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + + ret = blkdev_issue_zeroout(bdev, target_to_linux_sector(dev, cmd->t_task_lba), target_to_linux_sector(dev, sbc_get_write_same_sectors(cmd)), - GFP_KERNEL, page ? 
page : sg_page(sg)); - if (page) - __free_page(page); + GFP_KERNEL, false); if (ret) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; @@ -472,8 +483,10 @@ iblock_execute_write_same(struct se_cmd *cmd) return TCM_INVALID_CDB_FIELD; } - if (bdev_write_same(bdev)) - return iblock_execute_write_same_direct(bdev, cmd); + if (bdev_write_zeroes_sectors(bdev)) { + if (!iblock_execute_zero_out(bdev, cmd)) + return 0; + } ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL); if (!ibr) @@ -848,6 +861,7 @@ static const struct target_backend_ops iblock_ops = { .detach_hba = iblock_detach_hba, .alloc_device = iblock_alloc_device, .configure_device = iblock_configure_device, + .destroy_device = iblock_destroy_device, .free_device = iblock_free_device, .parse_cdb = iblock_parse_cdb, .set_configfs_dev_params = iblock_set_configfs_dev_params, diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 0912de7c0cf8..f30e8ac13386 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -56,9 +56,6 @@ struct target_fabric_configfs { extern struct t10_alua_lu_gp *default_lu_gp; /* target_core_device.c */ -extern struct mutex g_device_mutex; -extern struct list_head g_device_list; - int core_alloc_rtpi(struct se_lun *lun, struct se_device *dev); struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16); void target_pr_kref_release(struct kref *); @@ -87,6 +84,8 @@ void core_dev_release_virtual_lun0(void); struct se_device *target_alloc_device(struct se_hba *hba, const char *name); int target_configure_device(struct se_device *dev); void target_free_device(struct se_device *); +int target_for_each_device(int (*fn)(struct se_device *dev, void *data), + void *data); /* target_core_configfs.c */ void target_setup_backend_cits(struct target_backend *); diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 129ca572673c..6d5def64db61 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1562,10 +1562,7 @@ core_scsi3_decode_spec_i_port( * first extract TransportID Parameter Data Length, and make sure * the value matches up to the SCSI expected data transfer length. 
*/ - tpdl = (buf[24] & 0xff) << 24; - tpdl |= (buf[25] & 0xff) << 16; - tpdl |= (buf[26] & 0xff) << 8; - tpdl |= buf[27] & 0xff; + tpdl = get_unaligned_be32(&buf[24]); if ((tpdl + 28) != cmd->data_length) { pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header" @@ -3221,12 +3218,8 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key, goto out_put_pr_reg; } - rtpi = (buf[18] & 0xff) << 8; - rtpi |= buf[19] & 0xff; - tid_len = (buf[20] & 0xff) << 24; - tid_len |= (buf[21] & 0xff) << 16; - tid_len |= (buf[22] & 0xff) << 8; - tid_len |= buf[23] & 0xff; + rtpi = get_unaligned_be16(&buf[18]); + tid_len = get_unaligned_be32(&buf[20]); transport_kunmap_data_sg(cmd); buf = NULL; @@ -3552,16 +3545,6 @@ out_put_pr_reg: return ret; } -static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb) -{ - unsigned int __v1, __v2; - - __v1 = (cdb[0] << 24) | (cdb[1] << 16) | (cdb[2] << 8) | cdb[3]; - __v2 = (cdb[4] << 24) | (cdb[5] << 16) | (cdb[6] << 8) | cdb[7]; - - return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; -} - /* * See spc4r17 section 6.14 Table 170 */ @@ -3602,7 +3585,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd) if (cmd->data_length < 24) { pr_warn("SPC-PR: Received PR OUT parameter list" " length too small: %u\n", cmd->data_length); - return TCM_INVALID_PARAMETER_LIST; + return TCM_PARAMETER_LIST_LENGTH_ERROR; } /* @@ -3619,8 +3602,8 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd) /* * From PERSISTENT_RESERVE_OUT parameter list (payload) */ - res_key = core_scsi3_extract_reservation_key(&buf[0]); - sa_res_key = core_scsi3_extract_reservation_key(&buf[8]); + res_key = get_unaligned_be64(&buf[0]); + sa_res_key = get_unaligned_be64(&buf[8]); /* * REGISTER_AND_MOVE uses a different SA parameter list containing * SCSI TransportIDs. @@ -3646,7 +3629,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd) /* * SPEC_I_PT=1 is only valid for Service action: REGISTER */ - if (spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER)) + if (spec_i_pt && (sa != PRO_REGISTER)) return TCM_INVALID_PARAMETER_LIST; /* @@ -3658,11 +3641,11 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd) * the sense key set to ILLEGAL REQUEST, and the additional sense * code set to PARAMETER LIST LENGTH ERROR. 
*/ - if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) && + if (!spec_i_pt && (sa != PRO_REGISTER_AND_MOVE) && (cmd->data_length != 24)) { pr_warn("SPC-PR: Received PR OUT illegal parameter" " list length: %u\n", cmd->data_length); - return TCM_INVALID_PARAMETER_LIST; + return TCM_PARAMETER_LIST_LENGTH_ERROR; } /* @@ -3702,7 +3685,7 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd) break; default: pr_err("Unknown PERSISTENT_RESERVE_OUT service" - " action: 0x%02x\n", cdb[1] & 0x1f); + " action: 0x%02x\n", sa); return TCM_INVALID_CDB_FIELD; } @@ -3734,10 +3717,7 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd) if (!buf) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff); - buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff); - buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff); - buf[3] = (dev->t10_pr.pr_generation & 0xff); + put_unaligned_be32(dev->t10_pr.pr_generation, buf); spin_lock(&dev->t10_pr.registration_lock); list_for_each_entry(pr_reg, &dev->t10_pr.registration_list, @@ -3749,23 +3729,13 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd) if ((add_len + 8) > (cmd->data_length - 8)) break; - buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff); - buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff); - buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff); - buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff); - buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff); - buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff); - buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff); - buf[off++] = (pr_reg->pr_res_key & 0xff); - + put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); + off += 8; add_len += 8; } spin_unlock(&dev->t10_pr.registration_lock); - buf[4] = ((add_len >> 24) & 0xff); - buf[5] = ((add_len >> 16) & 0xff); - buf[6] = ((add_len >> 8) & 0xff); - buf[7] = (add_len & 0xff); + put_unaligned_be32(add_len, &buf[4]); transport_kunmap_data_sg(cmd); @@ -3796,10 +3766,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd) if (!buf) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff); - buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff); - buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff); - buf[3] = (dev->t10_pr.pr_generation & 0xff); + put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]); spin_lock(&dev->dev_reservation_lock); pr_reg = dev->dev_pr_res_holder; @@ -3807,10 +3774,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd) /* * Set the hardcoded Additional Length */ - buf[4] = ((add_len >> 24) & 0xff); - buf[5] = ((add_len >> 16) & 0xff); - buf[6] = ((add_len >> 8) & 0xff); - buf[7] = (add_len & 0xff); + put_unaligned_be32(add_len, &buf[4]); if (cmd->data_length < 22) goto err; @@ -3837,14 +3801,7 @@ core_scsi3_pri_read_reservation(struct se_cmd *cmd) else pr_res_key = pr_reg->pr_res_key; - buf[8] = ((pr_res_key >> 56) & 0xff); - buf[9] = ((pr_res_key >> 48) & 0xff); - buf[10] = ((pr_res_key >> 40) & 0xff); - buf[11] = ((pr_res_key >> 32) & 0xff); - buf[12] = ((pr_res_key >> 24) & 0xff); - buf[13] = ((pr_res_key >> 16) & 0xff); - buf[14] = ((pr_res_key >> 8) & 0xff); - buf[15] = (pr_res_key & 0xff); + put_unaligned_be64(pr_res_key, &buf[8]); /* * Set the SCOPE and TYPE */ @@ -3882,8 +3839,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd) if (!buf) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - buf[0] = ((add_len >> 8) & 0xff); - buf[1] = (add_len & 0xff); + put_unaligned_be16(add_len, &buf[0]); buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. 
*/ buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */ buf[2] |= 0x04; /* ATP_C: All Target Ports Capable bit */ @@ -3947,10 +3903,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) if (!buf) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; - buf[0] = ((dev->t10_pr.pr_generation >> 24) & 0xff); - buf[1] = ((dev->t10_pr.pr_generation >> 16) & 0xff); - buf[2] = ((dev->t10_pr.pr_generation >> 8) & 0xff); - buf[3] = (dev->t10_pr.pr_generation & 0xff); + put_unaligned_be32(dev->t10_pr.pr_generation, &buf[0]); spin_lock(&dev->dev_reservation_lock); if (dev->dev_pr_res_holder) { @@ -3992,14 +3945,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) /* * Set RESERVATION KEY */ - buf[off++] = ((pr_reg->pr_res_key >> 56) & 0xff); - buf[off++] = ((pr_reg->pr_res_key >> 48) & 0xff); - buf[off++] = ((pr_reg->pr_res_key >> 40) & 0xff); - buf[off++] = ((pr_reg->pr_res_key >> 32) & 0xff); - buf[off++] = ((pr_reg->pr_res_key >> 24) & 0xff); - buf[off++] = ((pr_reg->pr_res_key >> 16) & 0xff); - buf[off++] = ((pr_reg->pr_res_key >> 8) & 0xff); - buf[off++] = (pr_reg->pr_res_key & 0xff); + put_unaligned_be64(pr_reg->pr_res_key, &buf[off]); + off += 8; off += 4; /* Skip Over Reserved area */ /* @@ -4041,8 +3988,8 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) if (!pr_reg->pr_reg_all_tg_pt) { u16 sep_rtpi = pr_reg->tg_pt_sep_rtpi; - buf[off++] = ((sep_rtpi >> 8) & 0xff); - buf[off++] = (sep_rtpi & 0xff); + put_unaligned_be16(sep_rtpi, &buf[off]); + off += 2; } else off += 2; /* Skip over RELATIVE TARGET PORT IDENTIFIER */ @@ -4062,10 +4009,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) /* * Set the ADDITIONAL DESCRIPTOR LENGTH */ - buf[off++] = ((desc_len >> 24) & 0xff); - buf[off++] = ((desc_len >> 16) & 0xff); - buf[off++] = ((desc_len >> 8) & 0xff); - buf[off++] = (desc_len & 0xff); + put_unaligned_be32(desc_len, &buf[off]); /* * Size of full descriptor header minus TransportID * containing ($FABRIC_MOD specific) initiator device/port @@ -4082,10 +4026,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd) /* * Set ADDITIONAL_LENGTH */ - buf[4] = ((add_len >> 24) & 0xff); - buf[5] = ((add_len >> 16) & 0xff); - buf[6] = ((add_len >> 8) & 0xff); - buf[7] = (add_len & 0xff); + put_unaligned_be32(add_len, &buf[4]); transport_kunmap_data_sg(cmd); diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index ceec0211e84e..7c69b4a9694d 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -168,7 +168,7 @@ static void pscsi_tape_read_blocksize(struct se_device *dev, /* * If MODE_SENSE still returns zero, set the default value to 1024. 
*/ - sdev->sector_size = (buf[9] << 16) | (buf[10] << 8) | (buf[11]); + sdev->sector_size = get_unaligned_be24(&buf[9]); out_free: if (!sdev->sector_size) sdev->sector_size = 1024; @@ -209,8 +209,7 @@ pscsi_get_inquiry_vpd_serial(struct scsi_device *sdev, struct t10_wwn *wwn) cdb[0] = INQUIRY; cdb[1] = 0x01; /* Query VPD */ cdb[2] = 0x80; /* Unit Serial Number */ - cdb[3] = (INQUIRY_VPD_SERIAL_LEN >> 8) & 0xff; - cdb[4] = (INQUIRY_VPD_SERIAL_LEN & 0xff); + put_unaligned_be16(INQUIRY_VPD_SERIAL_LEN, &cdb[3]); ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, INQUIRY_VPD_SERIAL_LEN, NULL, HZ, 1, NULL); @@ -245,8 +244,7 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev, cdb[0] = INQUIRY; cdb[1] = 0x01; /* Query VPD */ cdb[2] = 0x83; /* Device Identifier */ - cdb[3] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN >> 8) & 0xff; - cdb[4] = (INQUIRY_VPD_DEVICE_IDENTIFIER_LEN & 0xff); + put_unaligned_be16(INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, &cdb[3]); ret = scsi_execute_req(sdev, cdb, DMA_FROM_DEVICE, buf, INQUIRY_VPD_DEVICE_IDENTIFIER_LEN, @@ -254,7 +252,7 @@ pscsi_get_inquiry_vpd_device_ident(struct scsi_device *sdev, if (ret) goto out; - page_len = (buf[2] << 8) | buf[3]; + page_len = get_unaligned_be16(&buf[2]); while (page_len > 0) { /* Grab a pointer to the Identification descriptor */ page_83 = &buf[off]; @@ -384,7 +382,7 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) spin_unlock_irq(sh->host_lock); /* * Claim exclusive struct block_device access to struct scsi_device - * for TYPE_DISK using supplied udev_path + * for TYPE_DISK and TYPE_ZBC using supplied udev_path */ bd = blkdev_get_by_path(dev->udev_path, FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv); @@ -402,8 +400,9 @@ static int pscsi_create_type_disk(struct se_device *dev, struct scsi_device *sd) return ret; } - pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%llu\n", - phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun); + pr_debug("CORE_PSCSI[%d] - Added TYPE_%s for %d:%d:%d:%llu\n", + phv->phv_host_id, sd->type == TYPE_DISK ? 
"DISK" : "ZBC", + sh->host_no, sd->channel, sd->id, sd->lun); return 0; } @@ -522,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev) */ switch (sd->type) { case TYPE_DISK: + case TYPE_ZBC: ret = pscsi_create_type_disk(dev, sd); break; default: @@ -566,6 +566,11 @@ static void pscsi_dev_call_rcu(struct rcu_head *p) static void pscsi_free_device(struct se_device *dev) { + call_rcu(&dev->rcu_head, pscsi_dev_call_rcu); +} + +static void pscsi_destroy_device(struct se_device *dev) +{ struct pscsi_dev_virt *pdv = PSCSI_DEV(dev); struct pscsi_hba_virt *phv = dev->se_hba->hba_ptr; struct scsi_device *sd = pdv->pdv_sd; @@ -573,9 +578,11 @@ static void pscsi_free_device(struct se_device *dev) if (sd) { /* * Release exclusive pSCSI internal struct block_device claim for - * struct scsi_device with TYPE_DISK from pscsi_create_type_disk() + * struct scsi_device with TYPE_DISK or TYPE_ZBC + * from pscsi_create_type_disk() */ - if ((sd->type == TYPE_DISK) && pdv->pdv_bd) { + if ((sd->type == TYPE_DISK || sd->type == TYPE_ZBC) && + pdv->pdv_bd) { blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL); pdv->pdv_bd = NULL; @@ -594,15 +601,13 @@ static void pscsi_free_device(struct se_device *dev) pdv->pdv_sd = NULL; } - call_rcu(&dev->rcu_head, pscsi_dev_call_rcu); } -static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, - unsigned char *sense_buffer) +static void pscsi_complete_cmd(struct se_cmd *cmd, u8 scsi_status, + unsigned char *req_sense) { struct pscsi_dev_virt *pdv = PSCSI_DEV(cmd->se_dev); struct scsi_device *sd = pdv->pdv_sd; - int result; struct pscsi_plugin_task *pt = cmd->priv; unsigned char *cdb; /* @@ -613,7 +618,6 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, return; cdb = &pt->pscsi_cdb[0]; - result = pt->pscsi_result; /* * Hack to make sure that Write-Protect modepage is set if R/O mode is * forced. @@ -622,7 +626,7 @@ static void pscsi_transport_complete(struct se_cmd *cmd, struct scatterlist *sg, goto after_mode_sense; if (((cdb[0] == MODE_SENSE) || (cdb[0] == MODE_SENSE_10)) && - (status_byte(result) << 1) == SAM_STAT_GOOD) { + scsi_status == SAM_STAT_GOOD) { bool read_only = target_lun_is_rdonly(cmd); if (read_only) { @@ -657,40 +661,36 @@ after_mode_sense: * storage engine. 
*/ if (((cdb[0] == MODE_SELECT) || (cdb[0] == MODE_SELECT_10)) && - (status_byte(result) << 1) == SAM_STAT_GOOD) { + scsi_status == SAM_STAT_GOOD) { unsigned char *buf; u16 bdl; u32 blocksize; - buf = sg_virt(&sg[0]); + buf = sg_virt(&cmd->t_data_sg[0]); if (!buf) { pr_err("Unable to get buf for scatterlist\n"); goto after_mode_select; } if (cdb[0] == MODE_SELECT) - bdl = (buf[3]); + bdl = buf[3]; else - bdl = (buf[6] << 8) | (buf[7]); + bdl = get_unaligned_be16(&buf[6]); if (!bdl) goto after_mode_select; if (cdb[0] == MODE_SELECT) - blocksize = (buf[9] << 16) | (buf[10] << 8) | - (buf[11]); + blocksize = get_unaligned_be24(&buf[9]); else - blocksize = (buf[13] << 16) | (buf[14] << 8) | - (buf[15]); + blocksize = get_unaligned_be24(&buf[13]); sd->sector_size = blocksize; } after_mode_select: - if (sense_buffer && (status_byte(result) & CHECK_CONDITION)) { - memcpy(sense_buffer, pt->pscsi_sense, TRANSPORT_SENSE_BUFFER); - cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; - } + if (scsi_status == SAM_STAT_CHECK_CONDITION) + transport_copy_sense_to_cmd(cmd, req_sense); } enum { @@ -1002,7 +1002,8 @@ pscsi_execute_cmd(struct se_cmd *cmd) req->end_io_data = cmd; scsi_req(req)->cmd_len = scsi_command_size(pt->pscsi_cdb); scsi_req(req)->cmd = &pt->pscsi_cdb[0]; - if (pdv->pdv_sd->type == TYPE_DISK) + if (pdv->pdv_sd->type == TYPE_DISK || + pdv->pdv_sd->type == TYPE_ZBC) req->timeout = PS_TIMEOUT_DISK; else req->timeout = PS_TIMEOUT_OTHER; @@ -1047,30 +1048,29 @@ static void pscsi_req_done(struct request *req, blk_status_t status) { struct se_cmd *cmd = req->end_io_data; struct pscsi_plugin_task *pt = cmd->priv; + int result = scsi_req(req)->result; + u8 scsi_status = status_byte(result) << 1; - pt->pscsi_result = scsi_req(req)->result; - pt->pscsi_resid = scsi_req(req)->resid_len; - - cmd->scsi_status = status_byte(pt->pscsi_result) << 1; - if (cmd->scsi_status) { + if (scsi_status) { pr_debug("PSCSI Status Byte exception at cmd: %p CDB:" " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], - pt->pscsi_result); + result); } - switch (host_byte(pt->pscsi_result)) { + pscsi_complete_cmd(cmd, scsi_status, scsi_req(req)->sense); + + switch (host_byte(result)) { case DID_OK: - target_complete_cmd(cmd, cmd->scsi_status); + target_complete_cmd(cmd, scsi_status); break; default: pr_debug("PSCSI Host Byte exception at cmd: %p CDB:" " 0x%02x Result: 0x%08x\n", cmd, pt->pscsi_cdb[0], - pt->pscsi_result); + result); target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION); break; } - memcpy(pt->pscsi_sense, scsi_req(req)->sense, TRANSPORT_SENSE_BUFFER); __blk_put_request(req->q, req); kfree(pt); } @@ -1086,8 +1086,8 @@ static const struct target_backend_ops pscsi_ops = { .pmode_enable_hba = pscsi_pmode_enable_hba, .alloc_device = pscsi_alloc_device, .configure_device = pscsi_configure_device, + .destroy_device = pscsi_destroy_device, .free_device = pscsi_free_device, - .transport_complete = pscsi_transport_complete, .parse_cdb = pscsi_parse_cdb, .set_configfs_dev_params = pscsi_set_configfs_dev_params, .show_configfs_dev_params = pscsi_show_configfs_dev_params, diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index 8a02fa47c7e8..b86fb0e1b783 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -23,10 +23,6 @@ struct scsi_device; struct Scsi_Host; struct pscsi_plugin_task { - unsigned char pscsi_sense[TRANSPORT_SENSE_BUFFER]; - int pscsi_direction; - int pscsi_result; - u32 pscsi_resid; unsigned char pscsi_cdb[0]; } ____cacheline_aligned; diff 
--git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 20253d04103f..a6e8106abd6f 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -339,10 +339,14 @@ static void rd_dev_call_rcu(struct rcu_head *p) static void rd_free_device(struct se_device *dev) { + call_rcu(&dev->rcu_head, rd_dev_call_rcu); +} + +static void rd_destroy_device(struct se_device *dev) +{ struct rd_dev *rd_dev = RD_DEV(dev); rd_release_device_space(rd_dev); - call_rcu(&dev->rcu_head, rd_dev_call_rcu); } static struct rd_dev_sg_table *rd_get_sg_table(struct rd_dev *rd_dev, u32 page) @@ -554,7 +558,7 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev, struct rd_dev *rd_dev = RD_DEV(dev); char *orig, *ptr, *opts; substring_t args[MAX_OPT_ARGS]; - int ret = 0, arg, token; + int arg, token; opts = kstrdup(page, GFP_KERNEL); if (!opts) @@ -589,7 +593,7 @@ static ssize_t rd_set_configfs_dev_params(struct se_device *dev, } kfree(orig); - return (!ret) ? count : ret; + return count; } static ssize_t rd_show_configfs_dev_params(struct se_device *dev, char *b) @@ -651,6 +655,7 @@ static const struct target_backend_ops rd_mcp_ops = { .detach_hba = rd_detach_hba, .alloc_device = rd_alloc_device, .configure_device = rd_configure_device, + .destroy_device = rd_destroy_device, .free_device = rd_free_device, .parse_cdb = rd_parse_cdb, .set_configfs_dev_params = rd_set_configfs_dev_params, diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 4316f7b65fb7..750a04ed0e93 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -71,14 +71,8 @@ sbc_emulate_readcapacity(struct se_cmd *cmd) else blocks = (u32)blocks_long; - buf[0] = (blocks >> 24) & 0xff; - buf[1] = (blocks >> 16) & 0xff; - buf[2] = (blocks >> 8) & 0xff; - buf[3] = blocks & 0xff; - buf[4] = (dev->dev_attrib.block_size >> 24) & 0xff; - buf[5] = (dev->dev_attrib.block_size >> 16) & 0xff; - buf[6] = (dev->dev_attrib.block_size >> 8) & 0xff; - buf[7] = dev->dev_attrib.block_size & 0xff; + put_unaligned_be32(blocks, &buf[0]); + put_unaligned_be32(dev->dev_attrib.block_size, &buf[4]); rbuf = transport_kmap_data_sg(cmd); if (rbuf) { @@ -102,18 +96,8 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) unsigned long long blocks = dev->transport->get_blocks(dev); memset(buf, 0, sizeof(buf)); - buf[0] = (blocks >> 56) & 0xff; - buf[1] = (blocks >> 48) & 0xff; - buf[2] = (blocks >> 40) & 0xff; - buf[3] = (blocks >> 32) & 0xff; - buf[4] = (blocks >> 24) & 0xff; - buf[5] = (blocks >> 16) & 0xff; - buf[6] = (blocks >> 8) & 0xff; - buf[7] = blocks & 0xff; - buf[8] = (dev->dev_attrib.block_size >> 24) & 0xff; - buf[9] = (dev->dev_attrib.block_size >> 16) & 0xff; - buf[10] = (dev->dev_attrib.block_size >> 8) & 0xff; - buf[11] = dev->dev_attrib.block_size & 0xff; + put_unaligned_be64(blocks, &buf[0]); + put_unaligned_be32(dev->dev_attrib.block_size, &buf[8]); /* * Set P_TYPE and PROT_EN bits for DIF support */ @@ -134,8 +118,8 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd) if (dev->transport->get_alignment_offset_lbas) { u16 lalba = dev->transport->get_alignment_offset_lbas(dev); - buf[14] = (lalba >> 8) & 0x3f; - buf[15] = lalba & 0xff; + + put_unaligned_be16(lalba, &buf[14]); } /* @@ -262,18 +246,17 @@ static inline u32 transport_get_sectors_6(unsigned char *cdb) static inline u32 transport_get_sectors_10(unsigned char *cdb) { - return (u32)(cdb[7] << 8) + cdb[8]; + return get_unaligned_be16(&cdb[7]); } static inline u32 transport_get_sectors_12(unsigned 
char *cdb) { - return (u32)(cdb[6] << 24) + (cdb[7] << 16) + (cdb[8] << 8) + cdb[9]; + return get_unaligned_be32(&cdb[6]); } static inline u32 transport_get_sectors_16(unsigned char *cdb) { - return (u32)(cdb[10] << 24) + (cdb[11] << 16) + - (cdb[12] << 8) + cdb[13]; + return get_unaligned_be32(&cdb[10]); } /* @@ -281,29 +264,23 @@ static inline u32 transport_get_sectors_16(unsigned char *cdb) */ static inline u32 transport_get_sectors_32(unsigned char *cdb) { - return (u32)(cdb[28] << 24) + (cdb[29] << 16) + - (cdb[30] << 8) + cdb[31]; + return get_unaligned_be32(&cdb[28]); } static inline u32 transport_lba_21(unsigned char *cdb) { - return ((cdb[1] & 0x1f) << 16) | (cdb[2] << 8) | cdb[3]; + return get_unaligned_be24(&cdb[1]) & 0x1fffff; } static inline u32 transport_lba_32(unsigned char *cdb) { - return (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; + return get_unaligned_be32(&cdb[2]); } static inline unsigned long long transport_lba_64(unsigned char *cdb) { - unsigned int __v1, __v2; - - __v1 = (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | cdb[5]; - __v2 = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; - - return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; + return get_unaligned_be64(&cdb[2]); } /* @@ -311,12 +288,7 @@ static inline unsigned long long transport_lba_64(unsigned char *cdb) */ static inline unsigned long long transport_lba_64_ext(unsigned char *cdb) { - unsigned int __v1, __v2; - - __v1 = (cdb[12] << 24) | (cdb[13] << 16) | (cdb[14] << 8) | cdb[15]; - __v2 = (cdb[16] << 24) | (cdb[17] << 16) | (cdb[18] << 8) | cdb[19]; - - return ((unsigned long long)__v2) | (unsigned long long)__v1 << 32; + return get_unaligned_be64(&cdb[12]); } static sense_reason_t @@ -1005,6 +977,12 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) break; } case COMPARE_AND_WRITE: + if (!dev->dev_attrib.emulate_caw) { + pr_err_ratelimited("se_device %s/%s (vpd_unit_serial %s) reject" + " COMPARE_AND_WRITE\n", dev->se_hba->backend->ops->name, + dev->dev_group.cg_item.ci_name, dev->t10_wwn.unit_serial); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } sectors = cdb[13]; /* * Currently enforce COMPARE_AND_WRITE for a single sector @@ -1045,8 +1023,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) cmd->t_task_cdb[1] & 0x1f); return TCM_INVALID_CDB_FIELD; } - size = (cdb[10] << 24) | (cdb[11] << 16) | - (cdb[12] << 8) | cdb[13]; + size = get_unaligned_be32(&cdb[10]); break; case SYNCHRONIZE_CACHE: case SYNCHRONIZE_CACHE_16: @@ -1450,7 +1427,7 @@ sbc_dif_verify(struct se_cmd *cmd, sector_t start, unsigned int sectors, (unsigned long long)sector, sdt->guard_tag, sdt->app_tag, be32_to_cpu(sdt->ref_tag)); - if (sdt->app_tag == cpu_to_be16(0xffff)) { + if (sdt->app_tag == T10_PI_APP_ESCAPE) { dsg_off += block_size; goto next; } diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 2a91ed3ef380..cb0461a10808 100644 --- a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -287,8 +287,8 @@ check_t10_vend_desc: /* Skip over Obsolete field in RTPI payload * in Table 472 */ off += 2; - buf[off++] = ((lun->lun_rtpi >> 8) & 0xff); - buf[off++] = (lun->lun_rtpi & 0xff); + put_unaligned_be16(lun->lun_rtpi, &buf[off]); + off += 2; len += 8; /* Header size + Designation descriptor */ /* * Target port group identifier, see spc4r17 @@ -316,8 +316,8 @@ check_t10_vend_desc: off++; /* Skip over Reserved */ buf[off++] = 4; /* DESIGNATOR LENGTH */ off += 2; /* Skip over Reserved Field */ - buf[off++] = ((tg_pt_gp_id >> 
8) & 0xff); - buf[off++] = (tg_pt_gp_id & 0xff); + put_unaligned_be16(tg_pt_gp_id, &buf[off]); + off += 2; len += 8; /* Header size + Designation descriptor */ /* * Logical Unit Group identifier, see spc4r17 @@ -343,8 +343,8 @@ check_lu_gp: off++; /* Skip over Reserved */ buf[off++] = 4; /* DESIGNATOR LENGTH */ off += 2; /* Skip over Reserved Field */ - buf[off++] = ((lu_gp_id >> 8) & 0xff); - buf[off++] = (lu_gp_id & 0xff); + put_unaligned_be16(lu_gp_id, &buf[off]); + off += 2; len += 8; /* Header size + Designation descriptor */ /* * SCSI name string designator, see spc4r17 @@ -431,8 +431,7 @@ check_scsi_name: /* Header size + Designation descriptor */ len += (scsi_target_len + 4); } - buf[2] = ((len >> 8) & 0xff); - buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */ + put_unaligned_be16(len, &buf[2]); /* Page Length for VPD 0x83 */ return 0; } EXPORT_SYMBOL(spc_emulate_evpd_83); @@ -1288,7 +1287,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) cmd->execute_cmd = spc_emulate_modeselect; break; case MODE_SELECT_10: - *size = (cdb[7] << 8) + cdb[8]; + *size = get_unaligned_be16(&cdb[7]); cmd->execute_cmd = spc_emulate_modeselect; break; case MODE_SENSE: @@ -1296,25 +1295,25 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) cmd->execute_cmd = spc_emulate_modesense; break; case MODE_SENSE_10: - *size = (cdb[7] << 8) + cdb[8]; + *size = get_unaligned_be16(&cdb[7]); cmd->execute_cmd = spc_emulate_modesense; break; case LOG_SELECT: case LOG_SENSE: - *size = (cdb[7] << 8) + cdb[8]; + *size = get_unaligned_be16(&cdb[7]); break; case PERSISTENT_RESERVE_IN: - *size = (cdb[7] << 8) + cdb[8]; + *size = get_unaligned_be16(&cdb[7]); cmd->execute_cmd = target_scsi3_emulate_pr_in; break; case PERSISTENT_RESERVE_OUT: - *size = (cdb[7] << 8) + cdb[8]; + *size = get_unaligned_be32(&cdb[5]); cmd->execute_cmd = target_scsi3_emulate_pr_out; break; case RELEASE: case RELEASE_10: if (cdb[0] == RELEASE_10) - *size = (cdb[7] << 8) | cdb[8]; + *size = get_unaligned_be16(&cdb[7]); else *size = cmd->data_length; @@ -1327,7 +1326,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) * Assume the passthrough or $FABRIC_MOD will tell us about it. */ if (cdb[0] == RESERVE_10) - *size = (cdb[7] << 8) | cdb[8]; + *size = get_unaligned_be16(&cdb[7]); else *size = cmd->data_length; @@ -1338,7 +1337,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) cmd->execute_cmd = spc_emulate_request_sense; break; case INQUIRY: - *size = (cdb[3] << 8) + cdb[4]; + *size = get_unaligned_be16(&cdb[3]); /* * Do implicit HEAD_OF_QUEUE processing for INQUIRY. 
@@ -1349,7 +1348,7 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) break; case SECURITY_PROTOCOL_IN: case SECURITY_PROTOCOL_OUT: - *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; + *size = get_unaligned_be32(&cdb[6]); break; case EXTENDED_COPY: *size = get_unaligned_be32(&cdb[10]); @@ -1361,19 +1360,18 @@ spc_parse_cdb(struct se_cmd *cmd, unsigned int *size) break; case READ_ATTRIBUTE: case WRITE_ATTRIBUTE: - *size = (cdb[10] << 24) | (cdb[11] << 16) | - (cdb[12] << 8) | cdb[13]; + *size = get_unaligned_be32(&cdb[10]); break; case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: - *size = (cdb[3] << 8) | cdb[4]; + *size = get_unaligned_be16(&cdb[3]); break; case WRITE_BUFFER: - *size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8]; + *size = get_unaligned_be24(&cdb[6]); break; case REPORT_LUNS: cmd->execute_cmd = spc_emulate_report_luns; - *size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9]; + *size = get_unaligned_be32(&cdb[6]); /* * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS * See spc4r17 section 5.3 diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 13f47bf4d16b..e22847bd79b9 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -355,20 +355,10 @@ static void core_tmr_drain_state_list( cmd = list_entry(drain_task_list.next, struct se_cmd, state_list); list_del_init(&cmd->state_list); - pr_debug("LUN_RESET: %s cmd: %p" - " ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d" - "cdb: 0x%02x\n", - (preempt_and_abort_list) ? "Preempt" : "", cmd, - cmd->tag, 0, - cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, - cmd->t_task_cdb[0]); - pr_debug("LUN_RESET: ITT[0x%08llx] - pr_res_key: 0x%016Lx" - " -- CMD_T_ACTIVE: %d" - " CMD_T_STOP: %d CMD_T_SENT: %d\n", - cmd->tag, cmd->pr_res_key, - (cmd->transport_state & CMD_T_ACTIVE) != 0, - (cmd->transport_state & CMD_T_STOP) != 0, - (cmd->transport_state & CMD_T_SENT) != 0); + target_show_cmd("LUN_RESET: ", cmd); + pr_debug("LUN_RESET: ITT[0x%08llx] - %s pr_res_key: 0x%016Lx\n", + cmd->tag, (preempt_and_abort_list) ? "preempt" : "", + cmd->pr_res_key); /* * If the command may be queued onto a workqueue cancel it now. 
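The hunks above keep swapping open-coded shift-and-mask byte handling for the kernel's get_unaligned_be*()/put_unaligned_be*() helpers from asm/unaligned.h. A minimal standalone sketch of the byte-order contract those helpers provide — userspace reimplementations under assumed sketch_* names, not the kernel's own code:

/*
 * Userspace sketch (assumed names, for illustration only) showing that the
 * get/put_unaligned_be*() style helpers are equivalent to the open-coded
 * shift-and-mask sequences this series removes.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t sketch_get_unaligned_be32(const uint8_t *p)
{
	/* Big-endian: most significant byte first, any alignment. */
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static void sketch_put_unaligned_be16(uint16_t v, uint8_t *p)
{
	p[0] = (v >> 8) & 0xff;
	p[1] = v & 0xff;
}

int main(void)
{
	/* PR OUT style: open-coded length extraction vs. the helper. */
	uint8_t buf[4] = { 0x00, 0x01, 0x02, 0x03 };
	uint32_t open_coded = (buf[0] << 24) | (buf[1] << 16) |
			      (buf[2] << 8) | buf[3];
	assert(open_coded == sketch_get_unaligned_be32(buf));

	/* INQUIRY style: write an allocation length into cdb[3..4]. */
	uint8_t cdb[6] = { 0 };
	sketch_put_unaligned_be16(0x00fc, &cdb[3]);
	assert(cdb[3] == 0x00 && cdb[4] == 0xfc);
	return 0;
}

Either way the on-wire layout stays fixed big-endian regardless of host byte order, which is why the helpers can replace the shifts one-for-one.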
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 310d9e55c6eb..36913734c6bc 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -576,7 +576,6 @@ struct se_lun *core_tpg_alloc_lun( return ERR_PTR(-ENOMEM); } lun->unpacked_lun = unpacked_lun; - lun->lun_link_magic = SE_LUN_LINK_MAGIC; atomic_set(&lun->lun_acl_count, 0); init_completion(&lun->lun_ref_comp); init_completion(&lun->lun_shutdown_comp); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index f1b3a46bdcaf..97fed9a298bd 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -252,7 +252,7 @@ int transport_alloc_session_tags(struct se_session *se_sess, int rc; se_sess->sess_cmd_map = kzalloc(tag_num * tag_size, - GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); + GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL); if (!se_sess->sess_cmd_map) { se_sess->sess_cmd_map = vzalloc(tag_num * tag_size); if (!se_sess->sess_cmd_map) { @@ -704,23 +704,43 @@ static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) return cmd->sense_buffer; } +void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense) +{ + unsigned char *cmd_sense_buf; + unsigned long flags; + + spin_lock_irqsave(&cmd->t_state_lock, flags); + cmd_sense_buf = transport_get_sense_buffer(cmd); + if (!cmd_sense_buf) { + spin_unlock_irqrestore(&cmd->t_state_lock, flags); + return; + } + + cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; + memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length); + spin_unlock_irqrestore(&cmd->t_state_lock, flags); +} +EXPORT_SYMBOL(transport_copy_sense_to_cmd); + void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) { struct se_device *dev = cmd->se_dev; - int success = scsi_status == GOOD; + int success; unsigned long flags; cmd->scsi_status = scsi_status; - spin_lock_irqsave(&cmd->t_state_lock, flags); - - if (dev && dev->transport->transport_complete) { - dev->transport->transport_complete(cmd, - cmd->t_data_sg, - transport_get_sense_buffer(cmd)); + switch (cmd->scsi_status) { + case SAM_STAT_CHECK_CONDITION: if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) success = 1; + else + success = 0; + break; + default: + success = 1; + break; } /* @@ -730,6 +750,15 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) if (cmd->transport_state & CMD_T_ABORTED || cmd->transport_state & CMD_T_STOP) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); + /* + * If COMPARE_AND_WRITE was stopped by __transport_wait_for_tasks(), + * release se_device->caw_sem obtained by sbc_compare_and_write() + * since target_complete_ok_work() or target_complete_failure_work() + * won't be called to invoke the normal CAW completion callbacks. 
+ */ + if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { + up(&dev->caw_sem); + } complete_all(&cmd->t_transport_stop_comp); return; } else if (!success) { @@ -1239,6 +1268,7 @@ void transport_init_se_cmd( init_completion(&cmd->t_transport_stop_comp); init_completion(&cmd->cmd_wait_comp); spin_lock_init(&cmd->t_state_lock); + INIT_WORK(&cmd->work, NULL); kref_init(&cmd->cmd_kref); cmd->se_tfo = tfo; @@ -1590,9 +1620,33 @@ static void target_complete_tmr_failure(struct work_struct *work) se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; se_cmd->se_tfo->queue_tm_rsp(se_cmd); + transport_lun_remove_cmd(se_cmd); transport_cmd_check_stop_to_fabric(se_cmd); } +static bool target_lookup_lun_from_tag(struct se_session *se_sess, u64 tag, + u64 *unpacked_lun) +{ + struct se_cmd *se_cmd; + unsigned long flags; + bool ret = false; + + spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); + list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) { + if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) + continue; + + if (se_cmd->tag == tag) { + *unpacked_lun = se_cmd->orig_fe_lun; + ret = true; + break; + } + } + spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags); + + return ret; +} + /** * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd * for TMR CDBs @@ -1640,19 +1694,31 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, core_tmr_release_req(se_cmd->se_tmr_req); return ret; } + /* + * If this is ABORT_TASK with no explicit fabric provided LUN, + * go ahead and search active session tags for a match to figure + * out unpacked_lun for the original se_cmd. + */ + if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) { + if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun)) + goto failure; + } ret = transport_lookup_tmr_lun(se_cmd, unpacked_lun); - if (ret) { - /* - * For callback during failure handling, push this work off - * to process context with TMR_LUN_DOES_NOT_EXIST status. - */ - INIT_WORK(&se_cmd->work, target_complete_tmr_failure); - schedule_work(&se_cmd->work); - return 0; - } + if (ret) + goto failure; + transport_generic_handle_tmr(se_cmd); return 0; + + /* + * For callback during failure handling, push this work off + * to process context with TMR_LUN_DOES_NOT_EXIST status. 
+ */ +failure: + INIT_WORK(&se_cmd->work, target_complete_tmr_failure); + schedule_work(&se_cmd->work); + return 0; } EXPORT_SYMBOL(target_submit_tmr); @@ -1667,15 +1733,9 @@ void transport_generic_request_failure(struct se_cmd *cmd, if (transport_check_aborted_status(cmd, 1)) return; - pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08llx" - " CDB: 0x%02x\n", cmd, cmd->tag, cmd->t_task_cdb[0]); - pr_debug("-----[ i_state: %d t_state: %d sense_reason: %d\n", - cmd->se_tfo->get_cmd_state(cmd), - cmd->t_state, sense_reason); - pr_debug("-----[ CMD_T_ACTIVE: %d CMD_T_STOP: %d CMD_T_SENT: %d\n", - (cmd->transport_state & CMD_T_ACTIVE) != 0, - (cmd->transport_state & CMD_T_STOP) != 0, - (cmd->transport_state & CMD_T_SENT) != 0); + pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", + sense_reason); + target_show_cmd("-----[ ", cmd); /* * For SAM Task Attribute emulation for failed struct se_cmd @@ -2668,6 +2728,108 @@ int target_put_sess_cmd(struct se_cmd *se_cmd) } EXPORT_SYMBOL(target_put_sess_cmd); +static const char *data_dir_name(enum dma_data_direction d) +{ + switch (d) { + case DMA_BIDIRECTIONAL: return "BIDI"; + case DMA_TO_DEVICE: return "WRITE"; + case DMA_FROM_DEVICE: return "READ"; + case DMA_NONE: return "NONE"; + } + + return "(?)"; +} + +static const char *cmd_state_name(enum transport_state_table t) +{ + switch (t) { + case TRANSPORT_NO_STATE: return "NO_STATE"; + case TRANSPORT_NEW_CMD: return "NEW_CMD"; + case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; + case TRANSPORT_PROCESSING: return "PROCESSING"; + case TRANSPORT_COMPLETE: return "COMPLETE"; + case TRANSPORT_ISTATE_PROCESSING: + return "ISTATE_PROCESSING"; + case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; + case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; + case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; + } + + return "(?)"; +} + +static void target_append_str(char **str, const char *txt) +{ + char *prev = *str; + + *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : + kstrdup(txt, GFP_ATOMIC); + kfree(prev); +} + +/* + * Convert a transport state bitmask into a string. The caller is + * responsible for freeing the returned pointer. 
+ */ +static char *target_ts_to_str(u32 ts) +{ + char *str = NULL; + + if (ts & CMD_T_ABORTED) + target_append_str(&str, "aborted"); + if (ts & CMD_T_ACTIVE) + target_append_str(&str, "active"); + if (ts & CMD_T_COMPLETE) + target_append_str(&str, "complete"); + if (ts & CMD_T_SENT) + target_append_str(&str, "sent"); + if (ts & CMD_T_STOP) + target_append_str(&str, "stop"); + if (ts & CMD_T_FABRIC_STOP) + target_append_str(&str, "fabric_stop"); + + return str; +} + +static const char *target_tmf_name(enum tcm_tmreq_table tmf) +{ + switch (tmf) { + case TMR_ABORT_TASK: return "ABORT_TASK"; + case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; + case TMR_CLEAR_ACA: return "CLEAR_ACA"; + case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; + case TMR_LUN_RESET: return "LUN_RESET"; + case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; + case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; + case TMR_UNKNOWN: break; + } + return "(?)"; +} + +void target_show_cmd(const char *pfx, struct se_cmd *cmd) +{ + char *ts_str = target_ts_to_str(cmd->transport_state); + const u8 *cdb = cmd->t_task_cdb; + struct se_tmr_req *tmf = cmd->se_tmr_req; + + if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { + pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", + pfx, cdb[0], cdb[1], cmd->tag, + data_dir_name(cmd->data_direction), + cmd->se_tfo->get_cmd_state(cmd), + cmd_state_name(cmd->t_state), cmd->data_length, + kref_read(&cmd->cmd_kref), ts_str); + } else { + pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", + pfx, target_tmf_name(tmf->function), cmd->tag, + tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), + cmd_state_name(cmd->t_state), + kref_read(&cmd->cmd_kref), ts_str); + } + kfree(ts_str); +} +EXPORT_SYMBOL(target_show_cmd); + /* target_sess_cmd_list_set_waiting - Flag all commands in * sess_cmd_list to complete cmd_wait_comp. Set * sess_tearing_down so no more commands are queued. 
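target_show_cmd() above relies on target_ts_to_str()/target_append_str() building a comma-separated list of transport-state flag names, appending one name at a time and replacing the previous allocation. A self-contained userspace sketch of that append-and-replace pattern, with libc standing in for kasprintf()/kstrdup() and hypothetical flag bits and names:

/*
 * Sketch of the append-and-replace string building used by
 * target_ts_to_str(); the bit values and names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void append_str(char **str, const char *txt)
{
	char *prev = *str;
	char *next;

	if (prev) {
		size_t len = strlen(prev) + strlen(txt) + 2; /* ',' and NUL */

		next = malloc(len);
		if (!next)
			return;	/* allocation failed: keep the old string */
		snprintf(next, len, "%s,%s", prev, txt);
	} else {
		next = strdup(txt);
		if (!next)
			return;
	}
	*str = next;
	free(prev);
}

int main(void)
{
	unsigned int ts = 0x1 | 0x4;	/* hypothetical active|sent bits */
	char *s = NULL;

	if (ts & 0x1)
		append_str(&s, "active");
	if (ts & 0x4)
		append_str(&s, "sent");
	printf("transport_state: %s\n", s ? s : "(none)");
	free(s);	/* caller owns the final allocation */
	return 0;
}

The caller owning the single final allocation mirrors the kfree(ts_str) at the end of target_show_cmd().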
@@ -2812,13 +2974,13 @@ __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, cmd->transport_state |= CMD_T_STOP; - pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d," - " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag, - cmd->se_tfo->get_cmd_state(cmd), cmd->t_state); + target_show_cmd("wait_for_tasks: Stopping ", cmd); spin_unlock_irqrestore(&cmd->t_state_lock, *flags); - wait_for_completion(&cmd->t_transport_stop_comp); + while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, + 180 * HZ)) + target_show_cmd("wait for tasks: ", cmd); spin_lock_irqsave(&cmd->t_state_lock, *flags); cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); @@ -3201,6 +3363,7 @@ static void target_tmr_work(struct work_struct *work) cmd->se_tfo->queue_tm_rsp(cmd); check_stop: + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); } @@ -3223,6 +3386,7 @@ int transport_generic_handle_tmr( pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d" "ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function, cmd->se_tmr_req->ref_task_tag, cmd->tag); + transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return 0; } diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index beb5f098f32d..80ee130f8253 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -87,6 +87,8 @@ /* Default maximum of the global data blocks(512K * PAGE_SIZE) */ #define TCMU_GLOBAL_MAX_BLOCKS (512 * 1024) +static u8 tcmu_kern_cmd_reply_supported; + static struct device *tcmu_root_device; struct tcmu_hba { @@ -95,6 +97,13 @@ struct tcmu_hba { #define TCMU_CONFIG_LEN 256 +struct tcmu_nl_cmd { + /* wake up thread waiting for reply */ + struct completion complete; + int cmd; + int status; +}; + struct tcmu_dev { struct list_head node; struct kref kref; @@ -135,6 +144,11 @@ struct tcmu_dev { struct timer_list timeout; unsigned int cmd_time_out; + spinlock_t nl_cmd_lock; + struct tcmu_nl_cmd curr_nl_cmd; + /* wake up threads waiting on curr_nl_cmd */ + wait_queue_head_t nl_cmd_wq; + char dev_config[TCMU_CONFIG_LEN]; }; @@ -178,16 +192,128 @@ static const struct genl_multicast_group tcmu_mcgrps[] = { [TCMU_MCGRP_CONFIG] = { .name = "config", }, }; +static struct nla_policy tcmu_attr_policy[TCMU_ATTR_MAX+1] = { + [TCMU_ATTR_DEVICE] = { .type = NLA_STRING }, + [TCMU_ATTR_MINOR] = { .type = NLA_U32 }, + [TCMU_ATTR_CMD_STATUS] = { .type = NLA_S32 }, + [TCMU_ATTR_DEVICE_ID] = { .type = NLA_U32 }, + [TCMU_ATTR_SUPP_KERN_CMD_REPLY] = { .type = NLA_U8 }, +}; + +static int tcmu_genl_cmd_done(struct genl_info *info, int completed_cmd) +{ + struct se_device *dev; + struct tcmu_dev *udev; + struct tcmu_nl_cmd *nl_cmd; + int dev_id, rc, ret = 0; + bool is_removed = (completed_cmd == TCMU_CMD_REMOVED_DEVICE); + + if (!info->attrs[TCMU_ATTR_CMD_STATUS] || + !info->attrs[TCMU_ATTR_DEVICE_ID]) { + printk(KERN_ERR "TCMU_ATTR_CMD_STATUS or TCMU_ATTR_DEVICE_ID not set, doing nothing\n"); + return -EINVAL; + } + + dev_id = nla_get_u32(info->attrs[TCMU_ATTR_DEVICE_ID]); + rc = nla_get_s32(info->attrs[TCMU_ATTR_CMD_STATUS]); + + dev = target_find_device(dev_id, !is_removed); + if (!dev) { + printk(KERN_ERR "tcmu nl cmd %u/%u completion could not find device with dev id %u.\n", + completed_cmd, rc, dev_id); + return -ENODEV; + } + udev = TCMU_DEV(dev); + + spin_lock(&udev->nl_cmd_lock); + nl_cmd = &udev->curr_nl_cmd; + + pr_debug("genl cmd done got id %d curr %d done %d rc %d\n", dev_id, + nl_cmd->cmd, completed_cmd, rc); + + if (nl_cmd->cmd != 
completed_cmd) { + printk(KERN_ERR "Mismatched commands (Expecting reply for %d. Current %d).\n", + completed_cmd, nl_cmd->cmd); + ret = -EINVAL; + } else { + nl_cmd->status = rc; + } + + spin_unlock(&udev->nl_cmd_lock); + if (!is_removed) + target_undepend_item(&dev->dev_group.cg_item); + if (!ret) + complete(&nl_cmd->complete); + return ret; +} + +static int tcmu_genl_rm_dev_done(struct sk_buff *skb, struct genl_info *info) +{ + return tcmu_genl_cmd_done(info, TCMU_CMD_REMOVED_DEVICE); +} + +static int tcmu_genl_add_dev_done(struct sk_buff *skb, struct genl_info *info) +{ + return tcmu_genl_cmd_done(info, TCMU_CMD_ADDED_DEVICE); +} + +static int tcmu_genl_reconfig_dev_done(struct sk_buff *skb, + struct genl_info *info) +{ + return tcmu_genl_cmd_done(info, TCMU_CMD_RECONFIG_DEVICE); +} + +static int tcmu_genl_set_features(struct sk_buff *skb, struct genl_info *info) +{ + if (info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]) { + tcmu_kern_cmd_reply_supported = + nla_get_u8(info->attrs[TCMU_ATTR_SUPP_KERN_CMD_REPLY]); + printk(KERN_INFO "tcmu daemon: command reply support %u.\n", + tcmu_kern_cmd_reply_supported); + } + + return 0; +} + +static const struct genl_ops tcmu_genl_ops[] = { + { + .cmd = TCMU_CMD_SET_FEATURES, + .flags = GENL_ADMIN_PERM, + .policy = tcmu_attr_policy, + .doit = tcmu_genl_set_features, + }, + { + .cmd = TCMU_CMD_ADDED_DEVICE_DONE, + .flags = GENL_ADMIN_PERM, + .policy = tcmu_attr_policy, + .doit = tcmu_genl_add_dev_done, + }, + { + .cmd = TCMU_CMD_REMOVED_DEVICE_DONE, + .flags = GENL_ADMIN_PERM, + .policy = tcmu_attr_policy, + .doit = tcmu_genl_rm_dev_done, + }, + { + .cmd = TCMU_CMD_RECONFIG_DEVICE_DONE, + .flags = GENL_ADMIN_PERM, + .policy = tcmu_attr_policy, + .doit = tcmu_genl_reconfig_dev_done, + }, +}; + /* Our generic netlink family */ static struct genl_family tcmu_genl_family __ro_after_init = { .module = THIS_MODULE, .hdrsize = 0, .name = "TCM-USER", - .version = 1, + .version = 2, .maxattr = TCMU_ATTR_MAX, .mcgrps = tcmu_mcgrps, .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps), .netnsok = true, + .ops = tcmu_genl_ops, + .n_ops = ARRAY_SIZE(tcmu_genl_ops), }; #define tcmu_cmd_set_dbi_cur(cmd, index) ((cmd)->dbi_cur = (index)) @@ -216,7 +342,6 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev, page = radix_tree_lookup(&udev->data_blocks, dbi); if (!page) { - if (atomic_add_return(1, &global_db_count) > TCMU_GLOBAL_MAX_BLOCKS) { atomic_dec(&global_db_count); @@ -226,14 +351,11 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev, /* try to get new page from the mm */ page = alloc_page(GFP_KERNEL); if (!page) - return false; + goto err_alloc; ret = radix_tree_insert(&udev->data_blocks, dbi, page); - if (ret) { - __free_page(page); - return false; - } - + if (ret) + goto err_insert; } if (dbi > udev->dbi_max) @@ -243,6 +365,11 @@ static inline bool tcmu_get_empty_block(struct tcmu_dev *udev, tcmu_cmd_set_dbi(tcmu_cmd, dbi); return true; +err_insert: + __free_page(page); +err_alloc: + atomic_dec(&global_db_count); + return false; } static bool tcmu_get_empty_blocks(struct tcmu_dev *udev, @@ -401,7 +528,7 @@ static inline size_t get_block_offset_user(struct tcmu_dev *dev, DATA_BLOCK_SIZE - remaining; } -static inline size_t iov_tail(struct tcmu_dev *udev, struct iovec *iov) +static inline size_t iov_tail(struct iovec *iov) { return (size_t)iov->iov_base + iov->iov_len; } @@ -437,10 +564,10 @@ static int scatter_data_area(struct tcmu_dev *udev, to_offset = get_block_offset_user(udev, dbi, block_remaining); offset = DATA_BLOCK_SIZE - block_remaining; - to 
= (void *)(unsigned long)to + offset; + to += offset; if (*iov_cnt != 0 && - to_offset == iov_tail(udev, *iov)) { + to_offset == iov_tail(*iov)) { (*iov)->iov_len += copy_bytes; } else { new_iov(iov, iov_cnt, udev); @@ -510,7 +637,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd, copy_bytes = min_t(size_t, sg_remaining, block_remaining); offset = DATA_BLOCK_SIZE - block_remaining; - from = (void *)(unsigned long)from + offset; + from += offset; tcmu_flush_dcache_range(from, copy_bytes); memcpy(to + sg->length - sg_remaining, from, copy_bytes); @@ -596,10 +723,7 @@ static bool is_ring_space_avail(struct tcmu_dev *udev, struct tcmu_cmd *cmd, } } - if (!tcmu_get_empty_blocks(udev, cmd)) - return false; - - return true; + return tcmu_get_empty_blocks(udev, cmd); } static inline size_t tcmu_cmd_get_base_cmd_size(size_t iov_cnt) @@ -699,25 +823,24 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) size_t pad_size = head_to_end(cmd_head, udev->cmdr_size); entry = (void *) mb + CMDR_OFF + cmd_head; - tcmu_flush_dcache_range(entry, sizeof(*entry)); tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_PAD); tcmu_hdr_set_len(&entry->hdr.len_op, pad_size); entry->hdr.cmd_id = 0; /* not used for PAD */ entry->hdr.kflags = 0; entry->hdr.uflags = 0; + tcmu_flush_dcache_range(entry, sizeof(*entry)); UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size); + tcmu_flush_dcache_range(mb, sizeof(*mb)); cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */ WARN_ON(cmd_head != 0); } entry = (void *) mb + CMDR_OFF + cmd_head; - tcmu_flush_dcache_range(entry, sizeof(*entry)); + memset(entry, 0, command_size); tcmu_hdr_set_op(&entry->hdr.len_op, TCMU_OP_CMD); entry->hdr.cmd_id = tcmu_cmd->cmd_id; - entry->hdr.kflags = 0; - entry->hdr.uflags = 0; /* Handle allocating space from the data area */ tcmu_cmd_reset_dbi_cur(tcmu_cmd); @@ -736,11 +859,10 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } entry->req.iov_cnt = iov_cnt; - entry->req.iov_dif_cnt = 0; /* Handle BIDI commands */ + iov_cnt = 0; if (se_cmd->se_cmd_flags & SCF_BIDI) { - iov_cnt = 0; iov++; ret = scatter_data_area(udev, tcmu_cmd, se_cmd->t_bidi_data_sg, @@ -753,8 +875,8 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) pr_err("tcmu: alloc and scatter bidi data failed\n"); return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } - entry->req.iov_bidi_cnt = iov_cnt; } + entry->req.iov_bidi_cnt = iov_cnt; /* * Recalculate the command's base size and size according @@ -830,8 +952,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * cmd->se_cmd); entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION; } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) { - memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer, - se_cmd->scsi_sense_length); + transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer); } else if (se_cmd->se_cmd_flags & SCF_BIDI) { /* Get Data-In buffer before clean up */ gather_data_area(udev, cmd, true); @@ -989,6 +1110,9 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name) setup_timer(&udev->timeout, tcmu_device_timedout, (unsigned long)udev); + init_waitqueue_head(&udev->nl_cmd_wq); + spin_lock_init(&udev->nl_cmd_lock); + return &udev->se_dev; } @@ -1140,6 +1264,7 @@ static int tcmu_open(struct uio_info *info, struct inode *inode) return -EBUSY; udev->inode = inode; + kref_get(&udev->kref); pr_debug("open\n"); @@ -1171,12 +1296,59 @@ static int tcmu_release(struct uio_info *info, struct inode *inode) 
clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags); pr_debug("close\n"); - /* release ref from configure */ + /* release ref from open */ kref_put(&udev->kref, tcmu_dev_kref_release); return 0; } -static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor) +static void tcmu_init_genl_cmd_reply(struct tcmu_dev *udev, int cmd) +{ + struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; + + if (!tcmu_kern_cmd_reply_supported) + return; +relock: + spin_lock(&udev->nl_cmd_lock); + + if (nl_cmd->cmd != TCMU_CMD_UNSPEC) { + spin_unlock(&udev->nl_cmd_lock); + pr_debug("sleeping for open nl cmd\n"); + wait_event(udev->nl_cmd_wq, (nl_cmd->cmd == TCMU_CMD_UNSPEC)); + goto relock; + } + + memset(nl_cmd, 0, sizeof(*nl_cmd)); + nl_cmd->cmd = cmd; + init_completion(&nl_cmd->complete); + + spin_unlock(&udev->nl_cmd_lock); +} + +static int tcmu_wait_genl_cmd_reply(struct tcmu_dev *udev) +{ + struct tcmu_nl_cmd *nl_cmd = &udev->curr_nl_cmd; + int ret; + DEFINE_WAIT(__wait); + + if (!tcmu_kern_cmd_reply_supported) + return 0; + + pr_debug("sleeping for nl reply\n"); + wait_for_completion(&nl_cmd->complete); + + spin_lock(&udev->nl_cmd_lock); + nl_cmd->cmd = TCMU_CMD_UNSPEC; + ret = nl_cmd->status; + nl_cmd->status = 0; + spin_unlock(&udev->nl_cmd_lock); + + wake_up_all(&udev->nl_cmd_wq); + + return ret; +} + +static int tcmu_netlink_event(struct tcmu_dev *udev, enum tcmu_genl_cmd cmd, + int reconfig_attr, const void *reconfig_data) { struct sk_buff *skb; void *msg_header; @@ -1190,22 +1362,51 @@ static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int mino if (!msg_header) goto free_skb; - ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name); + ret = nla_put_string(skb, TCMU_ATTR_DEVICE, udev->uio_info.name); + if (ret < 0) + goto free_skb; + + ret = nla_put_u32(skb, TCMU_ATTR_MINOR, udev->uio_info.uio_dev->minor); if (ret < 0) goto free_skb; - ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor); + ret = nla_put_u32(skb, TCMU_ATTR_DEVICE_ID, udev->se_dev.dev_index); if (ret < 0) goto free_skb; + if (cmd == TCMU_CMD_RECONFIG_DEVICE) { + switch (reconfig_attr) { + case TCMU_ATTR_DEV_CFG: + ret = nla_put_string(skb, reconfig_attr, reconfig_data); + break; + case TCMU_ATTR_DEV_SIZE: + ret = nla_put_u64_64bit(skb, reconfig_attr, + *((u64 *)reconfig_data), + TCMU_ATTR_PAD); + break; + case TCMU_ATTR_WRITECACHE: + ret = nla_put_u8(skb, reconfig_attr, + *((u8 *)reconfig_data)); + break; + default: + BUG(); + } + + if (ret < 0) + goto free_skb; + } + genlmsg_end(skb, msg_header); + tcmu_init_genl_cmd_reply(udev, cmd); + ret = genlmsg_multicast_allns(&tcmu_genl_family, skb, 0, TCMU_MCGRP_CONFIG, GFP_KERNEL); - /* We don't care if no one is listening */ if (ret == -ESRCH) ret = 0; + if (!ret) + ret = tcmu_wait_genl_cmd_reply(udev); return ret; free_skb: @@ -1213,19 +1414,14 @@ free_skb: return ret; } -static int tcmu_configure_device(struct se_device *dev) +static int tcmu_update_uio_info(struct tcmu_dev *udev) { - struct tcmu_dev *udev = TCMU_DEV(dev); struct tcmu_hba *hba = udev->hba->hba_ptr; struct uio_info *info; - struct tcmu_mailbox *mb; - size_t size; - size_t used; - int ret = 0; + size_t size, used; char *str; info = &udev->uio_info; - size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name, udev->dev_config); size += 1; /* for \0 */ @@ -1234,12 +1430,27 @@ static int tcmu_configure_device(struct se_device *dev) return -ENOMEM; used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name); - if (udev->dev_config[0]) snprintf(str + used, size - used, "/%s", 
udev->dev_config); info->name = str; + return 0; +} + +static int tcmu_configure_device(struct se_device *dev) +{ + struct tcmu_dev *udev = TCMU_DEV(dev); + struct uio_info *info; + struct tcmu_mailbox *mb; + int ret = 0; + + ret = tcmu_update_uio_info(udev); + if (ret) + return ret; + + info = &udev->uio_info; + udev->mb_addr = vzalloc(CMDR_SIZE); if (!udev->mb_addr) { ret = -ENOMEM; @@ -1290,6 +1501,8 @@ static int tcmu_configure_device(struct se_device *dev) /* Other attributes can be configured in userspace */ if (!dev->dev_attrib.hw_max_sectors) dev->dev_attrib.hw_max_sectors = 128; + if (!dev->dev_attrib.emulate_write_cache) + dev->dev_attrib.emulate_write_cache = 0; dev->dev_attrib.hw_queue_depth = 128; /* @@ -1298,8 +1511,7 @@ static int tcmu_configure_device(struct se_device *dev) */ kref_get(&udev->kref); - ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name, - udev->uio_info.uio_dev->minor); + ret = tcmu_netlink_event(udev, TCMU_CMD_ADDED_DEVICE, 0, NULL); if (ret) goto err_netlink; @@ -1355,6 +1567,14 @@ static void tcmu_blocks_release(struct tcmu_dev *udev) static void tcmu_free_device(struct se_device *dev) { struct tcmu_dev *udev = TCMU_DEV(dev); + + /* release ref from init */ + kref_put(&udev->kref, tcmu_dev_kref_release); +} + +static void tcmu_destroy_device(struct se_device *dev) +{ + struct tcmu_dev *udev = TCMU_DEV(dev); struct tcmu_cmd *cmd; bool all_expired = true; int i; @@ -1379,14 +1599,11 @@ static void tcmu_free_device(struct se_device *dev) tcmu_blocks_release(udev); - if (tcmu_dev_configured(udev)) { - tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name, - udev->uio_info.uio_dev->minor); + tcmu_netlink_event(udev, TCMU_CMD_REMOVED_DEVICE, 0, NULL); - uio_unregister_device(&udev->uio_info); - } + uio_unregister_device(&udev->uio_info); - /* release ref from init */ + /* release ref from configure */ kref_put(&udev->kref, tcmu_dev_kref_release); } @@ -1546,6 +1763,129 @@ static ssize_t tcmu_cmd_time_out_store(struct config_item *item, const char *pag } CONFIGFS_ATTR(tcmu_, cmd_time_out); +static ssize_t tcmu_dev_config_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%s\n", udev->dev_config); +} + +static ssize_t tcmu_dev_config_store(struct config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + int ret, len; + + len = strlen(page); + if (!len || len > TCMU_CONFIG_LEN - 1) + return -EINVAL; + + /* Check if device has been configured before */ + if (tcmu_dev_configured(udev)) { + ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, + TCMU_ATTR_DEV_CFG, page); + if (ret) { + pr_err("Unable to reconfigure device\n"); + return ret; + } + strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); + + ret = tcmu_update_uio_info(udev); + if (ret) + return ret; + return count; + } + strlcpy(udev->dev_config, page, TCMU_CONFIG_LEN); + + return count; +} +CONFIGFS_ATTR(tcmu_, dev_config); + +static ssize_t tcmu_dev_size_show(struct config_item *item, char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + + return snprintf(page, PAGE_SIZE, "%zu\n", udev->dev_size); +} + +static ssize_t tcmu_dev_size_store(struct 
config_item *item, const char *page, + size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + u64 val; + int ret; + + ret = kstrtou64(page, 0, &val); + if (ret < 0) + return ret; + + /* Check if device has been configured before */ + if (tcmu_dev_configured(udev)) { + ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, + TCMU_ATTR_DEV_SIZE, &val); + if (ret) { + pr_err("Unable to reconfigure device\n"); + return ret; + } + } + udev->dev_size = val; + return count; +} +CONFIGFS_ATTR(tcmu_, dev_size); + +static ssize_t tcmu_emulate_write_cache_show(struct config_item *item, + char *page) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + + return snprintf(page, PAGE_SIZE, "%i\n", da->emulate_write_cache); +} + +static ssize_t tcmu_emulate_write_cache_store(struct config_item *item, + const char *page, size_t count) +{ + struct se_dev_attrib *da = container_of(to_config_group(item), + struct se_dev_attrib, da_group); + struct tcmu_dev *udev = TCMU_DEV(da->da_dev); + u8 val; + int ret; + + ret = kstrtou8(page, 0, &val); + if (ret < 0) + return ret; + + /* Check if device has been configured before */ + if (tcmu_dev_configured(udev)) { + ret = tcmu_netlink_event(udev, TCMU_CMD_RECONFIG_DEVICE, + TCMU_ATTR_WRITECACHE, &val); + if (ret) { + pr_err("Unable to reconfigure device\n"); + return ret; + } + } + + da->emulate_write_cache = val; + return count; +} +CONFIGFS_ATTR(tcmu_, emulate_write_cache); + +static struct configfs_attribute *tcmu_attrib_attrs[] = { + &tcmu_attr_cmd_time_out, + &tcmu_attr_dev_config, + &tcmu_attr_dev_size, + &tcmu_attr_emulate_write_cache, + NULL, +}; + static struct configfs_attribute **tcmu_attrs; static struct target_backend_ops tcmu_ops = { @@ -1556,6 +1896,7 @@ static struct target_backend_ops tcmu_ops = { .detach_hba = tcmu_detach_hba, .alloc_device = tcmu_alloc_device, .configure_device = tcmu_configure_device, + .destroy_device = tcmu_destroy_device, .free_device = tcmu_free_device, .parse_cdb = tcmu_parse_cdb, .set_configfs_dev_params = tcmu_set_configfs_dev_params, @@ -1573,7 +1914,7 @@ static int unmap_thread_fn(void *data) struct page *page; int i; - while (1) { + while (!kthread_should_stop()) { DEFINE_WAIT(__wait); prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE); @@ -1645,7 +1986,7 @@ static int unmap_thread_fn(void *data) static int __init tcmu_module_init(void) { - int ret, i, len = 0; + int ret, i, k, len = 0; BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0); @@ -1670,7 +2011,10 @@ static int __init tcmu_module_init(void) for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { len += sizeof(struct configfs_attribute *); } - len += sizeof(struct configfs_attribute *) * 2; + for (i = 0; tcmu_attrib_attrs[i] != NULL; i++) { + len += sizeof(struct configfs_attribute *); + } + len += sizeof(struct configfs_attribute *); tcmu_attrs = kzalloc(len, GFP_KERNEL); if (!tcmu_attrs) { @@ -1681,7 +2025,10 @@ static int __init tcmu_module_init(void) for (i = 0; passthrough_attrib_attrs[i] != NULL; i++) { tcmu_attrs[i] = passthrough_attrib_attrs[i]; } - tcmu_attrs[i] = &tcmu_attr_cmd_time_out; + for (k = 0; tcmu_attrib_attrs[k] != NULL; k++) { + tcmu_attrs[i] = tcmu_attrib_attrs[k]; + i++; + } tcmu_ops.tb_dev_attrib_attrs = tcmu_attrs; ret = transport_backend_register(&tcmu_ops); diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c index 
cac5a20a4de0..9ee89e00cd77 100644 --- a/drivers/target/target_core_xcopy.c +++ b/drivers/target/target_core_xcopy.c @@ -40,6 +40,8 @@ static struct workqueue_struct *xcopy_wq = NULL; +static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop); + static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) { int off = 0; @@ -53,48 +55,60 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf) return 0; } -static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn, - struct se_device **found_dev) +struct xcopy_dev_search_info { + const unsigned char *dev_wwn; + struct se_device *found_dev; +}; + +static int target_xcopy_locate_se_dev_e4_iter(struct se_device *se_dev, + void *data) { - struct se_device *se_dev; + struct xcopy_dev_search_info *info = data; unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN]; int rc; - mutex_lock(&g_device_mutex); - list_for_each_entry(se_dev, &g_device_list, g_dev_node) { + if (!se_dev->dev_attrib.emulate_3pc) + return 0; - if (!se_dev->dev_attrib.emulate_3pc) - continue; + memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); + target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); - memset(&tmp_dev_wwn[0], 0, XCOPY_NAA_IEEE_REGEX_LEN); - target_xcopy_gen_naa_ieee(se_dev, &tmp_dev_wwn[0]); + rc = memcmp(&tmp_dev_wwn[0], info->dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); + if (rc != 0) + return 0; - rc = memcmp(&tmp_dev_wwn[0], dev_wwn, XCOPY_NAA_IEEE_REGEX_LEN); - if (rc != 0) - continue; + info->found_dev = se_dev; + pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); - *found_dev = se_dev; - pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev); + rc = target_depend_item(&se_dev->dev_group.cg_item); + if (rc != 0) { + pr_err("configfs_depend_item attempt failed: %d for se_dev: %p\n", + rc, se_dev); + return rc; + } - rc = target_depend_item(&se_dev->dev_group.cg_item); - if (rc != 0) { - pr_err("configfs_depend_item attempt failed:" - " %d for se_dev: %p\n", rc, se_dev); - mutex_unlock(&g_device_mutex); - return rc; - } + pr_debug("Called configfs_depend_item for se_dev: %p se_dev->se_dev_group: %p\n", + se_dev, &se_dev->dev_group); + return 1; +} - pr_debug("Called configfs_depend_item for se_dev: %p" - " se_dev->se_dev_group: %p\n", se_dev, - &se_dev->dev_group); +static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn, + struct se_device **found_dev) +{ + struct xcopy_dev_search_info info; + int ret; + + memset(&info, 0, sizeof(info)); + info.dev_wwn = dev_wwn; - mutex_unlock(&g_device_mutex); + ret = target_for_each_device(target_xcopy_locate_se_dev_e4_iter, &info); + if (ret == 1) { + *found_dev = info.found_dev; return 0; + } else { + pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); + return -EINVAL; } - mutex_unlock(&g_device_mutex); - - pr_debug_ratelimited("Unable to locate 0xe4 descriptor for EXTENDED_COPY\n"); - return -EINVAL; } static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop, @@ -311,9 +325,7 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op (unsigned long long)xop->dst_lba); if (dc != 0) { - xop->dbl = (desc[29] & 0xff) << 16; - xop->dbl |= (desc[30] & 0xff) << 8; - xop->dbl |= desc[31] & 0xff; + xop->dbl = get_unaligned_be24(&desc[29]); pr_debug("XCOPY seg desc 0x02: DC=1 w/ dbl: %u\n", xop->dbl); } @@ -781,13 +793,24 @@ static int target_xcopy_write_destination( static void target_xcopy_do_work(struct work_struct *work) { struct xcopy_op *xop = container_of(work, struct 
xcopy_op, xop_work); - struct se_device *src_dev = xop->src_dev, *dst_dev = xop->dst_dev; struct se_cmd *ec_cmd = xop->xop_se_cmd; - sector_t src_lba = xop->src_lba, dst_lba = xop->dst_lba, end_lba; + struct se_device *src_dev, *dst_dev; + sector_t src_lba, dst_lba, end_lba; unsigned int max_sectors; - int rc; - unsigned short nolb = xop->nolb, cur_nolb, max_nolb, copied_nolb = 0; + int rc = 0; + unsigned short nolb, cur_nolb, max_nolb, copied_nolb = 0; + + if (target_parse_xcopy_cmd(xop) != TCM_NO_SENSE) + goto err_free; + if (WARN_ON_ONCE(!xop->src_dev) || WARN_ON_ONCE(!xop->dst_dev)) + goto err_free; + + src_dev = xop->src_dev; + dst_dev = xop->dst_dev; + src_lba = xop->src_lba; + dst_lba = xop->dst_lba; + nolb = xop->nolb; end_lba = src_lba + nolb; /* * Break up XCOPY I/O into hw_max_sectors sized I/O based on the @@ -855,6 +878,8 @@ static void target_xcopy_do_work(struct work_struct *work) out: xcopy_pt_undepend_remotedev(xop); + +err_free: kfree(xop); /* * Don't override an error scsi status if it has already been set @@ -867,48 +892,22 @@ out: target_complete_cmd(ec_cmd, ec_cmd->scsi_status); } -sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) +/* + * Returns TCM_NO_SENSE upon success or a sense code != TCM_NO_SENSE if parsing + * fails. + */ +static sense_reason_t target_parse_xcopy_cmd(struct xcopy_op *xop) { - struct se_device *dev = se_cmd->se_dev; - struct xcopy_op *xop = NULL; + struct se_cmd *se_cmd = xop->xop_se_cmd; unsigned char *p = NULL, *seg_desc; - unsigned int list_id, list_id_usage, sdll, inline_dl, sa; + unsigned int list_id, list_id_usage, sdll, inline_dl; sense_reason_t ret = TCM_INVALID_PARAMETER_LIST; int rc; unsigned short tdll; - if (!dev->dev_attrib.emulate_3pc) { - pr_err("EXTENDED_COPY operation explicitly disabled\n"); - return TCM_UNSUPPORTED_SCSI_OPCODE; - } - - sa = se_cmd->t_task_cdb[1] & 0x1f; - if (sa != 0x00) { - pr_err("EXTENDED_COPY(LID4) not supported\n"); - return TCM_UNSUPPORTED_SCSI_OPCODE; - } - - if (se_cmd->data_length == 0) { - target_complete_cmd(se_cmd, SAM_STAT_GOOD); - return TCM_NO_SENSE; - } - if (se_cmd->data_length < XCOPY_HDR_LEN) { - pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n", - se_cmd->data_length, XCOPY_HDR_LEN); - return TCM_PARAMETER_LIST_LENGTH_ERROR; - } - - xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); - if (!xop) { - pr_err("Unable to allocate xcopy_op\n"); - return TCM_OUT_OF_RESOURCES; - } - xop->xop_se_cmd = se_cmd; - p = transport_kmap_data_sg(se_cmd); if (!p) { pr_err("transport_kmap_data_sg() failed in target_do_xcopy\n"); - kfree(xop); return TCM_OUT_OF_RESOURCES; } @@ -977,18 +976,57 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc, rc * XCOPY_TARGET_DESC_LEN); transport_kunmap_data_sg(se_cmd); - - INIT_WORK(&xop->xop_work, target_xcopy_do_work); - queue_work(xcopy_wq, &xop->xop_work); return TCM_NO_SENSE; out: if (p) transport_kunmap_data_sg(se_cmd); - kfree(xop); return ret; } +sense_reason_t target_do_xcopy(struct se_cmd *se_cmd) +{ + struct se_device *dev = se_cmd->se_dev; + struct xcopy_op *xop; + unsigned int sa; + + if (!dev->dev_attrib.emulate_3pc) { + pr_err("EXTENDED_COPY operation explicitly disabled\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + sa = se_cmd->t_task_cdb[1] & 0x1f; + if (sa != 0x00) { + pr_err("EXTENDED_COPY(LID4) not supported\n"); + return TCM_UNSUPPORTED_SCSI_OPCODE; + } + + if (se_cmd->data_length == 0) { + target_complete_cmd(se_cmd, SAM_STAT_GOOD); + return TCM_NO_SENSE; + 
} + if (se_cmd->data_length < XCOPY_HDR_LEN) { + pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n", + se_cmd->data_length, XCOPY_HDR_LEN); + return TCM_PARAMETER_LIST_LENGTH_ERROR; + } + + xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL); + if (!xop) + goto err; + xop->xop_se_cmd = se_cmd; + INIT_WORK(&xop->xop_work, target_xcopy_do_work); + if (WARN_ON_ONCE(!queue_work(xcopy_wq, &xop->xop_work))) + goto free; + return TCM_NO_SENSE; + +free: + kfree(xop); + +err: + return TCM_OUT_OF_RESOURCES; +} + static sense_reason_t target_rcr_operating_parameters(struct se_cmd *se_cmd) { unsigned char *p; diff --git a/drivers/thermal/broadcom/bcm2835_thermal.c b/drivers/thermal/broadcom/bcm2835_thermal.c index 0ecf80890c84..e6863c841662 100644 --- a/drivers/thermal/broadcom/bcm2835_thermal.c +++ b/drivers/thermal/broadcom/bcm2835_thermal.c @@ -245,7 +245,6 @@ static int bcm2835_thermal_probe(struct platform_device *pdev) */ err = tz->ops->get_trip_temp(tz, 0, &trip_temp); if (err < 0) { - err = PTR_ERR(tz); dev_err(&pdev->dev, "Not able to read trip_temp: %d\n", err); diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index 69d0f430b2d1..908a8014cf76 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -49,40 +49,45 @@ */ /** - * struct power_table - frequency to power conversion + * struct freq_table - frequency table along with power entries * @frequency: frequency in KHz * @power: power in mW * * This structure is built when the cooling device registers and helps - * in translating frequency to power and viceversa. + * in translating frequency to power and vice versa. */ -struct power_table { +struct freq_table { u32 frequency; u32 power; }; /** + * struct time_in_idle - Idle time stats + * @time: previous reading of the absolute time that this cpu was idle + * @timestamp: wall time of the last invocation of get_cpu_idle_time_us() + */ +struct time_in_idle { + u64 time; + u64 timestamp; +}; + +/** * struct cpufreq_cooling_device - data for cooling device with cpufreq * @id: unique integer value corresponding to each cpufreq_cooling_device * registered. - * @cool_dev: thermal_cooling_device pointer to keep track of the - * registered cooling device. + * @last_load: load measured by the latest call to cpufreq_get_requested_power() * @cpufreq_state: integer value representing the current state of cpufreq * cooling devices. * @clipped_freq: integer value representing the absolute value of the clipped * frequency. * @max_level: maximum cooling level. One less than total number of valid * cpufreq frequencies. - * @allowed_cpus: all the cpus involved for this cpufreq_cooling_device. + * @freq_table: Freq table in descending order of frequencies + * @cdev: thermal_cooling_device pointer to keep track of the + * registered cooling device. + * @policy: cpufreq policy. * @node: list_head to link all cpufreq_cooling_device together. - * @last_load: load measured by the latest call to cpufreq_get_requested_power() - * @time_in_idle: previous reading of the absolute time that this cpu was idle - * @time_in_idle_timestamp: wall time of the last invocation of - * get_cpu_idle_time_us() - * @dyn_power_table: array of struct power_table for frequency to power - * conversion, sorted in ascending order. 
- * @dyn_power_table_entries: number of entries in the @dyn_power_table array - * @cpu_dev: the first cpu_device from @allowed_cpus that has OPPs registered + * @idle_time: idle time stats * @plat_get_static_power: callback to calculate the static power * * This structure is required for keeping information of each registered @@ -90,81 +95,45 @@ struct power_table { */ struct cpufreq_cooling_device { int id; - struct thermal_cooling_device *cool_dev; + u32 last_load; unsigned int cpufreq_state; unsigned int clipped_freq; unsigned int max_level; - unsigned int *freq_table; /* In descending order */ - struct cpumask allowed_cpus; + struct freq_table *freq_table; /* In descending order */ + struct thermal_cooling_device *cdev; + struct cpufreq_policy *policy; struct list_head node; - u32 last_load; - u64 *time_in_idle; - u64 *time_in_idle_timestamp; - struct power_table *dyn_power_table; - int dyn_power_table_entries; - struct device *cpu_dev; + struct time_in_idle *idle_time; get_static_t plat_get_static_power; }; -static DEFINE_IDA(cpufreq_ida); +static DEFINE_IDA(cpufreq_ida); static DEFINE_MUTEX(cooling_list_lock); -static LIST_HEAD(cpufreq_dev_list); +static LIST_HEAD(cpufreq_cdev_list); /* Below code defines functions to be used for cpufreq as cooling device */ /** * get_level: Find the level for a particular frequency - * @cpufreq_dev: cpufreq_dev for which the property is required + * @cpufreq_cdev: cpufreq_cdev for which the property is required * @freq: Frequency * - * Return: level on success, THERMAL_CSTATE_INVALID on error. + * Return: level corresponding to the frequency. */ -static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev, +static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_cdev, unsigned int freq) { + struct freq_table *freq_table = cpufreq_cdev->freq_table; unsigned long level; - for (level = 0; level <= cpufreq_dev->max_level; level++) { - if (freq == cpufreq_dev->freq_table[level]) - return level; - - if (freq > cpufreq_dev->freq_table[level]) + for (level = 1; level <= cpufreq_cdev->max_level; level++) + if (freq > freq_table[level].frequency) break; - } - return THERMAL_CSTATE_INVALID; + return level - 1; } /** - * cpufreq_cooling_get_level - for a given cpu, return the cooling level. - * @cpu: cpu for which the level is required - * @freq: the frequency of interest - * - * This function will match the cooling level corresponding to the - * requested @freq and return it. - * - * Return: The matched cooling level on success or THERMAL_CSTATE_INVALID - * otherwise. - */ -unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq) -{ - struct cpufreq_cooling_device *cpufreq_dev; - - mutex_lock(&cooling_list_lock); - list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { - if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) { - mutex_unlock(&cooling_list_lock); - return get_level(cpufreq_dev, freq); - } - } - mutex_unlock(&cooling_list_lock); - - pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu); - return THERMAL_CSTATE_INVALID; -} -EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level); - -/** * cpufreq_thermal_notifier - notifier callback for cpufreq policy change. * @nb: struct notifier_block * with callback info. * @event: value showing cpufreq event for which this function invoked. 
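The rewritten get_level() above can no longer fail: every frequency is clamped to a valid cooling level of the descending freq_table, which is why cpufreq_cooling_get_level() and its THERMAL_CSTATE_INVALID error path are deleted in the same hunk. A minimal userspace sketch of the same walk, with an invented four-entry table (not the driver's real data):

#include <stdio.h>

/* Hypothetical descending frequency table in kHz, mirroring the new
 * struct freq_table layout. */
static const unsigned int freq_khz[] = { 1800000, 1400000, 1000000, 600000 };
#define MAX_LEVEL 3 /* index of the last entry, like cpufreq_cdev->max_level */

/* Same loop as the new get_level(): stop at the first entry strictly
 * below the requested frequency and return the level just above it. */
static unsigned long get_level(unsigned int freq)
{
	unsigned long level;

	for (level = 1; level <= MAX_LEVEL; level++)
		if (freq > freq_khz[level])
			break;

	return level - 1;
}

int main(void)
{
	printf("%lu\n", get_level(1500000)); /* 0: between entries, clamps up */
	printf("%lu\n", get_level(1000000)); /* 2: exact match */
	printf("%lu\n", get_level(400000));  /* 3: below the table floor */
	return 0;
}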
@@ -181,14 +150,18 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb, { struct cpufreq_policy *policy = data; unsigned long clipped_freq; - struct cpufreq_cooling_device *cpufreq_dev; + struct cpufreq_cooling_device *cpufreq_cdev; if (event != CPUFREQ_ADJUST) return NOTIFY_DONE; mutex_lock(&cooling_list_lock); - list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) { - if (!cpumask_test_cpu(policy->cpu, &cpufreq_dev->allowed_cpus)) + list_for_each_entry(cpufreq_cdev, &cpufreq_cdev_list, node) { + /* + * A new copy of the policy is sent to the notifier, so it + * can't be compared directly. + */ + if (policy->cpu != cpufreq_cdev->policy->cpu) continue; /* @@ -202,7 +175,7 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb, * But, if clipped_freq is greater than policy->max, we don't * need to do anything. */ - clipped_freq = cpufreq_dev->clipped_freq; + clipped_freq = cpufreq_cdev->clipped_freq; if (policy->max > clipped_freq) cpufreq_verify_within_limits(policy, 0, clipped_freq); @@ -214,63 +187,63 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb, } /** - * build_dyn_power_table() - create a dynamic power to frequency table - * @cpufreq_device: the cpufreq cooling device in which to store the table + * update_freq_table() - Update the freq table with power numbers + * @cpufreq_cdev: the cpufreq cooling device in which to update the table * @capacitance: dynamic power coefficient for these cpus * - * Build a dynamic power to frequency table for this cpu and store it - * in @cpufreq_device. This table will be used in cpu_power_to_freq() and - * cpu_freq_to_power() to convert between power and frequency - * efficiently. Power is stored in mW, frequency in KHz. The - * resulting table is in ascending order. + * Update the freq table with power numbers. This table will be used in + * cpu_power_to_freq() and cpu_freq_to_power() to convert between power and + * frequency efficiently. Power is stored in mW, frequency in KHz. The + * resulting table is in descending order. * * Return: 0 on success, -EINVAL if there are no OPPs for any CPUs, - * -ENOMEM if we run out of memory or -EAGAIN if an OPP was - * added/enabled while the function was executing. + * or -ENOMEM if we run out of memory. */ -static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, - u32 capacitance) +static int update_freq_table(struct cpufreq_cooling_device *cpufreq_cdev, + u32 capacitance) { - struct power_table *power_table; + struct freq_table *freq_table = cpufreq_cdev->freq_table; struct dev_pm_opp *opp; struct device *dev = NULL; - int num_opps = 0, cpu, i, ret = 0; - unsigned long freq; - - for_each_cpu(cpu, &cpufreq_device->allowed_cpus) { - dev = get_cpu_device(cpu); - if (!dev) { - dev_warn(&cpufreq_device->cool_dev->device, - "No cpu device for cpu %d\n", cpu); - continue; - } + int num_opps = 0, cpu = cpufreq_cdev->policy->cpu, i; - num_opps = dev_pm_opp_get_opp_count(dev); - if (num_opps > 0) - break; - else if (num_opps < 0) - return num_opps; + dev = get_cpu_device(cpu); + if (unlikely(!dev)) { + dev_warn(&cpufreq_cdev->cdev->device, + "No cpu device for cpu %d\n", cpu); + return -ENODEV; } - if (num_opps == 0) - return -EINVAL; + num_opps = dev_pm_opp_get_opp_count(dev); + if (num_opps < 0) + return num_opps; - power_table = kcalloc(num_opps, sizeof(*power_table), GFP_KERNEL); - if (!power_table) - return -ENOMEM; + /* + * The cpufreq table is also built from the OPP table and so the count + * should match.
+ */ + if (num_opps != cpufreq_cdev->max_level + 1) { + dev_warn(dev, "Number of OPPs not matching with max_levels\n"); + return -EINVAL; + } - for (freq = 0, i = 0; - opp = dev_pm_opp_find_freq_ceil(dev, &freq), !IS_ERR(opp); - freq++, i++) { - u32 freq_mhz, voltage_mv; + for (i = 0; i <= cpufreq_cdev->max_level; i++) { + unsigned long freq = freq_table[i].frequency * 1000; + u32 freq_mhz = freq_table[i].frequency / 1000; u64 power; + u32 voltage_mv; - if (i >= num_opps) { - ret = -EAGAIN; - goto free_power_table; + /* + * Find ceil frequency as 'freq' may be slightly lower than OPP + * freq due to truncation while converting to kHz. + */ + opp = dev_pm_opp_find_freq_ceil(dev, &freq); + if (IS_ERR(opp)) { + dev_err(dev, "failed to get opp for %lu frequency\n", + freq); + return -EINVAL; } - freq_mhz = freq / 1000000; voltage_mv = dev_pm_opp_get_voltage(opp) / 1000; dev_pm_opp_put(opp); @@ -281,89 +254,73 @@ static int build_dyn_power_table(struct cpufreq_cooling_device *cpufreq_device, power = (u64)capacitance * freq_mhz * voltage_mv * voltage_mv; do_div(power, 1000000000); - /* frequency is stored in power_table in KHz */ - power_table[i].frequency = freq / 1000; - /* power is stored in mW */ - power_table[i].power = power; + freq_table[i].power = power; } - if (i != num_opps) { - ret = PTR_ERR(opp); - goto free_power_table; - } - - cpufreq_device->cpu_dev = dev; - cpufreq_device->dyn_power_table = power_table; - cpufreq_device->dyn_power_table_entries = i; - return 0; - -free_power_table: - kfree(power_table); - - return ret; } -static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_device, +static u32 cpu_freq_to_power(struct cpufreq_cooling_device *cpufreq_cdev, u32 freq) { int i; - struct power_table *pt = cpufreq_device->dyn_power_table; + struct freq_table *freq_table = cpufreq_cdev->freq_table; - for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++) - if (freq < pt[i].frequency) + for (i = 1; i <= cpufreq_cdev->max_level; i++) + if (freq > freq_table[i].frequency) break; - return pt[i - 1].power; + return freq_table[i - 1].power; } -static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_device, +static u32 cpu_power_to_freq(struct cpufreq_cooling_device *cpufreq_cdev, u32 power) { int i; - struct power_table *pt = cpufreq_device->dyn_power_table; + struct freq_table *freq_table = cpufreq_cdev->freq_table; - for (i = 1; i < cpufreq_device->dyn_power_table_entries; i++) - if (power < pt[i].power) + for (i = 1; i <= cpufreq_cdev->max_level; i++) + if (power > freq_table[i].power) break; - return pt[i - 1].frequency; + return freq_table[i - 1].frequency; } /** * get_load() - get load for a cpu since last updated - * @cpufreq_device: &struct cpufreq_cooling_device for this cpu + * @cpufreq_cdev: &struct cpufreq_cooling_device for this cpu * @cpu: cpu number - * @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus + * @cpu_idx: index of the cpu in time_in_idle* * * Return: The average load of cpu @cpu in percentage since this * function was last called. 
*/ -static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu, +static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu, int cpu_idx) { u32 load; u64 now, now_idle, delta_time, delta_idle; + struct time_in_idle *idle_time = &cpufreq_cdev->idle_time[cpu_idx]; now_idle = get_cpu_idle_time(cpu, &now, 0); - delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx]; - delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx]; + delta_idle = now_idle - idle_time->time; + delta_time = now - idle_time->timestamp; if (delta_time <= delta_idle) load = 0; else load = div64_u64(100 * (delta_time - delta_idle), delta_time); - cpufreq_device->time_in_idle[cpu_idx] = now_idle; - cpufreq_device->time_in_idle_timestamp[cpu_idx] = now; + idle_time->time = now_idle; + idle_time->timestamp = now; return load; } /** * get_static_power() - calculate the static power consumed by the cpus - * @cpufreq_device: struct &cpufreq_cooling_device for this cpu cdev + * @cpufreq_cdev: struct &cpufreq_cooling_device for this cpu cdev * @tz: thermal zone device in which we're operating * @freq: frequency in KHz * @power: pointer in which to store the calculated static power @@ -376,26 +333,28 @@ static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu, * * Return: 0 on success, -E* on failure. */ -static int get_static_power(struct cpufreq_cooling_device *cpufreq_device, +static int get_static_power(struct cpufreq_cooling_device *cpufreq_cdev, struct thermal_zone_device *tz, unsigned long freq, u32 *power) { struct dev_pm_opp *opp; unsigned long voltage; - struct cpumask *cpumask = &cpufreq_device->allowed_cpus; + struct cpufreq_policy *policy = cpufreq_cdev->policy; + struct cpumask *cpumask = policy->related_cpus; unsigned long freq_hz = freq * 1000; + struct device *dev; - if (!cpufreq_device->plat_get_static_power || - !cpufreq_device->cpu_dev) { + if (!cpufreq_cdev->plat_get_static_power) { *power = 0; return 0; } - opp = dev_pm_opp_find_freq_exact(cpufreq_device->cpu_dev, freq_hz, - true); + dev = get_cpu_device(policy->cpu); + WARN_ON(!dev); + + opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true); if (IS_ERR(opp)) { - dev_warn_ratelimited(cpufreq_device->cpu_dev, - "Failed to find OPP for frequency %lu: %ld\n", + dev_warn_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n", freq_hz, PTR_ERR(opp)); return -EINVAL; } @@ -404,31 +363,30 @@ static int get_static_power(struct cpufreq_cooling_device *cpufreq_device, dev_pm_opp_put(opp); if (voltage == 0) { - dev_err_ratelimited(cpufreq_device->cpu_dev, - "Failed to get voltage for frequency %lu\n", + dev_err_ratelimited(dev, "Failed to get voltage for frequency %lu\n", freq_hz); return -EINVAL; } - return cpufreq_device->plat_get_static_power(cpumask, tz->passive_delay, - voltage, power); + return cpufreq_cdev->plat_get_static_power(cpumask, tz->passive_delay, + voltage, power); } /** * get_dynamic_power() - calculate the dynamic power - * @cpufreq_device: &cpufreq_cooling_device for this cdev + * @cpufreq_cdev: &cpufreq_cooling_device for this cdev * @freq: current frequency * * Return: the dynamic power consumed by the cpus described by - * @cpufreq_device. + * @cpufreq_cdev. 
*/ -static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device, +static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_cdev, unsigned long freq) { u32 raw_cpu_power; - raw_cpu_power = cpu_freq_to_power(cpufreq_device, freq); - return (raw_cpu_power * cpufreq_device->last_load) / 100; + raw_cpu_power = cpu_freq_to_power(cpufreq_cdev, freq); + return (raw_cpu_power * cpufreq_cdev->last_load) / 100; } /* cpufreq cooling device callback functions are defined below */ @@ -446,9 +404,9 @@ static u32 get_dynamic_power(struct cpufreq_cooling_device *cpufreq_device, static int cpufreq_get_max_state(struct thermal_cooling_device *cdev, unsigned long *state) { - struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; + struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; - *state = cpufreq_device->max_level; + *state = cpufreq_cdev->max_level; return 0; } @@ -465,9 +423,9 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev, static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *state) { - struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; + struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; - *state = cpufreq_device->cpufreq_state; + *state = cpufreq_cdev->cpufreq_state; return 0; } @@ -485,23 +443,22 @@ static int cpufreq_get_cur_state(struct thermal_cooling_device *cdev, static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) { - struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; - unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus); + struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; unsigned int clip_freq; /* Request state should be less than max_level */ - if (WARN_ON(state > cpufreq_device->max_level)) + if (WARN_ON(state > cpufreq_cdev->max_level)) return -EINVAL; /* Check if the old cooling action is same as new cooling action */ - if (cpufreq_device->cpufreq_state == state) + if (cpufreq_cdev->cpufreq_state == state) return 0; - clip_freq = cpufreq_device->freq_table[state]; - cpufreq_device->cpufreq_state = state; - cpufreq_device->clipped_freq = clip_freq; + clip_freq = cpufreq_cdev->freq_table[state].frequency; + cpufreq_cdev->cpufreq_state = state; + cpufreq_cdev->clipped_freq = clip_freq; - cpufreq_update_policy(cpu); + cpufreq_update_policy(cpufreq_cdev->policy->cpu); return 0; } @@ -536,33 +493,23 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, unsigned long freq; int i = 0, cpu, ret; u32 static_power, dynamic_power, total_load = 0; - struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; + struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; + struct cpufreq_policy *policy = cpufreq_cdev->policy; u32 *load_cpu = NULL; - cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask); - - /* - * All the CPUs are offline, thus the requested power by - * the cdev is 0 - */ - if (cpu >= nr_cpu_ids) { - *power = 0; - return 0; - } - - freq = cpufreq_quick_get(cpu); + freq = cpufreq_quick_get(policy->cpu); if (trace_thermal_power_cpu_get_power_enabled()) { - u32 ncpus = cpumask_weight(&cpufreq_device->allowed_cpus); + u32 ncpus = cpumask_weight(policy->related_cpus); load_cpu = kcalloc(ncpus, sizeof(*load_cpu), GFP_KERNEL); } - for_each_cpu(cpu, &cpufreq_device->allowed_cpus) { + for_each_cpu(cpu, policy->related_cpus) { u32 load; if (cpu_online(cpu)) - load = get_load(cpufreq_device, cpu, i); + load = get_load(cpufreq_cdev, cpu, i); else load = 0; @@ 
-573,19 +520,19 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, i++; } - cpufreq_device->last_load = total_load; + cpufreq_cdev->last_load = total_load; - dynamic_power = get_dynamic_power(cpufreq_device, freq); - ret = get_static_power(cpufreq_device, tz, freq, &static_power); + dynamic_power = get_dynamic_power(cpufreq_cdev, freq); + ret = get_static_power(cpufreq_cdev, tz, freq, &static_power); if (ret) { kfree(load_cpu); return ret; } if (load_cpu) { - trace_thermal_power_cpu_get_power( - &cpufreq_device->allowed_cpus, - freq, load_cpu, i, dynamic_power, static_power); + trace_thermal_power_cpu_get_power(policy->related_cpus, freq, + load_cpu, i, dynamic_power, + static_power); kfree(load_cpu); } @@ -614,38 +561,23 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev, unsigned long state, u32 *power) { unsigned int freq, num_cpus; - cpumask_var_t cpumask; u32 static_power, dynamic_power; int ret; - struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; - - if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) - return -ENOMEM; - - cpumask_and(cpumask, &cpufreq_device->allowed_cpus, cpu_online_mask); - num_cpus = cpumask_weight(cpumask); + struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; - /* None of our cpus are online, so no power */ - if (num_cpus == 0) { - *power = 0; - ret = 0; - goto out; - } + /* Request state should be less than max_level */ + if (WARN_ON(state > cpufreq_cdev->max_level)) + return -EINVAL; - freq = cpufreq_device->freq_table[state]; - if (!freq) { - ret = -EINVAL; - goto out; - } + num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus); - dynamic_power = cpu_freq_to_power(cpufreq_device, freq) * num_cpus; - ret = get_static_power(cpufreq_device, tz, freq, &static_power); + freq = cpufreq_cdev->freq_table[state].frequency; + dynamic_power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus; + ret = get_static_power(cpufreq_cdev, tz, freq, &static_power); if (ret) - goto out; + return ret; *power = static_power + dynamic_power; -out: - free_cpumask_var(cpumask); return ret; } @@ -673,39 +605,27 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev, struct thermal_zone_device *tz, u32 power, unsigned long *state) { - unsigned int cpu, cur_freq, target_freq; + unsigned int cur_freq, target_freq; int ret; s32 dyn_power; u32 last_load, normalised_power, static_power; - struct cpufreq_cooling_device *cpufreq_device = cdev->devdata; + struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; + struct cpufreq_policy *policy = cpufreq_cdev->policy; - cpu = cpumask_any_and(&cpufreq_device->allowed_cpus, cpu_online_mask); - - /* None of our cpus are online */ - if (cpu >= nr_cpu_ids) - return -ENODEV; - - cur_freq = cpufreq_quick_get(cpu); - ret = get_static_power(cpufreq_device, tz, cur_freq, &static_power); + cur_freq = cpufreq_quick_get(policy->cpu); + ret = get_static_power(cpufreq_cdev, tz, cur_freq, &static_power); if (ret) return ret; dyn_power = power - static_power; dyn_power = dyn_power > 0 ? 
dyn_power : 0; - last_load = cpufreq_device->last_load ?: 1; + last_load = cpufreq_cdev->last_load ?: 1; normalised_power = (dyn_power * 100) / last_load; - target_freq = cpu_power_to_freq(cpufreq_device, normalised_power); + target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power); - *state = cpufreq_cooling_get_level(cpu, target_freq); - if (*state == THERMAL_CSTATE_INVALID) { - dev_err_ratelimited(&cdev->device, - "Failed to convert %dKHz for cpu %d into a cdev state\n", - target_freq, cpu); - return -EINVAL; - } - - trace_thermal_power_cpu_limit(&cpufreq_device->allowed_cpus, - target_freq, *state, power); + *state = get_level(cpufreq_cdev, target_freq); + trace_thermal_power_cpu_limit(policy->related_cpus, target_freq, *state, + power); return 0; } @@ -748,7 +668,7 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table, /** * __cpufreq_cooling_register - helper function to create cpufreq cooling device * @np: a valid struct device_node to the cooling device device tree node - * @clip_cpus: cpumask of cpus where the frequency constraints will happen. + * @policy: cpufreq policy * Normally this should be same as cpufreq policy->related_cpus. * @capacitance: dynamic power coefficient for these cpus * @plat_static_func: function to calculate the static power consumed by these @@ -764,102 +684,68 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table, */ static struct thermal_cooling_device * __cpufreq_cooling_register(struct device_node *np, - const struct cpumask *clip_cpus, u32 capacitance, + struct cpufreq_policy *policy, u32 capacitance, get_static_t plat_static_func) { - struct cpufreq_policy *policy; - struct thermal_cooling_device *cool_dev; - struct cpufreq_cooling_device *cpufreq_dev; + struct thermal_cooling_device *cdev; + struct cpufreq_cooling_device *cpufreq_cdev; char dev_name[THERMAL_NAME_LENGTH]; - struct cpufreq_frequency_table *pos, *table; - cpumask_var_t temp_mask; unsigned int freq, i, num_cpus; int ret; struct thermal_cooling_device_ops *cooling_ops; bool first; - if (!alloc_cpumask_var(&temp_mask, GFP_KERNEL)) - return ERR_PTR(-ENOMEM); - - cpumask_and(temp_mask, clip_cpus, cpu_online_mask); - policy = cpufreq_cpu_get(cpumask_first(temp_mask)); - if (!policy) { - pr_debug("%s: CPUFreq policy not found\n", __func__); - cool_dev = ERR_PTR(-EPROBE_DEFER); - goto free_cpumask; + if (IS_ERR_OR_NULL(policy)) { + pr_err("%s: cpufreq policy isn't valid: %p", __func__, policy); + return ERR_PTR(-EINVAL); } - table = policy->freq_table; - if (!table) { - pr_debug("%s: CPUFreq table not found\n", __func__); - cool_dev = ERR_PTR(-ENODEV); - goto put_policy; + i = cpufreq_table_count_valid_entries(policy); + if (!i) { + pr_debug("%s: CPUFreq table not found or has no valid entries\n", + __func__); + return ERR_PTR(-ENODEV); } - cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL); - if (!cpufreq_dev) { - cool_dev = ERR_PTR(-ENOMEM); - goto put_policy; - } + cpufreq_cdev = kzalloc(sizeof(*cpufreq_cdev), GFP_KERNEL); + if (!cpufreq_cdev) + return ERR_PTR(-ENOMEM); - num_cpus = cpumask_weight(clip_cpus); - cpufreq_dev->time_in_idle = kcalloc(num_cpus, - sizeof(*cpufreq_dev->time_in_idle), - GFP_KERNEL); - if (!cpufreq_dev->time_in_idle) { - cool_dev = ERR_PTR(-ENOMEM); + cpufreq_cdev->policy = policy; + num_cpus = cpumask_weight(policy->related_cpus); + cpufreq_cdev->idle_time = kcalloc(num_cpus, + sizeof(*cpufreq_cdev->idle_time), + GFP_KERNEL); + if (!cpufreq_cdev->idle_time) { + cdev = ERR_PTR(-ENOMEM); goto free_cdev; } - 
cpufreq_dev->time_in_idle_timestamp = - kcalloc(num_cpus, sizeof(*cpufreq_dev->time_in_idle_timestamp), - GFP_KERNEL); - if (!cpufreq_dev->time_in_idle_timestamp) { - cool_dev = ERR_PTR(-ENOMEM); - goto free_time_in_idle; - } - - /* Find max levels */ - cpufreq_for_each_valid_entry(pos, table) - cpufreq_dev->max_level++; - - cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) * - cpufreq_dev->max_level, GFP_KERNEL); - if (!cpufreq_dev->freq_table) { - cool_dev = ERR_PTR(-ENOMEM); - goto free_time_in_idle_timestamp; - } - /* max_level is an index, not a counter */ - cpufreq_dev->max_level--; - - cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus); - - if (capacitance) { - cpufreq_dev->plat_get_static_power = plat_static_func; - - ret = build_dyn_power_table(cpufreq_dev, capacitance); - if (ret) { - cool_dev = ERR_PTR(ret); - goto free_table; - } - - cooling_ops = &cpufreq_power_cooling_ops; - } else { - cooling_ops = &cpufreq_cooling_ops; + cpufreq_cdev->max_level = i - 1; + + cpufreq_cdev->freq_table = kmalloc_array(i, + sizeof(*cpufreq_cdev->freq_table), + GFP_KERNEL); + if (!cpufreq_cdev->freq_table) { + cdev = ERR_PTR(-ENOMEM); + goto free_idle_time; } ret = ida_simple_get(&cpufreq_ida, 0, 0, GFP_KERNEL); if (ret < 0) { - cool_dev = ERR_PTR(ret); - goto free_power_table; + cdev = ERR_PTR(ret); + goto free_table; } - cpufreq_dev->id = ret; + cpufreq_cdev->id = ret; + + snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", + cpufreq_cdev->id); /* Fill freq-table in descending order of frequencies */ - for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) { - freq = find_next_max(table, freq); - cpufreq_dev->freq_table[i] = freq; + for (i = 0, freq = -1; i <= cpufreq_cdev->max_level; i++) { + freq = find_next_max(policy->freq_table, freq); + cpufreq_cdev->freq_table[i].frequency = freq; /* Warn for duplicate entries */ if (!freq) @@ -868,51 +754,54 @@ __cpufreq_cooling_register(struct device_node *np, pr_debug("%s: freq:%u KHz\n", __func__, freq); } - snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d", - cpufreq_dev->id); + if (capacitance) { + cpufreq_cdev->plat_get_static_power = plat_static_func; + + ret = update_freq_table(cpufreq_cdev, capacitance); + if (ret) { + cdev = ERR_PTR(ret); + goto remove_ida; + } + + cooling_ops = &cpufreq_power_cooling_ops; + } else { + cooling_ops = &cpufreq_cooling_ops; + } - cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev, - cooling_ops); - if (IS_ERR(cool_dev)) + cdev = thermal_of_cooling_device_register(np, dev_name, cpufreq_cdev, + cooling_ops); + if (IS_ERR(cdev)) goto remove_ida; - cpufreq_dev->clipped_freq = cpufreq_dev->freq_table[0]; - cpufreq_dev->cool_dev = cool_dev; + cpufreq_cdev->clipped_freq = cpufreq_cdev->freq_table[0].frequency; + cpufreq_cdev->cdev = cdev; mutex_lock(&cooling_list_lock); /* Register the notifier for first cpufreq cooling device */ - first = list_empty(&cpufreq_dev_list); - list_add(&cpufreq_dev->node, &cpufreq_dev_list); + first = list_empty(&cpufreq_cdev_list); + list_add(&cpufreq_cdev->node, &cpufreq_cdev_list); mutex_unlock(&cooling_list_lock); if (first) cpufreq_register_notifier(&thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); - goto put_policy; + return cdev; remove_ida: - ida_simple_remove(&cpufreq_ida, cpufreq_dev->id); -free_power_table: - kfree(cpufreq_dev->dyn_power_table); + ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); free_table: - kfree(cpufreq_dev->freq_table); -free_time_in_idle_timestamp: - kfree(cpufreq_dev->time_in_idle_timestamp); 
-free_time_in_idle: - kfree(cpufreq_dev->time_in_idle); + kfree(cpufreq_cdev->freq_table); +free_idle_time: + kfree(cpufreq_cdev->idle_time); free_cdev: - kfree(cpufreq_dev); -put_policy: - cpufreq_cpu_put(policy); -free_cpumask: - free_cpumask_var(temp_mask); - return cool_dev; + kfree(cpufreq_cdev); + return cdev; } /** * cpufreq_cooling_register - function to create cpufreq cooling device. - * @clip_cpus: cpumask of cpus where the frequency constraints will happen. + * @policy: cpufreq policy * * This interface function registers the cpufreq cooling device with the name * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq @@ -922,16 +811,16 @@ free_cpumask: * on failure, it returns a corresponding ERR_PTR(). */ struct thermal_cooling_device * -cpufreq_cooling_register(const struct cpumask *clip_cpus) +cpufreq_cooling_register(struct cpufreq_policy *policy) { - return __cpufreq_cooling_register(NULL, clip_cpus, 0, NULL); + return __cpufreq_cooling_register(NULL, policy, 0, NULL); } EXPORT_SYMBOL_GPL(cpufreq_cooling_register); /** * of_cpufreq_cooling_register - function to create cpufreq cooling device. * @np: a valid struct device_node to the cooling device device tree node - * @clip_cpus: cpumask of cpus where the frequency constraints will happen. + * @policy: cpufreq policy * * This interface function registers the cpufreq cooling device with the name * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq @@ -943,18 +832,18 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register); */ struct thermal_cooling_device * of_cpufreq_cooling_register(struct device_node *np, - const struct cpumask *clip_cpus) + struct cpufreq_policy *policy) { if (!np) return ERR_PTR(-EINVAL); - return __cpufreq_cooling_register(np, clip_cpus, 0, NULL); + return __cpufreq_cooling_register(np, policy, 0, NULL); } EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register); /** * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions - * @clip_cpus: cpumask of cpus where the frequency constraints will happen + * @policy: cpufreq policy * @capacitance: dynamic power coefficient for these cpus * @plat_static_func: function to calculate the static power consumed by these * cpus (optional) @@ -974,10 +863,10 @@ EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register); * on failure, it returns a corresponding ERR_PTR(). 
*/ struct thermal_cooling_device * -cpufreq_power_cooling_register(const struct cpumask *clip_cpus, u32 capacitance, +cpufreq_power_cooling_register(struct cpufreq_policy *policy, u32 capacitance, get_static_t plat_static_func) { - return __cpufreq_cooling_register(NULL, clip_cpus, capacitance, + return __cpufreq_cooling_register(NULL, policy, capacitance, plat_static_func); } EXPORT_SYMBOL(cpufreq_power_cooling_register); @@ -985,7 +874,7 @@ EXPORT_SYMBOL(cpufreq_power_cooling_register); /** * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions * @np: a valid struct device_node to the cooling device device tree node - * @clip_cpus: cpumask of cpus where the frequency constraints will happen + * @policy: cpufreq policy * @capacitance: dynamic power coefficient for these cpus * @plat_static_func: function to calculate the static power consumed by these * cpus (optional) @@ -1007,14 +896,14 @@ EXPORT_SYMBOL(cpufreq_power_cooling_register); */ struct thermal_cooling_device * of_cpufreq_power_cooling_register(struct device_node *np, - const struct cpumask *clip_cpus, + struct cpufreq_policy *policy, u32 capacitance, get_static_t plat_static_func) { if (!np) return ERR_PTR(-EINVAL); - return __cpufreq_cooling_register(np, clip_cpus, capacitance, + return __cpufreq_cooling_register(np, policy, capacitance, plat_static_func); } EXPORT_SYMBOL(of_cpufreq_power_cooling_register); @@ -1027,30 +916,28 @@ EXPORT_SYMBOL(of_cpufreq_power_cooling_register); */ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev) { - struct cpufreq_cooling_device *cpufreq_dev; + struct cpufreq_cooling_device *cpufreq_cdev; bool last; if (!cdev) return; - cpufreq_dev = cdev->devdata; + cpufreq_cdev = cdev->devdata; mutex_lock(&cooling_list_lock); - list_del(&cpufreq_dev->node); + list_del(&cpufreq_cdev->node); /* Unregister the notifier for the last cpufreq cooling device */ - last = list_empty(&cpufreq_dev_list); + last = list_empty(&cpufreq_cdev_list); mutex_unlock(&cooling_list_lock); if (last) cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block, CPUFREQ_POLICY_NOTIFIER); - thermal_cooling_device_unregister(cpufreq_dev->cool_dev); - ida_simple_remove(&cpufreq_ida, cpufreq_dev->id); - kfree(cpufreq_dev->dyn_power_table); - kfree(cpufreq_dev->time_in_idle_timestamp); - kfree(cpufreq_dev->time_in_idle); - kfree(cpufreq_dev->freq_table); - kfree(cpufreq_dev); + thermal_cooling_device_unregister(cpufreq_cdev->cdev); + ida_simple_remove(&cpufreq_ida, cpufreq_cdev->id); + kfree(cpufreq_cdev->idle_time); + kfree(cpufreq_cdev->freq_table); + kfree(cpufreq_cdev); } EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister); diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c index 68bd1b569118..d3469fbc5207 100644 --- a/drivers/thermal/fair_share.c +++ b/drivers/thermal/fair_share.c @@ -71,6 +71,7 @@ static long get_target_state(struct thermal_zone_device *tz, /** * fair_share_throttle - throttles devices associated with the given zone * @tz - thermal_zone_device + * @trip - trip point index * * Throttling Logic: This uses three parameters to calculate the new * throttle state of the cooling devices associated with the given zone. 
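Two of the thermal resume fixes below (hisi_thermal and imx_thermal) share one pattern: clk_prepare_enable() returns an error code that the old code ignored. A minimal sketch of a resume callback under that convention; the demo_* names are invented for illustration:

#include <linux/clk.h>
#include <linux/device.h>

/* Hypothetical driver data; only the clock matters here. */
struct demo_thermal_data {
	struct clk *clk;
};

/* clk_prepare_enable() can fail (unavailable parent, enable-time errors),
 * so the resume path must propagate the error instead of programming
 * hardware whose clock never came up. */
static int demo_thermal_resume(struct device *dev)
{
	struct demo_thermal_data *data = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(data->clk);
	if (ret)
		return ret;

	/* ... re-enable sensors and interrupts, as the real drivers do ... */
	return 0;
}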
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c index f6429666a1cf..9c3ce341eb97 100644 --- a/drivers/thermal/hisi_thermal.c +++ b/drivers/thermal/hisi_thermal.c @@ -397,8 +397,11 @@ static int hisi_thermal_suspend(struct device *dev) static int hisi_thermal_resume(struct device *dev) { struct hisi_thermal_data *data = dev_get_drvdata(dev); + int ret; - clk_prepare_enable(data->clk); + ret = clk_prepare_enable(data->clk); + if (ret) + return ret; data->irq_enabled = true; hisi_thermal_enable_bind_irq_sensor(data); diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index fb648a45754e..4798b4b1fd77 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -8,6 +8,7 @@ */ #include <linux/clk.h> +#include <linux/cpufreq.h> #include <linux/cpu_cooling.h> #include <linux/delay.h> #include <linux/device.h> @@ -88,6 +89,7 @@ static struct thermal_soc_data thermal_imx6sx_data = { }; struct imx_thermal_data { + struct cpufreq_policy *policy; struct thermal_zone_device *tz; struct thermal_cooling_device *cdev; enum thermal_device_mode mode; @@ -525,13 +527,18 @@ static int imx_thermal_probe(struct platform_device *pdev) regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF); regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN); - data->cdev = cpufreq_cooling_register(cpu_present_mask); + data->policy = cpufreq_cpu_get(0); + if (!data->policy) { + pr_debug("%s: CPUFreq policy not found\n", __func__); + return -EPROBE_DEFER; + } + + data->cdev = cpufreq_cooling_register(data->policy); if (IS_ERR(data->cdev)) { ret = PTR_ERR(data->cdev); - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, - "failed to register cpufreq cooling device: %d\n", - ret); + dev_err(&pdev->dev, + "failed to register cpufreq cooling device: %d\n", ret); + cpufreq_cpu_put(data->policy); return ret; } @@ -542,6 +549,7 @@ static int imx_thermal_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to get thermal clk: %d\n", ret); cpufreq_cooling_unregister(data->cdev); + cpufreq_cpu_put(data->policy); return ret; } @@ -556,6 +564,7 @@ static int imx_thermal_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret); cpufreq_cooling_unregister(data->cdev); + cpufreq_cpu_put(data->policy); return ret; } @@ -571,6 +580,7 @@ static int imx_thermal_probe(struct platform_device *pdev) "failed to register thermal zone device %d\n", ret); clk_disable_unprepare(data->thermal_clk); cpufreq_cooling_unregister(data->cdev); + cpufreq_cpu_put(data->policy); return ret; } @@ -599,6 +609,7 @@ static int imx_thermal_probe(struct platform_device *pdev) clk_disable_unprepare(data->thermal_clk); thermal_zone_device_unregister(data->tz); cpufreq_cooling_unregister(data->cdev); + cpufreq_cpu_put(data->policy); return ret; } @@ -620,6 +631,7 @@ static int imx_thermal_remove(struct platform_device *pdev) thermal_zone_device_unregister(data->tz); cpufreq_cooling_unregister(data->cdev); + cpufreq_cpu_put(data->policy); return 0; } @@ -648,8 +660,11 @@ static int imx_thermal_resume(struct device *dev) { struct imx_thermal_data *data = dev_get_drvdata(dev); struct regmap *map = data->tempmon; + int ret; - clk_prepare_enable(data->thermal_clk); + ret = clk_prepare_enable(data->thermal_clk); + if (ret) + return ret; /* Enabled thermal sensor after resume */ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP); diff --git 
a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c index 2c2ec7666eb1..51ceb80212a7 100644 --- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c +++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c @@ -62,8 +62,8 @@ static int acpi_thermal_rel_release(struct inode *inode, struct file *file) * acpi_parse_trt - Thermal Relationship Table _TRT for passive cooling * * @handle: ACPI handle of the device contains _TRT - * @art_count: the number of valid entries resulted from parsing _TRT - * @artp: pointer to pointer of array of art entries in parsing result + * @trt_count: the number of valid entries resulted from parsing _TRT + * @trtp: pointer to pointer of array of _TRT entries in parsing result * @create_dev: whether to create platform devices for target and source * */ @@ -208,7 +208,7 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp, if (art->target) { result = acpi_bus_get_device(art->target, &adev); if (result) - pr_warn("Failed to get source ACPI device\n"); + pr_warn("Failed to get target ACPI device\n"); } } diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c index c4890c9437eb..8a7f24dd9315 100644 --- a/drivers/thermal/int340x_thermal/int3403_thermal.c +++ b/drivers/thermal/int340x_thermal/int3403_thermal.c @@ -238,8 +238,16 @@ static int int3403_add(struct platform_device *pdev) status = acpi_evaluate_integer(priv->adev->handle, "PTYP", NULL, &priv->type); if (ACPI_FAILURE(status)) { - result = -EINVAL; - goto err; + unsigned long long tmp; + + status = acpi_evaluate_integer(priv->adev->handle, "_TMP", + NULL, &tmp); + if (ACPI_FAILURE(status)) { + result = -EINVAL; + goto err; + } else { + priv->type = INT3403_TYPE_SENSOR; + } } platform_set_drvdata(pdev, priv); diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c index bcef2e7c4ec9..be95826631b7 100644 --- a/drivers/thermal/step_wise.c +++ b/drivers/thermal/step_wise.c @@ -186,8 +186,7 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip) /** * step_wise_throttle - throttles devices associated with the given zone * @tz - thermal_zone_device - * @trip - the trip point - * @trip_type - type of the trip point + * @trip - trip point index * * Throttling Logic: This uses the trend of the thermal zone to throttle. 
* If the thermal zone is 'heating up' this throttles all the cooling diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c index 02790f69e26c..c211a8e4a210 100644 --- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c +++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c @@ -28,6 +28,7 @@ #include <linux/kernel.h> #include <linux/workqueue.h> #include <linux/thermal.h> +#include <linux/cpufreq.h> #include <linux/cpumask.h> #include <linux/cpu_cooling.h> #include <linux/of.h> @@ -37,6 +38,7 @@ /* common data structures */ struct ti_thermal_data { + struct cpufreq_policy *policy; struct thermal_zone_device *ti_thermal; struct thermal_zone_device *pcb_tz; struct thermal_cooling_device *cool_dev; @@ -247,15 +249,19 @@ int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id) if (!data) return -EINVAL; + data->policy = cpufreq_cpu_get(0); + if (!data->policy) { + pr_debug("%s: CPUFreq policy not found\n", __func__); + return -EPROBE_DEFER; + } + /* Register cooling device */ - data->cool_dev = cpufreq_cooling_register(cpu_present_mask); + data->cool_dev = cpufreq_cooling_register(data->policy); if (IS_ERR(data->cool_dev)) { int ret = PTR_ERR(data->cool_dev); - - if (ret != -EPROBE_DEFER) - dev_err(bgp->dev, - "Failed to register cpu cooling device %d\n", - ret); + dev_err(bgp->dev, "Failed to register cpu cooling device %d\n", + ret); + cpufreq_cpu_put(data->policy); return ret; } @@ -270,8 +276,10 @@ int ti_thermal_unregister_cpu_cooling(struct ti_bandgap *bgp, int id) data = ti_bandgap_get_sensor_data(bgp, id); - if (data) + if (data) { cpufreq_cooling_unregister(data->cool_dev); + cpufreq_cpu_put(data->policy); + } return 0; } diff --git a/drivers/thermal/user_space.c b/drivers/thermal/user_space.c index c908150c268d..8e92a06ef48a 100644 --- a/drivers/thermal/user_space.c +++ b/drivers/thermal/user_space.c @@ -24,12 +24,13 @@ #include <linux/thermal.h> #include <linux/slab.h> + #include "thermal_core.h" /** * notify_user_space - Notifies user space about thermal events * @tz - thermal_zone_device - * @trip - Trip point index + * @trip - trip point index * * This function notifies the user space through UEvents. */ diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index ab3e8f410444..40219a706309 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -281,9 +281,11 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id, if (active) { config.name = "nvm_active"; config.reg_read = tb_switch_nvm_read; + config.read_only = true; } else { config.name = "nvm_non_active"; config.reg_write = tb_switch_nvm_write; + config.root_only = true; } config.id = id; @@ -292,7 +294,6 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id, config.size = size; config.dev = &sw->dev; config.owner = THIS_MODULE; - config.root_only = true; config.priv = sw; return nvmem_register(&config); diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index d1399aac05a1..284749fb0f6b 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -448,48 +448,6 @@ err: return retval; } -/** - * pty_open_peer - open the peer of a pty - * @tty: the peer of the pty being opened - * - * Open the cached dentry in tty->link, providing a safe way for userspace - * to get the slave end of a pty (where they have the master fd and cannot - * access or trust the mount namespace /dev/pts was mounted inside). 
- */ -static struct file *pty_open_peer(struct tty_struct *tty, int flags) -{ - if (tty->driver->subtype != PTY_TYPE_MASTER) - return ERR_PTR(-EIO); - return dentry_open(tty->link->driver_data, flags, current_cred()); -} - -static int pty_get_peer(struct tty_struct *tty, int flags) -{ - int fd = -1; - struct file *filp = NULL; - int retval = -EINVAL; - - fd = get_unused_fd_flags(0); - if (fd < 0) { - retval = fd; - goto err; - } - - filp = pty_open_peer(tty, flags); - if (IS_ERR(filp)) { - retval = PTR_ERR(filp); - goto err_put; - } - - fd_install(fd, filp); - return fd; - -err_put: - put_unused_fd(fd); -err: - return retval; -} - static void pty_cleanup(struct tty_struct *tty) { tty_port_put(tty->port); @@ -646,9 +604,50 @@ static inline void legacy_pty_init(void) { } /* Unix98 devices */ #ifdef CONFIG_UNIX98_PTYS - static struct cdev ptmx_cdev; +/** + * pty_open_peer - open the peer of a pty + * @tty: the peer of the pty being opened + * + * Open the cached dentry in tty->link, providing a safe way for userspace + * to get the slave end of a pty (where they have the master fd and cannot + * access or trust the mount namespace /dev/pts was mounted inside). + */ +static struct file *pty_open_peer(struct tty_struct *tty, int flags) +{ + if (tty->driver->subtype != PTY_TYPE_MASTER) + return ERR_PTR(-EIO); + return dentry_open(tty->link->driver_data, flags, current_cred()); +} + +static int pty_get_peer(struct tty_struct *tty, int flags) +{ + int fd = -1; + struct file *filp = NULL; + int retval = -EINVAL; + + fd = get_unused_fd_flags(0); + if (fd < 0) { + retval = fd; + goto err; + } + + filp = pty_open_peer(tty, flags); + if (IS_ERR(filp)) { + retval = PTR_ERR(filp); + goto err_put; + } + + fd_install(fd, filp); + return fd; + +err_put: + put_unused_fd(fd); +err: + return retval; +} + static int pty_unix98_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 343de8c384b0..898dcb091a27 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -619,6 +619,12 @@ static unsigned int lpuart32_tx_empty(struct uart_port *port) TIOCSER_TEMT : 0; } +static bool lpuart_is_32(struct lpuart_port *sport) +{ + return sport->port.iotype == UPIO_MEM32 || + sport->port.iotype == UPIO_MEM32BE; +} + static irqreturn_t lpuart_txint(int irq, void *dev_id) { struct lpuart_port *sport = dev_id; @@ -627,7 +633,7 @@ static irqreturn_t lpuart_txint(int irq, void *dev_id) spin_lock_irqsave(&sport->port.lock, flags); if (sport->port.x_char) { - if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) + if (lpuart_is_32(sport)) lpuart32_write(&sport->port, sport->port.x_char, UARTDATA); else writeb(sport->port.x_char, sport->port.membase + UARTDR); @@ -635,14 +641,14 @@ static irqreturn_t lpuart_txint(int irq, void *dev_id) } if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) { - if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) + if (lpuart_is_32(sport)) lpuart32_stop_tx(&sport->port); else lpuart_stop_tx(&sport->port); goto out; } - if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) + if (lpuart_is_32(sport)) lpuart32_transmit_buffer(sport); else lpuart_transmit_buffer(sport); @@ -1978,12 +1984,12 @@ static int __init lpuart_console_setup(struct console *co, char *options) if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else - if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) + if (lpuart_is_32(sport)) lpuart32_console_get_options(sport, &baud, 
&parity, &bits); else lpuart_console_get_options(sport, &baud, &parity, &bits); - if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) + if (lpuart_is_32(sport)) lpuart32_setup_watermark(sport); else lpuart_setup_watermark(sport); @@ -2118,7 +2124,7 @@ static int lpuart_probe(struct platform_device *pdev) } sport->port.irq = ret; sport->port.iotype = sdata->iotype; - if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) + if (lpuart_is_32(sport)) sport->port.ops = &lpuart32_pops; else sport->port.ops = &lpuart_pops; @@ -2145,7 +2151,7 @@ static int lpuart_probe(struct platform_device *pdev) platform_set_drvdata(pdev, &sport->port); - if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) + if (lpuart_is_32(sport)) lpuart_reg.cons = LPUART32_CONSOLE; else lpuart_reg.cons = LPUART_CONSOLE; @@ -2198,7 +2204,7 @@ static int lpuart_suspend(struct device *dev) struct lpuart_port *sport = dev_get_drvdata(dev); unsigned long temp; - if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) { + if (lpuart_is_32(sport)) { /* disable Rx/Tx and interrupts */ temp = lpuart32_read(&sport->port, UARTCTRL); temp &= ~(UARTCTRL_TE | UARTCTRL_TIE | UARTCTRL_TCIE); @@ -2249,7 +2255,7 @@ static int lpuart_resume(struct device *dev) if (sport->port.suspended && !sport->port.irq_wake) clk_prepare_enable(sport->clk); - if (sport->port.iotype & (UPIO_MEM32 | UPIO_MEM32BE)) { + if (lpuart_is_32(sport)) { lpuart32_setup_watermark(sport); temp = lpuart32_read(&sport->port, UARTCTRL); temp |= (UARTCTRL_RIE | UARTCTRL_TIE | UARTCTRL_RE | diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 9e3162bf3bd1..80934e7bd67f 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -186,11 +186,6 @@ #define UART_NR 8 -/* RX DMA buffer periods */ -#define RX_DMA_PERIODS 4 -#define RX_BUF_SIZE (PAGE_SIZE) - - /* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */ enum imx_uart_type { IMX1_UART, @@ -226,7 +221,6 @@ struct imx_port { struct dma_chan *dma_chan_rx, *dma_chan_tx; struct scatterlist rx_sgl, tx_sgl[2]; void *rx_buf; - unsigned int rx_buf_size; struct circ_buf rx_ring; unsigned int rx_periods; dma_cookie_t rx_cookie; @@ -464,7 +458,7 @@ static inline void imx_transmit_buffer(struct imx_port *sport) } } - while (!uart_circ_empty(xmit) && + while (!uart_circ_empty(xmit) && !sport->dma_is_txing && !(readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)) { /* send xmit->buf[xmit->tail] * out the port here */ @@ -967,6 +961,8 @@ static void imx_timeout(unsigned long data) } } +#define RX_BUF_SIZE (PAGE_SIZE) + /* * There are two kinds of RX DMA interrupts(such as in the MX6Q): * [1] the RX DMA buffer is full. 
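An aside on the fsl_lpuart.c hunks above: the old test, iotype & (UPIO_MEM32 | UPIO_MEM32BE), treats the UPIO_* I/O types as bit flags, but they are plain enumerated values, so the mask also matches unrelated types; the patch replaces it with the exact comparisons in lpuart_is_32(). A minimal userspace sketch of the difference, assuming the serial_core.h values of this era (UPIO_MEM = 2, UPIO_MEM32 = 3, UPIO_MEM32BE = 6 are an assumption stated here, not shown in the diff):

#include <stdbool.h>
#include <stdio.h>

/* Assumed UPIO_* values, as in include/linux/serial_core.h of this era. */
#define UPIO_MEM     2   /* plain 8-bit MMIO */
#define UPIO_MEM32   3
#define UPIO_MEM32BE 6

/* Old test: UPIO_MEM32 | UPIO_MEM32BE == 0x7, so this is really
 * iotype & 7 and fires for almost any enumerated value. */
static bool is_32_masked(int iotype)
{
        return iotype & (UPIO_MEM32 | UPIO_MEM32BE);
}

/* New test, the shape of lpuart_is_32(): compare each value exactly. */
static bool is_32_exact(int iotype)
{
        return iotype == UPIO_MEM32 || iotype == UPIO_MEM32BE;
}

int main(void)
{
        /* UPIO_MEM is 8-bit MMIO, yet the masked test claims it is 32-bit. */
        printf("UPIO_MEM:     masked=%d exact=%d\n",
               is_32_masked(UPIO_MEM), is_32_exact(UPIO_MEM));
        printf("UPIO_MEM32:   masked=%d exact=%d\n",
               is_32_masked(UPIO_MEM32), is_32_exact(UPIO_MEM32));
        printf("UPIO_MEM32BE: masked=%d exact=%d\n",
               is_32_masked(UPIO_MEM32BE), is_32_exact(UPIO_MEM32BE));
        return 0;
}

On a port probed with UPIO_MEM the masked test would steer the driver through the 32-bit register accessors, which is exactly the misdetection the helper removes.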
@@ -1049,6 +1045,9 @@ static void dma_rx_callback(void *data) } } +/* RX DMA buffer periods */ +#define RX_DMA_PERIODS 4 + static int start_rx_dma(struct imx_port *sport) { struct scatterlist *sgl = &sport->rx_sgl; @@ -1059,8 +1058,9 @@ static int start_rx_dma(struct imx_port *sport) sport->rx_ring.head = 0; sport->rx_ring.tail = 0; + sport->rx_periods = RX_DMA_PERIODS; - sg_init_one(sgl, sport->rx_buf, sport->rx_buf_size); + sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE); ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE); if (ret == 0) { dev_err(dev, "DMA mapping error for RX.\n"); @@ -1171,7 +1171,7 @@ static int imx_uart_dma_init(struct imx_port *sport) goto err; } - sport->rx_buf = kzalloc(sport->rx_buf_size, GFP_KERNEL); + sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!sport->rx_buf) { ret = -ENOMEM; goto err; @@ -2036,7 +2036,6 @@ static int serial_imx_probe_dt(struct imx_port *sport, { struct device_node *np = pdev->dev.of_node; int ret; - u32 dma_buf_size[2]; sport->devdata = of_device_get_match_data(&pdev->dev); if (!sport->devdata) @@ -2060,14 +2059,6 @@ static int serial_imx_probe_dt(struct imx_port *sport, if (of_get_property(np, "rts-gpios", NULL)) sport->have_rtsgpio = 1; - if (!of_property_read_u32_array(np, "fsl,dma-size", dma_buf_size, 2)) { - sport->rx_buf_size = dma_buf_size[0] * dma_buf_size[1]; - sport->rx_periods = dma_buf_size[1]; - } else { - sport->rx_buf_size = RX_BUF_SIZE; - sport->rx_periods = RX_DMA_PERIODS; - } - return 0; } #else diff --git a/drivers/tty/serial/ioc3_serial.c b/drivers/tty/serial/ioc3_serial.c index 2a61dd6b4009..906ee770ff4a 100644 --- a/drivers/tty/serial/ioc3_serial.c +++ b/drivers/tty/serial/ioc3_serial.c @@ -377,7 +377,7 @@ static struct ioc3_port *get_ioc3_port(struct uart_port *the_port) * called per port from attach... * @port: port to initialize */ -static int inline port_init(struct ioc3_port *port) +static inline int port_init(struct ioc3_port *port) { uint32_t sio_cr; struct port_hooks *hooks = port->ip_hooks; @@ -1430,7 +1430,7 @@ static int receive_chars(struct uart_port *the_port) * @pending: interrupts to handle */ -static int inline +static inline int ioc3uart_intr_one(struct ioc3_submodule *is, struct ioc3_driver_data *idd, unsigned int pending) diff --git a/drivers/tty/serial/ioc4_serial.c b/drivers/tty/serial/ioc4_serial.c index f96bcf9bee25..43d7d32eb150 100644 --- a/drivers/tty/serial/ioc4_serial.c +++ b/drivers/tty/serial/ioc4_serial.c @@ -824,7 +824,7 @@ pending_intrs(struct ioc4_soft *soft, int type) * called per port from attach... * @port: port to initialize */ -static int inline port_init(struct ioc4_port *port) +static inline int port_init(struct ioc4_port *port) { uint32_t sio_cr; struct hooks *hooks = port->ip_hooks; @@ -1048,7 +1048,7 @@ static irqreturn_t ioc4_intr(int irq, void *arg) * IOC4 with serial ports in the system. 
* @idd: Master module data for this IOC4 */ -static int inline ioc4_attach_local(struct ioc4_driver_data *idd) +static inline int ioc4_attach_local(struct ioc4_driver_data *idd) { struct ioc4_port *port; struct ioc4_port *ports[IOC4_NUM_SERIAL_PORTS]; diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c index da5ddfc14778..e08b16b070c0 100644 --- a/drivers/tty/serial/sh-sci.c +++ b/drivers/tty/serial/sh-sci.c @@ -1085,10 +1085,12 @@ static ssize_t rx_trigger_store(struct device *dev, { struct uart_port *port = dev_get_drvdata(dev); struct sci_port *sci = to_sci_port(port); + int ret; long r; - if (kstrtol(buf, 0, &r) == -EINVAL) - return -EINVAL; + ret = kstrtol(buf, 0, &r); + if (ret) + return ret; sci->rx_trigger = scif_set_rtrg(port, r); if (port->type == PORT_SCIFA || port->type == PORT_SCIFB) @@ -1116,10 +1118,12 @@ static ssize_t rx_fifo_timeout_store(struct device *dev, { struct uart_port *port = dev_get_drvdata(dev); struct sci_port *sci = to_sci_port(port); + int ret; long r; - if (kstrtol(buf, 0, &r) == -EINVAL) - return -EINVAL; + ret = kstrtol(buf, 0, &r); + if (ret) + return ret; sci->rx_fifo_timeout = r; scif_set_rtrg(port, 1); if (r > 0) diff --git a/drivers/tty/serial/st-asc.c b/drivers/tty/serial/st-asc.c index f5335be344f6..6b0ca65027d0 100644 --- a/drivers/tty/serial/st-asc.c +++ b/drivers/tty/serial/st-asc.c @@ -758,6 +758,7 @@ static int asc_init_port(struct asc_port *ascport, if (IS_ERR(ascport->pinctrl)) { ret = PTR_ERR(ascport->pinctrl); dev_err(&pdev->dev, "Failed to get Pinctrl: %d\n", ret); + return ret; } ascport->states[DEFAULT] = diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 5357d83bbda2..5e056064259c 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -1829,6 +1829,9 @@ static const struct usb_device_id acm_ids[] = { { USB_DEVICE(0x1576, 0x03b1), /* Maretron USB100 */ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ }, + { USB_DEVICE(0xfff0, 0x0100), /* DATECS FP-2000 */ + .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ + }, { USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */ .driver_info = CLEAR_HALT_CONDITIONS, diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index bc3b3fda5000..c4066cd77e47 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -3573,6 +3573,9 @@ irq_retry: /* Report disconnection if it is not already done. 
*/ dwc2_hsotg_disconnect(hsotg); + /* Reset device address to zero */ + __bic32(hsotg->regs + DCFG, DCFG_DEVADDR_MASK); + if (usb_status & GOTGCTL_BSESVLD && connected) dwc2_hsotg_core_init_disconnected(hsotg, true); } diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 326b302fc440..03474d3575ab 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -766,15 +766,15 @@ static int dwc3_core_init(struct dwc3 *dwc) dwc->maximum_speed = USB_SPEED_HIGH; } - ret = dwc3_core_soft_reset(dwc); + ret = dwc3_core_get_phy(dwc); if (ret) goto err0; - ret = dwc3_phy_setup(dwc); + ret = dwc3_core_soft_reset(dwc); if (ret) goto err0; - ret = dwc3_core_get_phy(dwc); + ret = dwc3_phy_setup(dwc); if (ret) goto err0; diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c index 98926504b55b..f5aaa0cf3873 100644 --- a/drivers/usb/dwc3/dwc3-omap.c +++ b/drivers/usb/dwc3/dwc3-omap.c @@ -512,15 +512,6 @@ static int dwc3_omap_probe(struct platform_device *pdev) /* check the DMA Status */ reg = dwc3_omap_readl(omap->base, USBOTGSS_SYSCONFIG); - irq_set_status_flags(omap->irq, IRQ_NOAUTOEN); - ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt, - dwc3_omap_interrupt_thread, IRQF_SHARED, - "dwc3-omap", omap); - if (ret) { - dev_err(dev, "failed to request IRQ #%d --> %d\n", - omap->irq, ret); - goto err1; - } ret = dwc3_omap_extcon_register(omap); if (ret < 0) @@ -532,8 +523,15 @@ static int dwc3_omap_probe(struct platform_device *pdev) goto err1; } + ret = devm_request_threaded_irq(dev, omap->irq, dwc3_omap_interrupt, + dwc3_omap_interrupt_thread, IRQF_SHARED, + "dwc3-omap", omap); + if (ret) { + dev_err(dev, "failed to request IRQ #%d --> %d\n", + omap->irq, ret); + goto err1; + } dwc3_omap_enable_irqs(omap); - enable_irq(omap->irq); return 0; err1: diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 9e41605a276b..6b299c7b7656 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -191,14 +191,16 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, req->started = false; list_del(&req->list); - req->trb = NULL; req->remaining = 0; if (req->request.status == -EINPROGRESS) req->request.status = status; - usb_gadget_unmap_request_by_dev(dwc->sysdev, - &req->request, req->direction); + if (req->trb) + usb_gadget_unmap_request_by_dev(dwc->sysdev, + &req->request, req->direction); + + req->trb = NULL; trace_dwc3_gadget_giveback(req); diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c index e80b9c123a9d..f95bddd6513f 100644 --- a/drivers/usb/gadget/function/f_mass_storage.c +++ b/drivers/usb/gadget/function/f_mass_storage.c @@ -2490,7 +2490,7 @@ static int fsg_main_thread(void *common_) int i; down_write(&common->filesem); - for (i = 0; i < ARRAY_SIZE(common->luns); --i) { + for (i = 0; i < ARRAY_SIZE(common->luns); i++) { struct fsg_lun *curlun = common->luns[i]; if (!curlun || !fsg_lun_is_open(curlun)) continue; diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c index 8656f84e17d9..29efbedc91f9 100644 --- a/drivers/usb/gadget/function/f_uac1.c +++ b/drivers/usb/gadget/function/f_uac1.c @@ -92,9 +92,9 @@ static struct uac_input_terminal_descriptor usb_out_it_desc = { .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = UAC_INPUT_TERMINAL, .bTerminalID = USB_OUT_IT_ID, - .wTerminalType = UAC_TERMINAL_STREAMING, + .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING), .bAssocTerminal = 0, - 
.wChannelConfig = 0x3, + .wChannelConfig = cpu_to_le16(0x3), }; #define IO_OUT_OT_ID 2 @@ -103,7 +103,7 @@ static struct uac1_output_terminal_descriptor io_out_ot_desc = { .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, .bTerminalID = IO_OUT_OT_ID, - .wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER, + .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER), .bAssocTerminal = 0, .bSourceID = USB_OUT_IT_ID, }; @@ -114,9 +114,9 @@ static struct uac_input_terminal_descriptor io_in_it_desc = { .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = UAC_INPUT_TERMINAL, .bTerminalID = IO_IN_IT_ID, - .wTerminalType = UAC_INPUT_TERMINAL_MICROPHONE, + .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE), .bAssocTerminal = 0, - .wChannelConfig = 0x3, + .wChannelConfig = cpu_to_le16(0x3), }; #define USB_IN_OT_ID 4 @@ -125,7 +125,7 @@ static struct uac1_output_terminal_descriptor usb_in_ot_desc = { .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, .bTerminalID = USB_IN_OT_ID, - .wTerminalType = UAC_TERMINAL_STREAMING, + .wTerminalType = cpu_to_le16(UAC_TERMINAL_STREAMING), .bAssocTerminal = 0, .bSourceID = IO_IN_IT_ID, }; @@ -174,7 +174,7 @@ static struct uac1_as_header_descriptor as_out_header_desc = { .bDescriptorSubtype = UAC_AS_GENERAL, .bTerminalLink = USB_OUT_IT_ID, .bDelay = 1, - .wFormatTag = UAC_FORMAT_TYPE_I_PCM, + .wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM), }; static struct uac1_as_header_descriptor as_in_header_desc = { @@ -183,7 +183,7 @@ .bDescriptorSubtype = UAC_AS_GENERAL, .bTerminalLink = USB_IN_OT_ID, .bDelay = 1, - .wFormatTag = UAC_FORMAT_TYPE_I_PCM, + .wFormatTag = cpu_to_le16(UAC_FORMAT_TYPE_I_PCM), }; DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1); @@ -606,8 +606,8 @@ static int f_audio_bind(struct usb_configuration *c, struct usb_function *f) if (status) goto fail; - audio->out_ep_maxpsize = as_out_ep_desc.wMaxPacketSize; - audio->in_ep_maxpsize = as_in_ep_desc.wMaxPacketSize; + audio->out_ep_maxpsize = le16_to_cpu(as_out_ep_desc.wMaxPacketSize); + audio->in_ep_maxpsize = le16_to_cpu(as_in_ep_desc.wMaxPacketSize); audio->params.c_chmask = audio_opts->c_chmask; audio->params.c_srate = audio_opts->c_srate; audio->params.c_ssize = audio_opts->c_ssize; diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 9082ce261e70..f05c3f3e6103 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -168,7 +168,7 @@ static struct uac2_input_terminal_descriptor usb_out_it_desc = { .bAssocTerminal = 0, .bCSourceID = USB_OUT_CLK_ID, .iChannelNames = 0, - .bmControls = (CONTROL_RDWR << COPY_CTRL), + .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL), }; /* Input Terminal for I/O-In */ @@ -182,7 +182,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = { .bAssocTerminal = 0, .bCSourceID = USB_IN_CLK_ID, .iChannelNames = 0, - .bmControls = (CONTROL_RDWR << COPY_CTRL), + .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL), }; /* Output Terminal for USB_IN */ @@ -196,7 +196,7 @@ static struct uac2_output_terminal_descriptor usb_in_ot_desc = { .bAssocTerminal = 0, .bSourceID = IO_IN_IT_ID, .bCSourceID = USB_IN_CLK_ID, - .bmControls = (CONTROL_RDWR << COPY_CTRL), + .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL), }; /* Output Terminal for I/O-Out */ @@ -210,7 +210,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = { .bAssocTerminal = 0, .bSourceID =
USB_OUT_IT_ID, .bCSourceID = USB_OUT_CLK_ID, - .bmControls = (CONTROL_RDWR << COPY_CTRL), + .bmControls = cpu_to_le16(CONTROL_RDWR << COPY_CTRL), }; static struct uac2_ac_header_descriptor ac_hdr_desc = { @@ -220,9 +220,10 @@ static struct uac2_ac_header_descriptor ac_hdr_desc = { .bDescriptorSubtype = UAC_MS_HEADER, .bcdADC = cpu_to_le16(0x200), .bCategory = UAC2_FUNCTION_IO_BOX, - .wTotalLength = sizeof in_clk_src_desc + sizeof out_clk_src_desc - + sizeof usb_out_it_desc + sizeof io_in_it_desc - + sizeof usb_in_ot_desc + sizeof io_out_ot_desc, + .wTotalLength = cpu_to_le16(sizeof in_clk_src_desc + + sizeof out_clk_src_desc + sizeof usb_out_it_desc + + sizeof io_in_it_desc + sizeof usb_in_ot_desc + + sizeof io_out_ot_desc), .bmControls = 0, }; @@ -569,10 +570,12 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn) return ret; } - agdev->in_ep_maxpsize = max(fs_epin_desc.wMaxPacketSize, - hs_epin_desc.wMaxPacketSize); - agdev->out_ep_maxpsize = max(fs_epout_desc.wMaxPacketSize, - hs_epout_desc.wMaxPacketSize); + agdev->in_ep_maxpsize = max_t(u16, + le16_to_cpu(fs_epin_desc.wMaxPacketSize), + le16_to_cpu(hs_epin_desc.wMaxPacketSize)); + agdev->out_ep_maxpsize = max_t(u16, + le16_to_cpu(fs_epout_desc.wMaxPacketSize), + le16_to_cpu(hs_epout_desc.wMaxPacketSize)); hs_epout_desc.bEndpointAddress = fs_epout_desc.bEndpointAddress; hs_epin_desc.bEndpointAddress = fs_epin_desc.bEndpointAddress; diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig index 9ffb11ec9ed9..7cd5c969fcbe 100644 --- a/drivers/usb/gadget/udc/Kconfig +++ b/drivers/usb/gadget/udc/Kconfig @@ -192,7 +192,7 @@ config USB_RENESAS_USBHS_UDC config USB_RENESAS_USB3 tristate 'Renesas USB3.0 Peripheral controller' depends on ARCH_RENESAS || COMPILE_TEST - depends on EXTCON + depends on EXTCON && HAS_DMA help Renesas USB3.0 Peripheral controller is a USB peripheral controller that supports super, high, and full speed USB 3.0 data transfers. 
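Stepping back to the f_uac1.c/f_uac2.c conversions above: multi-byte descriptor fields such as wTerminalType, wChannelConfig, bmControls and wTotalLength are little-endian on the USB wire, while a bare initializer like .wChannelConfig = 0x3 is stored in the CPU's native byte order, so it is only correct on little-endian hosts; wrapping the constants in cpu_to_le16() at initialization time fixes big-endian machines. A rough userspace illustration with a toy field (the runtime byte-order probe stands in for the kernel's build-time cpu_to_le16() from <asm/byteorder.h>; the struct is not the real UAC descriptor layout):

#include <stdint.h>
#include <stdio.h>

/* Runtime stand-in for cpu_to_le16(); the kernel resolves this at
 * build time instead. */
static uint16_t my_cpu_to_le16(uint16_t v)
{
        union { uint16_t u16; uint8_t u8[2]; } probe = { .u16 = 1 };

        if (probe.u8[0] == 1)                   /* little-endian host */
                return v;
        return (uint16_t)((v << 8) | (v >> 8)); /* big-endian: swap */
}

struct toy_terminal_desc {              /* toy layout, for illustration */
        uint16_t wChannelConfig;        /* little-endian on the wire */
};

int main(void)
{
        struct toy_terminal_desc d = {
                /* 0x3 = front left + front right, as in the patch */
                .wChannelConfig = my_cpu_to_le16(0x3),
        };
        const uint8_t *raw = (const uint8_t *)&d;

        /* Prints "03 00" on any host; without the conversion a
         * big-endian host would put "00 03" on the wire instead. */
        printf("wire bytes: %02x %02x\n", raw[0], raw[1]);
        return 0;
}

The same reasoning runs the other way for reads, which is why f_audio_bind() and afunc_bind() above now pass wMaxPacketSize through le16_to_cpu() before using it as a host-side integer.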
@@ -257,6 +257,7 @@ config USB_MV_U3D config USB_SNP_CORE depends on (USB_AMD5536UDC || USB_SNP_UDC_PLAT) + depends on HAS_DMA tristate help This enables core driver support for Synopsys USB 2.0 Device @@ -271,7 +272,7 @@ config USB_SNP_CORE config USB_SNP_UDC_PLAT tristate "Synopsys USB 2.0 Device controller" - depends on (USB_GADGET && OF) + depends on USB_GADGET && OF && HAS_DMA select USB_GADGET_DUALSPEED select USB_SNP_CORE default ARCH_BCM_IPROC diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index d8278322d5ac..62dc9c7798e7 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -89,6 +89,9 @@ /* USB_COM_CON */ #define USB_COM_CON_CONF BIT(24) +#define USB_COM_CON_PN_WDATAIF_NL BIT(23) +#define USB_COM_CON_PN_RDATAIF_NL BIT(22) +#define USB_COM_CON_PN_LSTTR_PP BIT(21) #define USB_COM_CON_SPD_MODE BIT(17) #define USB_COM_CON_EP0_EN BIT(16) #define USB_COM_CON_DEV_ADDR_SHIFT 8 @@ -686,6 +689,9 @@ static void renesas_usb3_init_controller(struct renesas_usb3 *usb3) { usb3_init_axi_bridge(usb3); usb3_init_epc_registers(usb3); + usb3_set_bit(usb3, USB_COM_CON_PN_WDATAIF_NL | + USB_COM_CON_PN_RDATAIF_NL | USB_COM_CON_PN_LSTTR_PP, + USB3_USB_COM_CON); usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_STA); usb3_write(usb3, USB_OTG_IDMON, USB3_USB_OTG_INT_ENA); @@ -1369,7 +1375,7 @@ static int renesas_usb3_dma_free_prd(struct renesas_usb3 *usb3, usb3_for_each_dma(usb3, dma, i) { if (dma->prd) { - dma_free_coherent(dev, USB3_DMA_MAX_XFER_SIZE, + dma_free_coherent(dev, USB3_DMA_PRD_SIZE, dma->prd, dma->prd_dma); dma->prd = NULL; } @@ -1409,12 +1415,12 @@ static void usb3_start_pipen(struct renesas_usb3_ep *usb3_ep, int ret = -EAGAIN; u32 enable_bits = 0; + spin_lock_irqsave(&usb3->lock, flags); if (usb3_ep->halt || usb3_ep->started) - return; + goto out; if (usb3_req != usb3_req_first) - return; + goto out; - spin_lock_irqsave(&usb3->lock, flags); if (usb3_pn_change(usb3, usb3_ep->num) < 0) goto out; diff --git a/drivers/usb/gadget/udc/snps_udc_plat.c b/drivers/usb/gadget/udc/snps_udc_plat.c index 2e11f19e07ae..f7b4d0f159e4 100644 --- a/drivers/usb/gadget/udc/snps_udc_plat.c +++ b/drivers/usb/gadget/udc/snps_udc_plat.c @@ -28,7 +28,7 @@ /* description */ #define UDC_MOD_DESCRIPTION "Synopsys UDC platform driver" -void start_udc(struct udc *udc) +static void start_udc(struct udc *udc) { if (udc->driver) { dev_info(udc->dev, "Connecting...\n"); @@ -38,7 +38,7 @@ void start_udc(struct udc *udc) } } -void stop_udc(struct udc *udc) +static void stop_udc(struct udc *udc) { int tmp; u32 reg; @@ -76,7 +76,7 @@ void stop_udc(struct udc *udc) dev_info(udc->dev, "Device disconnected\n"); } -void udc_drd_work(struct work_struct *work) +static void udc_drd_work(struct work_struct *work) { struct udc *udc; diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index a9a1e4c40480..c8989c62a262 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -77,6 +77,16 @@ #define USB_INTEL_USB3_PSSEN 0xD8 #define USB_INTEL_USB3PRM 0xDC +/* ASMEDIA quirk use */ +#define ASMT_DATA_WRITE0_REG 0xF8 +#define ASMT_DATA_WRITE1_REG 0xFC +#define ASMT_CONTROL_REG 0xE0 +#define ASMT_CONTROL_WRITE_BIT 0x02 +#define ASMT_WRITEREG_CMD 0x10423 +#define ASMT_FLOWCTL_ADDR 0xFA30 +#define ASMT_FLOWCTL_DATA 0xBA +#define ASMT_PSEUDO_DATA 0 + /* * amd_chipset_gen values represent AMD different chipset generations */ @@ -412,6 +422,50 @@ void usb_amd_quirk_pll_disable(void) } 
EXPORT_SYMBOL_GPL(usb_amd_quirk_pll_disable); +static int usb_asmedia_wait_write(struct pci_dev *pdev) +{ + unsigned long retry_count; + unsigned char value; + + for (retry_count = 1000; retry_count > 0; --retry_count) { + + pci_read_config_byte(pdev, ASMT_CONTROL_REG, &value); + + if (value == 0xff) { + dev_err(&pdev->dev, "%s: check_ready ERROR", __func__); + return -EIO; + } + + if ((value & ASMT_CONTROL_WRITE_BIT) == 0) + return 0; + + usleep_range(40, 60); + } + + dev_warn(&pdev->dev, "%s: check_write_ready timeout", __func__); + return -ETIMEDOUT; +} + +void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) +{ + if (usb_asmedia_wait_write(pdev) != 0) + return; + + /* send command and address to device */ + pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_WRITEREG_CMD); + pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_FLOWCTL_ADDR); + pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT); + + if (usb_asmedia_wait_write(pdev) != 0) + return; + + /* send data to device */ + pci_write_config_dword(pdev, ASMT_DATA_WRITE0_REG, ASMT_FLOWCTL_DATA); + pci_write_config_dword(pdev, ASMT_DATA_WRITE1_REG, ASMT_PSEUDO_DATA); + pci_write_config_byte(pdev, ASMT_CONTROL_REG, ASMT_CONTROL_WRITE_BIT); +} +EXPORT_SYMBOL_GPL(usb_asmedia_modifyflowcontrol); + void usb_amd_quirk_pll_enable(void) { usb_amd_quirk_pll(0); diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h index 0222195bd5b0..655994480198 100644 --- a/drivers/usb/host/pci-quirks.h +++ b/drivers/usb/host/pci-quirks.h @@ -11,6 +11,7 @@ bool usb_amd_prefetch_quirk(void); void usb_amd_dev_put(void); void usb_amd_quirk_pll_disable(void); void usb_amd_quirk_pll_enable(void); +void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev); void usb_enable_intel_xhci_ports(struct pci_dev *xhci_pdev); void usb_disable_xhci_ports(struct pci_dev *xhci_pdev); void sb800_prefetch(struct device *dev, int on); @@ -18,6 +19,7 @@ void sb800_prefetch(struct device *dev, int on); struct pci_dev; static inline void usb_amd_quirk_pll_disable(void) {} static inline void usb_amd_quirk_pll_enable(void) {} +static inline void usb_asmedia_modifyflowcontrol(struct pci_dev *pdev) {} static inline void usb_amd_dev_put(void) {} static inline void usb_disable_xhci_ports(struct pci_dev *xhci_pdev) {} static inline void sb800_prefetch(struct device *dev, int on) {} diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index 1adae9eab831..00721e8807ab 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -398,14 +398,21 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend) spin_lock_irqsave(&xhci->lock, flags); for (i = LAST_EP_INDEX; i > 0; i--) { if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) { + struct xhci_ep_ctx *ep_ctx; struct xhci_command *command; + + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->out_ctx, i); + + /* Check ep is running, required by AMD SNPS 3.1 xHC */ + if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_RUNNING) + continue; + command = xhci_alloc_command(xhci, false, false, GFP_NOWAIT); if (!command) { spin_unlock_irqrestore(&xhci->lock, flags); xhci_free_command(xhci, cmd); return -ENOMEM; - } xhci_queue_stop_endpoint(xhci, command, slot_id, i, suspend); @@ -603,12 +610,14 @@ static int xhci_enter_test_mode(struct xhci_hcd *xhci, /* Disable all Device Slots */ xhci_dbg(xhci, "Disable all slots\n"); + spin_unlock_irqrestore(&xhci->lock, *flags); for (i = 1; i <= HCS_MAX_SLOTS(xhci->hcs_params1); i++) { retval = xhci_disable_slot(xhci, 
NULL, i); if (retval) xhci_err(xhci, "Failed to disable slot %d, %d. Enter test mode anyway\n", i, retval); } + spin_lock_irqsave(&xhci->lock, *flags); /* Put all ports to the Disable state by clear PP */ xhci_dbg(xhci, "Disable all port (PP = 0)\n"); /* Power off USB3 ports*/ @@ -897,6 +906,9 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd, clear_bit(wIndex, &bus_state->resuming_ports); set_bit(wIndex, &bus_state->rexit_ports); + + xhci_test_and_clear_bit(xhci, port_array, wIndex, + PORT_PLC); xhci_set_link_state(xhci, port_array, wIndex, XDEV_U0); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 53882e2babbb..5b0fa553c8bc 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -59,6 +59,8 @@ #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc +#define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 + static const char hcd_name[] = "xhci_hcd"; static struct hc_driver __read_mostly xhci_pci_hc_driver; @@ -217,6 +219,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x1142) xhci->quirks |= XHCI_TRUST_TX_LENGTH; + if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) + xhci->quirks |= XHCI_ASMEDIA_MODIFY_FLOWCONTROL; + if (pdev->vendor == PCI_VENDOR_ID_TI && pdev->device == 0x8241) xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_7; diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index c50c902d009e..cc368ad2b51e 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -864,13 +864,16 @@ static void xhci_kill_endpoint_urbs(struct xhci_hcd *xhci, (ep->ep_state & EP_GETTING_NO_STREAMS)) { int stream_id; - for (stream_id = 0; stream_id < ep->stream_info->num_streams; + for (stream_id = 1; stream_id < ep->stream_info->num_streams; stream_id++) { + ring = ep->stream_info->stream_rings[stream_id]; + if (!ring) + continue; + xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, "Killing URBs for slot ID %u, ep index %u, stream %u", - slot_id, ep_index, stream_id + 1); - xhci_kill_ring_urbs(xhci, - ep->stream_info->stream_rings[stream_id]); + slot_id, ep_index, stream_id); + xhci_kill_ring_urbs(xhci, ring); } } else { ring = ep->ring; diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 56f85df013db..b2ff1ff1a02f 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -198,6 +198,9 @@ int xhci_reset(struct xhci_hcd *xhci) if (ret) return ret; + if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) + usb_asmedia_modifyflowcontrol(to_pci_dev(xhci_to_hcd(xhci)->self.controller)); + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Wait for controller to be ready for doorbell rings"); /* @@ -622,8 +625,10 @@ int xhci_run(struct usb_hcd *hcd) if (!command) return -ENOMEM; - xhci_queue_vendor_command(xhci, command, 0, 0, 0, + ret = xhci_queue_vendor_command(xhci, command, 0, 0, 0, TRB_TYPE(TRB_NEC_GET_FW)); + if (ret) + xhci_free_command(xhci, command); } xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_run for USB2 roothub"); @@ -1085,6 +1090,9 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) if ((xhci->quirks & XHCI_COMP_MODE_QUIRK) && !comp_timer_running) compliance_mode_recovery_timer_init(xhci); + if (xhci->quirks & XHCI_ASMEDIA_MODIFY_FLOWCONTROL) + usb_asmedia_modifyflowcontrol(to_pci_dev(hcd->self.controller)); + /* Re-enable port polling. 
*/ xhci_dbg(xhci, "%s: starting port polling.\n", __func__); set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 3c6da1f93c84..e3e935291ed6 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1820,6 +1820,7 @@ struct xhci_hcd { #define XHCI_BROKEN_PORT_PED (1 << 25) #define XHCI_LIMIT_ENDPOINT_INTERVAL_7 (1 << 26) #define XHCI_U2_DISABLE_WAKE (1 << 27) +#define XHCI_ASMEDIA_MODIFY_FLOWCONTROL (1 << 28) unsigned int num_active_eps; unsigned int limit_active_eps; diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c index 623c51300393..f0ce304c5aaf 100644 --- a/drivers/usb/renesas_usbhs/common.c +++ b/drivers/usb/renesas_usbhs/common.c @@ -752,8 +752,10 @@ static int usbhsc_resume(struct device *dev) struct usbhs_priv *priv = dev_get_drvdata(dev); struct platform_device *pdev = usbhs_priv_to_pdev(priv); - if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) + if (!usbhsc_flags_has(priv, USBHSF_RUNTIME_PWCTRL)) { usbhsc_power_ctrl(priv, 1); + usbhs_mod_autonomy_mode(priv); + } usbhs_platform_call(priv, phy_reset, pdev); diff --git a/drivers/usb/renesas_usbhs/mod_gadget.c b/drivers/usb/renesas_usbhs/mod_gadget.c index 5bc7a6138855..93fba9033b00 100644 --- a/drivers/usb/renesas_usbhs/mod_gadget.c +++ b/drivers/usb/renesas_usbhs/mod_gadget.c @@ -37,6 +37,7 @@ struct usbhsg_gpriv; struct usbhsg_uep { struct usb_ep ep; struct usbhs_pipe *pipe; + spinlock_t lock; /* protect the pipe */ char ep_name[EP_NAME_SIZE]; @@ -636,10 +637,16 @@ usbhsg_ep_enable_end: static int usbhsg_ep_disable(struct usb_ep *ep) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); - struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); + struct usbhs_pipe *pipe; + unsigned long flags; + int ret = 0; - if (!pipe) - return -EINVAL; + spin_lock_irqsave(&uep->lock, flags); + pipe = usbhsg_uep_to_pipe(uep); + if (!pipe) { + ret = -EINVAL; + goto out; + } usbhsg_pipe_disable(uep); usbhs_pipe_free(pipe); @@ -647,6 +654,9 @@ static int usbhsg_ep_disable(struct usb_ep *ep) uep->pipe->mod_private = NULL; uep->pipe = NULL; +out: + spin_unlock_irqrestore(&uep->lock, flags); + return 0; } @@ -696,8 +706,11 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) { struct usbhsg_uep *uep = usbhsg_ep_to_uep(ep); struct usbhsg_request *ureq = usbhsg_req_to_ureq(req); - struct usbhs_pipe *pipe = usbhsg_uep_to_pipe(uep); + struct usbhs_pipe *pipe; + unsigned long flags; + spin_lock_irqsave(&uep->lock, flags); + pipe = usbhsg_uep_to_pipe(uep); if (pipe) usbhs_pkt_pop(pipe, usbhsg_ureq_to_pkt(ureq)); @@ -706,6 +719,7 @@ static int usbhsg_ep_dequeue(struct usb_ep *ep, struct usb_request *req) * even if the pipe is NULL. 
*/ usbhsg_queue_pop(uep, ureq, -ECONNRESET); + spin_unlock_irqrestore(&uep->lock, flags); return 0; } @@ -852,10 +866,10 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status) { struct usbhsg_gpriv *gpriv = usbhsg_priv_to_gpriv(priv); struct usbhs_mod *mod = usbhs_mod_get_current(priv); - struct usbhsg_uep *dcp = usbhsg_gpriv_to_dcp(gpriv); + struct usbhsg_uep *uep; struct device *dev = usbhs_priv_to_dev(priv); unsigned long flags; - int ret = 0; + int ret = 0, i; /******************** spin lock ********************/ usbhs_lock(priv, flags); @@ -887,7 +901,9 @@ static int usbhsg_try_stop(struct usbhs_priv *priv, u32 status) usbhs_sys_set_test_mode(priv, 0); usbhs_sys_function_ctrl(priv, 0); - usbhsg_ep_disable(&dcp->ep); + /* disable all eps */ + usbhsg_for_each_uep_with_dcp(uep, gpriv, i) + usbhsg_ep_disable(&uep->ep); dev_dbg(dev, "stop gadget\n"); @@ -1069,6 +1085,7 @@ int usbhs_mod_gadget_probe(struct usbhs_priv *priv) ret = -ENOMEM; goto usbhs_mod_gadget_probe_err_gpriv; } + spin_lock_init(&uep->lock); gpriv->transceiver = usb_get_phy(USB_PHY_TYPE_UNDEFINED); dev_info(dev, "%stransceiver found\n", diff --git a/drivers/usb/serial/safe_serial.c b/drivers/usb/serial/safe_serial.c index 8a069aa154ed..27d7a7016298 100644 --- a/drivers/usb/serial/safe_serial.c +++ b/drivers/usb/serial/safe_serial.c @@ -180,7 +180,7 @@ static const __u16 crc10_table[256] = { * Perform a memcpy and calculate fcs using ppp 10bit CRC algorithm. Return * new 10 bit FCS. */ -static __u16 __inline__ fcs_compute10(unsigned char *sp, int len, __u16 fcs) +static inline __u16 fcs_compute10(unsigned char *sp, int len, __u16 fcs) { for (; len-- > 0; fcs = CRC10_FCS(fcs, *sp++)); return fcs; diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c index fba4005dd737..6a7720e66595 100644 --- a/drivers/usb/storage/isd200.c +++ b/drivers/usb/storage/isd200.c @@ -1529,8 +1529,11 @@ static void isd200_ata_command(struct scsi_cmnd *srb, struct us_data *us) /* Make sure driver was initialized */ - if (us->extra == NULL) + if (us->extra == NULL) { usb_stor_dbg(us, "ERROR Driver not initialized\n"); + srb->result = DID_ERROR << 16; + return; + } scsi_set_resid(srb, 0); /* scsi_bufflen might change in protocol translation to ata */ diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h index 6b0d2f0918c6..8a88f45822e3 100644 --- a/drivers/usb/typec/ucsi/ucsi.h +++ b/drivers/usb/typec/ucsi/ucsi.h @@ -3,6 +3,7 @@ #define __DRIVER_USB_TYPEC_UCSI_H #include <linux/bitops.h> +#include <linux/device.h> #include <linux/types.h> /* -------------------------------------------------------------------------- */ diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 324c52e3a1a4..063c1ce6fa42 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -195,11 +195,11 @@ static bool vfio_pci_nointx(struct pci_dev *pdev) switch (pdev->vendor) { case PCI_VENDOR_ID_INTEL: switch (pdev->device) { - /* All i40e (XL710/X710) 10/20/40GbE NICs */ + /* All i40e (XL710/X710/XXV710) 10/20/25/40GbE NICs */ case 0x1572: case 0x1574: case 0x1580 ... 0x1581: - case 0x1583 ... 0x1589: + case 0x1583 ... 0x158b: case 0x37d0 ... 
0x37d2: return true; default: diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index 561084ab387f..330d50582f40 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -382,7 +382,7 @@ static struct vfio_group *vfio_create_group(struct iommu_group *iommu_group) if (IS_ERR(dev)) { vfio_free_group_minor(minor); vfio_group_unlock_and_free(group); - return (struct vfio_group *)dev; /* ERR_PTR */ + return ERR_CAST(dev); } group->minor = minor; @@ -423,6 +423,34 @@ static void vfio_group_put(struct vfio_group *group) kref_put_mutex(&group->kref, vfio_group_release, &vfio.group_lock); } +struct vfio_group_put_work { + struct work_struct work; + struct vfio_group *group; +}; + +static void vfio_group_put_bg(struct work_struct *work) +{ + struct vfio_group_put_work *do_work; + + do_work = container_of(work, struct vfio_group_put_work, work); + + vfio_group_put(do_work->group); + kfree(do_work); +} + +static void vfio_group_schedule_put(struct vfio_group *group) +{ + struct vfio_group_put_work *do_work; + + do_work = kmalloc(sizeof(*do_work), GFP_KERNEL); + if (WARN_ON(!do_work)) + return; + + INIT_WORK(&do_work->work, vfio_group_put_bg); + do_work->group = group; + schedule_work(&do_work->work); +} + /* Assume group_lock or group reference is held */ static void vfio_group_get(struct vfio_group *group) { @@ -762,7 +790,14 @@ static int vfio_iommu_group_notifier(struct notifier_block *nb, break; } - vfio_group_put(group); + /* + * If we're the last reference to the group, the group will be + * released, which includes unregistering the iommu group notifier. + * We hold a read-lock on that notifier list, unregistering needs + * a write-lock... deadlock. Release our reference asynchronously + * to avoid that situation. + */ + vfio_group_schedule_put(group); return NOTIFY_OK; } @@ -1140,15 +1175,11 @@ static long vfio_fops_unl_ioctl(struct file *filep, ret = vfio_ioctl_set_iommu(container, arg); break; default: - down_read(&container->group_lock); - driver = container->iommu_driver; data = container->iommu_data; if (driver) /* passthrough all unrecognized ioctls */ ret = driver->ops->ioctl(data, cmd, arg); - - up_read(&container->group_lock); } return ret; @@ -1202,15 +1233,11 @@ static ssize_t vfio_fops_read(struct file *filep, char __user *buf, struct vfio_iommu_driver *driver; ssize_t ret = -EINVAL; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->read)) ret = driver->ops->read(container->iommu_data, buf, count, ppos); - up_read(&container->group_lock); - return ret; } @@ -1221,15 +1248,11 @@ static ssize_t vfio_fops_write(struct file *filep, const char __user *buf, struct vfio_iommu_driver *driver; ssize_t ret = -EINVAL; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->write)) ret = driver->ops->write(container->iommu_data, buf, count, ppos); - up_read(&container->group_lock); - return ret; } @@ -1239,14 +1262,10 @@ static int vfio_fops_mmap(struct file *filep, struct vm_area_struct *vma) struct vfio_iommu_driver *driver; int ret = -EINVAL; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->mmap)) ret = driver->ops->mmap(container->iommu_data, vma); - up_read(&container->group_lock); - return ret; } @@ -1741,6 +1760,15 @@ void vfio_group_put_external_user(struct vfio_group *group) } EXPORT_SYMBOL_GPL(vfio_group_put_external_user); +bool vfio_external_group_match_file(struct vfio_group *test_group, + struct file 
*filep) +{ + struct vfio_group *group = filep->private_data; + + return (filep->f_op == &vfio_group_fops) && (group == test_group); +} +EXPORT_SYMBOL_GPL(vfio_external_group_match_file); + int vfio_external_user_iommu_id(struct vfio_group *group) { return iommu_group_id(group->iommu_group); @@ -1949,8 +1977,6 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage, goto err_pin_pages; container = group->container; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->pin_pages)) ret = driver->ops->pin_pages(container->iommu_data, user_pfn, @@ -1958,7 +1984,6 @@ int vfio_pin_pages(struct device *dev, unsigned long *user_pfn, int npage, else ret = -ENOTTY; - up_read(&container->group_lock); vfio_group_try_dissolve_container(group); err_pin_pages: @@ -1998,8 +2023,6 @@ int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage) goto err_unpin_pages; container = group->container; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->unpin_pages)) ret = driver->ops->unpin_pages(container->iommu_data, user_pfn, @@ -2007,7 +2030,6 @@ int vfio_unpin_pages(struct device *dev, unsigned long *user_pfn, int npage) else ret = -ENOTTY; - up_read(&container->group_lock); vfio_group_try_dissolve_container(group); err_unpin_pages: @@ -2029,8 +2051,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group, return -EINVAL; container = group->container; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->register_notifier)) ret = driver->ops->register_notifier(container->iommu_data, @@ -2038,7 +2058,6 @@ static int vfio_register_iommu_notifier(struct vfio_group *group, else ret = -ENOTTY; - up_read(&container->group_lock); vfio_group_try_dissolve_container(group); return ret; @@ -2056,8 +2075,6 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group, return -EINVAL; container = group->container; - down_read(&container->group_lock); - driver = container->iommu_driver; if (likely(driver && driver->ops->unregister_notifier)) ret = driver->ops->unregister_notifier(container->iommu_data, @@ -2065,7 +2082,6 @@ static int vfio_unregister_iommu_notifier(struct vfio_group *group, else ret = -ENOTTY; - up_read(&container->group_lock); vfio_group_try_dissolve_container(group); return ret; @@ -2083,7 +2099,6 @@ static int vfio_register_group_notifier(struct vfio_group *group, unsigned long *events, struct notifier_block *nb) { - struct vfio_container *container; int ret; bool set_kvm = false; @@ -2101,9 +2116,6 @@ static int vfio_register_group_notifier(struct vfio_group *group, if (ret) return -EINVAL; - container = group->container; - down_read(&container->group_lock); - ret = blocking_notifier_chain_register(&group->notifier, nb); /* @@ -2114,7 +2126,6 @@ static int vfio_register_group_notifier(struct vfio_group *group, blocking_notifier_call_chain(&group->notifier, VFIO_GROUP_NOTIFY_SET_KVM, group->kvm); - up_read(&container->group_lock); vfio_group_try_dissolve_container(group); return ret; @@ -2123,19 +2134,14 @@ static int vfio_register_group_notifier(struct vfio_group *group, static int vfio_unregister_group_notifier(struct vfio_group *group, struct notifier_block *nb) { - struct vfio_container *container; int ret; ret = vfio_group_add_container_user(group); if (ret) return -EINVAL; - container = group->container; - down_read(&container->group_lock); - ret = 
blocking_notifier_chain_unregister(&group->notifier, nb); - up_read(&container->group_lock); vfio_group_try_dissolve_container(group); return ret; diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index e3d7ea1288c6..06d044862e58 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -897,7 +897,7 @@ static int vhost_net_open(struct inode *inode, struct file *f) struct sk_buff **queue; int i; - n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_REPEAT); + n = kvmalloc(sizeof *n, GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!n) return -ENOMEM; vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL); diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index fd6c8b66f06f..046f6d280af5 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -496,14 +496,12 @@ static void vhost_scsi_evt_work(struct vhost_work *work) struct vhost_scsi *vs = container_of(work, struct vhost_scsi, vs_event_work); struct vhost_virtqueue *vq = &vs->vqs[VHOST_SCSI_VQ_EVT].vq; - struct vhost_scsi_evt *evt; + struct vhost_scsi_evt *evt, *t; struct llist_node *llnode; mutex_lock(&vq->mutex); llnode = llist_del_all(&vs->vs_event_list); - while (llnode) { - evt = llist_entry(llnode, struct vhost_scsi_evt, list); - llnode = llist_next(llnode); + llist_for_each_entry_safe(evt, t, llnode, list) { vhost_scsi_do_evt_work(vs, evt); vhost_scsi_free_evt(vs, evt); } @@ -529,10 +527,7 @@ static void vhost_scsi_complete_cmd_work(struct vhost_work *work) bitmap_zero(signal, VHOST_SCSI_MAX_VQ); llnode = llist_del_all(&vs->vs_completion_list); - while (llnode) { - cmd = llist_entry(llnode, struct vhost_scsi_cmd, - tvc_completion_list); - llnode = llist_next(llnode); + llist_for_each_entry(cmd, llnode, tvc_completion_list) { se_cmd = &cmd->tvc_se_cmd; pr_debug("%s tv_cmd %p resid %u status %#02x\n", __func__, @@ -1404,7 +1399,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f) struct vhost_virtqueue **vqs; int r = -ENOMEM, i; - vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT); + vs = kzalloc(sizeof(*vs), GFP_KERNEL | __GFP_NOWARN | __GFP_RETRY_MAYFAIL); if (!vs) { vs = vzalloc(sizeof(*vs)); if (!vs) diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c index 3f63e03de8e8..c9de9c41aa97 100644 --- a/drivers/vhost/vsock.c +++ b/drivers/vhost/vsock.c @@ -508,7 +508,7 @@ static int vhost_vsock_dev_open(struct inode *inode, struct file *file) /* This struct is large and allocation could fail, fall back to vmalloc * if there is no other way. 
*/ - vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_REPEAT); + vsock = kvmalloc(sizeof(*vsock), GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!vsock) return -ENOMEM; diff --git a/drivers/video/console/mdacon.c b/drivers/video/console/mdacon.c index ec192a1bf297..d0d427a2f1a3 100644 --- a/drivers/video/console/mdacon.c +++ b/drivers/video/console/mdacon.c @@ -48,7 +48,7 @@ static DEFINE_SPINLOCK(mda_lock); /* description of the hardware layout */ -static unsigned long mda_vram_base; /* Base of video memory */ +static u16 *mda_vram_base; /* Base of video memory */ static unsigned long mda_vram_len; /* Size of video memory */ static unsigned int mda_num_columns; /* Number of text columns */ static unsigned int mda_num_lines; /* Number of text lines */ @@ -205,13 +205,20 @@ static int mda_detect(void) /* do a memory check */ - p = (u16 *) mda_vram_base; - q = (u16 *) (mda_vram_base + 0x01000); + p = mda_vram_base; + q = mda_vram_base + 0x01000 / 2; - p_save = scr_readw(p); q_save = scr_readw(q); + p_save = scr_readw(p); + q_save = scr_readw(q); + + scr_writew(0xAA55, p); + if (scr_readw(p) == 0xAA55) + count++; + + scr_writew(0x55AA, p); + if (scr_readw(p) == 0x55AA) + count++; - scr_writew(0xAA55, p); if (scr_readw(p) == 0xAA55) count++; - scr_writew(0x55AA, p); if (scr_readw(p) == 0x55AA) count++; scr_writew(p_save, p); if (count != 2) { @@ -220,13 +227,18 @@ static int mda_detect(void) /* check if we have 4K or 8K */ - scr_writew(0xA55A, q); scr_writew(0x0000, p); - if (scr_readw(q) == 0xA55A) count++; + scr_writew(0xA55A, q); + scr_writew(0x0000, p); + if (scr_readw(q) == 0xA55A) + count++; - scr_writew(0x5AA5, q); scr_writew(0x0000, p); - if (scr_readw(q) == 0x5AA5) count++; + scr_writew(0x5AA5, q); + scr_writew(0x0000, p); + if (scr_readw(q) == 0x5AA5) + count++; - scr_writew(p_save, p); scr_writew(q_save, q); + scr_writew(p_save, p); + scr_writew(q_save, q); if (count == 4) { mda_vram_len = 0x02000; @@ -240,14 +252,12 @@ static int mda_detect(void) /* Edward: These two mess `tests' mess up my cursor on bootup */ /* cursor low register */ - if (! test_mda_b(0x66, 0x0f)) { + if (!test_mda_b(0x66, 0x0f)) return 0; - } /* cursor low register */ - if (! 
test_mda_b(0x99, 0x0f)) { + if (!test_mda_b(0x99, 0x0f)) return 0; - } #endif /* See if the card is a Hercules, by checking whether the vsync @@ -257,25 +267,25 @@ static int mda_detect(void) p_save = q_save = inb_p(mda_status_port) & MDA_STATUS_VSYNC; - for (count=0; count < 50000 && p_save == q_save; count++) { + for (count = 0; count < 50000 && p_save == q_save; count++) { q_save = inb(mda_status_port) & MDA_STATUS_VSYNC; udelay(2); } if (p_save != q_save) { switch (inb_p(mda_status_port) & 0x70) { - case 0x10: - mda_type = TYPE_HERCPLUS; - mda_type_name = "HerculesPlus"; - break; - case 0x50: - mda_type = TYPE_HERCCOLOR; - mda_type_name = "HerculesColor"; - break; - default: - mda_type = TYPE_HERC; - mda_type_name = "Hercules"; - break; + case 0x10: + mda_type = TYPE_HERCPLUS; + mda_type_name = "HerculesPlus"; + break; + case 0x50: + mda_type = TYPE_HERCCOLOR; + mda_type_name = "HerculesColor"; + break; + default: + mda_type = TYPE_HERC; + mda_type_name = "Hercules"; + break; } } @@ -313,7 +323,7 @@ static const char *mdacon_startup(void) mda_num_lines = 25; mda_vram_len = 0x01000; - mda_vram_base = VGA_MAP_MEM(0xb0000, mda_vram_len); + mda_vram_base = (u16 *)VGA_MAP_MEM(0xb0000, mda_vram_len); mda_index_port = 0x3b4; mda_value_port = 0x3b5; @@ -410,17 +420,20 @@ static void mdacon_invert_region(struct vc_data *c, u16 *p, int count) } } -#define MDA_ADDR(x,y) ((u16 *) mda_vram_base + (y)*mda_num_columns + (x)) +static inline u16 *mda_addr(unsigned int x, unsigned int y) +{ + return mda_vram_base + y * mda_num_columns + x; +} static void mdacon_putc(struct vc_data *c, int ch, int y, int x) { - scr_writew(mda_convert_attr(ch), MDA_ADDR(x, y)); + scr_writew(mda_convert_attr(ch), mda_addr(x, y)); } static void mdacon_putcs(struct vc_data *c, const unsigned short *s, int count, int y, int x) { - u16 *dest = MDA_ADDR(x, y); + u16 *dest = mda_addr(x, y); for (; count > 0; count--) { scr_writew(mda_convert_attr(scr_readw(s++)), dest++); @@ -430,7 +443,7 @@ static void mdacon_putcs(struct vc_data *c, const unsigned short *s, static void mdacon_clear(struct vc_data *c, int y, int x, int height, int width) { - u16 *dest = MDA_ADDR(x, y); + u16 *dest = mda_addr(x, y); u16 eattr = mda_convert_attr(c->vc_video_erase_char); if (width <= 0 || height <= 0) @@ -453,7 +466,7 @@ static int mdacon_blank(struct vc_data *c, int blank, int mode_switch) { if (mda_type == TYPE_MDA) { if (blank) - scr_memsetw((void *)mda_vram_base, + scr_memsetw(mda_vram_base, mda_convert_attr(c->vc_video_erase_char), c->vc_screenbuf_size); /* Tell console.c that it has to restore the screen itself */ @@ -502,16 +515,16 @@ static bool mdacon_scroll(struct vc_data *c, unsigned int t, unsigned int b, switch (dir) { case SM_UP: - scr_memmovew(MDA_ADDR(0,t), MDA_ADDR(0,t+lines), + scr_memmovew(mda_addr(0, t), mda_addr(0, t + lines), (b-t-lines)*mda_num_columns*2); - scr_memsetw(MDA_ADDR(0,b-lines), eattr, + scr_memsetw(mda_addr(0, b - lines), eattr, lines*mda_num_columns*2); break; case SM_DOWN: - scr_memmovew(MDA_ADDR(0,t+lines), MDA_ADDR(0,t), + scr_memmovew(mda_addr(0, t + lines), mda_addr(0, t), (b-t-lines)*mda_num_columns*2); - scr_memsetw(MDA_ADDR(0,t), eattr, lines*mda_num_columns*2); + scr_memsetw(mda_addr(0, t), eattr, lines*mda_num_columns*2); break; } diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c index 11026e726b68..b55fdac9c9f5 100644 --- a/drivers/video/fbdev/aty/atyfb_base.c +++ b/drivers/video/fbdev/aty/atyfb_base.c @@ -802,7 +802,7 @@ static int aty_var_to_crtc(const struct 
fb_info *info, { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 xres, yres, vxres, vyres, xoffset, yoffset, bpp; - u32 sync, vmode, vdisplay; + u32 sync, vmode; u32 h_total, h_disp, h_sync_strt, h_sync_end, h_sync_dly, h_sync_wid, h_sync_pol; u32 v_total, v_disp, v_sync_strt, v_sync_end, v_sync_wid, v_sync_pol, c_sync; u32 pix_width, dp_pix_width, dp_chain_mask; @@ -984,12 +984,6 @@ static int aty_var_to_crtc(const struct fb_info *info, v_total <<= 1; } - vdisplay = yres; -#ifdef CONFIG_FB_ATY_GENERIC_LCD - if ((par->lcd_table != 0) && (crtc->lcd_gen_cntl & LCD_ON)) - vdisplay = par->lcd_height; -#endif - v_disp--; v_sync_strt--; v_sync_end--; @@ -1036,7 +1030,7 @@ static int aty_var_to_crtc(const struct fb_info *info, crtc->gen_cntl |= CRTC_INTERLACE_EN; #ifdef CONFIG_FB_ATY_GENERIC_LCD if (par->lcd_table != 0) { - vdisplay = yres; + u32 vdisplay = yres; if (vmode & FB_VMODE_DOUBLE) vdisplay <<= 1; crtc->gen_cntl &= ~(CRTC2_EN | CRTC2_PIX_WIDTH); diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c index 5324358f110f..7a42238db446 100644 --- a/drivers/video/fbdev/core/fbmem.c +++ b/drivers/video/fbdev/core/fbmem.c @@ -1483,7 +1483,7 @@ __releases(&info->lock) return 0; } -#ifdef CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA +#if defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && !defined(CONFIG_MMU) unsigned long get_fb_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) @@ -1510,7 +1510,8 @@ static const struct file_operations fb_fops = { .open = fb_open, .release = fb_release, #if defined(HAVE_ARCH_FB_UNMAPPED_AREA) || \ - defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) + (defined(CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA) && \ + !defined(CONFIG_MMU)) .get_unmapped_area = get_fb_unmapped_area, #endif #ifdef CONFIG_FB_DEFERRED_IO diff --git a/drivers/video/fbdev/fsl-diu-fb.c b/drivers/video/fbdev/fsl-diu-fb.c index ca3d6b366471..25abbcf38913 100644 --- a/drivers/video/fbdev/fsl-diu-fb.c +++ b/drivers/video/fbdev/fsl-diu-fb.c @@ -388,7 +388,7 @@ struct fsl_diu_data { /* Determine the DMA address of a member of the fsl_diu_data structure */ #define DMA_ADDR(p, f) ((p)->dma_addr + offsetof(struct fsl_diu_data, f)) -static struct mfb_info mfb_template[] = { +static const struct mfb_info mfb_template[] = { { .index = PLANE0, .id = "Panel0", @@ -1868,7 +1868,7 @@ static int __init fsl_diu_setup(char *options) } #endif -static struct of_device_id fsl_diu_match[] = { +static const struct of_device_id fsl_diu_match[] = { #ifdef CONFIG_PPC_MPC512x { .compatible = "fsl,mpc5121-diu", diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c index 6b444400a86c..ffc391208b27 100644 --- a/drivers/video/fbdev/intelfb/intelfbdrv.c +++ b/drivers/video/fbdev/intelfb/intelfbdrv.c @@ -907,7 +907,7 @@ static void intelfb_pci_unregister(struct pci_dev *pdev) * helper functions * ***************************************************************/ -int __inline__ intelfb_var_to_depth(const struct fb_var_screeninfo *var) +__inline__ int intelfb_var_to_depth(const struct fb_var_screeninfo *var) { DBG_MSG("intelfb_var_to_depth: bpp: %d, green.length is %d\n", var->bits_per_pixel, var->green.length); diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c index 11eb094396ae..f6a0b9af97a9 100644 --- a/drivers/video/fbdev/matrox/matroxfb_base.c +++ b/drivers/video/fbdev/matrox/matroxfb_base.c @@ -2001,7 +2001,7 @@ static void 
matroxfb_register_device(struct matrox_fb_info* minfo) { for (drv = matroxfb_driver_l(matroxfb_driver_list.next); drv != matroxfb_driver_l(&matroxfb_driver_list); drv = matroxfb_driver_l(drv->node.next)) { - if (drv && drv->probe) { + if (drv->probe) { void *p = drv->probe(minfo); if (p) { minfo->drivers_data[i] = p; diff --git a/drivers/video/fbdev/omap/lcdc.c b/drivers/video/fbdev/omap/lcdc.c index e3d9b9ea5498..938cba0d24ae 100644 --- a/drivers/video/fbdev/omap/lcdc.c +++ b/drivers/video/fbdev/omap/lcdc.c @@ -79,12 +79,12 @@ static struct omap_lcd_controller { unsigned long vram_size; } lcdc; -static void inline enable_irqs(int mask) +static inline void enable_irqs(int mask) { lcdc.irq_mask |= mask; } -static void inline disable_irqs(int mask) +static inline void disable_irqs(int mask) { lcdc.irq_mask &= ~mask; } @@ -466,7 +466,7 @@ static void calc_ck_div(int is_tft, int pck, int *pck_div) } } -static void inline setup_regs(void) +static inline void setup_regs(void) { u32 l; struct lcd_panel *panel = lcdc.fbdev->panel; diff --git a/drivers/video/fbdev/omap/omapfb_main.c b/drivers/video/fbdev/omap/omapfb_main.c index f4cbfb3b8a09..3479a47a3082 100644 --- a/drivers/video/fbdev/omap/omapfb_main.c +++ b/drivers/video/fbdev/omap/omapfb_main.c @@ -62,7 +62,7 @@ struct caps_table_struct { const char *name; }; -static struct caps_table_struct ctrl_caps[] = { +static const struct caps_table_struct ctrl_caps[] = { { OMAPFB_CAPS_MANUAL_UPDATE, "manual update" }, { OMAPFB_CAPS_TEARSYNC, "tearing synchronization" }, { OMAPFB_CAPS_PLANE_RELOCATE_MEM, "relocate plane memory" }, @@ -74,7 +74,7 @@ static struct caps_table_struct ctrl_caps[] = { { OMAPFB_CAPS_SET_BACKLIGHT, "backlight setting" }, }; -static struct caps_table_struct color_caps[] = { +static const struct caps_table_struct color_caps[] = { { 1 << OMAPFB_COLOR_RGB565, "RGB565", }, { 1 << OMAPFB_COLOR_YUV422, "YUV422", }, { 1 << OMAPFB_COLOR_YUV420, "YUV420", }, @@ -1384,7 +1384,7 @@ static struct attribute *panel_attrs[] = { NULL, }; -static struct attribute_group panel_attr_grp = { +static const struct attribute_group panel_attr_grp = { .name = "panel", .attrs = panel_attrs, }; @@ -1406,7 +1406,7 @@ static struct attribute *ctrl_attrs[] = { NULL, }; -static struct attribute_group ctrl_attr_grp = { +static const struct attribute_group ctrl_attr_grp = { .name = "ctrl", .attrs = ctrl_attrs, }; diff --git a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c index fd2b372d0264..bef431530090 100644 --- a/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c +++ b/drivers/video/fbdev/omap2/omapfb/displays/panel-dsi-cm.c @@ -100,7 +100,7 @@ static void hw_guard_wait(struct panel_drv_data *ddata) { unsigned long wait = ddata->hw_guard_end - jiffies; - if ((long)wait > 0 && wait <= ddata->hw_guard_wait) { + if ((long)wait > 0 && time_before_eq(wait, ddata->hw_guard_wait)) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(wait); } @@ -559,7 +559,7 @@ static struct attribute *dsicm_attrs[] = { NULL, }; -static struct attribute_group dsicm_attr_group = { +static const struct attribute_group dsicm_attr_group = { .attrs = dsicm_attrs, }; diff --git a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c index 9e2a67fdf4d2..44b96af4ef4e 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/manager-sysfs.c @@ -182,22 +182,16 @@ static ssize_t 
manager_trans_key_type_show(struct omap_overlay_manager *mgr, static ssize_t manager_trans_key_type_store(struct omap_overlay_manager *mgr, const char *buf, size_t size) { - enum omap_dss_trans_key_type key_type; struct omap_overlay_manager_info info; int r; - for (key_type = OMAP_DSS_COLOR_KEY_GFX_DST; - key_type < ARRAY_SIZE(trans_key_type_str); key_type++) { - if (sysfs_streq(buf, trans_key_type_str[key_type])) - break; - } - - if (key_type == ARRAY_SIZE(trans_key_type_str)) - return -EINVAL; + r = sysfs_match_string(trans_key_type_str, buf); + if (r < 0) + return r; mgr->get_manager_info(mgr, &info); - info.trans_key_type = key_type; + info.trans_key_type = r; r = mgr->set_manager_info(mgr, &info); if (r) diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index b21a89b03fb4..c3d49e13643c 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c @@ -1436,7 +1436,10 @@ static void pxafb_enable_controller(struct pxafb_info *fbi) pr_debug("reg_lccr3 0x%08x\n", (unsigned int) fbi->reg_lccr3); /* enable LCD controller clock */ - clk_prepare_enable(fbi->clk); + if (clk_prepare_enable(fbi->clk)) { + pr_err("%s: Failed to prepare clock\n", __func__); + return; + } if (fbi->lccr0 & LCCR0_LCDT) return; diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c index 885ee3a563aa..c3a46506e47e 100644 --- a/drivers/video/fbdev/sh_mobile_lcdcfb.c +++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c @@ -2301,7 +2301,7 @@ static int sh_mobile_lcdc_check_fb(struct backlight_device *bdev, return (info->bl_dev == bdev); } -static struct backlight_ops sh_mobile_lcdc_bl_ops = { +static const struct backlight_ops sh_mobile_lcdc_bl_ops = { .options = BL_CORE_SUSPENDRESUME, .update_status = sh_mobile_lcdc_update_bl, .get_brightness = sh_mobile_lcdc_get_brightness, diff --git a/drivers/video/fbdev/uvesafb.c b/drivers/video/fbdev/uvesafb.c index 98af9e02959b..dc0e8d90d9cc 100644 --- a/drivers/video/fbdev/uvesafb.c +++ b/drivers/video/fbdev/uvesafb.c @@ -5,6 +5,9 @@ * Loosely based upon the vesafb driver. * */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> @@ -149,8 +152,8 @@ static int uvesafb_exec(struct uvesafb_ktask *task) * allowed by connector. 
*/ if (sizeof(*m) + len > CONNECTOR_MAX_MSG_SIZE) { - printk(KERN_WARNING "uvesafb: message too long (%d), " - "can't execute task\n", (int)(sizeof(*m) + len)); + pr_warn("message too long (%d), can't execute task\n", + (int)(sizeof(*m) + len)); return -E2BIG; } @@ -198,10 +201,8 @@ static int uvesafb_exec(struct uvesafb_ktask *task) */ err = uvesafb_helper_start(); if (err) { - printk(KERN_ERR "uvesafb: failed to execute %s\n", - v86d_path); - printk(KERN_ERR "uvesafb: make sure that the v86d " - "helper is installed and executable\n"); + pr_err("failed to execute %s\n", v86d_path); + pr_err("make sure that the v86d helper is installed and executable\n"); } else { v86d_started = 1; err = cn_netlink_send(m, 0, 0, gfp_any()); @@ -375,9 +376,8 @@ static u8 *uvesafb_vbe_state_save(struct uvesafb_par *par) err = uvesafb_exec(task); if (err || (task->t.regs.eax & 0xffff) != 0x004f) { - printk(KERN_WARNING "uvesafb: VBE get state call " - "failed (eax=0x%x, err=%d)\n", - task->t.regs.eax, err); + pr_warn("VBE get state call failed (eax=0x%x, err=%d)\n", + task->t.regs.eax, err); kfree(state); state = NULL; } @@ -407,9 +407,8 @@ static void uvesafb_vbe_state_restore(struct uvesafb_par *par, u8 *state_buf) err = uvesafb_exec(task); if (err || (task->t.regs.eax & 0xffff) != 0x004f) - printk(KERN_WARNING "uvesafb: VBE state restore call " - "failed (eax=0x%x, err=%d)\n", - task->t.regs.eax, err); + pr_warn("VBE state restore call failed (eax=0x%x, err=%d)\n", + task->t.regs.eax, err); uvesafb_free(task); } @@ -427,24 +426,22 @@ static int uvesafb_vbe_getinfo(struct uvesafb_ktask *task, err = uvesafb_exec(task); if (err || (task->t.regs.eax & 0xffff) != 0x004f) { - printk(KERN_ERR "uvesafb: Getting VBE info block failed " - "(eax=0x%x, err=%d)\n", (u32)task->t.regs.eax, - err); + pr_err("Getting VBE info block failed (eax=0x%x, err=%d)\n", + (u32)task->t.regs.eax, err); return -EINVAL; } if (par->vbe_ib.vbe_version < 0x0200) { - printk(KERN_ERR "uvesafb: Sorry, pre-VBE 2.0 cards are " - "not supported.\n"); + pr_err("Sorry, pre-VBE 2.0 cards are not supported\n"); return -EINVAL; } if (!par->vbe_ib.mode_list_ptr) { - printk(KERN_ERR "uvesafb: Missing mode list!\n"); + pr_err("Missing mode list!\n"); return -EINVAL; } - printk(KERN_INFO "uvesafb: "); + pr_info(""); /* * Convert string pointers and the mode list pointer into @@ -452,23 +449,24 @@ static int uvesafb_vbe_getinfo(struct uvesafb_ktask *task, * video adapter and its vendor. 
*/ if (par->vbe_ib.oem_vendor_name_ptr) - printk("%s, ", + pr_cont("%s, ", ((char *)task->buf) + par->vbe_ib.oem_vendor_name_ptr); if (par->vbe_ib.oem_product_name_ptr) - printk("%s, ", + pr_cont("%s, ", ((char *)task->buf) + par->vbe_ib.oem_product_name_ptr); if (par->vbe_ib.oem_product_rev_ptr) - printk("%s, ", + pr_cont("%s, ", ((char *)task->buf) + par->vbe_ib.oem_product_rev_ptr); if (par->vbe_ib.oem_string_ptr) - printk("OEM: %s, ", + pr_cont("OEM: %s, ", ((char *)task->buf) + par->vbe_ib.oem_string_ptr); - printk("VBE v%d.%d\n", ((par->vbe_ib.vbe_version & 0xff00) >> 8), - par->vbe_ib.vbe_version & 0xff); + pr_cont("VBE v%d.%d\n", + (par->vbe_ib.vbe_version & 0xff00) >> 8, + par->vbe_ib.vbe_version & 0xff); return 0; } @@ -507,8 +505,7 @@ static int uvesafb_vbe_getmodes(struct uvesafb_ktask *task, err = uvesafb_exec(task); if (err || (task->t.regs.eax & 0xffff) != 0x004f) { - printk(KERN_WARNING "uvesafb: Getting mode info block " - "for mode 0x%x failed (eax=0x%x, err=%d)\n", + pr_warn("Getting mode info block for mode 0x%x failed (eax=0x%x, err=%d)\n", *mode, (u32)task->t.regs.eax, err); mode++; par->vbe_modes_cnt--; @@ -569,23 +566,20 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task, + task->t.regs.edi); par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1]; par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2]; - printk(KERN_INFO "uvesafb: protected mode interface info at " - "%04x:%04x\n", - (u16)task->t.regs.es, (u16)task->t.regs.edi); - printk(KERN_INFO "uvesafb: pmi: set display start = %p, " - "set palette = %p\n", par->pmi_start, - par->pmi_pal); + pr_info("protected mode interface info at %04x:%04x\n", + (u16)task->t.regs.es, (u16)task->t.regs.edi); + pr_info("pmi: set display start = %p, set palette = %p\n", + par->pmi_start, par->pmi_pal); if (par->pmi_base[3]) { - printk(KERN_INFO "uvesafb: pmi: ports = "); + pr_info("pmi: ports ="); for (i = par->pmi_base[3]/2; par->pmi_base[i] != 0xffff; i++) - printk("%x ", par->pmi_base[i]); - printk("\n"); + pr_cont(" %x", par->pmi_base[i]); + pr_cont("\n"); if (par->pmi_base[i] != 0xffff) { - printk(KERN_INFO "uvesafb: can't handle memory" - " requests, pmi disabled\n"); + pr_info("can't handle memory requests, pmi disabled\n"); par->ypan = par->pmi_setpal = 0; } } @@ -634,17 +628,13 @@ static int uvesafb_vbe_getedid(struct uvesafb_ktask *task, struct fb_info *info) return -EINVAL; if ((task->t.regs.ebx & 0x3) == 3) { - printk(KERN_INFO "uvesafb: VBIOS/hardware supports both " - "DDC1 and DDC2 transfers\n"); + pr_info("VBIOS/hardware supports both DDC1 and DDC2 transfers\n"); } else if ((task->t.regs.ebx & 0x3) == 2) { - printk(KERN_INFO "uvesafb: VBIOS/hardware supports DDC2 " - "transfers\n"); + pr_info("VBIOS/hardware supports DDC2 transfers\n"); } else if ((task->t.regs.ebx & 0x3) == 1) { - printk(KERN_INFO "uvesafb: VBIOS/hardware supports DDC1 " - "transfers\n"); + pr_info("VBIOS/hardware supports DDC1 transfers\n"); } else { - printk(KERN_INFO "uvesafb: VBIOS/hardware doesn't support " - "DDC transfers\n"); + pr_info("VBIOS/hardware doesn't support DDC transfers\n"); return -EINVAL; } @@ -718,14 +708,12 @@ static void uvesafb_vbe_getmonspecs(struct uvesafb_ktask *task, } if (info->monspecs.gtf) - printk(KERN_INFO - "uvesafb: monitor limits: vf = %d Hz, hf = %d kHz, " - "clk = %d MHz\n", info->monspecs.vfmax, + pr_info("monitor limits: vf = %d Hz, hf = %d kHz, clk = %d MHz\n", + info->monspecs.vfmax, (int)(info->monspecs.hfmax / 1000), (int)(info->monspecs.dclkmax / 1000000)); else - printk(KERN_INFO "uvesafb: 
no monitor limits have been set, " - "default refresh rate will be used\n"); + pr_info("no monitor limits have been set, default refresh rate will be used\n"); /* Add VBE modes to the modelist. */ for (i = 0; i < par->vbe_modes_cnt; i++) { @@ -779,8 +767,7 @@ static void uvesafb_vbe_getstatesize(struct uvesafb_ktask *task, err = uvesafb_exec(task); if (err || (task->t.regs.eax & 0xffff) != 0x004f) { - printk(KERN_WARNING "uvesafb: VBE state buffer size " - "cannot be determined (eax=0x%x, err=%d)\n", + pr_warn("VBE state buffer size cannot be determined (eax=0x%x, err=%d)\n", task->t.regs.eax, err); par->vbe_state_size = 0; return; @@ -815,8 +802,7 @@ static int uvesafb_vbe_init(struct fb_info *info) if (par->pmi_setpal || par->ypan) { if (__supported_pte_mask & _PAGE_NX) { par->pmi_setpal = par->ypan = 0; - printk(KERN_WARNING "uvesafb: NX protection is active, " - "better not use the PMI.\n"); + pr_warn("NX protection is active, better not use the PMI\n"); } else { uvesafb_vbe_getpmi(task, par); } @@ -859,8 +845,7 @@ static int uvesafb_vbe_init_mode(struct fb_info *info) goto gotmode; } } - printk(KERN_INFO "uvesafb: requested VBE mode 0x%x is " - "unavailable\n", vbemode); + pr_info("requested VBE mode 0x%x is unavailable\n", vbemode); vbemode = 0; } @@ -1181,8 +1166,8 @@ static int uvesafb_open(struct fb_info *info, int user) if (!cnt && par->vbe_state_size) { buf = uvesafb_vbe_state_save(par); if (IS_ERR(buf)) { - printk(KERN_WARNING "uvesafb: save hardware state" - "failed, error code is %ld!\n", PTR_ERR(buf)); + pr_warn("save hardware state failed, error code is %ld!\n", + PTR_ERR(buf)); } else { par->vbe_state_orig = buf; } @@ -1293,17 +1278,16 @@ setmode: * use our own timings. Try again with the default timings. */ if (crtc != NULL) { - printk(KERN_WARNING "uvesafb: mode switch failed " - "(eax=0x%x, err=%d). Trying again with " - "default timings.\n", task->t.regs.eax, err); + pr_warn("mode switch failed (eax=0x%x, err=%d) - trying again with default timings\n", + task->t.regs.eax, err); uvesafb_reset(task); kfree(crtc); crtc = NULL; info->var.pixclock = 0; goto setmode; } else { - printk(KERN_ERR "uvesafb: mode switch failed (eax=" - "0x%x, err=%d)\n", task->t.regs.eax, err); + pr_err("mode switch failed (eax=0x%x, err=%d)\n", + task->t.regs.eax, err); err = -EINVAL; goto out; } @@ -1510,13 +1494,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode) mode->bytes_per_scan_line; if (par->ypan && info->var.yres_virtual > info->var.yres) { - printk(KERN_INFO "uvesafb: scrolling: %s " - "using protected mode interface, " - "yres_virtual=%d\n", + pr_info("scrolling: %s using protected mode interface, yres_virtual=%d\n", (par->ypan > 1) ? 
"ywrap" : "ypan", info->var.yres_virtual); } else { - printk(KERN_INFO "uvesafb: scrolling: redraw\n"); + pr_info("scrolling: redraw\n"); info->var.yres_virtual = info->var.yres; par->ypan = 0; } @@ -1704,7 +1686,7 @@ static int uvesafb_probe(struct platform_device *dev) err = uvesafb_vbe_init(info); if (err) { - printk(KERN_ERR "uvesafb: vbe_init() failed with %d\n", err); + pr_err("vbe_init() failed with %d\n", err); goto out; } @@ -1726,15 +1708,15 @@ static int uvesafb_probe(struct platform_device *dev) uvesafb_init_info(info, mode); if (!request_region(0x3c0, 32, "uvesafb")) { - printk(KERN_ERR "uvesafb: request region 0x3c0-0x3e0 failed\n"); + pr_err("request region 0x3c0-0x3e0 failed\n"); err = -EIO; goto out_mode; } if (!request_mem_region(info->fix.smem_start, info->fix.smem_len, "uvesafb")) { - printk(KERN_ERR "uvesafb: cannot reserve video memory at " - "0x%lx\n", info->fix.smem_start); + pr_err("cannot reserve video memory at 0x%lx\n", + info->fix.smem_start); err = -EIO; goto out_reg; } @@ -1743,10 +1725,8 @@ static int uvesafb_probe(struct platform_device *dev) uvesafb_ioremap(info); if (!info->screen_base) { - printk(KERN_ERR - "uvesafb: abort, cannot ioremap 0x%x bytes of video " - "memory at 0x%lx\n", - info->fix.smem_len, info->fix.smem_start); + pr_err("abort, cannot ioremap 0x%x bytes of video memory at 0x%lx\n", + info->fix.smem_len, info->fix.smem_start); err = -EIO; goto out_mem; } @@ -1754,16 +1734,14 @@ static int uvesafb_probe(struct platform_device *dev) platform_set_drvdata(dev, info); if (register_framebuffer(info) < 0) { - printk(KERN_ERR - "uvesafb: failed to register framebuffer device\n"); + pr_err("failed to register framebuffer device\n"); err = -EINVAL; goto out_unmap; } - printk(KERN_INFO "uvesafb: framebuffer at 0x%lx, mapped to 0x%p, " - "using %dk, total %dk\n", info->fix.smem_start, - info->screen_base, info->fix.smem_len/1024, - par->vbe_ib.total_memory * 64); + pr_info("framebuffer at 0x%lx, mapped to 0x%p, using %dk, total %dk\n", + info->fix.smem_start, info->screen_base, + info->fix.smem_len / 1024, par->vbe_ib.total_memory * 64); fb_info(info, "%s frame buffer device\n", info->fix.id); err = sysfs_create_group(&dev->dev.kobj, &uvesafb_dev_attgrp); @@ -1871,8 +1849,7 @@ static int uvesafb_setup(char *options) else if (this_opt[0] >= '0' && this_opt[0] <= '9') { mode_option = this_opt; } else { - printk(KERN_WARNING - "uvesafb: unrecognized option %s\n", this_opt); + pr_warn("unrecognized option %s\n", this_opt); } } @@ -1931,8 +1908,7 @@ static int uvesafb_init(void) err = driver_create_file(&uvesafb_driver.driver, &driver_attr_v86d); if (err) { - printk(KERN_WARNING "uvesafb: failed to register " - "attributes\n"); + pr_warn("failed to register attributes\n"); err = 0; } } diff --git a/drivers/video/fbdev/vermilion/cr_pll.c b/drivers/video/fbdev/vermilion/cr_pll.c index ebc6e6e0dd0f..ba105c876bed 100644 --- a/drivers/video/fbdev/vermilion/cr_pll.c +++ b/drivers/video/fbdev/vermilion/cr_pll.c @@ -185,6 +185,7 @@ static int __init cr_pll_init(void) if (err) { printk(KERN_ERR "Carillo Ranch failed to initialize vml_sys.\n"); + iounmap(mch_regs_base); pci_dev_put(mch_dev); return err; } diff --git a/drivers/w1/masters/omap_hdq.c b/drivers/w1/masters/omap_hdq.c index 3612542b6044..83fc9aab34e8 100644 --- a/drivers/w1/masters/omap_hdq.c +++ b/drivers/w1/masters/omap_hdq.c @@ -704,7 +704,8 @@ static int omap_hdq_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) { - ret = -ENXIO; + dev_dbg(&pdev->dev, "Failed to get 
IRQ: %d\n", irq); + ret = irq; goto err_irq; } diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index 95ea7e6b1d99..74471e7aa5cc 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c @@ -728,6 +728,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn) memcpy(&sl->reg_num, rn, sizeof(sl->reg_num)); atomic_set(&sl->refcnt, 1); atomic_inc(&sl->master->refcnt); + dev->slave_count++; /* slave modules need to be loaded in a context with unlocked mutex */ mutex_unlock(&dev->mutex); @@ -747,11 +748,11 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn) sl->family = f; - err = __w1_attach_slave_device(sl); if (err < 0) { dev_err(&dev->dev, "%s: Attaching %s failed.\n", __func__, sl->name); + dev->slave_count--; w1_family_put(sl->family); atomic_dec(&sl->master->refcnt); kfree(sl); @@ -759,7 +760,6 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn) } sl->ttl = dev->slave_ttl; - dev->slave_count++; memcpy(msg.id.id, rn, sizeof(msg.id)); msg.type = W1_SLAVE_ADD; diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig index e6e31a16f68f..c722cbfdc7e6 100644 --- a/drivers/watchdog/Kconfig +++ b/drivers/watchdog/Kconfig @@ -46,6 +46,17 @@ config WATCHDOG_NOWAYOUT get killed. If you say Y here, the watchdog cannot be stopped once it has been started. +config WATCHDOG_HANDLE_BOOT_ENABLED + bool "Update boot-enabled watchdog until userspace takes over" + default y + help + The default watchdog behaviour (which you get if you say Y here) is + to ping watchdog devices that were enabled before the driver has + been loaded until control is taken over from userspace using the + /dev/watchdog file. If you say N here, the kernel will not update + the watchdog on its own. Thus if your userspace does not start fast + enough your device will reboot. + config WATCHDOG_SYSFS bool "Read different watchdog information through sysfs" help @@ -721,6 +732,14 @@ config RENESAS_WDT This driver adds watchdog support for the integrated watchdogs in the Renesas R-Car and other SH-Mobile SoCs (usually named RWDT or SWDT). +config RENESAS_RZAWDT + tristate "Renesas RZ/A WDT Watchdog" + depends on ARCH_RENESAS || COMPILE_TEST + select WATCHDOG_CORE + help + This driver adds watchdog support for the integrated watchdogs in the + Renesas RZ/A SoCs. These watchdogs can be used to reset a system. + config ASPEED_WATCHDOG tristate "Aspeed 2400 watchdog support" depends on ARCH_ASPEED || COMPILE_TEST @@ -744,6 +763,30 @@ config ZX2967_WATCHDOG To compile this driver as a module, choose M here: the module will be called zx2967_wdt. +config STM32_WATCHDOG + tristate "STM32 Independent WatchDoG (IWDG) support" + depends on ARCH_STM32 + select WATCHDOG_CORE + default y + help + Say Y here to include support for the watchdog timer + in stm32 SoCs. + + To compile this driver as a module, choose M here: the + module will be called stm32_iwdg. + +config UNIPHIER_WATCHDOG + tristate "UniPhier watchdog support" + depends on ARCH_UNIPHIER || COMPILE_TEST + depends on OF && MFD_SYSCON + select WATCHDOG_CORE + help + Say Y here to include support for the watchdog timer embedded + in the UniPhier system. + + To compile this driver as a module, choose M here: the + module will be called uniphier_wdt. + # AVR32 Architecture config AT32AP700X_WDT @@ -829,11 +872,12 @@ config EBC_C384_WDT the timeout module parameter. 
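
The WATCHDOG_HANDLE_BOOT_ENABLED option above only takes effect for drivers that report the hardware timer as already running. A minimal sketch of how a probe routine makes that report, using an invented foo_wdt driver (none of the foo_* names come from this series, and all hardware access is stubbed out):

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/watchdog.h>

static int foo_wdt_start(struct watchdog_device *wdd)
{
	/* arm the hardware timer with wdd->timeout here */
	return 0;
}

static int foo_wdt_stop(struct watchdog_device *wdd)
{
	/* disarm the hardware timer here */
	return 0;
}

static const struct watchdog_info foo_wdt_info = {
	.options = WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.identity = "foo watchdog",
};

static const struct watchdog_ops foo_wdt_ops = {
	.owner = THIS_MODULE,
	.start = foo_wdt_start,
	.stop = foo_wdt_stop,
};

static int foo_wdt_probe(struct platform_device *pdev)
{
	struct watchdog_device *wdd;

	wdd = devm_kzalloc(&pdev->dev, sizeof(*wdd), GFP_KERNEL);
	if (!wdd)
		return -ENOMEM;

	wdd->info = &foo_wdt_info;
	wdd->ops = &foo_wdt_ops;
	wdd->parent = &pdev->dev;
	wdd->timeout = 30;

	/*
	 * Suppose a status register read showed that the bootloader left
	 * the timer running. Setting WDOG_HW_RUNNING tells the core to
	 * ping the device on the driver's behalf until userspace opens
	 * /dev/watchdog, but only while WATCHDOG_HANDLE_BOOT_ENABLED=y;
	 * with =n the board resets if userspace starts too slowly.
	 */
	set_bit(WDOG_HW_RUNNING, &wdd->status);

	return devm_watchdog_register_device(&pdev->dev, wdd);
}

The intel-mid_wdt and gpio_wdt hunks further down use exactly this WDOG_HW_RUNNING handshake instead of keeping their own keepalive timers.
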
config F71808E_WDT - tristate "Fintek F71808E, F71862FG, F71869, F71882FG and F71889FG Watchdog" + tristate "Fintek F718xx, F818xx Super I/O Watchdog" depends on X86 help - This is the driver for the hardware watchdog on the Fintek - F71808E, F71862FG, F71869, F71882FG and F71889FG Super I/O controllers. + This is the driver for the hardware watchdog on the Fintek F71808E, + F71862FG, F71868, F71869, F71882FG, F71889FG, F81865 and F81866 + Super I/O controllers. You can compile this driver directly into the kernel, or use it as a module. The module will be called f71808e_wdt. @@ -1037,13 +1081,12 @@ config IT8712F_WDT config IT87_WDT tristate "IT87 Watchdog Timer" depends on X86 + select WATCHDOG_CORE ---help--- - This is the driver for the hardware watchdog on the ITE IT8620, - IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726 and IT8728 - Super I/O chips. - - If the driver does not work, then make sure that the game port in - the BIOS is enabled. + This is the driver for the hardware watchdog on the ITE IT8607, + IT8620, IT8622, IT8625, IT8628, IT8655, IT8665, IT8686, IT8702, + IT8712, IT8716, IT8718, IT8720, IT8721, IT8726, IT8728, and + IT8783 Super I/O chips. This watchdog simply watches your kernel to make sure it doesn't freeze, and if it does, it reboots your computer after a certain diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile index a2126e2a99ae..56adf9fa67d0 100644 --- a/drivers/watchdog/Makefile +++ b/drivers/watchdog/Makefile @@ -82,8 +82,11 @@ obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o obj-$(CONFIG_ATLAS7_WATCHDOG) += atlas7_wdt.o obj-$(CONFIG_RENESAS_WDT) += renesas_wdt.o +obj-$(CONFIG_RENESAS_RZAWDT) += rza_wdt.o obj-$(CONFIG_ASPEED_WATCHDOG) += aspeed_wdt.o obj-$(CONFIG_ZX2967_WATCHDOG) += zx2967_wdt.o +obj-$(CONFIG_STM32_WATCHDOG) += stm32_iwdg.o +obj-$(CONFIG_UNIPHIER_WATCHDOG) += uniphier_wdt.o # AVR32 Architecture obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o diff --git a/drivers/watchdog/bcm47xx_wdt.c b/drivers/watchdog/bcm47xx_wdt.c index 35725e21b18a..236582809336 100644 --- a/drivers/watchdog/bcm47xx_wdt.c +++ b/drivers/watchdog/bcm47xx_wdt.c @@ -97,7 +97,7 @@ static int bcm47xx_wdt_restart(struct watchdog_device *wdd, return 0; } -static struct watchdog_ops bcm47xx_wdt_hard_ops = { +static const struct watchdog_ops bcm47xx_wdt_hard_ops = { .owner = THIS_MODULE, .start = bcm47xx_wdt_hard_start, .stop = bcm47xx_wdt_hard_stop, @@ -168,7 +168,7 @@ static const struct watchdog_info bcm47xx_wdt_info = { WDIOF_MAGICCLOSE, }; -static struct watchdog_ops bcm47xx_wdt_soft_ops = { +static const struct watchdog_ops bcm47xx_wdt_soft_ops = { .owner = THIS_MODULE, .start = bcm47xx_wdt_soft_start, .stop = bcm47xx_wdt_soft_stop, diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c index 86e0b5d2e761..05c000081e9d 100644 --- a/drivers/watchdog/cadence_wdt.c +++ b/drivers/watchdog/cadence_wdt.c @@ -458,7 +458,7 @@ static int __maybe_unused cdns_wdt_resume(struct device *dev) static SIMPLE_DEV_PM_OPS(cdns_wdt_pm_ops, cdns_wdt_suspend, cdns_wdt_resume); -static struct of_device_id cdns_wdt_of_match[] = { +static const struct of_device_id cdns_wdt_of_match[] = { { .compatible = "cdns,wdt-r1p2", }, { /* end of table */ } }; diff --git a/drivers/watchdog/davinci_wdt.c b/drivers/watchdog/davinci_wdt.c index 0e731d797a2a..2f46487af86d 100644 --- a/drivers/watchdog/davinci_wdt.c +++ b/drivers/watchdog/davinci_wdt.c @@ -173,7 +173,11 @@ static int davinci_wdt_probe(struct platform_device 
*pdev) return PTR_ERR(davinci_wdt->clk); } - clk_prepare_enable(davinci_wdt->clk); + ret = clk_prepare_enable(davinci_wdt->clk); + if (ret) { + dev_err(&pdev->dev, "failed to prepare clock\n"); + return ret; + } platform_set_drvdata(pdev, davinci_wdt); @@ -198,8 +202,10 @@ static int davinci_wdt_probe(struct platform_device *pdev) return PTR_ERR(davinci_wdt->base); ret = watchdog_register_device(wdd); - if (ret < 0) + if (ret < 0) { + clk_disable_unprepare(davinci_wdt->clk); dev_err(dev, "cannot register watchdog device\n"); + } return ret; } diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c index 914da3a4d334..36be987ff9ef 100644 --- a/drivers/watchdog/dw_wdt.c +++ b/drivers/watchdog/dw_wdt.c @@ -29,6 +29,7 @@ #include <linux/of.h> #include <linux/pm.h> #include <linux/platform_device.h> +#include <linux/reset.h> #include <linux/watchdog.h> #define WDOG_CONTROL_REG_OFFSET 0x00 @@ -54,6 +55,7 @@ struct dw_wdt { struct clk *clk; unsigned long rate; struct watchdog_device wdd; + struct reset_control *rst; }; #define to_dw_wdt(wdd) container_of(wdd, struct dw_wdt, wdd) @@ -234,6 +236,14 @@ static int dw_wdt_drv_probe(struct platform_device *pdev) goto out_disable_clk; } + dw_wdt->rst = devm_reset_control_get_optional_shared(&pdev->dev, NULL); + if (IS_ERR(dw_wdt->rst)) { + ret = PTR_ERR(dw_wdt->rst); + goto out_disable_clk; + } + + reset_control_deassert(dw_wdt->rst); + wdd = &dw_wdt->wdd; wdd->info = &dw_wdt_ident; wdd->ops = &dw_wdt_ops; @@ -279,6 +289,7 @@ static int dw_wdt_drv_remove(struct platform_device *pdev) struct dw_wdt *dw_wdt = platform_get_drvdata(pdev); watchdog_unregister_device(&dw_wdt->wdd); + reset_control_assert(dw_wdt->rst); clk_disable_unprepare(dw_wdt->clk); return 0; diff --git a/drivers/watchdog/f71808e_wdt.c b/drivers/watchdog/f71808e_wdt.c index 1b7e9169072f..8658dba21768 100644 --- a/drivers/watchdog/f71808e_wdt.c +++ b/drivers/watchdog/f71808e_wdt.c @@ -57,6 +57,7 @@ #define SIO_F71808_ID 0x0901 /* Chipset ID */ #define SIO_F71858_ID 0x0507 /* Chipset ID */ #define SIO_F71862_ID 0x0601 /* Chipset ID */ +#define SIO_F71868_ID 0x1106 /* Chipset ID */ #define SIO_F71869_ID 0x0814 /* Chipset ID */ #define SIO_F71869A_ID 0x1007 /* Chipset ID */ #define SIO_F71882_ID 0x0541 /* Chipset ID */ @@ -101,7 +102,7 @@ MODULE_PARM_DESC(timeout, static unsigned int pulse_width = WATCHDOG_PULSE_WIDTH; module_param(pulse_width, uint, 0); MODULE_PARM_DESC(pulse_width, - "Watchdog signal pulse width. 0(=level), 1 ms, 25 ms, 125 ms or 5000 ms" + "Watchdog signal pulse width. 0(=level), 1, 25, 30, 125, 150, 5000 or 6000 ms" " (default=" __MODULE_STRING(WATCHDOG_PULSE_WIDTH) ")"); static unsigned int f71862fg_pin = WATCHDOG_F71862FG_PIN; @@ -119,13 +120,14 @@ module_param(start_withtimeout, uint, 0); MODULE_PARM_DESC(start_withtimeout, "Start watchdog timer on module load with" " given initial timeout. 
Zero (default) disables this feature."); -enum chips { f71808fg, f71858fg, f71862fg, f71869, f71882fg, f71889fg, f81865, - f81866}; +enum chips { f71808fg, f71858fg, f71862fg, f71868, f71869, f71882fg, f71889fg, + f81865, f81866}; static const char *f71808e_names[] = { "f71808fg", "f71858fg", "f71862fg", + "f71868", "f71869", "f71882fg", "f71889fg", @@ -252,16 +254,23 @@ static int watchdog_set_timeout(int timeout) static int watchdog_set_pulse_width(unsigned int pw) { int err = 0; + unsigned int t1 = 25, t2 = 125, t3 = 5000; + + if (watchdog.type == f71868) { + t1 = 30; + t2 = 150; + t3 = 6000; + } mutex_lock(&watchdog.lock); - if (pw <= 1) { + if (pw <= 1) { watchdog.pulse_val = 0; - } else if (pw <= 25) { + } else if (pw <= t1) { watchdog.pulse_val = 1; - } else if (pw <= 125) { + } else if (pw <= t2) { watchdog.pulse_val = 2; - } else if (pw <= 5000) { + } else if (pw <= t3) { watchdog.pulse_val = 3; } else { pr_err("pulse width out of range\n"); @@ -354,6 +363,7 @@ static int watchdog_start(void) goto exit_superio; break; + case f71868: case f71869: /* GPIO14 --> WDTRST# */ superio_clear_bit(watchdog.sioaddr, SIO_REG_MFUNCT1, 4); @@ -792,6 +802,9 @@ static int __init f71808e_find(int sioaddr) watchdog.type = f71862fg; err = f71862fg_pin_configure(0); /* validate module parameter */ break; + case SIO_F71868_ID: + watchdog.type = f71868; + break; case SIO_F71869_ID: case SIO_F71869A_ID: watchdog.type = f71869; diff --git a/drivers/watchdog/gpio_wdt.c b/drivers/watchdog/gpio_wdt.c index 93457cabc178..cb66c2f99ff1 100644 --- a/drivers/watchdog/gpio_wdt.c +++ b/drivers/watchdog/gpio_wdt.c @@ -18,7 +18,6 @@ #define SOFT_TIMEOUT_MIN 1 #define SOFT_TIMEOUT_DEF 60 -#define SOFT_TIMEOUT_MAX 0xffff enum { HW_ALGO_TOGGLE, @@ -30,11 +29,7 @@ struct gpio_wdt_priv { bool active_low; bool state; bool always_running; - bool armed; unsigned int hw_algo; - unsigned int hw_margin; - unsigned long last_jiffies; - struct timer_list timer; struct watchdog_device wdd; }; @@ -47,21 +42,10 @@ static void gpio_wdt_disable(struct gpio_wdt_priv *priv) gpio_direction_input(priv->gpio); } -static void gpio_wdt_hwping(unsigned long data) +static int gpio_wdt_ping(struct watchdog_device *wdd) { - struct watchdog_device *wdd = (struct watchdog_device *)data; struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd); - if (priv->armed && time_after(jiffies, priv->last_jiffies + - msecs_to_jiffies(wdd->timeout * 1000))) { - dev_crit(wdd->parent, - "Timer expired. 
System will reboot soon!\n"); - return; - } - - /* Restart timer */ - mod_timer(&priv->timer, jiffies + priv->hw_margin); - switch (priv->hw_algo) { case HW_ALGO_TOGGLE: /* Toggle output pin */ @@ -75,55 +59,33 @@ static void gpio_wdt_hwping(unsigned long data) gpio_set_value_cansleep(priv->gpio, priv->active_low); break; } -} - -static void gpio_wdt_start_impl(struct gpio_wdt_priv *priv) -{ - priv->state = priv->active_low; - gpio_direction_output(priv->gpio, priv->state); - priv->last_jiffies = jiffies; - gpio_wdt_hwping((unsigned long)&priv->wdd); + return 0; } static int gpio_wdt_start(struct watchdog_device *wdd) { struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd); - gpio_wdt_start_impl(priv); - priv->armed = true; + priv->state = priv->active_low; + gpio_direction_output(priv->gpio, priv->state); - return 0; + set_bit(WDOG_HW_RUNNING, &wdd->status); + + return gpio_wdt_ping(wdd); } static int gpio_wdt_stop(struct watchdog_device *wdd) { struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd); - priv->armed = false; if (!priv->always_running) { - mod_timer(&priv->timer, 0); gpio_wdt_disable(priv); + clear_bit(WDOG_HW_RUNNING, &wdd->status); } return 0; } -static int gpio_wdt_ping(struct watchdog_device *wdd) -{ - struct gpio_wdt_priv *priv = watchdog_get_drvdata(wdd); - - priv->last_jiffies = jiffies; - - return 0; -} - -static int gpio_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t) -{ - wdd->timeout = t; - - return gpio_wdt_ping(wdd); -} - static const struct watchdog_info gpio_wdt_ident = { .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, @@ -135,7 +97,6 @@ static const struct watchdog_ops gpio_wdt_ops = { .start = gpio_wdt_start, .stop = gpio_wdt_stop, .ping = gpio_wdt_ping, - .set_timeout = gpio_wdt_set_timeout, }; static int gpio_wdt_probe(struct platform_device *pdev) @@ -185,9 +146,6 @@ static int gpio_wdt_probe(struct platform_device *pdev) if (hw_margin < 2 || hw_margin > 65535) return -EINVAL; - /* Use safe value (1/2 of real timeout) */ - priv->hw_margin = msecs_to_jiffies(hw_margin / 2); - priv->always_running = of_property_read_bool(pdev->dev.of_node, "always-running"); @@ -196,31 +154,26 @@ static int gpio_wdt_probe(struct platform_device *pdev) priv->wdd.info = &gpio_wdt_ident; priv->wdd.ops = &gpio_wdt_ops; priv->wdd.min_timeout = SOFT_TIMEOUT_MIN; - priv->wdd.max_timeout = SOFT_TIMEOUT_MAX; + priv->wdd.max_hw_heartbeat_ms = hw_margin; priv->wdd.parent = &pdev->dev; if (watchdog_init_timeout(&priv->wdd, 0, &pdev->dev) < 0) priv->wdd.timeout = SOFT_TIMEOUT_DEF; - setup_timer(&priv->timer, gpio_wdt_hwping, (unsigned long)&priv->wdd); - watchdog_stop_on_reboot(&priv->wdd); - ret = watchdog_register_device(&priv->wdd); - if (ret) - return ret; - if (priv->always_running) - gpio_wdt_start_impl(priv); + gpio_wdt_start(&priv->wdd); - return 0; + ret = watchdog_register_device(&priv->wdd); + + return ret; } static int gpio_wdt_remove(struct platform_device *pdev) { struct gpio_wdt_priv *priv = platform_get_drvdata(pdev); - del_timer_sync(&priv->timer); watchdog_unregister_device(&priv->wdd); return 0; diff --git a/drivers/watchdog/intel-mid_wdt.c b/drivers/watchdog/intel-mid_wdt.c index 45e4d02221b5..72c108a12c19 100644 --- a/drivers/watchdog/intel-mid_wdt.c +++ b/drivers/watchdog/intel-mid_wdt.c @@ -147,8 +147,21 @@ static int mid_wdt_probe(struct platform_device *pdev) return ret; } - /* Make sure the watchdog is not running */ - wdt_stop(wdt_dev); + /* + * The firmware followed by U-Boot leaves the watchdog running + * with the default 
threshold which may vary. When we get here + * we should make a decision to prevent any side effects before + * user space daemon will take care of it. The best option, + * taking into consideration that there is no way to read values + * back from hardware, is to enforce watchdog being run with + * deterministic values. + */ + ret = wdt_start(wdt_dev); + if (ret) + return ret; + + /* Make sure the watchdog is serviced */ + set_bit(WDOG_HW_RUNNING, &wdt_dev->status); ret = devm_watchdog_register_device(&pdev->dev, wdt_dev); if (ret) { diff --git a/drivers/watchdog/it87_wdt.c b/drivers/watchdog/it87_wdt.c index b9878c41598f..dd1e7eaef50f 100644 --- a/drivers/watchdog/it87_wdt.c +++ b/drivers/watchdog/it87_wdt.c @@ -12,8 +12,9 @@ * http://www.ite.com.tw/ * * Support of the watchdog timers, which are available on - * IT8620, IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726, - * IT8728 and IT8783. + * IT8607, IT8620, IT8622, IT8625, IT8628, IT8655, IT8665, IT8686, + * IT8702, IT8712, IT8716, IT8718, IT8720, IT8721, IT8726, IT8728, + * and IT8783. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -24,38 +25,21 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> -#include <linux/kernel.h> -#include <linux/fs.h> -#include <linux/miscdevice.h> -#include <linux/init.h> -#include <linux/ioport.h> #include <linux/watchdog.h> -#include <linux/notifier.h> -#include <linux/reboot.h> -#include <linux/uaccess.h> -#include <linux/io.h> - -#define WATCHDOG_VERSION "1.14" #define WATCHDOG_NAME "IT87 WDT" -#define DRIVER_VERSION WATCHDOG_NAME " driver, v" WATCHDOG_VERSION "\n" -#define WD_MAGIC 'V' /* Defaults for Module Parameter */ -#define DEFAULT_NOGAMEPORT 0 -#define DEFAULT_NOCIR 0 -#define DEFAULT_EXCLUSIVE 1 #define DEFAULT_TIMEOUT 60 #define DEFAULT_TESTMODE 0 #define DEFAULT_NOWAYOUT WATCHDOG_NOWAYOUT @@ -66,19 +50,22 @@ /* Logical device Numbers LDN */ #define GPIO 0x07 -#define GAMEPORT 0x09 -#define CIR 0x0a /* Configuration Registers and Functions */ #define LDNREG 0x07 #define CHIPID 0x20 #define CHIPREV 0x22 -#define ACTREG 0x30 -#define BASEREG 0x60 /* Chip Id numbers */ #define NO_DEV_ID 0xffff +#define IT8607_ID 0x8607 #define IT8620_ID 0x8620 +#define IT8622_ID 0x8622 +#define IT8625_ID 0x8625 +#define IT8628_ID 0x8628 +#define IT8655_ID 0x8655 +#define IT8665_ID 0x8665 +#define IT8686_ID 0x8686 #define IT8702_ID 0x8702 #define IT8705_ID 0x8705 #define IT8712_ID 0x8712 @@ -96,14 +83,6 @@ #define WDTVALLSB 0x73 #define WDTVALMSB 0x74 -/* GPIO Bits WDTCTRL */ -#define WDT_CIRINT 0x80 -#define WDT_MOUSEINT 0x40 -#define WDT_KYBINT 0x20 -#define WDT_GAMEPORT 0x10 /* not in it8718, it8720, it8721, it8728 */ -#define WDT_FORCE 0x02 -#define WDT_ZERO 0x01 - /* GPIO Bits WDTCFG */ #define WDT_TOV1 0x80 #define WDT_KRST 0x40 @@ -111,55 +90,12 @@ #define WDT_PWROK 0x10 /* not in it8721 */ #define WDT_INT_MASK 0x0f -/* CIR Configuration Register LDN=0x0a */ -#define CIR_ILS 0x70 - -/* The 
default Base address is not always available, we use this */ -#define CIR_BASE 0x0208 - -/* CIR Controller */ -#define CIR_DR(b) (b) -#define CIR_IER(b) (b + 1) -#define CIR_RCR(b) (b + 2) -#define CIR_TCR1(b) (b + 3) -#define CIR_TCR2(b) (b + 4) -#define CIR_TSR(b) (b + 5) -#define CIR_RSR(b) (b + 6) -#define CIR_BDLR(b) (b + 5) -#define CIR_BDHR(b) (b + 6) -#define CIR_IIR(b) (b + 7) - -/* Default Base address of Game port */ -#define GP_BASE_DEFAULT 0x0201 - -/* wdt_status */ -#define WDTS_TIMER_RUN 0 -#define WDTS_DEV_OPEN 1 -#define WDTS_KEEPALIVE 2 -#define WDTS_LOCKED 3 -#define WDTS_USE_GP 4 -#define WDTS_EXPECTED 5 -#define WDTS_USE_CIR 6 - -static unsigned int base, gpact, ciract, max_units, chip_type; -static unsigned long wdt_status; - -static int nogameport = DEFAULT_NOGAMEPORT; -static int nocir = DEFAULT_NOCIR; -static int exclusive = DEFAULT_EXCLUSIVE; -static int timeout = DEFAULT_TIMEOUT; -static int testmode = DEFAULT_TESTMODE; -static bool nowayout = DEFAULT_NOWAYOUT; - -module_param(nogameport, int, 0); -MODULE_PARM_DESC(nogameport, "Forbid the activation of game port, default=" - __MODULE_STRING(DEFAULT_NOGAMEPORT)); -module_param(nocir, int, 0); -MODULE_PARM_DESC(nocir, "Forbid the use of Consumer IR interrupts to reset timer, default=" - __MODULE_STRING(DEFAULT_NOCIR)); -module_param(exclusive, int, 0); -MODULE_PARM_DESC(exclusive, "Watchdog exclusive device open, default=" - __MODULE_STRING(DEFAULT_EXCLUSIVE)); +static unsigned int max_units, chip_type; + +static unsigned int timeout = DEFAULT_TIMEOUT; +static int testmode = DEFAULT_TESTMODE; +static bool nowayout = DEFAULT_NOWAYOUT; + module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds, default=" __MODULE_STRING(DEFAULT_TIMEOUT)); @@ -231,88 +167,59 @@ static inline void superio_outw(int val, int reg) } /* Internal function, should be called after superio_select(GPIO) */ -static void wdt_update_timeout(void) +static void _wdt_update_timeout(unsigned int t) { unsigned char cfg = WDT_KRST; - int tm = timeout; if (testmode) cfg = 0; - if (tm <= max_units) + if (t <= max_units) cfg |= WDT_TOV1; else - tm /= 60; + t /= 60; if (chip_type != IT8721_ID) cfg |= WDT_PWROK; superio_outb(cfg, WDTCFG); - superio_outb(tm, WDTVALLSB); + superio_outb(t, WDTVALLSB); if (max_units > 255) - superio_outb(tm>>8, WDTVALMSB); + superio_outb(t >> 8, WDTVALMSB); } -static int wdt_round_time(int t) +static int wdt_update_timeout(unsigned int t) { - t += 59; - t -= t % 60; - return t; -} + int ret; -/* watchdog timer handling */ - -static void wdt_keepalive(void) -{ - if (test_bit(WDTS_USE_GP, &wdt_status)) - inb(base); - else if (test_bit(WDTS_USE_CIR, &wdt_status)) - /* The timer reloads with around 5 msec delay */ - outb(0x55, CIR_DR(base)); - else { - if (superio_enter()) - return; - - superio_select(GPIO); - wdt_update_timeout(); - superio_exit(); - } - set_bit(WDTS_KEEPALIVE, &wdt_status); -} - -static int wdt_start(void) -{ - int ret = superio_enter(); + ret = superio_enter(); if (ret) return ret; superio_select(GPIO); - if (test_bit(WDTS_USE_GP, &wdt_status)) - superio_outb(WDT_GAMEPORT, WDTCTRL); - else if (test_bit(WDTS_USE_CIR, &wdt_status)) - superio_outb(WDT_CIRINT, WDTCTRL); - wdt_update_timeout(); - + _wdt_update_timeout(t); superio_exit(); return 0; } -static int wdt_stop(void) +static int wdt_round_time(int t) { - int ret = superio_enter(); - if (ret) - return ret; + t += 59; + t -= t % 60; + return t; +} - superio_select(GPIO); - superio_outb(0x00, WDTCTRL); - superio_outb(WDT_TOV1, 
WDTCFG); - superio_outb(0x00, WDTVALLSB); - if (max_units > 255) - superio_outb(0x00, WDTVALMSB); +/* watchdog timer handling */ - superio_exit(); - return 0; +static int wdt_start(struct watchdog_device *wdd) +{ + return wdt_update_timeout(wdd->timeout); +} + +static int wdt_stop(struct watchdog_device *wdd) +{ + return wdt_update_timeout(0); } /** @@ -325,292 +232,44 @@ static int wdt_stop(void) * Used within WDIOC_SETTIMEOUT watchdog device ioctl. */ -static int wdt_set_timeout(int t) +static int wdt_set_timeout(struct watchdog_device *wdd, unsigned int t) { - if (t < 1 || t > max_units * 60) - return -EINVAL; + int ret = 0; if (t > max_units) - timeout = wdt_round_time(t); - else - timeout = t; - - if (test_bit(WDTS_TIMER_RUN, &wdt_status)) { - int ret = superio_enter(); - if (ret) - return ret; - - superio_select(GPIO); - wdt_update_timeout(); - superio_exit(); - } - return 0; -} - -/** - * wdt_get_status - determines the status supported by watchdog ioctl - * @status: status returned to user space - * - * The status bit of the device does not allow to distinguish - * between a regular system reset and a watchdog forced reset. - * But, in test mode it is useful, so it is supported through - * WDIOC_GETSTATUS watchdog ioctl. Additionally the driver - * reports the keepalive signal and the acception of the magic. - * - * Used within WDIOC_GETSTATUS watchdog device ioctl. - */ - -static int wdt_get_status(int *status) -{ - *status = 0; - if (testmode) { - int ret = superio_enter(); - if (ret) - return ret; - - superio_select(GPIO); - if (superio_inb(WDTCTRL) & WDT_ZERO) { - superio_outb(0x00, WDTCTRL); - clear_bit(WDTS_TIMER_RUN, &wdt_status); - *status |= WDIOF_CARDRESET; - } - - superio_exit(); - } - if (test_and_clear_bit(WDTS_KEEPALIVE, &wdt_status)) - *status |= WDIOF_KEEPALIVEPING; - if (test_bit(WDTS_EXPECTED, &wdt_status)) - *status |= WDIOF_MAGICCLOSE; - return 0; -} - -/* /dev/watchdog handling */ - -/** - * wdt_open - watchdog file_operations .open - * @inode: inode of the device - * @file: file handle to the device - * - * The watchdog timer starts by opening the device. - * - * Used within the file operation of the watchdog device. - */ + t = wdt_round_time(t); -static int wdt_open(struct inode *inode, struct file *file) -{ - if (exclusive && test_and_set_bit(WDTS_DEV_OPEN, &wdt_status)) - return -EBUSY; - if (!test_and_set_bit(WDTS_TIMER_RUN, &wdt_status)) { - int ret; - if (nowayout && !test_and_set_bit(WDTS_LOCKED, &wdt_status)) - __module_get(THIS_MODULE); - - ret = wdt_start(); - if (ret) { - clear_bit(WDTS_LOCKED, &wdt_status); - clear_bit(WDTS_TIMER_RUN, &wdt_status); - clear_bit(WDTS_DEV_OPEN, &wdt_status); - return ret; - } - } - return nonseekable_open(inode, file); -} + wdd->timeout = t; -/** - * wdt_release - watchdog file_operations .release - * @inode: inode of the device - * @file: file handle to the device - * - * Closing the watchdog device either stops the watchdog timer - * or in the case, that nowayout is set or the magic character - * wasn't written, a critical warning about an running watchdog - * timer is given. - * - * Used within the file operation of the watchdog device. - */ + if (watchdog_hw_running(wdd)) + ret = wdt_update_timeout(t); -static int wdt_release(struct inode *inode, struct file *file) -{ - if (test_bit(WDTS_TIMER_RUN, &wdt_status)) { - if (test_and_clear_bit(WDTS_EXPECTED, &wdt_status)) { - int ret = wdt_stop(); - if (ret) { - /* - * Stop failed. Just keep the watchdog alive - * and hope nothing bad happens. 
- */ - set_bit(WDTS_EXPECTED, &wdt_status); - wdt_keepalive(); - return ret; - } - clear_bit(WDTS_TIMER_RUN, &wdt_status); - } else { - wdt_keepalive(); - pr_crit("unexpected close, not stopping watchdog!\n"); - } - } - clear_bit(WDTS_DEV_OPEN, &wdt_status); - return 0; -} - -/** - * wdt_write - watchdog file_operations .write - * @file: file handle to the watchdog - * @buf: buffer to write - * @count: count of bytes - * @ppos: pointer to the position to write. No seeks allowed - * - * A write to a watchdog device is defined as a keepalive signal. Any - * write of data will do, as we don't define content meaning. - * - * Used within the file operation of the watchdog device. - */ - -static ssize_t wdt_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - if (count) { - clear_bit(WDTS_EXPECTED, &wdt_status); - wdt_keepalive(); - } - if (!nowayout) { - size_t ofs; - - /* note: just in case someone wrote the magic character long ago */ - for (ofs = 0; ofs != count; ofs++) { - char c; - if (get_user(c, buf + ofs)) - return -EFAULT; - if (c == WD_MAGIC) - set_bit(WDTS_EXPECTED, &wdt_status); - } - } - return count; + return ret; } static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, - .firmware_version = 1, + .firmware_version = 1, .identity = WATCHDOG_NAME, }; -/** - * wdt_ioctl - watchdog file_operations .unlocked_ioctl - * @file: file handle to the device - * @cmd: watchdog command - * @arg: argument pointer - * - * The watchdog API defines a common set of functions for all watchdogs - * according to their available features. - * - * Used within the file operation of the watchdog device. - */ - -static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - int rc = 0, status, new_options, new_timeout; - union { - struct watchdog_info __user *ident; - int __user *i; - } uarg; - - uarg.i = (int __user *)arg; - - switch (cmd) { - case WDIOC_GETSUPPORT: - return copy_to_user(uarg.ident, - &ident, sizeof(ident)) ? 
-EFAULT : 0; - - case WDIOC_GETSTATUS: - rc = wdt_get_status(&status); - if (rc) - return rc; - return put_user(status, uarg.i); - - case WDIOC_GETBOOTSTATUS: - return put_user(0, uarg.i); - - case WDIOC_KEEPALIVE: - wdt_keepalive(); - return 0; - - case WDIOC_SETOPTIONS: - if (get_user(new_options, uarg.i)) - return -EFAULT; - - switch (new_options) { - case WDIOS_DISABLECARD: - if (test_bit(WDTS_TIMER_RUN, &wdt_status)) { - rc = wdt_stop(); - if (rc) - return rc; - } - clear_bit(WDTS_TIMER_RUN, &wdt_status); - return 0; - - case WDIOS_ENABLECARD: - if (!test_and_set_bit(WDTS_TIMER_RUN, &wdt_status)) { - rc = wdt_start(); - if (rc) { - clear_bit(WDTS_TIMER_RUN, &wdt_status); - return rc; - } - } - return 0; - - default: - return -EFAULT; - } - - case WDIOC_SETTIMEOUT: - if (get_user(new_timeout, uarg.i)) - return -EFAULT; - rc = wdt_set_timeout(new_timeout); - case WDIOC_GETTIMEOUT: - if (put_user(timeout, uarg.i)) - return -EFAULT; - return rc; - - default: - return -ENOTTY; - } -} - -static int wdt_notify_sys(struct notifier_block *this, unsigned long code, - void *unused) -{ - if (code == SYS_DOWN || code == SYS_HALT) - wdt_stop(); - return NOTIFY_DONE; -} - -static const struct file_operations wdt_fops = { - .owner = THIS_MODULE, - .llseek = no_llseek, - .write = wdt_write, - .unlocked_ioctl = wdt_ioctl, - .open = wdt_open, - .release = wdt_release, +static struct watchdog_ops wdt_ops = { + .owner = THIS_MODULE, + .start = wdt_start, + .stop = wdt_stop, + .set_timeout = wdt_set_timeout, }; -static struct miscdevice wdt_miscdev = { - .minor = WATCHDOG_MINOR, - .name = "watchdog", - .fops = &wdt_fops, -}; - -static struct notifier_block wdt_notifier = { - .notifier_call = wdt_notify_sys, +static struct watchdog_device wdt_dev = { + .info = &ident, + .ops = &wdt_ops, + .min_timeout = 1, }; static int __init it87_wdt_init(void) { - int rc = 0; - int try_gameport = !nogameport; u8 chip_rev; - int gp_rreq_fail = 0; - - wdt_status = 0; + int rc; rc = superio_enter(); if (rc) @@ -631,14 +290,20 @@ static int __init it87_wdt_init(void) case IT8726_ID: max_units = 65535; break; + case IT8607_ID: case IT8620_ID: + case IT8622_ID: + case IT8625_ID: + case IT8628_ID: + case IT8655_ID: + case IT8665_ID: + case IT8686_ID: case IT8718_ID: case IT8720_ID: case IT8721_ID: case IT8728_ID: case IT8783_ID: max_units = 65535; - try_gameport = 0; break; case IT8705_ID: pr_err("Unsupported Chip found, Chip %04x Revision %02x\n", @@ -660,48 +325,7 @@ static int __init it87_wdt_init(void) superio_select(GPIO); superio_outb(WDT_TOV1, WDTCFG); superio_outb(0x00, WDTCTRL); - - /* First try to get Gameport support */ - if (try_gameport) { - superio_select(GAMEPORT); - base = superio_inw(BASEREG); - if (!base) { - base = GP_BASE_DEFAULT; - superio_outw(base, BASEREG); - } - gpact = superio_inb(ACTREG); - superio_outb(0x01, ACTREG); - if (request_region(base, 1, WATCHDOG_NAME)) - set_bit(WDTS_USE_GP, &wdt_status); - else - gp_rreq_fail = 1; - } - - /* If we haven't Gameport support, try to get CIR support */ - if (!nocir && !test_bit(WDTS_USE_GP, &wdt_status)) { - if (!request_region(CIR_BASE, 8, WATCHDOG_NAME)) { - if (gp_rreq_fail) - pr_err("I/O Address 0x%04x and 0x%04x already in use\n", - base, CIR_BASE); - else - pr_err("I/O Address 0x%04x already in use\n", - CIR_BASE); - rc = -EIO; - goto err_out; - } - base = CIR_BASE; - - superio_select(CIR); - superio_outw(base, BASEREG); - superio_outb(0x00, CIR_ILS); - ciract = superio_inb(ACTREG); - superio_outb(0x01, ACTREG); - if (gp_rreq_fail) { - 
superio_select(GAMEPORT); - superio_outb(gpact, ACTREG); - } - set_bit(WDTS_USE_CIR, &wdt_status); - } + superio_exit(); if (timeout < 1 || timeout > max_units * 60) { timeout = DEFAULT_TIMEOUT; @@ -712,83 +336,25 @@ static int __init it87_wdt_init(void) if (timeout > max_units) timeout = wdt_round_time(timeout); - rc = register_reboot_notifier(&wdt_notifier); - if (rc) { - pr_err("Cannot register reboot notifier (err=%d)\n", rc); - goto err_out_region; - } + wdt_dev.timeout = timeout; + wdt_dev.max_timeout = max_units * 60; - rc = misc_register(&wdt_miscdev); + watchdog_stop_on_reboot(&wdt_dev); + rc = watchdog_register_device(&wdt_dev); if (rc) { - pr_err("Cannot register miscdev on minor=%d (err=%d)\n", - wdt_miscdev.minor, rc); - goto err_out_reboot; - } - - /* Initialize CIR to use it as keepalive source */ - if (test_bit(WDTS_USE_CIR, &wdt_status)) { - outb(0x00, CIR_RCR(base)); - outb(0xc0, CIR_TCR1(base)); - outb(0x5c, CIR_TCR2(base)); - outb(0x10, CIR_IER(base)); - outb(0x00, CIR_BDHR(base)); - outb(0x01, CIR_BDLR(base)); - outb(0x09, CIR_IER(base)); + pr_err("Cannot register watchdog device (err=%d)\n", rc); + return rc; } - pr_info("Chip IT%04x revision %d initialized. timeout=%d sec (nowayout=%d testmode=%d exclusive=%d nogameport=%d nocir=%d)\n", - chip_type, chip_rev, timeout, - nowayout, testmode, exclusive, nogameport, nocir); + pr_info("Chip IT%04x revision %d initialized. timeout=%d sec (nowayout=%d testmode=%d)\n", + chip_type, chip_rev, timeout, nowayout, testmode); - superio_exit(); return 0; - -err_out_reboot: - unregister_reboot_notifier(&wdt_notifier); -err_out_region: - if (test_bit(WDTS_USE_GP, &wdt_status)) - release_region(base, 1); - else if (test_bit(WDTS_USE_CIR, &wdt_status)) { - release_region(base, 8); - superio_select(CIR); - superio_outb(ciract, ACTREG); - } -err_out: - if (try_gameport) { - superio_select(GAMEPORT); - superio_outb(gpact, ACTREG); - } - - superio_exit(); - return rc; } static void __exit it87_wdt_exit(void) { - if (superio_enter() == 0) { - superio_select(GPIO); - superio_outb(0x00, WDTCTRL); - superio_outb(0x00, WDTCFG); - superio_outb(0x00, WDTVALLSB); - if (max_units > 255) - superio_outb(0x00, WDTVALMSB); - if (test_bit(WDTS_USE_GP, &wdt_status)) { - superio_select(GAMEPORT); - superio_outb(gpact, ACTREG); - } else if (test_bit(WDTS_USE_CIR, &wdt_status)) { - superio_select(CIR); - superio_outb(ciract, ACTREG); - } - superio_exit(); - } - - misc_deregister(&wdt_miscdev); - unregister_reboot_notifier(&wdt_notifier); - - if (test_bit(WDTS_USE_GP, &wdt_status)) - release_region(base, 1); - else if (test_bit(WDTS_USE_CIR, &wdt_status)) - release_region(base, 8); + watchdog_unregister_device(&wdt_dev); } module_init(it87_wdt_init); diff --git a/drivers/watchdog/meson_gxbb_wdt.c b/drivers/watchdog/meson_gxbb_wdt.c index 45d47664a00a..69a5a57f1446 100644 --- a/drivers/watchdog/meson_gxbb_wdt.c +++ b/drivers/watchdog/meson_gxbb_wdt.c @@ -203,7 +203,9 @@ static int meson_gxbb_wdt_probe(struct platform_device *pdev) if (IS_ERR(data->clk)) return PTR_ERR(data->clk); - clk_prepare_enable(data->clk); + ret = clk_prepare_enable(data->clk); + if (ret) + return ret; platform_set_drvdata(pdev, data); diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c index 39be4dd8035e..83af7d6cc37c 100644 --- a/drivers/watchdog/orion_wdt.c +++ b/drivers/watchdog/orion_wdt.c @@ -651,5 +651,5 @@ module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); 
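
The it87_wdt conversion above, like the new rza_wdt and stm32_iwdg drivers further down, drops the hand-rolled plumbing (miscdevice, file operations, reboot notifier) in favour of the watchdog core. A bare-bones sketch of the resulting driver shape, with invented bar_* names and the actual register pokes elided (illustrative only, not code from this series):

#include <linux/module.h>
#include <linux/watchdog.h>

static int bar_wdt_start(struct watchdog_device *wdd)
{
	/* program and arm the hardware timer from wdd->timeout here */
	return 0;
}

static int bar_wdt_stop(struct watchdog_device *wdd)
{
	/* disarm the hardware timer here */
	return 0;
}

static int bar_wdt_set_timeout(struct watchdog_device *wdd, unsigned int t)
{
	/* the core has already checked t against min_timeout/max_timeout */
	wdd->timeout = t;
	return bar_wdt_start(wdd);
}

static const struct watchdog_info bar_wdt_info = {
	.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
	.identity = "bar watchdog",
};

static const struct watchdog_ops bar_wdt_ops = {
	.owner = THIS_MODULE,
	.start = bar_wdt_start,
	.stop = bar_wdt_stop,
	.set_timeout = bar_wdt_set_timeout,
};

static struct watchdog_device bar_wdt_dev = {
	.info = &bar_wdt_info,
	.ops = &bar_wdt_ops,
	.min_timeout = 1,
	.max_timeout = 65535,
	.timeout = 60,
};

static int __init bar_wdt_init(void)
{
	/* replaces the old register_reboot_notifier() sequence */
	watchdog_stop_on_reboot(&bar_wdt_dev);
	return watchdog_register_device(&bar_wdt_dev);
}

static void __exit bar_wdt_exit(void)
{
	watchdog_unregister_device(&bar_wdt_dev);
}

module_init(bar_wdt_init);
module_exit(bar_wdt_exit);
MODULE_LICENSE("GPL");

The open/close reference counting, the magic-close character, and the WDIOC_* ioctls that the old it87_wdt implemented by hand all come from the core once the device is registered this way.
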
-MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:orion_wdt"); diff --git a/drivers/watchdog/rza_wdt.c b/drivers/watchdog/rza_wdt.c new file mode 100644 index 000000000000..e618218d2374 --- /dev/null +++ b/drivers/watchdog/rza_wdt.c @@ -0,0 +1,199 @@ +/* + * Renesas RZ/A Series WDT Driver + * + * Copyright (C) 2017 Renesas Electronics America, Inc. + * Copyright (C) 2017 Chris Brandt + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/watchdog.h> + +#define DEFAULT_TIMEOUT 30 + +/* Watchdog Timer Registers */ +#define WTCSR 0 +#define WTCSR_MAGIC 0xA500 +#define WTSCR_WT BIT(6) +#define WTSCR_TME BIT(5) +#define WTSCR_CKS(i) (i) + +#define WTCNT 2 +#define WTCNT_MAGIC 0x5A00 + +#define WRCSR 4 +#define WRCSR_MAGIC 0x5A00 +#define WRCSR_RSTE BIT(6) +#define WRCSR_CLEAR_WOVF 0xA500 /* special value */ + +struct rza_wdt { + struct watchdog_device wdev; + void __iomem *base; + struct clk *clk; +}; + +static int rza_wdt_start(struct watchdog_device *wdev) +{ + struct rza_wdt *priv = watchdog_get_drvdata(wdev); + + /* Stop timer */ + writew(WTCSR_MAGIC | 0, priv->base + WTCSR); + + /* Must dummy read WRCSR:WOVF at least once before clearing */ + readb(priv->base + WRCSR); + writew(WRCSR_CLEAR_WOVF, priv->base + WRCSR); + + /* + * Start timer with slowest clock source and reset option enabled. + */ + writew(WRCSR_MAGIC | WRCSR_RSTE, priv->base + WRCSR); + writew(WTCNT_MAGIC | 0, priv->base + WTCNT); + writew(WTCSR_MAGIC | WTSCR_WT | WTSCR_TME | WTSCR_CKS(7), + priv->base + WTCSR); + + return 0; +} + +static int rza_wdt_stop(struct watchdog_device *wdev) +{ + struct rza_wdt *priv = watchdog_get_drvdata(wdev); + + writew(WTCSR_MAGIC | 0, priv->base + WTCSR); + + return 0; +} + +static int rza_wdt_ping(struct watchdog_device *wdev) +{ + struct rza_wdt *priv = watchdog_get_drvdata(wdev); + + writew(WTCNT_MAGIC | 0, priv->base + WTCNT); + + return 0; +} + +static int rza_wdt_restart(struct watchdog_device *wdev, unsigned long action, + void *data) +{ + struct rza_wdt *priv = watchdog_get_drvdata(wdev); + + /* Stop timer */ + writew(WTCSR_MAGIC | 0, priv->base + WTCSR); + + /* Must dummy read WRCSR:WOVF at least once before clearing */ + readb(priv->base + WRCSR); + writew(WRCSR_CLEAR_WOVF, priv->base + WRCSR); + + /* + * Start timer with fastest clock source and only 1 clock left before + * overflow with reset option enabled. + */ + writew(WRCSR_MAGIC | WRCSR_RSTE, priv->base + WRCSR); + writew(WTCNT_MAGIC | 255, priv->base + WTCNT); + writew(WTCSR_MAGIC | WTSCR_WT | WTSCR_TME, priv->base + WTCSR); + + /* + * Actually make sure the above sequence hits hardware before sleeping. 
+ */ + wmb(); + + /* Wait for WDT overflow (reset) */ + udelay(20); + + return 0; +} + +static const struct watchdog_info rza_wdt_ident = { + .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, + .identity = "Renesas RZ/A WDT Watchdog", +}; + +static const struct watchdog_ops rza_wdt_ops = { + .owner = THIS_MODULE, + .start = rza_wdt_start, + .stop = rza_wdt_stop, + .ping = rza_wdt_ping, + .restart = rza_wdt_restart, +}; + +static int rza_wdt_probe(struct platform_device *pdev) +{ + struct rza_wdt *priv; + struct resource *res; + unsigned long rate; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + + rate = clk_get_rate(priv->clk); + if (rate < 16384) { + dev_err(&pdev->dev, "invalid clock rate (%ld)\n", rate); + return -ENOENT; + } + + /* Assume slowest clock rate possible (CKS=7) */ + rate /= 16384; + + priv->wdev.info = &rza_wdt_ident, + priv->wdev.ops = &rza_wdt_ops, + priv->wdev.parent = &pdev->dev; + + /* + * Since the max possible timeout of our 8-bit count register is less + * than a second, we must use max_hw_heartbeat_ms. + */ + priv->wdev.max_hw_heartbeat_ms = (1000 * U8_MAX) / rate; + dev_dbg(&pdev->dev, "max hw timeout of %dms\n", + priv->wdev.max_hw_heartbeat_ms); + + priv->wdev.min_timeout = 1; + priv->wdev.timeout = DEFAULT_TIMEOUT; + + watchdog_init_timeout(&priv->wdev, 0, &pdev->dev); + watchdog_set_drvdata(&priv->wdev, priv); + + ret = devm_watchdog_register_device(&pdev->dev, &priv->wdev); + if (ret) + dev_err(&pdev->dev, "Cannot register watchdog device\n"); + + return ret; +} + +static const struct of_device_id rza_wdt_of_match[] = { + { .compatible = "renesas,rza-wdt", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rza_wdt_of_match); + +static struct platform_driver rza_wdt_driver = { + .probe = rza_wdt_probe, + .driver = { + .name = "rza_wdt", + .of_match_table = rza_wdt_of_match, + }, +}; + +module_platform_driver(rza_wdt_driver); + +MODULE_DESCRIPTION("Renesas RZ/A WDT Driver"); +MODULE_AUTHOR("Chris Brandt <chris.brandt@renesas.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c index 6ed97596ca80..adaa43543f0a 100644 --- a/drivers/watchdog/s3c2410_wdt.c +++ b/drivers/watchdog/s3c2410_wdt.c @@ -1,5 +1,4 @@ -/* linux/drivers/char/watchdog/s3c2410_wdt.c - * +/* * Copyright (c) 2004 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * @@ -17,11 +16,7 @@ * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
-*/
+ */
 
 #include <linux/module.h>
 #include <linux/moduleparam.h>
@@ -37,6 +32,7 @@
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>
 #include <linux/delay.h>
@@ -94,8 +90,7 @@ MODULE_PARM_DESC(tmr_atboot,
 		__MODULE_STRING(S3C2410_WATCHDOG_ATBOOT));
 MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
			__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
-MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, "
-			"0 to reboot (default 0)");
+MODULE_PARM_DESC(soft_noboot, "Watchdog action, set to 1 to ignore reboots, 0 to reboot (default 0)");
 
 /**
  * struct s3c2410_wdt_variant - Per-variant config data
@@ -131,7 +126,7 @@ struct s3c2410_wdt {
	unsigned long		wtdat_save;
	struct watchdog_device	wdt_device;
	struct notifier_block	freq_transition;
-	struct s3c2410_wdt_variant *drv_data;
+	const struct s3c2410_wdt_variant *drv_data;
	struct regmap *pmureg;
 };
 
@@ -310,7 +305,8 @@ static inline int s3c2410wdt_is_running(struct s3c2410_wdt *wdt)
	return readl(wdt->reg_base + S3C2410_WTCON) & S3C2410_WTCON_ENABLE;
 }
 
-static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd, unsigned timeout)
+static int s3c2410wdt_set_heartbeat(struct watchdog_device *wdd,
+				    unsigned int timeout)
 {
	struct s3c2410_wdt *wdt = watchdog_get_drvdata(wdd);
	unsigned long freq = clk_get_rate(wdt->clock);
@@ -401,7 +397,7 @@ static const struct watchdog_ops s3c2410wdt_ops = {
	.restart = s3c2410wdt_restart,
 };
 
-static struct watchdog_device s3c2410_wdd = {
+static const struct watchdog_device s3c2410_wdd = {
	.info = &s3c2410_wdt_ident,
	.ops = &s3c2410wdt_ops,
	.timeout = S3C2410_WATCHDOG_DEFAULT_TIME,
@@ -507,22 +503,24 @@ static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt)
	return 0;
 }
 
-static inline struct s3c2410_wdt_variant *
+static inline const struct s3c2410_wdt_variant *
 s3c2410_get_wdt_drv_data(struct platform_device *pdev)
 {
-	if (pdev->dev.of_node) {
-		const struct of_device_id *match;
-		match = of_match_node(s3c2410_wdt_match, pdev->dev.of_node);
-		return (struct s3c2410_wdt_variant *)match->data;
-	} else {
-		return (struct s3c2410_wdt_variant *)
-			platform_get_device_id(pdev)->driver_data;
+	const struct s3c2410_wdt_variant *variant;
+
+	variant = of_device_get_match_data(&pdev->dev);
+	if (!variant) {
+		/* Device matched by platform_device_id */
+		variant = (struct s3c2410_wdt_variant *)
+			   platform_get_device_id(pdev)->driver_data;
	}
+
+	return variant;
 }
 
 static int s3c2410wdt_probe(struct platform_device *pdev)
 {
-	struct device *dev;
+	struct device *dev = &pdev->dev;
	struct s3c2410_wdt *wdt;
	struct resource *wdt_mem;
	struct resource *wdt_irq;
@@ -530,13 +528,11 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
	int started = 0;
	int ret;
 
-	dev = &pdev->dev;
-
	wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
	if (!wdt)
		return -ENOMEM;
 
-	wdt->dev = &pdev->dev;
+	wdt->dev = dev;
	spin_lock_init(&wdt->lock);
	wdt->wdt_device = s3c2410_wdd;
 
@@ -592,7 +588,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
	/* see if we can actually set the requested timer margin, and if
	 * not, try the default value */
 
-	watchdog_init_timeout(&wdt->wdt_device, tmr_margin, &pdev->dev);
+	watchdog_init_timeout(&wdt->wdt_device, tmr_margin, dev);
	ret = s3c2410wdt_set_heartbeat(&wdt->wdt_device,
					wdt->wdt_device.timeout);
	if (ret) {
@@ -601,11 +597,10 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
 
		if (started == 0)
			dev_info(dev,
-			   "tmr_margin value out of range, default %d used\n",
-			       S3C2410_WATCHDOG_DEFAULT_TIME);
+				 "tmr_margin value out of range, default %d used\n",
+				 S3C2410_WATCHDOG_DEFAULT_TIME);
		else
-			dev_info(dev, "default timer value is out of range, "
-							"cannot start\n");
+			dev_info(dev, "default timer value is out of range, cannot start\n");
	}
 
	ret = devm_request_irq(dev, wdt_irq->start, s3c2410wdt_irq, 0,
@@ -619,7 +614,7 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
	watchdog_set_restart_priority(&wdt->wdt_device, 128);
 
	wdt->wdt_device.bootstatus = s3c2410wdt_get_bootstatus(wdt);
-	wdt->wdt_device.parent = &pdev->dev;
+	wdt->wdt_device.parent = dev;
 
	ret = watchdog_register_device(&wdt->wdt_device);
	if (ret) {
@@ -754,7 +749,6 @@ static struct platform_driver s3c2410wdt_driver = {
 
 module_platform_driver(s3c2410wdt_driver);
 
-MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, "
-	      "Dimitry Andric <dimitry.andric@tomtom.com>");
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>, Dimitry Andric <dimitry.andric@tomtom.com>");
 MODULE_DESCRIPTION("S3C2410 Watchdog Device Driver");
 MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/sama5d4_wdt.c b/drivers/watchdog/sama5d4_wdt.c
index 362fd229786d..0ae947c3d7bc 100644
--- a/drivers/watchdog/sama5d4_wdt.c
+++ b/drivers/watchdog/sama5d4_wdt.c
@@ -228,15 +228,13 @@ static int sama5d4_wdt_probe(struct platform_device *pdev)
 
	wdt->reg_base = regs;
 
-	if (pdev->dev.of_node) {
-		irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
-		if (!irq)
-			dev_warn(&pdev->dev, "failed to get IRQ from DT\n");
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (!irq)
+		dev_warn(&pdev->dev, "failed to get IRQ from DT\n");
 
-		ret = of_sama5d4_wdt_init(pdev->dev.of_node, wdt);
-		if (ret)
-			return ret;
-	}
+	ret = of_sama5d4_wdt_init(pdev->dev.of_node, wdt);
+	if (ret)
+		return ret;
 
	if ((wdt->mr & AT91_WDT_WDFIEN) && irq) {
		ret = devm_request_irq(&pdev->dev, irq, sama5d4_wdt_irq_handler,
@@ -302,6 +300,11 @@ static int sama5d4_wdt_resume(struct device *dev)
 {
	struct sama5d4_wdt *wdt = dev_get_drvdata(dev);
 
+	/*
+	 * FIXME: writing MR also pings the watchdog which may not be desired.
+	 * This should only be done when the registers are lost on suspend but
+	 * there is no way to get this information right now.
+	 */
	sama5d4_wdt_init(wdt);
 
	return 0;
diff --git a/drivers/watchdog/stm32_iwdg.c b/drivers/watchdog/stm32_iwdg.c
new file mode 100644
index 000000000000..6c501b7dba29
--- /dev/null
+++ b/drivers/watchdog/stm32_iwdg.c
@@ -0,0 +1,253 @@
+/*
+ * Driver for STM32 Independent Watchdog
+ *
+ * Copyright (C) Yannick Fertre 2017
+ * Author: Yannick Fertre <yannick.fertre@st.com>
+ *
+ * This driver is based on tegra_wdt.c
+ *
+ * License terms: GNU General Public License (GPL), version 2
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/watchdog.h>
+
+/* IWDG registers */
+#define IWDG_KR		0x00 /* Key register */
+#define IWDG_PR		0x04 /* Prescaler Register */
+#define IWDG_RLR	0x08 /* ReLoad Register */
+#define IWDG_SR		0x0C /* Status Register */
+#define IWDG_WINR	0x10 /* Window Register */
+
+/* IWDG_KR register bit mask */
+#define KR_KEY_RELOAD	0xAAAA /* reload counter enable */
+#define KR_KEY_ENABLE	0xCCCC /* peripheral enable */
+#define KR_KEY_EWA	0x5555 /* write access enable */
+#define KR_KEY_DWA	0x0000 /* write access disable */
+
+/* IWDG_PR register bit values */
+#define PR_4		0x00 /* prescaler set to 4 */
+#define PR_8		0x01 /* prescaler set to 8 */
+#define PR_16		0x02 /* prescaler set to 16 */
+#define PR_32		0x03 /* prescaler set to 32 */
+#define PR_64		0x04 /* prescaler set to 64 */
+#define PR_128		0x05 /* prescaler set to 128 */
+#define PR_256		0x06 /* prescaler set to 256 */
+
+/* IWDG_RLR register values */
+#define RLR_MIN		0x07C /* min value supported by reload register */
+#define RLR_MAX		0xFFF /* max value supported by reload register */
+
+/* IWDG_SR register bit mask */
+#define FLAG_PVU	BIT(0) /* Watchdog prescaler value update */
+#define FLAG_RVU	BIT(1) /* Watchdog counter reload value update */
+
+/* set timeout to 100000 us */
+#define TIMEOUT_US	100000
+#define SLEEP_US	1000
+
+struct stm32_iwdg {
+	struct watchdog_device	wdd;
+	void __iomem		*regs;
+	struct clk		*clk;
+	unsigned int		rate;
+};
+
+static inline u32 reg_read(void __iomem *base, u32 reg)
+{
+	return readl_relaxed(base + reg);
+}
+
+static inline void reg_write(void __iomem *base, u32 reg, u32 val)
+{
+	writel_relaxed(val, base + reg);
+}
+
+static int stm32_iwdg_start(struct watchdog_device *wdd)
+{
+	struct stm32_iwdg *wdt = watchdog_get_drvdata(wdd);
+	u32 val = FLAG_PVU | FLAG_RVU;
+	u32 reload;
+	int ret;
+
+	dev_dbg(wdd->parent, "%s\n", __func__);
+
+	/* prescaler fixed to 256 */
+	reload = clamp_t(unsigned int, ((wdd->timeout * wdt->rate) / 256) - 1,
+			 RLR_MIN, RLR_MAX);
+
+	/* enable write access */
+	reg_write(wdt->regs, IWDG_KR, KR_KEY_EWA);
+
+	/* set prescaler & reload registers */
+	reg_write(wdt->regs, IWDG_PR, PR_256); /* prescaler fixed to 256 */
+	reg_write(wdt->regs, IWDG_RLR, reload);
+	reg_write(wdt->regs, IWDG_KR, KR_KEY_ENABLE);
+
+	/* wait for the registers to be updated (max 100ms) */
+	ret = readl_relaxed_poll_timeout(wdt->regs + IWDG_SR, val,
+					 !(val & (FLAG_PVU | FLAG_RVU)),
+					 SLEEP_US, TIMEOUT_US);
+	if (ret) {
+		dev_err(wdd->parent,
+			"Failed to set prescaler or reload registers\n");
+		return ret;
+	}
+
+	/* reload watchdog */
+	reg_write(wdt->regs, IWDG_KR, KR_KEY_RELOAD);
+
+	return 0;
+}
+
+static int stm32_iwdg_ping(struct watchdog_device *wdd)
+{
+	struct stm32_iwdg *wdt = watchdog_get_drvdata(wdd);
+
+	dev_dbg(wdd->parent, "%s\n", __func__);
+
+	/* reload watchdog */
+	reg_write(wdt->regs, IWDG_KR, KR_KEY_RELOAD);
+
+	return 0;
+}
+
+static int stm32_iwdg_set_timeout(struct watchdog_device *wdd,
+				  unsigned int timeout)
+{
+	dev_dbg(wdd->parent, "%s timeout: %d sec\n", __func__, timeout);
+
+	wdd->timeout = timeout;
+
+	if (watchdog_active(wdd))
+		return stm32_iwdg_start(wdd);
+
+	return 0;
+}
+
+static const struct watchdog_info stm32_iwdg_info = {
+	.options	= WDIOF_SETTIMEOUT |
+			  WDIOF_MAGICCLOSE |
+			  WDIOF_KEEPALIVEPING,
+	.identity	= "STM32 Independent Watchdog",
+};
+
+static const struct watchdog_ops stm32_iwdg_ops = {
+	.owner		= THIS_MODULE,
+	.start		= stm32_iwdg_start,
+	.ping		= stm32_iwdg_ping,
+	.set_timeout	= stm32_iwdg_set_timeout,
+};
+
+static int stm32_iwdg_probe(struct platform_device *pdev)
+{
+	struct watchdog_device *wdd;
+	struct stm32_iwdg *wdt;
+	struct resource *res;
+	void __iomem *regs;
+	struct clk *clk;
+	int ret;
+
+	/* This is the IWDG register base. */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	regs = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(regs)) {
+		dev_err(&pdev->dev, "Could not get resource\n");
+		return PTR_ERR(regs);
+	}
+
+	clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(clk)) {
+		dev_err(&pdev->dev, "Unable to get clock\n");
+		return PTR_ERR(clk);
+	}
+
+	ret = clk_prepare_enable(clk);
+	if (ret) {
+		dev_err(&pdev->dev, "Unable to prepare clock %p\n", clk);
+		return ret;
+	}
+
+	/*
+	 * Allocate our watchdog driver data, which has the
+	 * struct watchdog_device nested within it.
+	 */
+	wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
+	if (!wdt) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* Initialize struct stm32_iwdg. */
+	wdt->regs = regs;
+	wdt->clk = clk;
+	wdt->rate = clk_get_rate(clk);
+
+	/* Initialize struct watchdog_device. */
+	wdd = &wdt->wdd;
+	wdd->info = &stm32_iwdg_info;
+	wdd->ops = &stm32_iwdg_ops;
+	wdd->min_timeout = ((RLR_MIN + 1) * 256) / wdt->rate;
+	wdd->max_hw_heartbeat_ms = ((RLR_MAX + 1) * 256 * 1000) / wdt->rate;
+	wdd->parent = &pdev->dev;
+
+	watchdog_set_drvdata(wdd, wdt);
+	watchdog_set_nowayout(wdd, WATCHDOG_NOWAYOUT);
+
+	ret = watchdog_init_timeout(wdd, 0, &pdev->dev);
+	if (ret)
+		dev_warn(&pdev->dev,
+			 "unable to set timeout value, using default\n");
+
+	ret = watchdog_register_device(wdd);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register watchdog device\n");
+		goto err;
+	}
+
+	platform_set_drvdata(pdev, wdt);
+
+	return 0;
+err:
+	clk_disable_unprepare(clk);
+
+	return ret;
+}
+
+static int stm32_iwdg_remove(struct platform_device *pdev)
+{
+	struct stm32_iwdg *wdt = platform_get_drvdata(pdev);
+
+	watchdog_unregister_device(&wdt->wdd);
+	clk_disable_unprepare(wdt->clk);
+
+	return 0;
+}
+
+static const struct of_device_id stm32_iwdg_of_match[] = {
+	{ .compatible = "st,stm32-iwdg" },
+	{ /* end node */ }
+};
+MODULE_DEVICE_TABLE(of, stm32_iwdg_of_match);
+
+static struct platform_driver stm32_iwdg_driver = {
+	.probe		= stm32_iwdg_probe,
+	.remove		= stm32_iwdg_remove,
+	.driver = {
+		.name		= "iwdg",
+		.of_match_table = stm32_iwdg_of_match,
+	},
+};
+module_platform_driver(stm32_iwdg_driver);
+
+MODULE_AUTHOR("Yannick Fertre <yannick.fertre@st.com>");
+MODULE_DESCRIPTION("STMicroelectronics STM32 Independent Watchdog Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/uniphier_wdt.c b/drivers/watchdog/uniphier_wdt.c
new file mode 100644
index 000000000000..0ea2339d9702
--- /dev/null
+++ b/drivers/watchdog/uniphier_wdt.c
@@ -0,0 +1,268 @@
+/*
+ * Watchdog driver for the UniPhier watchdog timer
+ *
+ * (c) Copyright 2014 Panasonic Corporation
+ * (c) Copyright 2016 Socionext Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/mfd/syscon.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/watchdog.h>
+
+/* WDT timer setting register */
+#define WDTTIMSET			0x3004
+#define   WDTTIMSET_PERIOD_MASK		(0xf << 0)
+#define   WDTTIMSET_PERIOD_1_SEC	(0x3 << 0)
+
+/* WDT reset selection register */
+#define WDTRSTSEL			0x3008
+#define   WDTRSTSEL_RSTSEL_MASK		(0x3 << 0)
+#define   WDTRSTSEL_RSTSEL_BOTH		(0x0 << 0)
+#define   WDTRSTSEL_RSTSEL_IRQ_ONLY	(0x2 << 0)
+
+/* WDT control register */
+#define WDTCTRL				0x300c
+#define   WDTCTRL_STATUS		BIT(8)
+#define   WDTCTRL_CLEAR			BIT(1)
+#define   WDTCTRL_ENABLE		BIT(0)
+
+#define SEC_TO_WDTTIMSET_PRD(sec) \
+		(ilog2(sec) + WDTTIMSET_PERIOD_1_SEC)
+
+#define WDTST_TIMEOUT			1000 /* usec */
+
+#define WDT_DEFAULT_TIMEOUT		64   /* Default is 64 seconds */
+#define WDT_PERIOD_MIN			1
+#define WDT_PERIOD_MAX			128
+
+static unsigned int timeout;
+static bool nowayout = WATCHDOG_NOWAYOUT;
+
+struct uniphier_wdt_dev {
+	struct watchdog_device wdt_dev;
+	struct regmap	*regmap;
+};
+
+/*
+ * UniPhier Watchdog operations
+ */
+static int uniphier_watchdog_ping(struct watchdog_device *w)
+{
+	struct uniphier_wdt_dev *wdev = watchdog_get_drvdata(w);
+	unsigned int val;
+	int ret;
+
+	/* Clear counter */
+	ret = regmap_write_bits(wdev->regmap, WDTCTRL,
+				WDTCTRL_CLEAR, WDTCTRL_CLEAR);
+	if (!ret)
+		/*
+		 * Per the SoC specification, after clearing the counter
+		 * we have to wait until the counter status reads 1.
+		 */
+		ret = regmap_read_poll_timeout(wdev->regmap, WDTCTRL, val,
+					       (val & WDTCTRL_STATUS),
+					       0, WDTST_TIMEOUT);
+
+	return ret;
+}
+
+static int __uniphier_watchdog_start(struct regmap *regmap, unsigned int sec)
+{
+	unsigned int val;
+	int ret;
+
+	ret = regmap_read_poll_timeout(regmap, WDTCTRL, val,
+				       !(val & WDTCTRL_STATUS),
+				       0, WDTST_TIMEOUT);
+	if (ret)
+		return ret;
+
+	/* Setup period */
+	ret = regmap_write(regmap, WDTTIMSET,
+			   SEC_TO_WDTTIMSET_PRD(sec));
+	if (ret)
+		return ret;
+
+	/* Enable and clear watchdog */
+	ret = regmap_write(regmap, WDTCTRL, WDTCTRL_ENABLE | WDTCTRL_CLEAR);
+	if (!ret)
+		/*
+		 * Per the SoC specification, after clearing the counter
+		 * we have to wait until the counter status reads 1.
+		 */
+		ret = regmap_read_poll_timeout(regmap, WDTCTRL, val,
+					       (val & WDTCTRL_STATUS),
+					       0, WDTST_TIMEOUT);
+
+	return ret;
+}
+
+static int __uniphier_watchdog_stop(struct regmap *regmap)
+{
+	/* Disable and stop watchdog */
+	return regmap_write_bits(regmap, WDTCTRL, WDTCTRL_ENABLE, 0);
+}
+
+static int __uniphier_watchdog_restart(struct regmap *regmap, unsigned int sec)
+{
+	int ret;
+
+	ret = __uniphier_watchdog_stop(regmap);
+	if (ret)
+		return ret;
+
+	return __uniphier_watchdog_start(regmap, sec);
+}
+
+static int uniphier_watchdog_start(struct watchdog_device *w)
+{
+	struct uniphier_wdt_dev *wdev = watchdog_get_drvdata(w);
+	unsigned int tmp_timeout;
+
+	tmp_timeout = roundup_pow_of_two(w->timeout);
+
+	return __uniphier_watchdog_start(wdev->regmap, tmp_timeout);
+}
+
+static int uniphier_watchdog_stop(struct watchdog_device *w)
+{
+	struct uniphier_wdt_dev *wdev = watchdog_get_drvdata(w);
+
+	return __uniphier_watchdog_stop(wdev->regmap);
+}
+
+static int uniphier_watchdog_set_timeout(struct watchdog_device *w,
+					 unsigned int t)
+{
+	struct uniphier_wdt_dev *wdev = watchdog_get_drvdata(w);
+	unsigned int tmp_timeout;
+	int ret;
+
+	tmp_timeout = roundup_pow_of_two(t);
+	if (tmp_timeout == w->timeout)
+		return 0;
+
+	if (watchdog_active(w)) {
+		ret = __uniphier_watchdog_restart(wdev->regmap, tmp_timeout);
+		if (ret)
+			return ret;
+	}
+
+	w->timeout = tmp_timeout;
+
+	return 0;
+}
+
+/*
+ * Kernel Interfaces
+ */
+static const struct watchdog_info uniphier_wdt_info = {
+	.identity	= "uniphier-wdt",
+	.options	= WDIOF_SETTIMEOUT |
+			  WDIOF_KEEPALIVEPING |
+			  WDIOF_MAGICCLOSE |
+			  WDIOF_OVERHEAT,
+};
+
+static const struct watchdog_ops uniphier_wdt_ops = {
+	.owner		= THIS_MODULE,
+	.start		= uniphier_watchdog_start,
+	.stop		= uniphier_watchdog_stop,
+	.ping		= uniphier_watchdog_ping,
+	.set_timeout	= uniphier_watchdog_set_timeout,
+};
+
+static int uniphier_wdt_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct uniphier_wdt_dev *wdev;
+	struct regmap *regmap;
+	struct device_node *parent;
+	int ret;
+
+	wdev = devm_kzalloc(dev, sizeof(*wdev), GFP_KERNEL);
+	if (!wdev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, wdev);
+
+	parent = of_get_parent(dev->of_node); /* parent should be syscon node */
+	regmap = syscon_node_to_regmap(parent);
+	of_node_put(parent);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
+
+	wdev->regmap = regmap;
+	wdev->wdt_dev.info = &uniphier_wdt_info;
+	wdev->wdt_dev.ops = &uniphier_wdt_ops;
+	wdev->wdt_dev.max_timeout = WDT_PERIOD_MAX;
+	wdev->wdt_dev.min_timeout = WDT_PERIOD_MIN;
+	wdev->wdt_dev.parent = dev;
+
+	if (watchdog_init_timeout(&wdev->wdt_dev, timeout, dev) < 0)
+		wdev->wdt_dev.timeout = WDT_DEFAULT_TIMEOUT;
+
+	watchdog_set_nowayout(&wdev->wdt_dev, nowayout);
+	watchdog_stop_on_reboot(&wdev->wdt_dev);
+
+	watchdog_set_drvdata(&wdev->wdt_dev, wdev);
+
+	uniphier_watchdog_stop(&wdev->wdt_dev);
+	ret = regmap_write(wdev->regmap, WDTRSTSEL, WDTRSTSEL_RSTSEL_BOTH);
+	if (ret)
+		return ret;
+
+	ret = devm_watchdog_register_device(dev, &wdev->wdt_dev);
+	if (ret)
+		return ret;
+
+	dev_info(dev, "watchdog driver (timeout=%d sec, nowayout=%d)\n",
+		 wdev->wdt_dev.timeout, nowayout);
+
+	return 0;
+}
+
+static const struct of_device_id uniphier_wdt_dt_ids[] = {
+	{ .compatible = "socionext,uniphier-wdt" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, uniphier_wdt_dt_ids);
+
+static struct platform_driver uniphier_wdt_driver = {
+	.probe		= uniphier_wdt_probe,
+	.driver = {
+		.name		= "uniphier-wdt",
+		.of_match_table	= uniphier_wdt_dt_ids,
+	},
+};
+
+module_platform_driver(uniphier_wdt_driver);
+
+module_param(timeout, uint, 0000);
+MODULE_PARM_DESC(timeout,
+	"Watchdog timeout in seconds, rounded up to a power of 2 (0 < timeout < 128, default="
+	__MODULE_STRING(WDT_DEFAULT_TIMEOUT) ")");
+
+module_param(nowayout, bool, 0000);
+MODULE_PARM_DESC(nowayout,
+	"Watchdog cannot be stopped once started (default="
+	__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+
+MODULE_AUTHOR("Keiji Hayashibara <hayashibara.keiji@socionext.com>");
+MODULE_DESCRIPTION("UniPhier Watchdog Device Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/w83627hf_wdt.c b/drivers/watchdog/w83627hf_wdt.c
index 98fd186c6878..d9ba0496713c 100644
--- a/drivers/watchdog/w83627hf_wdt.c
+++ b/drivers/watchdog/w83627hf_wdt.c
@@ -49,7 +49,8 @@ static int cr_wdt_csr;		/* WDT control & status register */
 
 enum chips { w83627hf, w83627s, w83697hf, w83697ug, w83637hf, w83627thf,
	     w83687thf, w83627ehf, w83627dhg, w83627uhg, w83667hg, w83627dhg_p,
-	     w83667hg_b, nct6775, nct6776, nct6779, nct6791, nct6792, nct6102 };
+	     w83667hg_b, nct6775, nct6776, nct6779, nct6791, nct6792, nct6793,
+	     nct6795, nct6102 };
 
 static int timeout;			/* in seconds */
 module_param(timeout, int, 0);
@@ -97,6 +98,8 @@ MODULE_PARM_DESC(early_disable, "Disable watchdog at boot time (default=0)");
 #define NCT6779_ID		0xc5
 #define NCT6791_ID		0xc8
 #define NCT6792_ID		0xc9
+#define NCT6793_ID		0xd1
+#define NCT6795_ID		0xd3
 
 #define W83627HF_WDT_TIMEOUT	0xf6
 #define W83697HF_WDT_TIMEOUT	0xf4
@@ -204,6 +207,8 @@ static int w83627hf_init(struct watchdog_device *wdog, enum chips chip)
	case nct6779:
	case nct6791:
	case nct6792:
+	case nct6793:
+	case nct6795:
	case nct6102:
		/*
		 * These chips have a fixed WDTO# output pin (W83627UHG),
@@ -396,6 +401,12 @@ static int wdt_find(int addr)
	case NCT6792_ID:
		ret = nct6792;
		break;
+	case NCT6793_ID:
+		ret = nct6793;
+		break;
+	case NCT6795_ID:
+		ret = nct6795;
+		break;
	case NCT6102_ID:
		ret = nct6102;
		cr_wdt_timeout = NCT6102D_WDT_TIMEOUT;
@@ -437,6 +448,8 @@ static int __init wdt_init(void)
		"NCT6779",
		"NCT6791",
		"NCT6792",
+		"NCT6793",
+		"NCT6795",
		"NCT6102",
	};
diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c
index d5d2bbd8f428..0826e663bd5a 100644
--- a/drivers/watchdog/watchdog_dev.c
+++ b/drivers/watchdog/watchdog_dev.c
@@ -80,6 +80,9 @@ static struct watchdog_core_data *old_wd_data;
 
 static struct workqueue_struct *watchdog_wq;
 
+static bool handle_boot_enabled =
+	IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED);
+
 static inline bool watchdog_need_worker(struct watchdog_device *wdd)
 {
	/* All variables in milli-seconds */
@@ -192,18 +195,23 @@ static int watchdog_ping(struct watchdog_device *wdd)
	return __watchdog_ping(wdd);
 }
 
+static bool watchdog_worker_should_ping(struct watchdog_core_data *wd_data)
+{
+	struct watchdog_device *wdd = wd_data->wdd;
+
+	return wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd));
+}
+
 static void watchdog_ping_work(struct work_struct *work)
 {
	struct watchdog_core_data *wd_data;
-	struct watchdog_device *wdd;
 
	wd_data = container_of(to_delayed_work(work), struct watchdog_core_data,
			       work);
 
	mutex_lock(&wd_data->lock);
-	wdd = wd_data->wdd;
-	if (wdd && (watchdog_active(wdd) || watchdog_hw_running(wdd)))
-		__watchdog_ping(wdd);
+	if (watchdog_worker_should_ping(wd_data))
+		__watchdog_ping(wd_data->wdd);
	mutex_unlock(&wd_data->lock);
 }
 
@@ -956,9 +964,14 @@ static int watchdog_cdev_register(struct watchdog_device *wdd, dev_t devno)
	 * and schedule an immediate ping.
	 */
	if (watchdog_hw_running(wdd)) {
-		__module_get(wdd->ops->owner);
-		kref_get(&wd_data->kref);
-		queue_delayed_work(watchdog_wq, &wd_data->work, 0);
+		if (handle_boot_enabled) {
+			__module_get(wdd->ops->owner);
+			kref_get(&wd_data->kref);
+			queue_delayed_work(watchdog_wq, &wd_data->work, 0);
+		} else {
+			pr_info("watchdog%d running and kernel based pre-userspace handler disabled\n",
+				wdd->id);
+		}
	}
 
	return 0;
@@ -1106,3 +1119,8 @@ void __exit watchdog_dev_exit(void)
 {
	class_unregister(&watchdog_class);
	destroy_workqueue(watchdog_wq);
 }
+
+module_param(handle_boot_enabled, bool, 0444);
+MODULE_PARM_DESC(handle_boot_enabled,
+	"Watchdog core auto-updates boot enabled watchdogs before userspace takes over (default="
+	__MODULE_STRING(IS_ENABLED(CONFIG_WATCHDOG_HANDLE_BOOT_ENABLED)) ")");
diff --git a/drivers/watchdog/zx2967_wdt.c b/drivers/watchdog/zx2967_wdt.c
index c98252733c30..69ec5855584b 100644
--- a/drivers/watchdog/zx2967_wdt.c
+++ b/drivers/watchdog/zx2967_wdt.c
@@ -154,7 +154,7 @@ static const struct watchdog_info zx2967_wdt_ident = {
	.identity = "zx2967 watchdog",
 };
 
-static struct watchdog_ops zx2967_wdt_ops = {
+static const struct watchdog_ops zx2967_wdt_ops = {
	.owner = THIS_MODULE,
	.start = zx2967_wdt_start,
	.stop = zx2967_wdt_stop,
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 50dcb68d8070..ab609255a0f3 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -780,6 +780,9 @@ static int __init balloon_init(void)
	}
 #endif
 
+	/* Init the xen-balloon driver. */
+	xen_balloon_init();
+
	return 0;
 }
 subsys_initcall(balloon_init);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index d6786b87e13b..2c6a9114d332 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -42,6 +42,7 @@
 #include <linux/delay.h>
 #include <linux/hardirq.h>
 #include <linux/workqueue.h>
+#include <linux/ratelimit.h>
 
 #include <xen/xen.h>
 #include <xen/interface/xen.h>
@@ -1072,8 +1073,14 @@ static int gnttab_expand(unsigned int req_entries)
	cur = nr_grant_frames;
	extra = ((req_entries + (grefs_per_grant_frame-1)) /
		 grefs_per_grant_frame);
-	if (cur + extra > gnttab_max_grant_frames())
+	if (cur + extra > gnttab_max_grant_frames()) {
+		pr_warn_ratelimited("xen/grant-table: max_grant_frames reached cur=%u extra=%u limit=%u gnttab_free_count=%u req_entries=%u\n",
+				    cur, extra, gnttab_max_grant_frames(),
+				    gnttab_free_count, req_entries);
		return -ENOSPC;
+	}
 
	rc = gnttab_map(cur, cur + extra - 1);
	if (rc == 0)
diff --git a/drivers/xen/xen-balloon.c b/drivers/xen/xen-balloon.c
index e7715cb62eef..e89136ab851e 100644
--- a/drivers/xen/xen-balloon.c
+++ b/drivers/xen/xen-balloon.c
@@ -59,6 +59,8 @@ static void watch_target(struct xenbus_watch *watch,
 {
	unsigned long long new_target;
	int err;
+	static bool watch_fired;
+	static long target_diff;
 
	err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
	if (err != 1) {
@@ -69,7 +71,14 @@ static void watch_target(struct xenbus_watch *watch,
	/* The given memory/target value is in KiB, so it needs converting to
	 * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
	 */
-	balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
+	new_target >>= PAGE_SHIFT - 10;
+	if (watch_fired) {
+		balloon_set_new_target(new_target - target_diff);
+		return;
+	}
+
+	watch_fired = true;
+	target_diff = new_target - balloon_stats.target_pages;
 }
 
 static struct xenbus_watch target_watch = {
	.node = "memory/target",
@@ -94,22 +103,15 @@ static struct notifier_block xenstore_notifier = {
	.notifier_call = balloon_init_watcher,
 };
 
-static int __init balloon_init(void)
+void xen_balloon_init(void)
 {
-	if (!xen_domain())
-		return -ENODEV;
-
-	pr_info("Initialising balloon driver\n");
-
	register_balloon(&balloon_dev);
 
	register_xen_selfballooning(&balloon_dev);
 
	register_xenstore_notifier(&xenstore_notifier);
-
-	return 0;
 }
-subsys_initcall(balloon_init);
+EXPORT_SYMBOL_GPL(xen_balloon_init);
 
 #define BALLOON_SHOW(name, format, args...)		\
	static ssize_t show_##name(struct device *dev,	\
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index d6950e0802b7..7bc88fd43cfc 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -134,11 +134,8 @@ struct vscsibk_pend {
	struct page *pages[VSCSI_MAX_GRANTS];
 
	struct se_cmd se_cmd;
-};
 
-struct scsiback_tmr {
-	atomic_t tmr_complete;
-	wait_queue_head_t tmr_wait;
+	struct completion tmr_done;
 };
 
 #define VSCSI_DEFAULT_SESSION_TAGS	128
@@ -599,36 +596,28 @@ static void scsiback_device_action(struct vscsibk_pend *pending_req,
	struct scsiback_tpg *tpg = pending_req->v2p->tpg;
	struct scsiback_nexus *nexus = tpg->tpg_nexus;
	struct se_cmd *se_cmd = &pending_req->se_cmd;
-	struct scsiback_tmr *tmr;
	u64 unpacked_lun = pending_req->v2p->lun;
	int rc, err = FAILED;
 
-	tmr = kzalloc(sizeof(struct scsiback_tmr), GFP_KERNEL);
-	if (!tmr) {
-		target_put_sess_cmd(se_cmd);
-		goto err;
-	}
-
-	init_waitqueue_head(&tmr->tmr_wait);
+	init_completion(&pending_req->tmr_done);
 
	rc = target_submit_tmr(&pending_req->se_cmd, nexus->tvn_se_sess,
			       &pending_req->sense_buffer[0],
-			       unpacked_lun, tmr, act, GFP_KERNEL,
+			       unpacked_lun, NULL, act, GFP_KERNEL,
			       tag, TARGET_SCF_ACK_KREF);
	if (rc)
		goto err;
 
-	wait_event(tmr->tmr_wait, atomic_read(&tmr->tmr_complete));
+	wait_for_completion(&pending_req->tmr_done);
 
	err = (se_cmd->se_tmr_req->response == TMR_FUNCTION_COMPLETE) ?
		SUCCESS : FAILED;
 
	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
-	transport_generic_free_cmd(&pending_req->se_cmd, 1);
+	transport_generic_free_cmd(&pending_req->se_cmd, 0);
	return;
+
 err:
-	if (tmr)
-		kfree(tmr);
	scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
 }
 
@@ -1389,12 +1378,6 @@ static int scsiback_check_stop_free(struct se_cmd *se_cmd)
 static void scsiback_release_cmd(struct se_cmd *se_cmd)
 {
	struct se_session *se_sess = se_cmd->se_sess;
-	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
-
-	if (se_tmr && se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
-		struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
-		kfree(tmr);
-	}
 
	percpu_ida_free(&se_sess->sess_tag_pool, se_cmd->map_tag);
 }
@@ -1455,11 +1438,10 @@ static int scsiback_queue_status(struct se_cmd *se_cmd)
 
 static void scsiback_queue_tm_rsp(struct se_cmd *se_cmd)
 {
-	struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
-	struct scsiback_tmr *tmr = se_tmr->fabric_tmr_ptr;
+	struct vscsibk_pend *pending_req = container_of(se_cmd,
+				struct vscsibk_pend, se_cmd);
 
-	atomic_set(&tmr->tmr_complete, 1);
-	wake_up(&tmr->tmr_wait);
+	complete(&pending_req->tmr_done);
 }
 
 static void scsiback_aborted_task(struct se_cmd *se_cmd)