| author | Jason Gunthorpe <jgg@nvidia.com> | 2021-02-18 18:17:24 +0300 |
| --- | --- | --- |
| committer | Jason Gunthorpe <jgg@nvidia.com> | 2021-02-18 18:19:29 +0300 |
| commit | 7289e26f395b583f68b676d4d12a0971e4f6f65c (patch) | |
| tree | 99f8abbb112a3144094e0082dd446439b930beea /drivers | |
| parent | ed408529679737a9a7ad816c8de5d59ba104bb11 (diff) | |
| parent | f40ddce88593482919761f74910f42f4b84c004b (diff) | |
| download | linux-7289e26f395b583f68b676d4d12a0971e4f6f65c.tar.xz | |
Merge tag 'v5.11' into rdma.git for-next
Linux 5.11
Merged to resolve conflicts with RDMA rc commits
- drivers/infiniband/sw/rxe/rxe_net.c
The final logic is to call rxe_get_dev_from_net() again with the master
netdev if the packet was received on a VLAN. Keeping the elimination of
the local variables requires a trivial edit to the code in -rc.
Link: https://lore.kernel.org/r/20210210131542.215ea67c@canb.auug.org.au
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
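
For reference, a minimal sketch of the resolved rxe_net.c receive path described above. This is an illustration of the pattern only, not the verbatim hunk: the rxe_udp_encap_recv() entry point and the is_vlan_dev()/vlan_dev_real_dev() helpers are assumed from context, and error handling is abbreviated.

/* Illustrative sketch (not the exact merged hunk): look up the RXE device
 * from the receiving netdev and, if the packet arrived on a VLAN, retry the
 * lookup with the underlying master netdev.
 */
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct net_device *ndev = skb->dev;
	struct rxe_dev *rxe;

	/* takes a reference on rxe->ib_dev, dropped when the skb is freed */
	rxe = rxe_get_dev_from_net(ndev);
	if (!rxe && is_vlan_dev(ndev))
		rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev));
	if (!rxe)
		goto drop;

	/* ... normal rxe packet processing continues here ... */
	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
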
Diffstat (limited to 'drivers')
422 files changed, 4628 insertions, 2129 deletions
diff --git a/drivers/acpi/acpica/nsrepair2.c b/drivers/acpi/acpica/nsrepair2.c index d2c8d8279e7a..24c197d91f29 100644 --- a/drivers/acpi/acpica/nsrepair2.c +++ b/drivers/acpi/acpica/nsrepair2.c @@ -495,8 +495,9 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info, union acpi_operand_object **return_object_ptr) { union acpi_operand_object *return_object = *return_object_ptr; - char *dest; + union acpi_operand_object *new_string; char *source; + char *dest; ACPI_FUNCTION_NAME(ns_repair_HID); @@ -517,6 +518,13 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info, return_ACPI_STATUS(AE_OK); } + /* It is simplest to always create a new string object */ + + new_string = acpi_ut_create_string_object(return_object->string.length); + if (!new_string) { + return_ACPI_STATUS(AE_NO_MEMORY); + } + /* * Remove a leading asterisk if present. For some unknown reason, there * are many machines in the field that contains IDs like this. @@ -526,7 +534,7 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info, source = return_object->string.pointer; if (*source == '*') { source++; - return_object->string.length--; + new_string->string.length--; ACPI_DEBUG_PRINT((ACPI_DB_REPAIR, "%s: Removed invalid leading asterisk\n", @@ -541,11 +549,12 @@ acpi_ns_repair_HID(struct acpi_evaluate_info *info, * "NNNN####" where N is an uppercase letter or decimal digit, and * # is a hex digit. */ - for (dest = return_object->string.pointer; *source; dest++, source++) { + for (dest = new_string->string.pointer; *source; dest++, source++) { *dest = (char)toupper((int)*source); } - return_object->string.pointer[return_object->string.length] = 0; + acpi_ut_remove_reference(return_object); + *return_object_ptr = new_string; return_ACPI_STATUS(AE_OK); } diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index d4eac6d7e9fb..2494138a6905 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1107,6 +1107,11 @@ static int nc_dma_get_range(struct device *dev, u64 *size) ncomp = (struct acpi_iort_named_component *)node->node_data; + if (!ncomp->memory_address_limit) { + pr_warn(FW_BUG "Named component missing memory address limit\n"); + return -EINVAL; + } + *size = ncomp->memory_address_limit >= 64 ? U64_MAX : 1ULL<<ncomp->memory_address_limit; @@ -1126,6 +1131,11 @@ static int rc_dma_get_range(struct device *dev, u64 *size) rc = (struct acpi_iort_root_complex *)node->node_data; + if (!rc->memory_address_limit) { + pr_warn(FW_BUG "Root complex missing memory address limit\n"); + return -EINVAL; + } + *size = rc->memory_address_limit >= 64 ? 
U64_MAX : 1ULL<<rc->memory_address_limit; @@ -1173,8 +1183,8 @@ void iort_dma_setup(struct device *dev, u64 *dma_addr, u64 *dma_size) end = dmaaddr + size - 1; mask = DMA_BIT_MASK(ilog2(end) + 1); dev->bus_dma_limit = end; - dev->coherent_dma_mask = mask; - *dev->dma_mask = mask; + dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask); + *dev->dma_mask = min(*dev->dma_mask, mask); } *dma_addr = dmaaddr; diff --git a/drivers/acpi/device_sysfs.c b/drivers/acpi/device_sysfs.c index 96869f1538b9..bfca116482b8 100644 --- a/drivers/acpi/device_sysfs.c +++ b/drivers/acpi/device_sysfs.c @@ -251,20 +251,12 @@ int __acpi_device_uevent_modalias(struct acpi_device *adev, if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; - len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], - sizeof(env->buf) - env->buflen); - if (len < 0) - return len; - - env->buflen += len; - if (!adev->data.of_compatible) - return 0; - - if (len > 0 && add_uevent_var(env, "MODALIAS=")) - return -ENOMEM; - - len = create_of_modalias(adev, &env->buf[env->buflen - 1], - sizeof(env->buf) - env->buflen); + if (adev->data.of_compatible) + len = create_of_modalias(adev, &env->buf[env->buflen - 1], + sizeof(env->buf) - env->buflen); + else + len = create_pnp_modalias(adev, &env->buf[env->buflen - 1], + sizeof(env->buf) - env->buflen); if (len < 0) return len; diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c index b11b08a60684..8c5dde628405 100644 --- a/drivers/acpi/nfit/core.c +++ b/drivers/acpi/nfit/core.c @@ -2269,40 +2269,24 @@ static const struct attribute_group *acpi_nfit_region_attribute_groups[] = { /* enough info to uniquely specify an interleave set */ struct nfit_set_info { - struct nfit_set_info_map { - u64 region_offset; - u32 serial_number; - u32 pad; - } mapping[0]; + u64 region_offset; + u32 serial_number; + u32 pad; }; struct nfit_set_info2 { - struct nfit_set_info_map2 { - u64 region_offset; - u32 serial_number; - u16 vendor_id; - u16 manufacturing_date; - u8 manufacturing_location; - u8 reserved[31]; - } mapping[0]; + u64 region_offset; + u32 serial_number; + u16 vendor_id; + u16 manufacturing_date; + u8 manufacturing_location; + u8 reserved[31]; }; -static size_t sizeof_nfit_set_info(int num_mappings) -{ - return sizeof(struct nfit_set_info) - + num_mappings * sizeof(struct nfit_set_info_map); -} - -static size_t sizeof_nfit_set_info2(int num_mappings) -{ - return sizeof(struct nfit_set_info2) - + num_mappings * sizeof(struct nfit_set_info_map2); -} - static int cmp_map_compat(const void *m0, const void *m1) { - const struct nfit_set_info_map *map0 = m0; - const struct nfit_set_info_map *map1 = m1; + const struct nfit_set_info *map0 = m0; + const struct nfit_set_info *map1 = m1; return memcmp(&map0->region_offset, &map1->region_offset, sizeof(u64)); @@ -2310,8 +2294,8 @@ static int cmp_map_compat(const void *m0, const void *m1) static int cmp_map(const void *m0, const void *m1) { - const struct nfit_set_info_map *map0 = m0; - const struct nfit_set_info_map *map1 = m1; + const struct nfit_set_info *map0 = m0; + const struct nfit_set_info *map1 = m1; if (map0->region_offset < map1->region_offset) return -1; @@ -2322,8 +2306,8 @@ static int cmp_map(const void *m0, const void *m1) static int cmp_map2(const void *m0, const void *m1) { - const struct nfit_set_info_map2 *map0 = m0; - const struct nfit_set_info_map2 *map1 = m1; + const struct nfit_set_info2 *map0 = m0; + const struct nfit_set_info2 *map1 = m1; if (map0->region_offset < map1->region_offset) return -1; @@ -2361,22 +2345,22 @@ 
static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, return -ENOMEM; import_guid(&nd_set->type_guid, spa->range_guid); - info = devm_kzalloc(dev, sizeof_nfit_set_info(nr), GFP_KERNEL); + info = devm_kcalloc(dev, nr, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; - info2 = devm_kzalloc(dev, sizeof_nfit_set_info2(nr), GFP_KERNEL); + info2 = devm_kcalloc(dev, nr, sizeof(*info2), GFP_KERNEL); if (!info2) return -ENOMEM; for (i = 0; i < nr; i++) { struct nd_mapping_desc *mapping = &ndr_desc->mapping[i]; - struct nfit_set_info_map *map = &info->mapping[i]; - struct nfit_set_info_map2 *map2 = &info2->mapping[i]; struct nvdimm *nvdimm = mapping->nvdimm; struct nfit_mem *nfit_mem = nvdimm_provider_data(nvdimm); - struct acpi_nfit_memory_map *memdev = memdev_from_spa(acpi_desc, - spa->range_index, i); + struct nfit_set_info *map = &info[i]; + struct nfit_set_info2 *map2 = &info2[i]; + struct acpi_nfit_memory_map *memdev = + memdev_from_spa(acpi_desc, spa->range_index, i); struct acpi_nfit_control_region *dcr = nfit_mem->dcr; if (!memdev || !nfit_mem->dcr) { @@ -2395,23 +2379,20 @@ static int acpi_nfit_init_interleave_set(struct acpi_nfit_desc *acpi_desc, } /* v1.1 namespaces */ - sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), - cmp_map, NULL); - nd_set->cookie1 = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); + sort(info, nr, sizeof(*info), cmp_map, NULL); + nd_set->cookie1 = nd_fletcher64(info, sizeof(*info) * nr, 0); /* v1.2 namespaces */ - sort(&info2->mapping[0], nr, sizeof(struct nfit_set_info_map2), - cmp_map2, NULL); - nd_set->cookie2 = nd_fletcher64(info2, sizeof_nfit_set_info2(nr), 0); + sort(info2, nr, sizeof(*info2), cmp_map2, NULL); + nd_set->cookie2 = nd_fletcher64(info2, sizeof(*info2) * nr, 0); /* support v1.1 namespaces created with the wrong sort order */ - sort(&info->mapping[0], nr, sizeof(struct nfit_set_info_map), - cmp_map_compat, NULL); - nd_set->altcookie = nd_fletcher64(info, sizeof_nfit_set_info(nr), 0); + sort(info, nr, sizeof(*info), cmp_map_compat, NULL); + nd_set->altcookie = nd_fletcher64(info, sizeof(*info) * nr, 0); /* record the result of the sort for the mapping position */ for (i = 0; i < nr; i++) { - struct nfit_set_info_map2 *map2 = &info2->mapping[i]; + struct nfit_set_info2 *map2 = &info2[i]; int j; for (j = 0; j < nr; j++) { diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 58ff36340cd7..22566b4b3150 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -586,6 +586,8 @@ static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device, if (!device) return -EINVAL; + *device = NULL; + status = acpi_get_data_full(handle, acpi_scan_drop_device, (void **)device, callback); if (ACPI_FAILURE(status) || !*device) { @@ -2121,12 +2123,12 @@ void acpi_walk_dep_device_list(acpi_handle handle) list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) { if (dep->supplier == handle) { acpi_bus_get_device(dep->consumer, &adev); - if (!adev) - continue; - adev->dep_unmet--; - if (!adev->dep_unmet) - acpi_bus_attach(adev, true); + if (adev) { + adev->dep_unmet--; + if (!adev->dep_unmet) + acpi_bus_attach(adev, true); + } list_del(&dep->node); kfree(dep); diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 12c0ece746f0..859b1de31ddc 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -174,6 +174,8 @@ struct acpi_thermal { struct thermal_zone_device *thermal_zone; int kelvin_offset; /* in millidegrees */ struct work_struct thermal_check_work; + struct mutex 
thermal_check_lock; + refcount_t thermal_check_count; }; /* -------------------------------------------------------------------------- @@ -495,14 +497,6 @@ static int acpi_thermal_get_trip_points(struct acpi_thermal *tz) return 0; } -static void acpi_thermal_check(void *data) -{ - struct acpi_thermal *tz = data; - - thermal_zone_device_update(tz->thermal_zone, - THERMAL_EVENT_UNSPECIFIED); -} - /* sys I/F for generic thermal sysfs support */ static int thermal_get_temp(struct thermal_zone_device *thermal, int *temp) @@ -900,6 +894,12 @@ static void acpi_thermal_unregister_thermal_zone(struct acpi_thermal *tz) Driver Interface -------------------------------------------------------------------------- */ +static void acpi_queue_thermal_check(struct acpi_thermal *tz) +{ + if (!work_pending(&tz->thermal_check_work)) + queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); +} + static void acpi_thermal_notify(struct acpi_device *device, u32 event) { struct acpi_thermal *tz = acpi_driver_data(device); @@ -910,17 +910,17 @@ static void acpi_thermal_notify(struct acpi_device *device, u32 event) switch (event) { case ACPI_THERMAL_NOTIFY_TEMPERATURE: - acpi_thermal_check(tz); + acpi_queue_thermal_check(tz); break; case ACPI_THERMAL_NOTIFY_THRESHOLDS: acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_THRESHOLDS); - acpi_thermal_check(tz); + acpi_queue_thermal_check(tz); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); break; case ACPI_THERMAL_NOTIFY_DEVICES: acpi_thermal_trips_update(tz, ACPI_TRIPS_REFRESH_DEVICES); - acpi_thermal_check(tz); + acpi_queue_thermal_check(tz); acpi_bus_generate_netlink_event(device->pnp.device_class, dev_name(&device->dev), event, 0); break; @@ -1020,7 +1020,25 @@ static void acpi_thermal_check_fn(struct work_struct *work) { struct acpi_thermal *tz = container_of(work, struct acpi_thermal, thermal_check_work); - acpi_thermal_check(tz); + + /* + * In general, it is not sufficient to check the pending bit, because + * subsequent instances of this function may be queued after one of them + * has started running (e.g. if _TMP sleeps). Avoid bailing out if just + * one of them is running, though, because it may have done the actual + * check some time ago, so allow at least one of them to block on the + * mutex while another one is running the update. 
+ */ + if (!refcount_dec_not_one(&tz->thermal_check_count)) + return; + + mutex_lock(&tz->thermal_check_lock); + + thermal_zone_device_update(tz->thermal_zone, THERMAL_EVENT_UNSPECIFIED); + + refcount_inc(&tz->thermal_check_count); + + mutex_unlock(&tz->thermal_check_lock); } static int acpi_thermal_add(struct acpi_device *device) @@ -1052,6 +1070,8 @@ static int acpi_thermal_add(struct acpi_device *device) if (result) goto free_memory; + refcount_set(&tz->thermal_check_count, 3); + mutex_init(&tz->thermal_check_lock); INIT_WORK(&tz->thermal_check_work, acpi_thermal_check_fn); pr_info(PREFIX "%s [%s] (%ld C)\n", acpi_device_name(device), @@ -1117,7 +1137,7 @@ static int acpi_thermal_resume(struct device *dev) tz->state.active |= tz->trips.active[i].flags.enabled; } - queue_work(acpi_thermal_pm_queue, &tz->thermal_check_work); + acpi_queue_thermal_check(tz); return AE_OK; } diff --git a/drivers/base/core.c b/drivers/base/core.c index 14f165816742..6eb4c7a904c5 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -208,6 +208,16 @@ int device_links_read_lock_held(void) #endif #endif /* !CONFIG_SRCU */ +static bool device_is_ancestor(struct device *dev, struct device *target) +{ + while (target->parent) { + target = target->parent; + if (dev == target) + return true; + } + return false; +} + /** * device_is_dependent - Check if one device depends on another one * @dev: Device to check dependencies for. @@ -221,7 +231,12 @@ int device_is_dependent(struct device *dev, void *target) struct device_link *link; int ret; - if (dev == target) + /* + * The "ancestors" check is needed to catch the case when the target + * device has not been completely initialized yet and it is still + * missing from the list of children of its parent device. + */ + if (dev == target || device_is_ancestor(dev, target)) return 1; ret = device_for_each_child(dev, target, device_is_dependent); @@ -456,7 +471,9 @@ static int devlink_add_symlinks(struct device *dev, struct device *con = link->consumer; char *buf; - len = max(strlen(dev_name(sup)), strlen(dev_name(con))); + len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)), + strlen(dev_bus_name(con)) + strlen(dev_name(con))); + len += strlen(":"); len += strlen("supplier:") + 1; buf = kzalloc(len, GFP_KERNEL); if (!buf) @@ -470,12 +487,12 @@ static int devlink_add_symlinks(struct device *dev, if (ret) goto err_con; - snprintf(buf, len, "consumer:%s", dev_name(con)); + snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); ret = sysfs_create_link(&sup->kobj, &link->link_dev.kobj, buf); if (ret) goto err_con_dev; - snprintf(buf, len, "supplier:%s", dev_name(sup)); + snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); ret = sysfs_create_link(&con->kobj, &link->link_dev.kobj, buf); if (ret) goto err_sup_dev; @@ -483,7 +500,7 @@ static int devlink_add_symlinks(struct device *dev, goto out; err_sup_dev: - snprintf(buf, len, "consumer:%s", dev_name(con)); + snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); sysfs_remove_link(&sup->kobj, buf); err_con_dev: sysfs_remove_link(&link->link_dev.kobj, "consumer"); @@ -506,7 +523,9 @@ static void devlink_remove_symlinks(struct device *dev, sysfs_remove_link(&link->link_dev.kobj, "consumer"); sysfs_remove_link(&link->link_dev.kobj, "supplier"); - len = max(strlen(dev_name(sup)), strlen(dev_name(con))); + len = max(strlen(dev_bus_name(sup)) + strlen(dev_name(sup)), + strlen(dev_bus_name(con)) + strlen(dev_name(con))); + len += strlen(":"); len += strlen("supplier:") 
+ 1; buf = kzalloc(len, GFP_KERNEL); if (!buf) { @@ -514,9 +533,9 @@ static void devlink_remove_symlinks(struct device *dev, return; } - snprintf(buf, len, "supplier:%s", dev_name(sup)); + snprintf(buf, len, "supplier:%s:%s", dev_bus_name(sup), dev_name(sup)); sysfs_remove_link(&con->kobj, buf); - snprintf(buf, len, "consumer:%s", dev_name(con)); + snprintf(buf, len, "consumer:%s:%s", dev_bus_name(con), dev_name(con)); sysfs_remove_link(&sup->kobj, buf); kfree(buf); } @@ -737,8 +756,9 @@ struct device_link *device_link_add(struct device *consumer, link->link_dev.class = &devlink_class; device_set_pm_not_required(&link->link_dev); - dev_set_name(&link->link_dev, "%s--%s", - dev_name(supplier), dev_name(consumer)); + dev_set_name(&link->link_dev, "%s:%s--%s:%s", + dev_bus_name(supplier), dev_name(supplier), + dev_bus_name(consumer), dev_name(consumer)); if (device_register(&link->link_dev)) { put_device(consumer); put_device(supplier); @@ -1808,9 +1828,7 @@ const char *dev_driver_string(const struct device *dev) * never change once they are set, so they don't need special care. */ drv = READ_ONCE(dev->driver); - return drv ? drv->name : - (dev->bus ? dev->bus->name : - (dev->class ? dev->class->name : "")); + return drv ? drv->name : dev_bus_name(dev); } EXPORT_SYMBOL(dev_driver_string); diff --git a/drivers/base/dd.c b/drivers/base/dd.c index 2f32f38a11ed..9179825ff646 100644 --- a/drivers/base/dd.c +++ b/drivers/base/dd.c @@ -371,13 +371,6 @@ static void driver_bound(struct device *dev) device_pm_check_callbacks(dev); /* - * Reorder successfully probed devices to the end of the device list. - * This ensures that suspend/resume order matches probe order, which - * is usually what drivers rely on. - */ - device_pm_move_to_tail(dev); - - /* * Make sure the device is no longer in one of the deferred lists and * kick off retrying all pending devices */ @@ -619,6 +612,8 @@ dev_groups_failed: else if (drv->remove) drv->remove(dev); probe_failed: + kfree(dev->dma_range_map); + dev->dma_range_map = NULL; if (dev->bus) blocking_notifier_call_chain(&dev->bus->p->bus_notifier, BUS_NOTIFY_DRIVER_NOT_BOUND, dev); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 95fd1549f87d..8456d8384ac8 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -366,6 +366,8 @@ int devm_platform_get_irqs_affinity(struct platform_device *dev, return -ERANGE; nvec = platform_irq_count(dev); + if (nvec < 0) + return nvec; if (nvec < minvec) return -ENOSPC; diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 6727358e147d..e6ea5d344f87 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -1022,6 +1022,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, if (!sock) return err; + /* + * We need to make sure we don't get any errant requests while we're + * reallocating the ->socks array. 
+ */ + blk_mq_freeze_queue(nbd->disk->queue); + if (!netlink && !nbd->task_setup && !test_bit(NBD_RT_BOUND, &config->runtime_flags)) nbd->task_setup = current; @@ -1060,10 +1066,12 @@ static int nbd_add_socket(struct nbd_device *nbd, unsigned long arg, nsock->cookie = 0; socks[config->num_connections++] = nsock; atomic_inc(&config->live_connections); + blk_mq_unfreeze_queue(nbd->disk->queue); return 0; put_socket: + blk_mq_unfreeze_queue(nbd->disk->queue); sockfd_put(sock); return err; } diff --git a/drivers/block/null_blk/zoned.c b/drivers/block/null_blk/zoned.c index 148b871f263b..fce0a54df0e5 100644 --- a/drivers/block/null_blk/zoned.c +++ b/drivers/block/null_blk/zoned.c @@ -6,7 +6,10 @@ #define CREATE_TRACE_POINTS #include "trace.h" -#define MB_TO_SECTS(mb) (((sector_t)mb * SZ_1M) >> SECTOR_SHIFT) +static inline sector_t mb_to_sects(unsigned long mb) +{ + return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT; +} static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect) { @@ -77,12 +80,11 @@ int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q) return -EINVAL; } - zone_capacity_sects = MB_TO_SECTS(dev->zone_capacity); - dev_capacity_sects = MB_TO_SECTS(dev->size); - dev->zone_size_sects = MB_TO_SECTS(dev->zone_size); - dev->nr_zones = dev_capacity_sects >> ilog2(dev->zone_size_sects); - if (dev_capacity_sects & (dev->zone_size_sects - 1)) - dev->nr_zones++; + zone_capacity_sects = mb_to_sects(dev->zone_capacity); + dev_capacity_sects = mb_to_sects(dev->size); + dev->zone_size_sects = mb_to_sects(dev->zone_size); + dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects) + >> ilog2(dev->zone_size_sects); dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone), GFP_KERNEL | __GFP_ZERO); diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 5265975b3fba..e1c6798889f4 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -945,7 +945,8 @@ static void blkif_set_queue_limits(struct blkfront_info *info) if (info->feature_discard) { blk_queue_flag_set(QUEUE_FLAG_DISCARD, rq); blk_queue_max_discard_sectors(rq, get_capacity(gd)); - rq->limits.discard_granularity = info->discard_granularity; + rq->limits.discard_granularity = info->discard_granularity ?: + info->physical_sector_size; rq->limits.discard_alignment = info->discard_alignment; if (info->feature_secdiscard) blk_queue_flag_set(QUEUE_FLAG_SECERASE, rq); @@ -2179,19 +2180,12 @@ static void blkfront_closing(struct blkfront_info *info) static void blkfront_setup_discard(struct blkfront_info *info) { - int err; - unsigned int discard_granularity; - unsigned int discard_alignment; - info->feature_discard = 1; - err = xenbus_gather(XBT_NIL, info->xbdev->otherend, - "discard-granularity", "%u", &discard_granularity, - "discard-alignment", "%u", &discard_alignment, - NULL); - if (!err) { - info->discard_granularity = discard_granularity; - info->discard_alignment = discard_alignment; - } + info->discard_granularity = xenbus_read_unsigned(info->xbdev->otherend, + "discard-granularity", + 0); + info->discard_alignment = xenbus_read_unsigned(info->xbdev->otherend, + "discard-alignment", 0); info->feature_secdiscard = !!xenbus_read_unsigned(info->xbdev->otherend, "discard-secure", 0); diff --git a/drivers/bus/arm-integrator-lm.c b/drivers/bus/arm-integrator-lm.c index 845b6c43fef8..2344d560b144 100644 --- a/drivers/bus/arm-integrator-lm.c +++ b/drivers/bus/arm-integrator-lm.c @@ -54,6 +54,7 @@ static int integrator_lm_populate(int num, struct 
device *dev) ret = of_platform_default_populate(child, NULL, dev); if (ret) { dev_err(dev, "failed to populate module\n"); + of_node_put(child); return ret; } } diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c index c5eb46cbf388..01a3d0cd08ed 100644 --- a/drivers/bus/simple-pm-bus.c +++ b/drivers/bus/simple-pm-bus.c @@ -16,6 +16,7 @@ static int simple_pm_bus_probe(struct platform_device *pdev) { + const struct of_dev_auxdata *lookup = dev_get_platdata(&pdev->dev); struct device_node *np = pdev->dev.of_node; dev_dbg(&pdev->dev, "%s\n", __func__); @@ -23,7 +24,7 @@ static int simple_pm_bus_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); if (np) - of_platform_populate(np, NULL, NULL, &pdev->dev); + of_platform_populate(np, NULL, lookup, &pdev->dev); return 0; } diff --git a/drivers/clk/imx/Kconfig b/drivers/clk/imx/Kconfig index 3061896503f3..47d9ec3abd2f 100644 --- a/drivers/clk/imx/Kconfig +++ b/drivers/clk/imx/Kconfig @@ -6,8 +6,6 @@ config MXC_CLK config MXC_CLK_SCU tristate - depends on ARCH_MXC - depends on IMX_SCU && HAVE_ARM_SMCCC config CLK_IMX1 def_bool SOC_IMX1 diff --git a/drivers/clk/mmp/clk-audio.c b/drivers/clk/mmp/clk-audio.c index eea69d498bd2..7aa7f4a9564f 100644 --- a/drivers/clk/mmp/clk-audio.c +++ b/drivers/clk/mmp/clk-audio.c @@ -392,7 +392,8 @@ static int mmp2_audio_clk_remove(struct platform_device *pdev) return 0; } -static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev) +#ifdef CONFIG_PM +static int mmp2_audio_clk_suspend(struct device *dev) { struct mmp2_audio_clk *priv = dev_get_drvdata(dev); @@ -404,7 +405,7 @@ static int __maybe_unused mmp2_audio_clk_suspend(struct device *dev) return 0; } -static int __maybe_unused mmp2_audio_clk_resume(struct device *dev) +static int mmp2_audio_clk_resume(struct device *dev) { struct mmp2_audio_clk *priv = dev_get_drvdata(dev); @@ -415,6 +416,7 @@ static int __maybe_unused mmp2_audio_clk_resume(struct device *dev) return 0; } +#endif static const struct dev_pm_ops mmp2_audio_clk_pm_ops = { SET_RUNTIME_PM_OPS(mmp2_audio_clk_suspend, mmp2_audio_clk_resume, NULL) diff --git a/drivers/clk/qcom/gcc-sc7180.c b/drivers/clk/qcom/gcc-sc7180.c index d82d725ac231..b05901b24917 100644 --- a/drivers/clk/qcom/gcc-sc7180.c +++ b/drivers/clk/qcom/gcc-sc7180.c @@ -891,21 +891,6 @@ static struct clk_branch gcc_boot_rom_ahb_clk = { }, }; -static struct clk_branch gcc_camera_ahb_clk = { - .halt_reg = 0xb008, - .halt_check = BRANCH_HALT, - .hwcg_reg = 0xb008, - .hwcg_bit = 1, - .clkr = { - .enable_reg = 0xb008, - .enable_mask = BIT(0), - .hw.init = &(struct clk_init_data){ - .name = "gcc_camera_ahb_clk", - .ops = &clk_branch2_ops, - }, - }, -}; - static struct clk_branch gcc_camera_hf_axi_clk = { .halt_reg = 0xb020, .halt_check = BRANCH_HALT, @@ -2317,7 +2302,6 @@ static struct clk_regmap *gcc_sc7180_clocks[] = { [GCC_AGGRE_UFS_PHY_AXI_CLK] = &gcc_aggre_ufs_phy_axi_clk.clkr, [GCC_AGGRE_USB3_PRIM_AXI_CLK] = &gcc_aggre_usb3_prim_axi_clk.clkr, [GCC_BOOT_ROM_AHB_CLK] = &gcc_boot_rom_ahb_clk.clkr, - [GCC_CAMERA_AHB_CLK] = &gcc_camera_ahb_clk.clkr, [GCC_CAMERA_HF_AXI_CLK] = &gcc_camera_hf_axi_clk.clkr, [GCC_CAMERA_THROTTLE_HF_AXI_CLK] = &gcc_camera_throttle_hf_axi_clk.clkr, [GCC_CAMERA_XO_CLK] = &gcc_camera_xo_clk.clkr, @@ -2519,11 +2503,12 @@ static int gcc_sc7180_probe(struct platform_device *pdev) /* * Keep the clocks always-ON - * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_DISP_AHB_CLK - * GCC_GPU_CFG_AHB_CLK + * GCC_CPUSS_GNOC_CLK, GCC_VIDEO_AHB_CLK, GCC_CAMERA_AHB_CLK, + * GCC_DISP_AHB_CLK, 
GCC_GPU_CFG_AHB_CLK */ regmap_update_bits(regmap, 0x48004, BIT(0), BIT(0)); regmap_update_bits(regmap, 0x0b004, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x0b008, BIT(0), BIT(0)); regmap_update_bits(regmap, 0x0b00c, BIT(0), BIT(0)); regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0)); diff --git a/drivers/clk/qcom/gcc-sm8250.c b/drivers/clk/qcom/gcc-sm8250.c index 6cb6617b8d88..ab594a0f0c40 100644 --- a/drivers/clk/qcom/gcc-sm8250.c +++ b/drivers/clk/qcom/gcc-sm8250.c @@ -722,7 +722,7 @@ static struct clk_rcg2 gcc_sdcc2_apps_clk_src = { .name = "gcc_sdcc2_apps_clk_src", .parent_data = gcc_parent_data_4, .num_parents = 5, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; @@ -745,7 +745,7 @@ static struct clk_rcg2 gcc_sdcc4_apps_clk_src = { .name = "gcc_sdcc4_apps_clk_src", .parent_data = gcc_parent_data_0, .num_parents = 3, - .ops = &clk_rcg2_ops, + .ops = &clk_rcg2_floor_ops, }, }; diff --git a/drivers/clk/sunxi-ng/ccu_mp.c b/drivers/clk/sunxi-ng/ccu_mp.c index fa4ecb915590..9d3a76604d94 100644 --- a/drivers/clk/sunxi-ng/ccu_mp.c +++ b/drivers/clk/sunxi-ng/ccu_mp.c @@ -108,7 +108,7 @@ static unsigned long ccu_mp_round_rate(struct ccu_mux_internal *mux, max_m = cmp->m.max ?: 1 << cmp->m.width; max_p = cmp->p.max ?: 1 << ((1 << cmp->p.width) - 1); - if (!(clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT)) { + if (!clk_hw_can_set_rate_parent(&cmp->common.hw)) { ccu_mp_find_best(*parent_rate, rate, max_m, max_p, &m, &p); rate = *parent_rate / p / m; } else { diff --git a/drivers/counter/ti-eqep.c b/drivers/counter/ti-eqep.c index a60aee1a1a29..65df9ef5b5bc 100644 --- a/drivers/counter/ti-eqep.c +++ b/drivers/counter/ti-eqep.c @@ -235,36 +235,6 @@ static ssize_t ti_eqep_position_ceiling_write(struct counter_device *counter, return len; } -static ssize_t ti_eqep_position_floor_read(struct counter_device *counter, - struct counter_count *count, - void *ext_priv, char *buf) -{ - struct ti_eqep_cnt *priv = counter->priv; - u32 qposinit; - - regmap_read(priv->regmap32, QPOSINIT, &qposinit); - - return sprintf(buf, "%u\n", qposinit); -} - -static ssize_t ti_eqep_position_floor_write(struct counter_device *counter, - struct counter_count *count, - void *ext_priv, const char *buf, - size_t len) -{ - struct ti_eqep_cnt *priv = counter->priv; - int err; - u32 res; - - err = kstrtouint(buf, 0, &res); - if (err < 0) - return err; - - regmap_write(priv->regmap32, QPOSINIT, res); - - return len; -} - static ssize_t ti_eqep_position_enable_read(struct counter_device *counter, struct counter_count *count, void *ext_priv, char *buf) @@ -302,11 +272,6 @@ static struct counter_count_ext ti_eqep_position_ext[] = { .write = ti_eqep_position_ceiling_write, }, { - .name = "floor", - .read = ti_eqep_position_floor_read, - .write = ti_eqep_position_floor_write, - }, - { .name = "enable", .read = ti_eqep_position_enable_read, .write = ti_eqep_position_enable_write, diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c index 1e4fbb002a31..d3e5a6fceb61 100644 --- a/drivers/cpufreq/acpi-cpufreq.c +++ b/drivers/cpufreq/acpi-cpufreq.c @@ -26,6 +26,7 @@ #include <linux/uaccess.h> #include <acpi/processor.h> +#include <acpi/cppc_acpi.h> #include <asm/msr.h> #include <asm/processor.h> @@ -53,6 +54,7 @@ struct acpi_cpufreq_data { unsigned int resume; unsigned int cpu_feature; unsigned int acpi_perf_cpu; + unsigned int first_perf_state; cpumask_var_t freqdomain_cpus; void (*cpu_freq_write)(struct acpi_pct_register *reg, u32 val); u32 (*cpu_freq_read)(struct acpi_pct_register *reg); @@ -221,10 
+223,10 @@ static unsigned extract_msr(struct cpufreq_policy *policy, u32 msr) perf = to_perf_data(data); - cpufreq_for_each_entry(pos, policy->freq_table) + cpufreq_for_each_entry(pos, policy->freq_table + data->first_perf_state) if (msr == perf->states[pos->driver_data].status) return pos->frequency; - return policy->freq_table[0].frequency; + return policy->freq_table[data->first_perf_state].frequency; } static unsigned extract_freq(struct cpufreq_policy *policy, u32 val) @@ -363,6 +365,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) struct cpufreq_policy *policy; unsigned int freq; unsigned int cached_freq; + unsigned int state; pr_debug("%s (%d)\n", __func__, cpu); @@ -374,7 +377,11 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) if (unlikely(!data || !policy->freq_table)) return 0; - cached_freq = policy->freq_table[to_perf_data(data)->state].frequency; + state = to_perf_data(data)->state; + if (state < data->first_perf_state) + state = data->first_perf_state; + + cached_freq = policy->freq_table[state].frequency; freq = extract_freq(policy, get_cur_val(cpumask_of(cpu), data)); if (freq != cached_freq) { /* @@ -628,16 +635,54 @@ static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) } #endif +#ifdef CONFIG_ACPI_CPPC_LIB +static u64 get_max_boost_ratio(unsigned int cpu) +{ + struct cppc_perf_caps perf_caps; + u64 highest_perf, nominal_perf; + int ret; + + if (acpi_pstate_strict) + return 0; + + ret = cppc_get_perf_caps(cpu, &perf_caps); + if (ret) { + pr_debug("CPU%d: Unable to get performance capabilities (%d)\n", + cpu, ret); + return 0; + } + + highest_perf = perf_caps.highest_perf; + nominal_perf = perf_caps.nominal_perf; + + if (!highest_perf || !nominal_perf) { + pr_debug("CPU%d: highest or nominal performance missing\n", cpu); + return 0; + } + + if (highest_perf < nominal_perf) { + pr_debug("CPU%d: nominal performance above highest\n", cpu); + return 0; + } + + return div_u64(highest_perf << SCHED_CAPACITY_SHIFT, nominal_perf); +} +#else +static inline u64 get_max_boost_ratio(unsigned int cpu) { return 0; } +#endif + static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) { - unsigned int i; - unsigned int valid_states = 0; - unsigned int cpu = policy->cpu; + struct cpufreq_frequency_table *freq_table; + struct acpi_processor_performance *perf; struct acpi_cpufreq_data *data; + unsigned int cpu = policy->cpu; + struct cpuinfo_x86 *c = &cpu_data(cpu); + unsigned int valid_states = 0; unsigned int result = 0; - struct cpuinfo_x86 *c = &cpu_data(policy->cpu); - struct acpi_processor_performance *perf; - struct cpufreq_frequency_table *freq_table; + unsigned int state_count; + u64 max_boost_ratio; + unsigned int i; #ifdef CONFIG_SMP static int blacklisted; #endif @@ -750,8 +795,28 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) goto err_unreg; } - freq_table = kcalloc(perf->state_count + 1, sizeof(*freq_table), - GFP_KERNEL); + state_count = perf->state_count + 1; + + max_boost_ratio = get_max_boost_ratio(cpu); + if (max_boost_ratio) { + /* + * Make a room for one more entry to represent the highest + * available "boost" frequency. + */ + state_count++; + valid_states++; + data->first_perf_state = valid_states; + } else { + /* + * If the maximum "boost" frequency is unknown, ask the arch + * scale-invariance code to use the "nominal" performance for + * CPU utilization scaling so as to prevent the schedutil + * governor from selecting inadequate CPU frequencies. 
+ */ + arch_set_max_freq_ratio(true); + } + + freq_table = kcalloc(state_count, sizeof(*freq_table), GFP_KERNEL); if (!freq_table) { result = -ENOMEM; goto err_unreg; @@ -785,6 +850,30 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) valid_states++; } freq_table[valid_states].frequency = CPUFREQ_TABLE_END; + + if (max_boost_ratio) { + unsigned int state = data->first_perf_state; + unsigned int freq = freq_table[state].frequency; + + /* + * Because the loop above sorts the freq_table entries in the + * descending order, freq is the maximum frequency in the table. + * Assume that it corresponds to the CPPC nominal frequency and + * use it to populate the frequency field of the extra "boost" + * frequency entry. + */ + freq_table[0].frequency = freq * max_boost_ratio >> SCHED_CAPACITY_SHIFT; + /* + * The purpose of the extra "boost" frequency entry is to make + * the rest of cpufreq aware of the real maximum frequency, but + * the way to request it is the same as for the first_perf_state + * entry that is expected to cover the entire range of "boost" + * frequencies of the CPU, so copy the driver_data value from + * that entry. + */ + freq_table[0].driver_data = freq_table[state].driver_data; + } + policy->freq_table = freq_table; perf->state = 0; @@ -858,8 +947,10 @@ static void acpi_cpufreq_cpu_ready(struct cpufreq_policy *policy) { struct acpi_processor_performance *perf = per_cpu_ptr(acpi_perf_data, policy->cpu); + struct acpi_cpufreq_data *data = policy->driver_data; + unsigned int freq = policy->freq_table[data->first_perf_state].frequency; - if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq) + if (perf->states[0].core_frequency * 1000 != freq) pr_warn(FW_WARN "P-state 0 is not max freq\n"); } diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index bbd51703e738..e535f28a8028 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -366,6 +366,7 @@ if CRYPTO_DEV_OMAP config CRYPTO_DEV_OMAP_SHAM tristate "Support for OMAP MD5/SHA1/SHA2 hw accelerator" depends on ARCH_OMAP2PLUS + select CRYPTO_ENGINE select CRYPTO_SHA1 select CRYPTO_MD5 select CRYPTO_SHA256 diff --git a/drivers/crypto/marvell/cesa/cesa.h b/drivers/crypto/marvell/cesa/cesa.h index fabfaaccca87..fa56b45620c7 100644 --- a/drivers/crypto/marvell/cesa/cesa.h +++ b/drivers/crypto/marvell/cesa/cesa.h @@ -300,11 +300,11 @@ struct mv_cesa_tdma_desc { __le32 byte_cnt; union { __le32 src; - dma_addr_t src_dma; + u32 src_dma; }; union { __le32 dst; - dma_addr_t dst_dma; + u32 dst_dma; }; __le32 next_dma; diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 962cbb5e5f7f..fe6a460c4373 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1110,7 +1110,6 @@ static void __dma_async_device_channel_unregister(struct dma_device *device, "%s called while %d clients hold a reference\n", __func__, chan->client_count); mutex_lock(&dma_list_mutex); - list_del(&chan->device_node); device->chancnt--; chan->dev->chan = NULL; mutex_unlock(&dma_list_mutex); diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 19a23767533a..7ab83fe601ed 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -982,11 +982,8 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) dev_vdbg(chan2dev(chan), "%s\n", __func__); - pm_runtime_get_sync(dw->dma.dev); - /* ASSERT: channel is idle */ if (dma_readl(dw, CH_EN) & dwc->mask) { - pm_runtime_put_sync_suspend(dw->dma.dev); dev_dbg(chan2dev(chan), "DMA channel not idle?\n"); return -EIO; } @@ -1003,7 
+1000,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan) * We need controller-specific data to set up slave transfers. */ if (chan->private && !dw_dma_filter(chan, chan->private)) { - pm_runtime_put_sync_suspend(dw->dma.dev); dev_warn(chan2dev(chan), "Wrong controller-specific data\n"); return -EINVAL; } @@ -1047,8 +1043,6 @@ static void dwc_free_chan_resources(struct dma_chan *chan) if (!dw->in_use) do_dw_dma_off(dw); - pm_runtime_put_sync_suspend(dw->dma.dev); - dev_vdbg(chan2dev(chan), "%s: done\n", __func__); } diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 95f94a3ed6be..84a6ea60ecf0 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -398,17 +398,31 @@ static inline bool idxd_is_enabled(struct idxd_device *idxd) return false; } +static inline bool idxd_device_is_halted(struct idxd_device *idxd) +{ + union gensts_reg gensts; + + gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); + + return (gensts.state == IDXD_DEVICE_STATE_HALT); +} + /* * This is function is only used for reset during probe and will * poll for completion. Once the device is setup with interrupts, * all commands will be done via interrupt completion. */ -void idxd_device_init_reset(struct idxd_device *idxd) +int idxd_device_init_reset(struct idxd_device *idxd) { struct device *dev = &idxd->pdev->dev; union idxd_command_reg cmd; unsigned long flags; + if (idxd_device_is_halted(idxd)) { + dev_warn(&idxd->pdev->dev, "Device is HALTED!\n"); + return -ENXIO; + } + memset(&cmd, 0, sizeof(cmd)); cmd.cmd = IDXD_CMD_RESET_DEVICE; dev_dbg(dev, "%s: sending reset for init.\n", __func__); @@ -419,6 +433,7 @@ void idxd_device_init_reset(struct idxd_device *idxd) IDXD_CMDSTS_ACTIVE) cpu_relax(); spin_unlock_irqrestore(&idxd->dev_lock, flags); + return 0; } static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, @@ -428,6 +443,12 @@ static void idxd_cmd_exec(struct idxd_device *idxd, int cmd_code, u32 operand, DECLARE_COMPLETION_ONSTACK(done); unsigned long flags; + if (idxd_device_is_halted(idxd)) { + dev_warn(&idxd->pdev->dev, "Device is HALTED!\n"); + *status = IDXD_CMDSTS_HW_ERR; + return; + } + memset(&cmd, 0, sizeof(cmd)); cmd.cmd = cmd_code; cmd.operand = operand; diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c index 8ed2773d8285..71fd6e4c42cd 100644 --- a/drivers/dma/idxd/dma.c +++ b/drivers/dma/idxd/dma.c @@ -205,5 +205,8 @@ int idxd_register_dma_channel(struct idxd_wq *wq) void idxd_unregister_dma_channel(struct idxd_wq *wq) { - dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan); + struct dma_chan *chan = &wq->dma_chan; + + dma_async_device_channel_unregister(&wq->idxd->dma_dev, chan); + list_del(&chan->device_node); } diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 5a50e91c71bf..81a0e65fd316 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -326,7 +326,7 @@ void idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id); void idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id); /* device control */ -void idxd_device_init_reset(struct idxd_device *idxd); +int idxd_device_init_reset(struct idxd_device *idxd); int idxd_device_enable(struct idxd_device *idxd); int idxd_device_disable(struct idxd_device *idxd); void idxd_device_reset(struct idxd_device *idxd); diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 2c051e07c34c..fa04acd5582a 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -335,7 +335,10 @@ static 
int idxd_probe(struct idxd_device *idxd) int rc; dev_dbg(dev, "%s entered and resetting device\n", __func__); - idxd_device_init_reset(idxd); + rc = idxd_device_init_reset(idxd); + if (rc < 0) + return rc; + dev_dbg(dev, "IDXD reset complete\n"); if (IS_ENABLED(CONFIG_INTEL_IDXD_SVM)) { diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c index 593a2f6ed16c..a60ca11a5784 100644 --- a/drivers/dma/idxd/irq.c +++ b/drivers/dma/idxd/irq.c @@ -111,19 +111,14 @@ irqreturn_t idxd_irq_handler(int vec, void *data) return IRQ_WAKE_THREAD; } -irqreturn_t idxd_misc_thread(int vec, void *data) +static int process_misc_interrupts(struct idxd_device *idxd, u32 cause) { - struct idxd_irq_entry *irq_entry = data; - struct idxd_device *idxd = irq_entry->idxd; struct device *dev = &idxd->pdev->dev; union gensts_reg gensts; - u32 cause, val = 0; + u32 val = 0; int i; bool err = false; - cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET); - iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET); - if (cause & IDXD_INTC_ERR) { spin_lock_bh(&idxd->dev_lock); for (i = 0; i < 4; i++) @@ -181,7 +176,7 @@ irqreturn_t idxd_misc_thread(int vec, void *data) val); if (!err) - goto out; + return 0; /* * This case should rarely happen and typically is due to software @@ -211,37 +206,58 @@ irqreturn_t idxd_misc_thread(int vec, void *data) gensts.reset_type == IDXD_DEVICE_RESET_FLR ? "FLR" : "system reset"); spin_unlock_bh(&idxd->dev_lock); + return -ENXIO; } } - out: + return 0; +} + +irqreturn_t idxd_misc_thread(int vec, void *data) +{ + struct idxd_irq_entry *irq_entry = data; + struct idxd_device *idxd = irq_entry->idxd; + int rc; + u32 cause; + + cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET); + if (cause) + iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET); + + while (cause) { + rc = process_misc_interrupts(idxd, cause); + if (rc < 0) + break; + cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET); + if (cause) + iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET); + } + idxd_unmask_msix_vector(idxd, irq_entry->id); return IRQ_HANDLED; } -static bool process_fault(struct idxd_desc *desc, u64 fault_addr) +static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr) { /* * Completion address can be bad as well. Check fault address match for descriptor * and completion address. 
*/ - if ((u64)desc->hw == fault_addr || - (u64)desc->completion == fault_addr) { - idxd_dma_complete_txd(desc, IDXD_COMPLETE_DEV_FAIL); + if ((u64)desc->hw == fault_addr || (u64)desc->completion == fault_addr) { + struct idxd_device *idxd = desc->wq->idxd; + struct device *dev = &idxd->pdev->dev; + + dev_warn(dev, "desc with fault address: %#llx\n", fault_addr); return true; } return false; } -static bool complete_desc(struct idxd_desc *desc) +static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason) { - if (desc->completion->status) { - idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL); - return true; - } - - return false; + idxd_dma_complete_txd(desc, reason); + idxd_free_desc(desc->wq, desc); } static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry, @@ -251,25 +267,25 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry, struct idxd_desc *desc, *t; struct llist_node *head; int queued = 0; - bool completed = false; unsigned long flags; + enum idxd_complete_type reason; *processed = 0; head = llist_del_all(&irq_entry->pending_llist); if (!head) goto out; - llist_for_each_entry_safe(desc, t, head, llnode) { - if (wtype == IRQ_WORK_NORMAL) - completed = complete_desc(desc); - else if (wtype == IRQ_WORK_PROCESS_FAULT) - completed = process_fault(desc, data); + if (wtype == IRQ_WORK_NORMAL) + reason = IDXD_COMPLETE_NORMAL; + else + reason = IDXD_COMPLETE_DEV_FAIL; - if (completed) { - idxd_free_desc(desc->wq, desc); + llist_for_each_entry_safe(desc, t, head, llnode) { + if (desc->completion->status) { + if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS) + match_fault(desc, data); + complete_desc(desc, reason); (*processed)++; - if (wtype == IRQ_WORK_PROCESS_FAULT) - break; } else { spin_lock_irqsave(&irq_entry->list_lock, flags); list_add_tail(&desc->list, @@ -287,42 +303,46 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry, enum irq_work_type wtype, int *processed, u64 data) { - struct list_head *node, *next; int queued = 0; - bool completed = false; unsigned long flags; + LIST_HEAD(flist); + struct idxd_desc *desc, *n; + enum idxd_complete_type reason; *processed = 0; - spin_lock_irqsave(&irq_entry->list_lock, flags); - if (list_empty(&irq_entry->work_list)) - goto out; - - list_for_each_safe(node, next, &irq_entry->work_list) { - struct idxd_desc *desc = - container_of(node, struct idxd_desc, list); + if (wtype == IRQ_WORK_NORMAL) + reason = IDXD_COMPLETE_NORMAL; + else + reason = IDXD_COMPLETE_DEV_FAIL; + /* + * This lock protects list corruption from access of list outside of the irq handler + * thread. 
+ */ + spin_lock_irqsave(&irq_entry->list_lock, flags); + if (list_empty(&irq_entry->work_list)) { spin_unlock_irqrestore(&irq_entry->list_lock, flags); - if (wtype == IRQ_WORK_NORMAL) - completed = complete_desc(desc); - else if (wtype == IRQ_WORK_PROCESS_FAULT) - completed = process_fault(desc, data); + return 0; + } - if (completed) { - spin_lock_irqsave(&irq_entry->list_lock, flags); + list_for_each_entry_safe(desc, n, &irq_entry->work_list, list) { + if (desc->completion->status) { list_del(&desc->list); - spin_unlock_irqrestore(&irq_entry->list_lock, flags); - idxd_free_desc(desc->wq, desc); (*processed)++; - if (wtype == IRQ_WORK_PROCESS_FAULT) - return queued; + list_add_tail(&desc->list, &flist); } else { queued++; } - spin_lock_irqsave(&irq_entry->list_lock, flags); } - out: spin_unlock_irqrestore(&irq_entry->list_lock, flags); + + list_for_each_entry(desc, &flist, list) { + if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS) + match_fault(desc, data); + complete_desc(desc, reason); + } + return queued; } diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 298460438bb4..f474a1232335 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -2401,7 +2401,8 @@ static int bcdma_alloc_chan_resources(struct dma_chan *chan) dev_err(ud->ddev.dev, "Descriptor pool allocation failed\n"); uc->use_dma_pool = false; - return -ENOMEM; + ret = -ENOMEM; + goto err_res_free; } uc->use_dma_pool = true; diff --git a/drivers/firmware/efi/apple-properties.c b/drivers/firmware/efi/apple-properties.c index 34f53d898acb..e1926483ae2f 100644 --- a/drivers/firmware/efi/apple-properties.c +++ b/drivers/firmware/efi/apple-properties.c @@ -3,8 +3,9 @@ * apple-properties.c - EFI device properties on Macs * Copyright (C) 2016 Lukas Wunner <lukas@wunner.de> * - * Note, all properties are considered as u8 arrays. - * To get a value of any of them the caller must use device_property_read_u8_array(). + * Properties are stored either as: + * u8 arrays which can be retrieved with device_property_read_u8_array() or + * booleans which can be queried with device_property_present(). 
*/ #define pr_fmt(fmt) "apple-properties: " fmt @@ -88,8 +89,12 @@ static void __init unmarshal_key_value_pairs(struct dev_header *dev_header, entry_data = ptr + key_len + sizeof(val_len); entry_len = val_len - sizeof(val_len); - entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data, - entry_len); + if (entry_len) + entry[i] = PROPERTY_ENTRY_U8_ARRAY_LEN(key, entry_data, + entry_len); + else + entry[i] = PROPERTY_ENTRY_BOOL(key); + if (dump_properties) { dev_info(dev, "property: %s\n", key); print_hex_dump(KERN_INFO, pr_fmt(), DUMP_PREFIX_OFFSET, diff --git a/drivers/firmware/imx/Kconfig b/drivers/firmware/imx/Kconfig index 1d2e5b85d7ca..c027d99f2a59 100644 --- a/drivers/firmware/imx/Kconfig +++ b/drivers/firmware/imx/Kconfig @@ -13,6 +13,7 @@ config IMX_DSP config IMX_SCU bool "IMX SCU Protocol driver" depends on IMX_MBOX + select SOC_BUS help The System Controller Firmware (SCFW) is a low-level system function which runs on a dedicated Cortex-M core to provide power, clock, and diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index c70f46e80a3b..fa225175e68d 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig @@ -441,8 +441,9 @@ config GPIO_MXC select GENERIC_IRQ_CHIP config GPIO_MXS - def_bool y + bool "Freescale MXS GPIO support" if COMPILE_TEST depends on ARCH_MXS || COMPILE_TEST + default y if ARCH_MXS select GPIO_GENERIC select GENERIC_IRQ_CHIP @@ -521,7 +522,8 @@ config GPIO_SAMA5D2_PIOBU config GPIO_SIFIVE bool "SiFive GPIO support" - depends on OF_GPIO && IRQ_DOMAIN_HIERARCHY + depends on OF_GPIO + select IRQ_DOMAIN_HIERARCHY select GPIO_GENERIC select GPIOLIB_IRQCHIP select REGMAP_MMIO @@ -597,6 +599,8 @@ config GPIO_TEGRA default ARCH_TEGRA depends on ARCH_TEGRA || COMPILE_TEST depends on OF_GPIO + select GPIOLIB_IRQCHIP + select IRQ_DOMAIN_HIERARCHY help Say yes here to support GPIO pins on NVIDIA Tegra SoCs. 
diff --git a/drivers/gpio/gpio-ep93xx.c b/drivers/gpio/gpio-ep93xx.c index 226da8df6f10..94d9fa0d6aa7 100644 --- a/drivers/gpio/gpio-ep93xx.c +++ b/drivers/gpio/gpio-ep93xx.c @@ -25,6 +25,9 @@ /* Maximum value for gpio line identifiers */ #define EP93XX_GPIO_LINE_MAX 63 +/* Number of GPIO chips in EP93XX */ +#define EP93XX_GPIO_CHIP_NUM 8 + /* Maximum value for irq capable line identifiers */ #define EP93XX_GPIO_LINE_MAX_IRQ 23 @@ -34,74 +37,75 @@ */ #define EP93XX_GPIO_F_IRQ_BASE 80 -struct ep93xx_gpio { - void __iomem *base; - struct gpio_chip gc[8]; +struct ep93xx_gpio_irq_chip { + struct irq_chip ic; + u8 irq_offset; + u8 int_unmasked; + u8 int_enabled; + u8 int_type1; + u8 int_type2; + u8 int_debounce; }; -/************************************************************************* - * Interrupt handling for EP93xx on-chip GPIOs - *************************************************************************/ -static unsigned char gpio_int_unmasked[3]; -static unsigned char gpio_int_enabled[3]; -static unsigned char gpio_int_type1[3]; -static unsigned char gpio_int_type2[3]; -static unsigned char gpio_int_debounce[3]; - -/* Port ordering is: A B F */ -static const u8 int_type1_register_offset[3] = { 0x90, 0xac, 0x4c }; -static const u8 int_type2_register_offset[3] = { 0x94, 0xb0, 0x50 }; -static const u8 eoi_register_offset[3] = { 0x98, 0xb4, 0x54 }; -static const u8 int_en_register_offset[3] = { 0x9c, 0xb8, 0x58 }; -static const u8 int_debounce_register_offset[3] = { 0xa8, 0xc4, 0x64 }; - -static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg, unsigned port) -{ - BUG_ON(port > 2); +struct ep93xx_gpio_chip { + struct gpio_chip gc; + struct ep93xx_gpio_irq_chip *eic; +}; - writeb_relaxed(0, epg->base + int_en_register_offset[port]); +struct ep93xx_gpio { + void __iomem *base; + struct ep93xx_gpio_chip gc[EP93XX_GPIO_CHIP_NUM]; +}; - writeb_relaxed(gpio_int_type2[port], - epg->base + int_type2_register_offset[port]); +#define to_ep93xx_gpio_chip(x) container_of(x, struct ep93xx_gpio_chip, gc) - writeb_relaxed(gpio_int_type1[port], - epg->base + int_type1_register_offset[port]); +static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc) +{ + struct ep93xx_gpio_chip *egc = to_ep93xx_gpio_chip(gc); - writeb(gpio_int_unmasked[port] & gpio_int_enabled[port], - epg->base + int_en_register_offset[port]); + return egc->eic; } -static int ep93xx_gpio_port(struct gpio_chip *gc) +/************************************************************************* + * Interrupt handling for EP93xx on-chip GPIOs + *************************************************************************/ +#define EP93XX_INT_TYPE1_OFFSET 0x00 +#define EP93XX_INT_TYPE2_OFFSET 0x04 +#define EP93XX_INT_EOI_OFFSET 0x08 +#define EP93XX_INT_EN_OFFSET 0x0c +#define EP93XX_INT_STATUS_OFFSET 0x10 +#define EP93XX_INT_RAW_STATUS_OFFSET 0x14 +#define EP93XX_INT_DEBOUNCE_OFFSET 0x18 + +static void ep93xx_gpio_update_int_params(struct ep93xx_gpio *epg, + struct ep93xx_gpio_irq_chip *eic) { - struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = 0; + writeb_relaxed(0, epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET); - while (port < ARRAY_SIZE(epg->gc) && gc != &epg->gc[port]) - port++; + writeb_relaxed(eic->int_type2, + epg->base + eic->irq_offset + EP93XX_INT_TYPE2_OFFSET); - /* This should not happen but is there as a last safeguard */ - if (port == ARRAY_SIZE(epg->gc)) { - pr_crit("can't find the GPIO port\n"); - return 0; - } + writeb_relaxed(eic->int_type1, + epg->base + eic->irq_offset + 
EP93XX_INT_TYPE1_OFFSET); - return port; + writeb_relaxed(eic->int_unmasked & eic->int_enabled, + epg->base + eic->irq_offset + EP93XX_INT_EN_OFFSET); } static void ep93xx_gpio_int_debounce(struct gpio_chip *gc, unsigned int offset, bool enable) { struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = ep93xx_gpio_port(gc); + struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); int port_mask = BIT(offset); if (enable) - gpio_int_debounce[port] |= port_mask; + eic->int_debounce |= port_mask; else - gpio_int_debounce[port] &= ~port_mask; + eic->int_debounce &= ~port_mask; - writeb(gpio_int_debounce[port], - epg->base + int_debounce_register_offset[port]); + writeb(eic->int_debounce, + epg->base + eic->irq_offset + EP93XX_INT_DEBOUNCE_OFFSET); } static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc) @@ -122,12 +126,12 @@ static void ep93xx_gpio_ab_irq_handler(struct irq_desc *desc) */ stat = readb(epg->base + EP93XX_GPIO_A_INT_STATUS); for_each_set_bit(offset, &stat, 8) - generic_handle_irq(irq_find_mapping(epg->gc[0].irq.domain, + generic_handle_irq(irq_find_mapping(epg->gc[0].gc.irq.domain, offset)); stat = readb(epg->base + EP93XX_GPIO_B_INT_STATUS); for_each_set_bit(offset, &stat, 8) - generic_handle_irq(irq_find_mapping(epg->gc[1].irq.domain, + generic_handle_irq(irq_find_mapping(epg->gc[1].gc.irq.domain, offset)); chained_irq_exit(irqchip, desc); @@ -153,52 +157,52 @@ static void ep93xx_gpio_f_irq_handler(struct irq_desc *desc) static void ep93xx_gpio_irq_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = ep93xx_gpio_port(gc); int port_mask = BIT(d->irq & 7); if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) { - gpio_int_type2[port] ^= port_mask; /* switch edge direction */ - ep93xx_gpio_update_int_params(epg, port); + eic->int_type2 ^= port_mask; /* switch edge direction */ + ep93xx_gpio_update_int_params(epg, eic); } - writeb(port_mask, epg->base + eoi_register_offset[port]); + writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET); } static void ep93xx_gpio_irq_mask_ack(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = ep93xx_gpio_port(gc); int port_mask = BIT(d->irq & 7); if (irqd_get_trigger_type(d) == IRQ_TYPE_EDGE_BOTH) - gpio_int_type2[port] ^= port_mask; /* switch edge direction */ + eic->int_type2 ^= port_mask; /* switch edge direction */ - gpio_int_unmasked[port] &= ~port_mask; - ep93xx_gpio_update_int_params(epg, port); + eic->int_unmasked &= ~port_mask; + ep93xx_gpio_update_int_params(epg, eic); - writeb(port_mask, epg->base + eoi_register_offset[port]); + writeb(port_mask, epg->base + eic->irq_offset + EP93XX_INT_EOI_OFFSET); } static void ep93xx_gpio_irq_mask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = ep93xx_gpio_port(gc); - gpio_int_unmasked[port] &= ~BIT(d->irq & 7); - ep93xx_gpio_update_int_params(epg, port); + eic->int_unmasked &= ~BIT(d->irq & 7); + ep93xx_gpio_update_int_params(epg, eic); } static void ep93xx_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct ep93xx_gpio_irq_chip *eic = 
to_ep93xx_gpio_irq_chip(gc); struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = ep93xx_gpio_port(gc); - gpio_int_unmasked[port] |= BIT(d->irq & 7); - ep93xx_gpio_update_int_params(epg, port); + eic->int_unmasked |= BIT(d->irq & 7); + ep93xx_gpio_update_int_params(epg, eic); } /* @@ -209,8 +213,8 @@ static void ep93xx_gpio_irq_unmask(struct irq_data *d) static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + struct ep93xx_gpio_irq_chip *eic = to_ep93xx_gpio_irq_chip(gc); struct ep93xx_gpio *epg = gpiochip_get_data(gc); - int port = ep93xx_gpio_port(gc); int offset = d->irq & 7; int port_mask = BIT(offset); irq_flow_handler_t handler; @@ -219,32 +223,32 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) switch (type) { case IRQ_TYPE_EDGE_RISING: - gpio_int_type1[port] |= port_mask; - gpio_int_type2[port] |= port_mask; + eic->int_type1 |= port_mask; + eic->int_type2 |= port_mask; handler = handle_edge_irq; break; case IRQ_TYPE_EDGE_FALLING: - gpio_int_type1[port] |= port_mask; - gpio_int_type2[port] &= ~port_mask; + eic->int_type1 |= port_mask; + eic->int_type2 &= ~port_mask; handler = handle_edge_irq; break; case IRQ_TYPE_LEVEL_HIGH: - gpio_int_type1[port] &= ~port_mask; - gpio_int_type2[port] |= port_mask; + eic->int_type1 &= ~port_mask; + eic->int_type2 |= port_mask; handler = handle_level_irq; break; case IRQ_TYPE_LEVEL_LOW: - gpio_int_type1[port] &= ~port_mask; - gpio_int_type2[port] &= ~port_mask; + eic->int_type1 &= ~port_mask; + eic->int_type2 &= ~port_mask; handler = handle_level_irq; break; case IRQ_TYPE_EDGE_BOTH: - gpio_int_type1[port] |= port_mask; + eic->int_type1 |= port_mask; /* set initial polarity based on current input level */ if (gc->get(gc, offset)) - gpio_int_type2[port] &= ~port_mask; /* falling */ + eic->int_type2 &= ~port_mask; /* falling */ else - gpio_int_type2[port] |= port_mask; /* rising */ + eic->int_type2 |= port_mask; /* rising */ handler = handle_edge_irq; break; default: @@ -253,22 +257,13 @@ static int ep93xx_gpio_irq_type(struct irq_data *d, unsigned int type) irq_set_handler_locked(d, handler); - gpio_int_enabled[port] |= port_mask; + eic->int_enabled |= port_mask; - ep93xx_gpio_update_int_params(epg, port); + ep93xx_gpio_update_int_params(epg, eic); return 0; } -static struct irq_chip ep93xx_gpio_irq_chip = { - .name = "GPIO", - .irq_ack = ep93xx_gpio_irq_ack, - .irq_mask_ack = ep93xx_gpio_irq_mask_ack, - .irq_mask = ep93xx_gpio_irq_mask, - .irq_unmask = ep93xx_gpio_irq_unmask, - .irq_set_type = ep93xx_gpio_irq_type, -}; - /************************************************************************* * gpiolib interface for EP93xx on-chip GPIOs *************************************************************************/ @@ -276,17 +271,19 @@ struct ep93xx_gpio_bank { const char *label; int data; int dir; + int irq; int base; bool has_irq; bool has_hierarchical_irq; unsigned int irq_base; }; -#define EP93XX_GPIO_BANK(_label, _data, _dir, _base, _has_irq, _has_hier, _irq_base) \ +#define EP93XX_GPIO_BANK(_label, _data, _dir, _irq, _base, _has_irq, _has_hier, _irq_base) \ { \ .label = _label, \ .data = _data, \ .dir = _dir, \ + .irq = _irq, \ .base = _base, \ .has_irq = _has_irq, \ .has_hierarchical_irq = _has_hier, \ @@ -295,16 +292,16 @@ struct ep93xx_gpio_bank { static struct ep93xx_gpio_bank ep93xx_gpio_banks[] = { /* Bank A has 8 IRQs */ - EP93XX_GPIO_BANK("A", 0x00, 0x10, 0, true, false, 64), + EP93XX_GPIO_BANK("A", 0x00, 0x10, 0x90, 0, true, false, 
64), /* Bank B has 8 IRQs */ - EP93XX_GPIO_BANK("B", 0x04, 0x14, 8, true, false, 72), - EP93XX_GPIO_BANK("C", 0x08, 0x18, 40, false, false, 0), - EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 24, false, false, 0), - EP93XX_GPIO_BANK("E", 0x20, 0x24, 32, false, false, 0), + EP93XX_GPIO_BANK("B", 0x04, 0x14, 0xac, 8, true, false, 72), + EP93XX_GPIO_BANK("C", 0x08, 0x18, 0x00, 40, false, false, 0), + EP93XX_GPIO_BANK("D", 0x0c, 0x1c, 0x00, 24, false, false, 0), + EP93XX_GPIO_BANK("E", 0x20, 0x24, 0x00, 32, false, false, 0), /* Bank F has 8 IRQs */ - EP93XX_GPIO_BANK("F", 0x30, 0x34, 16, false, true, 0), - EP93XX_GPIO_BANK("G", 0x38, 0x3c, 48, false, false, 0), - EP93XX_GPIO_BANK("H", 0x40, 0x44, 56, false, false, 0), + EP93XX_GPIO_BANK("F", 0x30, 0x34, 0x4c, 16, false, true, 0), + EP93XX_GPIO_BANK("G", 0x38, 0x3c, 0x00, 48, false, false, 0), + EP93XX_GPIO_BANK("H", 0x40, 0x44, 0x00, 56, false, false, 0), }; static int ep93xx_gpio_set_config(struct gpio_chip *gc, unsigned offset, @@ -326,13 +323,23 @@ static int ep93xx_gpio_f_to_irq(struct gpio_chip *gc, unsigned offset) return EP93XX_GPIO_F_IRQ_BASE + offset; } -static int ep93xx_gpio_add_bank(struct gpio_chip *gc, +static void ep93xx_init_irq_chip(struct device *dev, struct irq_chip *ic) +{ + ic->irq_ack = ep93xx_gpio_irq_ack; + ic->irq_mask_ack = ep93xx_gpio_irq_mask_ack; + ic->irq_mask = ep93xx_gpio_irq_mask; + ic->irq_unmask = ep93xx_gpio_irq_unmask; + ic->irq_set_type = ep93xx_gpio_irq_type; +} + +static int ep93xx_gpio_add_bank(struct ep93xx_gpio_chip *egc, struct platform_device *pdev, struct ep93xx_gpio *epg, struct ep93xx_gpio_bank *bank) { void __iomem *data = epg->base + bank->data; void __iomem *dir = epg->base + bank->dir; + struct gpio_chip *gc = &egc->gc; struct device *dev = &pdev->dev; struct gpio_irq_chip *girq; int err; @@ -346,8 +353,21 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc, girq = &gc->irq; if (bank->has_irq || bank->has_hierarchical_irq) { + struct irq_chip *ic; + gc->set_config = ep93xx_gpio_set_config; - girq->chip = &ep93xx_gpio_irq_chip; + egc->eic = devm_kcalloc(dev, 1, + sizeof(*egc->eic), + GFP_KERNEL); + if (!egc->eic) + return -ENOMEM; + egc->eic->irq_offset = bank->irq; + ic = &egc->eic->ic; + ic->name = devm_kasprintf(dev, GFP_KERNEL, "gpio-irq-%s", bank->label); + if (!ic->name) + return -ENOMEM; + ep93xx_init_irq_chip(dev, ic); + girq->chip = ic; } if (bank->has_irq) { @@ -389,7 +409,7 @@ static int ep93xx_gpio_add_bank(struct gpio_chip *gc, gpio_irq = EP93XX_GPIO_F_IRQ_BASE + i; irq_set_chip_data(gpio_irq, &epg->gc[5]); irq_set_chip_and_handler(gpio_irq, - &ep93xx_gpio_irq_chip, + girq->chip, handle_level_irq); irq_clear_status_flags(gpio_irq, IRQ_NOREQUEST); } @@ -415,7 +435,7 @@ static int ep93xx_gpio_probe(struct platform_device *pdev) return PTR_ERR(epg->base); for (i = 0; i < ARRAY_SIZE(ep93xx_gpio_banks); i++) { - struct gpio_chip *gc = &epg->gc[i]; + struct ep93xx_gpio_chip *gc = &epg->gc[i]; struct ep93xx_gpio_bank *bank = &ep93xx_gpio_banks[i]; if (ep93xx_gpio_add_bank(gc, pdev, epg, bank)) diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 672681a976f5..a912a8fed197 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -676,20 +676,17 @@ static void mvebu_pwm_get_state(struct pwm_chip *chip, else state->duty_cycle = 1; + val = (unsigned long long) u; /* on duration */ regmap_read(mvpwm->regs, mvebu_pwmreg_blink_off_duration(mvpwm), &u); - val = (unsigned long long) u * NSEC_PER_SEC; + val += (unsigned long long) u; /* period = on + off duration */ + 
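The ep93xx hunks above replace the driver-global gpio_int_*[] arrays with per-bank interrupt state reached through to_ep93xx_gpio_irq_chip(). A sketch of that state as it can be inferred from the usage above; the real definitions live in a part of the patch not quoted here and may differ in field types or naming:

struct ep93xx_gpio_irq_chip {
	struct irq_chip ic;
	u8 irq_offset;		/* base of this bank's interrupt register block (0x90, 0xac, 0x4c) */
	u8 int_unmasked;	/* per-line unmask bits; EN register gets unmasked & enabled */
	u8 int_enabled;		/* lines with an interrupt type configured */
	u8 int_type1;		/* level vs. edge select */
	u8 int_type2;		/* polarity / edge direction */
	u8 int_debounce;	/* debounce enable bits */
};

struct ep93xx_gpio_chip {
	struct gpio_chip gc;
	struct ep93xx_gpio_irq_chip *eic;	/* NULL for banks without IRQ support */
};

static struct ep93xx_gpio_irq_chip *to_ep93xx_gpio_irq_chip(struct gpio_chip *gc)
{
	struct ep93xx_gpio_chip *egc =
		container_of(gc, struct ep93xx_gpio_chip, gc);

	return egc->eic;
}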
val *= NSEC_PER_SEC; do_div(val, mvpwm->clk_rate); - if (val < state->duty_cycle) { + if (val > UINT_MAX) + state->period = UINT_MAX; + else if (val) + state->period = val; + else state->period = 1; - } else { - val -= state->duty_cycle; - if (val > UINT_MAX) - state->period = UINT_MAX; - else if (val) - state->period = val; - else - state->period = 1; - } regmap_read(mvchip->regs, GPIO_BLINK_EN_OFF + mvchip->offset, &u); if (u) diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index 12b679ca552c..1631727bf0da 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -776,6 +776,8 @@ static void edge_detector_stop(struct line *line) cancel_delayed_work_sync(&line->work); WRITE_ONCE(line->sw_debounced, 0); WRITE_ONCE(line->eflags, 0); + if (line->desc) + WRITE_ONCE(line->desc->debounce_period_us, 0); /* do not change line->level - see comment in debounced_value() */ } @@ -1979,6 +1981,21 @@ struct gpio_chardev_data { #endif }; +static int chipinfo_get(struct gpio_chardev_data *cdev, void __user *ip) +{ + struct gpio_device *gdev = cdev->gdev; + struct gpiochip_info chipinfo; + + memset(&chipinfo, 0, sizeof(chipinfo)); + + strscpy(chipinfo.name, dev_name(&gdev->dev), sizeof(chipinfo.name)); + strscpy(chipinfo.label, gdev->label, sizeof(chipinfo.label)); + chipinfo.lines = gdev->ngpio; + if (copy_to_user(ip, &chipinfo, sizeof(chipinfo))) + return -EFAULT; + return 0; +} + #ifdef CONFIG_GPIO_CDEV_V1 /* * returns 0 if the versions match, else the previously selected ABI version @@ -1993,6 +2010,41 @@ static int lineinfo_ensure_abi_version(struct gpio_chardev_data *cdata, return abiv; } + +static int lineinfo_get_v1(struct gpio_chardev_data *cdev, void __user *ip, + bool watch) +{ + struct gpio_desc *desc; + struct gpioline_info lineinfo; + struct gpio_v2_line_info lineinfo_v2; + + if (copy_from_user(&lineinfo, ip, sizeof(lineinfo))) + return -EFAULT; + + /* this doubles as a range check on line_offset */ + desc = gpiochip_get_desc(cdev->gdev->chip, lineinfo.line_offset); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + if (watch) { + if (lineinfo_ensure_abi_version(cdev, 1)) + return -EPERM; + + if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines)) + return -EBUSY; + } + + gpio_desc_to_lineinfo(desc, &lineinfo_v2); + gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo); + + if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) { + if (watch) + clear_bit(lineinfo.line_offset, cdev->watched_lines); + return -EFAULT; + } + + return 0; +} #endif static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip, @@ -2030,6 +2082,22 @@ static int lineinfo_get(struct gpio_chardev_data *cdev, void __user *ip, return 0; } +static int lineinfo_unwatch(struct gpio_chardev_data *cdev, void __user *ip) +{ + __u32 offset; + + if (copy_from_user(&offset, ip, sizeof(offset))) + return -EFAULT; + + if (offset >= cdev->gdev->ngpio) + return -EINVAL; + + if (!test_and_clear_bit(offset, cdev->watched_lines)) + return -EBUSY; + + return 0; +} + /* * gpio_ioctl() - ioctl handler for the GPIO chardev */ @@ -2037,80 +2105,24 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct gpio_chardev_data *cdev = file->private_data; struct gpio_device *gdev = cdev->gdev; - struct gpio_chip *gc = gdev->chip; void __user *ip = (void __user *)arg; - __u32 offset; /* We fail any subsequent ioctl():s when the chip is gone */ - if (!gc) + if (!gdev->chip) return -ENODEV; /* Fill in the struct and pass to userspace */ if (cmd == 
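The gpio-mvebu hunk above reworks how mvebu_pwm_get_state() derives the PWM period: the on and off blink durations are summed before the conversion to nanoseconds, instead of treating the off duration alone as the period and subtracting the duty cycle from it. A condensed sketch of the new computation; mvebu_pwm_period_ns is a hypothetical helper used only for illustration:

static u32 mvebu_pwm_period_ns(u32 on_ticks, u32 off_ticks, unsigned long clk_rate)
{
	u64 val = (u64)on_ticks + off_ticks;	/* period = on + off duration, in clock ticks */

	val *= NSEC_PER_SEC;			/* ticks -> nanoseconds */
	do_div(val, clk_rate);

	if (val > UINT_MAX)			/* clamp to what the PWM state field can hold */
		return UINT_MAX;

	return val ? (u32)val : 1;		/* never report a zero period */
}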
GPIO_GET_CHIPINFO_IOCTL) { - struct gpiochip_info chipinfo; - - memset(&chipinfo, 0, sizeof(chipinfo)); - - strscpy(chipinfo.name, dev_name(&gdev->dev), - sizeof(chipinfo.name)); - strscpy(chipinfo.label, gdev->label, - sizeof(chipinfo.label)); - chipinfo.lines = gdev->ngpio; - if (copy_to_user(ip, &chipinfo, sizeof(chipinfo))) - return -EFAULT; - return 0; + return chipinfo_get(cdev, ip); #ifdef CONFIG_GPIO_CDEV_V1 - } else if (cmd == GPIO_GET_LINEINFO_IOCTL) { - struct gpio_desc *desc; - struct gpioline_info lineinfo; - struct gpio_v2_line_info lineinfo_v2; - - if (copy_from_user(&lineinfo, ip, sizeof(lineinfo))) - return -EFAULT; - - /* this doubles as a range check on line_offset */ - desc = gpiochip_get_desc(gc, lineinfo.line_offset); - if (IS_ERR(desc)) - return PTR_ERR(desc); - - gpio_desc_to_lineinfo(desc, &lineinfo_v2); - gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo); - - if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) - return -EFAULT; - return 0; } else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) { return linehandle_create(gdev, ip); } else if (cmd == GPIO_GET_LINEEVENT_IOCTL) { return lineevent_create(gdev, ip); - } else if (cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) { - struct gpio_desc *desc; - struct gpioline_info lineinfo; - struct gpio_v2_line_info lineinfo_v2; - - if (copy_from_user(&lineinfo, ip, sizeof(lineinfo))) - return -EFAULT; - - /* this doubles as a range check on line_offset */ - desc = gpiochip_get_desc(gc, lineinfo.line_offset); - if (IS_ERR(desc)) - return PTR_ERR(desc); - - if (lineinfo_ensure_abi_version(cdev, 1)) - return -EPERM; - - if (test_and_set_bit(lineinfo.line_offset, cdev->watched_lines)) - return -EBUSY; - - gpio_desc_to_lineinfo(desc, &lineinfo_v2); - gpio_v2_line_info_to_v1(&lineinfo_v2, &lineinfo); - - if (copy_to_user(ip, &lineinfo, sizeof(lineinfo))) { - clear_bit(lineinfo.line_offset, cdev->watched_lines); - return -EFAULT; - } - - return 0; + } else if (cmd == GPIO_GET_LINEINFO_IOCTL || + cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) { + return lineinfo_get_v1(cdev, ip, + cmd == GPIO_GET_LINEINFO_WATCH_IOCTL); #endif /* CONFIG_GPIO_CDEV_V1 */ } else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL || cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) { @@ -2119,16 +2131,7 @@ static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } else if (cmd == GPIO_V2_GET_LINE_IOCTL) { return linereq_create(gdev, ip); } else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) { - if (copy_from_user(&offset, ip, sizeof(offset))) - return -EFAULT; - - if (offset >= cdev->gdev->ngpio) - return -EINVAL; - - if (!test_and_clear_bit(offset, cdev->watched_lines)) - return -EBUSY; - - return 0; + return lineinfo_unwatch(cdev, ip); } return -EINVAL; } diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index b02cc2abd3b6..97eec8d8dbdc 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -603,7 +603,11 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, ret = gdev->id; goto err_free_gdev; } - dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id); + + ret = dev_set_name(&gdev->dev, GPIOCHIP_NAME "%d", gdev->id); + if (ret) + goto err_free_ida; + device_initialize(&gdev->dev); dev_set_drvdata(&gdev->dev, gdev); if (gc->parent && gc->parent->driver) @@ -617,7 +621,7 @@ int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data, gdev->descs = kcalloc(gc->ngpio, sizeof(gdev->descs[0]), GFP_KERNEL); if (!gdev->descs) { ret = -ENOMEM; - goto err_free_ida; + goto err_free_dev_name; } if (gc->ngpio == 0) { @@ -768,6 +772,8 @@ err_free_label: 
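For readability, this is how the ioctl dispatcher reads once the gpiolib-cdev hunk above is applied, with the per-command bodies moved into chipinfo_get(), lineinfo_get_v1() and lineinfo_unwatch(). This is a reconstruction from the added and context lines, lightly reindented, not a verbatim copy of the resulting file:

static long gpio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct gpio_chardev_data *cdev = file->private_data;
	struct gpio_device *gdev = cdev->gdev;
	void __user *ip = (void __user *)arg;

	/* We fail any subsequent ioctl():s when the chip is gone */
	if (!gdev->chip)
		return -ENODEV;

	/* Fill in the struct and pass to userspace */
	if (cmd == GPIO_GET_CHIPINFO_IOCTL) {
		return chipinfo_get(cdev, ip);
#ifdef CONFIG_GPIO_CDEV_V1
	} else if (cmd == GPIO_GET_LINEHANDLE_IOCTL) {
		return linehandle_create(gdev, ip);
	} else if (cmd == GPIO_GET_LINEEVENT_IOCTL) {
		return lineevent_create(gdev, ip);
	} else if (cmd == GPIO_GET_LINEINFO_IOCTL ||
		   cmd == GPIO_GET_LINEINFO_WATCH_IOCTL) {
		return lineinfo_get_v1(cdev, ip,
				       cmd == GPIO_GET_LINEINFO_WATCH_IOCTL);
#endif /* CONFIG_GPIO_CDEV_V1 */
	} else if (cmd == GPIO_V2_GET_LINEINFO_IOCTL ||
		   cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL) {
		return lineinfo_get(cdev, ip,
				    cmd == GPIO_V2_GET_LINEINFO_WATCH_IOCTL);
	} else if (cmd == GPIO_V2_GET_LINE_IOCTL) {
		return linereq_create(gdev, ip);
	} else if (cmd == GPIO_GET_LINEINFO_UNWATCH_IOCTL) {
		return lineinfo_unwatch(cdev, ip);
	}
	return -EINVAL;
}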
kfree_const(gdev->label); err_free_descs: kfree(gdev->descs); +err_free_dev_name: + kfree(dev_name(&gdev->dev)); err_free_ida: ida_free(&gpio_ida, gdev->id); err_free_gdev: @@ -1489,6 +1495,9 @@ static int gpiochip_add_irqchip(struct gpio_chip *gc, type = IRQ_TYPE_NONE; } + if (gc->to_irq) + chip_warn(gc, "to_irq is redefined in %s and you shouldn't rely on it\n", __func__); + gc->to_irq = gpiochip_to_irq; gc->irq.default_type = type; gc->irq.lock_key = lock_key; @@ -2548,7 +2557,7 @@ int gpiod_get_array_value_complex(bool raw, bool can_sleep, struct gpio_chip *gc = desc_array[i]->gdev->chip; unsigned long fastpath[2 * BITS_TO_LONGS(FASTPATH_NGPIO)]; unsigned long *mask, *bits; - int first, j, ret; + int first, j; if (likely(gc->ngpio <= FASTPATH_NGPIO)) { mask = fastpath; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 2d991da2cead..d1ed4f8df2b7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -26,6 +26,7 @@ #include <linux/sched/task.h> #include "amdgpu_object.h" +#include "amdgpu_gem.h" #include "amdgpu_vm.h" #include "amdgpu_amdkfd.h" #include "amdgpu_dma_buf.h" @@ -1152,7 +1153,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct sg_table *sg = NULL; uint64_t user_addr = 0; struct amdgpu_bo *bo; - struct amdgpu_bo_param bp; + struct drm_gem_object *gobj; u32 domain, alloc_domain; u64 alloc_flags; int ret; @@ -1220,19 +1221,14 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s\n", va, size, domain_string(alloc_domain)); - memset(&bp, 0, sizeof(bp)); - bp.size = size; - bp.byte_align = 1; - bp.domain = alloc_domain; - bp.flags = alloc_flags; - bp.type = bo_type; - bp.resv = NULL; - ret = amdgpu_bo_create(adev, &bp, &bo); + ret = amdgpu_gem_object_create(adev, size, 1, alloc_domain, alloc_flags, + bo_type, NULL, &gobj); if (ret) { pr_debug("Failed to create BO on domain %s. 
ret %d\n", - domain_string(alloc_domain), ret); + domain_string(alloc_domain), ret); goto err_bo_create; } + bo = gem_to_amdgpu_bo(gobj); if (bo_type == ttm_bo_type_sg) { bo->tbo.sg = sg; bo->tbo.ttm->sg = sg; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 087afab67e22..cab1ebaf6d62 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -81,7 +81,6 @@ MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/vangogh_gpu_info.bin"); -MODULE_FIRMWARE("amdgpu/green_sardine_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index f764803c53a4..48cb33e5b382 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -926,8 +926,10 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { - struct drm_gem_object *obj; struct amdgpu_framebuffer *amdgpu_fb; + struct drm_gem_object *obj; + struct amdgpu_bo *bo; + uint32_t domains; int ret; obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]); @@ -938,7 +940,9 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev, } /* Handle is imported dma-buf, so cannot be migrated to VRAM for scanout */ - if (obj->import_attach) { + bo = gem_to_amdgpu_bo(obj); + domains = amdgpu_display_supported_domains(drm_to_adev(dev), bo->flags); + if (obj->import_attach && !(domains & AMDGPU_GEM_DOMAIN_GTT)) { drm_dbg_kms(dev, "Cannot create framebuffer from imported dma_buf\n"); return ERR_PTR(-EINVAL); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index d0a1fee1f5f6..174a73eb23f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -269,8 +269,8 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data, resv = vm->root.base.bo->tbo.base.resv; } -retry: initial_domain = (u32)(0xffffffff & args->in.domains); +retry: r = amdgpu_gem_object_create(adev, size, args->in.alignment, initial_domain, flags, ttm_bo_type_device, resv, &gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 25ec4d57333f..b4c8e5d5c763 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -897,7 +897,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain, return -EINVAL; /* A shared bo cannot be migrated to VRAM */ - if (bo->prime_shared_count) { + if (bo->prime_shared_count || bo->tbo.base.import_attach) { if (domain & AMDGPU_GEM_DOMAIN_GTT) domain = AMDGPU_GEM_DOMAIN_GTT; else diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 619d34c041ee..d86b42a36560 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -99,6 +99,10 @@ #define mmGCR_GENERAL_CNTL_Sienna_Cichlid 0x1580 #define mmGCR_GENERAL_CNTL_Sienna_Cichlid_BASE_IDX 0 +#define mmCGTS_TCC_DISABLE_Vangogh 0x5006 +#define mmCGTS_TCC_DISABLE_Vangogh_BASE_IDX 1 +#define mmCGTS_USER_TCC_DISABLE_Vangogh 0x5007 +#define mmCGTS_USER_TCC_DISABLE_Vangogh_BASE_IDX 1 #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh 0x0025 #define mmGOLDEN_TSC_COUNT_UPPER_Vangogh_BASE_IDX 1 #define 
mmGOLDEN_TSC_COUNT_LOWER_Vangogh 0x0026 @@ -119,6 +123,8 @@ #define mmVGT_ESGS_RING_SIZE_Vangogh_BASE_IDX 1 #define mmSPI_CONFIG_CNTL_Vangogh 0x2440 #define mmSPI_CONFIG_CNTL_Vangogh_BASE_IDX 1 +#define mmGCR_GENERAL_CNTL_Vangogh 0x1580 +#define mmGCR_GENERAL_CNTL_Vangogh_BASE_IDX 0 #define mmCP_HYP_PFP_UCODE_ADDR 0x5814 #define mmCP_HYP_PFP_UCODE_ADDR_BASE_IDX 1 @@ -3244,7 +3250,7 @@ static const struct soc15_reg_golden golden_settings_gc_10_3_vangogh[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_DEBUG4, 0xffffffff, 0x00800000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmDB_EXCEPTION_CONTROL, 0x7fff0f1f, 0x00b80000), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGB_ADDR_CONFIG, 0x0c1807ff, 0x00000142), - SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL, 0x1ff1ffff, 0x00000500), + SOC15_REG_GOLDEN_VALUE(GC, 0, mmGCR_GENERAL_CNTL_Vangogh, 0x1ff1ffff, 0x00000500), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL1_PIPE_STEER, 0x000000ff, 0x000000e4), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_0, 0x77777777, 0x32103210), SOC15_REG_GOLDEN_VALUE(GC, 0, mmGL2_PIPE_STEER_1, 0x77777777, 0x32103210), @@ -4934,8 +4940,18 @@ static void gfx_v10_0_tcp_harvest(struct amdgpu_device *adev) static void gfx_v10_0_get_tcc_info(struct amdgpu_device *adev) { /* TCCs are global (not instanced). */ - uint32_t tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) | - RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE); + uint32_t tcc_disable; + + switch (adev->asic_type) { + case CHIP_VANGOGH: + tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE_Vangogh) | + RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE_Vangogh); + break; + default: + tcc_disable = RREG32_SOC15(GC, 0, mmCGTS_TCC_DISABLE) | + RREG32_SOC15(GC, 0, mmCGTS_USER_TCC_DISABLE); + break; + } adev->gfx.config.tcc_disabled_mask = REG_GET_FIELD(tcc_disable, CGTS_TCC_DISABLE, TCC_DISABLE) | diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c index 07104a1de308..1961745e89c7 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_3.c @@ -491,12 +491,11 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev, { uint32_t def, data, def1, data1; - def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); + def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL); def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)) { - data |= MM_ATC_L2_MISC_CG__ENABLE_MASK; - + data &= ~MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK; data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | @@ -505,8 +504,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev, DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK); } else { - data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK; - + data |= MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK; data1 |= (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | @@ -516,7 +514,7 @@ mmhub_v2_3_update_medium_grain_clock_gating(struct amdgpu_device *adev, } if (def != data) - WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data); + WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data); if (def1 != data1) WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2, data1); } @@ -525,17 +523,44 @@ static void mmhub_v2_3_update_medium_grain_light_sleep(struct amdgpu_device *adev, bool enable) { - uint32_t def, data; - - def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); - - if (enable && (adev->cg_flags & 
AMD_CG_SUPPORT_MC_LS)) - data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; - else - data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; + uint32_t def, data, def1, data1, def2, data2; + + def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL); + def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL); + def2 = data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL); + + if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) { + data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK; + data1 &= !(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); + data2 &= !(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); + } else { + data |= MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK; + data1 |= (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); + data2 |= (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK); + } if (def != data) - WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data); + WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL, data); + if (def1 != data1) + WREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL, data1); + if (def2 != data2) + WREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL, data2); } static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev, @@ -554,26 +579,39 @@ static int mmhub_v2_3_set_clockgating(struct amdgpu_device *adev, static void mmhub_v2_3_get_clockgating(struct amdgpu_device *adev, u32 *flags) { - int data, data1; + int data, data1, data2, data3; if (amdgpu_sriov_vf(adev)) *flags = 0; - data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); - data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); + data = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2); + data1 = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_CGTT_CLK_CTRL); + data2 = RREG32_SOC15(MMHUB, 0, mmDAGB0_WR_CGTT_CLK_CTRL); + data3 = RREG32_SOC15(MMHUB, 0, mmDAGB0_RD_CGTT_CLK_CTRL); /* AMD_CG_SUPPORT_MC_MGCG */ - if ((data & MM_ATC_L2_MISC_CG__ENABLE_MASK) && - !(data1 & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | + if (!(data & (DAGB0_CNTL_MISC2__DISABLE_WRREQ_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_WRRET_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_RDREQ_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_RDRET_CG_MASK | DAGB0_CNTL_MISC2__DISABLE_TLBWR_CG_MASK | - DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK))) - *flags |= AMD_CG_SUPPORT_MC_MGCG; + DAGB0_CNTL_MISC2__DISABLE_TLBRD_CG_MASK)) + && !(data1 & MM_ATC_L2_CGTT_CLK_CTRL__SOFT_OVERRIDE_MASK)) { + *flags |= AMD_CG_SUPPORT_MC_MGCG; + } /* AMD_CG_SUPPORT_MC_LS */ - if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK) + if (!(data1 & MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK) + && !(data2 & (DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + 
DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK)) + && !(data3 & (DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK | + DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK))) *flags |= AMD_CG_SUPPORT_MC_LS; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c6da89df055d..961abf1cf040 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1833,8 +1833,8 @@ static void emulated_link_detect(struct dc_link *link) link->type = dc_connection_none; prev_sink = link->local_sink; - if (prev_sink != NULL) - dc_sink_retain(prev_sink); + if (prev_sink) + dc_sink_release(prev_sink); switch (link->connector_signal) { case SIGNAL_TYPE_HDMI_TYPE_A: { @@ -1934,7 +1934,7 @@ static void dm_gpureset_commit_state(struct dc_state *dc_state, dc_commit_updates_for_stream( dm->dc, bundle->surface_updates, dc_state->stream_status->plane_count, - dc_state->streams[k], &bundle->stream_update, dc_state); + dc_state->streams[k], &bundle->stream_update); } cleanup: @@ -1965,8 +1965,7 @@ static void dm_set_dpms_off(struct dc_link *link) stream_update.stream = stream_state; dc_commit_updates_for_stream(stream_state->ctx->dc, NULL, 0, - stream_state, &stream_update, - stream_state->ctx->dc->current_state); + stream_state, &stream_update); mutex_unlock(&adev->dm.dc_lock); } @@ -2330,8 +2329,10 @@ void amdgpu_dm_update_connector_after_detect( * TODO: check if we still need the S3 mode update workaround. * If yes, put it here. */ - if (aconnector->dc_sink) + if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps(connector, NULL); + dc_sink_release(aconnector->dc_sink); + } aconnector->dc_sink = sink; dc_sink_retain(aconnector->dc_sink); @@ -2347,8 +2348,6 @@ void amdgpu_dm_update_connector_after_detect( drm_connector_update_edid_property(connector, aconnector->edid); - drm_add_edid_modes(connector, aconnector->edid); - if (aconnector->dc_link->aux_mode) drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, aconnector->edid); @@ -7549,7 +7548,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, struct drm_crtc *pcrtc, bool wait_for_vblank) { - uint32_t i; + int i; uint64_t timestamp_ns; struct drm_plane *plane; struct drm_plane_state *old_plane_state, *new_plane_state; @@ -7590,7 +7589,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, amdgpu_dm_commit_cursors(state); /* update planes when needed */ - for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { struct drm_crtc *crtc = new_plane_state->crtc; struct drm_crtc_state *new_crtc_state; struct drm_framebuffer *fb = new_plane_state->fb; @@ -7813,8 +7812,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates, planes_count, acrtc_state->stream, - &bundle->stream_update, - dc_state); + &bundle->stream_update); /** * Enable or disable the interrupts on the backend. 
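In the mmhub_v2_3 light-sleep hunk above, the enable branch masks data1 and data2 with a logical negation, !(...), of the DAGB0 override bits. Since the combined mask is non-zero, that expression evaluates to 0 and wipes the whole cached register value rather than clearing only the override bits; bitwise negation looks like the intent. A corrected sketch of that branch, offered as an assumption about the intended behaviour rather than what the hunk actually merges:

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)) {
		data &= ~MM_ATC_L2_CGTT_CLK_CTRL__MGLS_OVERRIDE_MASK;
		data1 &= ~(DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			   DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			   DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			   DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			   DAGB0_WR_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
		data2 &= ~(DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_MASK |
			   DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_WRITE_MASK |
			   DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_READ_MASK |
			   DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_RETURN_MASK |
			   DAGB0_RD_CGTT_CLK_CTRL__LS_OVERRIDE_REGISTER_MASK);
	}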
@@ -8150,13 +8148,13 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); - struct dc_surface_update dummy_updates[MAX_SURFACES]; + struct dc_surface_update surface_updates[MAX_SURFACES]; struct dc_stream_update stream_update; struct dc_info_packet hdr_packet; struct dc_stream_status *status = NULL; bool abm_changed, hdr_changed, scaling_changed; - memset(&dummy_updates, 0, sizeof(dummy_updates)); + memset(&surface_updates, 0, sizeof(surface_updates)); memset(&stream_update, 0, sizeof(stream_update)); if (acrtc) { @@ -8213,16 +8211,15 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * To fix this, DC should permit updating only stream properties. */ for (j = 0; j < status->plane_count; j++) - dummy_updates[j].surface = status->plane_states[0]; + surface_updates[j].surface = status->plane_states[j]; mutex_lock(&dm->dc_lock); dc_commit_updates_for_stream(dm->dc, - dummy_updates, + surface_updates, status->plane_count, dm_new_crtc_state->stream, - &stream_update, - dc_state); + &stream_update); mutex_unlock(&dm->dc_lock); } @@ -8359,14 +8356,14 @@ static int dm_force_atomic_commit(struct drm_connector *connector) ret = PTR_ERR_OR_ZERO(conn_state); if (ret) - goto err; + goto out; /* Attach crtc to drm_atomic_state*/ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base); ret = PTR_ERR_OR_ZERO(crtc_state); if (ret) - goto err; + goto out; /* force a restore */ crtc_state->mode_changed = true; @@ -8376,17 +8373,15 @@ static int dm_force_atomic_commit(struct drm_connector *connector) ret = PTR_ERR_OR_ZERO(plane_state); if (ret) - goto err; - + goto out; /* Call commit internally with the state we just constructed */ ret = drm_atomic_commit(state); - if (!ret) - return 0; -err: - DRM_ERROR("Restoring old state failed with %i\n", ret); +out: drm_atomic_state_put(state); + if (ret) + DRM_ERROR("Restoring old state failed with %i\n", ret); return ret; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 8ab0b9060d2b..f2d8cf34be46 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -833,6 +833,9 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, if (computed_streams[i]) continue; + if (dcn20_remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) + return false; + mutex_lock(&aconnector->mst_mgr.lock); if (!compute_mst_dsc_configs_for_link(state, dc_state, stream->link)) { mutex_unlock(&aconnector->mst_mgr.lock); @@ -850,7 +853,8 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, stream = dc_state->streams[i]; if (stream->timing.flags.DSC == 1) - dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream); + if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) + return false; } return true; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index 5b466f440d67..ab98c259ef69 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -251,6 +251,7 @@ static void 
dcn3_update_clocks(struct clk_mgr *clk_mgr_base, struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; bool force_reset = false; bool update_uclk = false; + bool p_state_change_support; if (dc->work_arounds.skip_clock_update || !clk_mgr->smu_present) return; @@ -291,8 +292,9 @@ static void dcn3_update_clocks(struct clk_mgr *clk_mgr_base, clk_mgr_base->clks.socclk_khz = new_clocks->socclk_khz; clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support; - if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { - clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support; + p_state_change_support = new_clocks->p_state_change_support || (display_count == 0); + if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) { + clk_mgr_base->clks.p_state_change_support = p_state_change_support; /* to disable P-State switching, set UCLK min = max */ if (!clk_mgr_base->clks.p_state_change_support) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 58eb0d69873a..6cf1a5a2a5ec 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2679,8 +2679,7 @@ void dc_commit_updates_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, - struct dc_stream_update *stream_update, - struct dc_state *state) + struct dc_stream_update *stream_update) { const struct dc_stream_status *stream_status; enum surface_update_type update_type; @@ -2699,6 +2698,12 @@ void dc_commit_updates_for_stream(struct dc *dc, if (update_type >= UPDATE_TYPE_FULL) { + struct dc_plane_state *new_planes[MAX_SURFACES]; + + memset(new_planes, 0, sizeof(new_planes)); + + for (i = 0; i < surface_count; i++) + new_planes[i] = srf_updates[i].surface; /* initialize scratch memory for building context */ context = dc_create_state(dc); @@ -2707,15 +2712,21 @@ void dc_commit_updates_for_stream(struct dc *dc, return; } - dc_resource_state_copy_construct(state, context); + dc_resource_state_copy_construct( + dc->current_state, context); - for (i = 0; i < dc->res_pool->pipe_count; i++) { - struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; - struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i]; + /*remove old surfaces from context */ + if (!dc_rem_all_planes_for_stream(dc, stream, context)) { + DC_ERROR("Failed to remove streams for new validate context!\n"); + return; + } - if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state) - new_pipe->plane_state->force_full_update = true; + /* add surface to context */ + if (!dc_add_all_planes_for_stream(dc, stream, new_planes, surface_count, context)) { + DC_ERROR("Failed to add streams for new validate context!\n"); + return; } + } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 1bd1a0935290..1e4794e2825c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -892,13 +892,13 @@ static uint32_t translate_training_aux_read_interval(uint32_t dpcd_aux_read_inte switch (dpcd_aux_read_interval) { case 0x01: - aux_rd_interval_us = 400; + aux_rd_interval_us = 4000; break; case 0x02: - aux_rd_interval_us = 4000; + aux_rd_interval_us = 8000; break; case 0x03: - aux_rd_interval_us = 8000; + 
aux_rd_interval_us = 12000; break; case 0x04: aux_rd_interval_us = 16000; @@ -2399,6 +2399,9 @@ static bool decide_dp_link_settings(struct dc_link *link, struct dc_link_setting initial_link_setting; uint32_t link_bw; + if (req_bw > dc_link_bandwidth_kbps(link, &link->verified_link_cap)) + return false; + /* search for the minimum link setting that: * 1. is supported according to the link training result * 2. could support the b/w requested by the timing @@ -3045,14 +3048,14 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && - pipe_ctx->stream->link == link) + pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) core_link_disable_stream(pipe_ctx); } for (i = 0; i < MAX_PIPES; i++) { pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && - pipe_ctx->stream->link == link) + pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) core_link_enable_stream(link->dc->current_state, pipe_ctx); } diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index b7910976b81a..e243c01b9672 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -283,8 +283,7 @@ void dc_commit_updates_for_stream(struct dc *dc, struct dc_surface_update *srf_updates, int surface_count, struct dc_stream_state *stream, - struct dc_stream_update *stream_update, - struct dc_state *state); + struct dc_stream_update *stream_update); /* * Log the current stream state. */ diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index cfc130e2d6fd..017b67b830e6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -647,8 +647,13 @@ static void power_on_plane( if (REG(DC_IP_REQUEST_CNTL)) { REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - hws->funcs.dpp_pg_control(hws, plane_id, true); - hws->funcs.hubp_pg_control(hws, plane_id, true); + + if (hws->funcs.dpp_pg_control) + hws->funcs.dpp_pg_control(hws, plane_id, true); + + if (hws->funcs.hubp_pg_control) + hws->funcs.hubp_pg_control(hws, plane_id, true); + REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); DC_LOG_DEBUG( @@ -1082,8 +1087,13 @@ void dcn10_plane_atomic_power_down(struct dc *dc, if (REG(DC_IP_REQUEST_CNTL)) { REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - hws->funcs.dpp_pg_control(hws, dpp->inst, false); - hws->funcs.hubp_pg_control(hws, hubp->inst, false); + + if (hws->funcs.dpp_pg_control) + hws->funcs.dpp_pg_control(hws, dpp->inst, false); + + if (hws->funcs.hubp_pg_control) + hws->funcs.hubp_pg_control(hws, hubp->inst, false); + dpp->funcs->dpp_reset(dpp); REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index cb822df21b7c..480d928cb1ca 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1062,8 +1062,13 @@ static void dcn20_power_on_plane( if (REG(DC_IP_REQUEST_CNTL)) { REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 1); - dcn20_dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true); - dcn20_hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true); + + 
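The dc_link_dp.c hunk above realigns translate_training_aux_read_interval() with the DPCD TRAINING_AUX_RD_INTERVAL encoding, where a non-zero value N means roughly N x 4 ms between status reads during channel equalization. A closed-form sketch of the same mapping, assuming the function's existing 400 us fallback for a value of 0; this is not the driver's actual helper:

static uint32_t training_aux_rd_interval_us(uint32_t dpcd_val)
{
	/* DPCD 0x0000E: 0 -> default 400 us, 1..4 -> 4/8/12/16 ms */
	return dpcd_val ? dpcd_val * 4000 : 400;
}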
if (hws->funcs.dpp_pg_control) + hws->funcs.dpp_pg_control(hws, pipe_ctx->plane_res.dpp->inst, true); + + if (hws->funcs.hubp_pg_control) + hws->funcs.hubp_pg_control(hws, pipe_ctx->plane_res.hubp->inst, true); + REG_SET(DC_IP_REQUEST_CNTL, 0, IP_REQUEST_EN, 0); DC_LOG_DEBUG( diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index e04ecf0fc0db..d6b488561871 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -297,8 +297,8 @@ static struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = { }, }, .num_states = 5, - .sr_exit_time_us = 11.6, - .sr_enter_plus_exit_time_us = 13.9, + .sr_exit_time_us = 8.6, + .sr_enter_plus_exit_time_us = 10.9, .urgent_latency_us = 4.0, .urgent_latency_pixel_data_only_us = 4.0, .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, @@ -2517,8 +2517,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, * if this primary pipe has a bottom pipe in prev. state * and if the bottom pipe is still available (which it should be), * pick that pipe as secondary - * Same logic applies for ODM pipes. Since mpo is not allowed with odm - * check in else case. + * Same logic applies for ODM pipes */ if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe) { preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].bottom_pipe->pipe_idx; @@ -2526,7 +2525,9 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc, secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; secondary_pipe->pipe_idx = preferred_pipe_idx; } - } else if (dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) { + } + if (secondary_pipe == NULL && + dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe) { preferred_pipe_idx = dc->current_state->res_ctx.pipe_ctx[primary_pipe->pipe_idx].next_odm_pipe->pipe_idx; if (res_ctx->pipe_ctx[preferred_pipe_idx].stream == NULL) { secondary_pipe = &res_ctx->pipe_ctx[preferred_pipe_idx]; diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c index 1c88d2edd381..674376428916 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c @@ -296,7 +296,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = { .num_banks = 8, .num_chans = 4, .vmm_page_size_bytes = 4096, - .dram_clock_change_latency_us = 23.84, + .dram_clock_change_latency_us = 11.72, .return_bus_width_bytes = 64, .dispclk_dppclk_vco_speed_mhz = 3600, .xfc_bus_transport_time_us = 4, @@ -906,6 +906,8 @@ enum dcn20_clk_src_array_id { DCN20_CLK_SRC_PLL0, DCN20_CLK_SRC_PLL1, DCN20_CLK_SRC_PLL2, + DCN20_CLK_SRC_PLL3, + DCN20_CLK_SRC_PLL4, DCN20_CLK_SRC_TOTAL_DCN21 }; @@ -2030,6 +2032,14 @@ static bool dcn21_resource_construct( dcn21_clock_source_create(ctx, ctx->dc_bios, CLOCK_SOURCE_COMBO_PHY_PLL2, &clk_src_regs[2], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL3] = + dcn21_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL3, + &clk_src_regs[3], false); + pool->base.clock_sources[DCN20_CLK_SRC_PLL4] = + dcn21_clock_source_create(ctx, ctx->dc_bios, + CLOCK_SOURCE_COMBO_PHY_PLL4, + &clk_src_regs[4], false); pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile index c20331eb62e0..dfd77b3cc84d 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile @@ -32,8 +32,8 @@ DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \ ifdef CONFIG_X86 -CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse -CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse +CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -msse +CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -msse endif ifdef CONFIG_PPC64 @@ -45,6 +45,8 @@ ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) IS_OLD_GCC = 1 endif +CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mhard-float +CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mhard-float endif ifdef CONFIG_X86 diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile index 3ca7d911d25c..09264716d1dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn301/Makefile @@ -14,7 +14,7 @@ DCN301 = dcn301_init.o dcn301_resource.o dcn301_dccg.o \ dcn301_dio_link_encoder.o dcn301_hwseq.o dcn301_panel_cntl.o dcn301_hubbub.o ifdef CONFIG_X86 -CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -mhard-float -msse +CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o := -msse endif ifdef CONFIG_PPC64 @@ -25,6 +25,7 @@ ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) IS_OLD_GCC = 1 endif +CFLAGS_$(AMDDALPATH)/dc/dcn301/dcn301_resource.o += -mhard-float endif ifdef CONFIG_X86 diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile index 8d4924b7dc22..101620a8867a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile @@ -13,7 +13,7 @@ DCN3_02 = dcn302_init.o dcn302_hwseq.o dcn302_resource.o ifdef CONFIG_X86 -CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -msse +CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -msse endif ifdef CONFIG_PPC64 @@ -24,6 +24,7 @@ ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) IS_OLD_GCC = 1 endif +CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -mhard-float endif ifdef CONFIG_X86 diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h index 4bdbcce7092d..0d797fa9f5cc 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h @@ -553,6 +553,7 @@ struct pptable_funcs { *clock_req); uint32_t (*get_fan_control_mode)(struct smu_context *smu); int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); + int (*set_fan_speed_percent)(struct smu_context *smu, uint32_t speed); int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed); int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate); int (*gfx_off_control)(struct smu_context *smu, bool enable); diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h index 13de692a4213..5d0b29653ffa 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h @@ -203,6 +203,9 @@ int smu_v11_0_set_fan_control_mode(struct smu_context *smu, uint32_t mode); +int +smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed); + int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 8b867a6d52b5..e84c737e3967 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c 
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -2151,19 +2151,14 @@ int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed) int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) { int ret = 0; - uint32_t rpm; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->set_fan_speed_rpm) { - if (speed > 100) - speed = 100; - rpm = speed * smu->fan_max_rpm / 100; - ret = smu->ppt_funcs->set_fan_speed_rpm(smu, rpm); - } + if (smu->ppt_funcs->set_fan_speed_percent) + ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed); mutex_unlock(&smu->mutex); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index cd7b411457ff..16db0b506b0d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -2326,6 +2326,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = { .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, .gfx_off_control = smu_v11_0_gfx_off_control, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 51e83123f72a..cd7efa923195 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2456,6 +2456,7 @@ static const struct pptable_funcs navi10_ppt_funcs = { .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, .gfx_off_control = smu_v11_0_gfx_off_control, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 12b36eb0ff6a..d68d3dfee51d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2802,6 +2802,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .display_clock_voltage_request = smu_v11_0_display_clock_voltage_request, .get_fan_control_mode = smu_v11_0_get_fan_control_mode, .set_fan_control_mode = smu_v11_0_set_fan_control_mode, + .set_fan_speed_percent = smu_v11_0_set_fan_speed_percent, .set_fan_speed_rpm = smu_v11_0_set_fan_speed_rpm, .set_xgmi_pstate = smu_v11_0_set_xgmi_pstate, .gfx_off_control = smu_v11_0_gfx_off_control, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index b279dbbbce6b..5aeb5f5a0447 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1174,6 +1174,35 @@ smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode) } int +smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed) +{ + struct amdgpu_device *adev = smu->adev; + uint32_t duty100, duty; + uint64_t tmp64; + + if (speed > 100) + speed = 100; + + if (smu_v11_0_auto_fan_control(smu, 0)) + return -EINVAL; + + duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1), + 
CG_FDO_CTRL1, FMAX_DUTY100); + if (!duty100) + return -EINVAL; + + tmp64 = (uint64_t)speed * duty100; + do_div(tmp64, 100); + duty = (uint32_t)tmp64; + + WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0, + REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0), + CG_FDO_CTRL0, FDO_STATIC_DUTY, duty)); + + return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC); +} + +int smu_v11_0_set_fan_control_mode(struct smu_context *smu, uint32_t mode) { @@ -1181,7 +1210,7 @@ smu_v11_0_set_fan_control_mode(struct smu_context *smu, switch (mode) { case AMD_FAN_CTRL_NONE: - ret = smu_v11_0_set_fan_speed_rpm(smu, smu->fan_max_rpm); + ret = smu_v11_0_set_fan_speed_percent(smu, 100); break; case AMD_FAN_CTRL_MANUAL: ret = smu_v11_0_auto_fan_control(smu, 0); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 5c1482d4ca43..92ad2cdbae10 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -591,14 +591,17 @@ static ssize_t vangogh_get_gpu_metrics(struct smu_context *smu, gpu_metrics->average_socket_power = metrics.CurrentSocketPower; gpu_metrics->average_cpu_power = metrics.Power[0]; gpu_metrics->average_soc_power = metrics.Power[1]; + gpu_metrics->average_gfx_power = metrics.Power[2]; memcpy(&gpu_metrics->average_core_power[0], &metrics.CorePower[0], sizeof(uint16_t) * 8); gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency; gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency; + gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency; gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency; gpu_metrics->average_vclk_frequency = metrics.VclkFrequency; + gpu_metrics->average_dclk_frequency = metrics.DclkFrequency; memcpy(&gpu_metrics->current_coreclk[0], &metrics.CoreFrequency[0], diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index f743685a20e8..9a9697038016 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -1121,7 +1121,7 @@ static ssize_t renoir_get_gpu_metrics(struct smu_context *smu, static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state) { - return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GpuChangeState, state, NULL); + return 0; } static const struct pptable_funcs renoir_ppt_funcs = { diff --git a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c index 0c98d27f84ac..fee27952ec6d 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611uxc.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611uxc.c @@ -14,6 +14,7 @@ #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/wait.h> +#include <linux/workqueue.h> #include <sound/hdmi-codec.h> @@ -36,6 +37,7 @@ struct lt9611uxc { struct mutex ocm_lock; struct wait_queue_head wq; + struct work_struct work; struct device_node *dsi0_node; struct device_node *dsi1_node; @@ -52,6 +54,8 @@ struct lt9611uxc { bool hpd_supported; bool edid_read; + /* can be accessed from different threads, so protect this with ocm_lock */ + bool hdmi_connected; uint8_t fw_version; }; @@ -143,21 +147,41 @@ static irqreturn_t lt9611uxc_irq_thread_handler(int irq, void *dev_id) if (irq_status) regmap_write(lt9611uxc->regmap, 0xb022, 0); - lt9611uxc_unlock(lt9611uxc); - - if (irq_status & BIT(0)) + if (irq_status & BIT(0)) { lt9611uxc->edid_read = !!(hpd_status & BIT(0)); + wake_up_all(<9611uxc->wq); + } if (irq_status & BIT(1)) 
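The smu_v11_0 hunks above add a native percent-based fan control path: the requested percentage is clamped to 100, scaled against the FMAX_DUTY100 field read from CG_FDO_CTRL1, and programmed as a static duty into CG_FDO_CTRL0; AMD_FAN_CTRL_NONE now maps to 100% through this path instead of programming the maximum RPM. The arithmetic in isolation (fan_percent_to_duty is a hypothetical helper for illustration only):

static uint32_t fan_percent_to_duty(uint32_t speed, uint32_t duty100)
{
	uint64_t tmp64;

	if (speed > 100)
		speed = 100;		/* clamp, as the driver does */

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 100);		/* duty = speed% of FMAX_DUTY100 */

	return (uint32_t)tmp64;		/* e.g. 40% of duty100 = 255 -> 102 */
}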
{ - if (lt9611uxc->connector.dev) - drm_kms_helper_hotplug_event(lt9611uxc->connector.dev); - else - drm_bridge_hpd_notify(<9611uxc->bridge, !!(hpd_status & BIT(1))); + lt9611uxc->hdmi_connected = hpd_status & BIT(1); + schedule_work(<9611uxc->work); } + lt9611uxc_unlock(lt9611uxc); + return IRQ_HANDLED; } +static void lt9611uxc_hpd_work(struct work_struct *work) +{ + struct lt9611uxc *lt9611uxc = container_of(work, struct lt9611uxc, work); + bool connected; + + if (lt9611uxc->connector.dev) + drm_kms_helper_hotplug_event(lt9611uxc->connector.dev); + else { + + mutex_lock(<9611uxc->ocm_lock); + connected = lt9611uxc->hdmi_connected; + mutex_unlock(<9611uxc->ocm_lock); + + drm_bridge_hpd_notify(<9611uxc->bridge, + connected ? + connector_status_connected : + connector_status_disconnected); + } +} + static void lt9611uxc_reset(struct lt9611uxc *lt9611uxc) { gpiod_set_value_cansleep(lt9611uxc->reset_gpio, 1); @@ -445,18 +469,21 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid struct lt9611uxc *lt9611uxc = bridge_to_lt9611uxc(bridge); unsigned int reg_val = 0; int ret; - int connected = 1; + bool connected = true; + + lt9611uxc_lock(lt9611uxc); if (lt9611uxc->hpd_supported) { - lt9611uxc_lock(lt9611uxc); ret = regmap_read(lt9611uxc->regmap, 0xb023, ®_val); - lt9611uxc_unlock(lt9611uxc); if (ret) dev_err(lt9611uxc->dev, "failed to read hpd status: %d\n", ret); else connected = reg_val & BIT(1); } + lt9611uxc->hdmi_connected = connected; + + lt9611uxc_unlock(lt9611uxc); return connected ? connector_status_connected : connector_status_disconnected; @@ -465,7 +492,7 @@ static enum drm_connector_status lt9611uxc_bridge_detect(struct drm_bridge *brid static int lt9611uxc_wait_for_edid(struct lt9611uxc *lt9611uxc) { return wait_event_interruptible_timeout(lt9611uxc->wq, lt9611uxc->edid_read, - msecs_to_jiffies(100)); + msecs_to_jiffies(500)); } static int lt9611uxc_get_edid_block(void *data, u8 *buf, unsigned int block, size_t len) @@ -503,7 +530,10 @@ static struct edid *lt9611uxc_bridge_get_edid(struct drm_bridge *bridge, ret = lt9611uxc_wait_for_edid(lt9611uxc); if (ret < 0) { dev_err(lt9611uxc->dev, "wait for EDID failed: %d\n", ret); - return ERR_PTR(ret); + return NULL; + } else if (ret == 0) { + dev_err(lt9611uxc->dev, "wait for EDID timeout\n"); + return NULL; } return drm_do_get_edid(connector, lt9611uxc_get_edid_block, lt9611uxc); @@ -926,6 +956,8 @@ retry: lt9611uxc->fw_version = ret; init_waitqueue_head(<9611uxc->wq); + INIT_WORK(<9611uxc->work, lt9611uxc_hpd_work); + ret = devm_request_threaded_irq(dev, client->irq, NULL, lt9611uxc_irq_thread_handler, IRQF_ONESHOT, "lt9611uxc", lt9611uxc); @@ -962,6 +994,7 @@ static int lt9611uxc_remove(struct i2c_client *client) struct lt9611uxc *lt9611uxc = i2c_get_clientdata(client); disable_irq(client->irq); + flush_scheduled_work(); lt9611uxc_audio_exit(lt9611uxc); drm_bridge_remove(<9611uxc->bridge); diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index ba1507036f26..4a8cbec832bc 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -3021,7 +3021,7 @@ int drm_atomic_helper_set_config(struct drm_mode_set *set, ret = handle_conflicting_encoders(state, true); if (ret) - return ret; + goto fail; ret = drm_atomic_commit(state); diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 0401b2f47500..b11c0522a441 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ 
b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -3629,14 +3629,26 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, return 0; } -static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count) +/** + * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link + * @link_rate: link rate in 10kbits/s units + * @link_lane_count: lane count + * + * Calculate the total bandwidth of a MultiStream Transport link. The returned + * value is in units of PBNs/(timeslots/1 MTP). This value can be used to + * convert the number of PBNs required for a given stream to the number of + * timeslots this stream requires in each MTP. + */ +int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count) { - if (dp_link_bw == 0 || dp_link_count == 0) - DRM_DEBUG_KMS("invalid link bandwidth in DPCD: %x (link count: %d)\n", - dp_link_bw, dp_link_count); + if (link_rate == 0 || link_lane_count == 0) + DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n", + link_rate, link_lane_count); - return dp_link_bw * dp_link_count / 2; + /* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */ + return link_rate * link_lane_count / 54000; } +EXPORT_SYMBOL(drm_dp_get_vc_payload_bw); /** * drm_dp_read_mst_cap() - check whether or not a sink supports MST @@ -3692,7 +3704,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms goto out_unlock; } - mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr->dpcd[1], + mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]), mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK); if (mgr->pbn_div == 0) { ret = -EINVAL; @@ -4212,6 +4224,7 @@ drm_dp_mst_detect_port(struct drm_connector *connector, switch (port->pdt) { case DP_PEER_DEVICE_NONE: + break; case DP_PEER_DEVICE_MST_BRANCHING: if (!port->mcs) ret = connector_status_connected; diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 02ca22e90290..0b232a73c1b7 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -387,9 +387,16 @@ static int drm_gem_vram_kmap_locked(struct drm_gem_vram_object *gbo, if (gbo->vmap_use_count > 0) goto out; - ret = ttm_bo_vmap(&gbo->bo, &gbo->map); - if (ret) - return ret; + /* + * VRAM helpers unmap the BO only on demand. So the previous + * page mapping might still be around. Only vmap if the there's + * no mapping present. 
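The drm_dp_mst_topology.c hunk above exports drm_dp_get_vc_payload_bw() and switches its input from raw DPCD bandwidth codes to a link rate in 10 kbit/s units, matching the DP 2.0 VCPayload bandwidth formula. A usage sketch with concrete numbers; the 1000 PBN figure is only an example stream requirement:

	/* HBR2 = 5.4 Gbit/s per lane = 540000 in 10 kbit/s units, 4 lanes */
	int pbn_per_slot = drm_dp_get_vc_payload_bw(540000, 4);	/* 540000 * 4 / 54000 = 40 */

	/* A stream needing 1000 PBN then occupies DIV_ROUND_UP(1000, 40) = 25
	 * of the 63 payload timeslots available per MTP. */
	int slots = DIV_ROUND_UP(1000, pbn_per_slot);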
+ */ + if (dma_buf_map_is_null(&gbo->map)) { + ret = ttm_bo_vmap(&gbo->bo, &gbo->map); + if (ret) + return ret; + } out: ++gbo->vmap_use_count; @@ -577,6 +584,7 @@ static void drm_gem_vram_bo_driver_move_notify(struct drm_gem_vram_object *gbo, return; ttm_bo_vunmap(bo, &gbo->map); + dma_buf_map_clear(&gbo->map); /* explicitly clear mapping for next vmap call */ } static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo, diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c index 6e74e6745eca..349146049849 100644 --- a/drivers/gpu/drm/drm_syncobj.c +++ b/drivers/gpu/drm/drm_syncobj.c @@ -388,19 +388,18 @@ int drm_syncobj_find_fence(struct drm_file *file_private, return -ENOENT; *fence = drm_syncobj_fence_get(syncobj); - drm_syncobj_put(syncobj); if (*fence) { ret = dma_fence_chain_find_seqno(fence, point); if (!ret) - return 0; + goto out; dma_fence_put(*fence); } else { ret = -EINVAL; } if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)) - return ret; + goto out; memset(&wait, 0, sizeof(wait)); wait.task = current; @@ -432,6 +431,9 @@ int drm_syncobj_find_fence(struct drm_file *file_private, if (wait.node.next) drm_syncobj_remove_wait(syncobj, &wait); +out: + drm_syncobj_put(syncobj); + return ret; } EXPORT_SYMBOL(drm_syncobj_find_fence); diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 92940a0c5ef8..dc13d1814d95 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2754,13 +2754,15 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int n_entries, ln; u32 val; + if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT) + return; + ddi_translations = icl_get_mg_buf_trans(encoder, crtc_state, &n_entries); - /* The table does not have values for level 3 and level 9. */ - if (level >= n_entries || level == 3 || level == 9) { + if (level >= n_entries) { drm_dbg_kms(&dev_priv->drm, "DDI translation not found for level %d. Using %d instead.", - level, n_entries - 2); - level = n_entries - 2; + level, n_entries - 1); + level = n_entries - 1; } /* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */ @@ -2891,6 +2893,9 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, u32 val, dpcnt_mask, dpcnt_val; int n_entries, ln; + if (enc_to_dig_port(encoder)->tc_mode == TC_PORT_TBT_ALT) + return; + ddi_translations = tgl_get_dkl_buf_trans(encoder, crtc_state, &n_entries); if (level >= n_entries) @@ -3532,6 +3537,23 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder, intel_de_posting_read(dev_priv, dp_tp_ctl_reg(encoder, crtc_state)); } +static void intel_ddi_power_up_lanes(struct intel_encoder *encoder, + const struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + struct intel_digital_port *dig_port = enc_to_dig_port(encoder); + enum phy phy = intel_port_to_phy(i915, encoder->port); + + if (intel_phy_is_combo(i915, phy)) { + bool lane_reversal = + dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; + + intel_combo_phy_power_up_lanes(i915, phy, false, + crtc_state->lane_count, + lane_reversal); + } +} + static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -3621,14 +3643,7 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, * 7.f Combo PHY: Configure PORT_CL_DW10 Static Power Down to power up * the used lanes of the DDI. 
*/ - if (intel_phy_is_combo(dev_priv, phy)) { - bool lane_reversal = - dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; - - intel_combo_phy_power_up_lanes(dev_priv, phy, false, - crtc_state->lane_count, - lane_reversal); - } + intel_ddi_power_up_lanes(encoder, crtc_state); /* * 7.g Configure and enable DDI_BUF_CTL @@ -3713,19 +3728,12 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, else intel_prepare_dp_ddi_buffers(encoder, crtc_state); - if (intel_phy_is_combo(dev_priv, phy)) { - bool lane_reversal = - dig_port->saved_port_bits & DDI_BUF_PORT_REVERSAL; - - intel_combo_phy_power_up_lanes(dev_priv, phy, false, - crtc_state->lane_count, - lane_reversal); - } + intel_ddi_power_up_lanes(encoder, crtc_state); intel_ddi_init_dp_buf_reg(encoder, crtc_state); if (!is_mst) intel_dp_set_power(intel_dp, DP_SET_POWER_D0); - intel_dp_configure_protocol_converter(intel_dp); + intel_dp_configure_protocol_converter(intel_dp, crtc_state); intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); intel_dp_sink_set_fec_ready(intel_dp, crtc_state); @@ -4206,6 +4214,8 @@ static void intel_enable_ddi_hdmi(struct intel_atomic_state *state, intel_de_write(dev_priv, reg, val); } + intel_ddi_power_up_lanes(encoder, crtc_state); + /* In HDMI/DVI mode, the port width, and swing/emphasis values * are ignored so nothing special needs to be done besides * enabling the port. diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 53a00cf3fa32..61be6bed9162 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -2309,7 +2309,7 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb, */ ret = i915_vma_pin_fence(vma); if (ret != 0 && INTEL_GEN(dev_priv) < 4) { - i915_gem_object_unpin_from_display_plane(vma); + i915_vma_unpin(vma); vma = ERR_PTR(ret); goto err; } @@ -2327,12 +2327,9 @@ err: void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags) { - i915_gem_object_lock(vma->obj, NULL); if (flags & PLANE_HAS_FENCE) i915_vma_unpin_fence(vma); - i915_gem_object_unpin_from_display_plane(vma); - i915_gem_object_unlock(vma->obj); - + i915_vma_unpin(vma); i915_vma_put(vma); } @@ -4807,6 +4804,8 @@ u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; } else if (fb->format->is_yuv) { plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE; + if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) + plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE; } return plane_color_ctl; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 37f1a10fd021..8a26307c4896 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4014,7 +4014,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp, intel_de_posting_read(dev_priv, intel_dp->output_reg); } -void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp) +void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) { struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 tmp; @@ -4033,8 +4034,8 @@ void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp) drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n", enableddisabled(intel_dp->has_hdmi_sink)); - tmp = intel_dp->dfp.ycbcr_444_to_420 ? 
- DP_CONVERSION_TO_YCBCR420_ENABLE : 0; + tmp = crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 && + intel_dp->dfp.ycbcr_444_to_420 ? DP_CONVERSION_TO_YCBCR420_ENABLE : 0; if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1) @@ -4088,7 +4089,7 @@ static void intel_enable_dp(struct intel_atomic_state *state, } intel_dp_set_power(intel_dp, DP_SET_POWER_D0); - intel_dp_configure_protocol_converter(intel_dp); + intel_dp_configure_protocol_converter(intel_dp, pipe_config); intel_dp_start_link_train(intel_dp, pipe_config); intel_dp_stop_link_train(intel_dp, pipe_config); @@ -4636,24 +4637,6 @@ ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp, intel_de_posting_read(dev_priv, intel_dp->output_reg); } -void intel_dp_set_signal_levels(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state) -{ - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - u8 train_set = intel_dp->train_set[0]; - - drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n", - train_set & DP_TRAIN_VOLTAGE_SWING_MASK, - train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : ""); - drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n", - (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> - DP_TRAIN_PRE_EMPHASIS_SHIFT, - train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? - " (max)" : ""); - - intel_dp->set_signal_levels(intel_dp, crtc_state); -} - void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, @@ -5702,7 +5685,7 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp, intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state); - intel_dp_set_signal_levels(intel_dp, crtc_state); + intel_dp_set_signal_levels(intel_dp, crtc_state, DP_PHY_DPRX); intel_dp_phy_pattern_update(intel_dp, crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index b871a09b6901..6620f9efdcbb 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -51,7 +51,8 @@ int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp, int intel_dp_retrain_link(struct intel_encoder *encoder, struct drm_modeset_acquire_ctx *ctx); void intel_dp_set_power(struct intel_dp *intel_dp, u8 mode); -void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp); +void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state); void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, bool enable); @@ -95,9 +96,6 @@ void intel_dp_program_link_training_pattern(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, u8 dp_train_pat); -void -intel_dp_set_signal_levels(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state); void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select); bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index 91d3979902d0..d8c6d7054d11 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -334,6 +334,27 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, return drm_dp_dpcd_write(&intel_dp->aux, reg, buf, len) == len; } +void intel_dp_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, 
+ enum drm_dp_phy dp_phy) +{ + struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); + u8 train_set = intel_dp->train_set[0]; + char phy_name[10]; + + drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s, pre-emphasis level %d%s, at %s\n", + train_set & DP_TRAIN_VOLTAGE_SWING_MASK, + train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "", + (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT, + train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ? + " (max)" : "", + intel_dp_phy_name(dp_phy, phy_name, sizeof(phy_name))); + + if (intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy)) + intel_dp->set_signal_levels(intel_dp, crtc_state); +} + static bool intel_dp_reset_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, @@ -341,7 +362,7 @@ intel_dp_reset_link_train(struct intel_dp *intel_dp, u8 dp_train_pat) { memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set)); - intel_dp_set_signal_levels(intel_dp, crtc_state); + intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy); return intel_dp_set_link_train(intel_dp, crtc_state, dp_phy, dp_train_pat); } @@ -355,7 +376,7 @@ intel_dp_update_link_train(struct intel_dp *intel_dp, DP_TRAINING_LANE0_SET_PHY_REPEATER(dp_phy); int ret; - intel_dp_set_signal_levels(intel_dp, crtc_state); + intel_dp_set_signal_levels(intel_dp, crtc_state, dp_phy); ret = drm_dp_dpcd_write(&intel_dp->aux, reg, intel_dp->train_set, crtc_state->lane_count); diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h index 86905aa24db7..6a1f76bd8c75 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h @@ -17,6 +17,9 @@ void intel_dp_get_adjust_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy, const u8 link_status[DP_LINK_STATUS_SIZE]); +void intel_dp_set_signal_levels(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy); void intel_dp_start_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state); void intel_dp_stop_link_train(struct intel_dp *intel_dp, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 27f04aed8764..3286b232be0b 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -69,7 +69,9 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder, slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr, connector->port, - crtc_state->pbn, 0); + crtc_state->pbn, + drm_dp_get_vc_payload_bw(crtc_state->port_clock, + crtc_state->lane_count)); if (slots == -EDEADLK) return slots; if (slots >= 0) diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index b2a4bbcfdcd2..b9d8825e2bb1 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -2210,6 +2210,7 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, if (content_protection_type_changed) { mutex_lock(&hdcp->mutex); hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED; + drm_connector_get(&connector->base); schedule_work(&hdcp->prop_work); mutex_unlock(&hdcp->mutex); } @@ -2221,6 +2222,14 @@ void intel_hdcp_update_pipe(struct intel_atomic_state *state, desired_and_not_enabled = hdcp->value != DRM_MODE_CONTENT_PROTECTION_ENABLED; mutex_unlock(&hdcp->mutex); + /* + * If HDCP 
already ENABLED and CP property is DESIRED, schedule + * prop_work to update correct CP property to user space. + */ + if (!desired_and_not_enabled && !content_protection_type_changed) { + drm_connector_get(&connector->base); + schedule_work(&hdcp->prop_work); + } } if (desired_and_not_enabled || content_protection_type_changed) diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 52b4f6193b4c..b73d51e766ce 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -182,6 +182,7 @@ struct intel_overlay { struct intel_crtc *crtc; struct i915_vma *vma; struct i915_vma *old_vma; + struct intel_frontbuffer *frontbuffer; bool active; bool pfit_active; u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */ @@ -282,21 +283,19 @@ static void intel_overlay_flip_prepare(struct intel_overlay *overlay, struct i915_vma *vma) { enum pipe pipe = overlay->crtc->pipe; - struct intel_frontbuffer *from = NULL, *to = NULL; + struct intel_frontbuffer *frontbuffer = NULL; drm_WARN_ON(&overlay->i915->drm, overlay->old_vma); - if (overlay->vma) - from = intel_frontbuffer_get(overlay->vma->obj); if (vma) - to = intel_frontbuffer_get(vma->obj); + frontbuffer = intel_frontbuffer_get(vma->obj); - intel_frontbuffer_track(from, to, INTEL_FRONTBUFFER_OVERLAY(pipe)); + intel_frontbuffer_track(overlay->frontbuffer, frontbuffer, + INTEL_FRONTBUFFER_OVERLAY(pipe)); - if (to) - intel_frontbuffer_put(to); - if (from) - intel_frontbuffer_put(from); + if (overlay->frontbuffer) + intel_frontbuffer_put(overlay->frontbuffer); + overlay->frontbuffer = frontbuffer; intel_frontbuffer_flip_prepare(overlay->i915, INTEL_FRONTBUFFER_OVERLAY(pipe)); @@ -359,7 +358,7 @@ static void intel_overlay_release_old_vma(struct intel_overlay *overlay) intel_frontbuffer_flip_complete(overlay->i915, INTEL_FRONTBUFFER_OVERLAY(overlay->crtc->pipe)); - i915_gem_object_unpin_from_display_plane(vma); + i915_vma_unpin(vma); i915_vma_put(vma); } @@ -860,7 +859,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay, return 0; out_unpin: - i915_gem_object_unpin_from_display_plane(vma); + i915_vma_unpin(vma); out_pin_section: atomic_dec(&dev_priv->gpu_error.pending_fb_pin); diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 019a2d6d807a..3da2544fa1c0 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -618,13 +618,19 @@ skl_program_scaler(struct intel_plane *plane, /* Preoffset values for YUV to RGB Conversion */ #define PREOFF_YUV_TO_RGB_HI 0x1800 -#define PREOFF_YUV_TO_RGB_ME 0x1F00 +#define PREOFF_YUV_TO_RGB_ME 0x0000 #define PREOFF_YUV_TO_RGB_LO 0x1800 #define ROFF(x) (((x) & 0xffff) << 16) #define GOFF(x) (((x) & 0xffff) << 0) #define BOFF(x) (((x) & 0xffff) << 16) +/* + * Programs the input color space conversion stage for ICL HDR planes. + * Note that it is assumed that this stage always happens after YUV + * range correction. Thus, the input to this stage is assumed to be + * in full-range YCbCr. 
+ */ static void icl_program_input_csc(struct intel_plane *plane, const struct intel_crtc_state *crtc_state, @@ -672,52 +678,7 @@ icl_program_input_csc(struct intel_plane *plane, 0x0, 0x7800, 0x7F10, }, }; - - /* Matrix for Limited Range to Full Range Conversion */ - static const u16 input_csc_matrix_lr[][9] = { - /* - * BT.601 Limted range YCbCr -> full range RGB - * The matrix required is : - * [1.164384, 0.000, 1.596027, - * 1.164384, -0.39175, -0.812813, - * 1.164384, 2.017232, 0.0000] - */ - [DRM_COLOR_YCBCR_BT601] = { - 0x7CC8, 0x7950, 0x0, - 0x8D00, 0x7950, 0x9C88, - 0x0, 0x7950, 0x6810, - }, - /* - * BT.709 Limited range YCbCr -> full range RGB - * The matrix required is : - * [1.164384, 0.000, 1.792741, - * 1.164384, -0.213249, -0.532909, - * 1.164384, 2.112402, 0.0000] - */ - [DRM_COLOR_YCBCR_BT709] = { - 0x7E58, 0x7950, 0x0, - 0x8888, 0x7950, 0xADA8, - 0x0, 0x7950, 0x6870, - }, - /* - * BT.2020 Limited range YCbCr -> full range RGB - * The matrix required is : - * [1.164, 0.000, 1.678, - * 1.164, -0.1873, -0.6504, - * 1.164, 2.1417, 0.0000] - */ - [DRM_COLOR_YCBCR_BT2020] = { - 0x7D70, 0x7950, 0x0, - 0x8A68, 0x7950, 0xAC00, - 0x0, 0x7950, 0x6890, - }, - }; - const u16 *csc; - - if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) - csc = input_csc_matrix[plane_state->hw.color_encoding]; - else - csc = input_csc_matrix_lr[plane_state->hw.color_encoding]; + const u16 *csc = input_csc_matrix[plane_state->hw.color_encoding]; intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) | GOFF(csc[1])); @@ -734,14 +695,8 @@ icl_program_input_csc(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0), PREOFF_YUV_TO_RGB_HI); - if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE) - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), - 0); - else - intel_de_write_fw(dev_priv, - PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), - PREOFF_YUV_TO_RGB_ME); + intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), + PREOFF_YUV_TO_RGB_ME); intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2), PREOFF_YUV_TO_RGB_LO); intel_de_write_fw(dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 4346bc1a747a..8b6f16f9d0d1 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -23,36 +23,6 @@ static const char *tc_port_mode_name(enum tc_port_mode mode) return names[mode]; } -static void -tc_port_load_fia_params(struct drm_i915_private *i915, - struct intel_digital_port *dig_port) -{ - enum port port = dig_port->base.port; - enum tc_port tc_port = intel_port_to_tc(i915, port); - u32 modular_fia; - - if (INTEL_INFO(i915)->display.has_modular_fia) { - modular_fia = intel_uncore_read(&i915->uncore, - PORT_TX_DFLEXDPSP(FIA1)); - drm_WARN_ON(&i915->drm, modular_fia == 0xffffffff); - modular_fia &= MODULAR_FIA_MASK; - } else { - modular_fia = 0; - } - - /* - * Each Modular FIA instance houses 2 TC ports. In SOC that has more - * than two TC ports, there are multiple instances of Modular FIA. 
- */ - if (modular_fia) { - dig_port->tc_phy_fia = tc_port / 2; - dig_port->tc_phy_fia_idx = tc_port % 2; - } else { - dig_port->tc_phy_fia = FIA1; - dig_port->tc_phy_fia_idx = tc_port; - } -} - static enum intel_display_power_domain tc_cold_get_power_domain(struct intel_digital_port *dig_port) { @@ -646,6 +616,43 @@ void intel_tc_port_put_link(struct intel_digital_port *dig_port) mutex_unlock(&dig_port->tc_lock); } +static bool +tc_has_modular_fia(struct drm_i915_private *i915, struct intel_digital_port *dig_port) +{ + intel_wakeref_t wakeref; + u32 val; + + if (!INTEL_INFO(i915)->display.has_modular_fia) + return false; + + wakeref = tc_cold_block(dig_port); + val = intel_uncore_read(&i915->uncore, PORT_TX_DFLEXDPSP(FIA1)); + tc_cold_unblock(dig_port, wakeref); + + drm_WARN_ON(&i915->drm, val == 0xffffffff); + + return val & MODULAR_FIA_MASK; +} + +static void +tc_port_load_fia_params(struct drm_i915_private *i915, struct intel_digital_port *dig_port) +{ + enum port port = dig_port->base.port; + enum tc_port tc_port = intel_port_to_tc(i915, port); + + /* + * Each Modular FIA instance houses 2 TC ports. In SOC that has more + * than two TC ports, there are multiple instances of Modular FIA. + */ + if (tc_has_modular_fia(i915, dig_port)) { + dig_port->tc_phy_fia = tc_port / 2; + dig_port->tc_phy_fia_idx = tc_port % 2; + } else { + dig_port->tc_phy_fia = FIA1; + dig_port->tc_phy_fia_idx = tc_port; + } +} + void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c index fcce6909f201..3d435bfff764 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c @@ -387,48 +387,6 @@ err: return vma; } -static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj) -{ - struct drm_i915_private *i915 = to_i915(obj->base.dev); - struct i915_vma *vma; - - if (list_empty(&obj->vma.list)) - return; - - mutex_lock(&i915->ggtt.vm.mutex); - spin_lock(&obj->vma.lock); - for_each_ggtt_vma(vma, obj) { - if (!drm_mm_node_allocated(&vma->node)) - continue; - - GEM_BUG_ON(vma->vm != &i915->ggtt.vm); - list_move_tail(&vma->vm_link, &vma->vm->bound_list); - } - spin_unlock(&obj->vma.lock); - mutex_unlock(&i915->ggtt.vm.mutex); - - if (i915_gem_object_is_shrinkable(obj)) { - unsigned long flags; - - spin_lock_irqsave(&i915->mm.obj_lock, flags); - - if (obj->mm.madv == I915_MADV_WILLNEED && - !atomic_read(&obj->mm.shrink_pin)) - list_move_tail(&obj->mm.link, &i915->mm.shrink_list); - - spin_unlock_irqrestore(&i915->mm.obj_lock, flags); - } -} - -void -i915_gem_object_unpin_from_display_plane(struct i915_vma *vma) -{ - /* Bump the LRU to try and avoid premature eviction whilst flipping */ - i915_gem_object_bump_inactive_ggtt(vma->obj); - - i915_vma_unpin(vma); -} - /** * Moves a single object to the CPU read, and possibly write domain. 
* @obj: object to act on @@ -569,9 +527,6 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, else err = i915_gem_object_set_to_cpu_domain(obj, write_domain); - /* And bump the LRU for this access */ - i915_gem_object_bump_inactive_ggtt(obj); - i915_gem_object_unlock(obj); if (write_domain) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h index be14486f63a7..4556afe18f16 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_object.h +++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h @@ -486,7 +486,6 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj, u32 alignment, const struct i915_ggtt_view *view, unsigned int flags); -void i915_gem_object_unpin_from_display_plane(struct i915_vma *vma); void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj); void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj); diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c index 94465374ca2f..e961ad6a3129 100644 --- a/drivers/gpu/drm/i915/gt/gen7_renderclear.c +++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c @@ -390,6 +390,16 @@ static void emit_batch(struct i915_vma * const vma, &cb_kernel_ivb, desc_count); + /* Reset inherited context registers */ + gen7_emit_pipeline_invalidate(&cmds); + batch_add(&cmds, MI_LOAD_REGISTER_IMM(2)); + batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_0_GEN7)); + batch_add(&cmds, 0xffff0000); + batch_add(&cmds, i915_mmio_reg_offset(CACHE_MODE_1)); + batch_add(&cmds, 0xffff0000 | PIXEL_SUBSPAN_COLLECT_OPT_DISABLE); + gen7_emit_pipeline_flush(&cmds); + + /* Switch to the media pipeline and our base address */ gen7_emit_pipeline_invalidate(&cmds); batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA); batch_add(&cmds, MI_NOOP); @@ -399,9 +409,11 @@ static void emit_batch(struct i915_vma * const vma, gen7_emit_state_base_address(&cmds, descriptors); gen7_emit_pipeline_invalidate(&cmds); + /* Set the clear-residual kernel state */ gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0); gen7_emit_interface_descriptor_load(&cmds, descriptors, desc_count); + /* Execute the kernel on all HW threads */ for (i = 0; i < num_primitives(bv); i++) gen7_emit_media_object(&cmds, i); diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c index a24cc1ff08a0..1d1757584f49 100644 --- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c @@ -134,11 +134,6 @@ static bool remove_signaling_context(struct intel_breadcrumbs *b, return true; } -static inline bool __request_completed(const struct i915_request *rq) -{ - return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno); -} - __maybe_unused static bool check_signal_order(struct intel_context *ce, struct i915_request *rq) { @@ -192,18 +187,6 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl) intel_engine_add_retire(b->irq_engine, tl); } -static bool __signal_request(struct i915_request *rq) -{ - GEM_BUG_ON(test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)); - - if (!__dma_fence_signal(&rq->fence)) { - i915_request_put(rq); - return false; - } - - return true; -} - static struct llist_node * slist_add(struct llist_node *node, struct llist_node *head) { @@ -257,7 +240,7 @@ static void signal_irq_work(struct irq_work *work) list_for_each_entry_rcu(rq, &ce->signals, signal_link) { bool release; - if (!__request_completed(rq)) + if (!__i915_request_is_complete(rq)) break; if 
(!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, @@ -274,9 +257,11 @@ static void signal_irq_work(struct irq_work *work) release = remove_signaling_context(b, ce); spin_unlock(&ce->signal_lock); - if (__signal_request(rq)) + if (__dma_fence_signal(&rq->fence)) /* We own signal_node now, xfer to local list */ signal = slist_add(&rq->signal_node, signal); + else + i915_request_put(rq); if (release) { add_retire(b, ce->timeline); @@ -363,6 +348,17 @@ void intel_breadcrumbs_free(struct intel_breadcrumbs *b) kfree(b); } +static void irq_signal_request(struct i915_request *rq, + struct intel_breadcrumbs *b) +{ + if (!__dma_fence_signal(&rq->fence)) + return; + + i915_request_get(rq); + if (llist_add(&rq->signal_node, &b->signaled_requests)) + irq_work_queue(&b->irq_work); +} + static void insert_breadcrumb(struct i915_request *rq) { struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs; @@ -372,17 +368,13 @@ static void insert_breadcrumb(struct i915_request *rq) if (test_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) return; - i915_request_get(rq); - /* * If the request is already completed, we can transfer it * straight onto a signaled list, and queue the irq worker for * its signal completion. */ - if (__request_completed(rq)) { - if (__signal_request(rq) && - llist_add(&rq->signal_node, &b->signaled_requests)) - irq_work_queue(&b->irq_work); + if (__i915_request_is_complete(rq)) { + irq_signal_request(rq, b); return; } @@ -413,6 +405,8 @@ static void insert_breadcrumb(struct i915_request *rq) break; } } + + i915_request_get(rq); list_add_rcu(&rq->signal_link, pos); GEM_BUG_ON(!check_signal_order(ce, rq)); GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags)); @@ -453,19 +447,25 @@ bool i915_request_enable_breadcrumb(struct i915_request *rq) void i915_request_cancel_breadcrumb(struct i915_request *rq) { + struct intel_breadcrumbs *b = READ_ONCE(rq->engine)->breadcrumbs; struct intel_context *ce = rq->context; bool release; - if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) + spin_lock(&ce->signal_lock); + if (!test_and_clear_bit(I915_FENCE_FLAG_SIGNAL, &rq->fence.flags)) { + spin_unlock(&ce->signal_lock); return; + } - spin_lock(&ce->signal_lock); list_del_rcu(&rq->signal_link); - release = remove_signaling_context(rq->engine->breadcrumbs, ce); + release = remove_signaling_context(b, ce); spin_unlock(&ce->signal_lock); if (release) intel_context_put(ce); + if (__i915_request_is_complete(rq)) + irq_signal_request(rq, b); + i915_request_put(rq); } diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c index cf94525be2c1..db8c66dde655 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c @@ -526,16 +526,39 @@ static int init_ggtt(struct i915_ggtt *ggtt) mutex_init(&ggtt->error_mutex); if (ggtt->mappable_end) { - /* Reserve a mappable slot for our lockless error capture */ - ret = drm_mm_insert_node_in_range(&ggtt->vm.mm, - &ggtt->error_capture, - PAGE_SIZE, 0, - I915_COLOR_UNEVICTABLE, - 0, ggtt->mappable_end, - DRM_MM_INSERT_LOW); - if (ret) - return ret; + /* + * Reserve a mappable slot for our lockless error capture. + * + * We strongly prefer taking address 0x0 in order to protect + * other critical buffers against accidental overwrites, + * as writing to address 0 is a very common mistake. + * + * Since 0 may already be in use by the system (e.g. the BIOS + * framebuffer), we let the reservation fail quietly and hope + * 0 remains reserved always. 
+ * + * If we fail to reserve 0, and then fail to find any space + * for an error-capture, remain silent. We can afford not + * to reserve an error_capture node as we have fallback + * paths, and we trust that 0 will remain reserved. However, + * the only likely reason for failure to insert is a driver + * bug, which we expect to cause other failures... + */ + ggtt->error_capture.size = I915_GTT_PAGE_SIZE; + ggtt->error_capture.color = I915_COLOR_UNEVICTABLE; + if (drm_mm_reserve_node(&ggtt->vm.mm, &ggtt->error_capture)) + drm_mm_insert_node_in_range(&ggtt->vm.mm, + &ggtt->error_capture, + ggtt->error_capture.size, 0, + ggtt->error_capture.color, + 0, ggtt->mappable_end, + DRM_MM_INSERT_LOW); } + if (drm_mm_node_allocated(&ggtt->error_capture)) + drm_dbg(&ggtt->vm.i915->drm, + "Reserved GGTT:[%llx, %llx] for use by error capture\n", + ggtt->error_capture.start, + ggtt->error_capture.start + ggtt->error_capture.size); /* * The upper portion of the GuC address space has a sizeable hole @@ -548,9 +571,9 @@ static int init_ggtt(struct i915_ggtt *ggtt) /* Clear any non-preallocated blocks */ drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) { - drm_dbg_kms(&ggtt->vm.i915->drm, - "clearing unused GTT space: [%lx, %lx]\n", - hole_start, hole_end); + drm_dbg(&ggtt->vm.i915->drm, + "clearing unused GTT space: [%lx, %lx]\n", + hole_start, hole_end); ggtt->vm.clear_range(&ggtt->vm, hole_start, hole_end - hole_start); } diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 7614a3d24fca..26c7d0a50585 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -3988,6 +3988,9 @@ err: static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine) { i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0); + + /* Called on error unwind, clear all flags to prevent further use */ + memset(&engine->wa_ctx, 0, sizeof(engine->wa_ctx)); } typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch); diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c index 7ea94d201fe6..8015964043eb 100644 --- a/drivers/gpu/drm/i915/gt/intel_timeline.c +++ b/drivers/gpu/drm/i915/gt/intel_timeline.c @@ -126,6 +126,10 @@ static void __rcu_cacheline_free(struct rcu_head *rcu) struct intel_timeline_cacheline *cl = container_of(rcu, typeof(*cl), rcu); + /* Must wait until after all *rq->hwsp are complete before removing */ + i915_gem_object_unpin_map(cl->hwsp->vma->obj); + __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS)); + i915_active_fini(&cl->active); kfree(cl); } @@ -133,11 +137,6 @@ static void __rcu_cacheline_free(struct rcu_head *rcu) static void __idle_cacheline_free(struct intel_timeline_cacheline *cl) { GEM_BUG_ON(!i915_active_is_idle(&cl->active)); - - i915_gem_object_unpin_map(cl->hwsp->vma->obj); - i915_vma_put(cl->hwsp->vma); - __idle_hwsp_free(cl->hwsp, ptr_unmask_bits(cl->vaddr, CACHELINE_BITS)); - call_rcu(&cl->rcu, __rcu_cacheline_free); } @@ -179,7 +178,6 @@ cacheline_alloc(struct intel_timeline_hwsp *hwsp, unsigned int cacheline) return ERR_CAST(vaddr); } - i915_vma_get(hwsp->vma); cl->hwsp = hwsp; cl->vaddr = page_pack_bits(vaddr, cacheline); diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 10a865f3dc09..9ed19b8bca60 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -631,24 +631,26 @@ static int flush_lazy_signals(struct i915_active *ref) int __i915_active_wait(struct 
i915_active *ref, int state) { - int err; - might_sleep(); - if (!i915_active_acquire_if_busy(ref)) - return 0; - /* Any fence added after the wait begins will not be auto-signaled */ - err = flush_lazy_signals(ref); - i915_active_release(ref); - if (err) - return err; + if (i915_active_acquire_if_busy(ref)) { + int err; - if (!i915_active_is_idle(ref) && - ___wait_var_event(ref, i915_active_is_idle(ref), - state, 0, 0, schedule())) - return -EINTR; + err = flush_lazy_signals(ref); + i915_active_release(ref); + if (err) + return err; + if (___wait_var_event(ref, i915_active_is_idle(ref), + state, 0, 0, schedule())) + return -EINTR; + } + + /* + * After the wait is complete, the caller may free the active. + * We have to flush any concurrent retirement before returning. + */ flush_work(&ref->work); return 0; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 632c713227dc..c6964f82a1bb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1346,7 +1346,7 @@ intel_subplatform(const struct intel_runtime_info *info, enum intel_platform p) { const unsigned int pi = __platform_mask_index(info, p); - return info->platform_mask[pi] & INTEL_SUBPLATFORM_BITS; + return info->platform_mask[pi] & ((1 << INTEL_SUBPLATFORM_BITS) - 1); } static __always_inline bool diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d76685ce0399..9856479b56d8 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -184,13 +184,24 @@ static u64 get_rc6(struct intel_gt *gt) return val; } -static void park_rc6(struct drm_i915_private *i915) +static void init_rc6(struct i915_pmu *pmu) { - struct i915_pmu *pmu = &i915->pmu; + struct drm_i915_private *i915 = container_of(pmu, typeof(*i915), pmu); + intel_wakeref_t wakeref; - if (pmu->enable & config_enabled_mask(I915_PMU_RC6_RESIDENCY)) + with_intel_runtime_pm(i915->gt.uncore->rpm, wakeref) { pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); + pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = + pmu->sample[__I915_SAMPLE_RC6].cur; + pmu->sleep_last = ktime_get(); + } +} +static void park_rc6(struct drm_i915_private *i915) +{ + struct i915_pmu *pmu = &i915->pmu; + + pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); pmu->sleep_last = ktime_get(); } @@ -201,6 +212,7 @@ static u64 get_rc6(struct intel_gt *gt) return __get_rc6(gt); } +static void init_rc6(struct i915_pmu *pmu) { } static void park_rc6(struct drm_i915_private *i915) {} #endif @@ -612,10 +624,8 @@ static void i915_pmu_enable(struct perf_event *event) container_of(event->pmu, typeof(*i915), pmu.base); unsigned int bit = event_enabled_bit(event); struct i915_pmu *pmu = &i915->pmu; - intel_wakeref_t wakeref; unsigned long flags; - wakeref = intel_runtime_pm_get(&i915->runtime_pm); spin_lock_irqsave(&pmu->lock, flags); /* @@ -626,13 +636,6 @@ static void i915_pmu_enable(struct perf_event *event) GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count)); GEM_BUG_ON(pmu->enable_count[bit] == ~0); - if (pmu->enable_count[bit] == 0 && - config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) { - pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0; - pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt); - pmu->sleep_last = ktime_get(); - } - pmu->enable |= BIT_ULL(bit); pmu->enable_count[bit]++; @@ -673,8 +676,6 @@ static void i915_pmu_enable(struct perf_event *event) * an existing non-zero value. 
*/ local64_set(&event->hw.prev_count, __i915_pmu_event_read(event)); - - intel_runtime_pm_put(&i915->runtime_pm, wakeref); } static void i915_pmu_disable(struct perf_event *event) @@ -1130,6 +1131,7 @@ void i915_pmu_register(struct drm_i915_private *i915) hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); pmu->timer.function = i915_sample; pmu->cpuhp.cpu = -1; + init_rc6(pmu); if (!is_igp(i915)) { pmu->name = kasprintf(GFP_KERNEL, diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h index 620b6fab2c5c..92adfee30c7c 100644 --- a/drivers/gpu/drm/i915/i915_request.h +++ b/drivers/gpu/drm/i915/i915_request.h @@ -434,7 +434,7 @@ static inline u32 hwsp_seqno(const struct i915_request *rq) static inline bool __i915_request_has_started(const struct i915_request *rq) { - return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno - 1); + return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno - 1); } /** @@ -465,11 +465,19 @@ static inline bool __i915_request_has_started(const struct i915_request *rq) */ static inline bool i915_request_started(const struct i915_request *rq) { + bool result; + if (i915_request_signaled(rq)) return true; - /* Remember: started but may have since been preempted! */ - return __i915_request_has_started(rq); + result = true; + rcu_read_lock(); /* the HWSP may be freed at runtime */ + if (likely(!i915_request_signaled(rq))) + /* Remember: started but may have since been preempted! */ + result = __i915_request_has_started(rq); + rcu_read_unlock(); + + return result; } /** @@ -482,10 +490,16 @@ static inline bool i915_request_started(const struct i915_request *rq) */ static inline bool i915_request_is_running(const struct i915_request *rq) { + bool result; + if (!i915_request_is_active(rq)) return false; - return __i915_request_has_started(rq); + rcu_read_lock(); + result = __i915_request_has_started(rq) && i915_request_is_active(rq); + rcu_read_unlock(); + + return result; } /** @@ -509,12 +523,25 @@ static inline bool i915_request_is_ready(const struct i915_request *rq) return !list_empty(&rq->sched.link); } +static inline bool __i915_request_is_complete(const struct i915_request *rq) +{ + return i915_seqno_passed(__hwsp_seqno(rq), rq->fence.seqno); +} + static inline bool i915_request_completed(const struct i915_request *rq) { + bool result; + if (i915_request_signaled(rq)) return true; - return i915_seqno_passed(hwsp_seqno(rq), rq->fence.seqno); + result = true; + rcu_read_lock(); /* the HWSP may be freed at runtime */ + if (likely(!i915_request_signaled(rq))) + result = __i915_request_is_complete(rq); + rcu_read_unlock(); + + return result; } static inline void i915_request_mark_complete(struct i915_request *rq) diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c index c53a222e3dec..713770fb2b92 100644 --- a/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/selftests/i915_gem_gtt.c @@ -1880,7 +1880,7 @@ static int igt_cs_tlb(void *arg) vma = i915_vma_instance(out, vm, NULL); if (IS_ERR(vma)) { err = PTR_ERR(vma); - goto out_put_batch; + goto out_put_out; } err = i915_vma_pin(vma, 0, 0, diff --git a/drivers/gpu/drm/nouveau/dispnv50/base507c.c b/drivers/gpu/drm/nouveau/dispnv50/base507c.c index 302d4e6fc52f..788db043a342 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/base507c.c +++ b/drivers/gpu/drm/nouveau/dispnv50/base507c.c @@ -88,7 +88,11 @@ base507c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) NVVAL(NV507C, SET_CONVERSION, OFS, 
0x64)); } else { PUSH_MTHD(push, NV507C, SET_PROCESSING, - NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE)); + NVDEF(NV507C, SET_PROCESSING, USE_GAIN_OFS, DISABLE), + + SET_CONVERSION, + NVVAL(NV507C, SET_CONVERSION, GAIN, 0) | + NVVAL(NV507C, SET_CONVERSION, OFS, 0)); } PUSH_MTHD(push, NV507C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8); diff --git a/drivers/gpu/drm/nouveau/dispnv50/base827c.c b/drivers/gpu/drm/nouveau/dispnv50/base827c.c index 18d34096f125..093d4ba6910e 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/base827c.c +++ b/drivers/gpu/drm/nouveau/dispnv50/base827c.c @@ -49,7 +49,11 @@ base827c_image_set(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw) NVVAL(NV827C, SET_CONVERSION, OFS, 0x64)); } else { PUSH_MTHD(push, NV827C, SET_PROCESSING, - NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE)); + NVDEF(NV827C, SET_PROCESSING, USE_GAIN_OFS, DISABLE), + + SET_CONVERSION, + NVVAL(NV827C, SET_CONVERSION, GAIN, 0) | + NVVAL(NV827C, SET_CONVERSION, OFS, 0)); } PUSH_MTHD(push, NV827C, SURFACE_SET_OFFSET(0, 0), asyw->image.offset[0] >> 8, diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c index c6367035970e..5f4f09a601d4 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/disp.c +++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c @@ -2663,6 +2663,14 @@ nv50_display_create(struct drm_device *dev) else nouveau_display(dev)->format_modifiers = disp50xx_modifiers; + if (disp->disp->object.oclass >= GK104_DISP) { + dev->mode_config.cursor_width = 256; + dev->mode_config.cursor_height = 256; + } else { + dev->mode_config.cursor_width = 64; + dev->mode_config.cursor_height = 64; + } + /* create crtc objects to represent the hw heads */ if (disp->disp->object.oclass >= GV100_DISP) crtcs = nvif_rd32(&device->object, 0x610060) & 0xff; diff --git a/drivers/gpu/drm/nouveau/dispnv50/head917d.c b/drivers/gpu/drm/nouveau/dispnv50/head917d.c index a5d827403660..ea9f8667305e 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/head917d.c +++ b/drivers/gpu/drm/nouveau/dispnv50/head917d.c @@ -22,6 +22,7 @@ #include "head.h" #include "core.h" +#include "nvif/push.h" #include <nvif/push507c.h> #include <nvhw/class/cl917d.h> @@ -73,6 +74,31 @@ head917d_base(struct nv50_head *head, struct nv50_head_atom *asyh) return 0; } +static int +head917d_curs_set(struct nv50_head *head, struct nv50_head_atom *asyh) +{ + struct nvif_push *push = nv50_disp(head->base.base.dev)->core->chan.push; + const int i = head->base.index; + int ret; + + ret = PUSH_WAIT(push, 5); + if (ret) + return ret; + + PUSH_MTHD(push, NV917D, HEAD_SET_CONTROL_CURSOR(i), + NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, ENABLE, ENABLE) | + NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, FORMAT, asyh->curs.format) | + NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, SIZE, asyh->curs.layout) | + NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_X, 0) | + NVVAL(NV917D, HEAD_SET_CONTROL_CURSOR, HOT_SPOT_Y, 0) | + NVDEF(NV917D, HEAD_SET_CONTROL_CURSOR, COMPOSITION, ALPHA_BLEND), + + HEAD_SET_OFFSET_CURSOR(i), asyh->curs.offset >> 8); + + PUSH_MTHD(push, NV917D, HEAD_SET_CONTEXT_DMA_CURSOR(i), asyh->curs.handle); + return 0; +} + int head917d_curs_layout(struct nv50_head *head, struct nv50_wndw_atom *asyw, struct nv50_head_atom *asyh) @@ -101,7 +127,7 @@ head917d = { .core_clr = head907d_core_clr, .curs_layout = head917d_curs_layout, .curs_format = head507d_curs_format, - .curs_set = head907d_curs_set, + .curs_set = head917d_curs_set, .curs_clr = head907d_curs_clr, .base = head917d_base, .ovly = head907d_ovly, diff --git 
a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c index ce451242f79e..271de3a63f21 100644 --- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c +++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c @@ -702,6 +702,11 @@ nv50_wndw_init(struct nv50_wndw *wndw) nvif_notify_get(&wndw->notify); } +static const u64 nv50_cursor_format_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + DRM_FORMAT_MOD_INVALID, +}; + int nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, enum drm_plane_type type, const char *name, int index, @@ -713,6 +718,7 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, struct nvif_mmu *mmu = &drm->client.mmu; struct nv50_disp *disp = nv50_disp(dev); struct nv50_wndw *wndw; + const u64 *format_modifiers; int nformat; int ret; @@ -728,10 +734,13 @@ nv50_wndw_new_(const struct nv50_wndw_func *func, struct drm_device *dev, for (nformat = 0; format[nformat]; nformat++); - ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, - format, nformat, - nouveau_display(dev)->format_modifiers, - type, "%s-%d", name, index); + if (type == DRM_PLANE_TYPE_CURSOR) + format_modifiers = nv50_cursor_format_modifiers; + else + format_modifiers = nouveau_display(dev)->format_modifiers; + + ret = drm_universal_plane_init(dev, &wndw->plane, heads, &nv50_wndw, format, nformat, + format_modifiers, type, "%s-%d", name, index); if (ret) { kfree(*pwndw); *pwndw = NULL; diff --git a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h index 2a2612d6e1e0..fb223723a38a 100644 --- a/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h +++ b/drivers/gpu/drm/nouveau/include/nvhw/class/cl917d.h @@ -66,6 +66,10 @@ #define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_ALPHA_BLEND (0x00000000) #define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_PREMULT_ALPHA_BLEND (0x00000001) #define NV917D_HEAD_SET_CONTROL_CURSOR_COMPOSITION_XOR (0x00000002) +#define NV917D_HEAD_SET_OFFSET_CURSOR(a) (0x00000484 + (a)*0x00000300) +#define NV917D_HEAD_SET_OFFSET_CURSOR_ORIGIN 31:0 +#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR(a) (0x0000048C + (a)*0x00000300) +#define NV917D_HEAD_SET_CONTEXT_DMA_CURSOR_HANDLE 31:0 #define NV917D_HEAD_SET_DITHER_CONTROL(a) (0x000004A0 + (a)*0x00000300) #define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE 0:0 #define NV917D_HEAD_SET_DITHER_CONTROL_ENABLE_DISABLE (0x00000000) diff --git a/drivers/gpu/drm/nouveau/include/nvif/push.h b/drivers/gpu/drm/nouveau/include/nvif/push.h index 168d7694ede5..6d3a8a3d2087 100644 --- a/drivers/gpu/drm/nouveau/include/nvif/push.h +++ b/drivers/gpu/drm/nouveau/include/nvif/push.h @@ -123,131 +123,131 @@ PUSH_KICK(struct nvif_push *push) } while(0) #endif -#define PUSH_1(X,f,ds,n,c,o,p,s,mA,dA) do { \ - PUSH_##o##_HDR((p), s, mA, (c)+(n)); \ - PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \ +#define PUSH_1(X,f,ds,n,o,p,s,mA,dA) do { \ + PUSH_##o##_HDR((p), s, mA, (ds)+(n)); \ + PUSH_##f(X, (p), X##mA, 1, o, (dA), ds, ""); \ } while(0) -#define PUSH_2(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \ - PUSH_1(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_2(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (1?PUSH_##o##_INC), "mthd1"); \ + PUSH_1(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_3(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) 
do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \ - PUSH_2(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_3(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd2"); \ + PUSH_2(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_4(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \ - PUSH_3(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_4(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd3"); \ + PUSH_3(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_5(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \ - PUSH_4(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_5(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd4"); \ + PUSH_4(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_6(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \ - PUSH_5(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_6(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd5"); \ + PUSH_5(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_7(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \ - PUSH_6(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_7(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd6"); \ + PUSH_6(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_8(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \ - PUSH_7(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_8(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd7"); \ + PUSH_7(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_9(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \ - PUSH_8(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_9(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd8"); \ + PUSH_8(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_10(X,f,ds,n,c,o,p,s,mB,dB,mA,dA,a...) 
do { \ - PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \ - PUSH_9(X, DATA_, 1, ds, (c)+(n), o, (p), s, X##mA, (dA), ##a); \ - PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ +#define PUSH_10(X,f,ds,n,o,p,s,mB,dB,mA,dA,a...) do { \ + PUSH_ASSERT((mB) - (mA) == (0?PUSH_##o##_INC), "mthd9"); \ + PUSH_9(X, DATA_, 1, (ds) + (n), o, (p), s, X##mA, (dA), ##a); \ + PUSH_##f(X, (p), X##mB, 0, o, (dB), ds, ""); \ } while(0) -#define PUSH_1D(X,o,p,s,mA,dA) \ - PUSH_1(X, DATA_, 1, 1, 0, o, (p), s, X##mA, (dA)) -#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \ - PUSH_2(X, DATA_, 1, 1, 0, o, (p), s, X##mB, (dB), \ - X##mA, (dA)) -#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \ - PUSH_3(X, DATA_, 1, 1, 0, o, (p), s, X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) -#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \ - PUSH_4(X, DATA_, 1, 1, 0, o, (p), s, X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) -#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \ - PUSH_5(X, DATA_, 1, 1, 0, o, (p), s, X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) +#define PUSH_1D(X,o,p,s,mA,dA) \ + PUSH_1(X, DATA_, 1, 0, o, (p), s, X##mA, (dA)) +#define PUSH_2D(X,o,p,s,mA,dA,mB,dB) \ + PUSH_2(X, DATA_, 1, 0, o, (p), s, X##mB, (dB), \ + X##mA, (dA)) +#define PUSH_3D(X,o,p,s,mA,dA,mB,dB,mC,dC) \ + PUSH_3(X, DATA_, 1, 0, o, (p), s, X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) +#define PUSH_4D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD) \ + PUSH_4(X, DATA_, 1, 0, o, (p), s, X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) +#define PUSH_5D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE) \ + PUSH_5(X, DATA_, 1, 0, o, (p), s, X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_6D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF) \ - PUSH_6(X, DATA_, 1, 1, 0, o, (p), s, X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_6(X, DATA_, 1, 0, o, (p), s, X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_7D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG) \ - PUSH_7(X, DATA_, 1, 1, 0, o, (p), s, X##mG, (dG), \ - X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_7(X, DATA_, 1, 0, o, (p), s, X##mG, (dG), \ + X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_8D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH) \ - PUSH_8(X, DATA_, 1, 1, 0, o, (p), s, X##mH, (dH), \ - X##mG, (dG), \ - X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_8(X, DATA_, 1, 0, o, (p), s, X##mH, (dH), \ + X##mG, (dG), \ + X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_9D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI) \ - PUSH_9(X, DATA_, 1, 1, 0, o, (p), s, X##mI, (dI), \ - X##mH, (dH), \ - X##mG, (dG), \ - X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ - X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_9(X, DATA_, 1, 0, o, (p), s, X##mI, (dI), \ + X##mH, (dH), \ + X##mG, (dG), \ + X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_10D(X,o,p,s,mA,dA,mB,dB,mC,dC,mD,dD,mE,dE,mF,dF,mG,dG,mH,dH,mI,dI,mJ,dJ) \ - PUSH_10(X, DATA_, 1, 1, 0, o, (p), s, X##mJ, (dJ), \ - X##mI, (dI), \ - X##mH, (dH), \ - X##mG, (dG), \ - X##mF, (dF), \ - X##mE, (dE), \ - X##mD, (dD), \ 
- X##mC, (dC), \ - X##mB, (dB), \ - X##mA, (dA)) + PUSH_10(X, DATA_, 1, 0, o, (p), s, X##mJ, (dJ), \ + X##mI, (dI), \ + X##mH, (dH), \ + X##mG, (dG), \ + X##mF, (dF), \ + X##mE, (dE), \ + X##mD, (dD), \ + X##mC, (dC), \ + X##mB, (dB), \ + X##mA, (dA)) -#define PUSH_1P(X,o,p,s,mA,dp,ds) \ - PUSH_1(X, DATAp, ds, ds, 0, o, (p), s, X##mA, (dp)) -#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \ - PUSH_2(X, DATAp, ds, ds, 0, o, (p), s, X##mB, (dp), \ - X##mA, (dA)) -#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \ - PUSH_3(X, DATAp, ds, ds, 0, o, (p), s, X##mC, (dp), \ - X##mB, (dB), \ - X##mA, (dA)) +#define PUSH_1P(X,o,p,s,mA,dp,ds) \ + PUSH_1(X, DATAp, ds, 0, o, (p), s, X##mA, (dp)) +#define PUSH_2P(X,o,p,s,mA,dA,mB,dp,ds) \ + PUSH_2(X, DATAp, ds, 0, o, (p), s, X##mB, (dp), \ + X##mA, (dA)) +#define PUSH_3P(X,o,p,s,mA,dA,mB,dB,mC,dp,ds) \ + PUSH_3(X, DATAp, ds, 0, o, (p), s, X##mC, (dp), \ + X##mB, (dB), \ + X##mA, (dA)) #define PUSH_(A,B,C,D,E,F,G,H,I,J,K,L,M,N,O,P,Q,R,S,T,U,V,W,X,IMPL,...) IMPL #define PUSH(A...) PUSH_(A, PUSH_10P, PUSH_10D, \ diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index c85b1af06b7b..7ea367a5444d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -547,7 +547,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; - int i; + int i, j; if (!ttm_dma) return; @@ -556,10 +556,21 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->num_pages; i++) + for (i = 0; i < ttm_dma->num_pages; ++i) { + struct page *p = ttm_dma->pages[i]; + size_t num_pages = 1; + + for (j = i + 1; j < ttm_dma->num_pages; ++j) { + if (++p != ttm_dma->pages[j]) + break; + + ++num_pages; + } dma_sync_single_for_device(drm->dev->dev, ttm_dma->dma_address[i], - PAGE_SIZE, DMA_TO_DEVICE); + num_pages * PAGE_SIZE, DMA_TO_DEVICE); + i += num_pages; + } } void @@ -567,7 +578,7 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; - int i; + int i, j; if (!ttm_dma) return; @@ -576,9 +587,21 @@ nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->num_pages; i++) + for (i = 0; i < ttm_dma->num_pages; ++i) { + struct page *p = ttm_dma->pages[i]; + size_t num_pages = 1; + + for (j = i + 1; j < ttm_dma->num_pages; ++j) { + if (++p != ttm_dma->pages[j]) + break; + + ++num_pages; + } + dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i], - PAGE_SIZE, DMA_FROM_DEVICE); + num_pages * PAGE_SIZE, DMA_FROM_DEVICE); + i += num_pages; + } } void nouveau_bo_add_io_reserve_lru(struct ttm_buffer_object *bo) diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c index 4f69e4c3dafd..1c3f890377d2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_svm.c +++ b/drivers/gpu/drm/nouveau/nouveau_svm.c @@ -315,6 +315,10 @@ nouveau_svmm_init(struct drm_device *dev, void *data, struct drm_nouveau_svm_init *args = data; int ret; + /* We need to fail if svm is disabled */ + if (!cli->drm->svm) + return -ENOSYS; + /* Allocate tracking for SVM-enabled VMM. 
*/ if (!(svmm = kzalloc(sizeof(*svmm), GFP_KERNEL))) return -ENOMEM; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c index eaaf5d70e352..1e643bc7e786 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.c +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c @@ -689,6 +689,30 @@ static void sun4i_tcon1_mode_set(struct sun4i_tcon *tcon, SUN4I_TCON1_BASIC5_V_SYNC(vsync) | SUN4I_TCON1_BASIC5_H_SYNC(hsync)); + /* Setup the polarity of multiple signals */ + if (tcon->quirks->polarity_in_ch0) { + val = 0; + + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + val |= SUN4I_TCON0_IO_POL_HSYNC_POSITIVE; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE; + + regmap_write(tcon->regs, SUN4I_TCON0_IO_POL_REG, val); + } else { + /* according to vendor driver, this bit must be always set */ + val = SUN4I_TCON1_IO_POL_UNKNOWN; + + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + val |= SUN4I_TCON1_IO_POL_HSYNC_POSITIVE; + + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + val |= SUN4I_TCON1_IO_POL_VSYNC_POSITIVE; + + regmap_write(tcon->regs, SUN4I_TCON1_IO_POL_REG, val); + } + /* Map output pins to channel 1 */ regmap_update_bits(tcon->regs, SUN4I_TCON_GCTL_REG, SUN4I_TCON_GCTL_IOMAP_MASK, @@ -1517,6 +1541,7 @@ static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = { static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = { .has_channel_1 = true, + .polarity_in_ch0 = true, .set_mux = sun8i_r40_tcon_tv_set_mux, }; diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h index cfbf4e6c1679..ee555318e3c2 100644 --- a/drivers/gpu/drm/sun4i/sun4i_tcon.h +++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h @@ -153,6 +153,11 @@ #define SUN4I_TCON1_BASIC5_V_SYNC(height) (((height) - 1) & 0x3ff) #define SUN4I_TCON1_IO_POL_REG 0xf0 +/* there is no documentation about this bit */ +#define SUN4I_TCON1_IO_POL_UNKNOWN BIT(26) +#define SUN4I_TCON1_IO_POL_HSYNC_POSITIVE BIT(25) +#define SUN4I_TCON1_IO_POL_VSYNC_POSITIVE BIT(24) + #define SUN4I_TCON1_IO_TRI_REG 0xf4 #define SUN4I_TCON_ECC_FIFO_REG 0xf8 @@ -235,6 +240,7 @@ struct sun4i_tcon_quirks { bool needs_de_be_mux; /* sun6i needs mux to select backend */ bool needs_edp_reset; /* a80 edp reset needed for tcon0 access */ bool supports_lvds; /* Does the TCON support an LVDS output? */ + bool polarity_in_ch0; /* some tcon1 channels have polarity bits in tcon0 pol register */ u8 dclk_min_div; /* minimum divider for TCON0 DCLK */ /* callback to handle tcon muxing options */ diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index 92add2cef2e7..bbdfd5e26ec8 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -21,8 +21,7 @@ static void sun8i_dw_hdmi_encoder_mode_set(struct drm_encoder *encoder, { struct sun8i_dw_hdmi *hdmi = encoder_to_sun8i_dw_hdmi(encoder); - if (hdmi->quirks->set_rate) - clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000); + clk_set_rate(hdmi->clk_tmds, mode->crtc_clock * 1000); } static const struct drm_encoder_helper_funcs @@ -48,11 +47,9 @@ sun8i_dw_hdmi_mode_valid_h6(struct dw_hdmi *hdmi, void *data, { /* * Controller support maximum of 594 MHz, which correlates to - * 4K@60Hz 4:4:4 or RGB. However, for frequencies greater than - * 340 MHz scrambling has to be enabled. Because scrambling is - * not yet implemented, just limit to 340 MHz for now. + * 4K@60Hz 4:4:4 or RGB. 
*/ - if (mode->clock > 340000) + if (mode->clock > 594000) return MODE_CLOCK_HIGH; return MODE_OK; @@ -295,7 +292,6 @@ static int sun8i_dw_hdmi_remove(struct platform_device *pdev) static const struct sun8i_dw_hdmi_quirks sun8i_a83t_quirks = { .mode_valid = sun8i_dw_hdmi_mode_valid_a83t, - .set_rate = true, }; static const struct sun8i_dw_hdmi_quirks sun50i_h6_quirks = { diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h index d983746fa194..d4b55af0592f 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h @@ -179,7 +179,6 @@ struct sun8i_dw_hdmi_quirks { enum drm_mode_status (*mode_valid)(struct dw_hdmi *hdmi, void *data, const struct drm_display_info *info, const struct drm_display_mode *mode); - unsigned int set_rate : 1; unsigned int use_drm_infoframe : 1; }; diff --git a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c index 35c2133724e2..9994edf67509 100644 --- a/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c +++ b/drivers/gpu/drm/sun4i/sun8i_hdmi_phy.c @@ -104,29 +104,21 @@ static const struct dw_hdmi_mpll_config sun50i_h6_mpll_cfg[] = { static const struct dw_hdmi_curr_ctrl sun50i_h6_cur_ctr[] = { /* pixelclk bpp8 bpp10 bpp12 */ - { 25175000, { 0x0000, 0x0000, 0x0000 }, }, { 27000000, { 0x0012, 0x0000, 0x0000 }, }, - { 59400000, { 0x0008, 0x0008, 0x0008 }, }, - { 72000000, { 0x0008, 0x0008, 0x001b }, }, - { 74250000, { 0x0013, 0x0013, 0x0013 }, }, - { 90000000, { 0x0008, 0x001a, 0x001b }, }, - { 118800000, { 0x001b, 0x001a, 0x001b }, }, - { 144000000, { 0x001b, 0x001a, 0x0034 }, }, - { 180000000, { 0x001b, 0x0033, 0x0034 }, }, - { 216000000, { 0x0036, 0x0033, 0x0034 }, }, - { 237600000, { 0x0036, 0x0033, 0x001b }, }, - { 288000000, { 0x0036, 0x001b, 0x001b }, }, - { 297000000, { 0x0019, 0x001b, 0x0019 }, }, - { 330000000, { 0x0036, 0x001b, 0x001b }, }, - { 594000000, { 0x003f, 0x001b, 0x001b }, }, + { 74250000, { 0x0013, 0x001a, 0x001b }, }, + { 148500000, { 0x0019, 0x0033, 0x0034 }, }, + { 297000000, { 0x0019, 0x001b, 0x001b }, }, + { 594000000, { 0x0010, 0x001b, 0x001b }, }, { ~0UL, { 0x0000, 0x0000, 0x0000 }, } }; static const struct dw_hdmi_phy_config sun50i_h6_phy_config[] = { /*pixelclk symbol term vlev*/ - { 74250000, 0x8009, 0x0004, 0x0232}, - { 148500000, 0x8029, 0x0004, 0x0273}, - { 594000000, 0x8039, 0x0004, 0x014a}, + { 27000000, 0x8009, 0x0007, 0x02b0 }, + { 74250000, 0x8009, 0x0006, 0x022d }, + { 148500000, 0x8029, 0x0006, 0x0270 }, + { 297000000, 0x8039, 0x0005, 0x01ab }, + { 594000000, 0x8029, 0x0000, 0x008a }, { ~0UL, 0x0000, 0x0000, 0x0000} }; diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c index 8cd776adc592..6e27cb1bf48b 100644 --- a/drivers/gpu/drm/ttm/ttm_pool.c +++ b/drivers/gpu/drm/ttm/ttm_pool.c @@ -33,6 +33,7 @@ #include <linux/module.h> #include <linux/dma-mapping.h> +#include <linux/highmem.h> #ifdef CONFIG_X86 #include <asm/set_memory.h> @@ -79,12 +80,13 @@ static struct page *ttm_pool_alloc_page(struct ttm_pool *pool, gfp_t gfp_flags, struct page *p; void *vaddr; - if (order) { - gfp_flags |= GFP_TRANSHUGE_LIGHT | __GFP_NORETRY | + /* Don't set the __GFP_COMP flag for higher order allocations. + * Mapping pages directly into an userspace process and calling + * put_page() on a TTM allocated page is illegal. 
+ */ + if (order) + gfp_flags |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM; - gfp_flags &= ~__GFP_MOVABLE; - gfp_flags &= ~__GFP_COMP; - } if (!pool->use_dma_alloc) { p = alloc_pages(gfp_flags, order); @@ -217,6 +219,15 @@ static void ttm_pool_unmap(struct ttm_pool *pool, dma_addr_t dma_addr, /* Give pages into a specific pool_type */ static void ttm_pool_type_give(struct ttm_pool_type *pt, struct page *p) { + unsigned int i, num_pages = 1 << pt->order; + + for (i = 0; i < num_pages; ++i) { + if (PageHighMem(p)) + clear_highpage(p + i); + else + clear_page(page_address(p + i)); + } + spin_lock(&pt->lock); list_add(&p->lru, &pt->pages); spin_unlock(&pt->lock); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 555106220578..98cab0bbe92d 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1267,6 +1267,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) card->dai_link = dai_link; card->num_links = 1; card->name = vc4_hdmi->variant->card_name; + card->driver_name = "vc4-hdmi"; card->dev = dev; card->owner = THIS_MODULE; diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index cccd341e5d67..3b722252d1fb 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -620,11 +620,11 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) * for now we just allocate globally. */ if (!hvs->hvs5) - /* 96kB */ - drm_mm_init(&hvs->lbm_mm, 0, 96 * 1024); + /* 48k words of 2x12-bit pixels */ + drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024); else - /* 70k words */ - drm_mm_init(&hvs->lbm_mm, 0, 70 * 2 * 1024); + /* 60k words of 4x12-bit pixels */ + drm_mm_init(&hvs->lbm_mm, 0, 60 * 1024); /* Upload filter kernels. We only have the one for now, so we * keep it around for the lifetime of the driver. diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index 6b39cc2ca18d..af4b8944a603 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -220,7 +220,7 @@ static void vc4_plane_reset(struct drm_plane *plane) __drm_atomic_helper_plane_reset(plane, &vc4_state->base); } -static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val) +static void vc4_dlist_counter_increment(struct vc4_plane_state *vc4_state) { if (vc4_state->dlist_count == vc4_state->dlist_size) { u32 new_size = max(4u, vc4_state->dlist_count * 2); @@ -235,7 +235,15 @@ static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val) vc4_state->dlist_size = new_size; } - vc4_state->dlist[vc4_state->dlist_count++] = val; + vc4_state->dlist_count++; +} + +static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val) +{ + unsigned int idx = vc4_state->dlist_count; + + vc4_dlist_counter_increment(vc4_state); + vc4_state->dlist[idx] = val; } /* Returns the scl0/scl1 field based on whether the dimensions need to @@ -437,6 +445,7 @@ static void vc4_write_ppf(struct vc4_plane_state *vc4_state, u32 src, u32 dst) static u32 vc4_lbm_size(struct drm_plane_state *state) { struct vc4_plane_state *vc4_state = to_vc4_plane_state(state); + struct vc4_dev *vc4 = to_vc4_dev(state->plane->dev); u32 pix_per_line; u32 lbm; @@ -472,7 +481,11 @@ static u32 vc4_lbm_size(struct drm_plane_state *state) lbm = pix_per_line * 16; } - lbm = roundup(lbm, 32); + /* Align it to 64 or 128 (hvs5) bytes */ + lbm = roundup(lbm, vc4->hvs->hvs5 ? 
128 : 64); + + /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */ + lbm /= vc4->hvs->hvs5 ? 4 : 2; return lbm; } @@ -912,9 +925,9 @@ static int vc4_plane_mode_set(struct drm_plane *plane, if (!vc4_state->is_unity) { vc4_dlist_write(vc4_state, VC4_SET_FIELD(vc4_state->crtc_w, - SCALER_POS1_SCL_WIDTH) | + SCALER5_POS1_SCL_WIDTH) | VC4_SET_FIELD(vc4_state->crtc_h, - SCALER_POS1_SCL_HEIGHT)); + SCALER5_POS1_SCL_HEIGHT)); } /* Position Word 2: Source Image Size */ @@ -973,8 +986,10 @@ static int vc4_plane_mode_set(struct drm_plane *plane, * be set when calling vc4_plane_allocate_lbm(). */ if (vc4_state->y_scaling[0] != VC4_SCALING_NONE || - vc4_state->y_scaling[1] != VC4_SCALING_NONE) - vc4_state->lbm_offset = vc4_state->dlist_count++; + vc4_state->y_scaling[1] != VC4_SCALING_NONE) { + vc4_state->lbm_offset = vc4_state->dlist_count; + vc4_dlist_counter_increment(vc4_state); + } if (num_planes > 1) { /* Emit Cb/Cr as channel 0 and Y as channel diff --git a/drivers/gpu/drm/xlnx/zynqmp_disp.c b/drivers/gpu/drm/xlnx/zynqmp_disp.c index c685d94409b0..148add0ca1d6 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_disp.c +++ b/drivers/gpu/drm/xlnx/zynqmp_disp.c @@ -1396,19 +1396,11 @@ static void zynqmp_disp_enable(struct zynqmp_disp *disp) */ static void zynqmp_disp_disable(struct zynqmp_disp *disp) { - struct drm_crtc *crtc = &disp->crtc; - zynqmp_disp_audio_disable(&disp->audio); zynqmp_disp_avbuf_disable_audio(&disp->avbuf); zynqmp_disp_avbuf_disable_channels(&disp->avbuf); zynqmp_disp_avbuf_disable(&disp->avbuf); - - /* Mark the flip is done as crtc is disabled anyway */ - if (crtc->state->event) { - complete_all(crtc->state->event->base.completion); - crtc->state->event = NULL; - } } static inline struct zynqmp_disp *crtc_to_disp(struct drm_crtc *crtc) @@ -1499,6 +1491,13 @@ zynqmp_disp_crtc_atomic_disable(struct drm_crtc *crtc, drm_crtc_vblank_off(&disp->crtc); + spin_lock_irq(&crtc->dev->event_lock); + if (crtc->state->event) { + drm_crtc_send_vblank_event(crtc, crtc->state->event); + crtc->state->event = NULL; + } + spin_unlock_irq(&crtc->dev->event_lock); + clk_disable_unprepare(disp->pclk); pm_runtime_put_sync(disp->dev); } diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 0743ef51d3b2..8429ebe7097e 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -758,7 +758,8 @@ static int mt_touch_input_mapping(struct hid_device *hdev, struct hid_input *hi, MT_STORE_FIELD(inrange_state); return 1; case HID_DG_CONFIDENCE: - if (cls->name == MT_CLS_WIN_8 && + if ((cls->name == MT_CLS_WIN_8 || + cls->name == MT_CLS_WIN_8_FORCE_MULTI_INPUT) && (field->application == HID_DG_TOUCHPAD || field->application == HID_DG_TOUCHSCREEN)) app->quirks |= MT_QUIRK_CONFIDENCE; diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c index e8acd235db2a..aa9e48876ced 100644 --- a/drivers/hid/wacom_sys.c +++ b/drivers/hid/wacom_sys.c @@ -147,9 +147,9 @@ static int wacom_wac_pen_serial_enforce(struct hid_device *hdev, } if (flush) - wacom_wac_queue_flush(hdev, &wacom_wac->pen_fifo); + wacom_wac_queue_flush(hdev, wacom_wac->pen_fifo); else if (insert) - wacom_wac_queue_insert(hdev, &wacom_wac->pen_fifo, + wacom_wac_queue_insert(hdev, wacom_wac->pen_fifo, raw_data, report_size); return insert && !flush; @@ -1280,7 +1280,7 @@ static void wacom_devm_kfifo_release(struct device *dev, void *res) static int wacom_devm_kfifo_alloc(struct wacom *wacom) { struct wacom_wac *wacom_wac = &wacom->wacom_wac; - struct kfifo_rec_ptr_2 *pen_fifo = &wacom_wac->pen_fifo; + 
struct kfifo_rec_ptr_2 *pen_fifo; int error; pen_fifo = devres_alloc(wacom_devm_kfifo_release, @@ -1297,6 +1297,7 @@ static int wacom_devm_kfifo_alloc(struct wacom *wacom) } devres_add(&wacom->hdev->dev, pen_fifo); + wacom_wac->pen_fifo = pen_fifo; return 0; } diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h index da612b6e9c77..195910dd2154 100644 --- a/drivers/hid/wacom_wac.h +++ b/drivers/hid/wacom_wac.h @@ -342,7 +342,7 @@ struct wacom_wac { struct input_dev *pen_input; struct input_dev *touch_input; struct input_dev *pad_input; - struct kfifo_rec_ptr_2 pen_fifo; + struct kfifo_rec_ptr_2 *pen_fifo; int pid; int num_contacts_left; u8 bt_features; diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c index 52acd77438ed..251e75c9ba9d 100644 --- a/drivers/hwtracing/intel_th/pci.c +++ b/drivers/hwtracing/intel_th/pci.c @@ -269,6 +269,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = { .driver_data = (kernel_ulong_t)&intel_th_2x, }, { + /* Alder Lake-P */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x51a6), + .driver_data = (kernel_ulong_t)&intel_th_2x, + }, + { /* Alder Lake CPU */ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x466f), .driver_data = (kernel_ulong_t)&intel_th_2x, diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c index 3e7df1c0477f..81d7b21d31ec 100644 --- a/drivers/hwtracing/stm/heartbeat.c +++ b/drivers/hwtracing/stm/heartbeat.c @@ -64,7 +64,7 @@ static void stm_heartbeat_unlink(struct stm_source_data *data) static int stm_heartbeat_init(void) { - int i, ret = -ENOMEM; + int i, ret; if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX) return -EINVAL; @@ -72,8 +72,10 @@ static int stm_heartbeat_init(void) for (i = 0; i < nr_devs; i++) { stm_heartbeat[i].data.name = kasprintf(GFP_KERNEL, "heartbeat.%d", i); - if (!stm_heartbeat[i].data.name) + if (!stm_heartbeat[i].data.name) { + ret = -ENOMEM; goto fail_unregister; + } stm_heartbeat[i].data.nr_chans = 1; stm_heartbeat[i].data.link = stm_heartbeat_link; diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index d4d60ad0eda0..ab1f39ac39f4 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -1013,6 +1013,7 @@ config I2C_SIRF config I2C_SPRD tristate "Spreadtrum I2C interface" depends on I2C=y && (ARCH_SPRD || COMPILE_TEST) + depends on COMMON_CLK help If you say yes to this option, support will be included for the Spreadtrum I2C interface. 
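The drivers/hid/wacom_sys.c hunk earlier in this diff converts the pen FIFO from an embedded kfifo into a devres-managed pointer: the record FIFO is allocated with devres_alloc(), sized with kfifo_alloc(), registered with devres_add() so it is freed together with the HID device, and only then published through wacom_wac->pen_fifo. The following is a minimal sketch of that allocation pattern, separate from the diff itself; the device pointer, FIFO size and function names are illustrative assumptions, not code taken from the driver.

/* Sketch of a devres-managed record kfifo, mirroring the wacom change. */
#include <linux/device.h>
#include <linux/kfifo.h>
#include <linux/slab.h>

/* devres release callback: frees the kfifo storage when the device goes away */
static void example_kfifo_release(struct device *dev, void *res)
{
	struct kfifo_rec_ptr_2 *fifo = res;

	kfifo_free(fifo);
}

/* Allocate a record kfifo whose lifetime is tied to @dev. */
static int example_devm_kfifo_alloc(struct device *dev,
				    struct kfifo_rec_ptr_2 **out)
{
	struct kfifo_rec_ptr_2 *fifo;
	int error;

	fifo = devres_alloc(example_kfifo_release, sizeof(*fifo), GFP_KERNEL);
	if (!fifo)
		return -ENOMEM;

	error = kfifo_alloc(fifo, 4096, GFP_KERNEL);	/* 4096: illustrative size */
	if (error) {
		devres_free(fifo);
		return error;
	}

	devres_add(dev, fifo);	/* freed automatically on device teardown */
	*out = fifo;		/* publish only after the resource is registered */

	return 0;
}

A caller would invoke such a helper once from probe and never free the FIFO explicitly, which is the same ordering the patch enforces by assigning wacom_wac->pen_fifo only after devres_add() has succeeded.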
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c index b444fbf1a262..a8e8af57e33f 100644 --- a/drivers/i2c/busses/i2c-imx.c +++ b/drivers/i2c/busses/i2c-imx.c @@ -241,6 +241,19 @@ static struct imx_i2c_hwdata vf610_i2c_hwdata = { }; +static const struct platform_device_id imx_i2c_devtype[] = { + { + .name = "imx1-i2c", + .driver_data = (kernel_ulong_t)&imx1_i2c_hwdata, + }, { + .name = "imx21-i2c", + .driver_data = (kernel_ulong_t)&imx21_i2c_hwdata, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, imx_i2c_devtype); + static const struct of_device_id i2c_imx_dt_ids[] = { { .compatible = "fsl,imx1-i2c", .data = &imx1_i2c_hwdata, }, { .compatible = "fsl,imx21-i2c", .data = &imx21_i2c_hwdata, }, @@ -1330,7 +1343,11 @@ static int i2c_imx_probe(struct platform_device *pdev) return -ENOMEM; match = device_get_match_data(&pdev->dev); - i2c_imx->hwdata = match; + if (match) + i2c_imx->hwdata = match; + else + i2c_imx->hwdata = (struct imx_i2c_hwdata *) + platform_get_device_id(pdev)->driver_data; /* Setup i2c_imx driver structure */ strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name)); @@ -1498,6 +1515,7 @@ static struct platform_driver i2c_imx_driver = { .of_match_table = i2c_imx_dt_ids, .acpi_match_table = i2c_imx_acpi_ids, }, + .id_table = imx_i2c_devtype, }; static int __init i2c_adap_imx_init(void) diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index 0818d3e50734..2ffd2f354d0a 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -1275,7 +1275,8 @@ static int mtk_i2c_probe(struct platform_device *pdev) mtk_i2c_clock_disable(i2c); ret = devm_request_irq(&pdev->dev, irq, mtk_i2c_irq, - IRQF_TRIGGER_NONE, I2C_DRV_NAME, i2c); + IRQF_NO_SUSPEND | IRQF_TRIGGER_NONE, + I2C_DRV_NAME, i2c); if (ret < 0) { dev_err(&pdev->dev, "Request I2C IRQ %d fail\n", irq); @@ -1302,7 +1303,16 @@ static int mtk_i2c_remove(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -static int mtk_i2c_resume(struct device *dev) +static int mtk_i2c_suspend_noirq(struct device *dev) +{ + struct mtk_i2c *i2c = dev_get_drvdata(dev); + + i2c_mark_adapter_suspended(&i2c->adap); + + return 0; +} + +static int mtk_i2c_resume_noirq(struct device *dev) { int ret; struct mtk_i2c *i2c = dev_get_drvdata(dev); @@ -1317,12 +1327,15 @@ static int mtk_i2c_resume(struct device *dev) mtk_i2c_clock_disable(i2c); + i2c_mark_adapter_resumed(&i2c->adap); + return 0; } #endif static const struct dev_pm_ops mtk_i2c_pm = { - SET_SYSTEM_SLEEP_PM_OPS(NULL, mtk_i2c_resume) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mtk_i2c_suspend_noirq, + mtk_i2c_resume_noirq) }; static struct platform_driver mtk_i2c_driver = { diff --git a/drivers/i2c/busses/i2c-octeon-core.c b/drivers/i2c/busses/i2c-octeon-core.c index d9607905dc2f..845eda70b8ca 100644 --- a/drivers/i2c/busses/i2c-octeon-core.c +++ b/drivers/i2c/busses/i2c-octeon-core.c @@ -347,7 +347,7 @@ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, if (result) return result; if (recv_len && i == 0) { - if (data[i] > I2C_SMBUS_BLOCK_MAX + 1) + if (data[i] > I2C_SMBUS_BLOCK_MAX) return -EPROTO; length += data[i]; } diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 9aa8e65b511e..473fbe144b7e 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -57,6 +57,8 @@ #define STM32F7_I2C_CR1_RXDMAEN BIT(15) #define STM32F7_I2C_CR1_TXDMAEN BIT(14) #define STM32F7_I2C_CR1_ANFOFF BIT(12) +#define STM32F7_I2C_CR1_DNF_MASK 
GENMASK(11, 8) +#define STM32F7_I2C_CR1_DNF(n) (((n) & 0xf) << 8) #define STM32F7_I2C_CR1_ERRIE BIT(7) #define STM32F7_I2C_CR1_TCIE BIT(6) #define STM32F7_I2C_CR1_STOPIE BIT(5) @@ -160,7 +162,7 @@ enum { }; #define STM32F7_I2C_DNF_DEFAULT 0 -#define STM32F7_I2C_DNF_MAX 16 +#define STM32F7_I2C_DNF_MAX 15 #define STM32F7_I2C_ANALOG_FILTER_ENABLE 1 #define STM32F7_I2C_ANALOG_FILTER_DELAY_MIN 50 /* ns */ @@ -725,6 +727,13 @@ static void stm32f7_i2c_hw_config(struct stm32f7_i2c_dev *i2c_dev) else stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_ANFOFF); + + /* Program the Digital Filter */ + stm32f7_i2c_clr_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_DNF_MASK); + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, + STM32F7_I2C_CR1_DNF(i2c_dev->setup.dnf)); + stm32f7_i2c_set_bits(i2c_dev->base + STM32F7_I2C_CR1, STM32F7_I2C_CR1_PE); } diff --git a/drivers/i2c/busses/i2c-tegra-bpmp.c b/drivers/i2c/busses/i2c-tegra-bpmp.c index ec7a7e917edd..c0c7d01473f2 100644 --- a/drivers/i2c/busses/i2c-tegra-bpmp.c +++ b/drivers/i2c/busses/i2c-tegra-bpmp.c @@ -80,7 +80,7 @@ static int tegra_bpmp_xlate_flags(u16 flags, u16 *out) flags &= ~I2C_M_RECV_LEN; } - return (flags != 0) ? -EINVAL : 0; + return 0; } /** diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 6f08c0c3238d..8b113ae32dc7 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -326,6 +326,8 @@ static void i2c_writel(struct tegra_i2c_dev *i2c_dev, u32 val, unsigned int reg) /* read back register to make sure that register writes completed */ if (reg != I2C_TX_FIFO) readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg)); + else if (i2c_dev->is_vi) + readl_relaxed(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, I2C_INT_STATUS)); } static u32 i2c_readl(struct tegra_i2c_dev *i2c_dev, unsigned int reg) @@ -339,6 +341,21 @@ static void i2c_writesl(struct tegra_i2c_dev *i2c_dev, void *data, writesl(i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg), data, len); } +static void i2c_writesl_vi(struct tegra_i2c_dev *i2c_dev, void *data, + unsigned int reg, unsigned int len) +{ + u32 *data32 = data; + + /* + * VI I2C controller has known hardware bug where writes get stuck + * when immediate multiple writes happen to TX_FIFO register. + * Recommended software work around is to read I2C register after + * each write to TX_FIFO register to flush out the data. 
+ */ + while (len--) + i2c_writel(i2c_dev, *data32++, reg); +} + static void i2c_readsl(struct tegra_i2c_dev *i2c_dev, void *data, unsigned int reg, unsigned int len) { @@ -533,7 +550,7 @@ static int tegra_i2c_poll_register(struct tegra_i2c_dev *i2c_dev, void __iomem *addr = i2c_dev->base + tegra_i2c_reg_addr(i2c_dev, reg); u32 val; - if (!i2c_dev->atomic_mode) + if (!i2c_dev->atomic_mode && !in_irq()) return readl_relaxed_poll_timeout(addr, val, !(val & mask), delay_us, timeout_us); @@ -811,7 +828,10 @@ static int tegra_i2c_fill_tx_fifo(struct tegra_i2c_dev *i2c_dev) i2c_dev->msg_buf_remaining = buf_remaining; i2c_dev->msg_buf = buf + words_to_transfer * BYTES_PER_FIFO_WORD; - i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); + if (i2c_dev->is_vi) + i2c_writesl_vi(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); + else + i2c_writesl(i2c_dev, buf, I2C_TX_FIFO, words_to_transfer); buf += words_to_transfer * BYTES_PER_FIFO_WORD; } diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c index 500abd27fb22..1b73647cc3b1 100644 --- a/drivers/i3c/master/mipi-i3c-hci/core.c +++ b/drivers/i3c/master/mipi-i3c-hci/core.c @@ -777,7 +777,7 @@ static int i3c_hci_remove(struct platform_device *pdev) return 0; } -static const struct __maybe_unused of_device_id i3c_hci_of_match[] = { +static const __maybe_unused struct of_device_id i3c_hci_of_match[] = { { .compatible = "mipi-i3c-hci", }, {}, }; diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c index b11c8c47ba2a..e946903b0993 100644 --- a/drivers/iio/adc/ti_am335x_adc.c +++ b/drivers/iio/adc/ti_am335x_adc.c @@ -397,16 +397,12 @@ static int tiadc_iio_buffered_hardware_setup(struct device *dev, ret = devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh, flags, indio_dev->name, indio_dev); if (ret) - goto error_kfifo_free; + return ret; indio_dev->setup_ops = setup_ops; indio_dev->modes |= INDIO_BUFFER_SOFTWARE; return 0; - -error_kfifo_free: - iio_kfifo_free(indio_dev->buffer); - return ret; } static const char * const chan_name_ain[] = { diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c index 0507283bd4c1..2dbd2646e44e 100644 --- a/drivers/iio/common/st_sensors/st_sensors_trigger.c +++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c @@ -23,35 +23,31 @@ * @sdata: Sensor data. * * returns: - * 0 - no new samples available - * 1 - new samples available - * negative - error or unknown + * false - no new samples available or read error + * true - new samples available */ -static int st_sensors_new_samples_available(struct iio_dev *indio_dev, - struct st_sensor_data *sdata) +static bool st_sensors_new_samples_available(struct iio_dev *indio_dev, + struct st_sensor_data *sdata) { int ret, status; /* How would I know if I can't check it? 
*/ if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) - return -EINVAL; + return true; /* No scan mask, no interrupt */ if (!indio_dev->active_scan_mask) - return 0; + return false; ret = regmap_read(sdata->regmap, sdata->sensor_settings->drdy_irq.stat_drdy.addr, &status); if (ret < 0) { dev_err(sdata->dev, "error checking samples available\n"); - return ret; + return false; } - if (status & sdata->sensor_settings->drdy_irq.stat_drdy.mask) - return 1; - - return 0; + return !!(status & sdata->sensor_settings->drdy_irq.stat_drdy.mask); } /** @@ -180,9 +176,15 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, /* Tell the interrupt handler that we're dealing with edges */ if (irq_trig == IRQF_TRIGGER_FALLING || - irq_trig == IRQF_TRIGGER_RISING) + irq_trig == IRQF_TRIGGER_RISING) { + if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) { + dev_err(&indio_dev->dev, + "edge IRQ not supported w/o stat register.\n"); + err = -EOPNOTSUPP; + goto iio_trigger_free; + } sdata->edge_irq = true; - else + } else { /* * If we're not using edges (i.e. level interrupts) we * just mask off the IRQ, handle one interrupt, then @@ -190,6 +192,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, * interrupt handler top half again and start over. */ irq_trig |= IRQF_ONESHOT; + } /* * If the interrupt pin is Open Drain, by definition this diff --git a/drivers/iio/dac/ad5504.c b/drivers/iio/dac/ad5504.c index 28921b62e642..e9297c25d4ef 100644 --- a/drivers/iio/dac/ad5504.c +++ b/drivers/iio/dac/ad5504.c @@ -187,9 +187,9 @@ static ssize_t ad5504_write_dac_powerdown(struct iio_dev *indio_dev, return ret; if (pwr_down) - st->pwr_down_mask |= (1 << chan->channel); - else st->pwr_down_mask &= ~(1 << chan->channel); + else + st->pwr_down_mask |= (1 << chan->channel); ret = ad5504_spi_write(st, AD5504_ADDR_CTRL, AD5504_DAC_PWRDWN_MODE(st->pwr_down_mode) | diff --git a/drivers/iio/proximity/sx9310.c b/drivers/iio/proximity/sx9310.c index a2f820997afc..37fd0b65a014 100644 --- a/drivers/iio/proximity/sx9310.c +++ b/drivers/iio/proximity/sx9310.c @@ -601,7 +601,7 @@ static int sx9310_read_thresh(struct sx9310_data *data, return ret; regval = FIELD_GET(SX9310_REG_PROX_CTRL8_9_PTHRESH_MASK, regval); - if (regval > ARRAY_SIZE(sx9310_pthresh_codes)) + if (regval >= ARRAY_SIZE(sx9310_pthresh_codes)) return -EINVAL; *val = sx9310_pthresh_codes[regval]; @@ -1305,7 +1305,8 @@ sx9310_get_default_reg(struct sx9310_data *data, int i, if (ret) break; - pos = min(max(ilog2(pos), 3), 10) - 3; + /* Powers of 2, except for a gap between 16 and 64 */ + pos = clamp(ilog2(pos), 3, 11) - (pos >= 32 ? 4 : 3); reg_def->def &= ~SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK; reg_def->def |= FIELD_PREP(SX9310_REG_PROX_CTRL7_AVGPOSFILT_MASK, pos); diff --git a/drivers/iio/temperature/mlx90632.c b/drivers/iio/temperature/mlx90632.c index 503fe54a0bb9..608ccb1d8bc8 100644 --- a/drivers/iio/temperature/mlx90632.c +++ b/drivers/iio/temperature/mlx90632.c @@ -248,6 +248,12 @@ static int mlx90632_set_meas_type(struct regmap *regmap, u8 type) if (ret < 0) return ret; + /* + * Give the mlx90632 some time to reset properly before sending a new I2C command + * if this is not done, the following I2C command(s) will not be accepted. 
+ */ + usleep_range(150, 200); + ret = regmap_write_bits(regmap, MLX90632_REG_CONTROL, (MLX90632_CFG_MTYP_MASK | MLX90632_CFG_PWR_MASK), (MLX90632_MTYP_STATUS(type) | MLX90632_PWR_STATUS_HALT)); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index a7401398cb34..d109bb3822a5 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -2474,7 +2474,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; - init_attr->cap.max_recv_sge = qhp->attr.sq_max_sges; + init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges; init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE; init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0; return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 315c0137f584..3d6b7a2db496 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -500,7 +500,7 @@ struct hns_roce_qp_table { struct hns_roce_hem_table sccc_table; struct mutex scc_mutex; struct hns_roce_bank bank[HNS_ROCE_QP_BANK_NUM]; - spinlock_t bank_lock; + struct mutex bank_mutex; }; struct hns_roce_cq_table { diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c index 8af411f6389e..004aca9086ab 100644 --- a/drivers/infiniband/hw/hns/hns_roce_qp.c +++ b/drivers/infiniband/hw/hns/hns_roce_qp.c @@ -209,7 +209,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) hr_qp->doorbell_qpn = 1; } else { - spin_lock(&qp_table->bank_lock); + mutex_lock(&qp_table->bank_mutex); bankid = get_least_load_bankid_for_qp(qp_table->bank); ret = alloc_qpn_with_bankid(&qp_table->bank[bankid], bankid, @@ -217,12 +217,12 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) if (ret) { ibdev_err(&hr_dev->ib_dev, "failed to alloc QPN, ret = %d\n", ret); - spin_unlock(&qp_table->bank_lock); + mutex_unlock(&qp_table->bank_mutex); return ret; } qp_table->bank[bankid].inuse++; - spin_unlock(&qp_table->bank_lock); + mutex_unlock(&qp_table->bank_mutex); hr_qp->doorbell_qpn = (u32)num; } @@ -408,9 +408,9 @@ static void free_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) ida_free(&hr_dev->qp_table.bank[bankid].ida, hr_qp->qpn >> 3); - spin_lock(&hr_dev->qp_table.bank_lock); + mutex_lock(&hr_dev->qp_table.bank_mutex); hr_dev->qp_table.bank[bankid].inuse--; - spin_unlock(&hr_dev->qp_table.bank_lock); + mutex_unlock(&hr_dev->qp_table.bank_mutex); } static u32 proc_rq_sge(struct hns_roce_dev *dev, struct hns_roce_qp *hr_qp, @@ -1395,6 +1395,7 @@ int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev) unsigned int i; mutex_init(&qp_table->scc_mutex); + mutex_init(&qp_table->bank_mutex); xa_init(&hr_dev->qp_table_xa); reserved_from_bot = hr_dev->caps.reserved_qps; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 93dd568a5397..0d69a697d75f 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -3258,8 +3258,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) int err; dev->port[port_num].roce.nb.notifier_call = mlx5_netdev_event; - err = register_netdevice_notifier_net(mlx5_core_net(dev->mdev), - &dev->port[port_num].roce.nb); + err = register_netdevice_notifier(&dev->port[port_num].roce.nb); 
if (err) { dev->port[port_num].roce.nb.notifier_call = NULL; return err; @@ -3271,8 +3270,7 @@ static int mlx5_add_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) static void mlx5_remove_netdev_notifier(struct mlx5_ib_dev *dev, u8 port_num) { if (dev->port[port_num].roce.nb.notifier_call) { - unregister_netdevice_notifier_net(mlx5_core_net(dev->mdev), - &dev->port[port_num].roce.nb); + unregister_netdevice_notifier(&dev->port[port_num].roce.nb); dev->port[port_num].roce.nb.notifier_call = NULL; } } diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c index e59615a4c9d9..586b0e52ba7f 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c @@ -214,7 +214,7 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf) struct usnic_vnic_res *vnic_res; int len; - len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu ", + len = sysfs_emit(buf, "QPN: %d State: (%s) PID: %u VF Idx: %hu", qp_grp->ibqp.qp_num, usnic_ib_qp_grp_state_to_string(qp_grp->state), qp_grp->owner_pid, @@ -224,14 +224,13 @@ static ssize_t summary_show(struct usnic_ib_qp_grp *qp_grp, char *buf) res_chunk = qp_grp->res_chunk_list[i]; for (j = 0; j < res_chunk->cnt; j++) { vnic_res = res_chunk->res[j]; - len += sysfs_emit_at( - buf, len, "%s[%d] ", + len += sysfs_emit_at(buf, len, " %s[%d]", usnic_vnic_res_type_to_str(vnic_res->type), vnic_res->vnic_idx); } } - len = sysfs_emit_at(buf, len, "\n"); + len += sysfs_emit_at(buf, len, "\n"); return len; } diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h index c142f5e7f25f..de57f2fed743 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h @@ -509,6 +509,20 @@ static inline int ib_send_flags_to_pvrdma(int flags) return flags & PVRDMA_MASK(PVRDMA_SEND_FLAGS_MAX); } +static inline int pvrdma_network_type_to_ib(enum pvrdma_network_type type) +{ + switch (type) { + case PVRDMA_NETWORK_ROCE_V1: + return RDMA_NETWORK_ROCE_V1; + case PVRDMA_NETWORK_IPV4: + return RDMA_NETWORK_IPV4; + case PVRDMA_NETWORK_IPV6: + return RDMA_NETWORK_IPV6; + default: + return RDMA_NETWORK_IPV6; + } +} + void pvrdma_qp_cap_to_ib(struct ib_qp_cap *dst, const struct pvrdma_qp_cap *src); void ib_qp_cap_to_pvrdma(struct pvrdma_qp_cap *dst, diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c index a119ac3e103c..6aa40bd2fd52 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c @@ -367,7 +367,7 @@ retry: wc->dlid_path_bits = cqe->dlid_path_bits; wc->port_num = cqe->port_num; wc->vendor_err = cqe->vendor_err; - wc->network_hdr_type = cqe->network_hdr_type; + wc->network_hdr_type = pvrdma_network_type_to_ib(cqe->network_hdr_type); /* Update shared ring state */ pvrdma_idx_ring_inc(&cq->ring_state->rx.cons_head, cq->ibcq.cqe); diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 36d56163afac..0701bd1ffd1a 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -8,6 +8,7 @@ #include <linux/if_arp.h> #include <linux/netdevice.h> #include <linux/if.h> +#include <linux/if_vlan.h> #include <net/udp_tunnel.h> #include <net/sch_generic.h> #include <linux/netfilter.h> @@ -160,6 +161,8 @@ static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb) * drop when skb is freed */ rxe = rxe_get_dev_from_net(ndev); + if 
(!rxe && is_vlan_dev(ndev)) + rxe = rxe_get_dev_from_net(vlan_dev_real_dev(ndev)); if (!rxe) goto drop; diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 1ae94f2cb336..142f3d8014d8 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -872,6 +872,11 @@ static enum resp_states do_complete(struct rxe_qp *qp, else wc->network_hdr_type = RDMA_NETWORK_IPV6; + if (is_vlan_dev(skb->dev)) { + wc->wc_flags |= IB_WC_WITH_VLAN; + wc->vlan_id = vlan_dev_vlan_id(skb->dev); + } + if (pkt->mask & RXE_IMMDT_MASK) { wc->wc_flags |= IB_WC_WITH_IMM; wc->ex.imm_data = immdt_imm(pkt); diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index 0687f0ed60b8..8cc8ca4a9ac0 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -215,9 +215,17 @@ static const struct xpad_device { { 0x0e6f, 0x0213, "Afterglow Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x021f, "Rock Candy Gamepad for Xbox 360", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0246, "Rock Candy Gamepad for Xbox One 2015", 0, XTYPE_XBOXONE }, - { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a0, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a1, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a2, "PDP Wired Controller for Xbox One - Crimson Red", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a4, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x02a6, "PDP Wired Controller for Xbox One - Camo Series", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a7, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02a8, "PDP Xbox One Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02ab, "PDP Controller for Xbox One", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02ad, "PDP Wired Controller for Xbox One - Stealth Series", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02b3, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, + { 0x0e6f, 0x02b8, "Afterglow Prismatic Wired Controller", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0301, "Logic3 Controller", 0, XTYPE_XBOX360 }, { 0x0e6f, 0x0346, "Rock Candy Gamepad for Xbox One 2016", 0, XTYPE_XBOXONE }, { 0x0e6f, 0x0401, "Logic3 Controller", 0, XTYPE_XBOX360 }, @@ -296,6 +304,9 @@ static const struct xpad_device { { 0x1bad, 0xfa01, "MadCatz GamePad", 0, XTYPE_XBOX360 }, { 0x1bad, 0xfd00, "Razer Onza TE", 0, XTYPE_XBOX360 }, { 0x1bad, 0xfd01, "Razer Onza", 0, XTYPE_XBOX360 }, + { 0x20d6, 0x2001, "BDA Xbox Series X Wired Controller", 0, XTYPE_XBOXONE }, + { 0x20d6, 0x281f, "PowerA Wired Controller For Xbox 360", 0, XTYPE_XBOX360 }, + { 0x2e24, 0x0652, "Hyperkin Duke X-Box One pad", 0, XTYPE_XBOXONE }, { 0x24c6, 0x5000, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 }, { 0x24c6, 0x5300, "PowerA MINI PROEX Controller", 0, XTYPE_XBOX360 }, { 0x24c6, 0x5303, "Xbox Airflo wired controller", 0, XTYPE_XBOX360 }, @@ -429,8 +440,12 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x162e), /* Joytech X-Box 360 controllers */ XPAD_XBOX360_VENDOR(0x1689), /* Razer Onza */ XPAD_XBOX360_VENDOR(0x1bad), /* Harminix Rock Band Guitar and Drums */ + XPAD_XBOX360_VENDOR(0x20d6), /* PowerA Controllers */ + XPAD_XBOXONE_VENDOR(0x20d6), /* PowerA Controllers */ XPAD_XBOX360_VENDOR(0x24c6), /* PowerA Controllers */ XPAD_XBOXONE_VENDOR(0x24c6), /* PowerA Controllers */ + XPAD_XBOXONE_VENDOR(0x2e24), /* Hyperkin Duke X-Box One pad */ + XPAD_XBOX360_VENDOR(0x2f24), /* GameSir Controllers */ { } }; diff --git 
a/drivers/input/misc/ariel-pwrbutton.c b/drivers/input/misc/ariel-pwrbutton.c index eda86ab552b9..17bbaac8b80c 100644 --- a/drivers/input/misc/ariel-pwrbutton.c +++ b/drivers/input/misc/ariel-pwrbutton.c @@ -149,12 +149,6 @@ static const struct of_device_id ariel_pwrbutton_of_match[] = { }; MODULE_DEVICE_TABLE(of, ariel_pwrbutton_of_match); -static const struct spi_device_id ariel_pwrbutton_id_table[] = { - { "wyse-ariel-ec-input", 0 }, - {} -}; -MODULE_DEVICE_TABLE(spi, ariel_pwrbutton_id_table); - static struct spi_driver ariel_pwrbutton_driver = { .driver = { .name = "dell-wyse-ariel-ec-input", diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 3a2dcf0805f1..c74b020796a9 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -219,6 +219,8 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = { DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"), DMI_MATCH(DMI_PRODUCT_NAME, "C15B"), }, + }, + { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ByteSpeed LLC"), DMI_MATCH(DMI_PRODUCT_NAME, "ByteSpeed Laptop C15B"), diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index 19765f1c04f7..c682b028f0a2 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -157,6 +157,7 @@ static const struct goodix_chip_id goodix_chip_ids[] = { { .id = "5663", .data = >1x_chip_data }, { .id = "5688", .data = >1x_chip_data }, { .id = "917S", .data = >1x_chip_data }, + { .id = "9286", .data = >1x_chip_data }, { .id = "911", .data = >911_chip_data }, { .id = "9271", .data = >911_chip_data }, @@ -1448,6 +1449,7 @@ static const struct of_device_id goodix_of_match[] = { { .compatible = "goodix,gt927" }, { .compatible = "goodix,gt9271" }, { .compatible = "goodix,gt928" }, + { .compatible = "goodix,gt9286" }, { .compatible = "goodix,gt967" }, { } }; diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c index 199cf3daec10..d8fccf048bf4 100644 --- a/drivers/input/touchscreen/ili210x.c +++ b/drivers/input/touchscreen/ili210x.c @@ -29,11 +29,13 @@ struct ili2xxx_chip { void *buf, size_t len); int (*get_touch_data)(struct i2c_client *client, u8 *data); bool (*parse_touch_data)(const u8 *data, unsigned int finger, - unsigned int *x, unsigned int *y); + unsigned int *x, unsigned int *y, + unsigned int *z); bool (*continue_polling)(const u8 *data, bool touch); unsigned int max_touches; unsigned int resolution; bool has_calibrate_reg; + bool has_pressure_reg; }; struct ili210x { @@ -82,7 +84,8 @@ static int ili210x_read_touch_data(struct i2c_client *client, u8 *data) static bool ili210x_touchdata_to_coords(const u8 *touchdata, unsigned int finger, - unsigned int *x, unsigned int *y) + unsigned int *x, unsigned int *y, + unsigned int *z) { if (touchdata[0] & BIT(finger)) return false; @@ -137,7 +140,8 @@ static int ili211x_read_touch_data(struct i2c_client *client, u8 *data) static bool ili211x_touchdata_to_coords(const u8 *touchdata, unsigned int finger, - unsigned int *x, unsigned int *y) + unsigned int *x, unsigned int *y, + unsigned int *z) { u32 data; @@ -169,7 +173,8 @@ static const struct ili2xxx_chip ili211x_chip = { static bool ili212x_touchdata_to_coords(const u8 *touchdata, unsigned int finger, - unsigned int *x, unsigned int *y) + unsigned int *x, unsigned int *y, + unsigned int *z) { u16 val; @@ -235,7 +240,8 @@ static int ili251x_read_touch_data(struct i2c_client *client, u8 *data) static bool 
ili251x_touchdata_to_coords(const u8 *touchdata, unsigned int finger, - unsigned int *x, unsigned int *y) + unsigned int *x, unsigned int *y, + unsigned int *z) { u16 val; @@ -245,6 +251,7 @@ static bool ili251x_touchdata_to_coords(const u8 *touchdata, *x = val & 0x3fff; *y = get_unaligned_be16(touchdata + 1 + (finger * 5) + 2); + *z = touchdata[1 + (finger * 5) + 4]; return true; } @@ -261,6 +268,7 @@ static const struct ili2xxx_chip ili251x_chip = { .continue_polling = ili251x_check_continue_polling, .max_touches = 10, .has_calibrate_reg = true, + .has_pressure_reg = true, }; static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata) @@ -268,14 +276,16 @@ static bool ili210x_report_events(struct ili210x *priv, u8 *touchdata) struct input_dev *input = priv->input; int i; bool contact = false, touch; - unsigned int x = 0, y = 0; + unsigned int x = 0, y = 0, z = 0; for (i = 0; i < priv->chip->max_touches; i++) { - touch = priv->chip->parse_touch_data(touchdata, i, &x, &y); + touch = priv->chip->parse_touch_data(touchdata, i, &x, &y, &z); input_mt_slot(input, i); if (input_mt_report_slot_state(input, MT_TOOL_FINGER, touch)) { touchscreen_report_pos(input, &priv->prop, x, y, true); + if (priv->chip->has_pressure_reg) + input_report_abs(input, ABS_MT_PRESSURE, z); contact = true; } } @@ -437,6 +447,8 @@ static int ili210x_i2c_probe(struct i2c_client *client, max_xy = (chip->resolution ?: SZ_64K) - 1; input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_xy, 0, 0); input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_xy, 0, 0); + if (priv->chip->has_pressure_reg) + input_set_abs_params(input, ABS_MT_PRESSURE, 0, 0xa, 0, 0); touchscreen_parse_properties(input, true, &priv->prop); error = input_mt_init_slots(input, priv->chip->max_touches, diff --git a/drivers/input/touchscreen/st1232.c b/drivers/input/touchscreen/st1232.c index bda96762744e..b4e7bcbe9b91 100644 --- a/drivers/input/touchscreen/st1232.c +++ b/drivers/input/touchscreen/st1232.c @@ -26,6 +26,20 @@ #define ST1232_TS_NAME "st1232-ts" #define ST1633_TS_NAME "st1633-ts" +#define REG_STATUS 0x01 /* Device Status | Error Code */ + +#define STATUS_NORMAL 0x00 +#define STATUS_INIT 0x01 +#define STATUS_ERROR 0x02 +#define STATUS_AUTO_TUNING 0x03 +#define STATUS_IDLE 0x04 +#define STATUS_POWER_DOWN 0x05 + +#define ERROR_NONE 0x00 +#define ERROR_INVALID_ADDRESS 0x10 +#define ERROR_INVALID_VALUE 0x20 +#define ERROR_INVALID_PLATFORM 0x30 + #define REG_XY_RESOLUTION 0x04 #define REG_XY_COORDINATES 0x12 #define ST_TS_MAX_FINGERS 10 @@ -47,7 +61,8 @@ struct st1232_ts_data { u8 *read_buf; }; -static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg) +static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg, + unsigned int n) { struct i2c_client *client = ts->client; struct i2c_msg msg[] = { @@ -59,7 +74,7 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg) { .addr = client->addr, .flags = I2C_M_RD | I2C_M_DMA_SAFE, - .len = ts->read_buf_len, + .len = n, .buf = ts->read_buf, } }; @@ -72,6 +87,22 @@ static int st1232_ts_read_data(struct st1232_ts_data *ts, u8 reg) return 0; } +static int st1232_ts_wait_ready(struct st1232_ts_data *ts) +{ + unsigned int retries; + int error; + + for (retries = 10; retries; retries--) { + error = st1232_ts_read_data(ts, REG_STATUS, 1); + if (!error && ts->read_buf[0] == (STATUS_NORMAL | ERROR_NONE)) + return 0; + + usleep_range(1000, 2000); + } + + return -ENXIO; +} + static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x, u16 *max_y) { @@ -79,14 +110,14 
@@ static int st1232_ts_read_resolution(struct st1232_ts_data *ts, u16 *max_x, int error; /* select resolution register */ - error = st1232_ts_read_data(ts, REG_XY_RESOLUTION); + error = st1232_ts_read_data(ts, REG_XY_RESOLUTION, 3); if (error) return error; buf = ts->read_buf; - *max_x = ((buf[0] & 0x0070) << 4) | buf[1]; - *max_y = ((buf[0] & 0x0007) << 8) | buf[2]; + *max_x = (((buf[0] & 0x0070) << 4) | buf[1]) - 1; + *max_y = (((buf[0] & 0x0007) << 8) | buf[2]) - 1; return 0; } @@ -140,7 +171,7 @@ static irqreturn_t st1232_ts_irq_handler(int irq, void *dev_id) int count; int error; - error = st1232_ts_read_data(ts, REG_XY_COORDINATES); + error = st1232_ts_read_data(ts, REG_XY_COORDINATES, ts->read_buf_len); if (error) goto out; @@ -251,6 +282,11 @@ static int st1232_ts_probe(struct i2c_client *client, input_dev->name = "st1232-touchscreen"; input_dev->id.bustype = BUS_I2C; + /* Wait until device is ready */ + error = st1232_ts_wait_ready(ts); + if (error) + return error; + /* Read resolution from the chip */ error = st1232_ts_read_resolution(ts, &max_x, &max_y); if (error) { diff --git a/drivers/iommu/amd/amd_iommu.h b/drivers/iommu/amd/amd_iommu.h index 6b8cbdf71714..b4adab698563 100644 --- a/drivers/iommu/amd/amd_iommu.h +++ b/drivers/iommu/amd/amd_iommu.h @@ -84,12 +84,9 @@ static inline bool is_rd890_iommu(struct pci_dev *pdev) (pdev->device == PCI_DEVICE_ID_RD890_IOMMU); } -static inline bool iommu_feature(struct amd_iommu *iommu, u64 f) +static inline bool iommu_feature(struct amd_iommu *iommu, u64 mask) { - if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) - return false; - - return !!(iommu->features & f); + return !!(iommu->features & mask); } static inline u64 iommu_virt_to_phys(void *vaddr) diff --git a/drivers/iommu/amd/amd_iommu_types.h b/drivers/iommu/amd/amd_iommu_types.h index 553587827771..1a0495dd5fcb 100644 --- a/drivers/iommu/amd/amd_iommu_types.h +++ b/drivers/iommu/amd/amd_iommu_types.h @@ -387,6 +387,10 @@ #define IOMMU_CAP_NPCACHE 26 #define IOMMU_CAP_EFR 27 +/* IOMMU IVINFO */ +#define IOMMU_IVINFO_OFFSET 36 +#define IOMMU_IVINFO_EFRSUP BIT(0) + /* IOMMU Feature Reporting Field (for IVHD type 10h */ #define IOMMU_FEAT_GASUP_SHIFT 6 diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 6a1f7048dacc..83d8ab2aed9f 100644 --- a/drivers/iommu/amd/init.c +++ b/drivers/iommu/amd/init.c @@ -257,6 +257,8 @@ static void init_device_table_dma(void); static bool amd_iommu_pre_enabled = true; +static u32 amd_iommu_ivinfo __initdata; + bool translation_pre_enabled(struct amd_iommu *iommu) { return (iommu->flags & AMD_IOMMU_FLAG_TRANS_PRE_ENABLED); @@ -296,6 +298,18 @@ int amd_iommu_get_num_iommus(void) return amd_iommus_present; } +/* + * For IVHD type 0x11/0x40, EFR is also available via IVHD. + * Default to IVHD EFR since it is available sooner + * (i.e. before PCI init). 
+ */ +static void __init early_iommu_features_init(struct amd_iommu *iommu, + struct ivhd_header *h) +{ + if (amd_iommu_ivinfo & IOMMU_IVINFO_EFRSUP) + iommu->features = h->efr_reg; +} + /* Access to l1 and l2 indexed register spaces */ static u32 iommu_read_l1(struct amd_iommu *iommu, u16 l1, u8 address) @@ -1577,6 +1591,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h) if (h->efr_reg & BIT(IOMMU_EFR_XTSUP_SHIFT)) amd_iommu_xt_mode = IRQ_REMAP_X2APIC_MODE; + + early_iommu_features_init(iommu, h); + break; default: return -EINVAL; @@ -1770,6 +1787,35 @@ static const struct attribute_group *amd_iommu_groups[] = { NULL, }; +/* + * Note: IVHD 0x11 and 0x40 also contains exact copy + * of the IOMMU Extended Feature Register [MMIO Offset 0030h]. + * Default to EFR in IVHD since it is available sooner (i.e. before PCI init). + */ +static void __init late_iommu_features_init(struct amd_iommu *iommu) +{ + u64 features; + + if (!(iommu->cap & (1 << IOMMU_CAP_EFR))) + return; + + /* read extended feature bits */ + features = readq(iommu->mmio_base + MMIO_EXT_FEATURES); + + if (!iommu->features) { + iommu->features = features; + return; + } + + /* + * Sanity check and warn if EFR values from + * IVHD and MMIO conflict. + */ + if (features != iommu->features) + pr_warn(FW_WARN "EFR mismatch. Use IVHD EFR (%#llx : %#llx\n).", + features, iommu->features); +} + static int __init iommu_init_pci(struct amd_iommu *iommu) { int cap_ptr = iommu->cap_ptr; @@ -1789,8 +1835,7 @@ static int __init iommu_init_pci(struct amd_iommu *iommu) if (!(iommu->cap & (1 << IOMMU_CAP_IOTLB))) amd_iommu_iotlb_sup = false; - /* read extended feature bits */ - iommu->features = readq(iommu->mmio_base + MMIO_EXT_FEATURES); + late_iommu_features_init(iommu); if (iommu_feature(iommu, FEATURE_GT)) { int glxval; @@ -2607,6 +2652,11 @@ static void __init free_dma_resources(void) free_unity_maps(); } +static void __init ivinfo_init(void *ivrs) +{ + amd_iommu_ivinfo = *((u32 *)(ivrs + IOMMU_IVINFO_OFFSET)); +} + /* * This is the hardware init function for AMD IOMMU in the system. * This function is called either from amd_iommu_init or from the interrupt @@ -2661,6 +2711,8 @@ static int __init early_amd_iommu_init(void) if (ret) goto out; + ivinfo_init(ivrs_base); + amd_iommu_target_ivhd_type = get_highest_supported_ivhd_type(ivrs_base); DUMP_printk("Using IVHD type %#x\n", amd_iommu_target_ivhd_type); diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c index 004feaed3c72..02e7c10a4224 100644 --- a/drivers/iommu/intel/dmar.c +++ b/drivers/iommu/intel/dmar.c @@ -1496,7 +1496,7 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid, * Max Invs Pending (MIP) is set to 0 for now until we have DIT in * ECAP. 
*/ - if (addr & GENMASK_ULL(size_order + VTD_PAGE_SHIFT, 0)) + if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order)) pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n", addr, size_order); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index f665322a0991..06b00b5363d8 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -5440,6 +5440,36 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain, return ret; } +static bool domain_use_flush_queue(void) +{ + struct dmar_drhd_unit *drhd; + struct intel_iommu *iommu; + bool r = true; + + if (intel_iommu_strict) + return false; + + /* + * The flush queue implementation does not perform page-selective + * invalidations that are required for efficient TLB flushes in virtual + * environments. The benefit of batching is likely to be much lower than + * the overhead of synchronizing the virtual and physical IOMMU + * page-tables. + */ + rcu_read_lock(); + for_each_active_iommu(iommu, drhd) { + if (!cap_caching_mode(iommu->cap)) + continue; + + pr_warn_once("IOMMU batching is disabled due to virtualization"); + r = false; + break; + } + rcu_read_unlock(); + + return r; +} + static int intel_iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr attr, void *data) @@ -5450,7 +5480,7 @@ intel_iommu_domain_get_attr(struct iommu_domain *domain, case IOMMU_DOMAIN_DMA: switch (attr) { case DOMAIN_ATTR_DMA_USE_FLUSH_QUEUE: - *(int *)data = !intel_iommu_strict; + *(int *)data = domain_use_flush_queue(); return 0; default: return -ENODEV; diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 94920a51c628..b147f22a78f4 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -493,8 +493,9 @@ config TI_SCI_INTA_IRQCHIP TI System Controller, say Y here. Otherwise, say N. 
config TI_PRUSS_INTC - tristate "TI PRU-ICSS Interrupt Controller" - depends on ARCH_DAVINCI || SOC_AM33XX || SOC_AM43XX || SOC_DRA7XX || ARCH_KEYSTONE || ARCH_K3 + tristate + depends on TI_PRUSS + default TI_PRUSS select IRQ_DOMAIN help This enables support for the PRU-ICSS Local Interrupt Controller diff --git a/drivers/irqchip/irq-bcm2836.c b/drivers/irqchip/irq-bcm2836.c index 5f5eb8877c41..25c9a9c06e41 100644 --- a/drivers/irqchip/irq-bcm2836.c +++ b/drivers/irqchip/irq-bcm2836.c @@ -167,7 +167,7 @@ static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc) chained_irq_exit(chip, desc); } -static void bcm2836_arm_irqchip_ipi_eoi(struct irq_data *d) +static void bcm2836_arm_irqchip_ipi_ack(struct irq_data *d) { int cpu = smp_processor_id(); @@ -195,7 +195,7 @@ static struct irq_chip bcm2836_arm_irqchip_ipi = { .name = "IPI", .irq_mask = bcm2836_arm_irqchip_dummy_op, .irq_unmask = bcm2836_arm_irqchip_dummy_op, - .irq_eoi = bcm2836_arm_irqchip_ipi_eoi, + .irq_ack = bcm2836_arm_irqchip_ipi_ack, .ipi_send_mask = bcm2836_arm_irqchip_ipi_send_mask, }; diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index 9ed1bc473663..09b91b81851c 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -142,8 +142,8 @@ static void liointc_resume(struct irq_chip_generic *gc) static const char * const parent_names[] = {"int0", "int1", "int2", "int3"}; -int __init liointc_of_init(struct device_node *node, - struct device_node *parent) +static int __init liointc_of_init(struct device_node *node, + struct device_node *parent) { struct irq_chip_generic *gc; struct irq_domain *domain; diff --git a/drivers/irqchip/irq-mips-cpu.c b/drivers/irqchip/irq-mips-cpu.c index 95d4fd8f7a96..0bbb0b2d0dd5 100644 --- a/drivers/irqchip/irq-mips-cpu.c +++ b/drivers/irqchip/irq-mips-cpu.c @@ -197,6 +197,13 @@ static int mips_cpu_ipi_alloc(struct irq_domain *domain, unsigned int virq, if (ret) return ret; + ret = irq_domain_set_hwirq_and_chip(domain->parent, virq + i, hwirq, + &mips_mt_cpu_irq_controller, + NULL); + + if (ret) + return ret; + ret = irq_set_irq_type(virq + i, IRQ_TYPE_LEVEL_HIGH); if (ret) return ret; diff --git a/drivers/irqchip/irq-sl28cpld.c b/drivers/irqchip/irq-sl28cpld.c index 0aa50d025ef6..fbb354413ffa 100644 --- a/drivers/irqchip/irq-sl28cpld.c +++ b/drivers/irqchip/irq-sl28cpld.c @@ -66,7 +66,7 @@ static int sl28cpld_intc_probe(struct platform_device *pdev) irqchip->chip.num_regs = 1; irqchip->chip.status_base = base + INTC_IP; irqchip->chip.mask_base = base + INTC_IE; - irqchip->chip.mask_invert = true, + irqchip->chip.mask_invert = true; irqchip->chip.ack_base = base + INTC_IP; return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev), diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 849d3c5f908e..6c1d8b69a465 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig @@ -928,6 +928,9 @@ config LEDS_ACER_A500 This option enables support for the Power Button LED of Acer Iconia Tab A500. 
+comment "Flash and Torch LED drivers" +source "drivers/leds/flash/Kconfig" + comment "LED Triggers" source "drivers/leds/trigger/Kconfig" diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index 73e603e1727e..156c0b4e60d9 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile @@ -103,5 +103,8 @@ obj-$(CONFIG_LEDS_SPI_BYTE) += leds-spi-byte.o # LED Userspace Drivers obj-$(CONFIG_LEDS_USER) += uleds.o +# Flash and Torch LED Drivers +obj-$(CONFIG_LEDS_CLASS_FLASH) += flash/ + # LED Triggers obj-$(CONFIG_LEDS_TRIGGERS) += trigger/ diff --git a/drivers/leds/flash/Kconfig b/drivers/leds/flash/Kconfig new file mode 100644 index 000000000000..b580b416b9a4 --- /dev/null +++ b/drivers/leds/flash/Kconfig @@ -0,0 +1,16 @@ +# SPDX-License-Identifier: GPL-2.0 + +if LEDS_CLASS_FLASH + +config LEDS_RT8515 + tristate "LED support for Richtek RT8515 flash/torch LED" + depends on GPIOLIB + depends on V4L2_FLASH_LED_CLASS || !V4L2_FLASH_LED_CLASS + help + This option enables support for the Richtek RT8515 flash + and torch LEDs found on some mobile phones. + + To compile this driver as a module, choose M here: the module + will be called leds-rt8515. + +endif # LEDS_CLASS_FLASH diff --git a/drivers/leds/flash/Makefile b/drivers/leds/flash/Makefile new file mode 100644 index 000000000000..e990e257f4d7 --- /dev/null +++ b/drivers/leds/flash/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_LEDS_RT8515) += leds-rt8515.o diff --git a/drivers/leds/flash/leds-rt8515.c b/drivers/leds/flash/leds-rt8515.c new file mode 100644 index 000000000000..590bfa180d10 --- /dev/null +++ b/drivers/leds/flash/leds-rt8515.c @@ -0,0 +1,397 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * LED driver for Richtek RT8515 flash/torch white LEDs + * found on some Samsung mobile phones. + * + * This is a 1.5A Boost dual channel driver produced around 2011. + * + * The component lacks a datasheet, but in the schematic picture + * from the LG P970 service manual you can see the connections + * from the RT8515 to the LED, with two resistors connected + * from the pins "RFS" and "RTS" to ground. + * + * On the LG P970: + * RFS (resistance flash setting?) is 20 kOhm + * RTS (resistance torch setting?) is 39 kOhm + * + * Some sleuthing finds us the RT9387A which we have a datasheet for: + * https://static5.arrow.com/pdfs/2014/7/27/8/21/12/794/rtt_/manual/94download_ds.jspprt9387a.jspprt9387a.pdf + * This apparently works the same way so in theory this driver + * should cover RT9387A as well. This has not been tested, please + * update the compatibles if you add RT9387A support. 
+ * + * Linus Walleij <linus.walleij@linaro.org> + */ +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/led-class-flash.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/property.h> +#include <linux/regulator/consumer.h> + +#include <media/v4l2-flash-led-class.h> + +/* We can provide 15-700 mA out to the LED */ +#define RT8515_MIN_IOUT_MA 15 +#define RT8515_MAX_IOUT_MA 700 +/* The maximum intensity is 1-16 for flash and 1-100 for torch */ +#define RT8515_FLASH_MAX 16 +#define RT8515_TORCH_MAX 100 + +#define RT8515_TIMEOUT_US 250000U +#define RT8515_MAX_TIMEOUT_US 300000U + +struct rt8515 { + struct led_classdev_flash fled; + struct device *dev; + struct v4l2_flash *v4l2_flash; + struct mutex lock; + struct regulator *reg; + struct gpio_desc *enable_torch; + struct gpio_desc *enable_flash; + struct timer_list powerdown_timer; + u32 max_timeout; /* Flash max timeout */ + int flash_max_intensity; + int torch_max_intensity; +}; + +static struct rt8515 *to_rt8515(struct led_classdev_flash *fled) +{ + return container_of(fled, struct rt8515, fled); +} + +static void rt8515_gpio_led_off(struct rt8515 *rt) +{ + gpiod_set_value(rt->enable_flash, 0); + gpiod_set_value(rt->enable_torch, 0); +} + +static void rt8515_gpio_brightness_commit(struct gpio_desc *gpiod, + int brightness) +{ + int i; + + /* + * Toggling a GPIO line with a small delay increases the + * brightness one step at a time. + */ + for (i = 0; i < brightness; i++) { + gpiod_set_value(gpiod, 0); + udelay(1); + gpiod_set_value(gpiod, 1); + udelay(1); + } +} + +/* This is setting the torch light level */ +static int rt8515_led_brightness_set(struct led_classdev *led, + enum led_brightness brightness) +{ + struct led_classdev_flash *fled = lcdev_to_flcdev(led); + struct rt8515 *rt = to_rt8515(fled); + + mutex_lock(&rt->lock); + + if (brightness == LED_OFF) { + /* Off */ + rt8515_gpio_led_off(rt); + } else if (brightness < RT8515_TORCH_MAX) { + /* Step it up to movie mode brightness using the flash pin */ + rt8515_gpio_brightness_commit(rt->enable_torch, brightness); + } else { + /* Max torch brightness requested */ + gpiod_set_value(rt->enable_torch, 1); + } + + mutex_unlock(&rt->lock); + + return 0; +} + +static int rt8515_led_flash_strobe_set(struct led_classdev_flash *fled, + bool state) +{ + struct rt8515 *rt = to_rt8515(fled); + struct led_flash_setting *timeout = &fled->timeout; + int brightness = rt->flash_max_intensity; + + mutex_lock(&rt->lock); + + if (state) { + /* Enable LED flash mode and set brightness */ + rt8515_gpio_brightness_commit(rt->enable_flash, brightness); + /* Set timeout */ + mod_timer(&rt->powerdown_timer, + jiffies + usecs_to_jiffies(timeout->val)); + } else { + del_timer_sync(&rt->powerdown_timer); + /* Turn the LED off */ + rt8515_gpio_led_off(rt); + } + + fled->led_cdev.brightness = LED_OFF; + /* After this the torch LED will be disabled */ + + mutex_unlock(&rt->lock); + + return 0; +} + +static int rt8515_led_flash_strobe_get(struct led_classdev_flash *fled, + bool *state) +{ + struct rt8515 *rt = to_rt8515(fled); + + *state = timer_pending(&rt->powerdown_timer); + + return 0; +} + +static int rt8515_led_flash_timeout_set(struct led_classdev_flash *fled, + u32 timeout) +{ + /* The timeout is stored in the led-class-flash core */ + return 0; +} + +static const struct led_flash_ops rt8515_flash_ops = { + .strobe_set = rt8515_led_flash_strobe_set, + .strobe_get = rt8515_led_flash_strobe_get, 
+ .timeout_set = rt8515_led_flash_timeout_set, +}; + +static void rt8515_powerdown_timer(struct timer_list *t) +{ + struct rt8515 *rt = from_timer(rt, t, powerdown_timer); + + /* Turn the LED off */ + rt8515_gpio_led_off(rt); +} + +static void rt8515_init_flash_timeout(struct rt8515 *rt) +{ + struct led_classdev_flash *fled = &rt->fled; + struct led_flash_setting *s; + + /* Init flash timeout setting */ + s = &fled->timeout; + s->min = 1; + s->max = rt->max_timeout; + s->step = 1; + /* + * Set default timeout to RT8515_TIMEOUT_US except if + * max_timeout from DT is lower. + */ + s->val = min(rt->max_timeout, RT8515_TIMEOUT_US); +} + +#if IS_ENABLED(CONFIG_V4L2_FLASH_LED_CLASS) +/* Configure the V2L2 flash subdevice */ +static void rt8515_init_v4l2_flash_config(struct rt8515 *rt, + struct v4l2_flash_config *v4l2_sd_cfg) +{ + struct led_classdev *led = &rt->fled.led_cdev; + struct led_flash_setting *s; + + strscpy(v4l2_sd_cfg->dev_name, led->dev->kobj.name, + sizeof(v4l2_sd_cfg->dev_name)); + + /* + * Init flash intensity setting: this is a linear scale + * capped from the device tree max intensity setting + * 1..flash_max_intensity + */ + s = &v4l2_sd_cfg->intensity; + s->min = 1; + s->max = rt->flash_max_intensity; + s->step = 1; + s->val = s->max; +} + +static void rt8515_v4l2_flash_release(struct rt8515 *rt) +{ + v4l2_flash_release(rt->v4l2_flash); +} + +#else +static void rt8515_init_v4l2_flash_config(struct rt8515 *rt, + struct v4l2_flash_config *v4l2_sd_cfg) +{ +} + +static void rt8515_v4l2_flash_release(struct rt8515 *rt) +{ +} +#endif + +static void rt8515_determine_max_intensity(struct rt8515 *rt, + struct fwnode_handle *led, + const char *resistance, + const char *max_ua_prop, int hw_max, + int *max_intensity_setting) +{ + u32 res = 0; /* Can't be 0 so 0 is undefined */ + u32 ua; + u32 max_ma; + int max_intensity; + int ret; + + fwnode_property_read_u32(rt->dev->fwnode, resistance, &res); + ret = fwnode_property_read_u32(led, max_ua_prop, &ua); + + /* Missing info in DT, OK go with hardware maxima */ + if (ret || res == 0) { + dev_err(rt->dev, + "either %s or %s missing from DT, using HW max\n", + resistance, max_ua_prop); + max_ma = RT8515_MAX_IOUT_MA; + max_intensity = hw_max; + goto out_assign_max; + } + + /* + * Formula from the datasheet, this is the maximum current + * defined by the hardware. 
+ */ + max_ma = (5500 * 1000) / res; + /* + * Calculate max intensity (linear scaling) + * Formula is ((ua / 1000) / max_ma) * 100, then simplified + */ + max_intensity = (ua / 10) / max_ma; + + dev_info(rt->dev, + "current restricted from %u to %u mA, max intensity %d/100\n", + max_ma, (ua / 1000), max_intensity); + +out_assign_max: + dev_info(rt->dev, "max intensity %d/%d = %d mA\n", + max_intensity, hw_max, max_ma); + *max_intensity_setting = max_intensity; +} + +static int rt8515_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct fwnode_handle *child; + struct rt8515 *rt; + struct led_classdev *led; + struct led_classdev_flash *fled; + struct led_init_data init_data = {}; + struct v4l2_flash_config v4l2_sd_cfg = {}; + int ret; + + rt = devm_kzalloc(dev, sizeof(*rt), GFP_KERNEL); + if (!rt) + return -ENOMEM; + + rt->dev = dev; + fled = &rt->fled; + led = &fled->led_cdev; + + /* ENF - Enable Flash line */ + rt->enable_flash = devm_gpiod_get(dev, "enf", GPIOD_OUT_LOW); + if (IS_ERR(rt->enable_flash)) + return dev_err_probe(dev, PTR_ERR(rt->enable_flash), + "cannot get ENF (enable flash) GPIO\n"); + + /* ENT - Enable Torch line */ + rt->enable_torch = devm_gpiod_get(dev, "ent", GPIOD_OUT_LOW); + if (IS_ERR(rt->enable_torch)) + return dev_err_probe(dev, PTR_ERR(rt->enable_torch), + "cannot get ENT (enable torch) GPIO\n"); + + child = fwnode_get_next_available_child_node(dev->fwnode, NULL); + if (!child) { + dev_err(dev, + "No fwnode child node found for connected LED.\n"); + return -EINVAL; + } + init_data.fwnode = child; + + rt8515_determine_max_intensity(rt, child, "richtek,rfs-ohms", + "flash-max-microamp", + RT8515_FLASH_MAX, + &rt->flash_max_intensity); + rt8515_determine_max_intensity(rt, child, "richtek,rts-ohms", + "led-max-microamp", + RT8515_TORCH_MAX, + &rt->torch_max_intensity); + + ret = fwnode_property_read_u32(child, "flash-max-timeout-us", + &rt->max_timeout); + if (ret) { + rt->max_timeout = RT8515_MAX_TIMEOUT_US; + dev_warn(dev, + "flash-max-timeout-us property missing\n"); + } + timer_setup(&rt->powerdown_timer, rt8515_powerdown_timer, 0); + rt8515_init_flash_timeout(rt); + + fled->ops = &rt8515_flash_ops; + + led->max_brightness = rt->torch_max_intensity; + led->brightness_set_blocking = rt8515_led_brightness_set; + led->flags |= LED_CORE_SUSPENDRESUME | LED_DEV_CAP_FLASH; + + mutex_init(&rt->lock); + + platform_set_drvdata(pdev, rt); + + ret = devm_led_classdev_flash_register_ext(dev, fled, &init_data); + if (ret) { + dev_err(dev, "can't register LED %s\n", led->name); + mutex_destroy(&rt->lock); + return ret; + } + + rt8515_init_v4l2_flash_config(rt, &v4l2_sd_cfg); + + /* Create a V4L2 Flash device if V4L2 flash is enabled */ + rt->v4l2_flash = v4l2_flash_init(dev, child, fled, NULL, &v4l2_sd_cfg); + if (IS_ERR(rt->v4l2_flash)) { + ret = PTR_ERR(rt->v4l2_flash); + dev_err(dev, "failed to register V4L2 flash device (%d)\n", + ret); + /* + * Continue without the V4L2 flash + * (we still have the classdev) + */ + } + + return 0; +} + +static int rt8515_remove(struct platform_device *pdev) +{ + struct rt8515 *rt = platform_get_drvdata(pdev); + + rt8515_v4l2_flash_release(rt); + del_timer_sync(&rt->powerdown_timer); + mutex_destroy(&rt->lock); + + return 0; +} + +static const struct of_device_id rt8515_match[] = { + { .compatible = "richtek,rt8515", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rt8515_match); + +static struct platform_driver rt8515_driver = { + .driver = { + .name = "rt8515", + .of_match_table = rt8515_match, + }, + 
.probe = rt8515_probe, + .remove = rt8515_remove, +}; +module_platform_driver(rt8515_driver); + +MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); +MODULE_DESCRIPTION("Richtek RT8515 LED driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/leds/led-triggers.c b/drivers/leds/led-triggers.c index 91da90cfb11d..4e7b78a84149 100644 --- a/drivers/leds/led-triggers.c +++ b/drivers/leds/led-triggers.c @@ -378,14 +378,15 @@ void led_trigger_event(struct led_trigger *trig, enum led_brightness brightness) { struct led_classdev *led_cdev; + unsigned long flags; if (!trig) return; - read_lock(&trig->leddev_list_lock); + read_lock_irqsave(&trig->leddev_list_lock, flags); list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) led_set_brightness(led_cdev, brightness); - read_unlock(&trig->leddev_list_lock); + read_unlock_irqrestore(&trig->leddev_list_lock, flags); } EXPORT_SYMBOL_GPL(led_trigger_event); @@ -396,11 +397,12 @@ static void led_trigger_blink_setup(struct led_trigger *trig, int invert) { struct led_classdev *led_cdev; + unsigned long flags; if (!trig) return; - read_lock(&trig->leddev_list_lock); + read_lock_irqsave(&trig->leddev_list_lock, flags); list_for_each_entry(led_cdev, &trig->led_cdevs, trig_list) { if (oneshot) led_blink_set_oneshot(led_cdev, delay_on, delay_off, @@ -408,7 +410,7 @@ static void led_trigger_blink_setup(struct led_trigger *trig, else led_blink_set(led_cdev, delay_on, delay_off); } - read_unlock(&trig->leddev_list_lock); + read_unlock_irqrestore(&trig->leddev_list_lock, flags); } void led_trigger_blink(struct led_trigger *trig, diff --git a/drivers/leds/leds-ariel.c b/drivers/leds/leds-ariel.c index bb68ba23a7d4..49e1bddaa15e 100644 --- a/drivers/leds/leds-ariel.c +++ b/drivers/leds/leds-ariel.c @@ -96,14 +96,14 @@ static int ariel_led_probe(struct platform_device *pdev) return -ENOMEM; leds[0].ec_index = EC_BLUE_LED; - leds[0].led_cdev.name = "blue:power", + leds[0].led_cdev.name = "blue:power"; leds[0].led_cdev.default_trigger = "default-on"; leds[1].ec_index = EC_AMBER_LED; - leds[1].led_cdev.name = "amber:status", + leds[1].led_cdev.name = "amber:status"; leds[2].ec_index = EC_GREEN_LED; - leds[2].led_cdev.name = "green:status", + leds[2].led_cdev.name = "green:status"; leds[2].led_cdev.default_trigger = "default-on"; for (i = 0; i < NLEDS; i++) { diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c index b3edee703193..9dd205870525 100644 --- a/drivers/leds/leds-lm3533.c +++ b/drivers/leds/leds-lm3533.c @@ -679,7 +679,7 @@ static int lm3533_led_probe(struct platform_device *pdev) led->cdev.brightness_get = lm3533_led_get; led->cdev.blink_set = lm3533_led_blink_set; led->cdev.brightness = LED_OFF; - led->cdev.groups = lm3533_led_attribute_groups, + led->cdev.groups = lm3533_led_attribute_groups; led->id = pdev->id; mutex_init(&led->mutex); diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index c1bcac71008c..28ddcaa5358b 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -844,11 +844,10 @@ static int nvm_bb_chunk_sense(struct nvm_dev *dev, struct ppa_addr ppa) rqd.ppa_addr = generic_to_dev_addr(dev, ppa); ret = nvm_submit_io_sync_raw(dev, &rqd); + __free_page(page); if (ret) return ret; - __free_page(page); - return rqd.error; } diff --git a/drivers/md/bcache/features.h b/drivers/md/bcache/features.h index 84fc2c0f0101..d1c8fd3977fc 100644 --- a/drivers/md/bcache/features.h +++ b/drivers/md/bcache/features.h @@ -33,6 +33,8 @@ #define BCH_FEATURE_COMPAT_FUNCS(name, flagname) \ static inline int 
bch_has_feature_##name(struct cache_sb *sb) \ { \ + if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \ + return 0; \ return (((sb)->feature_compat & \ BCH##_FEATURE_COMPAT_##flagname) != 0); \ } \ @@ -50,6 +52,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \ #define BCH_FEATURE_RO_COMPAT_FUNCS(name, flagname) \ static inline int bch_has_feature_##name(struct cache_sb *sb) \ { \ + if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \ + return 0; \ return (((sb)->feature_ro_compat & \ BCH##_FEATURE_RO_COMPAT_##flagname) != 0); \ } \ @@ -67,6 +71,8 @@ static inline void bch_clear_feature_##name(struct cache_sb *sb) \ #define BCH_FEATURE_INCOMPAT_FUNCS(name, flagname) \ static inline int bch_has_feature_##name(struct cache_sb *sb) \ { \ + if (sb->version < BCACHE_SB_VERSION_CDEV_WITH_FEATURES) \ + return 0; \ return (((sb)->feature_incompat & \ BCH##_FEATURE_INCOMPAT_##flagname) != 0); \ } \ diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 8c874710f0bc..5a55617a08e6 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1481,9 +1481,9 @@ static int crypt_alloc_req_skcipher(struct crypt_config *cc, static int crypt_alloc_req_aead(struct crypt_config *cc, struct convert_context *ctx) { - if (!ctx->r.req) { - ctx->r.req = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO); - if (!ctx->r.req) + if (!ctx->r.req_aead) { + ctx->r.req_aead = mempool_alloc(&cc->req_pool, in_interrupt() ? GFP_ATOMIC : GFP_NOIO); + if (!ctx->r.req_aead) return -ENOMEM; } diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 81df019ab284..b64fede032dc 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -257,8 +257,9 @@ struct dm_integrity_c { bool journal_uptodate; bool just_formatted; bool recalculate_flag; - bool fix_padding; bool discard; + bool fix_padding; + bool legacy_recalculate; struct alg_spec internal_hash_alg; struct alg_spec journal_crypt_alg; @@ -386,6 +387,14 @@ static int dm_integrity_failed(struct dm_integrity_c *ic) return READ_ONCE(ic->failed); } +static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic) +{ + if ((ic->internal_hash_alg.key || ic->journal_mac_alg.key) && + !ic->legacy_recalculate) + return true; + return false; +} + static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, unsigned j, unsigned char seq) { @@ -3140,6 +3149,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, arg_count += !!ic->journal_crypt_alg.alg_string; arg_count += !!ic->journal_mac_alg.alg_string; arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0; + arg_count += ic->legacy_recalculate; DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start, ic->tag_size, ic->mode, arg_count); if (ic->meta_dev) @@ -3163,6 +3173,8 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type, } if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) DMEMIT(" fix_padding"); + if (ic->legacy_recalculate) + DMEMIT(" legacy_recalculate"); #define EMIT_ALG(a, n) \ do { \ @@ -3792,7 +3804,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv) unsigned extra_args; struct dm_arg_set as; static const struct dm_arg _args[] = { - {0, 15, "Invalid number of feature args"}, + {0, 16, "Invalid number of feature args"}, }; unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec; bool should_write_sb; @@ -3940,6 +3952,8 @@ static int dm_integrity_ctr(struct dm_target 
*ti, unsigned argc, char **argv) ic->discard = true; } else if (!strcmp(opt_string, "fix_padding")) { ic->fix_padding = true; + } else if (!strcmp(opt_string, "legacy_recalculate")) { + ic->legacy_recalculate = true; } else { r = -EINVAL; ti->error = "Invalid argument"; @@ -4235,6 +4249,20 @@ try_smaller_buffer: r = -ENOMEM; goto bad; } + } else { + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) { + ti->error = "Recalculate can only be specified with internal_hash"; + r = -EINVAL; + goto bad; + } + } + + if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) && + le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors && + dm_integrity_disable_recalculate(ic)) { + ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\""; + r = -EOPNOTSUPP; + goto bad; } ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev, diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 188f41287f18..4acf2342f7ad 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -363,14 +363,23 @@ int dm_get_device(struct dm_target *ti, const char *path, fmode_t mode, { int r; dev_t dev; + unsigned int major, minor; + char dummy; struct dm_dev_internal *dd; struct dm_table *t = ti->table; BUG_ON(!t); - dev = dm_get_dev_t(path); - if (!dev) - return -ENODEV; + if (sscanf(path, "%u:%u%c", &major, &minor, &dummy) == 2) { + /* Extract the major/minor numbers */ + dev = MKDEV(major, minor); + if (MAJOR(dev) != major || MINOR(dev) != minor) + return -EOVERFLOW; + } else { + dev = dm_get_dev_t(path); + if (!dev) + return -ENODEV; + } dd = find_device(&t->devices, dev); if (!dd) { diff --git a/drivers/md/md.c b/drivers/md/md.c index ca409428b4fc..04384452a7ab 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -639,8 +639,10 @@ static void md_submit_flush_data(struct work_struct *ws) * could wait for this and below md_handle_request could wait for those * bios because of suspend check */ + spin_lock_irq(&mddev->lock); mddev->prev_flush_start = mddev->start_flush; mddev->flush_bio = NULL; + spin_unlock_irq(&mddev->lock); wake_up(&mddev->sb_wait); if (bio->bi_iter.bi_size == 0) { diff --git a/drivers/media/cec/platform/Makefile b/drivers/media/cec/platform/Makefile index 3a947159b25a..ea6f8ee8161c 100644 --- a/drivers/media/cec/platform/Makefile +++ b/drivers/media/cec/platform/Makefile @@ -10,5 +10,6 @@ obj-$(CONFIG_CEC_MESON_AO) += meson/ obj-$(CONFIG_CEC_SAMSUNG_S5P) += s5p/ obj-$(CONFIG_CEC_SECO) += seco/ obj-$(CONFIG_CEC_STI) += sti/ +obj-$(CONFIG_CEC_STM32) += stm32/ obj-$(CONFIG_CEC_TEGRA) += tegra/ diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index 96d3b2b2aa31..3f61f5863bf7 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -118,8 +118,7 @@ static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b) return -EINVAL; } } else { - length = (b->memory == VB2_MEMORY_USERPTR || - b->memory == VB2_MEMORY_DMABUF) + length = (b->memory == VB2_MEMORY_USERPTR) ? 
b->length : vb->planes[0].length; if (b->bytesused > length) diff --git a/drivers/media/i2c/ccs-pll.c b/drivers/media/i2c/ccs-pll.c index eb7b6f01f623..58ca47e904a1 100644 --- a/drivers/media/i2c/ccs-pll.c +++ b/drivers/media/i2c/ccs-pll.c @@ -772,14 +772,8 @@ int ccs_pll_calculate(struct device *dev, const struct ccs_pll_limits *lim, switch (pll->bus_type) { case CCS_PLL_BUS_TYPE_CSI2_DPHY: - /* CSI transfers 2 bits per clock per lane; thus times 2 */ - op_sys_clk_freq_hz_sdr = pll->link_freq * 2 - * (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ? - 1 : pll->csi2.lanes); - break; case CCS_PLL_BUS_TYPE_CSI2_CPHY: - op_sys_clk_freq_hz_sdr = - pll->link_freq + op_sys_clk_freq_hz_sdr = pll->link_freq * 2 * (pll->flags & CCS_PLL_FLAG_LANE_SPEED_MODEL ? 1 : pll->csi2.lanes); break; diff --git a/drivers/media/i2c/ccs/ccs-data.c b/drivers/media/i2c/ccs/ccs-data.c index 9a6097b088bd..6555bd4b325a 100644 --- a/drivers/media/i2c/ccs/ccs-data.c +++ b/drivers/media/i2c/ccs/ccs-data.c @@ -152,7 +152,7 @@ static int ccs_data_parse_version(struct bin_container *bin, vv->version_major = ((u16)v->static_data_version_major[0] << 8) + v->static_data_version_major[1]; vv->version_minor = ((u16)v->static_data_version_minor[0] << 8) + - v->static_data_version_major[1]; + v->static_data_version_minor[1]; vv->date_year = ((u16)v->year[0] << 8) + v->year[1]; vv->date_month = v->month; vv->date_day = v->day; diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2.c b/drivers/media/pci/intel/ipu3/ipu3-cio2.c index 36e354ecf71e..6cada8a6e50c 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2.c @@ -302,7 +302,7 @@ static int cio2_csi2_calc_timing(struct cio2_device *cio2, struct cio2_queue *q, if (!q->sensor) return -ENODEV; - freq = v4l2_get_link_rate(q->sensor->ctrl_handler, bpp, lanes); + freq = v4l2_get_link_freq(q->sensor->ctrl_handler, bpp, lanes); if (freq < 0) { dev_err(dev, "error %lld, invalid link_freq\n", freq); return freq; diff --git a/drivers/media/platform/qcom/venus/core.c b/drivers/media/platform/qcom/venus/core.c index bdd293faaad0..7233a7311757 100644 --- a/drivers/media/platform/qcom/venus/core.c +++ b/drivers/media/platform/qcom/venus/core.c @@ -349,8 +349,10 @@ static void venus_core_shutdown(struct platform_device *pdev) { struct venus_core *core = platform_get_drvdata(pdev); + pm_runtime_get_sync(core->dev); venus_shutdown(core); venus_firmware_deinit(core); + pm_runtime_put_sync(core->dev); } static __maybe_unused int venus_runtime_suspend(struct device *dev) diff --git a/drivers/media/platform/rcar-vin/rcar-core.c b/drivers/media/platform/rcar-vin/rcar-core.c index 98bff765b02e..e48d666f2c63 100644 --- a/drivers/media/platform/rcar-vin/rcar-core.c +++ b/drivers/media/platform/rcar-vin/rcar-core.c @@ -654,7 +654,7 @@ static int rvin_parallel_parse_of(struct rvin_dev *vin) out: fwnode_handle_put(fwnode); - return 0; + return ret; } static int rvin_parallel_init(struct rvin_dev *vin) diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c index 68da1eed753d..f7e9fd305548 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-dev.c @@ -104,6 +104,7 @@ struct rkisp1_match_data { const char * const *clks; unsigned int size; + enum rkisp1_cif_isp_version isp_ver; }; /* ---------------------------------------------------------------------------- @@ -411,15 +412,16 @@ static const char * const rk3399_isp_clks[] = { "hclk", }; -static 
const struct rkisp1_match_data rk3399_isp_clk_data = { +static const struct rkisp1_match_data rk3399_isp_match_data = { .clks = rk3399_isp_clks, .size = ARRAY_SIZE(rk3399_isp_clks), + .isp_ver = RKISP1_V10, }; static const struct of_device_id rkisp1_of_match[] = { { .compatible = "rockchip,rk3399-cif-isp", - .data = &rk3399_isp_clk_data, + .data = &rk3399_isp_match_data, }, {}, }; @@ -457,15 +459,15 @@ static void rkisp1_debug_init(struct rkisp1_device *rkisp1) static int rkisp1_probe(struct platform_device *pdev) { - const struct rkisp1_match_data *clk_data; + const struct rkisp1_match_data *match_data; struct device *dev = &pdev->dev; struct rkisp1_device *rkisp1; struct v4l2_device *v4l2_dev; unsigned int i; int ret, irq; - clk_data = of_device_get_match_data(&pdev->dev); - if (!clk_data) + match_data = of_device_get_match_data(&pdev->dev); + if (!match_data) return -ENODEV; rkisp1 = devm_kzalloc(dev, sizeof(*rkisp1), GFP_KERNEL); @@ -494,15 +496,16 @@ static int rkisp1_probe(struct platform_device *pdev) rkisp1->irq = irq; - for (i = 0; i < clk_data->size; i++) - rkisp1->clks[i].id = clk_data->clks[i]; - ret = devm_clk_bulk_get(dev, clk_data->size, rkisp1->clks); + for (i = 0; i < match_data->size; i++) + rkisp1->clks[i].id = match_data->clks[i]; + ret = devm_clk_bulk_get(dev, match_data->size, rkisp1->clks); if (ret) return ret; - rkisp1->clk_size = clk_data->size; + rkisp1->clk_size = match_data->size; pm_runtime_enable(&pdev->dev); + rkisp1->media_dev.hw_revision = match_data->isp_ver; strscpy(rkisp1->media_dev.model, RKISP1_DRIVER_NAME, sizeof(rkisp1->media_dev.model)); rkisp1->media_dev.dev = &pdev->dev; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c index 6af4d551ffb5..aa5f45749543 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c @@ -391,7 +391,7 @@ static void rkisp1_goc_config(struct rkisp1_params *params, RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA); rkisp1_write(params->rkisp1, arg->mode, RKISP1_CIF_ISP_GAMMA_OUT_MODE); - for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES; i++) + for (i = 0; i < RKISP1_CIF_ISP_GAMMA_OUT_MAX_SAMPLES_V10; i++) rkisp1_write(params->rkisp1, arg->gamma_y[i], RKISP1_CIF_ISP_GAMMA_OUT_Y_0 + i * 4); } @@ -589,7 +589,6 @@ static void rkisp1_hst_config(struct rkisp1_params *params, RKISP1_CIF_ISP_HIST_WEIGHT_22TO03, RKISP1_CIF_ISP_HIST_WEIGHT_13TO43, RKISP1_CIF_ISP_HIST_WEIGHT_04TO34, - RKISP1_CIF_ISP_HIST_WEIGHT_44, }; const u8 *weight; unsigned int i; @@ -622,6 +621,8 @@ static void rkisp1_hst_config(struct rkisp1_params *params, weight[2], weight[3]), hist_weight_regs[i]); + + rkisp1_write(params->rkisp1, weight[0] & 0x1F, RKISP1_CIF_ISP_HIST_WEIGHT_44); } static void diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h index 8a8d960a679c..fa33080f51db 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h @@ -365,6 +365,7 @@ #define RKISP1_CIF_ISP_MAX_HIST_PREDIVIDER 0x0000007F #define RKISP1_CIF_ISP_HIST_ROW_NUM 5 #define RKISP1_CIF_ISP_HIST_COLUMN_NUM 5 +#define RKISP1_CIF_ISP_HIST_GET_BIN(x) ((x) & 0x000FFFFF) /* AUTO FOCUS MEASUREMENT: ISP_AFM_CTRL */ #define RKISP1_ISP_AFM_CTRL_ENABLE BIT(0) diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c index 3ddab8fa8f2d..c1d07a2e8839 100644 --- 
a/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-stats.c @@ -203,7 +203,7 @@ static void rkisp1_stats_get_aec_meas(struct rkisp1_stats *stats, unsigned int i; pbuf->meas_type |= RKISP1_CIF_ISP_STAT_AUTOEXP; - for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX; i++) + for (i = 0; i < RKISP1_CIF_ISP_AE_MEAN_MAX_V10; i++) pbuf->params.ae.exp_mean[i] = (u8)rkisp1_read(rkisp1, RKISP1_CIF_ISP_EXP_MEAN_00 + i * 4); @@ -233,10 +233,11 @@ static void rkisp1_stats_get_hst_meas(struct rkisp1_stats *stats, unsigned int i; pbuf->meas_type |= RKISP1_CIF_ISP_STAT_HIST; - for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX; i++) - pbuf->params.hist.hist_bins[i] = - (u8)rkisp1_read(rkisp1, - RKISP1_CIF_ISP_HIST_BIN_0 + i * 4); + for (i = 0; i < RKISP1_CIF_ISP_HIST_BIN_N_MAX_V10; i++) { + u32 reg_val = rkisp1_read(rkisp1, RKISP1_CIF_ISP_HIST_BIN_0 + i * 4); + + pbuf->params.hist.hist_bins[i] = RKISP1_CIF_ISP_HIST_GET_BIN(reg_val); + } } static void rkisp1_stats_get_bls_meas(struct rkisp1_stats *stats, diff --git a/drivers/media/rc/ir-mce_kbd-decoder.c b/drivers/media/rc/ir-mce_kbd-decoder.c index be8f2756a444..1524dc0fc566 100644 --- a/drivers/media/rc/ir-mce_kbd-decoder.c +++ b/drivers/media/rc/ir-mce_kbd-decoder.c @@ -320,7 +320,7 @@ again: data->body); spin_lock(&data->keylock); if (scancode) { - delay = nsecs_to_jiffies(dev->timeout) + + delay = usecs_to_jiffies(dev->timeout) + msecs_to_jiffies(100); mod_timer(&data->rx_timeout, jiffies + delay); } else { diff --git a/drivers/media/rc/ite-cir.c b/drivers/media/rc/ite-cir.c index a905113fef6e..0c6229592e13 100644 --- a/drivers/media/rc/ite-cir.c +++ b/drivers/media/rc/ite-cir.c @@ -1551,7 +1551,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id rdev->s_rx_carrier_range = ite_set_rx_carrier_range; /* FIFO threshold is 17 bytes, so 17 * 8 samples minimum */ rdev->min_timeout = 17 * 8 * ITE_BAUDRATE_DIVISOR * - itdev->params.sample_period; + itdev->params.sample_period / 1000; rdev->timeout = IR_DEFAULT_TIMEOUT; rdev->max_timeout = 10 * IR_DEFAULT_TIMEOUT; rdev->rx_resolution = ITE_BAUDRATE_DIVISOR * diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c index 1d811e5ffb55..1fd62c1dac76 100644 --- a/drivers/media/rc/rc-main.c +++ b/drivers/media/rc/rc-main.c @@ -737,7 +737,7 @@ static unsigned int repeat_period(int protocol) void rc_repeat(struct rc_dev *dev) { unsigned long flags; - unsigned int timeout = nsecs_to_jiffies(dev->timeout) + + unsigned int timeout = usecs_to_jiffies(dev->timeout) + msecs_to_jiffies(repeat_period(dev->last_protocol)); struct lirc_scancode sc = { .scancode = dev->last_scancode, .rc_proto = dev->last_protocol, @@ -855,7 +855,7 @@ void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode, ir_do_keydown(dev, protocol, scancode, keycode, toggle); if (dev->keypressed) { - dev->keyup_jiffies = jiffies + nsecs_to_jiffies(dev->timeout) + + dev->keyup_jiffies = jiffies + usecs_to_jiffies(dev->timeout) + msecs_to_jiffies(repeat_period(protocol)); mod_timer(&dev->timer_keyup, dev->keyup_jiffies); } @@ -1928,6 +1928,8 @@ int rc_register_device(struct rc_dev *dev) goto out_raw; } + dev->registered = true; + rc = device_add(&dev->dev); if (rc) goto out_rx_free; @@ -1937,8 +1939,6 @@ int rc_register_device(struct rc_dev *dev) dev->device_name ?: "Unspecified device", path ?: "N/A"); kfree(path); - dev->registered = true; - /* * once the the input device is registered in rc_setup_rx_device, * userspace can open the input device and rc_open() will 
be called diff --git a/drivers/media/rc/serial_ir.c b/drivers/media/rc/serial_ir.c index 8cc28c92d05d..96ae0294ac10 100644 --- a/drivers/media/rc/serial_ir.c +++ b/drivers/media/rc/serial_ir.c @@ -385,7 +385,7 @@ static irqreturn_t serial_ir_irq_handler(int i, void *blah) } while (!(sinp(UART_IIR) & UART_IIR_NO_INT)); /* still pending ? */ mod_timer(&serial_ir.timeout_timer, - jiffies + nsecs_to_jiffies(serial_ir.rcdev->timeout)); + jiffies + usecs_to_jiffies(serial_ir.rcdev->timeout)); ir_raw_event_handle(serial_ir.rcdev); diff --git a/drivers/media/v4l2-core/v4l2-common.c b/drivers/media/v4l2-core/v4l2-common.c index 78007dba4677..133d20e40f82 100644 --- a/drivers/media/v4l2-core/v4l2-common.c +++ b/drivers/media/v4l2-core/v4l2-common.c @@ -442,7 +442,7 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat, } EXPORT_SYMBOL_GPL(v4l2_fill_pixfmt); -s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul, +s64 v4l2_get_link_freq(struct v4l2_ctrl_handler *handler, unsigned int mul, unsigned int div) { struct v4l2_ctrl *ctrl; @@ -473,4 +473,4 @@ s64 v4l2_get_link_rate(struct v4l2_ctrl_handler *handler, unsigned int mul, return freq > 0 ? freq : -EINVAL; } -EXPORT_SYMBOL_GPL(v4l2_get_link_rate); +EXPORT_SYMBOL_GPL(v4l2_get_link_freq); diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 2aa6648fa41f..5a491d2cd1ae 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1512,6 +1512,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, struct pcr_handle *handle; u32 base, len; int ret, i, bar = 0; + u8 val; dev_dbg(&(pcidev->dev), ": Realtek PCI-E Card Reader found at %s [%04x:%04x] (rev %x)\n", @@ -1577,7 +1578,11 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, pcr->host_cmds_addr = pcr->rtsx_resv_buf_addr; pcr->host_sg_tbl_ptr = pcr->rtsx_resv_buf + HOST_CMDS_BUF_LEN; pcr->host_sg_tbl_addr = pcr->rtsx_resv_buf_addr + HOST_CMDS_BUF_LEN; - + rtsx_pci_read_register(pcr, ASPM_FORCE_CTL, &val); + if (val & FORCE_ASPM_CTL0 && val & FORCE_ASPM_CTL1) + pcr->aspm_enabled = false; + else + pcr->aspm_enabled = true; pcr->card_inserted = 0; pcr->card_removed = 0; INIT_DELAYED_WORK(&pcr->carddet_work, rtsx_pci_card_detect); diff --git a/drivers/misc/habanalabs/common/device.c b/drivers/misc/habanalabs/common/device.c index 1456eabf9601..69d04eca767f 100644 --- a/drivers/misc/habanalabs/common/device.c +++ b/drivers/misc/habanalabs/common/device.c @@ -1037,7 +1037,7 @@ kill_processes: if (hard_reset) { /* Release kernel context */ - if (hl_ctx_put(hdev->kernel_ctx) == 1) + if (hdev->kernel_ctx && hl_ctx_put(hdev->kernel_ctx) == 1) hdev->kernel_ctx = NULL; hl_vm_fini(hdev); hl_mmu_fini(hdev); @@ -1487,6 +1487,15 @@ void hl_device_fini(struct hl_device *hdev) } } + /* Disable PCI access from device F/W so it won't send us additional + * interrupts. We disable MSI/MSI-X at the halt_engines function and we + * can't have the F/W sending us interrupts after that. We need to + * disable the access here because if the device is marked disable, the + * message won't be send. 
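The ir-mce_kbd-decoder, rc-main and serial_ir hunks above switch the conversion of dev->timeout from nsecs_to_jiffies() to usecs_to_jiffies(), matching the unit the value is now kept in. A stand-alone sketch of the arithmetic shows how far the keyup/repeat delay collapses when a microsecond value is scaled as nanoseconds; HZ=250 and the 125 ms sample timeout are illustrative, and the helpers are simplified versions that ignore the exact rounding of the kernel functions.

#include <stdio.h>

#define HZ 250UL	/* illustrative tick rate */

/* Simplified conversions; the kernel helpers differ in rounding details */
static unsigned long usecs_to_jiffies_approx(unsigned long us)
{
	return (us * HZ + 999999UL) / 1000000UL;
}

static unsigned long nsecs_to_jiffies_approx(unsigned long ns)
{
	return (ns * HZ + 999999999UL) / 1000000000UL;
}

int main(void)
{
	unsigned long timeout_us = 125000;	/* sample 125 ms IR timeout */

	/* Correct scaling: about 32 jiffies at HZ=250 */
	printf("scaled as us: %lu jiffies\n", usecs_to_jiffies_approx(timeout_us));
	/* Same number treated as nanoseconds: rounds up to a single jiffy */
	printf("scaled as ns: %lu jiffies\n", nsecs_to_jiffies_approx(timeout_us));
	return 0;
}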
Also, in case of heartbeat, the device CPU is + * marked as disable so this message won't be sent + */ + hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS); + /* Mark device as disabled */ hdev->disabled = true; diff --git a/drivers/misc/habanalabs/common/firmware_if.c b/drivers/misc/habanalabs/common/firmware_if.c index 20f77f58edef..c9a12980218a 100644 --- a/drivers/misc/habanalabs/common/firmware_if.c +++ b/drivers/misc/habanalabs/common/firmware_if.c @@ -402,6 +402,10 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, } counters->rx_throughput = result; + memset(&pkt, 0, sizeof(pkt)); + pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_THROUGHPUT_GET << + CPUCP_PKT_CTL_OPCODE_SHIFT); + /* Fetch PCI tx counter */ pkt.index = cpu_to_le32(cpucp_pcie_throughput_tx); rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt, sizeof(pkt), @@ -414,6 +418,7 @@ int hl_fw_cpucp_pci_counters_get(struct hl_device *hdev, counters->tx_throughput = result; /* Fetch PCI replay counter */ + memset(&pkt, 0, sizeof(pkt)); pkt.ctl = cpu_to_le32(CPUCP_PACKET_PCIE_REPLAY_CNT_GET << CPUCP_PKT_CTL_OPCODE_SHIFT); diff --git a/drivers/misc/habanalabs/common/habanalabs.h b/drivers/misc/habanalabs/common/habanalabs.h index e0d7f5fbaa5c..60e16dc4bcac 100644 --- a/drivers/misc/habanalabs/common/habanalabs.h +++ b/drivers/misc/habanalabs/common/habanalabs.h @@ -2182,6 +2182,7 @@ void hl_mmu_v1_set_funcs(struct hl_device *hdev, struct hl_mmu_funcs *mmu); int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr); int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops); +bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr); int hl_fw_load_fw_to_device(struct hl_device *hdev, const char *fw_name, void __iomem *dst, u32 src_offset, u32 size); diff --git a/drivers/misc/habanalabs/common/habanalabs_ioctl.c b/drivers/misc/habanalabs/common/habanalabs_ioctl.c index 12efbd9d2e3a..d25892d61ec9 100644 --- a/drivers/misc/habanalabs/common/habanalabs_ioctl.c +++ b/drivers/misc/habanalabs/common/habanalabs_ioctl.c @@ -133,6 +133,8 @@ static int hw_idle(struct hl_device *hdev, struct hl_info_args *args) hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev, &hw_idle.busy_engines_mask_ext, NULL); + hw_idle.busy_engines_mask = + lower_32_bits(hw_idle.busy_engines_mask_ext); return copy_to_user(out, &hw_idle, min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0; diff --git a/drivers/misc/habanalabs/common/memory.c b/drivers/misc/habanalabs/common/memory.c index cbe9da4e0211..5d4fbdcaefe3 100644 --- a/drivers/misc/habanalabs/common/memory.c +++ b/drivers/misc/habanalabs/common/memory.c @@ -886,8 +886,10 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr, { struct hl_device *hdev = ctx->hdev; u64 next_vaddr, i; + bool is_host_addr; u32 page_size; + is_host_addr = !hl_is_dram_va(hdev, vaddr); page_size = phys_pg_pack->page_size; next_vaddr = vaddr; @@ -900,9 +902,13 @@ static void unmap_phys_pg_pack(struct hl_ctx *ctx, u64 vaddr, /* * unmapping on Palladium can be really long, so avoid a CPU * soft lockup bug by sleeping a little between unmapping pages + * + * In addition, when unmapping host memory we pass through + * the Linux kernel to unpin the pages and that takes a long + * time. 
Therefore, sleep every 32K pages to avoid soft lockup */ - if (hdev->pldm) - usleep_range(500, 1000); + if (hdev->pldm || (is_host_addr && (i & 0x7FFF) == 0)) + usleep_range(50, 200); } } diff --git a/drivers/misc/habanalabs/common/mmu.c b/drivers/misc/habanalabs/common/mmu.c index 33ae953d3a36..28a4638741d8 100644 --- a/drivers/misc/habanalabs/common/mmu.c +++ b/drivers/misc/habanalabs/common/mmu.c @@ -9,7 +9,7 @@ #include "habanalabs.h" -static bool is_dram_va(struct hl_device *hdev, u64 virt_addr) +bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr) { struct asic_fixed_properties *prop = &hdev->asic_prop; @@ -156,7 +156,7 @@ int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, if (!hdev->mmu_enable) return 0; - is_dram_addr = is_dram_va(hdev, virt_addr); + is_dram_addr = hl_is_dram_va(hdev, virt_addr); if (is_dram_addr) mmu_prop = &prop->dmmu; @@ -236,7 +236,7 @@ int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, if (!hdev->mmu_enable) return 0; - is_dram_addr = is_dram_va(hdev, virt_addr); + is_dram_addr = hl_is_dram_va(hdev, virt_addr); if (is_dram_addr) mmu_prop = &prop->dmmu; diff --git a/drivers/misc/habanalabs/common/mmu_v1.c b/drivers/misc/habanalabs/common/mmu_v1.c index 2ce6ea89d4fa..06d8a44dd5d4 100644 --- a/drivers/misc/habanalabs/common/mmu_v1.c +++ b/drivers/misc/habanalabs/common/mmu_v1.c @@ -467,8 +467,16 @@ static void hl_mmu_v1_fini(struct hl_device *hdev) { /* MMU H/W fini was already done in device hw_fini() */ - kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0); - gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool); + if (!ZERO_OR_NULL_PTR(hdev->mmu_priv.hr.mmu_shadow_hop0)) { + kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0); + gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool); + } + + /* Make sure that if we arrive here again without init was called we + * won't cause kernel panic. 
This can happen for example if we fail + * during hard reset code at certain points + */ + hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL; } /** diff --git a/drivers/misc/habanalabs/gaudi/gaudi.c b/drivers/misc/habanalabs/gaudi/gaudi.c index 8c09e4466af8..b328ddaa64ee 100644 --- a/drivers/misc/habanalabs/gaudi/gaudi.c +++ b/drivers/misc/habanalabs/gaudi/gaudi.c @@ -4002,7 +4002,8 @@ static int gaudi_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE; - rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size); + rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, + (dma_addr - HOST_PHYS_BASE), size); if (rc) dev_err(hdev->dev, "dma_mmap_coherent error %d", rc); diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c index b8b4aa636b7c..63679a747d2c 100644 --- a/drivers/misc/habanalabs/goya/goya.c +++ b/drivers/misc/habanalabs/goya/goya.c @@ -2719,7 +2719,8 @@ static int goya_cb_mmap(struct hl_device *hdev, struct vm_area_struct *vma, vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | VM_DONTCOPY | VM_NORESERVE; - rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size); + rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, + (dma_addr - HOST_PHYS_BASE), size); if (rc) dev_err(hdev->dev, "dma_mmap_coherent error %d", rc); diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index de7cb0369c30..002426e3cf76 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -384,8 +384,10 @@ static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) "merging was advertised but not possible"); blk_queue_max_segments(mq->queue, mmc_get_max_segments(host)); - if (mmc_card_mmc(card)) + if (mmc_card_mmc(card) && card->ext_csd.data_sector_size) { block_size = card->ext_csd.data_sector_size; + WARN_ON(block_size != 512 && block_size != 4096); + } blk_queue_logical_block_size(mq->queue, block_size); /* diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index 44bea5e4aeda..b23773583179 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -20,6 +20,8 @@ #include "sdio_cis.h" #include "sdio_ops.h" +#define SDIO_READ_CIS_TIMEOUT_MS (10 * 1000) /* 10s */ + static int cistpl_vers_1(struct mmc_card *card, struct sdio_func *func, const unsigned char *buf, unsigned size) { @@ -274,6 +276,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) do { unsigned char tpl_code, tpl_link; + unsigned long timeout = jiffies + + msecs_to_jiffies(SDIO_READ_CIS_TIMEOUT_MS); ret = mmc_io_rw_direct(card, 0, 0, ptr++, 0, &tpl_code); if (ret) @@ -326,6 +330,8 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) prev = &this->next; if (ret == -ENOENT) { + if (time_after(jiffies, timeout)) + break; /* warn about unknown tuples */ pr_warn_ratelimited("%s: queuing unknown" " CIS tuple 0x%02x (%u bytes)\n", diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index bbf3496f4495..f9780c65ebe9 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -314,11 +314,7 @@ err_clk: static void sdhci_brcmstb_shutdown(struct platform_device *pdev) { - int ret; - - ret = sdhci_pltfm_unregister(pdev); - if (ret) - dev_err(&pdev->dev, "failed to shutdown\n"); + sdhci_pltfm_suspend(&pdev->dev); } MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match); diff --git a/drivers/mmc/host/sdhci-of-dwcmshc.c 
b/drivers/mmc/host/sdhci-of-dwcmshc.c index 4b673792b5a4..d90020ed3622 100644 --- a/drivers/mmc/host/sdhci-of-dwcmshc.c +++ b/drivers/mmc/host/sdhci-of-dwcmshc.c @@ -16,6 +16,8 @@ #include "sdhci-pltfm.h" +#define SDHCI_DWCMSHC_ARG2_STUFF GENMASK(31, 16) + /* DWCMSHC specific Mode Select value */ #define DWCMSHC_CTRL_HS400 0x7 @@ -49,6 +51,29 @@ static void dwcmshc_adma_write_desc(struct sdhci_host *host, void **desc, sdhci_adma_write_desc(host, desc, addr, len, cmd); } +static void dwcmshc_check_auto_cmd23(struct mmc_host *mmc, + struct mmc_request *mrq) +{ + struct sdhci_host *host = mmc_priv(mmc); + + /* + * No matter V4 is enabled or not, ARGUMENT2 register is 32-bit + * block count register which doesn't support stuff bits of + * CMD23 argument on dwcmsch host controller. + */ + if (mrq->sbc && (mrq->sbc->arg & SDHCI_DWCMSHC_ARG2_STUFF)) + host->flags &= ~SDHCI_AUTO_CMD23; + else + host->flags |= SDHCI_AUTO_CMD23; +} + +static void dwcmshc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + dwcmshc_check_auto_cmd23(mmc, mrq); + + sdhci_request(mmc, mrq); +} + static void dwcmshc_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) { @@ -133,6 +158,8 @@ static int dwcmshc_probe(struct platform_device *pdev) sdhci_get_of_property(pdev); + host->mmc_host_ops.request = dwcmshc_request; + err = sdhci_add_host(host); if (err) goto err_clk; diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h index 6301b81cf573..9bd717ff784b 100644 --- a/drivers/mmc/host/sdhci-pltfm.h +++ b/drivers/mmc/host/sdhci-pltfm.h @@ -111,8 +111,13 @@ static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host) return host->private; } +extern const struct dev_pm_ops sdhci_pltfm_pmops; +#ifdef CONFIG_PM_SLEEP int sdhci_pltfm_suspend(struct device *dev); int sdhci_pltfm_resume(struct device *dev); -extern const struct dev_pm_ops sdhci_pltfm_pmops; +#else +static inline int sdhci_pltfm_suspend(struct device *dev) { return 0; } +static inline int sdhci_pltfm_resume(struct device *dev) { return 0; } +#endif #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ diff --git a/drivers/mmc/host/sdhci-xenon.c b/drivers/mmc/host/sdhci-xenon.c index c67611fdaa8a..d19eef5f725f 100644 --- a/drivers/mmc/host/sdhci-xenon.c +++ b/drivers/mmc/host/sdhci-xenon.c @@ -168,7 +168,12 @@ static void xenon_reset_exit(struct sdhci_host *host, /* Disable tuning request and auto-retuning again */ xenon_retune_setup(host); - xenon_set_acg(host, true); + /* + * The ACG should be turned off at the early init time, in order + * to solve a possible issues with the 1.8V regulator stabilization. + * The feature is enabled in later stage. 
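The dwcmshc hunk above disables auto-CMD23 whenever the CMD23 argument carries anything in bits 31:16, because the controller's ARGUMENT2 register only holds a plain 32-bit block count. A small user-space sketch of the same mask test follows; the two sample arguments are invented for illustration and do not correspond to a specific card command.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ARG2_STUFF	UINT32_C(0xFFFF0000)	/* GENMASK(31, 16) in the driver */

static const char *auto_cmd23_verdict(uint32_t sbc_arg)
{
	/* Mirrors the check in dwcmshc_check_auto_cmd23() above */
	return (sbc_arg & ARG2_STUFF) ? "fall back to plain CMD23"
				      : "auto-CMD23 can be used";
}

int main(void)
{
	uint32_t plain_count = 8;			/* 8 blocks, no upper bits */
	uint32_t flagged = (UINT32_C(1) << 31) | 8;	/* made-up upper-half flag */

	printf("0x%08" PRIx32 ": %s\n", plain_count, auto_cmd23_verdict(plain_count));
	printf("0x%08" PRIx32 ": %s\n", flagged, auto_cmd23_verdict(flagged));
	return 0;
}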
+ */ + xenon_set_acg(host, false); xenon_set_sdclk_off_idle(host, sdhc_id, false); diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 5cdf05bcbf8f..3fa8c22d3f36 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -1615,7 +1615,7 @@ static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf, /* Extract interleaved payload data and ECC bits */ for (step = 0; step < nfc_geo->ecc_chunk_count; step++) { if (buf) - nand_extract_bits(buf, step * eccsize, tmp_buf, + nand_extract_bits(buf, step * eccsize * 8, tmp_buf, src_bit_off, eccsize * 8); src_bit_off += eccsize * 8; diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c index fdb112e8a90d..a304fda5d1fa 100644 --- a/drivers/mtd/nand/raw/intel-nand-controller.c +++ b/drivers/mtd/nand/raw/intel-nand-controller.c @@ -579,7 +579,7 @@ static int ebu_nand_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct ebu_nand_controller *ebu_host; struct nand_chip *nand; - struct mtd_info *mtd = NULL; + struct mtd_info *mtd; struct resource *res; char *resname; int ret; @@ -647,12 +647,13 @@ static int ebu_nand_probe(struct platform_device *pdev) ebu_host->ebu + EBU_ADDR_SEL(cs)); nand_set_flash_node(&ebu_host->chip, dev->of_node); + + mtd = nand_to_mtd(&ebu_host->chip); if (!mtd->name) { dev_err(ebu_host->dev, "NAND label property is mandatory\n"); return -EINVAL; } - mtd = nand_to_mtd(&ebu_host->chip); mtd->dev.parent = dev; ebu_host->dev = dev; diff --git a/drivers/mtd/nand/raw/nandsim.c b/drivers/mtd/nand/raw/nandsim.c index f2b9250c0ea8..0750121ac371 100644 --- a/drivers/mtd/nand/raw/nandsim.c +++ b/drivers/mtd/nand/raw/nandsim.c @@ -2210,6 +2210,9 @@ static int ns_attach_chip(struct nand_chip *chip) { unsigned int eccsteps, eccbytes; + chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; + chip->ecc.algo = bch ? 
NAND_ECC_ALGO_BCH : NAND_ECC_ALGO_HAMMING; + if (!bch) return 0; @@ -2233,8 +2236,6 @@ static int ns_attach_chip(struct nand_chip *chip) return -EINVAL; } - chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_BCH; chip->ecc.size = 512; chip->ecc.strength = bch; chip->ecc.bytes = eccbytes; @@ -2273,8 +2274,6 @@ static int __init ns_init_module(void) nsmtd = nand_to_mtd(chip); nand_set_controller_data(chip, (void *)ns); - chip->ecc.engine_type = NAND_ECC_ENGINE_TYPE_SOFT; - chip->ecc.algo = NAND_ECC_ALGO_HAMMING; /* The NAND_SKIP_BBTSCAN option is necessary for 'overridesize' */ /* and 'badblocks' parameters to work */ chip->options |= NAND_SKIP_BBTSCAN; diff --git a/drivers/mtd/nand/raw/omap2.c b/drivers/mtd/nand/raw/omap2.c index fbb9955f2467..2c3e65cb68f3 100644 --- a/drivers/mtd/nand/raw/omap2.c +++ b/drivers/mtd/nand/raw/omap2.c @@ -15,6 +15,7 @@ #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/mtd/mtd.h> +#include <linux/mtd/nand-ecc-sw-bch.h> #include <linux/mtd/rawnand.h> #include <linux/mtd/partitions.h> #include <linux/omap-dma.h> @@ -1866,18 +1867,19 @@ static const struct mtd_ooblayout_ops omap_ooblayout_ops = { static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section, struct mtd_oob_region *oobregion) { - struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_device *nand = mtd_to_nanddev(mtd); + const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; int off = BADBLOCK_MARKER_LENGTH; - if (section >= chip->ecc.steps) + if (section >= engine_conf->nsteps) return -ERANGE; /* * When SW correction is employed, one OMAP specific marker byte is * reserved after each ECC step. */ - oobregion->offset = off + (section * (chip->ecc.bytes + 1)); - oobregion->length = chip->ecc.bytes; + oobregion->offset = off + (section * (engine_conf->code_size + 1)); + oobregion->length = engine_conf->code_size; return 0; } @@ -1885,7 +1887,8 @@ static int omap_sw_ooblayout_ecc(struct mtd_info *mtd, int section, static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section, struct mtd_oob_region *oobregion) { - struct nand_chip *chip = mtd_to_nand(mtd); + struct nand_device *nand = mtd_to_nanddev(mtd); + const struct nand_ecc_sw_bch_conf *engine_conf = nand->ecc.ctx.priv; int off = BADBLOCK_MARKER_LENGTH; if (section) @@ -1895,7 +1898,7 @@ static int omap_sw_ooblayout_free(struct mtd_info *mtd, int section, * When SW correction is employed, one OMAP specific marker byte is * reserved after each ECC step. 
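The omap2 hunks above rework omap_sw_ooblayout_ecc() and omap_sw_ooblayout_free() to take the step count and code size from the software-BCH engine_conf instead of chip->ecc, keeping the one OMAP marker byte reserved after each ECC step. A worked sketch of that layout arithmetic is below; the geometry (7-byte codes, 4 steps, 2-byte bad-block marker, 64-byte OOB) is made up for illustration rather than taken from a particular chip.

#include <stdio.h>

int main(void)
{
	/* Invented example geometry */
	unsigned int marker_len = 2;	/* bad block marker bytes */
	unsigned int code_size = 7;	/* ECC bytes per step */
	unsigned int nsteps = 4;
	unsigned int oobsize = 64;
	unsigned int section, free_off;

	/* One OMAP-specific marker byte follows each ECC step */
	for (section = 0; section < nsteps; section++)
		printf("ecc section %u: offset %u, length %u\n",
		       section, marker_len + section * (code_size + 1), code_size);

	free_off = marker_len + (code_size + 1) * nsteps;
	printf("free area: offset %u, length %u\n", free_off, oobsize - free_off);
	return 0;
}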
*/ - off += ((chip->ecc.bytes + 1) * chip->ecc.steps); + off += ((engine_conf->code_size + 1) * engine_conf->nsteps); if (off >= mtd->oobsize) return -ERANGE; diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 8ea545bb924d..61d932c1b718 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -343,6 +343,7 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand, const struct nand_page_io_req *req) { struct nand_device *nand = spinand_to_nand(spinand); + struct mtd_info *mtd = spinand_to_mtd(spinand); struct spi_mem_dirmap_desc *rdesc; unsigned int nbytes = 0; void *buf = NULL; @@ -382,9 +383,16 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand, memcpy(req->databuf.in, spinand->databuf + req->dataoffs, req->datalen); - if (req->ooblen) - memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs, - req->ooblen); + if (req->ooblen) { + if (req->mode == MTD_OPS_AUTO_OOB) + mtd_ooblayout_get_databytes(mtd, req->oobbuf.in, + spinand->oobbuf, + req->ooboffs, + req->ooblen); + else + memcpy(req->oobbuf.in, spinand->oobbuf + req->ooboffs, + req->ooblen); + } return 0; } diff --git a/drivers/net/arcnet/arc-rimi.c b/drivers/net/arcnet/arc-rimi.c index 98df38fe553c..12d085405bd0 100644 --- a/drivers/net/arcnet/arc-rimi.c +++ b/drivers/net/arcnet/arc-rimi.c @@ -332,7 +332,7 @@ static int __init arc_rimi_init(void) dev->irq = 9; if (arcrimi_probe(dev)) { - free_netdev(dev); + free_arcdev(dev); return -EIO; } @@ -349,7 +349,7 @@ static void __exit arc_rimi_exit(void) iounmap(lp->mem_start); release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); free_irq(dev->irq, dev); - free_netdev(dev); + free_arcdev(dev); } #ifndef MODULE diff --git a/drivers/net/arcnet/arcdevice.h b/drivers/net/arcnet/arcdevice.h index 22a49c6d7ae6..5d4a4c7efbbf 100644 --- a/drivers/net/arcnet/arcdevice.h +++ b/drivers/net/arcnet/arcdevice.h @@ -298,6 +298,10 @@ struct arcnet_local { int excnak_pending; /* We just got an excesive nak interrupt */ + /* RESET flag handling */ + int reset_in_progress; + struct work_struct reset_work; + struct { uint16_t sequence; /* sequence number (incs with each packet) */ __be16 aborted_seq; @@ -350,7 +354,9 @@ void arcnet_dump_skb(struct net_device *dev, struct sk_buff *skb, char *desc) void arcnet_unregister_proto(struct ArcProto *proto); irqreturn_t arcnet_interrupt(int irq, void *dev_id); + struct net_device *alloc_arcdev(const char *name); +void free_arcdev(struct net_device *dev); int arcnet_open(struct net_device *dev); int arcnet_close(struct net_device *dev); diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index e04efc0a5c97..d76dd7d14299 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -387,10 +387,44 @@ static void arcnet_timer(struct timer_list *t) struct arcnet_local *lp = from_timer(lp, t, timer); struct net_device *dev = lp->dev; - if (!netif_carrier_ok(dev)) { + spin_lock_irq(&lp->lock); + + if (!lp->reset_in_progress && !netif_carrier_ok(dev)) { netif_carrier_on(dev); netdev_info(dev, "link up\n"); } + + spin_unlock_irq(&lp->lock); +} + +static void reset_device_work(struct work_struct *work) +{ + struct arcnet_local *lp; + struct net_device *dev; + + lp = container_of(work, struct arcnet_local, reset_work); + dev = lp->dev; + + /* Do not bring the network interface back up if an ifdown + * was already done. 
+ */ + if (!netif_running(dev) || !lp->reset_in_progress) + return; + + rtnl_lock(); + + /* Do another check, in case of an ifdown that was triggered in + * the small race window between the exit condition above and + * acquiring RTNL. + */ + if (!netif_running(dev) || !lp->reset_in_progress) + goto out; + + dev_close(dev); + dev_open(dev, NULL); + +out: + rtnl_unlock(); } static void arcnet_reply_tasklet(unsigned long data) @@ -452,12 +486,25 @@ struct net_device *alloc_arcdev(const char *name) lp->dev = dev; spin_lock_init(&lp->lock); timer_setup(&lp->timer, arcnet_timer, 0); + INIT_WORK(&lp->reset_work, reset_device_work); } return dev; } EXPORT_SYMBOL(alloc_arcdev); +void free_arcdev(struct net_device *dev) +{ + struct arcnet_local *lp = netdev_priv(dev); + + /* Do not cancel this at ->ndo_close(), as the workqueue itself + * indirectly calls the ifdown path through dev_close(). + */ + cancel_work_sync(&lp->reset_work); + free_netdev(dev); +} +EXPORT_SYMBOL(free_arcdev); + /* Open/initialize the board. This is called sometime after booting when * the 'ifconfig' program is run. * @@ -587,6 +634,10 @@ int arcnet_close(struct net_device *dev) /* shut down the card */ lp->hw.close(dev); + + /* reset counters */ + lp->reset_in_progress = 0; + module_put(lp->hw.owner); return 0; } @@ -820,6 +871,9 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) spin_lock_irqsave(&lp->lock, flags); + if (lp->reset_in_progress) + goto out; + /* RESET flag was enabled - if device is not running, we must * clear it right away (but nothing else). */ @@ -852,11 +906,14 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) if (status & RESETflag) { arc_printk(D_NORMAL, dev, "spurious reset (status=%Xh)\n", status); - arcnet_close(dev); - arcnet_open(dev); + + lp->reset_in_progress = 1; + netif_stop_queue(dev); + netif_carrier_off(dev); + schedule_work(&lp->reset_work); /* get out of the interrupt handler! */ - break; + goto out; } /* RX is inhibited - we must have received something. * Prepare to receive into the next buffer. 
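reset_device_work() above tests netif_running() and reset_in_progress once before taking RTNL and once again after, since an ifdown can slip into the window between the first check and rtnl_lock(). The user-space sketch below shows the same check / lock / re-check shape with a pthread mutex; it is purely illustrative, and the flag and lock names are invented rather than taken from the driver.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
static bool reset_pending = true;	/* set from an "interrupt" path */
static bool iface_running = true;	/* cleared by a concurrent ifdown */

static void reset_worker(void)
{
	/* Cheap early exit without taking the lock */
	if (!reset_pending || !iface_running)
		return;

	pthread_mutex_lock(&cfg_lock);

	/*
	 * Re-check under the lock: the state may have changed in the
	 * window between the test above and acquiring the lock.
	 */
	if (!reset_pending || !iface_running)
		goto out;

	printf("closing and reopening the interface\n");
	reset_pending = false;
out:
	pthread_mutex_unlock(&cfg_lock);
}

int main(void)
{
	reset_worker();
	return 0;
}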
@@ -1052,6 +1109,7 @@ irqreturn_t arcnet_interrupt(int irq, void *dev_id) udelay(1); lp->hw.intmask(dev, lp->intmask); +out: spin_unlock_irqrestore(&lp->lock, flags); return retval; } diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index f983c4ce6b07..be618e4b9ed5 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -169,7 +169,7 @@ static int __init com20020_init(void) dev->irq = 9; if (com20020isa_probe(dev)) { - free_netdev(dev); + free_arcdev(dev); return -EIO; } @@ -182,7 +182,7 @@ static void __exit com20020_exit(void) unregister_netdev(my_dev); free_irq(my_dev->irq, my_dev); release_region(my_dev->base_addr, ARCNET_TOTAL_SIZE); - free_netdev(my_dev); + free_arcdev(my_dev); } #ifndef MODULE diff --git a/drivers/net/arcnet/com20020-pci.c b/drivers/net/arcnet/com20020-pci.c index eb7f76753c9c..8bdc44b7e09a 100644 --- a/drivers/net/arcnet/com20020-pci.c +++ b/drivers/net/arcnet/com20020-pci.c @@ -291,7 +291,7 @@ static void com20020pci_remove(struct pci_dev *pdev) unregister_netdev(dev); free_irq(dev->irq, dev); - free_netdev(dev); + free_arcdev(dev); } } diff --git a/drivers/net/arcnet/com20020_cs.c b/drivers/net/arcnet/com20020_cs.c index cf607ffcf358..9cc5eb6a8e90 100644 --- a/drivers/net/arcnet/com20020_cs.c +++ b/drivers/net/arcnet/com20020_cs.c @@ -177,7 +177,7 @@ static void com20020_detach(struct pcmcia_device *link) dev = info->dev; if (dev) { dev_dbg(&link->dev, "kfree...\n"); - free_netdev(dev); + free_arcdev(dev); } dev_dbg(&link->dev, "kfree2...\n"); kfree(info); diff --git a/drivers/net/arcnet/com90io.c b/drivers/net/arcnet/com90io.c index cf214b730671..3856b447d38e 100644 --- a/drivers/net/arcnet/com90io.c +++ b/drivers/net/arcnet/com90io.c @@ -396,7 +396,7 @@ static int __init com90io_init(void) err = com90io_probe(dev); if (err) { - free_netdev(dev); + free_arcdev(dev); return err; } @@ -419,7 +419,7 @@ static void __exit com90io_exit(void) free_irq(dev->irq, dev); release_region(dev->base_addr, ARCNET_TOTAL_SIZE); - free_netdev(dev); + free_arcdev(dev); } module_init(com90io_init) diff --git a/drivers/net/arcnet/com90xx.c b/drivers/net/arcnet/com90xx.c index 3dc3d533cb19..d8dfb9ea0de8 100644 --- a/drivers/net/arcnet/com90xx.c +++ b/drivers/net/arcnet/com90xx.c @@ -554,7 +554,7 @@ err_free_irq: err_release_mem: release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); err_free_dev: - free_netdev(dev); + free_arcdev(dev); return -EIO; } @@ -672,7 +672,7 @@ static void __exit com90xx_exit(void) release_region(dev->base_addr, ARCNET_TOTAL_SIZE); release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); - free_netdev(dev); + free_arcdev(dev); } } diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3486704c8a95..c73e2a65c904 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -592,11 +592,11 @@ static void can_restart(struct net_device *dev) cf->can_id |= CAN_ERR_RESTARTED; - netif_rx_ni(skb); - stats->rx_packets++; stats->rx_bytes += cf->len; + netif_rx_ni(skb); + restart: netdev_dbg(dev, "restarted\n"); priv->can_stats.restarts++; @@ -1163,7 +1163,7 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); struct can_ctrlmode cm = {.flags = priv->ctrlmode}; - struct can_berr_counter bec; + struct can_berr_counter bec = { }; enum can_state state = priv->state; if (priv->do_get_state) diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c 
index 61631f4fd92a..f347ecc79aef 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -514,11 +514,11 @@ static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, else memcpy(cfd->data, rm->d, cfd->len); - peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low)); - netdev->stats.rx_packets++; netdev->stats.rx_bytes += cfd->len; + peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(rm->ts_low)); + return 0; } @@ -580,11 +580,11 @@ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, if (!skb) return -ENOMEM; - peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low)); - netdev->stats.rx_packets++; netdev->stats.rx_bytes += cf->len; + peak_usb_netif_rx(skb, &usb_if->time_ref, le32_to_cpu(sm->ts_low)); + return 0; } diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index fa47bab510bb..f9a524c5f6d6 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -39,6 +39,7 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev) struct net_device *peer; struct canfd_frame *cfd = (struct canfd_frame *)skb->data; struct net_device_stats *peerstats, *srcstats = &dev->stats; + u8 len; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; @@ -61,12 +62,13 @@ static netdev_tx_t vxcan_xmit(struct sk_buff *skb, struct net_device *dev) skb->dev = peer; skb->ip_summed = CHECKSUM_UNNECESSARY; + len = cfd->len; if (netif_rx_ni(skb) == NET_RX_SUCCESS) { srcstats->tx_packets++; - srcstats->tx_bytes += cfd->len; + srcstats->tx_bytes += len; peerstats = &peer->stats; peerstats->rx_packets++; - peerstats->rx_bytes += cfd->len; + peerstats->rx_bytes += len; } out_unlock: diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c index 288b5a5c3e0d..95c7fa171e35 100644 --- a/drivers/net/dsa/b53/b53_common.c +++ b/drivers/net/dsa/b53/b53_common.c @@ -1404,7 +1404,7 @@ int b53_vlan_prepare(struct dsa_switch *ds, int port, !(vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED)) return -EINVAL; - if (vlan->vid_end > dev->num_vlans) + if (vlan->vid_end >= dev->num_vlans) return -ERANGE; b53_enable_vlan(dev, true, ds->vlan_filtering); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 1e9a0adda2d6..445226720ff2 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -509,15 +509,19 @@ static int bcm_sf2_mdio_register(struct dsa_switch *ds) /* Find our integrated MDIO bus node */ dn = of_find_compatible_node(NULL, NULL, "brcm,unimac-mdio"); priv->master_mii_bus = of_mdio_find_bus(dn); - if (!priv->master_mii_bus) + if (!priv->master_mii_bus) { + of_node_put(dn); return -EPROBE_DEFER; + } get_device(&priv->master_mii_bus->dev); priv->master_mii_dn = dn; priv->slave_mii_bus = devm_mdiobus_alloc(ds->dev); - if (!priv->slave_mii_bus) + if (!priv->slave_mii_bus) { + of_node_put(dn); return -ENOMEM; + } priv->slave_mii_bus->priv = priv; priv->slave_mii_bus->name = "sf2 slave mii"; diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index c973db101b72..a4570ba29c83 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -1187,6 +1187,20 @@ static const struct ksz_chip_data ksz8795_switch_chips[] = { .port_cnt = 5, /* total cpu and user ports */ }, { + /* + * WARNING + * ======= + * KSZ8794 is similar to KSZ8795, except the port map + * contains a gap between external and CPU ports, the + * port map is NOT continuous. 
The per-port register + * map is shifted accordingly too, i.e. registers at + * offset 0x40 are NOT used on KSZ8794 and they ARE + * used on KSZ8795 for external port 3. + * external cpu + * KSZ8794 0,1,2 4 + * KSZ8795 0,1,2,3 4 + * KSZ8765 0,1,2,3 4 + */ .chip_id = 0x8794, .dev_name = "KSZ8794", .num_vlans = 4096, @@ -1220,9 +1234,13 @@ static int ksz8795_switch_init(struct ksz_device *dev) dev->num_vlans = chip->num_vlans; dev->num_alus = chip->num_alus; dev->num_statics = chip->num_statics; - dev->port_cnt = chip->port_cnt; + dev->port_cnt = fls(chip->cpu_ports); + dev->cpu_port = fls(chip->cpu_ports) - 1; + dev->phy_port_cnt = dev->port_cnt - 1; dev->cpu_ports = chip->cpu_ports; - + dev->host_mask = chip->cpu_ports; + dev->port_mask = (BIT(dev->phy_port_cnt) - 1) | + chip->cpu_ports; break; } } @@ -1231,17 +1249,9 @@ static int ksz8795_switch_init(struct ksz_device *dev) if (!dev->cpu_ports) return -ENODEV; - dev->port_mask = BIT(dev->port_cnt) - 1; - dev->port_mask |= dev->host_mask; - dev->reg_mib_cnt = KSZ8795_COUNTER_NUM; dev->mib_cnt = ARRAY_SIZE(mib_names); - dev->phy_port_cnt = dev->port_cnt - 1; - - dev->cpu_port = dev->port_cnt - 1; - dev->host_mask = BIT(dev->cpu_port); - dev->ports = devm_kzalloc(dev->dev, dev->port_cnt * sizeof(struct ksz_port), GFP_KERNEL); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index cf743133b0b9..389abfd27770 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -400,7 +400,7 @@ int ksz_switch_register(struct ksz_device *dev, gpiod_set_value_cansleep(dev->reset_gpio, 1); usleep_range(10000, 12000); gpiod_set_value_cansleep(dev->reset_gpio, 0); - usleep_range(100, 1000); + msleep(100); } mutex_init(&dev->dev_mutex); @@ -434,7 +434,7 @@ int ksz_switch_register(struct ksz_device *dev, if (of_property_read_u32(port, "reg", &port_num)) continue; - if (port_num >= dev->port_cnt) + if (!(dev->port_mask & BIT(port_num))) return -EINVAL; of_get_phy_mode(port, &dev->ports[port_num].interface); diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index eafe6bedc692..54aa942eedaa 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -1676,7 +1676,11 @@ static int mv88e6xxx_port_db_load_purge(struct mv88e6xxx_chip *chip, int port, if (!entry.portvec) entry.state = 0; } else { - entry.portvec |= BIT(port); + if (state == MV88E6XXX_G1_ATU_DATA_STATE_UC_STATIC) + entry.portvec = BIT(port); + else + entry.portvec |= BIT(port); + entry.state = state; } diff --git a/drivers/net/dsa/mv88e6xxx/global1_vtu.c b/drivers/net/dsa/mv88e6xxx/global1_vtu.c index 66ddf67b8737..7b96396be609 100644 --- a/drivers/net/dsa/mv88e6xxx/global1_vtu.c +++ b/drivers/net/dsa/mv88e6xxx/global1_vtu.c @@ -351,6 +351,10 @@ int mv88e6250_g1_vtu_getnext(struct mv88e6xxx_chip *chip, if (err) return err; + err = mv88e6185_g1_stu_data_read(chip, entry); + if (err) + return err; + /* VTU DBNum[3:0] are located in VTU Operation 3:0 * VTU DBNum[5:4] are located in VTU Operation 9:8 */ diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 7dc230677b78..45fdb1256dbf 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -233,9 +233,24 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, { struct ocelot *ocelot = ds->priv; struct ocelot_port *ocelot_port = ocelot->ports[port]; + int err; + + ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA, + DEV_MAC_ENA_CFG); - 
ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG); ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); + + err = ocelot_port_flush(ocelot, port); + if (err) + dev_err(ocelot->dev, "failed to flush port %d: %d\n", + port, err); + + /* Put the port in reset. */ + ocelot_port_writel(ocelot_port, + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_RX_RST | + DEV_CLOCK_CFG_LINK_SPEED(OCELOT_SPEED_1000), + DEV_CLOCK_CFG); } static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index 06596fa1f9fe..a0596c073ddd 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -404,6 +404,7 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) if (unlikely(!xdpf)) { trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_aborted; + verdict = XDP_ABORTED; break; } @@ -424,7 +425,10 @@ static int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp) xdp_stat = &rx_ring->rx_stats.xdp_redirect; break; } - fallthrough; + trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); + xdp_stat = &rx_ring->rx_stats.xdp_aborted; + verdict = XDP_ABORTED; + break; case XDP_ABORTED: trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict); xdp_stat = &rx_ring->rx_stats.xdp_aborted; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index b1ae9eb8f247..0404aafd5ce5 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -2503,8 +2503,10 @@ static int bcm_sysport_probe(struct platform_device *pdev) priv = netdev_priv(dev); priv->clk = devm_clk_get_optional(&pdev->dev, "sw_sysport"); - if (IS_ERR(priv->clk)) - return PTR_ERR(priv->clk); + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + goto err_free_netdev; + } /* Allocate number of TX rings */ priv->tx_rings = devm_kcalloc(&pdev->dev, txq, diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 0c5373462ced..0b1b5f9c67d4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -219,6 +219,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */ CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */ CH_PCI_ID_TABLE_FENTRY(0x608b), /* Custom T6225-CR */ + CH_PCI_ID_TABLE_FENTRY(0x6092), /* Custom T62100-CR-LOM */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c index e5cfbe196ba6..19dc7dc054a2 100644 --- a/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c +++ b/drivers/net/ethernet/chelsio/inline_crypto/chtls/chtls_cm.c @@ -1158,11 +1158,9 @@ static struct sock *chtls_recv_sock(struct sock *lsk, #endif } if (!n || !n->dev) - goto free_sk; + goto free_dst; ndev = n->dev; - if (!ndev) - goto free_dst; if (is_vlan_dev(ndev)) ndev = vlan_dev_real_dev(ndev); @@ -1250,7 +1248,8 @@ static struct sock *chtls_recv_sock(struct sock *lsk, free_csk: chtls_sock_release(&csk->kref); free_dst: - neigh_release(n); + if (n) + neigh_release(n); dst_release(dst); free_sk: inet_csk_prepare_forced_close(newsk); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c 
b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 4360ce4d3fb6..6faa20bed488 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -2180,8 +2180,10 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv, struct xdp_frame **init_xdpf) { struct xdp_frame *new_xdpf, *xdpf = *init_xdpf; - void *new_buff; + void *new_buff, *aligned_data; struct page *p; + u32 data_shift; + int headroom; /* Check the data alignment and make sure the headroom is large * enough to store the xdpf backpointer. Use an aligned headroom @@ -2191,25 +2193,57 @@ static int dpaa_a050385_wa_xdpf(struct dpaa_priv *priv, * byte frame headroom. If the XDP program uses all of it, copy the * data to a new buffer and make room for storing the backpointer. */ - if (PTR_IS_ALIGNED(xdpf->data, DPAA_A050385_ALIGN) && + if (PTR_IS_ALIGNED(xdpf->data, DPAA_FD_DATA_ALIGNMENT) && xdpf->headroom >= priv->tx_headroom) { xdpf->headroom = priv->tx_headroom; return 0; } + /* Try to move the data inside the buffer just enough to align it and + * store the xdpf backpointer. If the available headroom isn't large + * enough, resort to allocating a new buffer and copying the data. + */ + aligned_data = PTR_ALIGN_DOWN(xdpf->data, DPAA_FD_DATA_ALIGNMENT); + data_shift = xdpf->data - aligned_data; + + /* The XDP frame's headroom needs to be large enough to accommodate + * shifting the data as well as storing the xdpf backpointer. + */ + if (xdpf->headroom >= data_shift + priv->tx_headroom) { + memmove(aligned_data, xdpf->data, xdpf->len); + xdpf->data = aligned_data; + xdpf->headroom = priv->tx_headroom; + return 0; + } + + /* The new xdp_frame is stored in the new buffer. Reserve enough space + * in the headroom for storing it along with the driver's private + * info. The headroom needs to be aligned to DPAA_FD_DATA_ALIGNMENT to + * guarantee the data's alignment in the buffer. + */ + headroom = ALIGN(sizeof(*new_xdpf) + priv->tx_headroom, + DPAA_FD_DATA_ALIGNMENT); + + /* Assure the extended headroom and data don't overflow the buffer, + * while maintaining the mandatory tailroom. + */ + if (headroom + xdpf->len > DPAA_BP_RAW_SIZE - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + return -ENOMEM; + p = dev_alloc_pages(0); if (unlikely(!p)) return -ENOMEM; /* Copy the data to the new buffer at a properly aligned offset */ new_buff = page_address(p); - memcpy(new_buff + priv->tx_headroom, xdpf->data, xdpf->len); + memcpy(new_buff + headroom, xdpf->data, xdpf->len); /* Create an XDP frame around the new buffer in a similar fashion * to xdp_convert_buff_to_frame. 
*/ new_xdpf = new_buff; - new_xdpf->data = new_buff + priv->tx_headroom; + new_xdpf->data = new_buff + headroom; new_xdpf->len = xdpf->len; new_xdpf->headroom = priv->tx_headroom; new_xdpf->frame_sz = DPAA_BP_RAW_SIZE; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_hw.h b/drivers/net/ethernet/freescale/enetc/enetc_hw.h index e1e950d48c92..c71fe8d751d5 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_hw.h +++ b/drivers/net/ethernet/freescale/enetc/enetc_hw.h @@ -196,6 +196,8 @@ enum enetc_bdr_type {TX, RX}; #define ENETC_CBS_BW_MASK GENMASK(6, 0) #define ENETC_PTCCBSR1(n) (0x1114 + (n) * 8) /* n = 0 to 7*/ #define ENETC_RSSHASH_KEY_SIZE 40 +#define ENETC_PRSSCAPR 0x1404 +#define ENETC_PRSSCAPR_GET_NUM_RSS(val) (BIT((val) & 0xf) * 32) #define ENETC_PRSSK(n) (0x1410 + (n) * 4) /* n = [0..9] */ #define ENETC_PSIVLANFMR 0x1700 #define ENETC_PSIVLANFMR_VS BIT(0) diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index ed8fcb8b486e..3eb5f1375bd4 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -996,6 +996,51 @@ static void enetc_phylink_destroy(struct enetc_ndev_priv *priv) phylink_destroy(priv->phylink); } +/* Initialize the entire shared memory for the flow steering entries + * of this port (PF + VFs) + */ +static int enetc_init_port_rfs_memory(struct enetc_si *si) +{ + struct enetc_cmd_rfse rfse = {0}; + struct enetc_hw *hw = &si->hw; + int num_rfs, i, err = 0; + u32 val; + + val = enetc_port_rd(hw, ENETC_PRFSCAPR); + num_rfs = ENETC_PRFSCAPR_GET_NUM_RFS(val); + + for (i = 0; i < num_rfs; i++) { + err = enetc_set_fs_entry(si, &rfse, i); + if (err) + break; + } + + return err; +} + +static int enetc_init_port_rss_memory(struct enetc_si *si) +{ + struct enetc_hw *hw = &si->hw; + int num_rss, err; + int *rss_table; + u32 val; + + val = enetc_port_rd(hw, ENETC_PRSSCAPR); + num_rss = ENETC_PRSSCAPR_GET_NUM_RSS(val); + if (!num_rss) + return 0; + + rss_table = kcalloc(num_rss, sizeof(*rss_table), GFP_KERNEL); + if (!rss_table) + return -ENOMEM; + + err = enetc_set_rss_table(si, rss_table, num_rss); + + kfree(rss_table); + + return err; +} + static int enetc_pf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -1051,6 +1096,18 @@ static int enetc_pf_probe(struct pci_dev *pdev, goto err_alloc_si_res; } + err = enetc_init_port_rfs_memory(si); + if (err) { + dev_err(&pdev->dev, "Failed to initialize RFS memory\n"); + goto err_init_port_rfs; + } + + err = enetc_init_port_rss_memory(si); + if (err) { + dev_err(&pdev->dev, "Failed to initialize RSS memory\n"); + goto err_init_port_rss; + } + err = enetc_alloc_msix(priv); if (err) { dev_err(&pdev->dev, "MSIX alloc failed\n"); @@ -1079,6 +1136,8 @@ err_phylink_create: enetc_mdiobus_destroy(pf); err_mdiobus_create: enetc_free_msix(priv); +err_init_port_rss: +err_init_port_rfs: err_alloc_msix: enetc_free_si_resources(priv); err_alloc_si_res: diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index c527f4ee1d3a..0602d5d5d2ee 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -462,6 +462,11 @@ struct bufdesc_ex { */ #define FEC_QUIRK_CLEAR_SETUP_MII (1 << 17) +/* Some link partners do not tolerate the momentary reset of the REF_CLK + * frequency when the RNCTL register is cleared by hardware reset. 
+ */ +#define FEC_QUIRK_NO_HARD_RESET (1 << 18) + struct bufdesc_prop { int qid; /* Address of Rx and Tx buffers */ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 04f24c66cf36..9ebdb0e54291 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -100,7 +100,8 @@ static const struct fec_devinfo fec_imx27_info = { static const struct fec_devinfo fec_imx28_info = { .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC | - FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII, + FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII | + FEC_QUIRK_NO_HARD_RESET, }; static const struct fec_devinfo fec_imx6q_info = { @@ -953,7 +954,8 @@ fec_restart(struct net_device *ndev) * For i.MX6SX SOC, enet use AXI bus, we use disable MAC * instead of reset MAC itself. */ - if (fep->quirks & FEC_QUIRK_HAS_AVB) { + if (fep->quirks & FEC_QUIRK_HAS_AVB || + ((fep->quirks & FEC_QUIRK_NO_HARD_RESET) && fep->link)) { writel(0, fep->hwp + FEC_ECNTRL); } else { writel(1, fep->hwp + FEC_ECNTRL); @@ -2165,9 +2167,9 @@ static int fec_enet_mii_init(struct platform_device *pdev) fep->mii_bus->parent = &pdev->dev; err = of_mdiobus_register(fep->mii_bus, node); - of_node_put(node); if (err) goto err_out_free_mdiobus; + of_node_put(node); mii_cnt++; @@ -2180,6 +2182,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) err_out_free_mdiobus: mdiobus_free(fep->mii_bus); err_out: + of_node_put(node); return err; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index c242883fea5d..48549db23c52 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -9813,12 +9813,19 @@ int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id) void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id) { + struct hnae3_handle *handle = &vport->nic; struct hclge_dev *hdev = vport->back; int reset_try_times = 0; int reset_status; u16 queue_gid; int ret; + if (queue_id >= handle->kinfo.num_tqps) { + dev_warn(&hdev->pdev->dev, "Invalid vf queue id(%u)\n", + queue_id); + return; + } + queue_gid = hclge_covert_handle_qid_global(&vport->nic, queue_id); ret = hclge_send_reset_tqp_cmd(hdev, queue_gid, true); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index 754c09ada901..ffb416e088a9 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -158,21 +158,31 @@ static int hclge_get_ring_chain_from_mbx( struct hclge_vport *vport) { struct hnae3_ring_chain_node *cur_chain, *new_chain; + struct hclge_dev *hdev = vport->back; int ring_num; - int i = 0; + int i; ring_num = req->msg.ring_num; if (ring_num > HCLGE_MBX_MAX_RING_CHAIN_PARAM_NUM) return -ENOMEM; + for (i = 0; i < ring_num; i++) { + if (req->msg.param[i].tqp_index >= vport->nic.kinfo.rss_size) { + dev_err(&hdev->pdev->dev, "tqp index(%u) is out of range(0-%u)\n", + req->msg.param[i].tqp_index, + vport->nic.kinfo.rss_size - 1); + return -EINVAL; + } + } + hnae3_set_bit(ring_chain->flag, HNAE3_RING_TYPE_B, - req->msg.param[i].ring_type); + req->msg.param[0].ring_type); ring_chain->tqp_index = hclge_get_queue_id(vport->nic.kinfo.tqp - [req->msg.param[i].tqp_index]); + [req->msg.param[0].tqp_index]); hnae3_set_field(ring_chain->int_gl_idx, 
HNAE3_RING_GL_IDX_M, - HNAE3_RING_GL_IDX_S, req->msg.param[i].int_gl_index); + HNAE3_RING_GL_IDX_S, req->msg.param[0].int_gl_index); cur_chain = ring_chain; @@ -597,6 +607,17 @@ static void hclge_get_rss_key(struct hclge_vport *vport, index = mbx_req->msg.data[0]; + /* Check the query index of rss_hash_key from VF, make sure no + * more than the size of rss_hash_key. + */ + if (((index + 1) * HCLGE_RSS_MBX_RESP_LEN) > + sizeof(vport[0].rss_hash_key)) { + dev_warn(&hdev->pdev->dev, + "failed to get the rss hash key, the index(%u) invalid !\n", + index); + return; + } + memcpy(resp_msg->data, &hdev->vport[0].rss_hash_key[index * HCLGE_RSS_MBX_RESP_LEN], HCLGE_RSS_MBX_RESP_LEN); diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 9778c83150f1..a536fdbf05e1 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4918,7 +4918,22 @@ static void ibmvnic_handle_crq(union ibmvnic_crq *crq, complete(&adapter->init_done); adapter->init_done_rc = -EIO; } - ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); + rc = ibmvnic_reset(adapter, VNIC_RESET_FAILOVER); + if (rc && rc != -EBUSY) { + /* We were unable to schedule the failover + * reset either because the adapter was still + * probing (eg: during kexec) or we could not + * allocate memory. Clear the failover_pending + * flag since no one else will. We ignore + * EBUSY because it means either FAILOVER reset + * is already scheduled or the adapter is + * being removed. + */ + netdev_err(netdev, + "Error %ld scheduling failover reset\n", + rc); + adapter->failover_pending = false; + } break; case IBMVNIC_CRQ_INIT_COMPLETE: dev_info(dev, "Partner initialization complete\n"); @@ -5084,6 +5099,12 @@ static void ibmvnic_tasklet(struct tasklet_struct *t) while (!done) { /* Pull all the valid messages off the CRQ */ while ((crq = ibmvnic_next_crq(adapter)) != NULL) { + /* This barrier makes sure ibmvnic_next_crq()'s + * crq->generic.first & IBMVNIC_CRQ_CMD_RSP is loaded + * before ibmvnic_handle_crq()'s + * switch(gen_crq->first) and switch(gen_crq->cmd). + */ + dma_rmb(); ibmvnic_handle_crq(crq, adapter); crq->generic.first = 0; } @@ -5438,11 +5459,6 @@ static int ibmvnic_remove(struct vio_dev *dev) unsigned long flags; spin_lock_irqsave(&adapter->state_lock, flags); - if (test_bit(0, &adapter->resetting)) { - spin_unlock_irqrestore(&adapter->state_lock, flags); - return -EBUSY; - } - adapter->state = VNIC_REMOVING; spin_unlock_irqrestore(&adapter->state_lock, flags); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 21ee56420c3a..1b6ec9be155a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -55,12 +55,7 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) pfe.event = VIRTCHNL_EVENT_LINK_CHANGE; pfe.severity = PF_EVENT_SEVERITY_INFO; - - /* Always report link is down if the VF queues aren't enabled */ - if (!vf->queues_enabled) { - pfe.event_data.link_event.link_status = false; - pfe.event_data.link_event.link_speed = 0; - } else if (vf->link_forced) { + if (vf->link_forced) { pfe.event_data.link_event.link_status = vf->link_up; pfe.event_data.link_event.link_speed = (vf->link_up ? 
i40e_virtchnl_link_speed(ls->link_speed) : 0); @@ -70,7 +65,6 @@ static void i40e_vc_notify_vf_link_state(struct i40e_vf *vf) pfe.event_data.link_event.link_speed = i40e_virtchnl_link_speed(ls->link_speed); } - i40e_aq_send_msg_to_vf(hw, abs_vf_id, VIRTCHNL_OP_EVENT, 0, (u8 *)&pfe, sizeof(pfe), NULL); } @@ -2443,8 +2437,6 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg) } } - vf->queues_enabled = true; - error_param: /* send the response to the VF */ return i40e_vc_send_resp_to_vf(vf, VIRTCHNL_OP_ENABLE_QUEUES, @@ -2466,9 +2458,6 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg) struct i40e_pf *pf = vf->pf; i40e_status aq_ret = 0; - /* Immediately mark queues as disabled */ - vf->queues_enabled = false; - if (!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states)) { aq_ret = I40E_ERR_PARAM; goto error_param; @@ -4046,20 +4035,16 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto error_param; vf = &pf->vf[vf_id]; - vsi = pf->vsi[vf->lan_vsi_idx]; /* When the VF is resetting wait until it is done. * It can take up to 200 milliseconds, * but wait for up to 300 milliseconds to be safe. - * If the VF is indeed in reset, the vsi pointer has - * to show on the newly loaded vsi under pf->vsi[id]. + * Acquire the VSI pointer only after the VF has been + * properly initialized. */ for (i = 0; i < 15; i++) { - if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { - if (i > 0) - vsi = pf->vsi[vf->lan_vsi_idx]; + if (test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) break; - } msleep(20); } if (!test_bit(I40E_VF_STATE_INIT, &vf->vf_states)) { @@ -4068,6 +4053,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) ret = -EAGAIN; goto error_param; } + vsi = pf->vsi[vf->lan_vsi_idx]; if (is_multicast_ether_addr(mac)) { dev_err(&pf->pdev->dev, diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index 5491215d81de..091e32c1bb46 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -98,7 +98,6 @@ struct i40e_vf { unsigned int tx_rate; /* Tx bandwidth limit in Mbps */ bool link_forced; bool link_up; /* only valid if VF link is forced */ - bool queues_enabled; /* true if the VF queues are enabled */ bool spoofchk; u16 num_vlan; diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index 56725356a17b..fa1e128c24ec 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -68,7 +68,9 @@ #define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16) #define ICE_AQ_LEN 64 #define ICE_MBXSQ_LEN 64 -#define ICE_MIN_MSIX 2 +#define ICE_MIN_LAN_TXRX_MSIX 1 +#define ICE_MIN_LAN_OICR_MSIX 1 +#define ICE_MIN_MSIX (ICE_MIN_LAN_TXRX_MSIX + ICE_MIN_LAN_OICR_MSIX) #define ICE_FDIR_MSIX 1 #define ICE_NO_VSI 0xffff #define ICE_VSI_MAP_CONTIG 0 diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 9e8e9531cd87..69c113a4de7e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3258,8 +3258,8 @@ ice_set_rxfh(struct net_device *netdev, const u32 *indir, const u8 *key, */ static int ice_get_max_txq(struct ice_pf *pf) { - return min_t(int, num_online_cpus(), - pf->hw.func_caps.common_cap.num_txq); + return min3(pf->num_lan_msix, (u16)num_online_cpus(), + (u16)pf->hw.func_caps.common_cap.num_txq); } /** @@ -3268,8 +3268,8 @@ static int ice_get_max_txq(struct 
ice_pf *pf) */ static int ice_get_max_rxq(struct ice_pf *pf) { - return min_t(int, num_online_cpus(), - pf->hw.func_caps.common_cap.num_rxq); + return min3(pf->num_lan_msix, (u16)num_online_cpus(), + (u16)pf->hw.func_caps.common_cap.num_rxq); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 2d27f66ac853..192729546bbf 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -1576,7 +1576,13 @@ ice_set_fdir_input_set(struct ice_vsi *vsi, struct ethtool_rx_flow_spec *fsp, sizeof(struct in6_addr)); input->ip.v6.l4_header = fsp->h_u.usr_ip6_spec.l4_4_bytes; input->ip.v6.tc = fsp->h_u.usr_ip6_spec.tclass; - input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto; + + /* if no protocol requested, use IPPROTO_NONE */ + if (!fsp->m_u.usr_ip6_spec.l4_proto) + input->ip.v6.proto = IPPROTO_NONE; + else + input->ip.v6.proto = fsp->h_u.usr_ip6_spec.l4_proto; + memcpy(input->mask.v6.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, sizeof(struct in6_addr)); memcpy(input->mask.v6.src_ip, fsp->m_u.usr_ip6_spec.ip6src, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 3df67486d42d..ad9c22a1b97a 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -161,8 +161,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) switch (vsi->type) { case ICE_VSI_PF: - vsi->alloc_txq = min_t(int, ice_get_avail_txq_count(pf), - num_online_cpus()); + vsi->alloc_txq = min3(pf->num_lan_msix, + ice_get_avail_txq_count(pf), + (u16)num_online_cpus()); if (vsi->req_txq) { vsi->alloc_txq = vsi->req_txq; vsi->num_txq = vsi->req_txq; @@ -174,8 +175,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) { vsi->alloc_rxq = 1; } else { - vsi->alloc_rxq = min_t(int, ice_get_avail_rxq_count(pf), - num_online_cpus()); + vsi->alloc_rxq = min3(pf->num_lan_msix, + ice_get_avail_rxq_count(pf), + (u16)num_online_cpus()); if (vsi->req_rxq) { vsi->alloc_rxq = vsi->req_rxq; vsi->num_rxq = vsi->req_rxq; @@ -184,7 +186,9 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) pf->num_lan_rx = vsi->alloc_rxq; - vsi->num_q_vectors = max_t(int, vsi->alloc_rxq, vsi->alloc_txq); + vsi->num_q_vectors = min_t(int, pf->num_lan_msix, + max_t(int, vsi->alloc_rxq, + vsi->alloc_txq)); break; case ICE_VSI_VF: vf = &pf->vf[vsi->vf_id]; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index c52b9bb0e3ab..e10ca8929f85 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -3430,18 +3430,14 @@ static int ice_ena_msix_range(struct ice_pf *pf) if (v_actual < v_budget) { dev_warn(dev, "not enough OS MSI-X vectors. 
requested = %d, obtained = %d\n", v_budget, v_actual); -/* 2 vectors each for LAN and RDMA (traffic + OICR), one for flow director */ -#define ICE_MIN_LAN_VECS 2 -#define ICE_MIN_RDMA_VECS 2 -#define ICE_MIN_VECS (ICE_MIN_LAN_VECS + ICE_MIN_RDMA_VECS + 1) - if (v_actual < ICE_MIN_LAN_VECS) { + if (v_actual < ICE_MIN_MSIX) { /* error if we can't get minimum vectors */ pci_disable_msix(pf->pdev); err = -ERANGE; goto msix_err; } else { - pf->num_lan_msix = ICE_MIN_LAN_VECS; + pf->num_lan_msix = ICE_MIN_LAN_TXRX_MSIX; } } @@ -4884,9 +4880,15 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) goto err_update_filters; } - /* Add filter for new MAC. If filter exists, just return success */ + /* Add filter for new MAC. If filter exists, return success */ status = ice_fltr_add_mac(vsi, mac, ICE_FWD_TO_VSI); if (status == ICE_ERR_ALREADY_EXISTS) { + /* Although this MAC filter is already present in hardware it's + * possible in some cases (e.g. bonding) that dev_addr was + * modified outside of the driver and needs to be restored back + * to this value. + */ + memcpy(netdev->dev_addr, mac, netdev->addr_len); netdev_dbg(netdev, "filter for MAC %pM already exists\n", mac); return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index a2d0aad8cfdd..b6fa83c619dd 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -1923,12 +1923,15 @@ int ice_tx_csum(struct ice_tx_buf *first, struct ice_tx_offload_params *off) ICE_TX_CTX_EIPT_IPV4_NO_CSUM; l4_proto = ip.v4->protocol; } else if (first->tx_flags & ICE_TX_FLAGS_IPV6) { + int ret; + tunnel |= ICE_TX_CTX_EIPT_IPV6; exthdr = ip.hdr + sizeof(*ip.v6); l4_proto = ip.v6->nexthdr; - if (l4.hdr != exthdr) - ipv6_skip_exthdr(skb, exthdr - skb->data, - &l4_proto, &frag_off); + ret = ipv6_skip_exthdr(skb, exthdr - skb->data, + &l4_proto, &frag_off); + if (ret < 0) + return -1; } /* define outer transport */ diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 61d331ce38cd..ec8cd69d4992 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1675,12 +1675,18 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev, cmd->base.phy_address = hw->phy.addr; /* advertising link modes */ - ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); - ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_10_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_10_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 10baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_100_HALF) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Half); + if (hw->phy.autoneg_advertised & ADVERTISE_100_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 100baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_1000_FULL) + ethtool_link_ksettings_add_link_mode(cmd, advertising, 1000baseT_Full); + if (hw->phy.autoneg_advertised & ADVERTISE_2500_FULL) + 
ethtool_link_ksettings_add_link_mode(cmd, advertising, 2500baseT_Full); /* set autoneg settings */ if (hw->mac.autoneg == 1) { @@ -1708,7 +1714,8 @@ static int igc_ethtool_get_link_ksettings(struct net_device *netdev, Asym_Pause); } - status = rd32(IGC_STATUS); + status = pm_runtime_suspended(&adapter->pdev->dev) ? + 0 : rd32(IGC_STATUS); if (status & IGC_STATUS_LU) { if (status & IGC_STATUS_SPEED_1000) { @@ -1792,6 +1799,12 @@ igc_ethtool_set_link_ksettings(struct net_device *netdev, ethtool_convert_link_mode_to_legacy_u32(&advertising, cmd->link_modes.advertising); + /* Converting to legacy u32 drops ETHTOOL_LINK_MODE_2500baseT_Full_BIT. + * We have to check this and convert it to ADVERTISE_2500_FULL + * (aka ETHTOOL_LINK_MODE_2500baseX_Full_BIT) explicitly. + */ + if (ethtool_link_ksettings_test_link_mode(cmd, advertising, 2500baseT_Full)) + advertising |= ADVERTISE_2500_FULL; if (cmd->base.autoneg == AUTONEG_ENABLE) { hw->mac.autoneg = 1; diff --git a/drivers/net/ethernet/intel/igc/igc_i225.c b/drivers/net/ethernet/intel/igc/igc_i225.c index 8b67d9b49a83..7ec04e48860c 100644 --- a/drivers/net/ethernet/intel/igc/igc_i225.c +++ b/drivers/net/ethernet/intel/igc/igc_i225.c @@ -219,9 +219,9 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, u16 *data) { struct igc_nvm_info *nvm = &hw->nvm; + s32 ret_val = -IGC_ERR_NVM; u32 attempts = 100000; u32 i, k, eewr = 0; - s32 ret_val = 0; /* A check for invalid values: offset too large, too many words, * too many words for the offset, and not enough words. @@ -229,7 +229,6 @@ static s32 igc_write_nvm_srwr(struct igc_hw *hw, u16 offset, u16 words, if (offset >= nvm->word_size || (words > (nvm->word_size - offset)) || words == 0) { hw_dbg("nvm parameter(s) out of bounds\n"); - ret_val = -IGC_ERR_NVM; goto out; } diff --git a/drivers/net/ethernet/intel/igc/igc_mac.c b/drivers/net/ethernet/intel/igc/igc_mac.c index 09cd0ec7ee87..67b8ffd21d8a 100644 --- a/drivers/net/ethernet/intel/igc/igc_mac.c +++ b/drivers/net/ethernet/intel/igc/igc_mac.c @@ -638,7 +638,7 @@ s32 igc_config_fc_after_link_up(struct igc_hw *hw) } out: - return 0; + return ret_val; } /** diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c index a30eb90ba3d2..dd590086fe6a 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c @@ -29,16 +29,16 @@ static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe) /* Clear entry invalidation bit */ pe->tcam[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK; - /* Write tcam index - indirect access */ - mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); - for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) - mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); - /* Write sram index - indirect access */ mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index); for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++) mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram[i]); + /* Write tcam index - indirect access */ + mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index); + for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++) + mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam[i]); + return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index d298b9357177..6c6b411e78fd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -469,6 +469,9 @@ int 
rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, int pf = rvu_get_pf(req->hdr.pcifunc); u8 cgx_id, lmac_id; + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); cgx_lmac_addr_set(cgx_id, lmac_id, req->mac_addr); @@ -485,6 +488,9 @@ int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, int rc = 0, i; u64 cfg; + if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) + return -EPERM; + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); rsp->hdr.rc = rc; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 73fb94dd5fbc..e6869435e1f3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -478,10 +478,11 @@ dma_addr_t __otx2_alloc_rbuf(struct otx2_nic *pfvf, struct otx2_pool *pool) dma_addr_t iova; u8 *buf; - buf = napi_alloc_frag(pool->rbsize); + buf = napi_alloc_frag(pool->rbsize + OTX2_ALIGN); if (unlikely(!buf)) return -ENOMEM; + buf = PTR_ALIGN(buf, OTX2_ALIGN); iova = dma_map_single_attrs(pfvf->dev, buf, pool->rbsize, DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); if (unlikely(dma_mapping_error(pfvf->dev, iova))) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c index 718f8c0a4f6b..84e501e057b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c @@ -273,7 +273,7 @@ int mlx5e_health_rsc_fmsg_dump(struct mlx5e_priv *priv, struct mlx5_rsc_key *key err = devlink_fmsg_binary_pair_nest_start(fmsg, "data"); if (err) - return err; + goto free_page; cmd = mlx5_rsc_dump_cmd_create(mdev, key); if (IS_ERR(cmd)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 072363e73f1c..6bc6b48a56dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -167,6 +167,12 @@ static const struct rhashtable_params tuples_nat_ht_params = { .min_size = 16 * 1024, }; +static bool +mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry) +{ + return !!(entry->tuple_nat_node.next); +} + static int mlx5_tc_ct_rule_to_tuple(struct mlx5_ct_tuple *tuple, struct flow_rule *rule) { @@ -911,13 +917,13 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, err_insert: mlx5_tc_ct_entry_del_rules(ct_priv, entry); err_rules: - rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, - &entry->tuple_nat_node, tuples_nat_ht_params); + if (mlx5_tc_ct_entry_has_nat(entry)) + rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, + &entry->tuple_nat_node, tuples_nat_ht_params); err_tuple_nat: - if (entry->tuple_node.next) - rhashtable_remove_fast(&ct_priv->ct_tuples_ht, - &entry->tuple_node, - tuples_ht_params); + rhashtable_remove_fast(&ct_priv->ct_tuples_ht, + &entry->tuple_node, + tuples_ht_params); err_tuple: err_set: kfree(entry); @@ -932,7 +938,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv, { mlx5_tc_ct_entry_del_rules(ct_priv, entry); mutex_lock(&ct_priv->shared_counter_lock); - if (entry->tuple_node.next) + if (mlx5_tc_ct_entry_has_nat(entry)) rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, &entry->tuple_nat_node, tuples_nat_ht_params); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c index 
6c5c54bcd9be..5cb936541b9e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_stats.c @@ -76,7 +76,7 @@ static const struct counter_desc mlx5e_ipsec_sw_stats_desc[] = { static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_sw) { - return NUM_IPSEC_SW_COUNTERS; + return priv->ipsec ? NUM_IPSEC_SW_COUNTERS : 0; } static inline MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_sw) {} @@ -105,7 +105,7 @@ static MLX5E_DECLARE_STATS_GRP_OP_FILL_STATS(ipsec_sw) static MLX5E_DECLARE_STATS_GRP_OP_NUM_STATS(ipsec_hw) { - return (mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0; + return (priv->ipsec && mlx5_fpga_ipsec_device_caps(priv->mdev)) ? NUM_IPSEC_HW_COUNTERS : 0; } static MLX5E_DECLARE_STATS_GRP_OP_UPDATE_STATS(ipsec_hw) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c index d20243d6a032..f23c67575073 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c @@ -1151,6 +1151,7 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) { struct mlx5e_channels new_channels = {}; bool reset_channels = true; + bool opened; int err = 0; mutex_lock(&priv->state_lock); @@ -1159,22 +1160,24 @@ static int mlx5e_set_trust_state(struct mlx5e_priv *priv, u8 trust_state) mlx5e_params_calc_trust_tx_min_inline_mode(priv->mdev, &new_channels.params, trust_state); - if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { - priv->channels.params = new_channels.params; + opened = test_bit(MLX5E_STATE_OPENED, &priv->state); + if (!opened) reset_channels = false; - } /* Skip if tx_min_inline is the same */ if (new_channels.params.tx_min_inline_mode == priv->channels.params.tx_min_inline_mode) reset_channels = false; - if (reset_channels) + if (reset_channels) { err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_update_trust_state_hw, &trust_state); - else + } else { err = mlx5e_update_trust_state_hw(priv, &trust_state); + if (!err && !opened) + priv->channels.params = new_channels.params; + } mutex_unlock(&priv->state_lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 2d37742a888c..302001d6661e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -447,12 +447,18 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, goto out; } - new_channels.params = priv->channels.params; + new_channels.params = *cur_params; new_channels.params.num_channels = count; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + struct mlx5e_params old_params; + + old_params = *cur_params; *cur_params = new_channels.params; err = mlx5e_num_channels_changed(priv); + if (err) + *cur_params = old_params; + goto out; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 6a852b4901aa..3fc7d18ac868 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -3614,18 +3614,23 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, new_channels.params.num_tc = tc ? 
tc : 1; if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + struct mlx5e_params old_params; + + old_params = priv->channels.params; priv->channels.params = new_channels.params; + err = mlx5e_num_channels_changed(priv); + if (err) + priv->channels.params = old_params; + goto out; } err = mlx5e_safe_switch_channels(priv, &new_channels, mlx5e_num_channels_changed_ctx, NULL); - if (err) - goto out; - priv->max_opened_tc = max_t(u8, priv->max_opened_tc, - new_channels.params.num_tc); out: + priv->max_opened_tc = max_t(u8, priv->max_opened_tc, + priv->channels.params.num_tc); mutex_unlock(&priv->state_lock); return err; } @@ -3757,7 +3762,7 @@ static int set_feature_lro(struct net_device *netdev, bool enable) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_channels new_channels = {}; - struct mlx5e_params *old_params; + struct mlx5e_params *cur_params; int err = 0; bool reset; @@ -3770,8 +3775,8 @@ static int set_feature_lro(struct net_device *netdev, bool enable) goto out; } - old_params = &priv->channels.params; - if (enable && !MLX5E_GET_PFLAG(old_params, MLX5E_PFLAG_RX_STRIDING_RQ)) { + cur_params = &priv->channels.params; + if (enable && !MLX5E_GET_PFLAG(cur_params, MLX5E_PFLAG_RX_STRIDING_RQ)) { netdev_warn(netdev, "can't set LRO with legacy RQ\n"); err = -EINVAL; goto out; @@ -3779,18 +3784,23 @@ static int set_feature_lro(struct net_device *netdev, bool enable) reset = test_bit(MLX5E_STATE_OPENED, &priv->state); - new_channels.params = *old_params; + new_channels.params = *cur_params; new_channels.params.lro_en = enable; - if (old_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) { - if (mlx5e_rx_mpwqe_is_linear_skb(mdev, old_params, NULL) == + if (cur_params->rq_wq_type != MLX5_WQ_TYPE_CYCLIC) { + if (mlx5e_rx_mpwqe_is_linear_skb(mdev, cur_params, NULL) == mlx5e_rx_mpwqe_is_linear_skb(mdev, &new_channels.params, NULL)) reset = false; } if (!reset) { - *old_params = new_channels.params; + struct mlx5e_params old_params; + + old_params = *cur_params; + *cur_params = new_channels.params; err = mlx5e_modify_tirs_lro(priv); + if (err) + *cur_params = old_params; goto out; } @@ -4067,9 +4077,16 @@ int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, } if (!reset) { + unsigned int old_mtu = params->sw_mtu; + params->sw_mtu = new_mtu; - if (preactivate) - preactivate(priv, NULL); + if (preactivate) { + err = preactivate(priv, NULL); + if (err) { + params->sw_mtu = old_mtu; + goto out; + } + } netdev->mtu = params->sw_mtu; goto out; } @@ -5027,7 +5044,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) FT_CAP(modify_root) && FT_CAP(identified_miss_table_mode) && FT_CAP(flow_table_modify)) { -#ifdef CONFIG_MLX5_ESWITCH +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) netdev->hw_features |= NETIF_F_HW_TC; #endif #ifdef CONFIG_MLX5_EN_ARFS diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 989c70c1eda3..f0ceae65f6cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -737,7 +737,9 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->features |= NETIF_F_NETNS_LOCAL; +#if IS_ENABLED(CONFIG_MLX5_CLS_ACT) netdev->hw_features |= NETIF_F_HW_TC; +#endif netdev->hw_features |= NETIF_F_SG; netdev->hw_features |= NETIF_F_IP_CSUM; netdev->hw_features |= NETIF_F_IPV6_CSUM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 
7f5851c61218..ca4b55839a8a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1262,8 +1262,10 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); if (mlx5e_cqe_regb_chain(cqe)) - if (!mlx5e_tc_update_skb(cqe, skb)) + if (!mlx5e_tc_update_skb(cqe, skb)) { + dev_kfree_skb_any(skb); goto free_wqe; + } napi_gro_receive(rq->cq.napi, skb); @@ -1316,8 +1318,10 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) if (rep->vlan && skb_vlan_tag_present(skb)) skb_vlan_pop(skb); - if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) + if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) { + dev_kfree_skb_any(skb); goto free_wqe; + } napi_gro_receive(rq->cq.napi, skb); @@ -1371,8 +1375,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); - if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) + if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) { + dev_kfree_skb_any(skb); goto mpwrq_cqe_out; + } napi_gro_receive(rq->cq.napi, skb); @@ -1528,8 +1534,10 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb); if (mlx5e_cqe_regb_chain(cqe)) - if (!mlx5e_tc_update_skb(cqe, skb)) + if (!mlx5e_tc_update_skb(cqe, skb)) { + dev_kfree_skb_any(skb); goto mpwrq_cqe_out; + } napi_gro_receive(rq->cq.napi, skb); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 4cdf834fa74a..dd0bfbacad47 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -67,6 +67,7 @@ #include "lib/geneve.h" #include "lib/fs_chains.h" #include "diag/en_tc_tracepoint.h" +#include <asm/div64.h> #define nic_chains(priv) ((priv)->fs.tc.chains) #define MLX5_MH_ACT_SZ MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto) @@ -1162,6 +1163,9 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, struct mlx5e_tc_mod_hdr_acts *mod_hdr_acts; struct mlx5_flow_handle *rule; + if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) + return mlx5_eswitch_add_offloaded_rule(esw, spec, attr); + if (flow_flag_test(flow, CT)) { mod_hdr_acts = &attr->parse_attr->mod_hdr_acts; @@ -1192,6 +1196,9 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, { flow_flag_clear(flow, OFFLOADED); + if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) + goto offload_rule_0; + if (flow_flag_test(flow, CT)) { mlx5_tc_ct_delete_flow(get_ct_priv(flow->priv), flow, attr); return; @@ -1200,6 +1207,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, if (attr->esw_attr->split_count) mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr); +offload_rule_0: mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); } @@ -2269,8 +2277,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | BIT(FLOW_DISSECTOR_KEY_MPLS))) { NL_SET_ERR_MSG_MOD(extack, "Unsupported key"); - netdev_warn(priv->netdev, "Unsupported key used: 0x%x\n", - dissector->used_keys); + netdev_dbg(priv->netdev, "Unsupported key used: 0x%x\n", + dissector->used_keys); return -EOPNOTSUPP; } @@ -5007,13 +5015,13 @@ errout: return err; } -static int apply_police_params(struct mlx5e_priv *priv, u32 rate, +static int apply_police_params(struct mlx5e_priv *priv, u64 rate, struct netlink_ext_ack *extack) { struct mlx5e_rep_priv *rpriv = priv->ppriv; struct 
mlx5_eswitch *esw; + u32 rate_mbps = 0; u16 vport_num; - u32 rate_mbps; int err; vport_num = rpriv->rep->vport; @@ -5030,7 +5038,11 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, * Moreover, if rate is non zero we choose to configure to a minimum of * 1 mbit/sec. */ - rate_mbps = rate ? max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; + if (rate) { + rate = (rate * BITS_PER_BYTE) + 500000; + rate_mbps = max_t(u32, do_div(rate, 1000000), 1); + } + err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); if (err) NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index b899539a0786..ee4d86c1f436 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1141,6 +1141,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa destroy_ft: root->cmds->destroy_flow_table(root, ft); free_ft: + rhltable_destroy(&ft->fgs_hash); kfree(ft); unlock_root: mutex_unlock(&root->chain_lock); @@ -1759,6 +1760,7 @@ search_again_locked: if (!fte_tmp) continue; rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp); + /* No error check needed here, because insert_fte() is not called */ up_write_ref_node(&fte_tmp->node, false); tree_put_node(&fte_tmp->node, false); kmem_cache_free(steering->ftes_cache, fte); @@ -1811,6 +1813,8 @@ skip_search: up_write_ref_node(&g->node, false); rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); up_write_ref_node(&fte->node, false); + if (IS_ERR(rule)) + tree_put_node(&fte->node, false); return rule; } rule = ERR_PTR(-ENOENT); @@ -1909,6 +1913,8 @@ search_again_locked: up_write_ref_node(&g->node, false); rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); up_write_ref_node(&fte->node, false); + if (IS_ERR(rule)) + tree_put_node(&fte->node, false); tree_put_node(&g->node, false); return rule; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h index 3a9fa629503f..d046db7bb047 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h @@ -90,4 +90,9 @@ int mlx5_create_encryption_key(struct mlx5_core_dev *mdev, u32 key_type, u32 *p_key_id); void mlx5_destroy_encryption_key(struct mlx5_core_dev *mdev, u32 key_id); +static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev) +{ + return devlink_net(priv_to_devlink(dev)); +} + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index eb956ce904bc..c0656d4782e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -58,7 +58,7 @@ struct fw_page { struct rb_node rb_node; u64 addr; struct page *page; - u16 func_id; + u32 function; unsigned long bitmask; struct list_head list; unsigned free_count; @@ -74,12 +74,17 @@ enum { MLX5_NUM_4K_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, }; -static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func_id) +static u32 get_function(u16 func_id, bool ec_function) +{ + return (u32)func_id | (ec_function << 16); +} + +static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function) { struct rb_root *root; int err; - root = xa_load(&dev->priv.page_root_xa, func_id); + root = xa_load(&dev->priv.page_root_xa, function); 
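/* Illustrative aside, not part of the patch: the xarray key used in the
 * lookup above is the packed "function" value that get_function() earlier
 * in this file builds from the 16-bit function id with the embedded-CPU
 * flag folded into bit 16. For example (hypothetical ids):
 *
 *	get_function(5, false) == 0x00005	host function 5
 *	get_function(5, true)  == 0x10005	same id on the embedded CPU
 *
 * so pages given to an ECPF and to a host function that share a func_id end
 * up in distinct rb-roots and are reclaimed against the correct owner.
 */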
if (root) return root; @@ -87,7 +92,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func if (!root) return ERR_PTR(-ENOMEM); - err = xa_insert(&dev->priv.page_root_xa, func_id, root, GFP_KERNEL); + err = xa_insert(&dev->priv.page_root_xa, function, root, GFP_KERNEL); if (err) { kfree(root); return ERR_PTR(err); @@ -98,7 +103,7 @@ static struct rb_root *page_root_per_func_id(struct mlx5_core_dev *dev, u16 func return root; } -static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id) +static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u32 function) { struct rb_node *parent = NULL; struct rb_root *root; @@ -107,7 +112,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u struct fw_page *tfp; int i; - root = page_root_per_func_id(dev, func_id); + root = page_root_per_function(dev, function); if (IS_ERR(root)) return PTR_ERR(root); @@ -130,7 +135,7 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u nfp->addr = addr; nfp->page = page; - nfp->func_id = func_id; + nfp->function = function; nfp->free_count = MLX5_NUM_4K_IN_PAGE; for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++) set_bit(i, &nfp->bitmask); @@ -143,14 +148,14 @@ static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u } static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr, - u32 func_id) + u32 function) { struct fw_page *result = NULL; struct rb_root *root; struct rb_node *tmp; struct fw_page *tfp; - root = xa_load(&dev->priv.page_root_xa, func_id); + root = xa_load(&dev->priv.page_root_xa, function); if (WARN_ON_ONCE(!root)) return NULL; @@ -194,14 +199,14 @@ static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id, return err; } -static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u16 func_id) +static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr, u32 function) { struct fw_page *fp = NULL; struct fw_page *iter; unsigned n; list_for_each_entry(iter, &dev->priv.free_list, list) { - if (iter->func_id != func_id) + if (iter->function != function) continue; fp = iter; } @@ -231,7 +236,7 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp, { struct rb_root *root; - root = xa_load(&dev->priv.page_root_xa, fwp->func_id); + root = xa_load(&dev->priv.page_root_xa, fwp->function); if (WARN_ON_ONCE(!root)) return; @@ -244,12 +249,12 @@ static void free_fwp(struct mlx5_core_dev *dev, struct fw_page *fwp, kfree(fwp); } -static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id) +static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 function) { struct fw_page *fwp; int n; - fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, func_id); + fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK, function); if (!fwp) { mlx5_core_warn_rl(dev, "page not found\n"); return; @@ -263,7 +268,7 @@ static void free_4k(struct mlx5_core_dev *dev, u64 addr, u32 func_id) list_add(&fwp->list, &dev->priv.free_list); } -static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id) +static int alloc_system_page(struct mlx5_core_dev *dev, u32 function) { struct device *device = mlx5_core_dma_dev(dev); int nid = dev_to_node(device); @@ -291,7 +296,7 @@ map: goto map; } - err = insert_page(dev, addr, page, func_id); + err = insert_page(dev, addr, page, function); if (err) { mlx5_core_err(dev, "failed to track allocated page\n"); dma_unmap_page(device, addr, PAGE_SIZE, DMA_BIDIRECTIONAL); @@ -328,6 +333,7 @@ 
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id, static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int notify_fail, bool ec_function) { + u32 function = get_function(func_id, ec_function); u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0}; int inlen = MLX5_ST_SZ_BYTES(manage_pages_in); u64 addr; @@ -345,10 +351,10 @@ static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, for (i = 0; i < npages; i++) { retry: - err = alloc_4k(dev, &addr, func_id); + err = alloc_4k(dev, &addr, function); if (err) { if (err == -ENOMEM) - err = alloc_system_page(dev, func_id); + err = alloc_system_page(dev, function); if (err) goto out_4k; @@ -384,7 +390,7 @@ retry: out_4k: for (i--; i >= 0; i--) - free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), func_id); + free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]), function); out_free: kvfree(in); if (notify_fail) @@ -392,14 +398,15 @@ out_free: return err; } -static void release_all_pages(struct mlx5_core_dev *dev, u32 func_id, +static void release_all_pages(struct mlx5_core_dev *dev, u16 func_id, bool ec_function) { + u32 function = get_function(func_id, ec_function); struct rb_root *root; struct rb_node *p; int npages = 0; - root = xa_load(&dev->priv.page_root_xa, func_id); + root = xa_load(&dev->priv.page_root_xa, function); if (WARN_ON_ONCE(!root)) return; @@ -446,6 +453,7 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, struct rb_root *root; struct fw_page *fwp; struct rb_node *p; + bool ec_function; u32 func_id; u32 npages; u32 i = 0; @@ -456,8 +464,9 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, /* No hard feelings, we want our pages back! */ npages = MLX5_GET(manage_pages_in, in, input_num_entries); func_id = MLX5_GET(manage_pages_in, in, function_id); + ec_function = MLX5_GET(manage_pages_in, in, embedded_cpu_function); - root = xa_load(&dev->priv.page_root_xa, func_id); + root = xa_load(&dev->priv.page_root_xa, get_function(func_id, ec_function)); if (WARN_ON_ONCE(!root)) return -EEXIST; @@ -473,9 +482,10 @@ static int reclaim_pages_cmd(struct mlx5_core_dev *dev, return 0; } -static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, +static int reclaim_pages(struct mlx5_core_dev *dev, u16 func_id, int npages, int *nclaimed, bool ec_function) { + u32 function = get_function(func_id, ec_function); int outlen = MLX5_ST_SZ_BYTES(manage_pages_out); u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {}; int num_claimed; @@ -514,7 +524,7 @@ static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages, } for (i = 0; i < num_claimed; i++) - free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), func_id); + free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]), function); if (nclaimed) *nclaimed = num_claimed; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index c6c5826aba41..1892cea05ee7 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -157,6 +157,7 @@ mlxsw_sp1_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry) static const struct mlxsw_sp_span_entry_ops mlxsw_sp1_span_entry_ops_cpu = { + .is_static = true, .can_handle = mlxsw_sp1_span_cpu_can_handle, .parms_set = mlxsw_sp1_span_entry_cpu_parms, .configure = mlxsw_sp1_span_entry_cpu_configure, @@ -214,6 +215,7 @@ mlxsw_sp_span_entry_phys_deconfigure(struct mlxsw_sp_span_entry *span_entry) static const struct mlxsw_sp_span_entry_ops 
mlxsw_sp_span_entry_ops_phys = { + .is_static = true, .can_handle = mlxsw_sp_port_dev_check, .parms_set = mlxsw_sp_span_entry_phys_parms, .configure = mlxsw_sp_span_entry_phys_configure, @@ -721,6 +723,7 @@ mlxsw_sp2_span_entry_cpu_deconfigure(struct mlxsw_sp_span_entry *span_entry) static const struct mlxsw_sp_span_entry_ops mlxsw_sp2_span_entry_ops_cpu = { + .is_static = true, .can_handle = mlxsw_sp2_span_cpu_can_handle, .parms_set = mlxsw_sp2_span_entry_cpu_parms, .configure = mlxsw_sp2_span_entry_cpu_configure, @@ -1036,6 +1039,9 @@ static void mlxsw_sp_span_respin_work(struct work_struct *work) if (!refcount_read(&curr->ref_count)) continue; + if (curr->ops->is_static) + continue; + err = curr->ops->parms_set(mlxsw_sp, curr->to_dev, &sparms); if (err) continue; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h index d907718bc8c5..aa1cd409c0e2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h @@ -60,6 +60,7 @@ struct mlxsw_sp_span_entry { }; struct mlxsw_sp_span_entry_ops { + bool is_static; bool (*can_handle)(const struct net_device *to_dev); int (*parms_set)(struct mlxsw_sp *mlxsw_sp, const struct net_device *to_dev, diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 0b9992bd6626..c072eb5c0764 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -60,14 +60,27 @@ int ocelot_mact_learn(struct ocelot *ocelot, int port, const unsigned char mac[ETH_ALEN], unsigned int vid, enum macaccess_entry_type type) { + u32 cmd = ANA_TABLES_MACACCESS_VALID | + ANA_TABLES_MACACCESS_DEST_IDX(port) | + ANA_TABLES_MACACCESS_ENTRYTYPE(type) | + ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN); + unsigned int mc_ports; + + /* Set MAC_CPU_COPY if the CPU port is used by a multicast entry */ + if (type == ENTRYTYPE_MACv4) + mc_ports = (mac[1] << 8) | mac[2]; + else if (type == ENTRYTYPE_MACv6) + mc_ports = (mac[0] << 8) | mac[1]; + else + mc_ports = 0; + + if (mc_ports & BIT(ocelot->num_phys_ports)) + cmd |= ANA_TABLES_MACACCESS_MAC_CPU_COPY; + ocelot_mact_select(ocelot, mac, vid); /* Issue a write command */ - ocelot_write(ocelot, ANA_TABLES_MACACCESS_VALID | - ANA_TABLES_MACACCESS_DEST_IDX(port) | - ANA_TABLES_MACACCESS_ENTRYTYPE(type) | - ANA_TABLES_MACACCESS_MAC_TABLE_CMD(MACACCESS_CMD_LEARN), - ANA_TABLES_MACACCESS); + ocelot_write(ocelot, cmd, ANA_TABLES_MACACCESS); return ocelot_mact_wait_for_completion(ocelot); } @@ -362,6 +375,60 @@ static void ocelot_vlan_init(struct ocelot *ocelot) } } +static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port) +{ + return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port); +} + +int ocelot_port_flush(struct ocelot *ocelot, int port) +{ + int err, val; + + /* Disable dequeuing from the egress queues */ + ocelot_rmw_rix(ocelot, QSYS_PORT_MODE_DEQUEUE_DIS, + QSYS_PORT_MODE_DEQUEUE_DIS, + QSYS_PORT_MODE, port); + + /* Disable flow control */ + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0); + + /* Disable priority flow control */ + ocelot_fields_write(ocelot, port, + QSYS_SWITCH_PORT_MODE_TX_PFC_ENA, 0); + + /* Wait at least the time it takes to receive a frame of maximum length + * at the port. + * Worst-case delays for 10 kilobyte jumbo frames are: + * 8 ms on a 10M port + * 800 μs on a 100M port + * 80 μs on a 1G port + * 32 μs on a 2.5G port + */ + usleep_range(8000, 10000); + + /* Disable half duplex backpressure. 
*/ + ocelot_rmw_rix(ocelot, 0, SYS_FRONT_PORT_MODE_HDX_MODE, + SYS_FRONT_PORT_MODE, port); + + /* Flush the queues associated with the port. */ + ocelot_rmw_gix(ocelot, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG_FLUSH_ENA, + REW_PORT_CFG, port); + + /* Enable dequeuing from the egress queues. */ + ocelot_rmw_rix(ocelot, 0, QSYS_PORT_MODE_DEQUEUE_DIS, QSYS_PORT_MODE, + port); + + /* Wait until flushing is complete. */ + err = read_poll_timeout(ocelot_read_eq_avail, val, !val, + 100, 2000000, false, ocelot, port); + + /* Clear flushing again. */ + ocelot_rmw_gix(ocelot, 0, REW_PORT_CFG_FLUSH_ENA, REW_PORT_CFG, port); + + return err; +} +EXPORT_SYMBOL(ocelot_port_flush); + void ocelot_adjust_link(struct ocelot *ocelot, int port, struct phy_device *phydev) { diff --git a/drivers/net/ethernet/mscc/ocelot_io.c b/drivers/net/ethernet/mscc/ocelot_io.c index 0acb45948418..ea4e83410fe4 100644 --- a/drivers/net/ethernet/mscc/ocelot_io.c +++ b/drivers/net/ethernet/mscc/ocelot_io.c @@ -71,6 +71,14 @@ void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg) } EXPORT_SYMBOL(ocelot_port_writel); +void ocelot_port_rmwl(struct ocelot_port *port, u32 val, u32 mask, u32 reg) +{ + u32 cur = ocelot_port_readl(port, reg); + + ocelot_port_writel(port, (cur & (~mask)) | val, reg); +} +EXPORT_SYMBOL(ocelot_port_rmwl); + u32 __ocelot_target_read_ix(struct ocelot *ocelot, enum ocelot_target target, u32 reg, u32 offset) { diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 2bd2840d88bd..42230f92ca9c 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -1042,10 +1042,8 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct net_device *dev = netdev_notifier_info_to_dev(ptr); int ret = 0; - if (!ocelot_netdevice_dev_check(dev)) - return 0; - if (event == NETDEV_PRECHANGEUPPER && + ocelot_netdevice_dev_check(dev) && netif_is_lag_master(info->upper_dev)) { struct netdev_lag_upper_info *lag_upper_info = info->upper_info; struct netlink_ext_ack *extack; diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index a569abe7f5ef..0d78408b4e26 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -4046,17 +4046,72 @@ err_out: return -EIO; } -static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp) +static bool rtl_skb_is_udp(struct sk_buff *skb) +{ + int no = skb_network_offset(skb); + struct ipv6hdr *i6h, _i6h; + struct iphdr *ih, _ih; + + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + ih = skb_header_pointer(skb, no, sizeof(_ih), &_ih); + return ih && ih->protocol == IPPROTO_UDP; + case htons(ETH_P_IPV6): + i6h = skb_header_pointer(skb, no, sizeof(_i6h), &_i6h); + return i6h && i6h->nexthdr == IPPROTO_UDP; + default: + return false; + } +} + +#define RTL_MIN_PATCH_LEN 47 + +/* see rtl8125_get_patch_pad_len() in r8125 vendor driver */ +static unsigned int rtl8125_quirk_udp_padto(struct rtl8169_private *tp, + struct sk_buff *skb) { + unsigned int padto = 0, len = skb->len; + + if (rtl_is_8125(tp) && len < 128 + RTL_MIN_PATCH_LEN && + rtl_skb_is_udp(skb) && skb_transport_header_was_set(skb)) { + unsigned int trans_data_len = skb_tail_pointer(skb) - + skb_transport_header(skb); + + if (trans_data_len >= offsetof(struct udphdr, len) && + trans_data_len < RTL_MIN_PATCH_LEN) { + u16 dest = ntohs(udp_hdr(skb)->dest); + + /* dest is a standard PTP port */ + if (dest == 319 || dest == 320) + padto = len + 
RTL_MIN_PATCH_LEN - trans_data_len; + } + + if (trans_data_len < sizeof(struct udphdr)) + padto = max_t(unsigned int, padto, + len + sizeof(struct udphdr) - trans_data_len); + } + + return padto; +} + +static unsigned int rtl_quirk_packet_padto(struct rtl8169_private *tp, + struct sk_buff *skb) +{ + unsigned int padto; + + padto = rtl8125_quirk_udp_padto(tp, skb); + switch (tp->mac_version) { case RTL_GIGA_MAC_VER_34: case RTL_GIGA_MAC_VER_60: case RTL_GIGA_MAC_VER_61: case RTL_GIGA_MAC_VER_63: - return true; + padto = max_t(unsigned int, padto, ETH_ZLEN); default: - return false; + break; } + + return padto; } static void rtl8169_tso_csum_v1(struct sk_buff *skb, u32 *opts) @@ -4128,9 +4183,10 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, opts[1] |= transport_offset << TCPHO_SHIFT; } else { - if (unlikely(skb->len < ETH_ZLEN && rtl_test_hw_pad_bug(tp))) - /* eth_skb_pad would free the skb on error */ - return !__skb_put_padto(skb, ETH_ZLEN, false); + unsigned int padto = rtl_quirk_packet_padto(tp, skb); + + /* skb_padto would free the skb on error */ + return !__skb_put_padto(skb, padto, false); } return true; @@ -4307,6 +4363,9 @@ static netdev_features_t rtl8169_features_check(struct sk_buff *skb, if (skb->len < ETH_ZLEN) features &= ~NETIF_F_CSUM_MASK; + if (rtl_quirk_packet_padto(tp, skb)) + features &= ~NETIF_F_CSUM_MASK; + if (transport_offset > TCPHO_MAX && rtl_chip_supports_csum_v2(tp)) features &= ~NETIF_F_CSUM_MASK; @@ -4645,10 +4704,10 @@ static int rtl8169_close(struct net_device *dev) cancel_work_sync(&tp->wk.work); - phy_disconnect(tp->phydev); - free_irq(pci_irq_vector(pdev, 0), tp); + phy_disconnect(tp->phydev); + dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray, tp->RxPhyAddr); dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray, diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index c63304632935..590b088bc4c7 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2606,10 +2606,10 @@ static int sh_eth_close(struct net_device *ndev) /* Free all the skbuffs in the Rx queue and the DMA buffer. 
*/ sh_eth_ring_free(ndev); - pm_runtime_put_sync(&mdp->pdev->dev); - mdp->is_opened = 0; + pm_runtime_put(&mdp->pdev->dev); + return 0; } @@ -3034,6 +3034,28 @@ static int sh_mdio_release(struct sh_eth_private *mdp) return 0; } +static int sh_mdiobb_read(struct mii_bus *bus, int phy, int reg) +{ + int res; + + pm_runtime_get_sync(bus->parent); + res = mdiobb_read(bus, phy, reg); + pm_runtime_put(bus->parent); + + return res; +} + +static int sh_mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) +{ + int res; + + pm_runtime_get_sync(bus->parent); + res = mdiobb_write(bus, phy, reg, val); + pm_runtime_put(bus->parent); + + return res; +} + /* MDIO bus init function */ static int sh_mdio_init(struct sh_eth_private *mdp, struct sh_eth_plat_data *pd) @@ -3058,6 +3080,10 @@ static int sh_mdio_init(struct sh_eth_private *mdp, if (!mdp->mii_bus) return -ENOMEM; + /* Wrap accessors with Runtime PM-aware ops */ + mdp->mii_bus->read = sh_mdiobb_read; + mdp->mii_bus->write = sh_mdiobb_write; + /* Hook up MII support for ethtool */ mdp->mii_bus->name = "sh_mii"; mdp->mii_bus->parent = dev; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c index 82b1c7a5a7a9..ba0e4d2b256a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel-plat.c @@ -129,7 +129,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to set tx_clk\n"); - return ret; + goto err_remove_config_dt; } } } @@ -143,7 +143,7 @@ static int intel_eth_plat_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "Failed to set clk_ptp_ref\n"); - return ret; + goto err_remove_config_dt; } } } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 9a6a519426a0..103d2448e9e0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -375,6 +375,7 @@ static int ehl_pse0_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { plat->bus_id = 2; + plat->addr64 = 32; return ehl_common_data(pdev, plat); } @@ -406,6 +407,7 @@ static int ehl_pse1_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { plat->bus_id = 3; + plat->addr64 = 32; return ehl_common_data(pdev, plat); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 8ed3b2c834a0..56985542e202 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -324,7 +324,12 @@ static int tc_setup_cbs(struct stmmac_priv *priv, priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_AVB; } else if (!qopt->enable) { - return stmmac_dma_qmode(priv, priv->ioaddr, queue, MTL_QUEUE_DCB); + ret = stmmac_dma_qmode(priv, priv->ioaddr, queue, + MTL_QUEUE_DCB); + if (ret) + return ret; + + priv->plat->tx_queues_cfg[queue].mode_to_use = MTL_QUEUE_DCB; } /* Port Transmit Rate and Speed Divider */ diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 2350342b961f..13bd48a75db7 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -1262,8 +1262,11 @@ static int netvsc_receive(struct net_device *ndev, ret = rndis_filter_receive(ndev, net_device, nvchan, data, buflen); - if (unlikely(ret != NVSP_STAT_SUCCESS)) + if (unlikely(ret != NVSP_STAT_SUCCESS)) { + /* Drop incomplete packet */ + 
nvchan->rsc.cnt = 0; status = NVSP_STAT_FAIL; + } } enq_receive_complete(ndev, net_device, q_idx, diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 598713c0d5a8..3aab2b867fc0 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -509,8 +509,6 @@ static int rndis_filter_receive_data(struct net_device *ndev, return ret; drop: - /* Drop incomplete packet */ - nvchan->rsc.cnt = 0; return NVSP_STAT_FAIL; } diff --git a/drivers/net/ipa/gsi.c b/drivers/net/ipa/gsi.c index 14d9a791924b..b77f5fef7aec 100644 --- a/drivers/net/ipa/gsi.c +++ b/drivers/net/ipa/gsi.c @@ -440,7 +440,7 @@ static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id) static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel) { u32 channel_id = gsi_channel_id(channel); - void *virt = channel->gsi->virt; + void __iomem *virt = channel->gsi->virt; u32 val; val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id)); @@ -1373,7 +1373,7 @@ static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count) /* Hardware requires a 2^n ring size, with alignment equal to size */ ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL); if (ring->virt && addr % size) { - dma_free_coherent(dev, size, ring->virt, ring->addr); + dma_free_coherent(dev, size, ring->virt, addr); dev_err(dev, "unable to alloc 0x%zx-aligned ring buffer\n", size); return -EINVAL; /* Not a good error value, but distinct */ @@ -1710,6 +1710,7 @@ static int gsi_channel_setup(struct gsi *gsi) if (!channel->gsi) continue; /* Ignore uninitialized channels */ + ret = -EINVAL; dev_err(gsi->dev, "channel %u not supported by hardware\n", channel_id - 1); channel_id = gsi->channel_count; diff --git a/drivers/net/ipa/ipa_endpoint.c b/drivers/net/ipa/ipa_endpoint.c index 9f4be9812a1f..612afece303f 100644 --- a/drivers/net/ipa/ipa_endpoint.c +++ b/drivers/net/ipa/ipa_endpoint.c @@ -588,7 +588,7 @@ static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) /* Note that HDR_ENDIANNESS indicates big endian header fields */ if (endpoint->data->qmap) - val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK); + val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK); iowrite32(val, endpoint->ipa->reg_virt + offset); } @@ -1164,8 +1164,8 @@ static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint, return true; if (!status->pkt_len) return true; - endpoint_id = u32_get_bits(status->endp_dst_idx, - IPA_STATUS_DST_IDX_FMASK); + endpoint_id = u8_get_bits(status->endp_dst_idx, + IPA_STATUS_DST_IDX_FMASK); if (endpoint_id != endpoint->endpoint_id) return true; diff --git a/drivers/net/ipa/ipa_mem.c b/drivers/net/ipa/ipa_mem.c index 0cc3a3374caa..f25029b9ec85 100644 --- a/drivers/net/ipa/ipa_mem.c +++ b/drivers/net/ipa/ipa_mem.c @@ -336,7 +336,7 @@ static void ipa_imem_exit(struct ipa *ipa) size = iommu_unmap(domain, ipa->imem_iova, ipa->imem_size); if (size != ipa->imem_size) - dev_warn(dev, "unmapped %zu IMEM bytes, expected %lu\n", + dev_warn(dev, "unmapped %zu IMEM bytes, expected %zu\n", size, ipa->imem_size); } else { dev_err(dev, "couldn't get IPA IOMMU domain for IMEM\n"); @@ -440,7 +440,7 @@ static void ipa_smem_exit(struct ipa *ipa) size = iommu_unmap(domain, ipa->smem_iova, ipa->smem_size); if (size != ipa->smem_size) - dev_warn(dev, "unmapped %zu SMEM bytes, expected %lu\n", + dev_warn(dev, "unmapped %zu SMEM bytes, expected %zu\n", size, ipa->smem_size); } else { diff --git a/drivers/net/mdio/mdio-bitbang.c 
b/drivers/net/mdio/mdio-bitbang.c index 5136275c8e73..d3915f831854 100644 --- a/drivers/net/mdio/mdio-bitbang.c +++ b/drivers/net/mdio/mdio-bitbang.c @@ -149,7 +149,7 @@ static int mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, u32 addr) return dev_addr; } -static int mdiobb_read(struct mii_bus *bus, int phy, int reg) +int mdiobb_read(struct mii_bus *bus, int phy, int reg) { struct mdiobb_ctrl *ctrl = bus->priv; int ret, i; @@ -180,8 +180,9 @@ static int mdiobb_read(struct mii_bus *bus, int phy, int reg) mdiobb_get_bit(ctrl); return ret; } +EXPORT_SYMBOL(mdiobb_read); -static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) +int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) { struct mdiobb_ctrl *ctrl = bus->priv; @@ -201,6 +202,7 @@ static int mdiobb_write(struct mii_bus *bus, int phy, int reg, u16 val) mdiobb_get_bit(ctrl); return 0; } +EXPORT_SYMBOL(mdiobb_write); struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) { diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index c19dac21c468..dd7917cab2b1 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -992,7 +992,8 @@ static void __team_compute_features(struct team *team) unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM; - list_for_each_entry(port, &team->port_list, list) { + rcu_read_lock(); + list_for_each_entry_rcu(port, &team->port_list, list) { vlan_features = netdev_increment_features(vlan_features, port->dev->vlan_features, TEAM_VLAN_FEATURES); @@ -1006,6 +1007,7 @@ static void __team_compute_features(struct team *team) if (port->dev->hard_header_len > max_hard_header_len) max_hard_header_len = port->dev->hard_header_len; } + rcu_read_unlock(); team->dev->vlan_features = vlan_features; team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL | @@ -1020,9 +1022,7 @@ static void __team_compute_features(struct team *team) static void team_compute_features(struct team *team) { - mutex_lock(&team->lock); __team_compute_features(team); - mutex_unlock(&team->lock); netdev_change_features(team->dev); } diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 6aaa0675c28a..a9b551028659 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -969,6 +969,12 @@ static const struct usb_device_id products[] = { USB_CDC_PROTO_NONE), .driver_info = (unsigned long)&wwan_info, }, { + /* Cinterion PLS83/PLS63 modem by GEMALTO/THALES */ + USB_DEVICE_AND_INTERFACE_INFO(0x1e2d, 0x0069, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = (unsigned long)&wwan_info, +}, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &cdc_info, diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index 5a78848db93f..291e76d32abe 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -1827,6 +1827,15 @@ cdc_ncm_speed_change(struct usbnet *dev, uint32_t rx_speed = le32_to_cpu(data->DLBitRRate); uint32_t tx_speed = le32_to_cpu(data->ULBitRate); + /* if the speed hasn't changed, don't report it. + * RTL8156 shipped before 2021 sends notification about every 32ms. + */ + if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed) + return; + + dev->rx_speed = rx_speed; + dev->tx_speed = tx_speed; + /* * Currently the USB-NET API does not support reporting the actual * device speed. Do print it instead. 
@@ -1867,7 +1876,8 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb) * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE. */ - usbnet_link_change(dev, !!event->wValue, 0); + if (netif_carrier_ok(dev->net) != !!event->wValue) + usbnet_link_change(dev, !!event->wValue, 0); break; case USB_CDC_NOTIFY_SPEED_CHANGE: diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index af19513a9f75..5a05add9b4e6 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1302,12 +1302,14 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)}, /* Olivetti Olicard 160 */ {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */ {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */ + {QMI_QUIRK_SET_DTR(0x1e2d, 0x006f, 8)}, /* Cinterion PLS83/PLS63 */ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */ {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */ {QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/ {QMI_QUIRK_SET_DTR(0x1e2d, 0x00b0, 4)}, /* Cinterion CLS8 */ + {QMI_FIXED_INTF(0x1e2d, 0x00b7, 0)}, /* Cinterion MV31 RmNet */ {QMI_FIXED_INTF(0x413c, 0x81a2, 8)}, /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a3, 8)}, /* Dell Wireless 5570 HSPA+ (42Mbps) Mobile Broadband Card */ {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c index bb164805804e..4aaa6388b9ee 100644 --- a/drivers/net/wan/hdlc_x25.c +++ b/drivers/net/wan/hdlc_x25.c @@ -169,11 +169,11 @@ static int x25_open(struct net_device *dev) result = lapb_register(dev, &cb); if (result != LAPB_OK) - return result; + return -ENOMEM; result = lapb_getparms(dev, &params); if (result != LAPB_OK) - return result; + return -EINVAL; if (state(hdlc)->settings.dce) params.mode = params.mode | LAPB_DCE; @@ -188,7 +188,7 @@ static int x25_open(struct net_device *dev) result = lapb_setparms(dev, &params); if (result != LAPB_OK) - return result; + return -EINVAL; return 0; } diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index a84bb9b6573f..e150d82eddb6 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -21,11 +21,9 @@ config ATH9K_BTCOEX_SUPPORT config ATH9K tristate "Atheros 802.11n wireless cards support" depends on MAC80211 && HAS_DMA + select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211 select ATH9K_HW select ATH9K_COMMON - imply NEW_LEDS - imply LEDS_CLASS - imply MAC80211_LEDS help This module adds support for wireless adapters based on Atheros IEEE 802.11n AR5008, AR9001 and AR9002 family @@ -176,11 +174,9 @@ config ATH9K_PCI_NO_EEPROM config ATH9K_HTC tristate "Atheros HTC based wireless cards support" depends on USB && MAC80211 + select MAC80211_LEDS if LEDS_CLASS=y || LEDS_CLASS=MAC80211 select ATH9K_HW select ATH9K_COMMON - imply NEW_LEDS - imply LEDS_CLASS - imply MAC80211_LEDS help Support for Atheros HTC based cards. 
Chipsets supported: AR9271 diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 7220fc8fd9b0..8280092066e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -314,6 +314,7 @@ const struct iwl_cfg_trans_params iwl_ma_trans_cfg = { const char iwl_ax101_name[] = "Intel(R) Wi-Fi 6 AX101"; const char iwl_ax200_name[] = "Intel(R) Wi-Fi 6 AX200 160MHz"; const char iwl_ax201_name[] = "Intel(R) Wi-Fi 6 AX201 160MHz"; +const char iwl_ax203_name[] = "Intel(R) Wi-Fi 6 AX203"; const char iwl_ax211_name[] = "Intel(R) Wi-Fi 6 AX211 160MHz"; const char iwl_ax411_name[] = "Intel(R) Wi-Fi 6 AX411 160MHz"; const char iwl_ma_name[] = "Intel(R) Wi-Fi 6"; @@ -340,6 +341,18 @@ const struct iwl_cfg iwl_qu_b0_hr1_b0 = { .num_rbds = IWL_NUM_RBDS_22000_HE, }; +const struct iwl_cfg iwl_qu_b0_hr_b0 = { + .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, + .num_rbds = IWL_NUM_RBDS_22000_HE, +}; + const struct iwl_cfg iwl_ax201_cfg_qu_hr = { .name = "Intel(R) Wi-Fi 6 AX201 160MHz", .fw_name_pre = IWL_QU_B_HR_B_FW_PRE, @@ -366,6 +379,18 @@ const struct iwl_cfg iwl_qu_c0_hr1_b0 = { .num_rbds = IWL_NUM_RBDS_22000_HE, }; +const struct iwl_cfg iwl_qu_c0_hr_b0 = { + .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, + .num_rbds = IWL_NUM_RBDS_22000_HE, +}; + const struct iwl_cfg iwl_ax201_cfg_qu_c0_hr_b0 = { .name = "Intel(R) Wi-Fi 6 AX201 160MHz", .fw_name_pre = IWL_QU_C_HR_B_FW_PRE, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 15248b064380..d8b7776a8dde 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -80,19 +80,45 @@ static void *iwl_acpi_get_dsm_object(struct device *dev, int rev, int func, } /* - * Evaluate a DSM with no arguments and a single u8 return value (inside a - * buffer object), verify and return that value. + * Generic function to evaluate a DSM with no arguments + * and an integer return value, + * (as an integer object or inside a buffer object), + * verify and assign the value in the "value" parameter. + * return 0 in success and the appropriate errno otherwise. */ -int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func) +static int iwl_acpi_get_dsm_integer(struct device *dev, int rev, int func, + u64 *value, size_t expected_size) { union acpi_object *obj; - int ret; + int ret = 0; obj = iwl_acpi_get_dsm_object(dev, rev, func, NULL); - if (IS_ERR(obj)) + if (IS_ERR(obj)) { + IWL_DEBUG_DEV_RADIO(dev, + "Failed to get DSM object. 
func= %d\n", + func); return -ENOENT; + } + + if (obj->type == ACPI_TYPE_INTEGER) { + *value = obj->integer.value; + } else if (obj->type == ACPI_TYPE_BUFFER) { + __le64 le_value = 0; - if (obj->type != ACPI_TYPE_BUFFER) { + if (WARN_ON_ONCE(expected_size > sizeof(le_value))) + return -EINVAL; + + /* if the buffer size doesn't match the expected size */ + if (obj->buffer.length != expected_size) + IWL_DEBUG_DEV_RADIO(dev, + "ACPI: DSM invalid buffer size, padding or truncating (%d)\n", + obj->buffer.length); + + /* assuming LE from Intel BIOS spec */ + memcpy(&le_value, obj->buffer.pointer, + min_t(size_t, expected_size, (size_t)obj->buffer.length)); + *value = le64_to_cpu(le_value); + } else { IWL_DEBUG_DEV_RADIO(dev, "ACPI: DSM method did not return a valid object, type=%d\n", obj->type); @@ -100,15 +126,6 @@ int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func) goto out; } - if (obj->buffer.length != sizeof(u8)) { - IWL_DEBUG_DEV_RADIO(dev, - "ACPI: DSM method returned invalid buffer, length=%d\n", - obj->buffer.length); - ret = -EINVAL; - goto out; - } - - ret = obj->buffer.pointer[0]; IWL_DEBUG_DEV_RADIO(dev, "ACPI: DSM method evaluated: func=%d, ret=%d\n", func, ret); @@ -116,6 +133,24 @@ out: ACPI_FREE(obj); return ret; } + +/* + * Evaluate a DSM with no arguments and a u8 return value, + */ +int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value) +{ + int ret; + u64 val; + + ret = iwl_acpi_get_dsm_integer(dev, rev, func, &val, sizeof(u8)); + + if (ret < 0) + return ret; + + /* cast val (u64) to be u8 */ + *value = (u8)val; + return 0; +} IWL_EXPORT_SYMBOL(iwl_acpi_get_dsm_u8); union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h index 042dd247d387..1cce30d1ef55 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020 Intel Corporation + * Copyright (C) 2018-2021 Intel Corporation */ #ifndef __iwl_fw_acpi__ #define __iwl_fw_acpi__ @@ -99,7 +99,7 @@ struct iwl_fw_runtime; void *iwl_acpi_get_object(struct device *dev, acpi_string method); -int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func); +int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value); union acpi_object *iwl_acpi_get_wifi_pkg(struct device *dev, union acpi_object *data, @@ -159,7 +159,8 @@ static inline void *iwl_acpi_get_dsm_object(struct device *dev, int rev, return ERR_PTR(-ENOENT); } -static inline int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func) +static inline +int iwl_acpi_get_dsm_u8(struct device *dev, int rev, int func, u8 *value) { return -ENOENT; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c index 6d8f7bff1243..895a907acdf0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/pnvm.c @@ -224,40 +224,46 @@ static int iwl_pnvm_parse(struct iwl_trans *trans, const u8 *data, int iwl_pnvm_load(struct iwl_trans *trans, struct iwl_notif_wait_data *notif_wait) { - const struct firmware *pnvm; struct iwl_notification_wait pnvm_wait; static const u16 ntf_cmds[] = { WIDE_ID(REGULATORY_AND_NVM_GROUP, PNVM_INIT_COMPLETE_NTFY) }; - char pnvm_name[64]; - int ret; /* if the SKU_ID is empty, there's nothing to do */ if (!trans->sku_id[0] && 
!trans->sku_id[1] && !trans->sku_id[2]) return 0; - /* if we already have it, nothing to do either */ - if (trans->pnvm_loaded) - return 0; + /* load from disk only if we haven't done it (or tried) before */ + if (!trans->pnvm_loaded) { + const struct firmware *pnvm; + char pnvm_name[64]; + int ret; + + /* + * The prefix unfortunately includes a hyphen at the end, so + * don't add the dot here... + */ + snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm", + trans->cfg->fw_name_pre); + + /* ...but replace the hyphen with the dot here. */ + if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name)) + pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.'; + + ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev); + if (ret) { + IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n", + pnvm_name, ret); + /* + * Pretend we've loaded it - at least we've tried and + * couldn't load it at all, so there's no point in + * trying again over and over. + */ + trans->pnvm_loaded = true; + } else { + iwl_pnvm_parse(trans, pnvm->data, pnvm->size); - /* - * The prefix unfortunately includes a hyphen at the end, so - * don't add the dot here... - */ - snprintf(pnvm_name, sizeof(pnvm_name), "%spnvm", - trans->cfg->fw_name_pre); - - /* ...but replace the hyphen with the dot here. */ - if (strlen(trans->cfg->fw_name_pre) < sizeof(pnvm_name)) - pnvm_name[strlen(trans->cfg->fw_name_pre) - 1] = '.'; - - ret = firmware_request_nowarn(&pnvm, pnvm_name, trans->dev); - if (ret) { - IWL_DEBUG_FW(trans, "PNVM file %s not found %d\n", - pnvm_name, ret); - } else { - iwl_pnvm_parse(trans, pnvm->data, pnvm->size); - - release_firmware(pnvm); + release_firmware(pnvm); + } } iwl_init_notification_wait(notif_wait, &pnvm_wait, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 27cb0406ba9a..86e1d57df65e 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2020 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __IWL_CONFIG_H__ @@ -445,7 +445,7 @@ struct iwl_cfg { #define IWL_CFG_CORES_BT_GNSS 0x5 #define IWL_SUBDEVICE_RF_ID(subdevice) ((u16)((subdevice) & 0x00F0) >> 4) -#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0100) >> 9) +#define IWL_SUBDEVICE_NO_160(subdevice) ((u16)((subdevice) & 0x0200) >> 9) #define IWL_SUBDEVICE_CORES(subdevice) ((u16)((subdevice) & 0x1C00) >> 10) struct iwl_dev_info { @@ -491,6 +491,7 @@ extern const char iwl9260_killer_1550_name[]; extern const char iwl9560_killer_1550i_name[]; extern const char iwl9560_killer_1550s_name[]; extern const char iwl_ax200_name[]; +extern const char iwl_ax203_name[]; extern const char iwl_ax201_name[]; extern const char iwl_ax101_name[]; extern const char iwl_ax200_killer_1650w_name[]; @@ -574,6 +575,8 @@ extern const struct iwl_cfg iwl9560_2ac_cfg_soc; extern const struct iwl_cfg iwl_qu_b0_hr1_b0; extern const struct iwl_cfg iwl_qu_c0_hr1_b0; extern const struct iwl_cfg iwl_quz_a0_hr1_b0; +extern const struct iwl_cfg iwl_qu_b0_hr_b0; +extern const struct iwl_cfg iwl_qu_c0_hr_b0; extern const struct iwl_cfg iwl_ax200_cfg_cc; extern const struct iwl_cfg iwl_ax201_cfg_qu_hr; extern const struct iwl_cfg iwl_ax201_cfg_qu_hr; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 
a654147d3cd6..a80a35a7740f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -180,13 +180,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans, if (le32_to_cpu(tlv->length) < sizeof(*reg)) return -EINVAL; - /* For safe using a string from FW make sure we have a - * null terminator - */ - reg->name[IWL_FW_INI_MAX_NAME - 1] = 0; - - IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name); - if (id >= IWL_FW_INI_MAX_REGION_ID) { IWL_ERR(trans, "WRT: Invalid region id %u\n", id); return -EINVAL; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index 2ac20d0a30eb..2b7ef1583e7f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -150,16 +150,17 @@ u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs) } IWL_EXPORT_SYMBOL(iwl_read_prph); -void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) +void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs, u32 val, u32 delay_ms) { unsigned long flags; if (iwl_trans_grab_nic_access(trans, &flags)) { + mdelay(delay_ms); iwl_write_prph_no_grab(trans, ofs, val); iwl_trans_release_nic_access(trans, &flags); } } -IWL_EXPORT_SYMBOL(iwl_write_prph); +IWL_EXPORT_SYMBOL(iwl_write_prph_delay); int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout) @@ -219,8 +220,8 @@ IWL_EXPORT_SYMBOL(iwl_clear_bits_prph); void iwl_force_nmi(struct iwl_trans *trans) { if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_9000) - iwl_write_prph(trans, DEVICE_SET_NMI_REG, - DEVICE_SET_NMI_VAL_DRV); + iwl_write_prph_delay(trans, DEVICE_SET_NMI_REG, + DEVICE_SET_NMI_VAL_DRV, 1); else if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) iwl_write_umac_prph(trans, UREG_NIC_SET_NMI_DRIVER, UREG_NIC_SET_NMI_DRIVER_NMI_FROM_DRIVER); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.h b/drivers/net/wireless/intel/iwlwifi/iwl-io.h index 39bceee4e9e7..3c21c0e081f8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2018-2019 Intel Corporation + * Copyright (C) 2018-2020 Intel Corporation */ #ifndef __iwl_io_h__ #define __iwl_io_h__ @@ -37,7 +37,13 @@ u32 iwl_read_prph_no_grab(struct iwl_trans *trans, u32 ofs); u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs); void iwl_write_prph_no_grab(struct iwl_trans *trans, u32 ofs, u32 val); void iwl_write_prph64_no_grab(struct iwl_trans *trans, u64 ofs, u64 val); -void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val); +void iwl_write_prph_delay(struct iwl_trans *trans, u32 ofs, + u32 val, u32 delay_ms); +static inline void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) +{ + iwl_write_prph_delay(trans, ofs, val, 0); +} + int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr, u32 bits, u32 mask, int timeout); void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 0b03fdedc1f7..1158e256f601 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -301,6 +301,12 @@ #define RADIO_RSP_ADDR_POS (6) #define RADIO_RSP_RD_CMD (3) +/* LTR control (Qu only) */ +#define HPM_MAC_LTR_CSR 0xa0348c +#define HPM_MAC_LRT_ENABLE_ALL 0xf +/* also uses CSR_LTR_* for values 
*/ +#define HPM_UMAC_LTR 0xa03480 + /* FW monitor */ #define MON_BUFF_SAMPLE_CTL (0xa03c00) #define MON_BUFF_BASE_ADDR (0xa03c1c) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index c025188fa9bc..df018972a46b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2020 Intel Corporation + * Copyright (C) 2012-2014, 2018-2021 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -2032,8 +2032,6 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) mutex_lock(&mvm->mutex); - clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); - /* get the BSS vif pointer again */ vif = iwl_mvm_get_bss_vif(mvm); if (IS_ERR_OR_NULL(vif)) @@ -2148,6 +2146,8 @@ out_iterate: iwl_mvm_d3_disconnect_iter, keep ? vif : NULL); out: + clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); + /* no need to reset the device in unified images, if successful */ if (unified_image && !ret) { /* nothing else to do if we already sent D0I3_END_CMD */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 573e46956c14..38d0bfb649cc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -459,7 +459,10 @@ static ssize_t iwl_dbgfs_os_device_timediff_read(struct file *file, const size_t bufsz = sizeof(buf); int pos = 0; + mutex_lock(&mvm->mutex); iwl_mvm_get_sync_time(mvm, &curr_gp2, &curr_os); + mutex_unlock(&mvm->mutex); + do_div(curr_os, NSEC_PER_USEC); diff = curr_os - curr_gp2; pos += scnprintf(buf + pos, bufsz - pos, "diff=%lld\n", diff); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 0637eb1cff4e..313e9f106f46 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -1090,20 +1090,22 @@ static void iwl_mvm_tas_init(struct iwl_mvm *mvm) static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm) { + u8 value; + int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0, - DSM_FUNC_ENABLE_INDONESIA_5G2); + DSM_FUNC_ENABLE_INDONESIA_5G2, &value); if (ret < 0) IWL_DEBUG_RADIO(mvm, "Failed to evaluate DSM function ENABLE_INDONESIA_5G2, ret=%d\n", ret); - else if (ret >= DSM_VALUE_INDONESIA_MAX) + else if (value >= DSM_VALUE_INDONESIA_MAX) IWL_DEBUG_RADIO(mvm, - "DSM function ENABLE_INDONESIA_5G2 return invalid value, ret=%d\n", - ret); + "DSM function ENABLE_INDONESIA_5G2 return invalid value, value=%d\n", + value); - else if (ret == DSM_VALUE_INDONESIA_ENABLE) { + else if (value == DSM_VALUE_INDONESIA_ENABLE) { IWL_DEBUG_RADIO(mvm, "Evaluated DSM function ENABLE_INDONESIA_5G2: Enabling 5g2\n"); return DSM_VALUE_INDONESIA_ENABLE; @@ -1114,25 +1116,26 @@ static u8 iwl_mvm_eval_dsm_indonesia_5g2(struct iwl_mvm *mvm) static u8 iwl_mvm_eval_dsm_disable_srd(struct iwl_mvm *mvm) { + u8 value; int ret = iwl_acpi_get_dsm_u8((&mvm->fwrt)->dev, 0, - DSM_FUNC_DISABLE_SRD); + DSM_FUNC_DISABLE_SRD, &value); if (ret < 0) IWL_DEBUG_RADIO(mvm, "Failed to evaluate DSM function DISABLE_SRD, ret=%d\n", ret); - else if (ret >= DSM_VALUE_SRD_MAX) + else if (value >= DSM_VALUE_SRD_MAX) IWL_DEBUG_RADIO(mvm, - "DSM function DISABLE_SRD return invalid value, ret=%d\n", - ret); + "DSM function DISABLE_SRD return invalid value, 
value=%d\n", + value); - else if (ret == DSM_VALUE_SRD_PASSIVE) { + else if (value == DSM_VALUE_SRD_PASSIVE) { IWL_DEBUG_RADIO(mvm, "Evaluated DSM function DISABLE_SRD: setting SRD to passive\n"); return DSM_VALUE_SRD_PASSIVE; - } else if (ret == DSM_VALUE_SRD_DISABLE) { + } else if (value == DSM_VALUE_SRD_DISABLE) { IWL_DEBUG_RADIO(mvm, "Evaluated DSM function DISABLE_SRD: disabling SRD\n"); return DSM_VALUE_SRD_DISABLE; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index da32937ba9a7..43ff0407916a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -4194,6 +4194,9 @@ static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, iwl_mvm_binding_remove_vif(mvm, vif); out: + if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) && + switching_chanctx) + return; mvmvif->phy_ctxt = NULL; iwl_mvm_power_update_mac(mvm); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 98f62d78cf9c..61618f607927 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -791,6 +791,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (!mvm->scan_cmd) goto out_free; + /* invalidate ids to prevent accidental removal of sta_id 0 */ + mvm->aux_sta.sta_id = IWL_MVM_INVALID_STA; + mvm->snif_sta.sta_id = IWL_MVM_INVALID_STA; + /* Set EBS as successful as long as not stated otherwise by the FW. */ mvm->last_ebs_successful = true; @@ -1205,6 +1209,7 @@ static void iwl_mvm_reprobe_wk(struct work_struct *wk) reprobe = container_of(wk, struct iwl_mvm_reprobe, work); if (device_reprobe(reprobe->dev)) dev_err(reprobe->dev, "reprobe failed!\n"); + put_device(reprobe->dev); kfree(reprobe); module_put(THIS_MODULE); } @@ -1255,7 +1260,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) module_put(THIS_MODULE); return; } - reprobe->dev = mvm->trans->dev; + reprobe->dev = get_device(mvm->trans->dev); INIT_WORK(&reprobe->work, iwl_mvm_reprobe_wk); schedule_work(&reprobe->work); } else if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index dc174410bf9c..578c353ae02c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2057,6 +2057,9 @@ int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) lockdep_assert_held(&mvm->mutex); + if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) + return -EINVAL; + iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); if (ret) @@ -2071,6 +2074,9 @@ int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) lockdep_assert_held(&mvm->mutex); + if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) + return -EINVAL; + iwl_mvm_disable_txq(mvm, NULL, mvm->aux_queue, IWL_MAX_TID_COUNT, 0); ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); if (ret) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index a983c215df31..3712adc3ccc2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -773,6 +773,7 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, next = skb_gso_segment(skb, netdev_flags); 
skb_shinfo(skb)->gso_size = mss; + skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6; if (WARN_ON_ONCE(IS_ERR(next))) return -EINVAL; else if (next) @@ -795,6 +796,8 @@ iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes, if (tcp_payload_len > mss) { skb_shinfo(tmp)->gso_size = mss; + skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 : + SKB_GSO_TCPV6; } else { if (qos) { u8 *qc; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index 36bf414a388a..5b5134dd49af 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -75,6 +75,15 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, const struct fw_img *fw) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ | + u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, + CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) | + u32_encode_bits(250, + CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) | + CSR_LTR_LONG_VAL_AD_SNOOP_REQ | + u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, + CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) | + u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL); struct iwl_context_info_gen3 *ctxt_info_gen3; struct iwl_prph_scratch *prph_scratch; struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl; @@ -189,8 +198,10 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, /* Allocate IML */ iml_img = dma_alloc_coherent(trans->dev, trans->iml_len, &trans_pcie->iml_dma_addr, GFP_KERNEL); - if (!iml_img) - return -ENOMEM; + if (!iml_img) { + ret = -ENOMEM; + goto err_free_ctxt_info; + } memcpy(iml_img, trans->iml, trans->iml_len); @@ -206,23 +217,19 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA); - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { - /* - * The firmware initializes this again later (to a smaller - * value), but for the boot process initialize the LTR to - * ~250 usec. - */ - u32 val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ | - u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, - CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) | - u32_encode_bits(250, - CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) | - CSR_LTR_LONG_VAL_AD_SNOOP_REQ | - u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC, - CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) | - u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL); - - iwl_write32(trans, CSR_LTR_LONG_VAL_AD, val); + /* + * To workaround hardware latency issues during the boot process, + * initialize the LTR to ~250 usec (see ltr_val above). + * The firmware initializes this again later (to a smaller value). 
+ */ + if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 || + trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) && + !trans->trans_cfg->integrated) { + iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val); + } else if (trans->trans_cfg->integrated && + trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) { + iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL); + iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val); } if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) @@ -232,6 +239,11 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, return 0; +err_free_ctxt_info: + dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), + trans_pcie->ctxt_info_gen3, + trans_pcie->ctxt_info_dma_addr); + trans_pcie->ctxt_info_gen3 = NULL; err_free_prph_info: dma_free_coherent(trans->dev, sizeof(*prph_info), @@ -294,6 +306,9 @@ int iwl_trans_pcie_ctx_info_gen3_set_pnvm(struct iwl_trans *trans, return ret; } + if (WARN_ON(prph_sc_ctrl->pnvm_cfg.pnvm_size)) + return -EBUSY; + prph_sc_ctrl->pnvm_cfg.pnvm_base_addr = cpu_to_le64(trans_pcie->pnvm_dram.physical); prph_sc_ctrl->pnvm_cfg.pnvm_size = diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 965982612e74..ed3f5b7aa71e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -910,6 +910,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_qu_b0_hr1_b0, iwl_ax101_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, + iwl_qu_b0_hr_b0, iwl_ax203_name), /* Qu C step */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, @@ -917,6 +922,11 @@ static const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_RF_TYPE_HR1, IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, iwl_qu_c0_hr1_b0, iwl_ax101_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_QU, SILICON_C_STEP, + IWL_CFG_RF_TYPE_HR2, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, + iwl_qu_c0_hr_b0, iwl_ax203_name), /* QuZ */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 285e0d586021..ab93a848a466 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2107,7 +2107,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, while (offs < dwords) { /* limit the time we spin here under lock to 1/2s */ - ktime_t timeout = ktime_add_us(ktime_get(), 500 * USEC_PER_MSEC); + unsigned long end = jiffies + HZ / 2; + bool resched = false; if (iwl_trans_grab_nic_access(trans, &flags)) { iwl_write32(trans, HBUS_TARG_MEM_RADDR, @@ -2118,14 +2119,15 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, HBUS_TARG_MEM_RDAT); offs++; - /* calling ktime_get is expensive so - * do it once in 128 reads - */ - if (offs % 128 == 0 && ktime_after(ktime_get(), - timeout)) + if (time_after(jiffies, end)) { + resched = true; break; + } } iwl_trans_release_nic_access(trans, &flags); + + if (resched) + cond_resched(); } else { return -EBUSY; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 5dda0015522d..83f4964f3cb2 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -201,6 
+201,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans->txqs.txq[txq_id]; + if (!txq) { + IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); + return; + } + spin_lock_bh(&txq->lock); while (txq->write_ptr != txq->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c index 27eea909e32d..7ff1bb0ccc9c 100644 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/queue/tx.c @@ -142,26 +142,25 @@ void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) * idx is bounded by n_window */ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); + struct sk_buff *skb; lockdep_assert_held(&txq->lock); + if (!txq->entries) + return; + iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, iwl_txq_get_tfd(trans, txq, idx)); - /* free SKB */ - if (txq->entries) { - struct sk_buff *skb; - - skb = txq->entries[idx].skb; + skb = txq->entries[idx].skb; - /* Can be called from irqs-disabled context - * If skb is not NULL, it means that the whole queue is being - * freed and that the queue is not empty - free the skb - */ - if (skb) { - iwl_op_mode_free_skb(trans->op_mode, skb); - txq->entries[idx].skb = NULL; - } + /* Can be called from irqs-disabled context + * If skb is not NULL, it means that the whole queue is being + * freed and that the queue is not empty - free the skb + */ + if (skb) { + iwl_op_mode_free_skb(trans->op_mode, skb); + txq->entries[idx].skb = NULL; } } @@ -841,10 +840,8 @@ void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id) int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); struct sk_buff *skb = txq->entries[idx].skb; - if (WARN_ON_ONCE(!skb)) - continue; - - iwl_txq_free_tso_page(trans, skb); + if (!WARN_ON_ONCE(!skb)) + iwl_txq_free_tso_page(trans, skb); } iwl_txq_gen2_free_tfd(trans, txq); txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); @@ -1494,28 +1491,28 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) */ int rd_ptr = txq->read_ptr; int idx = iwl_txq_get_cmd_index(txq, rd_ptr); + struct sk_buff *skb; lockdep_assert_held(&txq->lock); + if (!txq->entries) + return; + /* We have only q->n_window txq->entries, but we use * TFD_QUEUE_SIZE_MAX tfds */ iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr); /* free SKB */ - if (txq->entries) { - struct sk_buff *skb; - - skb = txq->entries[idx].skb; + skb = txq->entries[idx].skb; - /* Can be called from irqs-disabled context - * If skb is not NULL, it means that the whole queue is being - * freed and that the queue is not empty - free the skb - */ - if (skb) { - iwl_op_mode_free_skb(trans->op_mode, skb); - txq->entries[idx].skb = NULL; - } + /* Can be called from irqs-disabled context + * If skb is not NULL, it means that the whole queue is being + * freed and that the queue is not empty - free the skb + */ + if (skb) { + iwl_op_mode_free_skb(trans->op_mode, skb); + txq->entries[idx].skb = NULL; } } diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index 73eeb00d5aa6..e81dfaf99bcb 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -509,15 +509,17 @@ static void mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data, int len, bool more) { - struct page *page = 
virt_to_head_page(data); - int offset = data - page_address(page); struct sk_buff *skb = q->rx_head; struct skb_shared_info *shinfo = skb_shinfo(skb); if (shinfo->nr_frags < ARRAY_SIZE(shinfo->frags)) { - offset += q->buf_offset; + struct page *page = virt_to_head_page(data); + int offset = data - page_address(page) + q->buf_offset; + skb_add_rx_frag(skb, shinfo->nr_frags, page, offset, len, q->buf_size); + } else { + skb_free_frag(data); } if (more) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index a44b7766dec6..c13547841a4e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -231,7 +231,7 @@ mt7615_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, int cmd, int *seq) { struct mt7615_dev *dev = container_of(mdev, struct mt7615_dev, mt76); - enum mt76_txq_id qid; + enum mt76_mcuq_id qid; mt7615_mcu_fill_msg(dev, skb, cmd, seq); if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c index 13d77f8fca86..9fb506f2ace6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/sdio_txrx.c @@ -83,7 +83,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid, { struct mt76_queue *q = &dev->q_rx[qid]; struct mt76_sdio *sdio = &dev->sdio; - int len = 0, err, i, order; + int len = 0, err, i; struct page *page; u8 *buf; @@ -96,8 +96,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid, if (len > sdio->func->cur_blksize) len = roundup(len, sdio->func->cur_blksize); - order = get_order(len); - page = __dev_alloc_pages(GFP_KERNEL, order); + page = __dev_alloc_pages(GFP_KERNEL, get_order(len)); if (!page) return -ENOMEM; @@ -106,7 +105,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid, err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len); if (err < 0) { dev_err(dev->dev, "sdio read data failed:%d\n", err); - __free_pages(page, order); + put_page(page); return err; } @@ -123,7 +122,7 @@ static int mt7663s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid, if (q->queued + i + 1 == q->ndesc) break; } - __free_pages(page, order); + put_page(page); spin_lock_bh(&q->lock); q->head = (q->head + i) % q->ndesc; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 5fdd1a6d32ee..e211a2bd4d3c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -256,7 +256,7 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, struct mt7915_dev *dev = container_of(mdev, struct mt7915_dev, mt76); struct mt7915_mcu_txd *mcu_txd; u8 seq, pkt_fmt, qidx; - enum mt76_txq_id txq; + enum mt76_mcuq_id qid; __le32 *txd; u32 val; @@ -268,18 +268,18 @@ mt7915_mcu_send_message(struct mt76_dev *mdev, struct sk_buff *skb, seq = ++dev->mt76.mcu.msg_seq & 0xf; if (cmd == -MCU_CMD_FW_SCATTER) { - txq = MT_MCUQ_FWDL; + qid = MT_MCUQ_FWDL; goto exit; } mcu_txd = (struct mt7915_mcu_txd *)skb_push(skb, sizeof(*mcu_txd)); if (test_bit(MT76_STATE_MCU_RUNNING, &dev->mphy.state)) { - txq = MT_MCUQ_WA; + qid = MT_MCUQ_WA; qidx = MT_TX_MCU_PORT_RX_Q0; pkt_fmt = MT_TX_TYPE_CMD; } else { - txq = MT_MCUQ_WM; + qid = MT_MCUQ_WM; qidx = MT_TX_MCU_PORT_RX_Q0; pkt_fmt = MT_TX_TYPE_CMD; } @@ -326,7 +326,7 @@ exit: if 
(wait_seq) *wait_seq = seq; - return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[txq], skb, 0); + return mt76_tx_queue_skb_raw(dev, mdev->q_mcu[qid], skb, 0); } static void diff --git a/drivers/net/wireless/mediatek/mt7601u/dma.c b/drivers/net/wireless/mediatek/mt7601u/dma.c index 5f99054f535b..af7d1ecb777c 100644 --- a/drivers/net/wireless/mediatek/mt7601u/dma.c +++ b/drivers/net/wireless/mediatek/mt7601u/dma.c @@ -152,8 +152,7 @@ mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e) if (new_p) { /* we have one extra ref from the allocator */ - __free_pages(e->p, MT_RX_ORDER); - + put_page(e->p); e->p = new_p; } } @@ -310,7 +309,6 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, } e = &q->e[q->end]; - e->skb = skb; usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len, mt7601u_complete_tx, q); ret = usb_submit_urb(e->urb, GFP_ATOMIC); @@ -328,6 +326,7 @@ static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, q->end = (q->end + 1) % q->entries; q->used++; + e->skb = skb; if (q->used >= q->entries) ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); diff --git a/drivers/net/xen-netback/rx.c b/drivers/net/xen-netback/rx.c index b8febe1d1bfd..accc991d153f 100644 --- a/drivers/net/xen-netback/rx.c +++ b/drivers/net/xen-netback/rx.c @@ -38,10 +38,15 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) RING_IDX prod, cons; struct sk_buff *skb; int needed; + unsigned long flags; + + spin_lock_irqsave(&queue->rx_queue.lock, flags); skb = skb_peek(&queue->rx_queue); - if (!skb) + if (!skb) { + spin_unlock_irqrestore(&queue->rx_queue.lock, flags); return false; + } needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE); if (skb_is_gso(skb)) @@ -49,6 +54,8 @@ static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue) if (skb->sw_hash) needed++; + spin_unlock_irqrestore(&queue->rx_queue.lock, flags); + do { prod = queue->rx.sring->req_prod; cons = queue->rx.req_cons; diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index b59032e0859b..9d208570d059 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -335,16 +335,16 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(state); -static ssize_t available_slots_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t __available_slots_show(struct nvdimm_drvdata *ndd, char *buf) { - struct nvdimm_drvdata *ndd = dev_get_drvdata(dev); + struct device *dev; ssize_t rc; u32 nfree; if (!ndd) return -ENXIO; + dev = ndd->dev; nvdimm_bus_lock(dev); nfree = nd_label_nfree(ndd); if (nfree - 1 > nfree) { @@ -356,6 +356,18 @@ static ssize_t available_slots_show(struct device *dev, nvdimm_bus_unlock(dev); return rc; } + +static ssize_t available_slots_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t rc; + + nd_device_lock(dev); + rc = __available_slots_show(dev_get_drvdata(dev), buf); + nd_device_unlock(dev); + + return rc; +} static DEVICE_ATTR_RO(available_slots); __weak ssize_t security_show(struct device *dev, diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c index 6da67f4d641a..2403b71b601e 100644 --- a/drivers/nvdimm/namespace_devs.c +++ b/drivers/nvdimm/namespace_devs.c @@ -1635,11 +1635,11 @@ static umode_t namespace_visible(struct kobject *kobj, return a->mode; } - if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr - || a == &dev_attr_holder.attr - || a == &dev_attr_holder_class.attr - || a == 
&dev_attr_force_raw.attr - || a == &dev_attr_mode.attr) + /* base is_namespace_io() attributes */ + if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr || + a == &dev_attr_holder.attr || a == &dev_attr_holder_class.attr || + a == &dev_attr_force_raw.attr || a == &dev_attr_mode.attr || + a == &dev_attr_resource.attr) return a->mode; return 0; diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c index 875076b0ea6c..f33bdae626ba 100644 --- a/drivers/nvdimm/pmem.c +++ b/drivers/nvdimm/pmem.c @@ -23,7 +23,6 @@ #include <linux/uio.h> #include <linux/dax.h> #include <linux/nd.h> -#include <linux/backing-dev.h> #include <linux/mm.h> #include <asm/cacheflush.h> #include "pmem.h" diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 200bdd672c28..f13eb4ded95f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -1543,8 +1543,21 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio) } length = (io.nblocks + 1) << ns->lba_shift; - meta_len = (io.nblocks + 1) * ns->ms; - metadata = nvme_to_user_ptr(io.metadata); + + if ((io.control & NVME_RW_PRINFO_PRACT) && + ns->ms == sizeof(struct t10_pi_tuple)) { + /* + * Protection information is stripped/inserted by the + * controller. + */ + if (nvme_to_user_ptr(io.metadata)) + return -EINVAL; + meta_len = 0; + metadata = NULL; + } else { + meta_len = (io.nblocks + 1) * ns->ms; + metadata = nvme_to_user_ptr(io.metadata); + } if (ns->features & NVME_NS_EXT_LBAS) { length += meta_len; @@ -3816,7 +3829,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, } } - list_add_tail(&ns->siblings, &head->list); + list_add_tail_rcu(&ns->siblings, &head->list); ns->head = head; mutex_unlock(&ctrl->subsys->lock); return 0; diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 9ac762b28811..282b7a4ea9a9 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -221,7 +221,7 @@ static struct nvme_ns *nvme_round_robin_path(struct nvme_ns_head *head, } for (ns = nvme_next_ns(head, old); - ns != old; + ns && ns != old; ns = nvme_next_ns(head, ns)) { if (nvme_path_is_disabled(ns)) continue; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 50d9a20568a2..6bad4d4dcdf0 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -23,6 +23,7 @@ #include <linux/t10-pi.h> #include <linux/types.h> #include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/sed-opal.h> #include <linux/pci-p2pdma.h> @@ -542,50 +543,71 @@ static inline bool nvme_pci_use_sgls(struct nvme_dev *dev, struct request *req) return true; } -static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) +static void nvme_free_prps(struct nvme_dev *dev, struct request *req) { - struct nvme_iod *iod = blk_mq_rq_to_pdu(req); const int last_prp = NVME_CTRL_PAGE_SIZE / sizeof(__le64) - 1; - dma_addr_t dma_addr = iod->first_dma, next_dma_addr; + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + dma_addr_t dma_addr = iod->first_dma; int i; - if (iod->dma_len) { - dma_unmap_page(dev->dev, dma_addr, iod->dma_len, - rq_dma_dir(req)); - return; + for (i = 0; i < iod->npages; i++) { + __le64 *prp_list = nvme_pci_iod_list(req)[i]; + dma_addr_t next_dma_addr = le64_to_cpu(prp_list[last_prp]); + + dma_pool_free(dev->prp_page_pool, prp_list, dma_addr); + dma_addr = next_dma_addr; } - WARN_ON_ONCE(!iod->nents); +} - if (is_pci_p2pdma_page(sg_page(iod->sg))) - pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents, - 
rq_dma_dir(req)); - else - dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req)); +static void nvme_free_sgls(struct nvme_dev *dev, struct request *req) +{ + const int last_sg = SGES_PER_PAGE - 1; + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); + dma_addr_t dma_addr = iod->first_dma; + int i; + for (i = 0; i < iod->npages; i++) { + struct nvme_sgl_desc *sg_list = nvme_pci_iod_list(req)[i]; + dma_addr_t next_dma_addr = le64_to_cpu((sg_list[last_sg]).addr); - if (iod->npages == 0) - dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0], - dma_addr); + dma_pool_free(dev->prp_page_pool, sg_list, dma_addr); + dma_addr = next_dma_addr; + } - for (i = 0; i < iod->npages; i++) { - void *addr = nvme_pci_iod_list(req)[i]; +} - if (iod->use_sgl) { - struct nvme_sgl_desc *sg_list = addr; +static void nvme_unmap_sg(struct nvme_dev *dev, struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - next_dma_addr = - le64_to_cpu((sg_list[SGES_PER_PAGE - 1]).addr); - } else { - __le64 *prp_list = addr; + if (is_pci_p2pdma_page(sg_page(iod->sg))) + pci_p2pdma_unmap_sg(dev->dev, iod->sg, iod->nents, + rq_dma_dir(req)); + else + dma_unmap_sg(dev->dev, iod->sg, iod->nents, rq_dma_dir(req)); +} - next_dma_addr = le64_to_cpu(prp_list[last_prp]); - } +static void nvme_unmap_data(struct nvme_dev *dev, struct request *req) +{ + struct nvme_iod *iod = blk_mq_rq_to_pdu(req); - dma_pool_free(dev->prp_page_pool, addr, dma_addr); - dma_addr = next_dma_addr; + if (iod->dma_len) { + dma_unmap_page(dev->dev, iod->first_dma, iod->dma_len, + rq_dma_dir(req)); + return; } + WARN_ON_ONCE(!iod->nents); + + nvme_unmap_sg(dev, req); + if (iod->npages == 0) + dma_pool_free(dev->prp_small_pool, nvme_pci_iod_list(req)[0], + iod->first_dma); + else if (iod->use_sgl) + nvme_free_sgls(dev, req); + else + nvme_free_prps(dev, req); mempool_free(iod->sg, dev->iod_mempool); } @@ -661,7 +683,7 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, __le64 *old_prp_list = prp_list; prp_list = dma_pool_alloc(pool, GFP_ATOMIC, &prp_dma); if (!prp_list) - return BLK_STS_RESOURCE; + goto free_prps; list[iod->npages++] = prp_list; prp_list[0] = old_prp_list[i - 1]; old_prp_list[i - 1] = cpu_to_le64(prp_dma); @@ -681,14 +703,14 @@ static blk_status_t nvme_pci_setup_prps(struct nvme_dev *dev, dma_addr = sg_dma_address(sg); dma_len = sg_dma_len(sg); } - done: cmnd->dptr.prp1 = cpu_to_le64(sg_dma_address(iod->sg)); cmnd->dptr.prp2 = cpu_to_le64(iod->first_dma); - return BLK_STS_OK; - - bad_sgl: +free_prps: + nvme_free_prps(dev, req); + return BLK_STS_RESOURCE; +bad_sgl: WARN(DO_ONCE(nvme_print_sgl, iod->sg, iod->nents), "Invalid SGL for payload:%d nents:%d\n", blk_rq_payload_bytes(req), iod->nents); @@ -760,7 +782,7 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, sg_list = dma_pool_alloc(pool, GFP_ATOMIC, &sgl_dma); if (!sg_list) - return BLK_STS_RESOURCE; + goto free_sgls; i = 0; nvme_pci_iod_list(req)[iod->npages++] = sg_list; @@ -773,6 +795,9 @@ static blk_status_t nvme_pci_setup_sgls(struct nvme_dev *dev, } while (--entries > 0); return BLK_STS_OK; +free_sgls: + nvme_free_sgls(dev, req); + return BLK_STS_RESOURCE; } static blk_status_t nvme_setup_prp_simple(struct nvme_dev *dev, @@ -841,7 +866,7 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, sg_init_table(iod->sg, blk_rq_nr_phys_segments(req)); iod->nents = blk_rq_map_sg(req->q, req, iod->sg); if (!iod->nents) - goto out; + goto out_free_sg; if (is_pci_p2pdma_page(sg_page(iod->sg))) nr_mapped = 
pci_p2pdma_map_sg_attrs(dev->dev, iod->sg, @@ -850,16 +875,21 @@ static blk_status_t nvme_map_data(struct nvme_dev *dev, struct request *req, nr_mapped = dma_map_sg_attrs(dev->dev, iod->sg, iod->nents, rq_dma_dir(req), DMA_ATTR_NO_WARN); if (!nr_mapped) - goto out; + goto out_free_sg; iod->use_sgl = nvme_pci_use_sgls(dev, req); if (iod->use_sgl) ret = nvme_pci_setup_sgls(dev, req, &cmnd->rw, nr_mapped); else ret = nvme_pci_setup_prps(dev, req, &cmnd->rw); -out: if (ret != BLK_STS_OK) - nvme_unmap_data(dev, req); + goto out_unmap_sg; + return BLK_STS_OK; + +out_unmap_sg: + nvme_unmap_sg(dev, req); +out_free_sg: + mempool_free(iod->sg, dev->iod_mempool); return ret; } @@ -1795,6 +1825,9 @@ static void nvme_map_cmb(struct nvme_dev *dev) if (dev->cmb_size) return; + if (NVME_CAP_CMBS(dev->ctrl.cap)) + writel(NVME_CMBMSC_CRE, dev->bar + NVME_REG_CMBMSC); + dev->cmbsz = readl(dev->bar + NVME_REG_CMBSZ); if (!dev->cmbsz) return; @@ -1809,6 +1842,16 @@ static void nvme_map_cmb(struct nvme_dev *dev) return; /* + * Tell the controller about the host side address mapping the CMB, + * and enable CMB decoding for the NVMe 1.4+ scheme: + */ + if (NVME_CAP_CMBS(dev->ctrl.cap)) { + hi_lo_writeq(NVME_CMBMSC_CRE | NVME_CMBMSC_CMSE | + (pci_bus_address(pdev, bar) + offset), + dev->bar + NVME_REG_CMBMSC); + } + + /* * Controllers may support a CMB size larger than their BAR, * for example, due to being behind a bridge. Reduce the CMB to * the reported size of the BAR @@ -3199,6 +3242,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x144d, 0xa822), /* Samsung PM1725a */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1d1d, 0x1f1f), /* LighNVM qemu device */ .driver_data = NVME_QUIRK_LIGHTNVM, }, { PCI_DEVICE(0x1d1d, 0x2807), /* CNEX WL */ @@ -3214,6 +3259,10 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001), .driver_data = NVME_QUIRK_SINGLE_VECTOR }, { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) }, diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index cf6c49d09c82..b7ce4f221d99 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -97,6 +97,7 @@ struct nvme_rdma_queue { struct completion cm_done; bool pi_support; int cq_size; + struct mutex queue_lock; }; struct nvme_rdma_ctrl { @@ -579,6 +580,7 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, int ret; queue = &ctrl->queues[idx]; + mutex_init(&queue->queue_lock); queue->ctrl = ctrl; if (idx && ctrl->ctrl.max_integrity_segments) queue->pi_support = true; @@ -598,7 +600,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, if (IS_ERR(queue->cm_id)) { dev_info(ctrl->ctrl.device, "failed to create CM ID: %ld\n", PTR_ERR(queue->cm_id)); - return PTR_ERR(queue->cm_id); + ret = PTR_ERR(queue->cm_id); + goto out_destroy_mutex; } if (ctrl->ctrl.opts->mask & NVMF_OPT_HOST_TRADDR) @@ -628,6 +631,8 @@ static int nvme_rdma_alloc_queue(struct nvme_rdma_ctrl *ctrl, out_destroy_cm_id: rdma_destroy_id(queue->cm_id); nvme_rdma_destroy_queue_ib(queue); 
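The nvme hunks in this area move cleanup into labelled, reverse-order unwind paths (free_prps, free_sgls, out_unmap_sg, out_free_sg, out_destroy_mutex). A minimal standalone sketch of that goto-unwind idiom, using invented resource names rather than the driver's own structures:

#include <stdlib.h>

/*
 * Illustrative only: the reverse-order goto unwind used by the error paths
 * in the nearby hunks. The three "resources" are invented for the example.
 */
static int setup_three_resources(void)
{
        char *a, *b, *c;

        a = malloc(16);
        if (!a)
                goto err;
        b = malloc(16);
        if (!b)
                goto err_free_a;
        c = malloc(16);
        if (!c)
                goto err_free_b;

        /* ... use a, b and c here ... */

        free(c);
        free(b);
        free(a);
        return 0;

err_free_b:
        free(b);
err_free_a:
        free(a);
err:
        return -1;
}

int main(void)
{
        return setup_three_resources() ? EXIT_FAILURE : EXIT_SUCCESS;
}

Each label releases only what was acquired before the failing step, so adding a new allocation needs one extra label instead of edits to every earlier error branch.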
+out_destroy_mutex: + mutex_destroy(&queue->queue_lock); return ret; } @@ -639,9 +644,10 @@ static void __nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) static void nvme_rdma_stop_queue(struct nvme_rdma_queue *queue) { - if (!test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) - return; - __nvme_rdma_stop_queue(queue); + mutex_lock(&queue->queue_lock); + if (test_and_clear_bit(NVME_RDMA_Q_LIVE, &queue->flags)) + __nvme_rdma_stop_queue(queue); + mutex_unlock(&queue->queue_lock); } static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) @@ -651,6 +657,7 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue) nvme_rdma_destroy_queue_ib(queue); rdma_destroy_id(queue->cm_id); + mutex_destroy(&queue->queue_lock); } static void nvme_rdma_free_io_queues(struct nvme_rdma_ctrl *ctrl) diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 216619926563..881d28eb15e9 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -76,6 +76,7 @@ struct nvme_tcp_queue { struct work_struct io_work; int io_cpu; + struct mutex queue_lock; struct mutex send_mutex; struct llist_head req_list; struct list_head send_list; @@ -1219,6 +1220,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) sock_release(queue->sock); kfree(queue->pdu); + mutex_destroy(&queue->queue_lock); } static int nvme_tcp_init_connection(struct nvme_tcp_queue *queue) @@ -1380,6 +1382,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, struct nvme_tcp_queue *queue = &ctrl->queues[qid]; int ret, rcv_pdu_size; + mutex_init(&queue->queue_lock); queue->ctrl = ctrl; init_llist_head(&queue->req_list); INIT_LIST_HEAD(&queue->send_list); @@ -1398,7 +1401,7 @@ static int nvme_tcp_alloc_queue(struct nvme_ctrl *nctrl, if (ret) { dev_err(nctrl->device, "failed to create socket: %d\n", ret); - return ret; + goto err_destroy_mutex; } /* Single syn retry */ @@ -1507,6 +1510,8 @@ err_crypto: err_sock: sock_release(queue->sock); queue->sock = NULL; +err_destroy_mutex: + mutex_destroy(&queue->queue_lock); return ret; } @@ -1534,9 +1539,10 @@ static void nvme_tcp_stop_queue(struct nvme_ctrl *nctrl, int qid) struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; - if (!test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) - return; - __nvme_tcp_stop_queue(queue); + mutex_lock(&queue->queue_lock); + if (test_and_clear_bit(NVME_TCP_Q_LIVE, &queue->flags)) + __nvme_tcp_stop_queue(queue); + mutex_unlock(&queue->queue_lock); } static int nvme_tcp_start_queue(struct nvme_ctrl *nctrl, int idx) diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 8d90235e4fcc..dc1ea468b182 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -487,8 +487,10 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) /* return an all zeroed buffer if we can't find an active namespace */ ns = nvmet_find_namespace(ctrl, req->cmd->identify.nsid); - if (!ns) + if (!ns) { + status = NVME_SC_INVALID_NS; goto done; + } nvmet_ns_revalidate(ns); @@ -541,7 +543,9 @@ static void nvmet_execute_identify_ns(struct nvmet_req *req) id->nsattr |= (1 << 0); nvmet_put_namespace(ns); done: - status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); + if (!status) + status = nvmet_copy_to_sgl(req, 0, id, sizeof(*id)); + kfree(id); out: nvmet_req_complete(req, status); diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index dc1f0f647189..aacf06f0b431 100644 --- a/drivers/nvme/target/tcp.c +++ 
b/drivers/nvme/target/tcp.c @@ -305,7 +305,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) length = cmd->pdu_len; cmd->nr_mapped = DIV_ROUND_UP(length, PAGE_SIZE); offset = cmd->rbytes_done; - cmd->sg_idx = DIV_ROUND_UP(offset, PAGE_SIZE); + cmd->sg_idx = offset / PAGE_SIZE; sg_offset = offset % PAGE_SIZE; sg = &cmd->req.sg[cmd->sg_idx]; @@ -318,6 +318,7 @@ static void nvmet_tcp_map_pdu_iovec(struct nvmet_tcp_cmd *cmd) length -= iov_len; sg = sg_next(sg); iov++; + sg_offset = 0; } iov_iter_kvec(&cmd->recv_msg.msg_iter, READ, cmd->iov, diff --git a/drivers/of/device.c b/drivers/of/device.c index aedfaaafd3e7..1122daa8e273 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -162,9 +162,11 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, mask = DMA_BIT_MASK(ilog2(end) + 1); dev->coherent_dma_mask &= mask; *dev->dma_mask &= mask; - /* ...but only set bus limit if we found valid dma-ranges earlier */ - if (!ret) + /* ...but only set bus limit and range map if we found valid dma-ranges earlier */ + if (!ret) { dev->bus_dma_limit = end; + dev->dma_range_map = map; + } coherent = of_dma_is_coherent(np); dev_dbg(dev, "device is%sdma coherent\n", @@ -172,6 +174,9 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, iommu = of_iommu_configure(dev, np, id); if (PTR_ERR(iommu) == -EPROBE_DEFER) { + /* Don't touch range map if it wasn't set from a valid dma-ranges */ + if (!ret) + dev->dma_range_map = NULL; kfree(map); return -EPROBE_DEFER; } @@ -181,7 +186,6 @@ int of_dma_configure_id(struct device *dev, struct device_node *np, arch_setup_dma_ops(dev, dma_start, size, iommu, coherent); - dev->dma_range_map = map; return 0; } EXPORT_SYMBOL_GPL(of_dma_configure_id); diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index b9fecc25d213..790393d1e318 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1558,7 +1558,6 @@ int pci_save_state(struct pci_dev *dev) return i; pci_save_ltr_state(dev); - pci_save_aspm_l1ss_state(dev); pci_save_dpc_state(dev); pci_save_aer_state(dev); pci_save_ptm_state(dev); @@ -1665,7 +1664,6 @@ void pci_restore_state(struct pci_dev *dev) * LTR itself (in the PCIe capability). 
*/ pci_restore_ltr_state(dev); - pci_restore_aspm_l1ss_state(dev); pci_restore_pcie_state(dev); pci_restore_pasid_state(dev); @@ -3353,11 +3351,6 @@ void pci_allocate_cap_save_buffers(struct pci_dev *dev) if (error) pci_err(dev, "unable to allocate suspend buffer for LTR\n"); - error = pci_add_ext_cap_save_buffer(dev, PCI_EXT_CAP_ID_L1SS, - 2 * sizeof(u32)); - if (error) - pci_err(dev, "unable to allocate suspend buffer for ASPM-L1SS\n"); - pci_allocate_vc_save_buffers(dev); } diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h index 5c59365092fa..a7bdf0b1d45d 100644 --- a/drivers/pci/pci.h +++ b/drivers/pci/pci.h @@ -582,15 +582,11 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev); void pcie_aspm_exit_link_state(struct pci_dev *pdev); void pcie_aspm_pm_state_change(struct pci_dev *pdev); void pcie_aspm_powersave_config_link(struct pci_dev *pdev); -void pci_save_aspm_l1ss_state(struct pci_dev *dev); -void pci_restore_aspm_l1ss_state(struct pci_dev *dev); #else static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) { } static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) { } static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) { } static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) { } -static inline void pci_save_aspm_l1ss_state(struct pci_dev *dev) { } -static inline void pci_restore_aspm_l1ss_state(struct pci_dev *dev) { } #endif #ifdef CONFIG_PCIE_ECRC diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index a08e7d6dc248..ac0557a305af 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -734,50 +734,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state) PCI_L1SS_CTL1_L1SS_MASK, val); } -void pci_save_aspm_l1ss_state(struct pci_dev *dev) -{ - int aspm_l1ss; - struct pci_cap_saved_state *save_state; - u32 *cap; - - if (!pci_is_pcie(dev)) - return; - - aspm_l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS); - if (!aspm_l1ss) - return; - - save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS); - if (!save_state) - return; - - cap = (u32 *)&save_state->cap.data[0]; - pci_read_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL1, cap++); - pci_read_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL2, cap++); -} - -void pci_restore_aspm_l1ss_state(struct pci_dev *dev) -{ - int aspm_l1ss; - struct pci_cap_saved_state *save_state; - u32 *cap; - - if (!pci_is_pcie(dev)) - return; - - aspm_l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS); - if (!aspm_l1ss) - return; - - save_state = pci_find_saved_ext_cap(dev, PCI_EXT_CAP_ID_L1SS); - if (!save_state) - return; - - cap = (u32 *)&save_state->cap.data[0]; - pci_write_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL1, *cap++); - pci_write_config_dword(dev, aspm_l1ss + PCI_L1SS_CTL2, *cap++); -} - static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val) { pcie_capability_clear_and_set_word(pdev, PCI_EXP_LNKCTL, diff --git a/drivers/phy/ingenic/Makefile b/drivers/phy/ingenic/Makefile index 65d5ea00fc9d..1cb158d7233f 100644 --- a/drivers/phy/ingenic/Makefile +++ b/drivers/phy/ingenic/Makefile @@ -1,2 +1,2 @@ # SPDX-License-Identifier: GPL-2.0 -obj-y += phy-ingenic-usb.o +obj-$(CONFIG_PHY_INGENIC_USB) += phy-ingenic-usb.o diff --git a/drivers/phy/mediatek/Kconfig b/drivers/phy/mediatek/Kconfig index d38def43b1bf..55f8e6c048ab 100644 --- a/drivers/phy/mediatek/Kconfig +++ b/drivers/phy/mediatek/Kconfig @@ -49,7 +49,9 @@ config PHY_MTK_HDMI config PHY_MTK_MIPI_DSI tristate "MediaTek MIPI-DSI Driver" - depends on ARCH_MEDIATEK && OF 
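The pci.h hunk above drops the pci_save_aspm_l1ss_state()/pci_restore_aspm_l1ss_state() declarations together with their empty static-inline stubs in the #else branch. That stub idiom is what lets the callers in pci.c stay free of #ifdefs; a minimal sketch, with an invented CONFIG_FEATURE_X option and helper standing in for the real ones:

#include <stdio.h>

/*
 * Sketch of the static-inline stub idiom from the pci.h hunk above.
 * CONFIG_FEATURE_X and feature_x_save() are invented for the example.
 */
#ifdef CONFIG_FEATURE_X
void feature_x_save(int dev)
{
        printf("saving state for device %d\n", dev);
}
#else
static inline void feature_x_save(int dev)
{
        /* feature compiled out: deliberately a no-op */
}
#endif

int main(void)
{
        feature_x_save(42);     /* call site needs no #ifdef either way */
        return 0;
}

Removing a helper therefore means deleting both the real definition and its stub, exactly as the aspm.c and pci.h hunks do here.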
+ depends on ARCH_MEDIATEK || COMPILE_TEST + depends on COMMON_CLK + depends on OF select GENERIC_PHY help Support MIPI DSI for Mediatek SoCs. diff --git a/drivers/phy/motorola/phy-cpcap-usb.c b/drivers/phy/motorola/phy-cpcap-usb.c index 442522ba487f..4728e2bff662 100644 --- a/drivers/phy/motorola/phy-cpcap-usb.c +++ b/drivers/phy/motorola/phy-cpcap-usb.c @@ -662,35 +662,42 @@ static int cpcap_usb_phy_probe(struct platform_device *pdev) generic_phy = devm_phy_create(ddata->dev, NULL, &ops); if (IS_ERR(generic_phy)) { error = PTR_ERR(generic_phy); - return PTR_ERR(generic_phy); + goto out_reg_disable; } phy_set_drvdata(generic_phy, ddata); phy_provider = devm_of_phy_provider_register(ddata->dev, of_phy_simple_xlate); - if (IS_ERR(phy_provider)) - return PTR_ERR(phy_provider); + if (IS_ERR(phy_provider)) { + error = PTR_ERR(phy_provider); + goto out_reg_disable; + } error = cpcap_usb_init_optional_pins(ddata); if (error) - return error; + goto out_reg_disable; cpcap_usb_init_optional_gpios(ddata); error = cpcap_usb_init_iio(ddata); if (error) - return error; + goto out_reg_disable; error = cpcap_usb_init_interrupts(pdev, ddata); if (error) - return error; + goto out_reg_disable; usb_add_phy_dev(&ddata->phy); atomic_set(&ddata->active, 1); schedule_delayed_work(&ddata->detect_work, msecs_to_jiffies(1)); return 0; + +out_reg_disable: + regulator_disable(ddata->vusb); + + return error; } static int cpcap_usb_phy_remove(struct platform_device *pdev) diff --git a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c index 34803a6c7664..5c1a109842a7 100644 --- a/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c +++ b/drivers/pinctrl/aspeed/pinctrl-aspeed-g6.c @@ -347,7 +347,7 @@ FUNC_GROUP_DECL(RMII4, F24, E23, E24, E25, C25, C24, B26, B25, B24); #define D22 40 SIG_EXPR_LIST_DECL_SESG(D22, SD1CLK, SD1, SIG_DESC_SET(SCU414, 8)); -SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU414, 8)); +SIG_EXPR_LIST_DECL_SEMG(D22, PWM8, PWM8G0, PWM8, SIG_DESC_SET(SCU4B4, 8)); PIN_DECL_2(D22, GPIOF0, SD1CLK, PWM8); GROUP_DECL(PWM8G0, D22); diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c index 7aeb552d16ce..72f17f26acd8 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c @@ -920,6 +920,10 @@ int mtk_pinconf_adv_pull_set(struct mtk_pinctrl *hw, err = hw->soc->bias_set(hw, desc, pullup); if (err) return err; + } else if (hw->soc->bias_set_combo) { + err = hw->soc->bias_set_combo(hw, desc, pullup, arg); + if (err) + return err; } else { return -ENOTSUPP; } diff --git a/drivers/pinctrl/nomadik/pinctrl-nomadik.c b/drivers/pinctrl/nomadik/pinctrl-nomadik.c index d4ea10803fd9..abfe11c7b49f 100644 --- a/drivers/pinctrl/nomadik/pinctrl-nomadik.c +++ b/drivers/pinctrl/nomadik/pinctrl-nomadik.c @@ -949,7 +949,6 @@ static void nmk_gpio_dbg_show_one(struct seq_file *s, } else { int irq = chip->to_irq(chip, offset); const int pullidx = pull ? 
1 : 0; - bool wake; int val; static const char * const pulls[] = { "none ", diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index 53a6a24bd052..3ea163498647 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -37,11 +37,11 @@ #define JZ4740_GPIO_TRIG 0x70 #define JZ4740_GPIO_FLAG 0x80 -#define JZ4760_GPIO_INT 0x10 -#define JZ4760_GPIO_PAT1 0x30 -#define JZ4760_GPIO_PAT0 0x40 -#define JZ4760_GPIO_FLAG 0x50 -#define JZ4760_GPIO_PEN 0x70 +#define JZ4770_GPIO_INT 0x10 +#define JZ4770_GPIO_PAT1 0x30 +#define JZ4770_GPIO_PAT0 0x40 +#define JZ4770_GPIO_FLAG 0x50 +#define JZ4770_GPIO_PEN 0x70 #define X1830_GPIO_PEL 0x110 #define X1830_GPIO_PEH 0x120 @@ -1688,8 +1688,8 @@ static inline bool ingenic_gpio_get_value(struct ingenic_gpio_chip *jzgc, static void ingenic_gpio_set_value(struct ingenic_gpio_chip *jzgc, u8 offset, int value) { - if (jzgc->jzpc->info->version >= ID_JZ4760) - ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_PAT0, offset, !!value); + if (jzgc->jzpc->info->version >= ID_JZ4770) + ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_PAT0, offset, !!value); else ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, offset, !!value); } @@ -1718,9 +1718,9 @@ static void irq_set_type(struct ingenic_gpio_chip *jzgc, break; } - if (jzgc->jzpc->info->version >= ID_JZ4760) { - reg1 = JZ4760_GPIO_PAT1; - reg2 = JZ4760_GPIO_PAT0; + if (jzgc->jzpc->info->version >= ID_JZ4770) { + reg1 = JZ4770_GPIO_PAT1; + reg2 = JZ4770_GPIO_PAT0; } else { reg1 = JZ4740_GPIO_TRIG; reg2 = JZ4740_GPIO_DIR; @@ -1758,8 +1758,8 @@ static void ingenic_gpio_irq_enable(struct irq_data *irqd) struct ingenic_gpio_chip *jzgc = gpiochip_get_data(gc); int irq = irqd->hwirq; - if (jzgc->jzpc->info->version >= ID_JZ4760) - ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, true); + if (jzgc->jzpc->info->version >= ID_JZ4770) + ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, true); else ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, true); @@ -1774,8 +1774,8 @@ static void ingenic_gpio_irq_disable(struct irq_data *irqd) ingenic_gpio_irq_mask(irqd); - if (jzgc->jzpc->info->version >= ID_JZ4760) - ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_INT, irq, false); + if (jzgc->jzpc->info->version >= ID_JZ4770) + ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_INT, irq, false); else ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_SELECT, irq, false); } @@ -1799,8 +1799,8 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd) irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH); } - if (jzgc->jzpc->info->version >= ID_JZ4760) - ingenic_gpio_set_bit(jzgc, JZ4760_GPIO_FLAG, irq, false); + if (jzgc->jzpc->info->version >= ID_JZ4770) + ingenic_gpio_set_bit(jzgc, JZ4770_GPIO_FLAG, irq, false); else ingenic_gpio_set_bit(jzgc, JZ4740_GPIO_DATA, irq, true); } @@ -1856,8 +1856,8 @@ static void ingenic_gpio_irq_handler(struct irq_desc *desc) chained_irq_enter(irq_chip, desc); - if (jzgc->jzpc->info->version >= ID_JZ4760) - flag = ingenic_gpio_read_reg(jzgc, JZ4760_GPIO_FLAG); + if (jzgc->jzpc->info->version >= ID_JZ4770) + flag = ingenic_gpio_read_reg(jzgc, JZ4770_GPIO_FLAG); else flag = ingenic_gpio_read_reg(jzgc, JZ4740_GPIO_FLAG); @@ -1938,9 +1938,9 @@ static int ingenic_gpio_get_direction(struct gpio_chip *gc, unsigned int offset) struct ingenic_pinctrl *jzpc = jzgc->jzpc; unsigned int pin = gc->base + offset; - if (jzpc->info->version >= ID_JZ4760) { - if (ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_INT) || - ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PAT1)) + if (jzpc->info->version >= ID_JZ4770) { + if 
(ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_INT) || + ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PAT1)) return GPIO_LINE_DIRECTION_IN; return GPIO_LINE_DIRECTION_OUT; } @@ -1991,20 +1991,20 @@ static int ingenic_pinmux_set_pin_fn(struct ingenic_pinctrl *jzpc, 'A' + offt, idx, func); if (jzpc->info->version >= ID_X1000) { - ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false); + ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false); ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, false); - ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2); - ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1); + ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2); + ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1); ingenic_shadow_config_pin_load(jzpc, pin); - } else if (jzpc->info->version >= ID_JZ4760) { - ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false); + } else if (jzpc->info->version >= ID_JZ4770) { + ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false); ingenic_config_pin(jzpc, pin, GPIO_MSK, false); - ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, func & 0x2); - ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, func & 0x1); + ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, func & 0x2); + ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, func & 0x1); } else { ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, true); ingenic_config_pin(jzpc, pin, JZ4740_GPIO_TRIG, func & 0x2); - ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func > 0); + ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, func & 0x1); } return 0; @@ -2057,14 +2057,14 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev, 'A' + offt, idx, input ? "in" : "out"); if (jzpc->info->version >= ID_X1000) { - ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_INT, false); + ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_INT, false); ingenic_shadow_config_pin(jzpc, pin, GPIO_MSK, true); - ingenic_shadow_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input); + ingenic_shadow_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input); ingenic_shadow_config_pin_load(jzpc, pin); - } else if (jzpc->info->version >= ID_JZ4760) { - ingenic_config_pin(jzpc, pin, JZ4760_GPIO_INT, false); + } else if (jzpc->info->version >= ID_JZ4770) { + ingenic_config_pin(jzpc, pin, JZ4770_GPIO_INT, false); ingenic_config_pin(jzpc, pin, GPIO_MSK, true); - ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT1, input); + ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input); } else { ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false); ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input); @@ -2091,8 +2091,8 @@ static int ingenic_pinconf_get(struct pinctrl_dev *pctldev, unsigned int offt = pin / PINS_PER_GPIO_CHIP; bool pull; - if (jzpc->info->version >= ID_JZ4760) - pull = !ingenic_get_pin_config(jzpc, pin, JZ4760_GPIO_PEN); + if (jzpc->info->version >= ID_JZ4770) + pull = !ingenic_get_pin_config(jzpc, pin, JZ4770_GPIO_PEN); else pull = !ingenic_get_pin_config(jzpc, pin, JZ4740_GPIO_PULL_DIS); @@ -2141,8 +2141,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc, REG_SET(X1830_GPIO_PEH), bias << idxh); } - } else if (jzpc->info->version >= ID_JZ4760) { - ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PEN, !bias); + } else if (jzpc->info->version >= ID_JZ4770) { + ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PEN, !bias); } else { ingenic_config_pin(jzpc, pin, JZ4740_GPIO_PULL_DIS, !bias); } @@ -2151,8 +2151,8 @@ static void ingenic_set_bias(struct ingenic_pinctrl *jzpc, static void 
ingenic_set_output_level(struct ingenic_pinctrl *jzpc, unsigned int pin, bool high) { - if (jzpc->info->version >= ID_JZ4760) - ingenic_config_pin(jzpc, pin, JZ4760_GPIO_PAT0, high); + if (jzpc->info->version >= ID_JZ4770) + ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT0, high); else ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DATA, high); } diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index e051aecf95c4..d70caecd21d2 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -51,6 +51,7 @@ * @dual_edge_irqs: Bitmap of irqs that need sw emulated dual edge * detection. * @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller + * @disabled_for_mux: These IRQs were disabled because we muxed away. * @soc: Reference to soc_data of platform specific data. * @regs: Base addresses for the TLMM tiles. * @phys_base: Physical base address @@ -72,6 +73,7 @@ struct msm_pinctrl { DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO); DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO); DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO); + DECLARE_BITMAP(disabled_for_mux, MAX_NR_GPIO); const struct msm_pinctrl_soc_data *soc; void __iomem *regs[MAX_NR_TILES]; @@ -96,6 +98,14 @@ MSM_ACCESSOR(intr_cfg) MSM_ACCESSOR(intr_status) MSM_ACCESSOR(intr_target) +static void msm_ack_intr_status(struct msm_pinctrl *pctrl, + const struct msm_pingroup *g) +{ + u32 val = g->intr_ack_high ? BIT(g->intr_status_bit) : 0; + + msm_writel_intr_status(val, pctrl, g); +} + static int msm_get_groups_count(struct pinctrl_dev *pctldev) { struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); @@ -171,6 +181,10 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned group) { struct msm_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev); + struct gpio_chip *gc = &pctrl->chip; + unsigned int irq = irq_find_mapping(gc->irq.domain, group); + struct irq_data *d = irq_get_irq_data(irq); + unsigned int gpio_func = pctrl->soc->gpio_func; const struct msm_pingroup *g; unsigned long flags; u32 val, mask; @@ -187,6 +201,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev, if (WARN_ON(i == g->nfuncs)) return -EINVAL; + /* + * If an GPIO interrupt is setup on this pin then we need special + * handling. Specifically interrupt detection logic will still see + * the pin twiddle even when we're muxed away. + * + * When we see a pin with an interrupt setup on it then we'll disable + * (mask) interrupts on it when we mux away until we mux back. Note + * that disable_irq() refcounts and interrupts are disabled as long as + * at least one disable_irq() has been called. + */ + if (d && i != gpio_func && + !test_and_set_bit(d->hwirq, pctrl->disabled_for_mux)) + disable_irq(irq); + raw_spin_lock_irqsave(&pctrl->lock, flags); val = msm_readl_ctl(pctrl, g); @@ -196,6 +224,20 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev, raw_spin_unlock_irqrestore(&pctrl->lock, flags); + if (d && i == gpio_func && + test_and_clear_bit(d->hwirq, pctrl->disabled_for_mux)) { + /* + * Clear interrupts detected while not GPIO since we only + * masked things. 
+ */ + if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs)) + irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, false); + else + msm_ack_intr_status(pctrl, g); + + enable_irq(irq); + } + return 0; } @@ -210,8 +252,7 @@ static int msm_pinmux_request_gpio(struct pinctrl_dev *pctldev, if (!g->nfuncs) return 0; - /* For now assume function 0 is GPIO because it always is */ - return msm_pinmux_set_mux(pctldev, g->funcs[0], offset); + return msm_pinmux_set_mux(pctldev, g->funcs[pctrl->soc->gpio_func], offset); } static const struct pinmux_ops msm_pinmux_ops = { @@ -774,7 +815,7 @@ static void msm_gpio_irq_mask(struct irq_data *d) raw_spin_unlock_irqrestore(&pctrl->lock, flags); } -static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear) +static void msm_gpio_irq_unmask(struct irq_data *d) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct msm_pinctrl *pctrl = gpiochip_get_data(gc); @@ -792,17 +833,6 @@ static void msm_gpio_irq_clear_unmask(struct irq_data *d, bool status_clear) raw_spin_lock_irqsave(&pctrl->lock, flags); - if (status_clear) { - /* - * clear the interrupt status bit before unmask to avoid - * any erroneous interrupts that would have got latched - * when the interrupt is not in use. - */ - val = msm_readl_intr_status(pctrl, g); - val &= ~BIT(g->intr_status_bit); - msm_writel_intr_status(val, pctrl, g); - } - val = msm_readl_intr_cfg(pctrl, g); val |= BIT(g->intr_raw_status_bit); val |= BIT(g->intr_enable_bit); @@ -822,7 +852,7 @@ static void msm_gpio_irq_enable(struct irq_data *d) irq_chip_enable_parent(d); if (!test_bit(d->hwirq, pctrl->skip_wake_irqs)) - msm_gpio_irq_clear_unmask(d, true); + msm_gpio_irq_unmask(d); } static void msm_gpio_irq_disable(struct irq_data *d) @@ -837,11 +867,6 @@ static void msm_gpio_irq_disable(struct irq_data *d) msm_gpio_irq_mask(d); } -static void msm_gpio_irq_unmask(struct irq_data *d) -{ - msm_gpio_irq_clear_unmask(d, false); -} - /** * msm_gpio_update_dual_edge_parent() - Prime next edge for IRQs handled by parent. * @d: The irq dta. @@ -894,7 +919,6 @@ static void msm_gpio_irq_ack(struct irq_data *d) struct msm_pinctrl *pctrl = gpiochip_get_data(gc); const struct msm_pingroup *g; unsigned long flags; - u32 val; if (test_bit(d->hwirq, pctrl->skip_wake_irqs)) { if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) @@ -906,12 +930,7 @@ static void msm_gpio_irq_ack(struct irq_data *d) raw_spin_lock_irqsave(&pctrl->lock, flags); - val = msm_readl_intr_status(pctrl, g); - if (g->intr_ack_high) - val |= BIT(g->intr_status_bit); - else - val &= ~BIT(g->intr_status_bit); - msm_writel_intr_status(val, pctrl, g); + msm_ack_intr_status(pctrl, g); if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) msm_gpio_update_dual_edge_pos(pctrl, g, d); @@ -936,6 +955,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) struct msm_pinctrl *pctrl = gpiochip_get_data(gc); const struct msm_pingroup *g; unsigned long flags; + bool was_enabled; u32 val; if (msm_gpio_needs_dual_edge_parent_workaround(d, type)) { @@ -997,6 +1017,7 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) * could cause the INTR_STATUS to be set for EDGE interrupts. 
*/ val = msm_readl_intr_cfg(pctrl, g); + was_enabled = val & BIT(g->intr_raw_status_bit); val |= BIT(g->intr_raw_status_bit); if (g->intr_detection_width == 2) { val &= ~(3 << g->intr_detection_bit); @@ -1046,6 +1067,14 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) } msm_writel_intr_cfg(val, pctrl, g); + /* + * The first time we set RAW_STATUS_EN it could trigger an interrupt. + * Clear the interrupt. This is safe because we have + * IRQCHIP_SET_TYPE_MASKED. + */ + if (!was_enabled) + msm_ack_intr_status(pctrl, g); + if (test_bit(d->hwirq, pctrl->dual_edge_irqs)) msm_gpio_update_dual_edge_pos(pctrl, g, d); @@ -1099,16 +1128,11 @@ static int msm_gpio_irq_reqres(struct irq_data *d) } /* - * Clear the interrupt that may be pending before we enable - * the line. - * This is especially a problem with the GPIOs routed to the - * PDC. These GPIOs are direct-connect interrupts to the GIC. - * Disabling the interrupt line at the PDC does not prevent - * the interrupt from being latched at the GIC. The state at - * GIC needs to be cleared before enabling. + * The disable / clear-enable workaround we do in msm_pinmux_set_mux() + * only works if disable is not lazy since we only clear any bogus + * interrupt in hardware. Explicitly mark the interrupt as UNLAZY. */ - if (d->parent_data && test_bit(d->hwirq, pctrl->skip_wake_irqs)) - irq_chip_set_parent_state(d, IRQCHIP_STATE_PENDING, 0); + irq_set_status_flags(d->irq, IRQ_DISABLE_UNLAZY); return 0; out: diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h index 333f99243c43..e31a5167c91e 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.h +++ b/drivers/pinctrl/qcom/pinctrl-msm.h @@ -118,6 +118,7 @@ struct msm_gpio_wakeirq_map { * @wakeirq_dual_edge_errata: If true then GPIOs using the wakeirq_map need * to be aware that their parent can't handle dual * edge interrupts. + * @gpio_func: Which function number is GPIO (usually 0). */ struct msm_pinctrl_soc_data { const struct pinctrl_pin_desc *pins; @@ -134,6 +135,7 @@ struct msm_pinctrl_soc_data { const struct msm_gpio_wakeirq_map *wakeirq_map; unsigned int nwakeirq_map; bool wakeirq_dual_edge_errata; + unsigned int gpio_func; }; extern const struct dev_pm_ops msm_pinctrl_dev_pm_ops; diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig index 33040b0b3b79..2c941cdac9ee 100644 --- a/drivers/platform/surface/Kconfig +++ b/drivers/platform/surface/Kconfig @@ -5,6 +5,7 @@ menuconfig SURFACE_PLATFORMS bool "Microsoft Surface Platform-Specific Device Drivers" + depends on ACPI default y help Say Y here to get to see options for platform-specific device drivers @@ -29,20 +30,19 @@ config SURFACE3_WMI config SURFACE_3_BUTTON tristate "Power/home/volume buttons driver for Microsoft Surface 3 tablet" - depends on ACPI && KEYBOARD_GPIO && I2C + depends on KEYBOARD_GPIO && I2C help This driver handles the power/home/volume buttons on the Microsoft Surface 3 tablet. config SURFACE_3_POWER_OPREGION tristate "Surface 3 battery platform operation region support" - depends on ACPI && I2C + depends on I2C help This driver provides support for ACPI operation region of the Surface 3 battery platform driver. 
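The pinctrl-msm changes above pair disable_irq()/enable_irq() with a disabled_for_mux bitmap so the calls cannot become unbalanced when a pin is muxed away from GPIO and later muxed back. A standalone userspace analogue of that test_and_set_bit()/test_and_clear_bit() bookkeeping, with all names invented for the example:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/*
 * Userspace analogue of the disabled_for_mux bookkeeping: an atomic
 * compare-and-swap stands in for test_and_set_bit()/test_and_clear_bit(),
 * keeping the disable/enable pair balanced. Names are invented.
 */
static atomic_bool disabled_for_mux;

static void mux_away_from_gpio(void)
{
        bool was_clear = false;

        /* only the first mux-away actually disables */
        if (atomic_compare_exchange_strong(&disabled_for_mux, &was_clear, true))
                printf("disable_irq()\n");
}

static void mux_back_to_gpio(void)
{
        bool was_set = true;

        /* only re-enable if this path did the disable */
        if (atomic_compare_exchange_strong(&disabled_for_mux, &was_set, false))
                printf("enable_irq()\n");
}

int main(void)
{
        mux_away_from_gpio();
        mux_away_from_gpio();   /* second call is a no-op: no double disable */
        mux_back_to_gpio();
        mux_back_to_gpio();     /* second call is a no-op: no double enable */
        return 0;
}

In the driver the same guard also decides whether any interrupt latched while the pin was muxed away needs to be cleared before re-enabling.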
config SURFACE_GPE tristate "Surface GPE/Lid Support Driver" - depends on ACPI depends on DMI help This driver marks the GPEs related to the ACPI lid device found on @@ -52,7 +52,7 @@ config SURFACE_GPE config SURFACE_PRO3_BUTTON tristate "Power/home/volume buttons driver for Microsoft Surface Pro 3/4 tablet" - depends on ACPI && INPUT + depends on INPUT help This driver handles the power/home/volume buttons on the Microsoft Surface Pro 3/4 tablet. diff --git a/drivers/platform/surface/surface_gpe.c b/drivers/platform/surface/surface_gpe.c index e49e5d6d5d4e..86f6991b1215 100644 --- a/drivers/platform/surface/surface_gpe.c +++ b/drivers/platform/surface/surface_gpe.c @@ -181,12 +181,12 @@ static int surface_lid_enable_wakeup(struct device *dev, bool enable) return 0; } -static int surface_gpe_suspend(struct device *dev) +static int __maybe_unused surface_gpe_suspend(struct device *dev) { return surface_lid_enable_wakeup(dev, true); } -static int surface_gpe_resume(struct device *dev) +static int __maybe_unused surface_gpe_resume(struct device *dev) { return surface_lid_enable_wakeup(dev, false); } diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c index 0102bf1c7916..ef8342572463 100644 --- a/drivers/platform/x86/amd-pmc.c +++ b/drivers/platform/x86/amd-pmc.c @@ -85,7 +85,7 @@ static inline void amd_pmc_reg_write(struct amd_pmc_dev *dev, int reg_offset, u3 iowrite32(val, dev->regbase + reg_offset); } -#if CONFIG_DEBUG_FS +#ifdef CONFIG_DEBUG_FS static int smu_fw_info_show(struct seq_file *s, void *unused) { struct amd_pmc_dev *dev = s->private; diff --git a/drivers/platform/x86/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell-wmi-sysman/sysman.c index dc6dd531c996..cb81010ba1a2 100644 --- a/drivers/platform/x86/dell-wmi-sysman/sysman.c +++ b/drivers/platform/x86/dell-wmi-sysman/sysman.c @@ -419,13 +419,17 @@ static int init_bios_attributes(int attr_type, const char *guid) return retval; /* need to use specific instance_id and guid combination to get right data */ obj = get_wmiobj_pointer(instance_id, guid); - if (!obj) + if (!obj || obj->type != ACPI_TYPE_PACKAGE) return -ENODEV; elements = obj->package.elements; mutex_lock(&wmi_priv.mutex); while (elements) { /* sanity checking */ + if (elements[ATTR_NAME].type != ACPI_TYPE_STRING) { + pr_debug("incorrect element type\n"); + goto nextobj; + } if (strlen(elements[ATTR_NAME].string.pointer) == 0) { pr_debug("empty attribute found\n"); goto nextobj; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index ecd477964d11..e94e59283ecb 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -32,6 +32,10 @@ MODULE_LICENSE("GPL"); MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C"); MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); +static int enable_tablet_mode_sw = -1; +module_param(enable_tablet_mode_sw, int, 0444); +MODULE_PARM_DESC(enable_tablet_mode_sw, "Enable SW_TABLET_MODE reporting (-1=auto, 0=no, 1=yes)"); + #define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C" #define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4" @@ -247,7 +251,8 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command, ret = bios_return->return_code; if (ret) { - if (ret != HPWMI_RET_UNKNOWN_CMDTYPE) + if (ret != HPWMI_RET_UNKNOWN_COMMAND && + ret != HPWMI_RET_UNKNOWN_CMDTYPE) pr_warn("query 0x%x returned error 0x%x\n", query, ret); goto out_free; } @@ -653,10 +658,12 @@ static int __init hp_wmi_input_setup(void) } /* Tablet mode */ - val = 
hp_wmi_hw_state(HPWMI_TABLET_MASK); - if (!(val < 0)) { - __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit); - input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val); + if (enable_tablet_mode_sw > 0) { + val = hp_wmi_hw_state(HPWMI_TABLET_MASK); + if (val >= 0) { + __set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit); + input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val); + } } err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL); diff --git a/drivers/platform/x86/i2c-multi-instantiate.c b/drivers/platform/x86/i2c-multi-instantiate.c index b457b0babde3..2cce82579d09 100644 --- a/drivers/platform/x86/i2c-multi-instantiate.c +++ b/drivers/platform/x86/i2c-multi-instantiate.c @@ -164,13 +164,29 @@ static const struct i2c_inst_data bsg2150_data[] = { {} }; -static const struct i2c_inst_data int3515_data[] = { - { "tps6598x", IRQ_RESOURCE_APIC, 0 }, - { "tps6598x", IRQ_RESOURCE_APIC, 1 }, - { "tps6598x", IRQ_RESOURCE_APIC, 2 }, - { "tps6598x", IRQ_RESOURCE_APIC, 3 }, - {} -}; +/* + * Device with _HID INT3515 (TI PD controllers) has some unresolved interrupt + * issues. The most common problem seen is interrupt flood. + * + * There are at least two known causes. Firstly, on some boards, the + * I2CSerialBus resource index does not match the Interrupt resource, i.e. they + * are not one-to-one mapped like in the array below. Secondly, on some boards + * the IRQ line from the PD controller is not actually connected at all. But the + * interrupt flood is also seen on some boards where those are not a problem, so + * there are some other problems as well. + * + * Because of the issues with the interrupt, the device is disabled for now. If + * you wish to debug the issues, uncomment the below, and add an entry for the + * INT3515 device to the i2c_multi_instance_ids table. + * + * static const struct i2c_inst_data int3515_data[] = { + * { "tps6598x", IRQ_RESOURCE_APIC, 0 }, + * { "tps6598x", IRQ_RESOURCE_APIC, 1 }, + * { "tps6598x", IRQ_RESOURCE_APIC, 2 }, + * { "tps6598x", IRQ_RESOURCE_APIC, 3 }, + * { } + * }; + */ /* * Note new device-ids must also be added to i2c_multi_instantiate_ids in @@ -179,7 +195,6 @@ static const struct i2c_inst_data int3515_data[] = { static const struct acpi_device_id i2c_multi_inst_acpi_ids[] = { { "BSG1160", (unsigned long)bsg1160_data }, { "BSG2150", (unsigned long)bsg2150_data }, - { "INT3515", (unsigned long)int3515_data }, { } }; MODULE_DEVICE_TABLE(acpi, i2c_multi_inst_acpi_ids); diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 7598cd46cf60..5b81bafa5c16 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -92,6 +92,7 @@ struct ideapad_private { struct dentry *debug; unsigned long cfg; bool has_hw_rfkill_switch; + bool has_touchpad_switch; const char *fnesc_guid; }; @@ -535,7 +536,9 @@ static umode_t ideapad_is_visible(struct kobject *kobj, } else if (attr == &dev_attr_fn_lock.attr) { supported = acpi_has_method(priv->adev->handle, "HALS") && acpi_has_method(priv->adev->handle, "SALS"); - } else + } else if (attr == &dev_attr_touchpad.attr) + supported = priv->has_touchpad_switch; + else supported = true; return supported ? 
attr->mode : 0; @@ -867,6 +870,9 @@ static void ideapad_sync_touchpad_state(struct ideapad_private *priv) { unsigned long value; + if (!priv->has_touchpad_switch) + return; + /* Without reading from EC touchpad LED doesn't switch state */ if (!read_ec_data(priv->adev->handle, VPCCMD_R_TOUCHPAD, &value)) { /* Some IdeaPads don't really turn off touchpad - they only @@ -989,6 +995,9 @@ static int ideapad_acpi_add(struct platform_device *pdev) priv->platform_device = pdev; priv->has_hw_rfkill_switch = dmi_check_system(hw_rfkill_list); + /* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */ + priv->has_touchpad_switch = !acpi_dev_present("ELAN0634", NULL, -1); + ret = ideapad_sysfs_init(priv); if (ret) return ret; @@ -1006,6 +1015,10 @@ static int ideapad_acpi_add(struct platform_device *pdev) if (!priv->has_hw_rfkill_switch) write_ec_cmd(priv->adev->handle, VPCCMD_W_RF, 1); + /* The same for Touchpad */ + if (!priv->has_touchpad_switch) + write_ec_cmd(priv->adev->handle, VPCCMD_W_TOUCHPAD, 1); + for (i = 0; i < IDEAPAD_RFKILL_DEV_NUM; i++) if (test_bit(ideapad_rfk_data[i].cfgbit, &priv->cfg)) ideapad_register_rfkill(priv, i); diff --git a/drivers/platform/x86/intel-vbtn.c b/drivers/platform/x86/intel-vbtn.c index 3b49a1f4061b..30a9062d2b4b 100644 --- a/drivers/platform/x86/intel-vbtn.c +++ b/drivers/platform/x86/intel-vbtn.c @@ -207,19 +207,19 @@ static const struct dmi_system_id dmi_switches_allow_list[] = { { .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Stream x360 Convertible PC 11"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"), }, }, { .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), - DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion 13 x360 PC"), + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"), }, }, { .matches = { - DMI_MATCH(DMI_SYS_VENDOR, "Acer"), - DMI_MATCH(DMI_PRODUCT_NAME, "Switch SA5-271"), + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7352"), }, }, {} /* Array terminator */ diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index e03df2881dc6..f3e8eca8d86d 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -8783,6 +8783,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_Q_LNV3('N', '1', 'T', TPACPI_FAN_2CTL), /* P71 */ TPACPI_Q_LNV3('N', '1', 'U', TPACPI_FAN_2CTL), /* P51 */ TPACPI_Q_LNV3('N', '2', 'C', TPACPI_FAN_2CTL), /* P52 / P72 */ + TPACPI_Q_LNV3('N', '2', 'N', TPACPI_FAN_2CTL), /* P53 / P73 */ TPACPI_Q_LNV3('N', '2', 'E', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (1st gen) */ TPACPI_Q_LNV3('N', '2', 'O', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (2nd gen) */ TPACPI_Q_LNV3('N', '2', 'V', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (3nd gen) */ @@ -9951,9 +9952,9 @@ static int tpacpi_proxsensor_init(struct ibm_init_struct *iibm) if ((palm_err == -ENODEV) && (lap_err == -ENODEV)) return 0; /* Otherwise, if there was an error return it */ - if (palm_err && (palm_err != ENODEV)) + if (palm_err && (palm_err != -ENODEV)) return palm_err; - if (lap_err && (lap_err != ENODEV)) + if (lap_err && (lap_err != -ENODEV)) return lap_err; if (has_palmsensor) { diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 5783139d0a11..c4de932302d6 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -263,6 +263,16 @@ static const struct ts_dmi_data 
digma_citi_e200_data = { .properties = digma_citi_e200_props, }; +static const struct property_entry estar_beauty_hd_props[] = { + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + { } +}; + +static const struct ts_dmi_data estar_beauty_hd_data = { + .acpi_name = "GDIX1001:00", + .properties = estar_beauty_hd_props, +}; + static const struct property_entry gp_electronic_t701_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 960), PROPERTY_ENTRY_U32("touchscreen-size-y", 640), @@ -943,6 +953,14 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* Estar Beauty HD (MID 7316R) */ + .driver_data = (void *)&estar_beauty_hd_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Estar"), + DMI_MATCH(DMI_PRODUCT_NAME, "eSTAR BEAUTY HD Intel Quad core"), + }, + }, + { /* GP-electronic T701 */ .driver_data = (void *)&gp_electronic_t701_data, .matches = { diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index ca03d8e70bd1..67a768fe5b2a 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -1813,13 +1813,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) { struct regulator_dev *r; struct device *dev = rdev->dev.parent; - int ret; + int ret = 0; /* No supply to resolve? */ if (!rdev->supply_name) return 0; - /* Supply already resolved? */ + /* Supply already resolved? (fast-path without locking contention) */ if (rdev->supply) return 0; @@ -1829,7 +1829,7 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) /* Did the lookup explicitly defer for us? */ if (ret == -EPROBE_DEFER) - return ret; + goto out; if (have_full_constraints()) { r = dummy_regulator_rdev; @@ -1837,15 +1837,18 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) } else { dev_err(dev, "Failed to resolve %s-supply for %s\n", rdev->supply_name, rdev->desc->name); - return -EPROBE_DEFER; + ret = -EPROBE_DEFER; + goto out; } } if (r == rdev) { dev_err(dev, "Supply for %s (%s) resolved to itself\n", rdev->desc->name, rdev->supply_name); - if (!have_full_constraints()) - return -EINVAL; + if (!have_full_constraints()) { + ret = -EINVAL; + goto out; + } r = dummy_regulator_rdev; get_device(&r->dev); } @@ -1859,7 +1862,8 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) if (r->dev.parent && r->dev.parent != rdev->dev.parent) { if (!device_is_bound(r->dev.parent)) { put_device(&r->dev); - return -EPROBE_DEFER; + ret = -EPROBE_DEFER; + goto out; } } @@ -1867,15 +1871,32 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) ret = regulator_resolve_supply(r); if (ret < 0) { put_device(&r->dev); - return ret; + goto out; + } + + /* + * Recheck rdev->supply with rdev->mutex lock held to avoid a race + * between rdev->supply null check and setting rdev->supply in + * set_supply() from concurrent tasks. + */ + regulator_lock(rdev); + + /* Supply just resolved by a concurrent task? 
*/ + if (rdev->supply) { + regulator_unlock(rdev); + put_device(&r->dev); + goto out; } ret = set_supply(rdev, r); if (ret < 0) { + regulator_unlock(rdev); put_device(&r->dev); - return ret; + goto out; } + regulator_unlock(rdev); + /* * In set_machine_constraints() we may have turned this regulator on * but we couldn't propagate to the supply if it hadn't been resolved @@ -1886,11 +1907,12 @@ static int regulator_resolve_supply(struct regulator_dev *rdev) if (ret < 0) { _regulator_put(rdev->supply); rdev->supply = NULL; - return ret; + goto out; } } - return 0; +out: + return ret; } /* Internal regulator request function */ diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 51e80bc70d42..a701dae653c4 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -805,6 +805,14 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq) spin_lock_irq(&rtc_lock); + /* Ensure that the RTC is accessible. Bit 6 must be 0! */ + if ((CMOS_READ(RTC_VALID) & 0x40) != 0) { + spin_unlock_irq(&rtc_lock); + dev_warn(dev, "not accessible\n"); + retval = -ENXIO; + goto cleanup1; + } + if (!(flags & CMOS_RTC_FLAGS_NOFREQ)) { /* force periodic irq to CMOS reset default of 1024Hz; * diff --git a/drivers/rtc/rtc-mc146818-lib.c b/drivers/rtc/rtc-mc146818-lib.c index 972a5b9a629d..dcfaf09946ee 100644 --- a/drivers/rtc/rtc-mc146818-lib.c +++ b/drivers/rtc/rtc-mc146818-lib.c @@ -21,6 +21,13 @@ unsigned int mc146818_get_time(struct rtc_time *time) again: spin_lock_irqsave(&rtc_lock, flags); + /* Ensure that the RTC is accessible. Bit 6 must be 0! */ + if (WARN_ON_ONCE((CMOS_READ(RTC_VALID) & 0x40) != 0)) { + spin_unlock_irqrestore(&rtc_lock, flags); + memset(time, 0xff, sizeof(*time)); + return 0; + } + /* * Check whether there is an update in progress during which the * readout is unspecified. The maximum update time is ~2ms. Poll diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index 16bb135c20aa..03d27ee9cac6 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c @@ -1874,18 +1874,26 @@ void dasd_path_create_kobjects(struct dasd_device *device) } EXPORT_SYMBOL(dasd_path_create_kobjects); -/* - * As we keep kobjects for the lifetime of a device, this function must not be - * called anywhere but in the context of offlining a device. - */ -void dasd_path_remove_kobj(struct dasd_device *device, int chp) +static void dasd_path_remove_kobj(struct dasd_device *device, int chp) { if (device->path[chp].in_sysfs) { kobject_put(&device->path[chp].kobj); device->path[chp].in_sysfs = false; } } -EXPORT_SYMBOL(dasd_path_remove_kobj); + +/* + * As we keep kobjects for the lifetime of a device, this function must not be + * called anywhere but in the context of offlining a device. 
+ */ +void dasd_path_remove_kobjects(struct dasd_device *device) +{ + int i; + + for (i = 0; i < 8; i++) + dasd_path_remove_kobj(device, i); +} +EXPORT_SYMBOL(dasd_path_remove_kobjects); int dasd_add_sysfs_files(struct ccw_device *cdev) { diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 3caa1ee5f4b0..65eb87cbbb9b 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c @@ -1036,7 +1036,6 @@ static void dasd_eckd_clear_conf_data(struct dasd_device *device) device->path[i].ssid = 0; device->path[i].chpid = 0; dasd_path_notoper(device, i); - dasd_path_remove_kobj(device, i); } } @@ -2173,6 +2172,7 @@ out_err2: device->block = NULL; out_err1: dasd_eckd_clear_conf_data(device); + dasd_path_remove_kobjects(device); kfree(device->private); device->private = NULL; return rc; @@ -2191,6 +2191,7 @@ static void dasd_eckd_uncheck_device(struct dasd_device *device) private->vdsneq = NULL; private->gneq = NULL; dasd_eckd_clear_conf_data(device); + dasd_path_remove_kobjects(device); } static struct dasd_ccw_req * diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 3bc008f9136c..b8a04c42d1d2 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -858,7 +858,7 @@ int dasd_add_sysfs_files(struct ccw_device *); void dasd_remove_sysfs_files(struct ccw_device *); void dasd_path_create_kobj(struct dasd_device *, int); void dasd_path_create_kobjects(struct dasd_device *); -void dasd_path_remove_kobj(struct dasd_device *, int); +void dasd_path_remove_kobjects(struct dasd_device *); struct dasd_device *dasd_device_from_cdev(struct ccw_device *); struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *); diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c index be2520cc010b..7dc72cb718b0 100644 --- a/drivers/s390/crypto/vfio_ap_drv.c +++ b/drivers/s390/crypto/vfio_ap_drv.c @@ -71,15 +71,11 @@ static int vfio_ap_queue_dev_probe(struct ap_device *apdev) static void vfio_ap_queue_dev_remove(struct ap_device *apdev) { struct vfio_ap_queue *q; - int apid, apqi; mutex_lock(&matrix_dev->lock); q = dev_get_drvdata(&apdev->device); + vfio_ap_mdev_reset_queue(q, 1); dev_set_drvdata(&apdev->device, NULL); - apid = AP_QID_CARD(q->apqn); - apqi = AP_QID_QUEUE(q->apqn); - vfio_ap_mdev_reset_queue(apid, apqi, 1); - vfio_ap_irq_disable(q); kfree(q); mutex_unlock(&matrix_dev->lock); } diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c index e0bde8518745..41fc2e4135fe 100644 --- a/drivers/s390/crypto/vfio_ap_ops.c +++ b/drivers/s390/crypto/vfio_ap_ops.c @@ -25,6 +25,7 @@ #define VFIO_AP_MDEV_NAME_HWVIRT "VFIO AP Passthrough Device" static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev); +static struct vfio_ap_queue *vfio_ap_find_queue(int apqn); static int match_apqn(struct device *dev, const void *data) { @@ -49,20 +50,15 @@ static struct vfio_ap_queue *vfio_ap_get_queue( int apqn) { struct vfio_ap_queue *q; - struct device *dev; if (!test_bit_inv(AP_QID_CARD(apqn), matrix_mdev->matrix.apm)) return NULL; if (!test_bit_inv(AP_QID_QUEUE(apqn), matrix_mdev->matrix.aqm)) return NULL; - dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL, - &apqn, match_apqn); - if (!dev) - return NULL; - q = dev_get_drvdata(dev); - q->matrix_mdev = matrix_mdev; - put_device(dev); + q = vfio_ap_find_queue(apqn); + if (q) + q->matrix_mdev = matrix_mdev; return q; } @@ -119,13 +115,18 @@ static void vfio_ap_wait_for_irqclear(int apqn) */ static void 
vfio_ap_free_aqic_resources(struct vfio_ap_queue *q) { - if (q->saved_isc != VFIO_AP_ISC_INVALID && q->matrix_mdev) + if (!q) + return; + if (q->saved_isc != VFIO_AP_ISC_INVALID && + !WARN_ON(!(q->matrix_mdev && q->matrix_mdev->kvm))) { kvm_s390_gisc_unregister(q->matrix_mdev->kvm, q->saved_isc); - if (q->saved_pfn && q->matrix_mdev) + q->saved_isc = VFIO_AP_ISC_INVALID; + } + if (q->saved_pfn && !WARN_ON(!q->matrix_mdev)) { vfio_unpin_pages(mdev_dev(q->matrix_mdev->mdev), &q->saved_pfn, 1); - q->saved_pfn = 0; - q->saved_isc = VFIO_AP_ISC_INVALID; + q->saved_pfn = 0; + } } /** @@ -144,7 +145,7 @@ static void vfio_ap_free_aqic_resources(struct vfio_ap_queue *q) * Returns if ap_aqic function failed with invalid, deconfigured or * checkstopped AP. */ -struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q) +static struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q) { struct ap_qirq_ctrl aqic_gisa = {}; struct ap_queue_status status; @@ -1037,19 +1038,14 @@ static int vfio_ap_mdev_set_kvm(struct ap_matrix_mdev *matrix_mdev, { struct ap_matrix_mdev *m; - mutex_lock(&matrix_dev->lock); - list_for_each_entry(m, &matrix_dev->mdev_list, node) { - if ((m != matrix_mdev) && (m->kvm == kvm)) { - mutex_unlock(&matrix_dev->lock); + if ((m != matrix_mdev) && (m->kvm == kvm)) return -EPERM; - } } matrix_mdev->kvm = kvm; kvm_get_kvm(kvm); kvm->arch.crypto.pqap_hook = &matrix_mdev->pqap_hook; - mutex_unlock(&matrix_dev->lock); return 0; } @@ -1083,79 +1079,118 @@ static int vfio_ap_mdev_iommu_notifier(struct notifier_block *nb, return NOTIFY_DONE; } +static void vfio_ap_mdev_unset_kvm(struct ap_matrix_mdev *matrix_mdev) +{ + kvm_arch_crypto_clear_masks(matrix_mdev->kvm); + matrix_mdev->kvm->arch.crypto.pqap_hook = NULL; + vfio_ap_mdev_reset_queues(matrix_mdev->mdev); + kvm_put_kvm(matrix_mdev->kvm); + matrix_mdev->kvm = NULL; +} + static int vfio_ap_mdev_group_notifier(struct notifier_block *nb, unsigned long action, void *data) { - int ret; + int ret, notify_rc = NOTIFY_OK; struct ap_matrix_mdev *matrix_mdev; if (action != VFIO_GROUP_NOTIFY_SET_KVM) return NOTIFY_OK; matrix_mdev = container_of(nb, struct ap_matrix_mdev, group_notifier); + mutex_lock(&matrix_dev->lock); if (!data) { - matrix_mdev->kvm = NULL; - return NOTIFY_OK; + if (matrix_mdev->kvm) + vfio_ap_mdev_unset_kvm(matrix_mdev); + goto notify_done; } ret = vfio_ap_mdev_set_kvm(matrix_mdev, data); - if (ret) - return NOTIFY_DONE; + if (ret) { + notify_rc = NOTIFY_DONE; + goto notify_done; + } /* If there is no CRYCB pointer, then we can't copy the masks */ - if (!matrix_mdev->kvm->arch.crypto.crycbd) - return NOTIFY_DONE; + if (!matrix_mdev->kvm->arch.crypto.crycbd) { + notify_rc = NOTIFY_DONE; + goto notify_done; + } kvm_arch_crypto_set_masks(matrix_mdev->kvm, matrix_mdev->matrix.apm, matrix_mdev->matrix.aqm, matrix_mdev->matrix.adm); - return NOTIFY_OK; +notify_done: + mutex_unlock(&matrix_dev->lock); + return notify_rc; } -static void vfio_ap_irq_disable_apqn(int apqn) +static struct vfio_ap_queue *vfio_ap_find_queue(int apqn) { struct device *dev; - struct vfio_ap_queue *q; + struct vfio_ap_queue *q = NULL; dev = driver_find_device(&matrix_dev->vfio_ap_drv->driver, NULL, &apqn, match_apqn); if (dev) { q = dev_get_drvdata(dev); - vfio_ap_irq_disable(q); put_device(dev); } + + return q; } -int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi, +int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, unsigned int retry) { struct ap_queue_status status; + int ret; int retry2 = 2; - int apqn = AP_MKQID(apid, 
apqi); - do { - status = ap_zapq(apqn); - switch (status.response_code) { - case AP_RESPONSE_NORMAL: - while (!status.queue_empty && retry2--) { - msleep(20); - status = ap_tapq(apqn, NULL); - } - WARN_ON_ONCE(retry2 <= 0); - return 0; - case AP_RESPONSE_RESET_IN_PROGRESS: - case AP_RESPONSE_BUSY: + if (!q) + return 0; + +retry_zapq: + status = ap_zapq(q->apqn); + switch (status.response_code) { + case AP_RESPONSE_NORMAL: + ret = 0; + break; + case AP_RESPONSE_RESET_IN_PROGRESS: + if (retry--) { msleep(20); - break; - default: - /* things are really broken, give up */ - return -EIO; + goto retry_zapq; } - } while (retry--); + ret = -EBUSY; + break; + case AP_RESPONSE_Q_NOT_AVAIL: + case AP_RESPONSE_DECONFIGURED: + case AP_RESPONSE_CHECKSTOPPED: + WARN_ON_ONCE(status.irq_enabled); + ret = -EBUSY; + goto free_resources; + default: + /* things are really broken, give up */ + WARN(true, "PQAP/ZAPQ completed with invalid rc (%x)\n", + status.response_code); + return -EIO; + } + + /* wait for the reset to take effect */ + while (retry2--) { + if (status.queue_empty && !status.irq_enabled) + break; + msleep(20); + status = ap_tapq(q->apqn, NULL); + } + WARN_ON_ONCE(retry2 <= 0); - return -EBUSY; +free_resources: + vfio_ap_free_aqic_resources(q); + + return ret; } static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) @@ -1163,13 +1198,15 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) int ret; int rc = 0; unsigned long apid, apqi; + struct vfio_ap_queue *q; struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); for_each_set_bit_inv(apid, matrix_mdev->matrix.apm, matrix_mdev->matrix.apm_max + 1) { for_each_set_bit_inv(apqi, matrix_mdev->matrix.aqm, matrix_mdev->matrix.aqm_max + 1) { - ret = vfio_ap_mdev_reset_queue(apid, apqi, 1); + q = vfio_ap_find_queue(AP_MKQID(apid, apqi)); + ret = vfio_ap_mdev_reset_queue(q, 1); /* * Regardless whether a queue turns out to be busy, or * is not operational, we need to continue resetting @@ -1177,7 +1214,6 @@ static int vfio_ap_mdev_reset_queues(struct mdev_device *mdev) */ if (ret) rc = ret; - vfio_ap_irq_disable_apqn(AP_MKQID(apid, apqi)); } } @@ -1222,13 +1258,8 @@ static void vfio_ap_mdev_release(struct mdev_device *mdev) struct ap_matrix_mdev *matrix_mdev = mdev_get_drvdata(mdev); mutex_lock(&matrix_dev->lock); - if (matrix_mdev->kvm) { - kvm_arch_crypto_clear_masks(matrix_mdev->kvm); - matrix_mdev->kvm->arch.crypto.pqap_hook = NULL; - vfio_ap_mdev_reset_queues(mdev); - kvm_put_kvm(matrix_mdev->kvm); - matrix_mdev->kvm = NULL; - } + if (matrix_mdev->kvm) + vfio_ap_mdev_unset_kvm(matrix_mdev); mutex_unlock(&matrix_dev->lock); vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index f46dde56b464..28e9d9989768 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -88,11 +88,6 @@ struct ap_matrix_mdev { struct mdev_device *mdev; }; -extern int vfio_ap_mdev_register(void); -extern void vfio_ap_mdev_unregister(void); -int vfio_ap_mdev_reset_queue(unsigned int apid, unsigned int apqi, - unsigned int retry); - struct vfio_ap_queue { struct ap_matrix_mdev *matrix_mdev; unsigned long saved_pfn; @@ -100,5 +95,10 @@ struct vfio_ap_queue { #define VFIO_AP_ISC_INVALID 0xff unsigned char saved_isc; }; -struct ap_queue_status vfio_ap_irq_disable(struct vfio_ap_queue *q); + +int vfio_ap_mdev_register(void); +void vfio_ap_mdev_unregister(void); +int vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q, + 
unsigned int retry); + #endif /* _VFIO_AP_PRIVATE_H_ */ diff --git a/drivers/scsi/fnic/vnic_dev.c b/drivers/scsi/fnic/vnic_dev.c index a2beee6e09f0..5988c300cc82 100644 --- a/drivers/scsi/fnic/vnic_dev.c +++ b/drivers/scsi/fnic/vnic_dev.c @@ -444,7 +444,8 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) fetch_index = ioread32(&vdev->devcmd2->wq.ctrl->fetch_index); if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */ pr_err("error in devcmd2 init"); - return -ENODEV; + err = -ENODEV; + goto err_free_wq; } /* @@ -460,7 +461,7 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) err = vnic_dev_alloc_desc_ring(vdev, &vdev->devcmd2->results_ring, DEVCMD2_RING_SIZE, DEVCMD2_DESC_SIZE); if (err) - goto err_free_wq; + goto err_disable_wq; vdev->devcmd2->result = (struct devcmd2_result *) vdev->devcmd2->results_ring.descs; @@ -481,8 +482,9 @@ static int vnic_dev_init_devcmd2(struct vnic_dev *vdev) err_free_desc_ring: vnic_dev_free_desc_ring(vdev, &vdev->devcmd2->results_ring); -err_free_wq: +err_disable_wq: vnic_wq_disable(&vdev->devcmd2->wq); +err_free_wq: vnic_wq_free(&vdev->devcmd2->wq); err_free_devcmd2: kfree(vdev->devcmd2); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 42e4d35e0d35..65f168c41d23 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -1744,7 +1744,7 @@ static int ibmvfc_queuecommand_lck(struct scsi_cmnd *cmnd, iu->pri_task_attr = IBMVFC_SIMPLE_TASK; } - vfc_cmd->correlation = cpu_to_be64(evt); + vfc_cmd->correlation = cpu_to_be64((u64)evt); if (likely(!(rc = ibmvfc_map_sg_data(cmnd, evt, vfc_cmd, vhost->dev)))) return ibmvfc_send_event(evt, vhost, 0); @@ -2418,7 +2418,7 @@ static int ibmvfc_abort_task_set(struct scsi_device *sdev) tmf->flags = cpu_to_be16((IBMVFC_NO_MEM_DESC | IBMVFC_TMF)); evt->sync_iu = &rsp_iu; - tmf->correlation = cpu_to_be64(evt); + tmf->correlation = cpu_to_be64((u64)evt); init_completion(&evt->comp); rsp_rc = ibmvfc_send_event(evt, vhost, default_timeout); @@ -3007,8 +3007,10 @@ static int ibmvfc_slave_configure(struct scsi_device *sdev) unsigned long flags = 0; spin_lock_irqsave(shost->host_lock, flags); - if (sdev->type == TYPE_DISK) + if (sdev->type == TYPE_DISK) { sdev->allow_restart = 1; + blk_queue_rq_timeout(sdev->request_queue, 120 * HZ); + } spin_unlock_irqrestore(shost->host_lock, flags); return 0; } diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c index d71afae6191c..841000445b9a 100644 --- a/drivers/scsi/libfc/fc_exch.c +++ b/drivers/scsi/libfc/fc_exch.c @@ -1623,8 +1623,13 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) rc = fc_exch_done_locked(ep); WARN_ON(fc_seq_exch(sp) != ep); spin_unlock_bh(&ep->ex_lock); - if (!rc) + if (!rc) { fc_exch_delete(ep); + } else { + FC_EXCH_DBG(ep, "ep is completed already," + "hence skip calling the resp\n"); + goto skip_resp; + } } /* @@ -1643,6 +1648,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp) if (!fc_invoke_resp(ep, sp, fp)) fc_frame_free(fp); +skip_resp: fc_exch_release(ep); return; rel: @@ -1899,10 +1905,16 @@ static void fc_exch_reset(struct fc_exch *ep) fc_exch_hold(ep); - if (!rc) + if (!rc) { fc_exch_delete(ep); + } else { + FC_EXCH_DBG(ep, "ep is completed already," + "hence skip calling the resp\n"); + goto skip_resp; + } fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_CLOSED)); +skip_resp: fc_seq_set_resp(sp, NULL, ep->arg); fc_exch_release(ep); } diff --git a/drivers/scsi/lpfc/lpfc_nvme.c 
b/drivers/scsi/lpfc/lpfc_nvme.c index 1cb82fa6a60e..39d147e251bf 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -559,6 +559,9 @@ __lpfc_nvme_ls_req(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return -ENODEV; } + if (!vport->phba->sli4_hba.nvmels_wq) + return -ENOMEM; + /* * there are two dma buf in the request, actually there is one and * the second one is just the start address + cmd size. diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index af192096a82b..63a4f48bdc75 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -8244,11 +8244,9 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, goto out; } + /* always store 64 bits regardless of addressing */ sense_ptr = (void *)cmd->frame + ioc->sense_off; - if (instance->consistent_mask_64bit) - put_unaligned_le64(sense_handle, sense_ptr); - else - put_unaligned_le32(sense_handle, sense_ptr); + put_unaligned_le64(sense_handle, sense_ptr); } /* diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index f80abe28f35a..0e0fe5b09496 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -42,7 +42,7 @@ MODULE_PARM_DESC(ql2xfulldump_on_mpifail, int ql2xenforce_iocb_limit = 1; module_param(ql2xenforce_iocb_limit, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ql2xenforce_iocb_limit, - "Enforce IOCB throttling, to avoid FW congestion. (default: 0)"); + "Enforce IOCB throttling, to avoid FW congestion. (default: 1)"); /* * CT6 CTX allocation cache diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 4a08c450b756..b6540b92f566 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -6881,6 +6881,7 @@ static void __exit scsi_debug_exit(void) sdebug_erase_all_stores(false); xa_destroy(per_store_ap); + kfree(sdebug_q_arr); } device_initcall(scsi_debug_init); diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c index cba1cf6a1c12..1e939a2a387f 100644 --- a/drivers/scsi/scsi_transport_srp.c +++ b/drivers/scsi/scsi_transport_srp.c @@ -541,7 +541,14 @@ int srp_reconnect_rport(struct srp_rport *rport) res = mutex_lock_interruptible(&rport->mutex); if (res) goto out; - scsi_target_block(&shost->shost_gendev); + if (rport->state != SRP_RPORT_FAIL_FAST) + /* + * sdev state must be SDEV_TRANSPORT_OFFLINE, transition + * to SDEV_BLOCK is illegal. Calling scsi_target_unblock() + * later is ok though, scsi_internal_device_unblock_nowait() + * treats SDEV_TRANSPORT_OFFLINE like SDEV_BLOCK. + */ + scsi_target_block(&shost->shost_gendev); res = rport->state != SRP_RPORT_LOST ? i->f->reconnect(rport) : -ENODEV; pr_debug("%s (state %d): transport.reconnect() returned %d\n", dev_name(&shost->shost_gendev), rport->state, res); diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig index 3f6dfed4fe84..b915b38c2b27 100644 --- a/drivers/scsi/ufs/Kconfig +++ b/drivers/scsi/ufs/Kconfig @@ -72,6 +72,7 @@ config SCSI_UFS_DWC_TC_PCI config SCSI_UFSHCD_PLATFORM tristate "Platform bus based UFS Controller support" depends on SCSI_UFSHCD + depends on HAS_IOMEM help This selects the UFS host controller support. Select this if you have an UFS controller on Platform bus. 
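The megaraid_sas hunk above drops the 32-bit/64-bit branch and always stores the sense buffer handle with put_unaligned_le64(). One way to see why an unconditional 64-bit store can coexist with a consumer that only reads a 32-bit handle is that, in little-endian byte order, a value that fits in 32 bits has the same low four bytes in both encodings and zeros in the upper four. The stand-alone sketch below is an illustration only, not driver code: the store_le32()/store_le64() helpers are hypothetical stand-ins for the kernel's put_unaligned_le32()/put_unaligned_le64(), and it assumes the frame leaves room for the full eight bytes at the sense offset.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical little-endian store helpers, stand-ins for the kernel's
 * put_unaligned_le32()/put_unaligned_le64(); byte order is serialized
 * explicitly so the demo is host-endianness independent. */
static void store_le32(uint32_t v, uint8_t *p)
{
	for (int i = 0; i < 4; i++)
		p[i] = (uint8_t)(v >> (8 * i));
}

static void store_le64(uint64_t v, uint8_t *p)
{
	for (int i = 0; i < 8; i++)
		p[i] = (uint8_t)(v >> (8 * i));
}

int main(void)
{
	uint64_t handle = 0x12345678;	/* fits in 32 bits */
	uint8_t as32[8] = { 0 }, as64[8] = { 0 };

	store_le32((uint32_t)handle, as32);
	store_le64(handle, as64);

	/* Low four bytes are identical and the upper four are zero, so a
	 * consumer that reads only a 32-bit handle sees the same value. */
	assert(memcmp(as32, as64, 4) == 0);
	for (int i = 4; i < 8; i++)
		assert(as64[i] == 0);

	printf("32-bit and 64-bit little-endian stores agree for %#llx\n",
	       (unsigned long long)handle);
	return 0;
}

Compiled with any C compiler, both assertions hold regardless of host endianness, since the helpers write bytes explicitly in little-endian order.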
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c index e31d2c5c7b23..fb32d122f2e3 100644 --- a/drivers/scsi/ufs/ufshcd.c +++ b/drivers/scsi/ufs/ufshcd.c @@ -3996,6 +3996,8 @@ int ufshcd_link_recovery(struct ufs_hba *hba) if (ret) dev_err(hba->dev, "%s: link recovery failed, err %d", __func__, ret); + else + ufshcd_clear_ua_wluns(hba); return ret; } @@ -4992,7 +4994,8 @@ ufshcd_transfer_rsp_status(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) break; } /* end of switch */ - if ((host_byte(result) != DID_OK) && !hba->silence_err_logs) + if ((host_byte(result) != DID_OK) && + (host_byte(result) != DID_REQUEUE) && !hba->silence_err_logs) ufshcd_print_trs(hba, 1 << lrbp->task_tag, true); return result; } @@ -6001,6 +6004,9 @@ skip_err_handling: ufshcd_scsi_unblock_requests(hba); ufshcd_err_handling_unprepare(hba); up(&hba->eh_sem); + + if (!err && needs_reset) + ufshcd_clear_ua_wluns(hba); } /** @@ -6295,9 +6301,13 @@ static irqreturn_t ufshcd_intr(int irq, void *__hba) intr_status = ufshcd_readl(hba, REG_INTERRUPT_STATUS); } - if (enabled_intr_status && retval == IRQ_NONE) { - dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x\n", - __func__, intr_status); + if (enabled_intr_status && retval == IRQ_NONE && + !ufshcd_eh_in_progress(hba)) { + dev_err(hba->dev, "%s: Unhandled interrupt 0x%08x (0x%08x, 0x%08x)\n", + __func__, + intr_status, + hba->ufs_stats.last_intr_status, + enabled_intr_status); ufshcd_dump_regs(hba, 0, UFSHCI_REG_SPACE_SIZE, "host_regs: "); } @@ -6341,7 +6351,10 @@ static int __ufshcd_issue_tm_cmd(struct ufs_hba *hba, * Even though we use wait_event() which sleeps indefinitely, * the maximum wait time is bounded by %TM_CMD_TIMEOUT. */ - req = blk_get_request(q, REQ_OP_DRV_OUT, BLK_MQ_REQ_RESERVED); + req = blk_get_request(q, REQ_OP_DRV_OUT, 0); + if (IS_ERR(req)) + return PTR_ERR(req); + req->end_io_data = &wait; free_slot = req->tag; WARN_ON_ONCE(free_slot < 0 || free_slot >= hba->nutmrs); @@ -6938,14 +6951,11 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba) ufshcd_set_clk_freq(hba, true); err = ufshcd_hba_enable(hba); - if (err) - goto out; /* Establish the link again and restore the device */ - err = ufshcd_probe_hba(hba, false); if (!err) - ufshcd_clear_ua_wluns(hba); -out: + err = ufshcd_probe_hba(hba, false); + if (err) dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err); ufshcd_update_evt_hist(hba, UFS_EVT_HOST_RESET, (u32)err); @@ -7716,6 +7726,8 @@ static int ufshcd_add_lus(struct ufs_hba *hba) if (ret) goto out; + ufshcd_clear_ua_wluns(hba); + /* Initialize devfreq after UFS device is detected */ if (ufshcd_is_clkscaling_supported(hba)) { memcpy(&hba->clk_scaling.saved_pwr_info.info, @@ -7917,8 +7929,6 @@ out: pm_runtime_put_sync(hba->dev); ufshcd_exit_clk_scaling(hba); ufshcd_hba_exit(hba); - } else { - ufshcd_clear_ua_wluns(hba); } } @@ -8775,6 +8785,7 @@ enable_gating: ufshcd_resume_clkscaling(hba); hba->clk_gating.is_suspended = false; hba->dev_info.b_rpm_dev_flush_capable = false; + ufshcd_clear_ua_wluns(hba); ufshcd_release(hba); out: if (hba->dev_info.b_rpm_dev_flush_capable) { @@ -8885,6 +8896,8 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) cancel_delayed_work(&hba->rpm_dev_flush_recheck_work); } + ufshcd_clear_ua_wluns(hba); + /* Schedule clock gating in case of no access to UFS device yet */ ufshcd_release(hba); diff --git a/drivers/sh/intc/core.c b/drivers/sh/intc/core.c index f8e070d67fa3..a14684ffe4c1 100644 --- a/drivers/sh/intc/core.c +++ b/drivers/sh/intc/core.c @@ -214,7 +214,7 @@ int 
__init register_intc_controller(struct intc_desc *desc) d->window[k].phys = res->start; d->window[k].size = resource_size(res); d->window[k].virt = ioremap(res->start, - resource_size(res)); + resource_size(res)); if (!d->window[k].virt) goto err2; } diff --git a/drivers/sh/intc/virq-debugfs.c b/drivers/sh/intc/virq-debugfs.c index 9e62ba9311f0..939915a07d99 100644 --- a/drivers/sh/intc/virq-debugfs.c +++ b/drivers/sh/intc/virq-debugfs.c @@ -16,7 +16,7 @@ #include <linux/debugfs.h> #include "internals.h" -static int intc_irq_xlate_debug(struct seq_file *m, void *priv) +static int intc_irq_xlate_show(struct seq_file *m, void *priv) { int i; @@ -37,17 +37,7 @@ static int intc_irq_xlate_debug(struct seq_file *m, void *priv) return 0; } -static int intc_irq_xlate_open(struct inode *inode, struct file *file) -{ - return single_open(file, intc_irq_xlate_debug, inode->i_private); -} - -static const struct file_operations intc_irq_xlate_fops = { - .open = intc_irq_xlate_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(intc_irq_xlate); static int __init intc_irq_xlate_init(void) { diff --git a/drivers/soc/atmel/soc.c b/drivers/soc/atmel/soc.c index c4472b68b7c2..698d21f50516 100644 --- a/drivers/soc/atmel/soc.c +++ b/drivers/soc/atmel/soc.c @@ -271,8 +271,21 @@ struct soc_device * __init at91_soc_init(const struct at91_soc *socs) return soc_dev; } +static const struct of_device_id at91_soc_allowed_list[] __initconst = { + { .compatible = "atmel,at91rm9200", }, + { .compatible = "atmel,at91sam9", }, + { .compatible = "atmel,sama5", }, + { .compatible = "atmel,samv7", }, + { } +}; + static int __init atmel_soc_device_init(void) { + struct device_node *np = of_find_node_by_path("/"); + + if (!of_match_node(at91_soc_allowed_list, np)) + return 0; + at91_soc_init(socs); return 0; diff --git a/drivers/soc/imx/Kconfig b/drivers/soc/imx/Kconfig index a9370f4aacca..05812f8ae734 100644 --- a/drivers/soc/imx/Kconfig +++ b/drivers/soc/imx/Kconfig @@ -13,7 +13,7 @@ config SOC_IMX8M depends on ARCH_MXC || COMPILE_TEST default ARCH_MXC && ARM64 select SOC_BUS - select ARM_GIC_V3 if ARCH_MXC + select ARM_GIC_V3 if ARCH_MXC && ARCH_MULTI_V7 help If you say yes here you get support for the NXP i.MX8M family support, it will provide the SoC info like SoC family, diff --git a/drivers/soc/litex/Kconfig b/drivers/soc/litex/Kconfig index 7c6b009b6f6c..7a7c38282e11 100644 --- a/drivers/soc/litex/Kconfig +++ b/drivers/soc/litex/Kconfig @@ -8,6 +8,7 @@ config LITEX config LITEX_SOC_CONTROLLER tristate "Enable LiteX SoC Controller driver" depends on OF || COMPILE_TEST + depends on HAS_IOMEM select LITEX help This option enables the SoC Controller Driver which verifies diff --git a/drivers/soc/litex/litex_soc_ctrl.c b/drivers/soc/litex/litex_soc_ctrl.c index 1217cafdfd4d..9b0766384570 100644 --- a/drivers/soc/litex/litex_soc_ctrl.c +++ b/drivers/soc/litex/litex_soc_ctrl.c @@ -140,12 +140,13 @@ struct litex_soc_ctrl_device { void __iomem *base; }; +#ifdef CONFIG_OF static const struct of_device_id litex_soc_ctrl_of_match[] = { {.compatible = "litex,soc-controller"}, {}, }; - MODULE_DEVICE_TABLE(of, litex_soc_ctrl_of_match); +#endif /* CONFIG_OF */ static int litex_soc_ctrl_probe(struct platform_device *pdev) { diff --git a/drivers/soc/sunxi/sunxi_mbus.c b/drivers/soc/sunxi/sunxi_mbus.c index e9925c8487d7..d90e4a264b6f 100644 --- a/drivers/soc/sunxi/sunxi_mbus.c +++ b/drivers/soc/sunxi/sunxi_mbus.c @@ -23,12 +23,7 @@ static const char * const sunxi_mbus_devices[] = { 
"allwinner,sun7i-a20-display-engine", "allwinner,sun8i-a23-display-engine", "allwinner,sun8i-a33-display-engine", - "allwinner,sun8i-a83t-display-engine", - "allwinner,sun8i-h3-display-engine", - "allwinner,sun8i-r40-display-engine", - "allwinner,sun8i-v3s-display-engine", "allwinner,sun9i-a80-display-engine", - "allwinner,sun50i-a64-display-engine", /* * And now we have the regular devices connected to the MBUS diff --git a/drivers/soc/ti/omap_prm.c b/drivers/soc/ti/omap_prm.c index 77f0051358f1..bf1468e5bccb 100644 --- a/drivers/soc/ti/omap_prm.c +++ b/drivers/soc/ti/omap_prm.c @@ -860,6 +860,7 @@ static int omap_prm_reset_init(struct platform_device *pdev, const struct omap_rst_map *map; struct ti_prm_platform_data *pdata = dev_get_platdata(&pdev->dev); char buf[32]; + u32 v; /* * Check if we have controllable resets. If either rstctrl is non-zero @@ -907,6 +908,16 @@ static int omap_prm_reset_init(struct platform_device *pdev, map++; } + /* Quirk handling to assert rst_map_012 bits on reset and avoid errors */ + if (prm->data->rstmap == rst_map_012) { + v = readl_relaxed(reset->prm->base + reset->prm->data->rstctrl); + if ((v & reset->mask) != reset->mask) { + dev_dbg(&pdev->dev, "Asserting all resets: %08x\n", v); + writel_relaxed(reset->mask, reset->prm->base + + reset->prm->data->rstctrl); + } + } + return devm_reset_controller_register(&pdev->dev, &reset->rcdev); } diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c index cbc4c28c1541..62ea0c9e321b 100644 --- a/drivers/spi/spi-altera.c +++ b/drivers/spi/spi-altera.c @@ -254,7 +254,8 @@ static int altera_spi_probe(struct platform_device *pdev) dev_err(&pdev->dev, "Invalid number of chipselect: %hu\n", pdata->num_chipselect); - return -EINVAL; + err = -EINVAL; + goto exit; } master->num_chipselect = pdata->num_chipselect; diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index 70467b9d61ba..a3afd1b9ac56 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -115,6 +115,7 @@ struct cdns_spi { void __iomem *regs; struct clk *ref_clk; struct clk *pclk; + unsigned int clk_rate; u32 speed_hz; const u8 *txbuf; u8 *rxbuf; @@ -250,7 +251,7 @@ static void cdns_spi_config_clock_freq(struct spi_device *spi, u32 ctrl_reg, baud_rate_val; unsigned long frequency; - frequency = clk_get_rate(xspi->ref_clk); + frequency = xspi->clk_rate; ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); @@ -558,8 +559,9 @@ static int cdns_spi_probe(struct platform_device *pdev) master->auto_runtime_pm = true; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + xspi->clk_rate = clk_get_rate(xspi->ref_clk); /* Set to default valid value */ - master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4; + master->max_speed_hz = xspi->clk_rate / 4; xspi->speed_hz = master->max_speed_hz; master->bits_per_word_mask = SPI_BPW_MASK(8); diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index 9494257e1c33..6d8e0a05a535 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -115,14 +115,13 @@ static void fsl_spi_chipselect(struct spi_device *spi, int value) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); struct fsl_spi_platform_data *pdata; - bool pol = spi->mode & SPI_CS_HIGH; struct spi_mpc8xxx_cs *cs = spi->controller_state; pdata = spi->dev.parent->parent->platform_data; if (value == BITBANG_CS_INACTIVE) { if (pdata->cs_control) - pdata->cs_control(spi, !pol); + pdata->cs_control(spi, false); } if (value == BITBANG_CS_ACTIVE) { @@ -134,7 +133,7 @@ static void 
fsl_spi_chipselect(struct spi_device *spi, int value) fsl_spi_change_mode(spi); if (pdata->cs_control) - pdata->cs_control(spi, pol); + pdata->cs_control(spi, true); } } diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c index 859910ec8d9f..8cb4d923aeaa 100644 --- a/drivers/spi/spidev.c +++ b/drivers/spi/spidev.c @@ -682,6 +682,7 @@ static const struct of_device_id spidev_dt_ids[] = { { .compatible = "lwn,bk4" }, { .compatible = "dh,dhcom-board" }, { .compatible = "menlo,m53cpld" }, + { .compatible = "cisco,spi-petra" }, {}, }; MODULE_DEVICE_TABLE(of, spidev_dt_ids); diff --git a/drivers/staging/media/hantro/hantro_v4l2.c b/drivers/staging/media/hantro/hantro_v4l2.c index b668a82d40ad..f5fbdbc4ffdb 100644 --- a/drivers/staging/media/hantro/hantro_v4l2.c +++ b/drivers/staging/media/hantro/hantro_v4l2.c @@ -367,7 +367,7 @@ hantro_reset_raw_fmt(struct hantro_ctx *ctx) hantro_reset_fmt(raw_fmt, raw_vpu_fmt); raw_fmt->width = encoded_fmt->width; - raw_fmt->width = encoded_fmt->width; + raw_fmt->height = encoded_fmt->height; if (ctx->is_encoder) hantro_set_fmt_out(ctx, raw_fmt); else diff --git a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c index 781c84a9b1b7..de7442d4834d 100644 --- a/drivers/staging/media/sunxi/cedrus/cedrus_h264.c +++ b/drivers/staging/media/sunxi/cedrus/cedrus_h264.c @@ -203,7 +203,7 @@ static void _cedrus_write_ref_list(struct cedrus_ctx *ctx, position = cedrus_buf->codec.h264.position; sram_array[i] |= position << 1; - if (ref_list[i].fields & V4L2_H264_BOTTOM_FIELD_REF) + if (ref_list[i].fields == V4L2_H264_BOTTOM_FIELD_REF) sram_array[i] |= BIT(0); } diff --git a/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h b/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h index ab5a8627d371..f798b0c744a4 100644 --- a/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h +++ b/drivers/staging/rtl8723bs/include/rtw_wifi_regd.h @@ -20,9 +20,9 @@ enum country_code_type_t { COUNTRY_CODE_MAX }; -int rtw_regd_init(struct adapter *padapter, - void (*reg_notifier)(struct wiphy *wiphy, - struct regulatory_request *request)); +void rtw_regd_init(struct wiphy *wiphy, + void (*reg_notifier)(struct wiphy *wiphy, + struct regulatory_request *request)); void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request); diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c index bf1417236161..11032316c53d 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_cfg80211.c @@ -3211,9 +3211,6 @@ void rtw_cfg80211_init_wiphy(struct adapter *padapter) rtw_cfg80211_init_ht_capab(&bands->ht_cap, NL80211_BAND_2GHZ, rf_type); } - /* init regulary domain */ - rtw_regd_init(padapter, rtw_reg_notifier); - /* copy mac_addr to wiphy */ memcpy(wiphy->perm_addr, padapter->eeprompriv.mac_addr, ETH_ALEN); @@ -3328,6 +3325,9 @@ int rtw_wdev_alloc(struct adapter *padapter, struct device *dev) *((struct adapter **)wiphy_priv(wiphy)) = padapter; rtw_cfg80211_preinit_wiphy(padapter, wiphy); + /* init regulary domain */ + rtw_regd_init(wiphy, rtw_reg_notifier); + ret = wiphy_register(wiphy); if (ret < 0) { DBG_8192C("Couldn't register wiphy device\n"); diff --git a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c index b2208e5f190a..301ffff12e82 100644 --- a/drivers/staging/rtl8723bs/os_dep/sdio_intf.c +++ b/drivers/staging/rtl8723bs/os_dep/sdio_intf.c @@ -339,8 +339,6 @@ static struct adapter 
*rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct padapter = rtw_netdev_priv(pnetdev); - rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj)); - /* 3 3. init driver special setting, interface, OS and hardware relative */ /* 4 3.1 set hardware operation functions */ @@ -378,6 +376,8 @@ static struct adapter *rtw_sdio_if1_init(struct dvobj_priv *dvobj, const struct goto free_hal_data; } + rtw_wdev_alloc(padapter, dvobj_to_dev(dvobj)); + /* 3 8. get WLan MAC address */ /* set mac addr */ rtw_macaddr_cfg(&psdio->func->dev, padapter->eeprompriv.mac_addr); diff --git a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c index 578b9f734231..2833fc6901e6 100644 --- a/drivers/staging/rtl8723bs/os_dep/wifi_regd.c +++ b/drivers/staging/rtl8723bs/os_dep/wifi_regd.c @@ -139,15 +139,11 @@ static void _rtw_regd_init_wiphy(struct rtw_regulatory *reg, _rtw_reg_apply_flags(wiphy); } -int rtw_regd_init(struct adapter *padapter, - void (*reg_notifier)(struct wiphy *wiphy, - struct regulatory_request *request)) +void rtw_regd_init(struct wiphy *wiphy, + void (*reg_notifier)(struct wiphy *wiphy, + struct regulatory_request *request)) { - struct wiphy *wiphy = padapter->rtw_wdev->wiphy; - _rtw_regd_init_wiphy(NULL, wiphy, reg_notifier); - - return 0; } void rtw_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index 893d1b406c29..1a9c50401bdb 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -896,7 +896,7 @@ int iscsit_setup_np( else len = sizeof(struct sockaddr_in); /* - * Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY. + * Set SO_REUSEADDR, and disable Nagle Algorithm with TCP_NODELAY. 
*/ if (np->np_network_transport == ISCSI_TCP) tcp_sock_set_nodelay(sock->sk); diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 6b171fff007b..a5991df23581 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -562,8 +562,6 @@ tcmu_get_block_page(struct tcmu_dev *udev, uint32_t dbi) static inline void tcmu_free_cmd(struct tcmu_cmd *tcmu_cmd) { - if (tcmu_cmd->se_cmd) - tcmu_cmd->se_cmd->priv = NULL; kfree(tcmu_cmd->dbi); kmem_cache_free(tcmu_cmd_cache, tcmu_cmd); } @@ -1174,11 +1172,12 @@ tcmu_queue_cmd(struct se_cmd *se_cmd) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; mutex_lock(&udev->cmdr_lock); - se_cmd->priv = tcmu_cmd; if (!(se_cmd->transport_state & CMD_T_ABORTED)) ret = queue_cmd_ring(tcmu_cmd, &scsi_ret); if (ret < 0) tcmu_free_cmd(tcmu_cmd); + else + se_cmd->priv = tcmu_cmd; mutex_unlock(&udev->cmdr_lock); return scsi_ret; } @@ -1241,6 +1240,7 @@ tcmu_tmr_notify(struct se_device *se_dev, enum tcm_tmreq_table tmf, list_del_init(&cmd->queue_entry); tcmu_free_cmd(cmd); + se_cmd->priv = NULL; target_complete_cmd(se_cmd, SAM_STAT_TASK_ABORTED); unqueued = true; } @@ -1332,6 +1332,7 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry * } done: + se_cmd->priv = NULL; if (read_len_valid) { pr_debug("read_len = %d\n", read_len); target_complete_cmd_with_length(cmd->se_cmd, @@ -1478,6 +1479,7 @@ static void tcmu_check_expired_queue_cmd(struct tcmu_cmd *cmd) se_cmd = cmd->se_cmd; tcmu_free_cmd(cmd); + se_cmd->priv = NULL; target_complete_cmd(se_cmd, SAM_STAT_TASK_SET_FULL); } @@ -1592,6 +1594,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail) * removed then LIO core will do the right thing and * fail the retry. */ + tcmu_cmd->se_cmd->priv = NULL; target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_BUSY); tcmu_free_cmd(tcmu_cmd); continue; @@ -1605,6 +1608,7 @@ static void run_qfull_queue(struct tcmu_dev *udev, bool fail) * Ignore scsi_ret for now. target_complete_cmd * drops it. */ + tcmu_cmd->se_cmd->priv = NULL; target_complete_cmd(tcmu_cmd->se_cmd, SAM_STAT_CHECK_CONDITION); tcmu_free_cmd(tcmu_cmd); @@ -2212,6 +2216,7 @@ static void tcmu_reset_ring(struct tcmu_dev *udev, u8 err_level) if (!test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) { WARN_ON(!cmd->se_cmd); list_del_init(&cmd->queue_entry); + cmd->se_cmd->priv = NULL; if (err_level == 1) { /* * Userspace was not able to start the diff --git a/drivers/tee/optee/call.c b/drivers/tee/optee/call.c index c981757ba0d4..780d7c4fd756 100644 --- a/drivers/tee/optee/call.c +++ b/drivers/tee/optee/call.c @@ -7,6 +7,7 @@ #include <linux/err.h> #include <linux/errno.h> #include <linux/mm.h> +#include <linux/sched.h> #include <linux/slab.h> #include <linux/tee_drv.h> #include <linux/types.h> @@ -148,7 +149,8 @@ u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg) */ optee_cq_wait_for_completion(&optee->call_queue, &w); } else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) { - might_sleep(); + if (need_resched()) + cond_resched(); param.a0 = res.a0; param.a1 = res.a1; param.a2 = res.a2; diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c index a5f988a9f948..b5442f979b4d 100644 --- a/drivers/thunderbolt/acpi.c +++ b/drivers/thunderbolt/acpi.c @@ -56,7 +56,7 @@ static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data, * managed with the xHCI and the SuperSpeed hub so we create the * link from xHCI instead. 
*/ - while (!dev_is_pci(dev)) + while (dev && !dev_is_pci(dev)) dev = dev->parent; if (!dev) diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 8b7f941a9bb7..b8c4159bc32d 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -2316,7 +2316,7 @@ static int icm_usb4_switch_nvm_authenticate_status(struct tb_switch *sw, if (auth && auth->reply.route_hi == sw->config.route_hi && auth->reply.route_lo == sw->config.route_lo) { - tb_dbg(tb, "NVM_AUTH found for %llx flags 0x%#x status %#x\n", + tb_dbg(tb, "NVM_AUTH found for %llx flags %#x status %#x\n", tb_route(sw), auth->reply.hdr.flags, auth->reply.status); if (auth->reply.hdr.flags & ICM_FLAGS_ERROR) ret = -EIO; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 319d68c8a5df..219e85756171 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -2081,9 +2081,6 @@ static int canon_copy_from_read_buf(struct tty_struct *tty, return 0; } -extern ssize_t redirected_tty_write(struct file *, const char __user *, - size_t, loff_t *); - /** * job_control - check job control * @tty: tty @@ -2105,7 +2102,7 @@ static int job_control(struct tty_struct *tty, struct file *file) /* NOTE: not yet done after every sleep pending a thorough check of the logic of this change. -- jlc */ /* don't stop on /dev/console */ - if (file->f_op->write == redirected_tty_write) + if (file->f_op->write_iter == redirected_tty_write) return 0; return __tty_check_change(tty, SIGTTIN); @@ -2309,7 +2306,7 @@ static ssize_t n_tty_write(struct tty_struct *tty, struct file *file, ssize_t retval = 0; /* Job control check -- must be done at start (POSIX.1 7.1.1.4). */ - if (L_TOSTOP(tty) && file->f_op->write != redirected_tty_write) { + if (L_TOSTOP(tty) && file->f_op->write_iter != redirected_tty_write) { retval = tty_check_change(tty); if (retval) return retval; diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c index 118b29912289..e0c00a1b0763 100644 --- a/drivers/tty/serial/mvebu-uart.c +++ b/drivers/tty/serial/mvebu-uart.c @@ -648,6 +648,14 @@ static void wait_for_xmitr(struct uart_port *port) (val & STAT_TX_RDY(port)), 1, 10000); } +static void wait_for_xmite(struct uart_port *port) +{ + u32 val; + + readl_poll_timeout_atomic(port->membase + UART_STAT, val, + (val & STAT_TX_EMP), 1, 10000); +} + static void mvebu_uart_console_putchar(struct uart_port *port, int ch) { wait_for_xmitr(port); @@ -675,7 +683,7 @@ static void mvebu_uart_console_write(struct console *co, const char *s, uart_console_write(port, s, count, mvebu_uart_console_putchar); - wait_for_xmitr(port); + wait_for_xmite(port); if (ier) writel(ier, port->membase + UART_CTRL(port)); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 8034489337d7..816e709afa56 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -143,12 +143,9 @@ LIST_HEAD(tty_drivers); /* linked list of tty drivers */ DEFINE_MUTEX(tty_mutex); static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *); -static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *); -ssize_t redirected_tty_write(struct file *, const char __user *, - size_t, loff_t *); +static ssize_t tty_write(struct kiocb *, struct iov_iter *); static __poll_t tty_poll(struct file *, poll_table *); static int tty_open(struct inode *, struct file *); -long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg); #ifdef CONFIG_COMPAT static long tty_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg); @@ -438,8 +435,7 @@ static 
ssize_t hung_up_tty_read(struct file *file, char __user *buf, return 0; } -static ssize_t hung_up_tty_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) +static ssize_t hung_up_tty_write(struct kiocb *iocb, struct iov_iter *from) { return -EIO; } @@ -478,7 +474,8 @@ static void tty_show_fdinfo(struct seq_file *m, struct file *file) static const struct file_operations tty_fops = { .llseek = no_llseek, .read = tty_read, - .write = tty_write, + .write_iter = tty_write, + .splice_write = iter_file_splice_write, .poll = tty_poll, .unlocked_ioctl = tty_ioctl, .compat_ioctl = tty_compat_ioctl, @@ -491,7 +488,8 @@ static const struct file_operations tty_fops = { static const struct file_operations console_fops = { .llseek = no_llseek, .read = tty_read, - .write = redirected_tty_write, + .write_iter = redirected_tty_write, + .splice_write = iter_file_splice_write, .poll = tty_poll, .unlocked_ioctl = tty_ioctl, .compat_ioctl = tty_compat_ioctl, @@ -503,7 +501,7 @@ static const struct file_operations console_fops = { static const struct file_operations hung_up_tty_fops = { .llseek = no_llseek, .read = hung_up_tty_read, - .write = hung_up_tty_write, + .write_iter = hung_up_tty_write, .poll = hung_up_tty_poll, .unlocked_ioctl = hung_up_tty_ioctl, .compat_ioctl = hung_up_tty_compat_ioctl, @@ -606,9 +604,9 @@ static void __tty_hangup(struct tty_struct *tty, int exit_session) /* This breaks for file handles being sent over AF_UNIX sockets ? */ list_for_each_entry(priv, &tty->tty_files, list) { filp = priv->file; - if (filp->f_op->write == redirected_tty_write) + if (filp->f_op->write_iter == redirected_tty_write) cons_filp = filp; - if (filp->f_op->write != tty_write) + if (filp->f_op->write_iter != tty_write) continue; closecount++; __tty_fasync(-1, filp, 0); /* can't block */ @@ -901,9 +899,9 @@ static inline ssize_t do_tty_write( ssize_t (*write)(struct tty_struct *, struct file *, const unsigned char *, size_t), struct tty_struct *tty, struct file *file, - const char __user *buf, - size_t count) + struct iov_iter *from) { + size_t count = iov_iter_count(from); ssize_t ret, written = 0; unsigned int chunk; @@ -955,14 +953,20 @@ static inline ssize_t do_tty_write( size_t size = count; if (size > chunk) size = chunk; + ret = -EFAULT; - if (copy_from_user(tty->write_buf, buf, size)) + if (copy_from_iter(tty->write_buf, size, from) != size) break; + ret = write(tty, file, tty->write_buf, size); if (ret <= 0) break; + + /* FIXME! Have Al check this! */ + if (ret != size) + iov_iter_revert(from, size-ret); + written += ret; - buf += ret; count -= ret; if (!count) break; @@ -1022,8 +1026,7 @@ void tty_write_message(struct tty_struct *tty, char *msg) * write method will not be invoked in parallel for each device. 
*/ -static ssize_t tty_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) +static ssize_t file_tty_write(struct file *file, struct kiocb *iocb, struct iov_iter *from) { struct tty_struct *tty = file_tty(file); struct tty_ldisc *ld; @@ -1038,17 +1041,21 @@ static ssize_t tty_write(struct file *file, const char __user *buf, tty_err(tty, "missing write_room method\n"); ld = tty_ldisc_ref_wait(tty); if (!ld) - return hung_up_tty_write(file, buf, count, ppos); + return hung_up_tty_write(iocb, from); if (!ld->ops->write) ret = -EIO; else - ret = do_tty_write(ld->ops->write, tty, file, buf, count); + ret = do_tty_write(ld->ops->write, tty, file, from); tty_ldisc_deref(ld); return ret; } -ssize_t redirected_tty_write(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) +static ssize_t tty_write(struct kiocb *iocb, struct iov_iter *from) +{ + return file_tty_write(iocb->ki_filp, iocb, from); +} + +ssize_t redirected_tty_write(struct kiocb *iocb, struct iov_iter *iter) { struct file *p = NULL; @@ -1057,13 +1064,17 @@ ssize_t redirected_tty_write(struct file *file, const char __user *buf, p = get_file(redirect); spin_unlock(&redirect_lock); + /* + * We know the redirected tty is just another tty, we can can + * call file_tty_write() directly with that file pointer. + */ if (p) { ssize_t res; - res = vfs_write(p, buf, count, &p->f_pos); + res = file_tty_write(p, iocb, iter); fput(p); return res; } - return tty_write(file, buf, count, ppos); + return tty_write(iocb, iter); } /* @@ -2295,7 +2306,7 @@ static int tioccons(struct file *file) { if (!capable(CAP_SYS_ADMIN)) return -EPERM; - if (file->f_op->write == redirected_tty_write) { + if (file->f_op->write_iter == redirected_tty_write) { struct file *f; spin_lock(&redirect_lock); f = redirect; @@ -2305,6 +2316,12 @@ static int tioccons(struct file *file) fput(f); return 0; } + if (file->f_op->write_iter != tty_write) + return -ENOTTY; + if (!(file->f_mode & FMODE_WRITE)) + return -EBADF; + if (!(file->f_mode & FMODE_CAN_WRITE)) + return -EINVAL; spin_lock(&redirect_lock); if (redirect) { spin_unlock(&redirect_lock); diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c index 22a56c4dce67..7990fee03fe4 100644 --- a/drivers/usb/cdns3/cdns3-imx.c +++ b/drivers/usb/cdns3/cdns3-imx.c @@ -185,7 +185,11 @@ static int cdns_imx_probe(struct platform_device *pdev) } data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks); - data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks; + data->clks = devm_kmemdup(dev, imx_cdns3_core_clks, + sizeof(imx_cdns3_core_clks), GFP_KERNEL); + if (!data->clks) + return -ENOMEM; + ret = devm_clk_bulk_get(dev, data->num_clks, data->clks); if (ret) return ret; @@ -214,20 +218,16 @@ err: return ret; } -static int cdns_imx_remove_core(struct device *dev, void *data) -{ - struct platform_device *pdev = to_platform_device(dev); - - platform_device_unregister(pdev); - - return 0; -} - static int cdns_imx_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; + struct cdns_imx *data = dev_get_drvdata(dev); - device_for_each_child(dev, NULL, cdns_imx_remove_core); + pm_runtime_get_sync(dev); + of_platform_depopulate(dev); + clk_bulk_disable_unprepare(data->num_clks, data->clks); + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); platform_set_drvdata(pdev, NULL); return 0; diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c index 134dc2005ce9..c9f6e9758288 100644 --- a/drivers/usb/class/usblp.c +++ b/drivers/usb/class/usblp.c @@ -1329,14 
+1329,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol) if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL) return -EINVAL; - alts = usblp->protocol[protocol].alt_setting; - if (alts < 0) - return -EINVAL; - r = usb_set_interface(usblp->dev, usblp->ifnum, alts); - if (r < 0) { - printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", - alts, usblp->ifnum); - return r; + /* Don't unnecessarily set the interface if there's a single alt. */ + if (usblp->intf->num_altsetting > 1) { + alts = usblp->protocol[protocol].alt_setting; + if (alts < 0) + return -EINVAL; + r = usb_set_interface(usblp->dev, usblp->ifnum, alts); + if (r < 0) { + printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n", + alts, usblp->ifnum); + return r; + } } usblp->bidir = (usblp->protocol[protocol].epread != NULL); diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 0a0d11151cfb..ad4c94366dad 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -1543,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep, static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, u32 windex) { - struct dwc2_hsotg_ep *ep; int dir = (windex & USB_DIR_IN) ? 1 : 0; int idx = windex & 0x7F; @@ -1553,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg, if (idx > hsotg->num_of_eps) return NULL; - ep = index_to_ep(hsotg, idx, dir); - - if (idx && ep->dir_in != dir) - return NULL; - - return ep; + return index_to_ep(hsotg, idx, dir); } /** diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 841daec70b6e..3101f0dcf6ae 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1758,7 +1758,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) if (PMSG_IS_AUTO(msg)) break; - ret = dwc3_core_init(dwc); + ret = dwc3_core_init_for_resume(dwc); if (ret) return ret; diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c index 30313b233680..99c7fc0d1d59 100644 --- a/drivers/usb/gadget/legacy/ether.c +++ b/drivers/usb/gadget/legacy/ether.c @@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev) struct usb_descriptor_header *usb_desc; usb_desc = usb_otg_descriptor_alloc(gadget); - if (!usb_desc) + if (!usb_desc) { + status = -ENOMEM; goto fail1; + } usb_otg_descriptor_init(gadget, usb_desc); otg_desc[0] = usb_desc; otg_desc[1] = NULL; diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c index 0bd6b20435b8..02d8bfae58fb 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c @@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep, u32 state, reg, loops; /* Stop DMA activity */ - writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); + if (ep->epn.desc_mode) + writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); + else + writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT); /* Wait for it to complete */ for (loops = 0; loops < 1000; loops++) { diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c index 6497185ec4e7..bfd8e77788e2 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c @@ -999,8 +999,10 @@ static int ast_vhub_of_parse_str_desc(struct ast_vhub *vhub, str_array[offset].s = NULL; ret = ast_vhub_str_alloc_add(vhub, &lang_str); - if (ret) + if (ret) { + of_node_put(child); 
break; + } } return ret; diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig index 3e88c7670b2e..fb01ff47b64c 100644 --- a/drivers/usb/gadget/udc/bdc/Kconfig +++ b/drivers/usb/gadget/udc/bdc/Kconfig @@ -17,7 +17,7 @@ if USB_BDC_UDC comment "Platform Support" config USB_BDC_PCI tristate "BDC support for PCIe based platforms" - depends on USB_PCI + depends on USB_PCI && BROKEN default USB_BDC_UDC help Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform. diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 6a62bbd01324..ea114f922ccf 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -1529,10 +1529,13 @@ static ssize_t soft_connect_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { struct usb_udc *udc = container_of(dev, struct usb_udc, dev); + ssize_t ret; + mutex_lock(&udc_lock); if (!udc->driver) { dev_err(dev, "soft-connect without a gadget driver\n"); - return -EOPNOTSUPP; + ret = -EOPNOTSUPP; + goto out; } if (sysfs_streq(buf, "connect")) { @@ -1543,10 +1546,14 @@ static ssize_t soft_connect_store(struct device *dev, usb_gadget_udc_stop(udc); } else { dev_err(dev, "unsupported command '%s'\n", buf); - return -EINVAL; + ret = -EINVAL; + goto out; } - return n; + ret = n; +out: + mutex_unlock(&udc_lock); + return ret; } static DEVICE_ATTR_WO(soft_connect); diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c index 1a953f44183a..57067763b100 100644 --- a/drivers/usb/gadget/udc/dummy_hcd.c +++ b/drivers/usb/gadget/udc/dummy_hcd.c @@ -2270,17 +2270,20 @@ static int dummy_hub_control( } fallthrough; case USB_PORT_FEAT_RESET: + if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION)) + break; /* if it's already enabled, disable */ if (hcd->speed == HCD_USB3) { - dum_hcd->port_status = 0; dum_hcd->port_status = (USB_SS_PORT_STAT_POWER | USB_PORT_STAT_CONNECTION | USB_PORT_STAT_RESET); - } else + } else { dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED); + dum_hcd->port_status |= USB_PORT_STAT_RESET; + } /* * We want to reset device status. All but the * Self powered feature @@ -2292,7 +2295,8 @@ static int dummy_hub_control( * interval? Is it still 50msec as for HS? 
*/ dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50); - fallthrough; + set_link_state(dum_hcd); + break; case USB_PORT_FEAT_C_CONNECTION: case USB_PORT_FEAT_C_RESET: case USB_PORT_FEAT_C_ENABLE: diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c index e358ae17d51e..1926b328b6aa 100644 --- a/drivers/usb/host/ehci-hcd.c +++ b/drivers/usb/host/ehci-hcd.c @@ -574,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd) struct ehci_hcd *ehci = hcd_to_ehci (hcd); u32 temp; u32 hcc_params; + int rc; hcd->uses_new_polling = 1; @@ -629,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd) down_write(&ehci_cf_port_reset_rwsem); ehci->rh_state = EHCI_RH_RUNNING; ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag); + + /* Wait until HC become operational */ ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */ msleep(5); + rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000); + up_write(&ehci_cf_port_reset_rwsem); + + if (rc) { + ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n", + ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc); + return rc; + } + ehci->last_periodic_enable = ktime_get_real(); temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase)); diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c index 087402aec5cb..9f9ab5ccea88 100644 --- a/drivers/usb/host/ehci-hub.c +++ b/drivers/usb/host/ehci-hub.c @@ -345,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd) unlink_empty_async_suspended(ehci); + /* Some Synopsys controllers mistakenly leave IAA turned on */ + ehci_writel(ehci, STS_IAA, &ehci->regs->status); + /* Any IAA cycle that started before the suspend is now invalid */ end_iaa_cycle(ehci); ehci_handle_start_intr_unlinks(ehci); diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c index 45c54d56ecbd..b45e5bf08997 100644 --- a/drivers/usb/host/xhci-mtk-sch.c +++ b/drivers/usb/host/xhci-mtk-sch.c @@ -200,6 +200,8 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev, sch_ep->sch_tt = tt; sch_ep->ep = ep; + INIT_LIST_HEAD(&sch_ep->endpoint); + INIT_LIST_HEAD(&sch_ep->tt_endpoint); return sch_ep; } @@ -373,6 +375,7 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw, sch_ep->bw_budget_table[j]; } } + sch_ep->allocated = used; } static int check_sch_tt(struct usb_device *udev, @@ -541,6 +544,22 @@ static int check_sch_bw(struct usb_device *udev, return 0; } +static void destroy_sch_ep(struct usb_device *udev, + struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep) +{ + /* only release ep bw check passed by check_sch_bw() */ + if (sch_ep->allocated) + update_bus_bw(sch_bw, sch_ep, 0); + + list_del(&sch_ep->endpoint); + + if (sch_ep->sch_tt) { + list_del(&sch_ep->tt_endpoint); + drop_tt(udev); + } + kfree(sch_ep); +} + static bool need_bw_sch(struct usb_host_endpoint *ep, enum usb_device_speed speed, int has_tt) { @@ -583,6 +602,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk) mtk->sch_array = sch_array; + INIT_LIST_HEAD(&mtk->bw_ep_chk_list); + return 0; } EXPORT_SYMBOL_GPL(xhci_mtk_sch_init); @@ -601,19 +622,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, struct xhci_ep_ctx *ep_ctx; struct xhci_slot_ctx *slot_ctx; struct xhci_virt_device *virt_dev; - struct mu3h_sch_bw_info *sch_bw; struct mu3h_sch_ep_info *sch_ep; - struct mu3h_sch_bw_info *sch_array; unsigned int ep_index; - int bw_index; - int ret = 0; xhci = hcd_to_xhci(hcd); virt_dev = xhci->devs[udev->slot_id]; ep_index = 
xhci_get_endpoint_index(&ep->desc); slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx); ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); - sch_array = mtk->sch_array; xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n", __func__, usb_endpoint_type(&ep->desc), udev->speed, @@ -632,35 +648,13 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, return 0; } - bw_index = get_bw_index(xhci, udev, ep); - sch_bw = &sch_array[bw_index]; - sch_ep = create_sch_ep(udev, ep, ep_ctx); if (IS_ERR_OR_NULL(sch_ep)) return -ENOMEM; setup_sch_info(udev, ep_ctx, sch_ep); - ret = check_sch_bw(udev, sch_bw, sch_ep); - if (ret) { - xhci_err(xhci, "Not enough bandwidth!\n"); - if (is_fs_or_ls(udev->speed)) - drop_tt(udev); - - kfree(sch_ep); - return -ENOSPC; - } - - list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); - - ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts) - | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode)); - ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset) - | EP_BREPEAT(sch_ep->repeat)); - - xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", - sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, - sch_ep->offset, sch_ep->repeat); + list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list); return 0; } @@ -675,7 +669,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, struct xhci_virt_device *virt_dev; struct mu3h_sch_bw_info *sch_array; struct mu3h_sch_bw_info *sch_bw; - struct mu3h_sch_ep_info *sch_ep; + struct mu3h_sch_ep_info *sch_ep, *tmp; int bw_index; xhci = hcd_to_xhci(hcd); @@ -694,17 +688,79 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, bw_index = get_bw_index(xhci, udev, ep); sch_bw = &sch_array[bw_index]; - list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) { + list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) { if (sch_ep->ep == ep) { - update_bus_bw(sch_bw, sch_ep, 0); - list_del(&sch_ep->endpoint); - if (is_fs_or_ls(udev->speed)) { - list_del(&sch_ep->tt_endpoint); - drop_tt(udev); - } - kfree(sch_ep); + destroy_sch_ep(udev, sch_bw, sch_ep); break; } } } EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk); + +int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id]; + struct mu3h_sch_bw_info *sch_bw; + struct mu3h_sch_ep_info *sch_ep, *tmp; + int bw_index, ret; + + xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); + + list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) { + bw_index = get_bw_index(xhci, udev, sch_ep->ep); + sch_bw = &mtk->sch_array[bw_index]; + + ret = check_sch_bw(udev, sch_bw, sch_ep); + if (ret) { + xhci_err(xhci, "Not enough bandwidth!\n"); + return -ENOSPC; + } + } + + list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { + struct xhci_ep_ctx *ep_ctx; + struct usb_host_endpoint *ep = sch_ep->ep; + unsigned int ep_index = xhci_get_endpoint_index(&ep->desc); + + bw_index = get_bw_index(xhci, udev, ep); + sch_bw = &mtk->sch_array[bw_index]; + + list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list); + + ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); + ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts) + | EP_BCSCOUNT(sch_ep->cs_count) + | EP_BBM(sch_ep->burst_mode)); + ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset) + | 
EP_BREPEAT(sch_ep->repeat)); + + xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n", + sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode, + sch_ep->offset, sch_ep->repeat); + } + + return xhci_check_bandwidth(hcd, udev); +} +EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth); + +void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +{ + struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd); + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct mu3h_sch_bw_info *sch_bw; + struct mu3h_sch_ep_info *sch_ep, *tmp; + int bw_index; + + xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev)); + + list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) { + bw_index = get_bw_index(xhci, udev, sch_ep->ep); + sch_bw = &mtk->sch_array[bw_index]; + destroy_sch_ep(udev, sch_bw, sch_ep); + } + + xhci_reset_bandwidth(hcd, udev); +} +EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth); diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 8f321f39ab96..fe010cc61f19 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable) static int xhci_mtk_setup(struct usb_hcd *hcd); static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = { .reset = xhci_mtk_setup, + .check_bandwidth = xhci_mtk_check_bandwidth, + .reset_bandwidth = xhci_mtk_reset_bandwidth, }; static struct hc_driver __read_mostly xhci_mtk_hc_driver; diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h index a93cfe817904..cbb09dfea62e 100644 --- a/drivers/usb/host/xhci-mtk.h +++ b/drivers/usb/host/xhci-mtk.h @@ -59,6 +59,7 @@ struct mu3h_sch_bw_info { * @ep_type: endpoint type * @maxpkt: max packet size of endpoint * @ep: address of usb_host_endpoint struct + * @allocated: the bandwidth is aready allocated from bus_bw * @offset: which uframe of the interval that transfer should be * scheduled first time within the interval * @repeat: the time gap between two uframes that transfers are @@ -86,6 +87,7 @@ struct mu3h_sch_ep_info { u32 ep_type; u32 maxpkt; void *ep; + bool allocated; /* * mtk xHCI scheduling information put into reserved DWs * in ep context @@ -131,6 +133,7 @@ struct xhci_hcd_mtk { struct device *dev; struct usb_hcd *hcd; struct mu3h_sch_bw_info *sch_array; + struct list_head bw_ep_chk_list; struct mu3c_ippc_regs __iomem *ippc_regs; bool has_ippc; int num_u2_ports; @@ -166,6 +169,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep); +int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); +void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); #else static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, @@ -179,6 +184,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, { } +static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, + struct usb_device *udev) +{ + return 0; +} + +static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, + struct usb_device *udev) +{ +} #endif #endif /* _XHCI_MTK_H_ */ diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c index 60651a50770f..8ca1a235d164 100644 --- a/drivers/usb/host/xhci-mvebu.c +++ b/drivers/usb/host/xhci-mvebu.c @@ -8,6 +8,7 @@ #include <linux/mbus.h> #include <linux/of.h> #include <linux/platform_device.h> +#include <linux/phy/phy.h> #include 
<linux/usb.h> #include <linux/usb/hcd.h> @@ -74,6 +75,47 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) return 0; } +int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct device *dev = hcd->self.controller; + struct phy *phy; + int ret; + + /* Old bindings miss the PHY handle */ + phy = of_phy_get(dev->of_node, "usb3-phy"); + if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER) + return -EPROBE_DEFER; + else if (IS_ERR(phy)) + goto phy_out; + + ret = phy_init(phy); + if (ret) + goto phy_put; + + ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS); + if (ret) + goto phy_exit; + + ret = phy_power_on(phy); + if (ret == -EOPNOTSUPP) { + /* Skip initializatin of XHCI PHY when it is unsupported by firmware */ + dev_warn(dev, "PHY unsupported by firmware\n"); + xhci->quirks |= XHCI_SKIP_PHY_INIT; + } + if (ret) + goto phy_exit; + + phy_power_off(phy); +phy_exit: + phy_exit(phy); +phy_put: + of_phy_put(phy); +phy_out: + + return 0; +} + int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h index 3be021793cc8..01bf3fcb3eca 100644 --- a/drivers/usb/host/xhci-mvebu.h +++ b/drivers/usb/host/xhci-mvebu.h @@ -12,6 +12,7 @@ struct usb_hcd; #if IS_ENABLED(CONFIG_USB_XHCI_MVEBU) int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd); +int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd); int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd); #else static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) @@ -19,6 +20,11 @@ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd) return 0; } +static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd) +{ + return 0; +} + static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd) { return 0; diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 4d34f6005381..c1edcc9b13ce 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -44,6 +44,16 @@ static void xhci_priv_plat_start(struct usb_hcd *hcd) priv->plat_start(hcd); } +static int xhci_priv_plat_setup(struct usb_hcd *hcd) +{ + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); + + if (!priv->plat_setup) + return 0; + + return priv->plat_setup(hcd); +} + static int xhci_priv_init_quirk(struct usb_hcd *hcd) { struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); @@ -111,6 +121,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = { }; static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = { + .plat_setup = xhci_mvebu_a3700_plat_setup, .init_quirk = xhci_mvebu_a3700_init_quirk, }; @@ -330,7 +341,14 @@ static int xhci_plat_probe(struct platform_device *pdev) hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node); xhci->shared_hcd->tpl_support = hcd->tpl_support; - if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)) + + if (priv) { + ret = xhci_priv_plat_setup(hcd); + if (ret) + goto disable_usb_phy; + } + + if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))) hcd->skip_phy_initialization = 1; if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK)) diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h index 1fb149d1fbce..561d0b7bce09 100644 --- a/drivers/usb/host/xhci-plat.h +++ b/drivers/usb/host/xhci-plat.h @@ -13,6 +13,7 @@ struct xhci_plat_priv { const char *firmware_name; unsigned long long quirks; + int (*plat_setup)(struct usb_hcd *); void (*plat_start)(struct usb_hcd *); int 
(*init_quirk)(struct usb_hcd *); int (*suspend_quirk)(struct usb_hcd *); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 5677b81c0915..89c3be9917f6 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -699,11 +699,16 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci, dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len, DMA_FROM_DEVICE); /* for in tranfers we need to copy the data from bounce to sg */ - len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, - seg->bounce_len, seg->bounce_offs); - if (len != seg->bounce_len) - xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", - len, seg->bounce_len); + if (urb->num_sgs) { + len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf, + seg->bounce_len, seg->bounce_offs); + if (len != seg->bounce_len) + xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n", + len, seg->bounce_len); + } else { + memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf, + seg->bounce_len); + } seg->bounce_len = 0; seg->bounce_offs = 0; } @@ -2931,6 +2936,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring, trb->field[0] = cpu_to_le32(field1); trb->field[1] = cpu_to_le32(field2); trb->field[2] = cpu_to_le32(field3); + /* make sure TRB is fully written before giving it to the controller */ + wmb(); trb->field[3] = cpu_to_le32(field4); trace_xhci_queue_trb(ring, trb); @@ -3275,12 +3282,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len, /* create a max max_pkt sized bounce buffer pointed to by last trb */ if (usb_urb_dir_out(urb)) { - len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, - seg->bounce_buf, new_buff_len, enqd_len); - if (len != new_buff_len) - xhci_warn(xhci, - "WARN Wrong bounce buffer write length: %zu != %d\n", - len, new_buff_len); + if (urb->num_sgs) { + len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs, + seg->bounce_buf, new_buff_len, enqd_len); + if (len != new_buff_len) + xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n", + len, new_buff_len); + } else { + memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len); + } + seg->bounce_dma = dma_map_single(dev, seg->bounce_buf, max_pkt, DMA_TO_DEVICE); } else { diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 934be1686352..50bb91b6a4b8 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -623,6 +623,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra, enable); if (err < 0) break; + + /* + * wait 500us for LFPS detector to be disabled before + * sending ACK + */ + if (!enable) + usleep_range(500, 1000); } if (err < 0) { diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index e86940571b4c..345a221028c6 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -2985,7 +2985,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci, * else should be touching the xhci->devs[slot_id] structure, so we * don't need to take the xhci->lock for manipulating that. 
*/ -static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) { int i; int ret = 0; @@ -3083,7 +3083,7 @@ command_cleanup: return ret; } -static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) +void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci; struct xhci_virt_device *virt_dev; @@ -5510,6 +5510,10 @@ void xhci_init_driver(struct hc_driver *drv, drv->reset = over->reset; if (over->start) drv->start = over->start; + if (over->check_bandwidth) + drv->check_bandwidth = over->check_bandwidth; + if (over->reset_bandwidth) + drv->reset_bandwidth = over->reset_bandwidth; } } EXPORT_SYMBOL_GPL(xhci_init_driver); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 25e57bc9c3cc..07ff95016f11 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1920,6 +1920,8 @@ struct xhci_driver_overrides { size_t extra_priv_size; int (*reset)(struct usb_hcd *hcd); int (*start)(struct usb_hcd *hcd); + int (*check_bandwidth)(struct usb_hcd *, struct usb_device *); + void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *); }; #define XHCI_CFC_DELAY 10 @@ -2074,6 +2076,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks); void xhci_shutdown(struct usb_hcd *hcd); void xhci_init_driver(struct hc_driver *drv, const struct xhci_driver_overrides *over); +int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); +void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev); int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id); int xhci_ext_cap_init(struct xhci_hcd *xhci); diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c index ac9a81ae8216..e6fa13701808 100644 --- a/drivers/usb/renesas_usbhs/fifo.c +++ b/drivers/usb/renesas_usbhs/fifo.c @@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt) } usbhs_pipe_clear_without_sequence(pipe, 0, 0); + usbhs_pipe_running(pipe, 0); __usbhsf_pkt_del(pkt); } diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index fbb10dfc56e3..7bec1e730b20 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ { USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */ + { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ @@ -201,6 +202,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */ { USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */ { USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */ + { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */ { USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */ { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 3fe959104311..2049e66f34a3 100644 --- 
a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_AHXX_2RMNET 0x0084 #define CINTERION_PRODUCT_AHXX_AUDIO 0x0085 #define CINTERION_PRODUCT_CLS8 0x00b0 +#define CINTERION_PRODUCT_MV31_MBIM 0x00b3 +#define CINTERION_PRODUCT_MV31_RMNET 0x00b7 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c @@ -1914,6 +1916,10 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff), + .driver_info = RSVD(3)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), + .driver_info = RSVD(0)}, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100), .driver_info = RSVD(4) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120), diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h index 5c92a576edae..08f742fd2409 100644 --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h @@ -15,6 +15,7 @@ struct mlx5_vdpa_direct_mr { struct sg_table sg_head; int log_size; int nsg; + int nent; struct list_head list; u64 offset; }; diff --git a/drivers/vdpa/mlx5/core/mr.c b/drivers/vdpa/mlx5/core/mr.c index 4b6195666c58..d300f799efcd 100644 --- a/drivers/vdpa/mlx5/core/mr.c +++ b/drivers/vdpa/mlx5/core/mr.c @@ -25,17 +25,6 @@ static int get_octo_len(u64 len, int page_shift) return (npages + 1) / 2; } -static void fill_sg(struct mlx5_vdpa_direct_mr *mr, void *in) -{ - struct scatterlist *sg; - __be64 *pas; - int i; - - pas = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt); - for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i) - (*pas) = cpu_to_be64(sg_dma_address(sg)); -} - static void mlx5_set_access_mode(void *mkc, int mode) { MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3); @@ -45,10 +34,18 @@ static void mlx5_set_access_mode(void *mkc, int mode) static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt) { struct scatterlist *sg; + int nsg = mr->nsg; + u64 dma_addr; + u64 dma_len; + int j = 0; int i; - for_each_sg(mr->sg_head.sgl, sg, mr->nsg, i) - mtt[i] = cpu_to_be64(sg_dma_address(sg)); + for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) { + for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg); + nsg && dma_len; + nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size)) + mtt[j++] = cpu_to_be64(dma_addr); + } } static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr) @@ -64,7 +61,6 @@ static int create_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct return -ENOMEM; MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid); - fill_sg(mr, in); mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO)); MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO)); @@ -276,8 +272,8 @@ static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr done: mr->log_size = log_entity_size; mr->nsg = nsg; - err = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); - if (!err) + mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); + if (!mr->nent) goto err_map; err = create_direct_mr(mvdev, 
mr); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 88dde3455bfd..b5fe6d2ad22f 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -87,6 +87,7 @@ struct mlx5_vq_restore_info { u64 device_addr; u64 driver_addr; u16 avail_index; + u16 used_index; bool ready; struct vdpa_callback cb; bool restore; @@ -121,6 +122,7 @@ struct mlx5_vdpa_virtqueue { u32 virtq_id; struct mlx5_vdpa_net *ndev; u16 avail_idx; + u16 used_idx; int fw_state; /* keep last in the struct */ @@ -804,6 +806,7 @@ static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtque obj_context = MLX5_ADDR_OF(create_virtio_net_q_in, in, obj_context); MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); + MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); MLX5_SET(virtio_net_q_object, obj_context, queue_feature_bit_mask_12_3, get_features_12_3(ndev->mvdev.actual_features)); vq_ctx = MLX5_ADDR_OF(virtio_net_q_object, obj_context, virtio_q_context); @@ -1022,6 +1025,7 @@ static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *m struct mlx5_virtq_attr { u8 state; u16 available_index; + u16 used_index; }; static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, @@ -1052,6 +1056,7 @@ static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueu memset(attr, 0, sizeof(*attr)); attr->state = MLX5_GET(virtio_net_q_object, obj_context, state); attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index); + attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index); kfree(out); return 0; @@ -1535,6 +1540,16 @@ static void teardown_virtqueues(struct mlx5_vdpa_net *ndev) } } +static void clear_virtqueues(struct mlx5_vdpa_net *ndev) +{ + int i; + + for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { + ndev->vqs[i].avail_idx = 0; + ndev->vqs[i].used_idx = 0; + } +} + /* TODO: cross-endian support */ static inline bool mlx5_vdpa_is_little_endian(struct mlx5_vdpa_dev *mvdev) { @@ -1610,6 +1625,7 @@ static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqu return err; ri->avail_index = attr.available_index; + ri->used_index = attr.used_index; ri->ready = mvq->ready; ri->num_ent = mvq->num_ent; ri->desc_addr = mvq->desc_addr; @@ -1654,6 +1670,7 @@ static void restore_channels_info(struct mlx5_vdpa_net *ndev) continue; mvq->avail_idx = ri->avail_index; + mvq->used_idx = ri->used_index; mvq->ready = ri->ready; mvq->num_ent = ri->num_ent; mvq->desc_addr = ri->desc_addr; @@ -1768,6 +1785,7 @@ static void mlx5_vdpa_set_status(struct vdpa_device *vdev, u8 status) if (!status) { mlx5_vdpa_info(mvdev, "performing device reset\n"); teardown_driver(ndev); + clear_virtqueues(ndev); mlx5_vdpa_destroy_mr(&ndev->mvdev); ndev->mvdev.status = 0; ndev->mvdev.mlx_features = 0; diff --git a/drivers/xen/xenbus/xenbus.h b/drivers/xen/xenbus/xenbus.h index dc1537335414..2a93b7c9c159 100644 --- a/drivers/xen/xenbus/xenbus.h +++ b/drivers/xen/xenbus/xenbus.h @@ -115,7 +115,6 @@ int xenbus_probe_node(struct xen_bus_type *bus, const char *type, const char *nodename); int xenbus_probe_devices(struct xen_bus_type *bus); -void xenbus_probe(void); void xenbus_dev_changed(const char *node, struct xen_bus_type *bus); diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c index c8f0282bb649..8a75092bb148 100644 --- a/drivers/xen/xenbus/xenbus_probe.c +++ 
b/drivers/xen/xenbus/xenbus_probe.c @@ -683,7 +683,7 @@ void unregister_xenstore_notifier(struct notifier_block *nb) } EXPORT_SYMBOL_GPL(unregister_xenstore_notifier); -void xenbus_probe(void) +static void xenbus_probe(void) { xenstored_ready = 1; @@ -714,6 +714,23 @@ static bool xs_hvm_defer_init_for_callback(void) #endif } +static int xenbus_probe_thread(void *unused) +{ + DEFINE_WAIT(w); + + /* + * We actually just want to wait for *any* trigger of xb_waitq, + * and run xenbus_probe() the moment it occurs. + */ + prepare_to_wait(&xb_waitq, &w, TASK_INTERRUPTIBLE); + schedule(); + finish_wait(&xb_waitq, &w); + + DPRINTK("probing"); + xenbus_probe(); + return 0; +} + static int __init xenbus_probe_initcall(void) { /* @@ -725,6 +742,20 @@ static int __init xenbus_probe_initcall(void) !xs_hvm_defer_init_for_callback())) xenbus_probe(); + /* + * For XS_LOCAL, spawn a thread which will wait for xenstored + * or a xenstore-stubdom to be started, then probe. It will be + * triggered when communication starts happening, by waiting + * on xb_waitq. + */ + if (xen_store_domain_type == XS_LOCAL) { + struct task_struct *probe_task; + + probe_task = kthread_run(xenbus_probe_thread, NULL, + "xenbus_probe"); + if (IS_ERR(probe_task)) + return PTR_ERR(probe_task); + } return 0; } device_initcall(xenbus_probe_initcall); |
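
The least obvious hunk above is the drivers/vdpa/mlx5/core/mr.c change: populate_mtts() now walks the entries that dma_map_sg_attrs() actually produced (mr->nent, which can be fewer than mr->nsg once an IOMMU merges segments) and emits one MTT entry per (1 << log_size)-sized chunk of each mapped segment, instead of assuming one address per scatterlist entry. The following is a minimal, standalone user-space sketch of that chunking logic only — it is not the kernel code; struct seg, the field names and the sample addresses are illustrative stand-ins for a DMA-mapped scatterlist.

/*
 * Sketch of the MTT population pattern from the mr.c hunk above.
 * Each mapped segment is split into 1 << log_size chunks; nsg bounds
 * the total number of MTT entries written.
 */
#include <stdio.h>
#include <stdint.h>

struct seg {                    /* stand-in for one dma-mapped sg entry */
	uint64_t dma_addr;
	uint64_t dma_len;
};

static int populate_mtts(const struct seg *segs, int nent, int nsg,
			 int log_size, uint64_t *mtt)
{
	int j = 0;

	for (int i = 0; i < nent; i++) {
		uint64_t addr = segs[i].dma_addr;
		uint64_t len  = segs[i].dma_len;

		/* one MTT entry per (1 << log_size)-sized chunk */
		for (; nsg && len;
		     nsg--, addr += 1ULL << log_size, len -= 1ULL << log_size)
			mtt[j++] = addr;
	}
	return j;
}

int main(void)
{
	/* two mapped segments, 4 KiB chunks: expect 3 + 1 = 4 entries */
	struct seg segs[] = {
		{ 0x100000, 3 * 4096 },
		{ 0x200000, 1 * 4096 },
	};
	uint64_t mtt[8];
	int n = populate_mtts(segs, 2, 4, 12, mtt);

	for (int i = 0; i < n; i++)
		printf("mtt[%d] = 0x%llx\n", i, (unsigned long long)mtt[i]);
	return 0;
}

Run as-is, this prints four page-aligned addresses (three from the first segment, one from the second), which is the behaviour the fix restores when the IOMMU hands back fewer mapped entries than scatterlist segments.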