Diffstat (limited to 'drivers')
376 files changed, 6102 insertions, 2698 deletions
diff --git a/drivers/acpi/acpica/dsmethod.c b/drivers/acpi/acpica/dsmethod.c index e809c2aed78a..a232746d150a 100644 --- a/drivers/acpi/acpica/dsmethod.c +++ b/drivers/acpi/acpica/dsmethod.c @@ -483,6 +483,13 @@ acpi_ds_call_control_method(struct acpi_thread_state *thread, return_ACPI_STATUS(AE_NULL_OBJECT); } + if (this_walk_state->num_operands < obj_desc->method.param_count) { + ACPI_ERROR((AE_INFO, "Missing argument for method [%4.4s]", + acpi_ut_get_node_name(method_node))); + + return_ACPI_STATUS(AE_AML_UNINITIALIZED_ARG); + } + /* Init for new method, possibly wait on method mutex */ status = diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 6a7ac34d73bd..65fa3444367a 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -243,23 +243,10 @@ static int acpi_battery_get_property(struct power_supply *psy, break; case POWER_SUPPLY_PROP_CURRENT_NOW: case POWER_SUPPLY_PROP_POWER_NOW: - if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) { + if (battery->rate_now == ACPI_BATTERY_VALUE_UNKNOWN) ret = -ENODEV; - break; - } - - val->intval = battery->rate_now * 1000; - /* - * When discharging, the current should be reported as a - * negative number as per the power supply class interface - * definition. - */ - if (psp == POWER_SUPPLY_PROP_CURRENT_NOW && - (battery->state & ACPI_BATTERY_STATE_DISCHARGING) && - acpi_battery_handle_discharging(battery) - == POWER_SUPPLY_STATUS_DISCHARGING) - val->intval = -val->intval; - + else + val->intval = battery->rate_now * 1000; break; case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: case POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN: diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c index 78db38c7076e..125d7df8f30a 100644 --- a/drivers/acpi/thermal.c +++ b/drivers/acpi/thermal.c @@ -803,7 +803,13 @@ static int acpi_thermal_add(struct acpi_device *device) acpi_thermal_aml_dependency_fix(tz); - /* Get trip points [_CRT, _PSV, etc.] (required). */ + /* + * Set the cooling mode [_SCP] to active cooling. This needs to happen before + * we retrieve the trip point values. + */ + acpi_execute_simple_method(tz->device->handle, "_SCP", ACPI_THERMAL_MODE_ACTIVE); + + /* Get trip points [_ACi, _PSV, etc.] (required). */ acpi_thermal_get_trip_points(tz); crit_temp = acpi_thermal_get_critical_trip(tz); @@ -814,10 +820,6 @@ static int acpi_thermal_add(struct acpi_device *device) if (result) goto free_memory; - /* Set the cooling mode [_SCP] to active cooling. */ - acpi_execute_simple_method(tz->device->handle, "_SCP", - ACPI_THERMAL_MODE_ACTIVE); - /* Determine the default polling frequency [_TZP]. */ if (tzp) tz->polling_frequency = tzp; diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c index d36e71f475ab..39a350755a1b 100644 --- a/drivers/ata/libata-acpi.c +++ b/drivers/ata/libata-acpi.c @@ -514,15 +514,19 @@ unsigned int ata_acpi_gtm_xfermask(struct ata_device *dev, EXPORT_SYMBOL_GPL(ata_acpi_gtm_xfermask); /** - * ata_acpi_cbl_80wire - Check for 80 wire cable + * ata_acpi_cbl_pata_type - Return PATA cable type * @ap: Port to check - * @gtm: GTM data to use * - * Return 1 if the @gtm indicates the BIOS selected an 80wire mode. 
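The thermal hunk above moves the _SCP (Set Cooling Policy) evaluation ahead of trip-point retrieval: firmware may only populate _ACx/_PSV values that match the selected policy, so reading the trips first can capture stale data. A minimal ordering sketch, using the real acpi_execute_simple_method() API and a hypothetical read_trips() helper standing in for acpi_thermal_get_trip_points():

	/* Sketch: fix the cooling policy first, then read the trip points.
	 * The _SCP status is ignored on purpose; the method is optional.
	 */
	acpi_execute_simple_method(tz->device->handle, "_SCP",
				   ACPI_THERMAL_MODE_ACTIVE);
	read_trips(tz);		/* hypothetical: trips now match the policy */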
+ * Return ATA_CBL_PATA* according to the transfer mode selected by BIOS */ -int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm) +int ata_acpi_cbl_pata_type(struct ata_port *ap) { struct ata_device *dev; + int ret = ATA_CBL_PATA_UNK; + const struct ata_acpi_gtm *gtm = ata_acpi_init_gtm(ap); + + if (!gtm) + return ATA_CBL_PATA40; ata_for_each_dev(dev, &ap->link, ENABLED) { unsigned int xfer_mask, udma_mask; @@ -530,13 +534,17 @@ int ata_acpi_cbl_80wire(struct ata_port *ap, const struct ata_acpi_gtm *gtm) xfer_mask = ata_acpi_gtm_xfermask(dev, gtm); ata_unpack_xfermask(xfer_mask, NULL, NULL, &udma_mask); - if (udma_mask & ~ATA_UDMA_MASK_40C) - return 1; + ret = ATA_CBL_PATA40; + + if (udma_mask & ~ATA_UDMA_MASK_40C) { + ret = ATA_CBL_PATA80; + break; + } } - return 0; + return ret; } -EXPORT_SYMBOL_GPL(ata_acpi_cbl_80wire); +EXPORT_SYMBOL_GPL(ata_acpi_cbl_pata_type); static void ata_acpi_gtf_to_tf(struct ata_device *dev, const struct ata_acpi_gtf *gtf, diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c index b811efd2cc34..73e81e160c91 100644 --- a/drivers/ata/pata_cs5536.c +++ b/drivers/ata/pata_cs5536.c @@ -27,7 +27,7 @@ #include <scsi/scsi_host.h> #include <linux/dmi.h> -#ifdef CONFIG_X86_32 +#if defined(CONFIG_X86) && defined(CONFIG_X86_32) #include <asm/msr.h> static int use_msr; module_param_named(msr, use_msr, int, 0644); diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c index d82728a01832..bb80e7800dcb 100644 --- a/drivers/ata/pata_via.c +++ b/drivers/ata/pata_via.c @@ -201,11 +201,9 @@ static int via_cable_detect(struct ata_port *ap) { two drives */ if (ata66 & (0x10100000 >> (16 * ap->port_no))) return ATA_CBL_PATA80; + /* Check with ACPI so we can spot BIOS reported SATA bridges */ - if (ata_acpi_init_gtm(ap) && - ata_acpi_cbl_80wire(ap, ata_acpi_init_gtm(ap))) - return ATA_CBL_PATA80; - return ATA_CBL_PATA40; + return ata_acpi_cbl_pata_type(ap); } static int via_pre_reset(struct ata_link *link, unsigned long deadline) diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c index a876024d8a05..63d41320cd5c 100644 --- a/drivers/atm/idt77252.c +++ b/drivers/atm/idt77252.c @@ -852,6 +852,8 @@ queue_skb(struct idt77252_dev *card, struct vc_map *vc, IDT77252_PRV_PADDR(skb) = dma_map_single(&card->pcidev->dev, skb->data, skb->len, DMA_TO_DEVICE); + if (dma_mapping_error(&card->pcidev->dev, IDT77252_PRV_PADDR(skb))) + return -ENOMEM; error = -EINVAL; @@ -1857,6 +1859,8 @@ add_rx_skb(struct idt77252_dev *card, int queue, paddr = dma_map_single(&card->pcidev->dev, skb->data, skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE); + if (dma_mapping_error(&card->pcidev->dev, paddr)) + goto outpoolrm; IDT77252_PRV_PADDR(skb) = paddr; if (push_rx_skb(card, skb, queue)) { @@ -1871,6 +1875,7 @@ outunmap: dma_unmap_single(&card->pcidev->dev, IDT77252_PRV_PADDR(skb), skb_end_pointer(skb) - skb->data, DMA_FROM_DEVICE); +outpoolrm: handle = IDT77252_PRV_POOL(skb); card->sbpool[POOL_QUEUE(handle)].skb[POOL_INDEX(handle)] = NULL; diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index d88f721cf68c..02870e70ed59 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -600,6 +600,7 @@ CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow); CPU_SHOW_VULN_FALLBACK(gds); CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling); CPU_SHOW_VULN_FALLBACK(indirect_target_selection); +CPU_SHOW_VULN_FALLBACK(tsa); static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); @@ -616,6 +617,7 @@ static 
DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL); static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL); static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL); +static DEVICE_ATTR(tsa, 0444, cpu_show_tsa, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -633,6 +635,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_gather_data_sampling.attr, &dev_attr_reg_file_data_sampling.attr, &dev_attr_indirect_target_selection.attr, + &dev_attr_tsa.attr, NULL }; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 5962ea1230a1..de4e2f3db942 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1174,6 +1174,8 @@ err_name: err_map: kfree(map); err: + if (bus && bus->free_on_exit) + kfree(bus); return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(__regmap_init); diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h index 749ae1246f4c..d35caa3c69e1 100644 --- a/drivers/block/aoe/aoe.h +++ b/drivers/block/aoe/aoe.h @@ -80,6 +80,7 @@ enum { DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */ DEVFL_FREEING = (1<<7), /* set when device is being cleaned up */ DEVFL_FREED = (1<<8), /* device has been cleaned up */ + DEVFL_DEAD = (1<<9), /* device has timed out of aoe_deadsecs */ }; enum { diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c index 92b06d1de4cc..6c94cfd1c480 100644 --- a/drivers/block/aoe/aoecmd.c +++ b/drivers/block/aoe/aoecmd.c @@ -754,7 +754,7 @@ rexmit_timer(struct timer_list *timer) utgts = count_targets(d, NULL); - if (d->flags & DEVFL_TKILL) { + if (d->flags & (DEVFL_TKILL | DEVFL_DEAD)) { spin_unlock_irqrestore(&d->lock, flags); return; } @@ -786,7 +786,8 @@ rexmit_timer(struct timer_list *timer) * to clean up.
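The idt77252 hunks above add the dma_mapping_error() checks that both mapping sites were missing; a streaming mapping can fail (for example when an IOMMU runs out of space), and the returned handle must be validated before it ever reaches hardware. The canonical pattern, sketched with generic names rather than the driver's:

	/* Generic sketch of a checked streaming DMA mapping (not driver code). */
	dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, addr))
		return -ENOMEM;		/* never hand a bad handle to the device */

	/* ... hardware consumes addr ... */

	dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);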
*/ list_splice(&flist, &d->factive[0]); - aoedev_downdev(d); + d->flags |= DEVFL_DEAD; + queue_work(aoe_wq, &d->work); goto out; } @@ -898,6 +899,9 @@ aoecmd_sleepwork(struct work_struct *work) { struct aoedev *d = container_of(work, struct aoedev, work); + if (d->flags & DEVFL_DEAD) + aoedev_downdev(d); + if (d->flags & DEVFL_GDALLOC) aoeblk_gdalloc(d); diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index 280679bde3a5..4240e11adfb7 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c @@ -200,8 +200,11 @@ aoedev_downdev(struct aoedev *d) struct list_head *head, *pos, *nx; struct request *rq, *rqnext; int i; + unsigned long flags; - d->flags &= ~DEVFL_UP; + spin_lock_irqsave(&d->lock, flags); + d->flags &= ~(DEVFL_UP | DEVFL_DEAD); + spin_unlock_irqrestore(&d->lock, flags); /* clean out active and to-be-retransmitted buffers */ for (i = 0; i < NFACTIVE; i++) { diff --git a/drivers/block/loop.c b/drivers/block/loop.c index e9a197474b9d..2f42d1644618 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -323,14 +323,13 @@ end_io: static void lo_rw_aio_do_completion(struct loop_cmd *cmd) { struct request *rq = blk_mq_rq_from_pdu(cmd); - struct loop_device *lo = rq->q->queuedata; if (!atomic_dec_and_test(&cmd->ref)) return; kfree(cmd->bvec); cmd->bvec = NULL; if (req_op(rq) == REQ_OP_WRITE) - file_end_write(lo->lo_backing_file); + kiocb_end_write(&cmd->iocb); if (likely(!blk_should_fake_timeout(rq->q))) blk_mq_complete_request(rq); } @@ -406,7 +405,7 @@ static int lo_rw_aio(struct loop_device *lo, struct loop_cmd *cmd, } if (rw == ITER_SOURCE) { - file_start_write(lo->lo_backing_file); + kiocb_start_write(&cmd->iocb); ret = file->f_op->write_iter(&cmd->iocb, &iter); } else ret = file->f_op->read_iter(&cmd->iocb, &iter); diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index 450458267e6e..c705acc4d6f4 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -2136,9 +2136,7 @@ again: goto out; } } - ret = nbd_start_device(nbd); - if (ret) - goto out; + if (info->attrs[NBD_ATTR_BACKEND_IDENTIFIER]) { nbd->backend = nla_strdup(info->attrs[NBD_ATTR_BACKEND_IDENTIFIER], GFP_KERNEL); @@ -2154,6 +2152,8 @@ again: goto out; } set_bit(NBD_RT_HAS_BACKEND_FILE, &config->runtime_flags); + + ret = nbd_start_device(nbd); out: mutex_unlock(&nbd->config_lock); if (!ret) { diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 746ef36e58df..3b1a5cdd6311 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -2457,7 +2457,8 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) if (copy_from_user(&info, argp, sizeof(info))) return -EFAULT; - if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || info.nr_hw_queues > UBLK_MAX_NR_QUEUES) + if (info.queue_depth > UBLK_MAX_QUEUE_DEPTH || !info.queue_depth || + info.nr_hw_queues > UBLK_MAX_NR_QUEUES || !info.nr_hw_queues) return -EINVAL; if (capable(CAP_SYS_ADMIN)) diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 51d6d91ed404..85df941afb6c 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -2656,7 +2656,7 @@ static u8 btintel_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb) * Distinguish ISO data packets form ACL data packets * based on their connection handle value range. 
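The loop.c change above replaces file_start_write()/file_end_write() on the backing file with the kiocb-based helpers, so write-freeze protection is tied to the in-flight kiocb and released by whichever context completes it. Rough shape of the pairing, hedged to the helpers visible in the hunk:

	/* Sketch: freeze protection follows the kiocb across contexts. */
	kiocb_start_write(&cmd->iocb);		/* before ->write_iter() */
	ret = file->f_op->write_iter(&cmd->iocb, &iter);
	/* later, in the completion path, possibly another context: */
	kiocb_end_write(&cmd->iocb);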
*/ - if (hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) { + if (iso_capable(hdev) && hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) { __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); if (hci_handle(handle) >= BTINTEL_ISODATA_HANDLE_BASE) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index aa6385206050..72b529757373 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -3194,6 +3194,32 @@ static const struct qca_device_info qca_devices_table[] = { { 0x00190200, 40, 4, 16 }, /* WCN785x 2.0 */ }; +static u16 qca_extract_board_id(const struct qca_version *ver) +{ + u16 flag = le16_to_cpu(ver->flag); + u16 board_id = 0; + + if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) { + /* The board_id should be split into two bytes + * The 1st byte is chip ID, and the 2nd byte is platform ID + * For example, board ID 0x010A, 0x01 is platform ID. 0x0A is chip ID + * we have several platforms, and platform IDs are continuously added + * Platform ID: + * 0x00 is for Mobile + * 0x01 is for X86 + * 0x02 is for Automotive + * 0x03 is for Consumer electronic + */ + board_id = (ver->chip_id << 8) + ver->platform_id; + } + + /* Take 0xffff as invalid board ID */ + if (board_id == 0xffff) + board_id = 0; + + return board_id; +} + static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request, void *data, u16 size) { @@ -3350,44 +3376,28 @@ static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size, const struct qca_version *ver) { u32 rom_version = le32_to_cpu(ver->rom_version); - u16 flag = le16_to_cpu(ver->flag); + const char *variant; + int len; + u16 board_id; - if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) { - /* The board_id should be split into two bytes - * The 1st byte is chip ID, and the 2nd byte is platform ID - * For example, board ID 0x010A, 0x01 is platform ID. 
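The new helper above packs ver->chip_id into the high byte and ver->platform_id into the low byte (the code is authoritative here; the inherited comment's 0x010A example reads the bytes the other way around), with 0xffff reserved as "no board ID". A self-contained illustration:

	/* Illustration: chip ID in bits 15:8, platform ID in bits 7:0. */
	u16 board_id = ((u16)ver->chip_id << 8) + ver->platform_id;

	if (board_id == 0xffff)		/* sentinel: treat as absent */
		board_id = 0;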
0x0A is chip ID - * we have several platforms, and platform IDs are continuously added - * Platform ID: - * 0x00 is for Mobile - * 0x01 is for X86 - * 0x02 is for Automotive - * 0x03 is for Consumer electronic - */ - u16 board_id = (ver->chip_id << 8) + ver->platform_id; - const char *variant; + board_id = qca_extract_board_id(ver); - switch (le32_to_cpu(ver->ram_version)) { - case WCN6855_2_0_RAM_VERSION_GF: - case WCN6855_2_1_RAM_VERSION_GF: - variant = "_gf"; - break; - default: - variant = ""; - break; - } - - if (board_id == 0) { - snprintf(fwname, max_size, "qca/nvm_usb_%08x%s.bin", - rom_version, variant); - } else { - snprintf(fwname, max_size, "qca/nvm_usb_%08x%s_%04x.bin", - rom_version, variant, board_id); - } - } else { - snprintf(fwname, max_size, "qca/nvm_usb_%08x.bin", - rom_version); + switch (le32_to_cpu(ver->ram_version)) { + case WCN6855_2_0_RAM_VERSION_GF: + case WCN6855_2_1_RAM_VERSION_GF: + variant = "_gf"; + break; + default: + variant = NULL; + break; } + len = snprintf(fwname, max_size, "qca/nvm_usb_%08x", rom_version); + if (variant) + len += snprintf(fwname + len, max_size - len, "%s", variant); + if (board_id) + len += snprintf(fwname + len, max_size - len, "_%04x", board_id); + len += snprintf(fwname + len, max_size - len, ".bin"); } static int btusb_setup_qca_load_nvm(struct hci_dev *hdev, diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 025b9a07c087..e6ad01d5e1d5 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -2363,10 +2363,17 @@ static int qca_serdev_probe(struct serdev_device *serdev) */ qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev, "bluetooth"); - if (IS_ERR(qcadev->bt_power->pwrseq)) - return PTR_ERR(qcadev->bt_power->pwrseq); - break; + /* + * Some modules have BT_EN enabled via a hardware pull-up, + * meaning it is not defined in the DTS and is not controlled + * through the power sequence. In such cases, fall through + * to follow the legacy flow. + */ + if (IS_ERR(qcadev->bt_power->pwrseq)) + qcadev->bt_power->pwrseq = NULL; + else + break; } fallthrough; case QCA_WCN3988: diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index 58d16ff166c2..4575d9a4e5ed 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -942,6 +942,7 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, struct fsl_mc_obj_desc endpoint_desc = {{ 0 }}; struct dprc_endpoint endpoint1 = {{ 0 }}; struct dprc_endpoint endpoint2 = {{ 0 }}; + struct fsl_mc_bus *mc_bus; int state, err; mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); @@ -965,6 +966,8 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, strcpy(endpoint_desc.type, endpoint2.type); endpoint_desc.id = endpoint2.id; endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev); + if (endpoint) + return endpoint; /* * We know that the device has an endpoint because we verified by @@ -972,17 +975,13 @@ struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev, * yet discovered by the fsl-mc bus, thus the lookup returned NULL. * Force a rescan of the devices in this container and retry the lookup. 
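The rewritten btusb name builder above assembles the NVM path in stages. The len-accumulating snprintf() idiom is safe here because each component is far smaller than the buffer; in general, snprintf() returns the length it would have written, so an overflowing step would push name + len past the end. A generic sketch of the staged construction:

	/* Sketch: staged firmware-name construction (hypothetical helper). */
	static void build_nvm_name(char *name, size_t max, u32 rom_version,
				   const char *variant, u16 board_id)
	{
		int len = snprintf(name, max, "qca/nvm_usb_%08x", rom_version);

		if (variant)
			len += snprintf(name + len, max - len, "%s", variant);
		if (board_id)
			len += snprintf(name + len, max - len, "_%04x", board_id);
		snprintf(name + len, max - len, ".bin");
	}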
*/ - if (!endpoint) { - struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); - - if (mutex_trylock(&mc_bus->scan_mutex)) { - err = dprc_scan_objects(mc_bus_dev, true); - mutex_unlock(&mc_bus->scan_mutex); - } - - if (err < 0) - return ERR_PTR(err); + mc_bus = to_fsl_mc_bus(mc_bus_dev); + if (mutex_trylock(&mc_bus->scan_mutex)) { + err = dprc_scan_objects(mc_bus_dev, true); + mutex_unlock(&mc_bus->scan_mutex); } + if (err < 0) + return ERR_PTR(err); endpoint = fsl_mc_device_lookup(&endpoint_desc, mc_bus_dev); /* diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index e12b531f5c2f..6a4a8ecd0edd 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c @@ -1241,7 +1241,7 @@ int ipmi_create_user(unsigned int if_num, } /* Not found, return an error */ rv = -EINVAL; - goto out_kfree; + goto out_unlock; found: if (atomic_add_return(1, &intf->nr_users) > max_users) { @@ -1283,6 +1283,7 @@ int ipmi_create_user(unsigned int if_num, out_kfree: atomic_dec(&intf->nr_users); +out_unlock: srcu_read_unlock(&ipmi_interfaces_srcu, index); vfree(new_user); return rv; diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c index 15510c2ff21c..1b1561c84127 100644 --- a/drivers/clk/clk-scmi.c +++ b/drivers/clk/clk-scmi.c @@ -404,6 +404,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev) const struct scmi_handle *handle = sdev->handle; struct scmi_protocol_handle *ph; const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {}; + struct scmi_clk *sclks; if (!handle) return -ENODEV; @@ -430,18 +431,21 @@ static int scmi_clocks_probe(struct scmi_device *sdev) transport_is_atomic = handle->is_transport_atomic(handle, &atomic_threshold_us); + sclks = devm_kcalloc(dev, count, sizeof(*sclks), GFP_KERNEL); + if (!sclks) + return -ENOMEM; + + for (idx = 0; idx < count; idx++) + hws[idx] = &sclks[idx].hw; + for (idx = 0; idx < count; idx++) { - struct scmi_clk *sclk; + struct scmi_clk *sclk = &sclks[idx]; const struct clk_ops *scmi_ops; - sclk = devm_kzalloc(dev, sizeof(*sclk), GFP_KERNEL); - if (!sclk) - return -ENOMEM; - sclk->info = scmi_proto_clk_ops->info_get(ph, idx); if (!sclk->info) { dev_dbg(dev, "invalid clock info for idx %d\n", idx); - devm_kfree(dev, sclk); + hws[idx] = NULL; continue; } @@ -479,13 +483,11 @@ static int scmi_clocks_probe(struct scmi_device *sdev) if (err) { dev_err(dev, "failed to register clock %d\n", idx); devm_kfree(dev, sclk->parent_data); - devm_kfree(dev, sclk); hws[idx] = NULL; } else { dev_dbg(dev, "Registered clock:%s%s\n", sclk->info->name, scmi_ops->enable ? 
" (atomic ops)" : ""); - hws[idx] = &sclk->hw; } } diff --git a/drivers/clk/imx/clk-imx95-blk-ctl.c b/drivers/clk/imx/clk-imx95-blk-ctl.c index 19a62da74be4..564e9f3f7508 100644 --- a/drivers/clk/imx/clk-imx95-blk-ctl.c +++ b/drivers/clk/imx/clk-imx95-blk-ctl.c @@ -219,11 +219,15 @@ static const struct imx95_blk_ctl_dev_data lvds_csr_dev_data = { .clk_reg_offset = 0, }; +static const char * const disp_engine_parents[] = { + "videopll1", "dsi_pll", "ldb_pll_div7" +}; + static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = { [IMX95_CLK_DISPMIX_ENG0_SEL] = { .name = "disp_engine0_sel", - .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", }, - .num_parents = 4, + .parent_names = disp_engine_parents, + .num_parents = ARRAY_SIZE(disp_engine_parents), .reg = 0, .bit_idx = 0, .bit_width = 2, @@ -232,8 +236,8 @@ static const struct imx95_blk_ctl_clk_dev_data dispmix_csr_clk_dev_data[] = { }, [IMX95_CLK_DISPMIX_ENG1_SEL] = { .name = "disp_engine1_sel", - .parent_names = (const char *[]){"videopll1", "dsi_pll", "ldb_pll_div7", }, - .num_parents = 4, + .parent_names = disp_engine_parents, + .num_parents = ARRAY_SIZE(disp_engine_parents), .reg = 0, .bit_idx = 2, .bit_width = 2, diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c index b9df9b19d4bd..07bc81a706b4 100644 --- a/drivers/comedi/comedi_fops.c +++ b/drivers/comedi/comedi_fops.c @@ -1556,21 +1556,27 @@ static int do_insnlist_ioctl(struct comedi_device *dev, } for (i = 0; i < n_insns; ++i) { + unsigned int n = insns[i].n; + if (insns[i].insn & INSN_MASK_WRITE) { if (copy_from_user(data, insns[i].data, - insns[i].n * sizeof(unsigned int))) { + n * sizeof(unsigned int))) { dev_dbg(dev->class_dev, "copy_from_user failed\n"); ret = -EFAULT; goto error; } + if (n < MIN_SAMPLES) { + memset(&data[n], 0, (MIN_SAMPLES - n) * + sizeof(unsigned int)); + } } ret = parse_insn(dev, insns + i, data, file); if (ret < 0) goto error; if (insns[i].insn & INSN_MASK_READ) { if (copy_to_user(insns[i].data, data, - insns[i].n * sizeof(unsigned int))) { + n * sizeof(unsigned int))) { dev_dbg(dev->class_dev, "copy_to_user failed\n"); ret = -EFAULT; @@ -1589,6 +1595,16 @@ error: return i; } +#define MAX_INSNS MAX_SAMPLES +static int check_insnlist_len(struct comedi_device *dev, unsigned int n_insns) +{ + if (n_insns > MAX_INSNS) { + dev_dbg(dev->class_dev, "insnlist length too large\n"); + return -EINVAL; + } + return 0; +} + /* * COMEDI_INSN ioctl * synchronous instruction @@ -1633,6 +1649,10 @@ static int do_insn_ioctl(struct comedi_device *dev, ret = -EFAULT; goto error; } + if (insn->n < MIN_SAMPLES) { + memset(&data[insn->n], 0, + (MIN_SAMPLES - insn->n) * sizeof(unsigned int)); + } } ret = parse_insn(dev, insn, data, file); if (ret < 0) @@ -2239,6 +2259,9 @@ static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd, rc = -EFAULT; break; } + rc = check_insnlist_len(dev, insnlist.n_insns); + if (rc) + break; insns = kcalloc(insnlist.n_insns, sizeof(*insns), GFP_KERNEL); if (!insns) { rc = -ENOMEM; @@ -3090,6 +3113,9 @@ static int compat_insnlist(struct file *file, unsigned long arg) if (copy_from_user(&insnlist32, compat_ptr(arg), sizeof(insnlist32))) return -EFAULT; + rc = check_insnlist_len(dev, insnlist32.n_insns); + if (rc) + return rc; insns = kcalloc(insnlist32.n_insns, sizeof(*insns), GFP_KERNEL); if (!insns) return -ENOMEM; diff --git a/drivers/comedi/drivers.c b/drivers/comedi/drivers.c index 376130bfba8a..9e4b7c840a8f 100644 --- a/drivers/comedi/drivers.c +++ 
b/drivers/comedi/drivers.c @@ -339,10 +339,10 @@ int comedi_dio_insn_config(struct comedi_device *dev, unsigned int *data, unsigned int mask) { - unsigned int chan_mask = 1 << CR_CHAN(insn->chanspec); + unsigned int chan = CR_CHAN(insn->chanspec); - if (!mask) - mask = chan_mask; + if (!mask && chan < 32) + mask = 1U << chan; switch (data[0]) { case INSN_CONFIG_DIO_INPUT: @@ -382,7 +382,7 @@ EXPORT_SYMBOL_GPL(comedi_dio_insn_config); unsigned int comedi_dio_update_state(struct comedi_subdevice *s, unsigned int *data) { - unsigned int chanmask = (s->n_chan < 32) ? ((1 << s->n_chan) - 1) + unsigned int chanmask = (s->n_chan < 32) ? ((1U << s->n_chan) - 1) : 0xffffffff; unsigned int mask = data[0] & chanmask; unsigned int bits = data[1]; @@ -615,6 +615,9 @@ static int insn_rw_emulate_bits(struct comedi_device *dev, unsigned int _data[2]; int ret; + if (insn->n == 0) + return 0; + memset(_data, 0, sizeof(_data)); memset(&_insn, 0, sizeof(_insn)); _insn.insn = INSN_BITS; @@ -625,8 +628,8 @@ static int insn_rw_emulate_bits(struct comedi_device *dev, if (insn->insn == INSN_WRITE) { if (!(s->subdev_flags & SDF_WRITABLE)) return -EINVAL; - _data[0] = 1 << (chan - base_chan); /* mask */ - _data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */ + _data[0] = 1U << (chan - base_chan); /* mask */ + _data[1] = data[0] ? (1U << (chan - base_chan)) : 0; /* bits */ } ret = s->insn_bits(dev, s, &_insn, _data); @@ -709,7 +712,7 @@ static int __comedi_device_postconfig(struct comedi_device *dev) if (s->type == COMEDI_SUBD_DO) { if (s->n_chan < 32) - s->io_bits = (1 << s->n_chan) - 1; + s->io_bits = (1U << s->n_chan) - 1; else s->io_bits = 0xffffffff; } diff --git a/drivers/comedi/drivers/aio_iiro_16.c b/drivers/comedi/drivers/aio_iiro_16.c index b00fab0b89d4..739cc4db52ac 100644 --- a/drivers/comedi/drivers/aio_iiro_16.c +++ b/drivers/comedi/drivers/aio_iiro_16.c @@ -177,7 +177,8 @@ static int aio_iiro_16_attach(struct comedi_device *dev, * Digital input change of state interrupts are optionally supported * using IRQ 2-7, 10-12, 14, or 15. 
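The comedi fixes above and below all close variants of the same hole: 1 << n is undefined behaviour when n is negative or 32 and larger, and 1 << 31 overflows a signed int, so a board- or user-supplied bit index has to be range-checked before shifting and the constant made unsigned. The defensive shape, as a generic helper with a hypothetical name:

	/* Sketch: validate an externally supplied IRQ index before shifting. */
	static bool irq_option_valid(int opt, unsigned int valid_mask)
	{
		if (opt <= 0 || opt >= 16)		/* keep the shift in range */
			return false;
		return (1U << opt) & valid_mask;	/* unsigned: safe at bit 31 */
	}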
*/ - if ((1 << it->options[1]) & 0xdcfc) { + if (it->options[1] > 0 && it->options[1] < 16 && + (1 << it->options[1]) & 0xdcfc) { ret = request_irq(it->options[1], aio_iiro_16_cos, 0, dev->board_name, dev); if (ret == 0) diff --git a/drivers/comedi/drivers/comedi_test.c b/drivers/comedi/drivers/comedi_test.c index 05ae9122823f..e713ef611434 100644 --- a/drivers/comedi/drivers/comedi_test.c +++ b/drivers/comedi/drivers/comedi_test.c @@ -790,7 +790,7 @@ static void waveform_detach(struct comedi_device *dev) { struct waveform_private *devpriv = dev->private; - if (devpriv) { + if (devpriv && dev->n_subdevices) { del_timer_sync(&devpriv->ai_timer); del_timer_sync(&devpriv->ao_timer); } diff --git a/drivers/comedi/drivers/das16m1.c b/drivers/comedi/drivers/das16m1.c index b8ea737ad3d1..1b638f5b5a4f 100644 --- a/drivers/comedi/drivers/das16m1.c +++ b/drivers/comedi/drivers/das16m1.c @@ -522,7 +522,8 @@ static int das16m1_attach(struct comedi_device *dev, devpriv->extra_iobase = dev->iobase + DAS16M1_8255_IOBASE; /* only irqs 2, 3, 4, 5, 6, 7, 10, 11, 12, 14, and 15 are valid */ - if ((1 << it->options[1]) & 0xdcfc) { + if (it->options[1] >= 2 && it->options[1] <= 15 && + (1 << it->options[1]) & 0xdcfc) { ret = request_irq(it->options[1], das16m1_interrupt, 0, dev->board_name, dev); if (ret == 0) diff --git a/drivers/comedi/drivers/das6402.c b/drivers/comedi/drivers/das6402.c index 68f95330de45..7660487e563c 100644 --- a/drivers/comedi/drivers/das6402.c +++ b/drivers/comedi/drivers/das6402.c @@ -567,7 +567,8 @@ static int das6402_attach(struct comedi_device *dev, das6402_reset(dev); /* IRQs 2,3,5,6,7, 10,11,15 are valid for "enhanced" mode */ - if ((1 << it->options[1]) & 0x8cec) { + if (it->options[1] > 0 && it->options[1] < 16 && + (1 << it->options[1]) & 0x8cec) { ret = request_irq(it->options[1], das6402_interrupt, 0, dev->board_name, dev); if (ret == 0) { diff --git a/drivers/comedi/drivers/pcl812.c b/drivers/comedi/drivers/pcl812.c index 0df639c6a595..abca61a72cf7 100644 --- a/drivers/comedi/drivers/pcl812.c +++ b/drivers/comedi/drivers/pcl812.c @@ -1149,7 +1149,8 @@ static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it) if (IS_ERR(dev->pacer)) return PTR_ERR(dev->pacer); - if ((1 << it->options[1]) & board->irq_bits) { + if (it->options[1] > 0 && it->options[1] < 16 && + (1 << it->options[1]) & board->irq_bits) { ret = request_irq(it->options[1], pcl812_interrupt, 0, dev->board_name, dev); if (ret == 0) diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index 2562dc001fc1..f3c30037dc8e 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -38,7 +38,6 @@ struct psci_cpuidle_data { static DEFINE_PER_CPU_READ_MOSTLY(struct psci_cpuidle_data, psci_cpuidle_data); static DEFINE_PER_CPU(u32, domain_state); static bool psci_cpuidle_use_syscore; -static bool psci_cpuidle_use_cpuhp; void psci_set_domain_state(u32 state) { @@ -105,8 +104,12 @@ static int psci_idle_cpuhp_up(unsigned int cpu) { struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev); - if (pd_dev) - pm_runtime_get_sync(pd_dev); + if (pd_dev) { + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + pm_runtime_get_sync(pd_dev); + else + dev_pm_genpd_resume(pd_dev); + } return 0; } @@ -116,7 +119,11 @@ static int psci_idle_cpuhp_down(unsigned int cpu) struct device *pd_dev = __this_cpu_read(psci_cpuidle_data.dev); if (pd_dev) { - pm_runtime_put_sync(pd_dev); + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + pm_runtime_put_sync(pd_dev); + else + dev_pm_genpd_suspend(pd_dev); + 
/* Clear domain state to start fresh at next online. */ psci_set_domain_state(0); } @@ -177,9 +184,6 @@ static void psci_idle_init_cpuhp(void) { int err; - if (!psci_cpuidle_use_cpuhp) - return; - err = cpuhp_setup_state_nocalls(CPUHP_AP_CPU_PM_STARTING, "cpuidle/psci:online", psci_idle_cpuhp_up, @@ -240,10 +244,8 @@ static int psci_dt_cpu_init_topology(struct cpuidle_driver *drv, * s2ram and s2idle. */ drv->states[state_count - 1].enter_s2idle = psci_enter_s2idle_domain_idle_state; - if (!IS_ENABLED(CONFIG_PREEMPT_RT)) { + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) drv->states[state_count - 1].enter = psci_enter_domain_idle_state; - psci_cpuidle_use_cpuhp = true; - } return 0; } @@ -320,7 +322,6 @@ static void psci_cpu_deinit_idle(int cpu) dt_idle_detach_cpu(data->dev); psci_cpuidle_use_syscore = false; - psci_cpuidle_use_cpuhp = false; } static int psci_idle_init_cpu(struct device *dev, int cpu) diff --git a/drivers/crypto/intel/iaa/iaa_crypto_main.c b/drivers/crypto/intel/iaa/iaa_crypto_main.c index e1f60f0f507c..df2728cccf8b 100644 --- a/drivers/crypto/intel/iaa/iaa_crypto_main.c +++ b/drivers/crypto/intel/iaa/iaa_crypto_main.c @@ -1126,8 +1126,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, struct idxd_wq *wq, dma_addr_t src_addr, unsigned int slen, dma_addr_t dst_addr, unsigned int *dlen, - u32 *compression_crc, - bool disable_async) + u32 *compression_crc) { struct iaa_device_compression_mode *active_compression_mode; struct iaa_compression_ctx *ctx = crypto_tfm_ctx(tfm); @@ -1170,7 +1169,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, desc->src2_size = sizeof(struct aecs_comp_table_record); desc->completion_addr = idxd_desc->compl_dma; - if (ctx->use_irq && !disable_async) { + if (ctx->use_irq) { desc->flags |= IDXD_OP_FLAG_RCI; idxd_desc->crypto.req = req; @@ -1183,8 +1182,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, " src_addr %llx, dst_addr %llx\n", __func__, active_compression_mode->name, src_addr, dst_addr); - } else if (ctx->async_mode && !disable_async) - req->base.data = idxd_desc; + } dev_dbg(dev, "%s: compression mode %s," " desc->src1_addr %llx, desc->src1_size %d," @@ -1204,7 +1202,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, update_total_comp_calls(); update_wq_comp_calls(wq); - if (ctx->async_mode && !disable_async) { + if (ctx->async_mode) { ret = -EINPROGRESS; dev_dbg(dev, "%s: returning -EINPROGRESS\n", __func__); goto out; @@ -1224,7 +1222,7 @@ static int iaa_compress(struct crypto_tfm *tfm, struct acomp_req *req, *compression_crc = idxd_desc->iax_completion->crc; - if (!ctx->async_mode || disable_async) + if (!ctx->async_mode) idxd_free_desc(wq, idxd_desc); out: return ret; @@ -1421,8 +1419,7 @@ static int iaa_decompress(struct crypto_tfm *tfm, struct acomp_req *req, " src_addr %llx, dst_addr %llx\n", __func__, active_compression_mode->name, src_addr, dst_addr); - } else if (ctx->async_mode && !disable_async) - req->base.data = idxd_desc; + } dev_dbg(dev, "%s: decompression mode %s," " desc->src1_addr %llx, desc->src1_size %d," @@ -1490,13 +1487,11 @@ static int iaa_comp_acompress(struct acomp_req *req) struct iaa_compression_ctx *compression_ctx; struct crypto_tfm *tfm = req->base.tfm; dma_addr_t src_addr, dst_addr; - bool disable_async = false; int nr_sgs, cpu, ret = 0; struct iaa_wq *iaa_wq; u32 compression_crc; struct idxd_wq *wq; struct device *dev; - int order = -1; compression_ctx = crypto_tfm_ctx(tfm); @@ -1526,21 +1521,6 @@ static int 
iaa_comp_acompress(struct acomp_req *req) iaa_wq = idxd_wq_get_private(wq); - if (!req->dst) { - gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; - - /* incompressible data will always be < 2 * slen */ - req->dlen = 2 * req->slen; - order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); - req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); - if (!req->dst) { - ret = -ENOMEM; - order = -1; - goto out; - } - disable_async = true; - } - dev = &wq->idxd->pdev->dev; nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); @@ -1570,7 +1550,7 @@ static int iaa_comp_acompress(struct acomp_req *req) req->dst, req->dlen, sg_dma_len(req->dst)); ret = iaa_compress(tfm, req, wq, src_addr, req->slen, dst_addr, - &req->dlen, &compression_crc, disable_async); + &req->dlen, &compression_crc); if (ret == -EINPROGRESS) return ret; @@ -1601,100 +1581,6 @@ err_map_dst: out: iaa_wq_put(wq); - if (order >= 0) - sgl_free_order(req->dst, order); - - return ret; -} - -static int iaa_comp_adecompress_alloc_dest(struct acomp_req *req) -{ - gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? - GFP_KERNEL : GFP_ATOMIC; - struct crypto_tfm *tfm = req->base.tfm; - dma_addr_t src_addr, dst_addr; - int nr_sgs, cpu, ret = 0; - struct iaa_wq *iaa_wq; - struct device *dev; - struct idxd_wq *wq; - int order = -1; - - cpu = get_cpu(); - wq = wq_table_next_wq(cpu); - put_cpu(); - if (!wq) { - pr_debug("no wq configured for cpu=%d\n", cpu); - return -ENODEV; - } - - ret = iaa_wq_get(wq); - if (ret) { - pr_debug("no wq available for cpu=%d\n", cpu); - return -ENODEV; - } - - iaa_wq = idxd_wq_get_private(wq); - - dev = &wq->idxd->pdev->dev; - - nr_sgs = dma_map_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); - if (nr_sgs <= 0 || nr_sgs > 1) { - dev_dbg(dev, "couldn't map src sg for iaa device %d," - " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, - iaa_wq->wq->id, ret); - ret = -EIO; - goto out; - } - src_addr = sg_dma_address(req->src); - dev_dbg(dev, "dma_map_sg, src_addr %llx, nr_sgs %d, req->src %p," - " req->slen %d, sg_dma_len(sg) %d\n", src_addr, nr_sgs, - req->src, req->slen, sg_dma_len(req->src)); - - req->dlen = 4 * req->slen; /* start with ~avg comp rato */ -alloc_dest: - order = order_base_2(round_up(req->dlen, PAGE_SIZE) / PAGE_SIZE); - req->dst = sgl_alloc_order(req->dlen, order, false, flags, NULL); - if (!req->dst) { - ret = -ENOMEM; - order = -1; - goto out; - } - - nr_sgs = dma_map_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); - if (nr_sgs <= 0 || nr_sgs > 1) { - dev_dbg(dev, "couldn't map dst sg for iaa device %d," - " wq %d: ret=%d\n", iaa_wq->iaa_device->idxd->id, - iaa_wq->wq->id, ret); - ret = -EIO; - goto err_map_dst; - } - - dst_addr = sg_dma_address(req->dst); - dev_dbg(dev, "dma_map_sg, dst_addr %llx, nr_sgs %d, req->dst %p," - " req->dlen %d, sg_dma_len(sg) %d\n", dst_addr, nr_sgs, - req->dst, req->dlen, sg_dma_len(req->dst)); - ret = iaa_decompress(tfm, req, wq, src_addr, req->slen, - dst_addr, &req->dlen, true); - if (ret == -EOVERFLOW) { - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); - req->dlen *= 2; - if (req->dlen > CRYPTO_ACOMP_DST_MAX) - goto err_map_dst; - goto alloc_dest; - } - - if (ret != 0) - dev_dbg(dev, "asynchronous decompress failed ret=%d\n", ret); - - dma_unmap_sg(dev, req->dst, sg_nents(req->dst), DMA_FROM_DEVICE); -err_map_dst: - dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_TO_DEVICE); -out: - iaa_wq_put(wq); - - if (order >= 0) - sgl_free_order(req->dst, 
order); - return ret; } @@ -1717,9 +1603,6 @@ static int iaa_comp_adecompress(struct acomp_req *req) return -EINVAL; } - if (!req->dst) - return iaa_comp_adecompress_alloc_dest(req); - cpu = get_cpu(); wq = wq_table_next_wq(cpu); put_cpu(); @@ -1800,19 +1683,10 @@ static int iaa_comp_init_fixed(struct crypto_acomp *acomp_tfm) return 0; } -static void dst_free(struct scatterlist *sgl) -{ - /* - * Called for req->dst = NULL cases but we free elsewhere - * using sgl_free_order(). - */ -} - static struct acomp_alg iaa_acomp_fixed_deflate = { .init = iaa_comp_init_fixed, .compress = iaa_comp_acompress, .decompress = iaa_comp_adecompress, - .dst_free = dst_free, .base = { .cra_name = "deflate", .cra_driver_name = "deflate-iaa", diff --git a/drivers/crypto/xilinx/zynqmp-sha.c b/drivers/crypto/xilinx/zynqmp-sha.c index 1bcec6f46c9c..9b5345068604 100644 --- a/drivers/crypto/xilinx/zynqmp-sha.c +++ b/drivers/crypto/xilinx/zynqmp-sha.c @@ -3,18 +3,19 @@ * Xilinx ZynqMP SHA Driver. * Copyright (c) 2022 Xilinx Inc. */ -#include <linux/cacheflush.h> #include <crypto/hash.h> #include <crypto/internal/hash.h> #include <crypto/sha3.h> -#include <linux/crypto.h> +#include <linux/cacheflush.h> +#include <linux/cleanup.h> #include <linux/device.h> #include <linux/dma-mapping.h> +#include <linux/err.h> #include <linux/firmware/xlnx-zynqmp.h> -#include <linux/init.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/spinlock.h> #include <linux/platform_device.h> #define ZYNQMP_DMA_BIT_MASK 32U @@ -43,6 +44,8 @@ struct zynqmp_sha_desc_ctx { static dma_addr_t update_dma_addr, final_dma_addr; static char *ubuf, *fbuf; +static DEFINE_SPINLOCK(zynqmp_sha_lock); + static int zynqmp_sha_init_tfm(struct crypto_shash *hash) { const char *fallback_driver_name = crypto_shash_alg_name(hash); @@ -124,7 +127,8 @@ static int zynqmp_sha_export(struct shash_desc *desc, void *out) return crypto_shash_export(&dctx->fbk_req, out); } -static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) +static int __zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, + unsigned int len, u8 *out) { unsigned int remaining_len = len; int update_size; @@ -159,6 +163,12 @@ static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned i return ret; } +static int zynqmp_sha_digest(struct shash_desc *desc, const u8 *data, unsigned int len, u8 *out) +{ + scoped_guard(spinlock_bh, &zynqmp_sha_lock) + return __zynqmp_sha_digest(desc, data, len, out); +} + static struct zynqmp_sha_drv_ctx sha3_drv_ctx = { .sha3_384 = { .init = zynqmp_sha_init, diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index b1ef4546346d..bea3e9858aca 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -685,11 +685,13 @@ long dma_resv_wait_timeout(struct dma_resv *obj, enum dma_resv_usage usage, dma_resv_iter_begin(&cursor, obj, usage); dma_resv_for_each_fence_unlocked(&cursor, fence) { - ret = dma_fence_wait_timeout(fence, intr, ret); - if (ret <= 0) { - dma_resv_iter_end(&cursor); - return ret; - } + ret = dma_fence_wait_timeout(fence, intr, timeout); + if (ret <= 0) + break; + + /* Even for zero timeout the return value is 1 */ + if (timeout) + timeout = ret; } dma_resv_iter_end(&cursor); diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c index 3b011a91d48e..5f5d6242427e 100644 --- a/drivers/dma/nbpfaxi.c +++ b/drivers/dma/nbpfaxi.c @@ -1351,7 +1351,7 @@ static int nbpf_probe(struct platform_device *pdev) if (irqs == 1) { 
eirq = irqbuf[0]; - for (i = 0; i <= num_channels; i++) + for (i = 0; i < num_channels; i++) nbpf->chan[i].irq = irqbuf[0]; } else { eirq = platform_get_irq_byname(pdev, "error"); @@ -1361,16 +1361,15 @@ static int nbpf_probe(struct platform_device *pdev) if (irqs == num_channels + 1) { struct nbpf_channel *chan; - for (i = 0, chan = nbpf->chan; i <= num_channels; + for (i = 0, chan = nbpf->chan; i < num_channels; i++, chan++) { /* Skip the error IRQ */ if (irqbuf[i] == eirq) i++; + if (i >= ARRAY_SIZE(irqbuf)) + return -EINVAL; chan->irq = irqbuf[i]; } - - if (chan != nbpf->chan + num_channels) - return -EINVAL; } else { /* 2 IRQs and more than one channel */ if (irqbuf[0] == eirq) @@ -1378,7 +1377,7 @@ static int nbpf_probe(struct platform_device *pdev) else irq = irqbuf[0]; - for (i = 0; i <= num_channels; i++) + for (i = 0; i < num_channels; i++) nbpf->chan[i].irq = irq; } } diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index 47751b2c057a..83dad9c2da06 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -110,7 +110,7 @@ struct ffa_drv_info { struct work_struct sched_recv_irq_work; struct xarray partition_info; DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS)); - struct mutex notify_lock; /* lock to protect notifier hashtable */ + rwlock_t notify_lock; /* lock to protect notifier hashtable */ }; static struct ffa_drv_info *drv_info; @@ -1141,12 +1141,11 @@ notifier_hash_node_get(u16 notify_id, enum notify_type type) return NULL; } -static int -update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb, - void *cb_data, bool is_registration) +static int update_notifier_cb(int notify_id, enum notify_type type, + struct notifier_cb_info *cb) { struct notifier_cb_info *cb_info = NULL; - bool cb_found; + bool cb_found, is_registration = !!cb; cb_info = notifier_hash_node_get(notify_id, type); cb_found = !!cb_info; @@ -1155,17 +1154,10 @@ update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb, return -EINVAL; if (is_registration) { - cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL); - if (!cb_info) - return -ENOMEM; - - cb_info->type = type; - cb_info->cb = cb; - cb_info->cb_data = cb_data; - - hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id); + hash_add(drv_info->notifier_hash, &cb->hnode, notify_id); } else { hash_del(&cb_info->hnode); + kfree(cb_info); } return 0; @@ -1190,18 +1182,18 @@ static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id) if (notify_id >= FFA_MAX_NOTIFICATIONS) return -EINVAL; - mutex_lock(&drv_info->notify_lock); + write_lock(&drv_info->notify_lock); - rc = update_notifier_cb(notify_id, type, NULL, NULL, false); + rc = update_notifier_cb(notify_id, type, NULL); if (rc) { pr_err("Could not unregister notification callback\n"); - mutex_unlock(&drv_info->notify_lock); + write_unlock(&drv_info->notify_lock); return rc; } rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id)); - mutex_unlock(&drv_info->notify_lock); + write_unlock(&drv_info->notify_lock); return rc; } @@ -1211,6 +1203,7 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu, { int rc; u32 flags = 0; + struct notifier_cb_info *cb_info = NULL; enum notify_type type = ffa_notify_type_get(dev->vm_id); if (ffa_notifications_disabled()) @@ -1219,24 +1212,34 @@ static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu, if (notify_id >= FFA_MAX_NOTIFICATIONS) return -EINVAL; - mutex_lock(&drv_info->notify_lock); + cb_info = 
kzalloc(sizeof(*cb_info), GFP_KERNEL); + if (!cb_info) + return -ENOMEM; + + cb_info->type = type; + cb_info->cb_data = cb_data; + cb_info->cb = cb; + + write_lock(&drv_info->notify_lock); if (is_per_vcpu) flags = PER_VCPU_NOTIFICATION_FLAG; rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags); - if (rc) { - mutex_unlock(&drv_info->notify_lock); - return rc; - } + if (rc) + goto out_unlock_free; - rc = update_notifier_cb(notify_id, type, cb, cb_data, true); + rc = update_notifier_cb(notify_id, type, cb_info); if (rc) { pr_err("Failed to register callback for %d - %d\n", notify_id, rc); ffa_notification_unbind(dev->vm_id, BIT(notify_id)); } - mutex_unlock(&drv_info->notify_lock); + +out_unlock_free: + write_unlock(&drv_info->notify_lock); + if (rc) + kfree(cb_info); return rc; } @@ -1266,9 +1269,9 @@ static void handle_notif_callbacks(u64 bitmap, enum notify_type type) if (!(bitmap & 1)) continue; - mutex_lock(&drv_info->notify_lock); + read_lock(&drv_info->notify_lock); cb_info = notifier_hash_node_get(notify_id, type); - mutex_unlock(&drv_info->notify_lock); + read_unlock(&drv_info->notify_lock); if (cb_info && cb_info->cb) cb_info->cb(notify_id, cb_info->cb_data); @@ -1718,7 +1721,7 @@ static void ffa_notifications_setup(void) goto cleanup; hash_init(drv_info->notifier_hash); - mutex_init(&drv_info->notify_lock); + rwlock_init(&drv_info->notify_lock); drv_info->notif_enabled = true; return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index a55e611605fc..8cf224fd4ff2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4144,7 +4144,6 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->grbm_idx_mutex); mutex_init(&adev->mn_lock); mutex_init(&adev->virt.vf_errors.lock); - mutex_init(&adev->virt.rlcg_reg_lock); hash_init(adev->mn_hash); mutex_init(&adev->psp.mutex); mutex_init(&adev->notifier_lock); @@ -4170,6 +4169,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, spin_lock_init(&adev->se_cac_idx_lock); spin_lock_init(&adev->audio_endpt_idx_lock); spin_lock_init(&adev->mm_stats.lock); + spin_lock_init(&adev->virt.rlcg_reg_lock); spin_lock_init(&adev->wb.lock); INIT_LIST_HEAD(&adev->reset_list); @@ -4954,6 +4954,8 @@ exit: dev->dev->power.disable_depth--; #endif } + + amdgpu_vram_mgr_clear_reset_blocks(adev); adev->in_suspend = false; if (adev->enable_mes) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 34d41e3ce347..eee434743deb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -112,6 +112,14 @@ #endif MODULE_FIRMWARE("amdgpu/ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/vega10_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/vega12_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/vega20_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/raven_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/raven2_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/picasso_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/arcturus_ip_discovery.bin"); +MODULE_FIRMWARE("amdgpu/aldebaran_ip_discovery.bin"); #define mmIP_DISCOVERY_VERSION 0x16A00 #define mmRCC_CONFIG_MEMSIZE 0xde3 @@ -400,7 +408,27 @@ static const char *amdgpu_discovery_get_fw_name(struct amdgpu_device *adev) if (amdgpu_discovery == 2) return "amdgpu/ip_discovery.bin"; - return NULL; + switch (adev->asic_type) { + case CHIP_VEGA10: + return "amdgpu/vega10_ip_discovery.bin"; + case CHIP_VEGA12: + return 
"amdgpu/vega12_ip_discovery.bin"; + case CHIP_RAVEN: + if (adev->apu_flags & AMD_APU_IS_RAVEN2) + return "amdgpu/raven2_ip_discovery.bin"; + else if (adev->apu_flags & AMD_APU_IS_PICASSO) + return "amdgpu/picasso_ip_discovery.bin"; + else + return "amdgpu/raven_ip_discovery.bin"; + case CHIP_VEGA20: + return "amdgpu/vega20_ip_discovery.bin"; + case CHIP_ARCTURUS: + return "amdgpu/arcturus_ip_discovery.bin"; + case CHIP_ALDEBARAN: + return "amdgpu/aldebaran_ip_discovery.bin"; + default: + return NULL; + } } static int amdgpu_discovery_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 7d4b540340e0..41b88e0ea98b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -860,7 +860,9 @@ int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev, queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); queue_input.wptr_addr = ring->wptr_gpu_addr; + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->map_legacy_queue(&adev->mes, &queue_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to map legacy queue\n"); @@ -883,7 +885,9 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev, queue_input.trail_fence_addr = gpu_addr; queue_input.trail_fence_data = seq; + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->unmap_legacy_queue(&adev->mes, &queue_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to unmap legacy queue\n"); @@ -910,7 +914,9 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev, queue_input.vmid = vmid; queue_input.use_mmio = use_mmio; + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to reset legacy queue\n"); @@ -931,7 +937,9 @@ uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg) goto error; } + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to read reg (0x%x)\n", reg); else @@ -957,7 +965,9 @@ int amdgpu_mes_wreg(struct amdgpu_device *adev, goto error; } + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to write reg (0x%x)\n", reg); @@ -984,7 +994,9 @@ int amdgpu_mes_reg_write_reg_wait(struct amdgpu_device *adev, goto error; } + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to reg_write_reg_wait\n"); @@ -1009,7 +1021,9 @@ int amdgpu_mes_reg_wait(struct amdgpu_device *adev, uint32_t reg, goto error; } + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); + amdgpu_mes_unlock(&adev->mes); if (r) DRM_ERROR("failed to reg_write_reg_wait\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 48e30e5f8338..3d42f6c3308e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -3430,7 +3430,10 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name) uint8_t *ucode_array_start_addr; int err = 0; - err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos.bin", chip_name); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, "amdgpu/%s_sos_kicker.bin", chip_name); + else + err = amdgpu_ucode_request(adev, &adev->psp.sos_fw, 
"amdgpu/%s_sos.bin", chip_name); if (err) goto out; @@ -3672,7 +3675,10 @@ int psp_init_ta_microcode(struct psp_context *psp, const char *chip_name) struct amdgpu_device *adev = psp->adev; int err; - err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta_kicker.bin", chip_name); + else + err = amdgpu_ucode_request(adev, &adev->psp.ta_fw, "amdgpu/%s_ta.bin", chip_name); if (err) return err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 690976665cf6..10da6e550d76 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -439,6 +439,7 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, { unsigned long flags; ktime_t deadline; + bool ret; if (unlikely(ring->adev->debug_disable_soft_recovery)) return false; @@ -453,12 +454,16 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, dma_fence_set_error(fence, -ENODATA); spin_unlock_irqrestore(fence->lock, flags); - atomic_inc(&ring->adev->gpu_reset_counter); while (!dma_fence_is_signaled(fence) && ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) ring->funcs->soft_recovery(ring, vmid); - return dma_fence_is_signaled(fence); + ret = dma_fence_is_signaled(fence); + /* increment the counter only if soft reset worked */ + if (ret) + atomic_inc(&ring->adev->gpu_reset_counter); + + return ret; } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index b7742fa74e1d..3c883f1cf068 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -153,6 +153,7 @@ int amdgpu_vram_mgr_reserve_range(struct amdgpu_vram_mgr *mgr, uint64_t start, uint64_t size); int amdgpu_vram_mgr_query_page_status(struct amdgpu_vram_mgr *mgr, uint64_t start); +void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev); bool amdgpu_res_cpu_visible(struct amdgpu_device *adev, struct ttm_resource *res); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index b6397d3229e1..01dccd489a80 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -1010,6 +1010,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f void *scratch_reg2; void *scratch_reg3; void *spare_int; + unsigned long flags; if (!adev->gfx.rlc.rlcg_reg_access_supported) { dev_err(adev->dev, @@ -1031,7 +1032,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2; scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3; - mutex_lock(&adev->virt.rlcg_reg_lock); + spin_lock_irqsave(&adev->virt.rlcg_reg_lock, flags); if (reg_access_ctrl->spare_int) spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int; @@ -1090,7 +1091,7 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f ret = readl(scratch_reg0); - mutex_unlock(&adev->virt.rlcg_reg_lock); + spin_unlock_irqrestore(&adev->virt.rlcg_reg_lock, flags); return ret; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index b650a2032c42..6a2087abfb7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -275,7 
+275,8 @@ struct amdgpu_virt { /* the ucode id to signal the autoload */ uint32_t autoload_ucode_id; - struct mutex rlcg_reg_lock; + /* Spinlock to protect access to the RLCG register interface */ + spinlock_t rlcg_reg_lock; }; struct amdgpu_video_codec_info; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 8f58ec6f1400..732c79e201c6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -783,6 +783,23 @@ uint64_t amdgpu_vram_mgr_vis_usage(struct amdgpu_vram_mgr *mgr) } /** + * amdgpu_vram_mgr_clear_reset_blocks - reset clear blocks + * + * @adev: amdgpu device pointer + * + * Reset the cleared drm buddy blocks. + */ +void amdgpu_vram_mgr_clear_reset_blocks(struct amdgpu_device *adev) +{ + struct amdgpu_vram_mgr *mgr = &adev->mman.vram_mgr; + struct drm_buddy *mm = &mgr->mm; + + mutex_lock(&mgr->lock); + drm_buddy_reset_clear(mm, false); + mutex_unlock(&mgr->lock); +} + +/** * amdgpu_vram_mgr_intersects - test each drm buddy block for intersection * * @man: TTM memory type manager diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 1f06b22dbe7c..96e5c520af31 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -84,6 +84,7 @@ MODULE_FIRMWARE("amdgpu/gc_11_0_0_pfp.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_me.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_mec.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_kicker.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_rlc_1.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_0_toc.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_pfp.bin"); @@ -734,6 +735,9 @@ static int gfx_v11_0_init_microcode(struct amdgpu_device *adev) adev->pdev->revision == 0xCE) err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, "amdgpu/gc_11_0_0_rlc_1.bin"); + else if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, + "amdgpu/%s_rlc_kicker.bin", ucode_prefix); else err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, "amdgpu/%s_rlc.bin", ucode_prefix); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 9d741695ca07..1f675d67a1a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4652,6 +4652,7 @@ static int gfx_v8_0_kcq_init_queue(struct amdgpu_ring *ring) memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation)); /* reset ring buffer */ ring->wptr = 0; + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); amdgpu_ring_clear_ring(ring); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c index d4f72e47ae9e..c4f5cbf1ecd7 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c @@ -32,6 +32,7 @@ #include "gc/gc_11_0_0_sh_mask.h" MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu.bin"); +MODULE_FIRMWARE("amdgpu/gc_11_0_0_imu_kicker.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_1_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_2_imu.bin"); MODULE_FIRMWARE("amdgpu/gc_11_0_3_imu.bin"); @@ -50,7 +51,10 @@ static int imu_v11_0_init_microcode(struct amdgpu_device *adev) DRM_DEBUG("\n"); amdgpu_ucode_ip_version_decode(adev, GC_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, 
"amdgpu/%s_imu_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->gfx.imu_fw, "amdgpu/%s_imu.bin", ucode_prefix); if (err) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index bf00de763acb..124f74e862d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -42,7 +42,9 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_5_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_8_toc.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_8_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_0_sos_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta_kicker.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin"); diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c new file mode 100644 index 000000000000..cdefd7fcb0da --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v5_0_1.c @@ -0,0 +1,1624 @@ +/* + * Copyright 2024 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include <linux/firmware.h> +#include "amdgpu.h" +#include "amdgpu_vcn.h" +#include "amdgpu_pm.h" +#include "soc15.h" +#include "soc15d.h" +#include "soc15_hw_ip.h" +#include "vcn_v2_0.h" +#include "vcn_v4_0_3.h" +#include "mmsch_v5_0.h" + +#include "vcn/vcn_5_0_0_offset.h" +#include "vcn/vcn_5_0_0_sh_mask.h" +#include "ivsrcid/vcn/irqsrcs_vcn_5_0.h" +#include "vcn_v5_0_0.h" +#include "vcn_v5_0_1.h" + +#include <drm/drm_drv.h> + +static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev); +static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev); +static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev); +static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst, + enum amd_powergating_state state); +static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring); +static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev); +/** + * vcn_v5_0_1_early_init - set function pointers and load microcode + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
+ * + * Set ring and irq function pointers + * Load microcode from filesystem + */ +static int vcn_v5_0_1_early_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i, r; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) + /* re-use enc ring as unified ring */ + adev->vcn.inst[i].num_enc_rings = 1; + + vcn_v5_0_1_set_unified_ring_funcs(adev); + vcn_v5_0_1_set_irq_funcs(adev); + vcn_v5_0_1_set_ras_funcs(adev); + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + adev->vcn.inst[i].set_pg_state = vcn_v5_0_1_set_pg_state; + + r = amdgpu_vcn_early_init(adev, i); + if (r) + return r; + } + + return 0; +} + +static void vcn_v5_0_1_fw_shared_init(struct amdgpu_device *adev, int inst_idx) +{ + struct amdgpu_vcn5_fw_shared *fw_shared; + + fw_shared = adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + + if (fw_shared->sq.is_enabled) + return; + fw_shared->present_flag_0 = + cpu_to_le32(AMDGPU_FW_SHARED_FLAG_0_UNIFIED_QUEUE); + fw_shared->sq.is_enabled = 1; + + if (amdgpu_vcnfw_log) + amdgpu_vcn_fwlog_init(&adev->vcn.inst[inst_idx]); +} + +/** + * vcn_v5_0_1_sw_init - sw init for VCN block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * + * Load firmware and sw initialization + */ +static int vcn_v5_0_1_sw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ring *ring; + int i, r, vcn_inst; + + /* VCN UNIFIED TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE, &adev->vcn.inst->irq); + if (r) + return r; + + /* VCN POISON TRAP */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN, + VCN_5_0__SRCID_UVD_POISON, &adev->vcn.inst->ras_poison_irq); + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + vcn_inst = GET_INST(VCN, i); + + r = amdgpu_vcn_sw_init(adev, i); + if (r) + return r; + + amdgpu_vcn_setup_ucode(adev, i); + + r = amdgpu_vcn_resume(adev, i); + if (r) + return r; + + ring = &adev->vcn.inst[i].ring_enc[0]; + ring->use_doorbell = true; + if (!amdgpu_sriov_vf(adev)) + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 11 * vcn_inst; + else + ring->doorbell_index = + (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 32 * vcn_inst; + + ring->vm_hub = AMDGPU_MMHUB0(adev->vcn.inst[i].aid_id); + sprintf(ring->name, "vcn_unified_%d", adev->vcn.inst[i].aid_id); + + r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst[i].irq, 0, + AMDGPU_RING_PRIO_DEFAULT, &adev->vcn.inst[i].sched_score); + if (r) + return r; + + vcn_v5_0_1_fw_shared_init(adev, i); + } + + /* TODO: Add queue reset mask when FW fully supports it */ + adev->vcn.supported_reset = + amdgpu_get_soft_full_reset_mask(&adev->vcn.inst[0].ring_enc[0]); + + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_virt_alloc_mm_table(adev); + if (r) + return r; + } + + vcn_v5_0_0_alloc_ip_dump(adev); + + return amdgpu_vcn_sysfs_reset_mask_init(adev); +} + +/** + * vcn_v5_0_1_sw_fini - sw fini for VCN block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
+ * + * VCN suspend and free up sw allocation + */ +static int vcn_v5_0_1_sw_fini(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i, r, idx; + + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + volatile struct amdgpu_vcn5_fw_shared *fw_shared; + + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + fw_shared->present_flag_0 = 0; + fw_shared->sq.is_enabled = 0; + } + + drm_dev_exit(idx); + } + + if (amdgpu_sriov_vf(adev)) + amdgpu_virt_free_mm_table(adev); + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + r = amdgpu_vcn_suspend(adev, i); + if (r) + return r; + } + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + r = amdgpu_vcn_sw_fini(adev, i); + if (r) + return r; + } + + amdgpu_vcn_sysfs_reset_mask_fini(adev); + + kfree(adev->vcn.ip_dump); + + return 0; +} + +/** + * vcn_v5_0_1_hw_init - start and test VCN block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * + * Initialize the hardware, boot up the VCPU and do some testing + */ +static int vcn_v5_0_1_hw_init(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + struct amdgpu_ring *ring; + int i, r, vcn_inst; + + if (amdgpu_sriov_vf(adev)) { + r = vcn_v5_0_1_start_sriov(adev); + if (r) + return r; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + ring = &adev->vcn.inst[i].ring_enc[0]; + ring->wptr = 0; + ring->wptr_old = 0; + vcn_v5_0_1_unified_ring_set_wptr(ring); + ring->sched.ready = true; + } + } else { + if (RREG32_SOC15(VCN, GET_INST(VCN, 0), regVCN_RRMT_CNTL) & 0x100) + adev->vcn.caps |= AMDGPU_VCN_CAPS(RRMT_ENABLED); + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + vcn_inst = GET_INST(VCN, i); + ring = &adev->vcn.inst[i].ring_enc[0]; + + if (ring->use_doorbell) + adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell, + ((adev->doorbell_index.vcn.vcn_ring0_1 << 1) + + 11 * vcn_inst), + adev->vcn.inst[i].aid_id); + + /* Re-init fw_shared, if required */ + vcn_v5_0_1_fw_shared_init(adev, i); + + r = amdgpu_ring_test_helper(ring); + if (r) + return r; + } + } + + return 0; +} + +/** + * vcn_v5_0_1_hw_fini - stop the hardware block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * + * Stop the VCN block, mark ring as not ready any more + */ +static int vcn_v5_0_1_hw_fini(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; + + cancel_delayed_work_sync(&adev->vcn.inst[i].idle_work); + if (vinst->cur_state != AMD_PG_STATE_GATE) + vinst->set_pg_state(vinst, AMD_PG_STATE_GATE); + } + + if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN)) + amdgpu_irq_put(adev, &adev->vcn.inst->ras_poison_irq, 0); + + return 0; +} + +/** + * vcn_v5_0_1_suspend - suspend VCN block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * + * HW fini and suspend VCN block + */ +static int vcn_v5_0_1_suspend(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int r, i; + + r = vcn_v5_0_1_hw_fini(ip_block); + if (r) + return r; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + r = amdgpu_vcn_suspend(ip_block->adev, i); + if (r) + return r; + } + + return r; +} + +/** + * vcn_v5_0_1_resume - resume VCN block + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. 
+ * + * Resume firmware and hw init VCN block + */ +static int vcn_v5_0_1_resume(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int r, i; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; + + if (amdgpu_in_reset(adev)) + vinst->cur_state = AMD_PG_STATE_GATE; + + r = amdgpu_vcn_resume(ip_block->adev, i); + if (r) + return r; + } + + r = vcn_v5_0_1_hw_init(ip_block); + + return r; +} + +/** + * vcn_v5_0_1_mc_resume - memory controller programming + * + * @vinst: VCN instance + * + * Let the VCN memory controller know it's offsets + */ +static void vcn_v5_0_1_mc_resume(struct amdgpu_vcn_inst *vinst) +{ + struct amdgpu_device *adev = vinst->adev; + int inst = vinst->inst; + uint32_t offset, size, vcn_inst; + const struct common_firmware_header *hdr; + + hdr = (const struct common_firmware_header *)adev->vcn.inst[inst].fw->data; + size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); + + vcn_inst = GET_INST(VCN, inst); + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_lo)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + inst].tmr_mc_addr_hi)); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, 0); + offset = 0; + } else { + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr)); + offset = size; + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET0, + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE0, size); + + /* cache window 1: stack */ + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset)); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET1, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_STACK_SIZE); + + /* cache window 2: context */ + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].gpu_addr + offset + AMDGPU_VCN_STACK_SIZE)); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_OFFSET2, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_CACHE_SIZE2, AMDGPU_VCN_CONTEXT_SIZE); + + /* non-cache window */ + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW, + lower_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH, + upper_32_bits(adev->vcn.inst[inst].fw_shared.gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_OFFSET0, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_VCPU_NONCACHE_SIZE0, + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared))); +} + +/** + * vcn_v5_0_1_mc_resume_dpg_mode - memory controller programming for dpg mode + * + * @vinst: VCN instance + * @indirect: indirectly write sram + * + * Let the VCN memory controller know it's offsets with dpg mode + */ +static void vcn_v5_0_1_mc_resume_dpg_mode(struct amdgpu_vcn_inst 
*vinst, + bool indirect) +{ + struct amdgpu_device *adev = vinst->adev; + int inst_idx = vinst->inst; + uint32_t offset, size; + const struct common_firmware_header *hdr; + + hdr = (const struct common_firmware_header *)adev->vcn.inst[inst_idx].fw->data; + size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8); + + /* cache window 0: fw */ + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + if (!indirect) { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + + inst_idx].tmr_mc_addr_lo), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + + inst_idx].tmr_mc_addr_hi), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } else { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET0), 0, 0, indirect); + } + offset = 0; + } else { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr), 0, indirect); + offset = size; + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3, 0, indirect); + } + + if (!indirect) + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_SIZE0), size, 0, indirect); + else + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_SIZE0), 0, 0, indirect); + + /* cache window 1: stack */ + if (!indirect) { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } else { + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET1), 0, 0, indirect); + } + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE, 0, indirect); + + /* cache window 2: context */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE), 0, 
indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_OFFSET2), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE, 0, indirect); + + /* non-cache window */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[inst_idx].fw_shared.gpu_addr), 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_NONCACHE_OFFSET0), 0, 0, indirect); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn5_fw_shared)), 0, indirect); + + /* VCN global tiling registers */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_GFX10_ADDR_CONFIG), adev->gfx.config.gb_addr_config, 0, indirect); +} + +/** + * vcn_v5_0_1_disable_clock_gating - disable VCN clock gating + * + * @vinst: VCN instance + * + * Disable clock gating for VCN block + */ +static void vcn_v5_0_1_disable_clock_gating(struct amdgpu_vcn_inst *vinst) +{ +} + +/** + * vcn_v5_0_1_enable_clock_gating - enable VCN clock gating + * + * @vinst: VCN instance + * + * Enable clock gating for VCN block + */ +static void vcn_v5_0_1_enable_clock_gating(struct amdgpu_vcn_inst *vinst) +{ +} + +/** + * vcn_v5_0_1_pause_dpg_mode - VCN pause with dpg mode + * + * @vinst: VCN instance + * @new_state: pause state + * + * Pause dpg mode for VCN block + */ +static int vcn_v5_0_1_pause_dpg_mode(struct amdgpu_vcn_inst *vinst, + struct dpg_pause_state *new_state) +{ + struct amdgpu_device *adev = vinst->adev; + uint32_t reg_data = 0; + int vcn_inst; + + vcn_inst = GET_INST(VCN, vinst->inst); + + /* pause/unpause if state is changed */ + if (vinst->pause_state.fw_based != new_state->fw_based) { + DRM_DEV_DEBUG(adev->dev, "dpg pause state changed %d -> %d %s\n", + vinst->pause_state.fw_based, new_state->fw_based, + new_state->fw_based ? 
"VCN_DPG_STATE__PAUSE" : "VCN_DPG_STATE__UNPAUSE"); + reg_data = RREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE) & + (~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + + if (new_state->fw_based == VCN_DPG_STATE__PAUSE) { + /* pause DPG */ + reg_data |= UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); + + /* wait for ACK */ + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_DPG_PAUSE, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK, + UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK); + } else { + /* unpause DPG, no need to wait */ + reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_DPG_PAUSE, reg_data); + } + vinst->pause_state.fw_based = new_state->fw_based; + } + + return 0; +} + + +/** + * vcn_v5_0_1_start_dpg_mode - VCN start with dpg mode + * + * @vinst: VCN instance + * @indirect: indirectly write sram + * + * Start VCN block with dpg mode + */ +static int vcn_v5_0_1_start_dpg_mode(struct amdgpu_vcn_inst *vinst, + bool indirect) +{ + struct amdgpu_device *adev = vinst->adev; + int inst_idx = vinst->inst; + volatile struct amdgpu_vcn5_fw_shared *fw_shared = + adev->vcn.inst[inst_idx].fw_shared.cpu_addr; + struct amdgpu_ring *ring; + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__PAUSE}; + int vcn_inst; + uint32_t tmp; + + vcn_inst = GET_INST(VCN, inst_idx); + + /* disable register anti-hang mechanism */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 1, + ~UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* enable dynamic power gating mode */ + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS); + tmp |= UVD_POWER_STATUS__UVD_PG_MODE_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_POWER_STATUS, tmp); + + if (indirect) { + adev->vcn.inst[inst_idx].dpg_sram_curr_addr = + (uint32_t *)adev->vcn.inst[inst_idx].dpg_sram_cpu_addr; + /* Use dummy register 0xDEADBEEF passing AID selection to PSP FW */ + WREG32_SOC24_DPG_MODE(inst_idx, 0xDEADBEEF, + adev->vcn.inst[inst_idx].aid_id, 0, true); + } + + /* enable VCPU clock */ + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK | UVD_VCPU_CNTL__BLK_RST_MASK; + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect); + + /* disable master interrupt */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_MASTINT_EN), 0, 0, indirect); + + /* setup regUVD_LMI_CTRL */ + tmp = (UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__REQ_MODE_MASK | + UVD_LMI_CTRL__CRC_RESET_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK | + (8 << UVD_LMI_CTRL__WRITE_CLEAN_TIMER__SHIFT) | + 0x00100000L); + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_CTRL), tmp, 0, indirect); + + vcn_v5_0_1_mc_resume_dpg_mode(vinst, indirect); + + tmp = (0xFF << UVD_VCPU_CNTL__PRB_TIMEOUT_VAL__SHIFT); + tmp |= UVD_VCPU_CNTL__CLK_EN_MASK; + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_VCPU_CNTL), tmp, 0, indirect); + + /* enable LMI MC and UMC channels */ + tmp = 0x1f << UVD_LMI_CTRL2__RE_OFLD_MIF_WR_REQ_NUM__SHIFT; + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_LMI_CTRL2), tmp, 0, indirect); + + /* enable master interrupt */ + WREG32_SOC24_DPG_MODE(inst_idx, SOC24_DPG_MODE_OFFSET( + VCN, 0, regUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, 0, indirect); + + if (indirect) + amdgpu_vcn_psp_update_sram(adev, inst_idx, AMDGPU_UCODE_ID_VCN0_RAM); + 
+ /* resetting ring, fw should not check RB ring */ + fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; + + /* Pause dpg */ + vcn_v5_0_1_pause_dpg_mode(vinst, &state); + + ring = &adev->vcn.inst[inst_idx].ring_enc[0]; + + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, lower_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / sizeof(uint32_t)); + + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); + tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK); + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); + + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0); + + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp); + ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); + + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); + tmp |= VCN_RB_ENABLE__RB1_EN_MASK; + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); + /* resetting done, fw can check RB ring */ + fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + + WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL, + ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | + VCN_RB1_DB_CTRL__EN_MASK); + /* Read DB_CTRL to flush the write DB_CTRL command. */ + RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL); + + return 0; +} + +static int vcn_v5_0_1_start_sriov(struct amdgpu_device *adev) +{ + int i, vcn_inst; + struct amdgpu_ring *ring_enc; + uint64_t cache_addr; + uint64_t rb_enc_addr; + uint64_t ctx_addr; + uint32_t param, resp, expected; + uint32_t offset, cache_size; + uint32_t tmp, timeout; + + struct amdgpu_mm_table *table = &adev->virt.mm_table; + uint32_t *table_loc; + uint32_t table_size; + uint32_t size, size_dw; + uint32_t init_status; + uint32_t enabled_vcn; + + struct mmsch_v5_0_cmd_direct_write + direct_wt = { {0} }; + struct mmsch_v5_0_cmd_direct_read_modify_write + direct_rd_mod_wt = { {0} }; + struct mmsch_v5_0_cmd_end end = { {0} }; + struct mmsch_v5_0_init_header header; + + volatile struct amdgpu_vcn5_fw_shared *fw_shared; + volatile struct amdgpu_fw_shared_rb_setup *rb_setup; + + direct_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_WRITE; + direct_rd_mod_wt.cmd_header.command_type = + MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE; + end.cmd_header.command_type = MMSCH_COMMAND__END; + + for (i = 0; i < adev->vcn.num_vcn_inst; i++) { + vcn_inst = GET_INST(VCN, i); + + vcn_v5_0_1_fw_shared_init(adev, vcn_inst); + + memset(&header, 0, sizeof(struct mmsch_v5_0_init_header)); + header.version = MMSCH_VERSION; + header.total_size = sizeof(struct mmsch_v5_0_init_header) >> 2; + + table_loc = (uint32_t *)table->cpu_addr; + table_loc += header.total_size; + + table_size = 0; + + MMSCH_V5_0_INSERT_DIRECT_RD_MOD_WT(SOC15_REG_OFFSET(VCN, 0, regUVD_STATUS), + ~UVD_STATUS__UVD_BUSY, UVD_STATUS__UVD_BUSY); + + cache_size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.inst[i].fw->size + 4); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_lo); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + adev->firmware.ucode[AMDGPU_UCODE_ID_VCN + i].tmr_mc_addr_hi); + + offset = 0; + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET0), 0); + } else { + 
MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[i].gpu_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[i].gpu_addr)); + offset = cache_size; + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET0), + AMDGPU_UVD_FIRMWARE_OFFSET >> 3); + } + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE0), + cache_size); + + cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset; + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW), lower_32_bits(cache_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH), upper_32_bits(cache_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET1), 0); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE1), AMDGPU_VCN_STACK_SIZE); + + cache_addr = adev->vcn.inst[vcn_inst].gpu_addr + offset + + AMDGPU_VCN_STACK_SIZE; + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW), lower_32_bits(cache_addr)); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH), upper_32_bits(cache_addr)); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_OFFSET2), 0); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_CACHE_SIZE2), AMDGPU_VCN_CONTEXT_SIZE); + + fw_shared = adev->vcn.inst[vcn_inst].fw_shared.cpu_addr; + rb_setup = &fw_shared->rb_setup; + + ring_enc = &adev->vcn.inst[vcn_inst].ring_enc[0]; + ring_enc->wptr = 0; + rb_enc_addr = ring_enc->gpu_addr; + + rb_setup->is_rb_enabled_flags |= RB_ENABLED; + rb_setup->rb_addr_lo = lower_32_bits(rb_enc_addr); + rb_setup->rb_addr_hi = upper_32_bits(rb_enc_addr); + rb_setup->rb_size = ring_enc->ring_size / 4; + fw_shared->present_flag_0 |= cpu_to_le32(AMDGPU_VCN_VF_RB_SETUP_FLAG); + + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_NC0_64BIT_BAR_LOW), + lower_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_LMI_VCPU_NC0_64BIT_BAR_HIGH), + upper_32_bits(adev->vcn.inst[vcn_inst].fw_shared.gpu_addr)); + MMSCH_V5_0_INSERT_DIRECT_WT(SOC15_REG_OFFSET(VCN, 0, + regUVD_VCPU_NONCACHE_SIZE0), + AMDGPU_GPU_PAGE_ALIGN(sizeof(struct amdgpu_vcn4_fw_shared))); + MMSCH_V5_0_INSERT_END(); + + header.vcn0.init_status = 0; + header.vcn0.table_offset = header.total_size; + header.vcn0.table_size = table_size; + header.total_size += table_size; + + /* Send init table to mmsch */ + size = sizeof(struct mmsch_v5_0_init_header); + table_loc = (uint32_t *)table->cpu_addr; + memcpy((void *)table_loc, &header, size); + + ctx_addr = table->gpu_addr; + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_LO, lower_32_bits(ctx_addr)); + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_ADDR_HI, upper_32_bits(ctx_addr)); + + tmp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID); + tmp &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK; + tmp |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT); + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_VMID, tmp); + + size = header.total_size; + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_CTX_SIZE, size); + + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP, 0); + + param = 0x00000001; + WREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_HOST, param); + tmp = 0; + timeout = 1000; + resp = 
0; + expected = MMSCH_VF_MAILBOX_RESP__OK; + while (resp != expected) { + resp = RREG32_SOC15(VCN, vcn_inst, regMMSCH_VF_MAILBOX_RESP); + if (resp != 0) + break; + + udelay(10); + tmp = tmp + 10; + if (tmp >= timeout) { + DRM_ERROR("failed to init MMSCH. TIME-OUT after %d usec"\ + " waiting for regMMSCH_VF_MAILBOX_RESP "\ + "(expected=0x%08x, readback=0x%08x)\n", + tmp, expected, resp); + return -EBUSY; + } + } + + enabled_vcn = amdgpu_vcn_is_disabled_vcn(adev, VCN_DECODE_RING, 0) ? 1 : 0; + init_status = ((struct mmsch_v5_0_init_header *)(table_loc))->vcn0.init_status; + if (resp != expected && resp != MMSCH_VF_MAILBOX_RESP__INCOMPLETE + && init_status != MMSCH_VF_ENGINE_STATUS__PASS) { + DRM_ERROR("MMSCH init status is incorrect! readback=0x%08x, header init "\ + "status for VCN%x: 0x%x\n", resp, enabled_vcn, init_status); + } + } + + return 0; +} + +/** + * vcn_v5_0_1_start - VCN start + * + * @vinst: VCN instance + * + * Start VCN block + */ +static int vcn_v5_0_1_start(struct amdgpu_vcn_inst *vinst) +{ + struct amdgpu_device *adev = vinst->adev; + int i = vinst->inst; + volatile struct amdgpu_vcn5_fw_shared *fw_shared; + struct amdgpu_ring *ring; + uint32_t tmp; + int j, k, r, vcn_inst; + + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) + return vcn_v5_0_1_start_dpg_mode(vinst, adev->vcn.inst[i].indirect_sram); + + vcn_inst = GET_INST(VCN, i); + + /* set VCN status busy */ + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS) | UVD_STATUS__UVD_BUSY; + WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, tmp); + + /* enable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__CLK_EN_MASK, ~UVD_VCPU_CNTL__CLK_EN_MASK); + + /* disable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), 0, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* enable LMI MC and UMC channels */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_LMI_CTRL2), 0, + ~UVD_LMI_CTRL2__STALL_ARB_UMC_MASK); + + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); + tmp &= ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + tmp &= ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); + + /* setup regUVD_LMI_CTRL */ + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL); + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL, tmp | + UVD_LMI_CTRL__WRITE_CLEAN_TIMER_EN_MASK | + UVD_LMI_CTRL__MASK_MC_URGENT_MASK | + UVD_LMI_CTRL__DATA_COHERENCY_EN_MASK | + UVD_LMI_CTRL__VCPU_DATA_COHERENCY_EN_MASK); + + vcn_v5_0_1_mc_resume(vinst); + + /* VCN global tiling registers */ + WREG32_SOC15(VCN, vcn_inst, regUVD_GFX10_ADDR_CONFIG, + adev->gfx.config.gb_addr_config); + + /* unblock VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), 0, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + + /* release VCPU reset to boot */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + for (j = 0; j < 10; ++j) { + uint32_t status; + + for (k = 0; k < 100; ++k) { + status = RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); + if (status & 2) + break; + mdelay(100); + if (amdgpu_emu_mode == 1) + msleep(20); + } + + if (amdgpu_emu_mode == 1) { + r = -1; + if (status & 2) { + r = 0; + break; + } + } else { + r = 0; + if (status & 2) + break; + + dev_err(adev->dev, + "VCN[%d] is not responding, trying to reset the VCPU!!!\n", i); + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + mdelay(10); + 
WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + mdelay(10); + r = -1; + } + } + + if (r) { + dev_err(adev->dev, "VCN[%d] is not responding, giving up!!!\n", i); + return r; + } + + /* enable master interrupt */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_MASTINT_EN), + UVD_MASTINT_EN__VCPU_EN_MASK, + ~UVD_MASTINT_EN__VCPU_EN_MASK); + + /* clear the busy bit of VCN_STATUS */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_STATUS), 0, + ~(2 << UVD_STATUS__VCPU_REPORT__SHIFT)); + + ring = &adev->vcn.inst[i].ring_enc[0]; + + WREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL, + ring->doorbell_index << VCN_RB1_DB_CTRL__OFFSET__SHIFT | + VCN_RB1_DB_CTRL__EN_MASK); + + /* Read DB_CTRL to flush the write DB_CTRL command. */ + RREG32_SOC15(VCN, vcn_inst, regVCN_RB1_DB_CTRL); + + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_LO, ring->gpu_addr); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_BASE_HI, upper_32_bits(ring->gpu_addr)); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_SIZE, ring->ring_size / 4); + + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); + tmp &= ~(VCN_RB_ENABLE__RB1_EN_MASK); + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); + fw_shared->sq.queue_mode |= FW_QUEUE_RING_RESET; + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR, 0); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, 0); + + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_RPTR); + WREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR, tmp); + ring->wptr = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); + + tmp = RREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE); + tmp |= VCN_RB_ENABLE__RB1_EN_MASK; + WREG32_SOC15(VCN, vcn_inst, regVCN_RB_ENABLE, tmp); + fw_shared->sq.queue_mode &= ~(FW_QUEUE_RING_RESET | FW_QUEUE_DPG_HOLD_OFF); + + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. + */ + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); + + return 0; +} + +/** + * vcn_v5_0_1_stop_dpg_mode - VCN stop with dpg mode + * + * @vinst: VCN instance + * + * Stop VCN block with dpg mode + */ +static void vcn_v5_0_1_stop_dpg_mode(struct amdgpu_vcn_inst *vinst) +{ + struct amdgpu_device *adev = vinst->adev; + int inst_idx = vinst->inst; + uint32_t tmp; + int vcn_inst; + struct dpg_pause_state state = {.fw_based = VCN_DPG_STATE__UNPAUSE}; + + vcn_inst = GET_INST(VCN, inst_idx); + + /* Unpause dpg */ + vcn_v5_0_1_pause_dpg_mode(vinst, &state); + + /* Wait for power status to be 1 */ + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_POWER_STATUS, 1, + UVD_POWER_STATUS__UVD_POWER_STATUS_MASK); + + /* wait for read ptr to be equal to write ptr */ + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_RB_WPTR); + SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_RB_RPTR, tmp, 0xFFFFFFFF); + + /* disable dynamic power gating mode */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_POWER_STATUS), 0, + ~UVD_POWER_STATUS__UVD_PG_MODE_MASK); + + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. 
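+	 * The value read back is intentionally discarded; the read only
+	 * serves to flush the posted register writes above.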
+ */ + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); +} + +/** + * vcn_v5_0_1_stop - VCN stop + * + * @vinst: VCN instance + * + * Stop VCN block + */ +static int vcn_v5_0_1_stop(struct amdgpu_vcn_inst *vinst) +{ + struct amdgpu_device *adev = vinst->adev; + int i = vinst->inst; + volatile struct amdgpu_vcn5_fw_shared *fw_shared; + uint32_t tmp; + int r = 0, vcn_inst; + + vcn_inst = GET_INST(VCN, i); + + fw_shared = adev->vcn.inst[i].fw_shared.cpu_addr; + fw_shared->sq.queue_mode |= FW_QUEUE_DPG_HOLD_OFF; + + if (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG) { + vcn_v5_0_1_stop_dpg_mode(vinst); + return 0; + } + + /* wait for vcn idle */ + r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_STATUS, UVD_STATUS__IDLE, 0x7); + if (r) + return r; + + tmp = UVD_LMI_STATUS__VCPU_LMI_WRITE_CLEAN_MASK | + UVD_LMI_STATUS__READ_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_MASK | + UVD_LMI_STATUS__WRITE_CLEAN_RAW_MASK; + r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp); + if (r) + return r; + + /* disable LMI UMC channel */ + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2); + tmp |= UVD_LMI_CTRL2__STALL_ARB_UMC_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_LMI_CTRL2, tmp); + tmp = UVD_LMI_STATUS__UMC_READ_CLEAN_RAW_MASK | + UVD_LMI_STATUS__UMC_WRITE_CLEAN_RAW_MASK; + r = SOC15_WAIT_ON_RREG(VCN, vcn_inst, regUVD_LMI_STATUS, tmp, tmp); + if (r) + return r; + + /* block VCPU register access */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_RB_ARB_CTRL), + UVD_RB_ARB_CTRL__VCPU_DIS_MASK, + ~UVD_RB_ARB_CTRL__VCPU_DIS_MASK); + + /* reset VCPU */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), + UVD_VCPU_CNTL__BLK_RST_MASK, + ~UVD_VCPU_CNTL__BLK_RST_MASK); + + /* disable VCPU clock */ + WREG32_P(SOC15_REG_OFFSET(VCN, vcn_inst, regUVD_VCPU_CNTL), 0, + ~(UVD_VCPU_CNTL__CLK_EN_MASK)); + + /* apply soft reset */ + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); + tmp |= UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); + tmp = RREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET); + tmp |= UVD_SOFT_RESET__LMI_SOFT_RESET_MASK; + WREG32_SOC15(VCN, vcn_inst, regUVD_SOFT_RESET, tmp); + + /* clear status */ + WREG32_SOC15(VCN, vcn_inst, regUVD_STATUS, 0); + + /* Keeping one read-back to ensure all register writes are done, + * otherwise it may introduce race conditions. 
+ */ + RREG32_SOC15(VCN, vcn_inst, regUVD_STATUS); + + return 0; +} + +/** + * vcn_v5_0_1_unified_ring_get_rptr - get unified read pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware unified read pointer + */ +static uint64_t vcn_v5_0_1_unified_ring_get_rptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + + return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_RPTR); +} + +/** + * vcn_v5_0_1_unified_ring_get_wptr - get unified write pointer + * + * @ring: amdgpu_ring pointer + * + * Returns the current hardware unified write pointer + */ +static uint64_t vcn_v5_0_1_unified_ring_get_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + + if (ring->use_doorbell) + return *ring->wptr_cpu_addr; + else + return RREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR); +} + +/** + * vcn_v5_0_1_unified_ring_set_wptr - set enc write pointer + * + * @ring: amdgpu_ring pointer + * + * Commits the enc write pointer to the hardware + */ +static void vcn_v5_0_1_unified_ring_set_wptr(struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = ring->adev; + + if (ring != &adev->vcn.inst[ring->me].ring_enc[0]) + DRM_ERROR("wrong ring id is identified in %s", __func__); + + if (ring->use_doorbell) { + *ring->wptr_cpu_addr = lower_32_bits(ring->wptr); + WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr)); + } else { + WREG32_SOC15(VCN, GET_INST(VCN, ring->me), regUVD_RB_WPTR, + lower_32_bits(ring->wptr)); + } +} + +static const struct amdgpu_ring_funcs vcn_v5_0_1_unified_ring_vm_funcs = { + .type = AMDGPU_RING_TYPE_VCN_ENC, + .align_mask = 0x3f, + .nop = VCN_ENC_CMD_NO_OP, + .get_rptr = vcn_v5_0_1_unified_ring_get_rptr, + .get_wptr = vcn_v5_0_1_unified_ring_get_wptr, + .set_wptr = vcn_v5_0_1_unified_ring_set_wptr, + .emit_frame_size = SOC15_FLUSH_GPU_TLB_NUM_WREG * 3 + + SOC15_FLUSH_GPU_TLB_NUM_REG_WAIT * 4 + + 4 + /* vcn_v2_0_enc_ring_emit_vm_flush */ + 5 + + 5 + /* vcn_v2_0_enc_ring_emit_fence x2 vm fence */ + 1, /* vcn_v2_0_enc_ring_insert_end */ + .emit_ib_size = 5, /* vcn_v2_0_enc_ring_emit_ib */ + .emit_ib = vcn_v2_0_enc_ring_emit_ib, + .emit_fence = vcn_v2_0_enc_ring_emit_fence, + .emit_vm_flush = vcn_v4_0_3_enc_ring_emit_vm_flush, + .emit_hdp_flush = vcn_v4_0_3_ring_emit_hdp_flush, + .test_ring = amdgpu_vcn_enc_ring_test_ring, + .test_ib = amdgpu_vcn_unified_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .insert_end = vcn_v2_0_enc_ring_insert_end, + .pad_ib = amdgpu_ring_generic_pad_ib, + .begin_use = amdgpu_vcn_ring_begin_use, + .end_use = amdgpu_vcn_ring_end_use, + .emit_wreg = vcn_v4_0_3_enc_ring_emit_wreg, + .emit_reg_wait = vcn_v4_0_3_enc_ring_emit_reg_wait, + .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, +}; + +/** + * vcn_v5_0_1_set_unified_ring_funcs - set unified ring functions + * + * @adev: amdgpu_device pointer + * + * Set unified ring functions + */ +static void vcn_v5_0_1_set_unified_ring_funcs(struct amdgpu_device *adev) +{ + int i, vcn_inst; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + adev->vcn.inst[i].ring_enc[0].funcs = &vcn_v5_0_1_unified_ring_vm_funcs; + adev->vcn.inst[i].ring_enc[0].me = i; + vcn_inst = GET_INST(VCN, i); + adev->vcn.inst[i].aid_id = vcn_inst / adev->vcn.num_inst_per_aid; + } +} + +/** + * 
vcn_v5_0_1_is_idle - check VCN block is idle + * + * @ip_block: Pointer to the amdgpu_ip_block structure + * + * Check whether VCN block is idle + */ +static bool vcn_v5_0_1_is_idle(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i, ret = 1; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) + ret &= (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) == UVD_STATUS__IDLE); + + return ret; +} + +/** + * vcn_v5_0_1_wait_for_idle - wait for VCN block idle + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * + * Wait for VCN block idle + */ +static int vcn_v5_0_1_wait_for_idle(struct amdgpu_ip_block *ip_block) +{ + struct amdgpu_device *adev = ip_block->adev; + int i, ret = 0; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + ret = SOC15_WAIT_ON_RREG(VCN, GET_INST(VCN, i), regUVD_STATUS, UVD_STATUS__IDLE, + UVD_STATUS__IDLE); + if (ret) + return ret; + } + + return ret; +} + +/** + * vcn_v5_0_1_set_clockgating_state - set VCN block clockgating state + * + * @ip_block: Pointer to the amdgpu_ip_block for this hw instance. + * @state: clock gating state + * + * Set VCN block clockgating state + */ +static int vcn_v5_0_1_set_clockgating_state(struct amdgpu_ip_block *ip_block, + enum amd_clockgating_state state) +{ + struct amdgpu_device *adev = ip_block->adev; + bool enable = state == AMD_CG_STATE_GATE; + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) { + struct amdgpu_vcn_inst *vinst = &adev->vcn.inst[i]; + + if (enable) { + if (RREG32_SOC15(VCN, GET_INST(VCN, i), regUVD_STATUS) != UVD_STATUS__IDLE) + return -EBUSY; + vcn_v5_0_1_enable_clock_gating(vinst); + } else { + vcn_v5_0_1_disable_clock_gating(vinst); + } + } + + return 0; +} + +static int vcn_v5_0_1_set_pg_state(struct amdgpu_vcn_inst *vinst, + enum amd_powergating_state state) +{ + struct amdgpu_device *adev = vinst->adev; + int ret = 0; + + /* for SRIOV, guest should not control VCN Power-gating + * MMSCH FW should control Power-gating and clock-gating + * guest should avoid touching CGC and PG + */ + if (amdgpu_sriov_vf(adev)) { + vinst->cur_state = AMD_PG_STATE_UNGATE; + return 0; + } + + if (state == vinst->cur_state) + return 0; + + if (state == AMD_PG_STATE_GATE) + ret = vcn_v5_0_1_stop(vinst); + else + ret = vcn_v5_0_1_start(vinst); + + if (!ret) + vinst->cur_state = state; + + return ret; +} + +/** + * vcn_v5_0_1_process_interrupt - process VCN block interrupt + * + * @adev: amdgpu_device pointer + * @source: interrupt sources + * @entry: interrupt entry from clients and sources + * + * Process VCN block interrupt + */ +static int vcn_v5_0_1_process_interrupt(struct amdgpu_device *adev, struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + uint32_t i, inst; + + i = node_id_to_phys_map[entry->node_id]; + + DRM_DEV_DEBUG(adev->dev, "IH: VCN TRAP\n"); + + for (inst = 0; inst < adev->vcn.num_vcn_inst; ++inst) + if (adev->vcn.inst[inst].aid_id == i) + break; + if (inst >= adev->vcn.num_vcn_inst) { + dev_WARN_ONCE(adev->dev, 1, + "Interrupt received for unknown VCN instance %d", + entry->node_id); + return 0; + } + + switch (entry->src_id) { + case VCN_5_0__SRCID__UVD_ENC_GENERAL_PURPOSE: + amdgpu_fence_process(&adev->vcn.inst[inst].ring_enc[0]); + break; + default: + DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n", + entry->src_id, entry->src_data[0]); + break; + } + + return 0; +} + +static int vcn_v5_0_1_set_ras_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum 
amdgpu_interrupt_state state) +{ + return 0; +} + +static const struct amdgpu_irq_src_funcs vcn_v5_0_1_irq_funcs = { + .process = vcn_v5_0_1_process_interrupt, +}; + +static const struct amdgpu_irq_src_funcs vcn_v5_0_1_ras_irq_funcs = { + .set = vcn_v5_0_1_set_ras_interrupt_state, + .process = amdgpu_vcn_process_poison_irq, +}; + + +/** + * vcn_v5_0_1_set_irq_funcs - set VCN block interrupt irq functions + * + * @adev: amdgpu_device pointer + * + * Set VCN block interrupt irq functions + */ +static void vcn_v5_0_1_set_irq_funcs(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->vcn.num_vcn_inst; ++i) + adev->vcn.inst->irq.num_types++; + + adev->vcn.inst->irq.funcs = &vcn_v5_0_1_irq_funcs; + + adev->vcn.inst->ras_poison_irq.num_types = 1; + adev->vcn.inst->ras_poison_irq.funcs = &vcn_v5_0_1_ras_irq_funcs; + +} + +static const struct amd_ip_funcs vcn_v5_0_1_ip_funcs = { + .name = "vcn_v5_0_1", + .early_init = vcn_v5_0_1_early_init, + .late_init = NULL, + .sw_init = vcn_v5_0_1_sw_init, + .sw_fini = vcn_v5_0_1_sw_fini, + .hw_init = vcn_v5_0_1_hw_init, + .hw_fini = vcn_v5_0_1_hw_fini, + .suspend = vcn_v5_0_1_suspend, + .resume = vcn_v5_0_1_resume, + .is_idle = vcn_v5_0_1_is_idle, + .wait_for_idle = vcn_v5_0_1_wait_for_idle, + .check_soft_reset = NULL, + .pre_soft_reset = NULL, + .soft_reset = NULL, + .post_soft_reset = NULL, + .set_clockgating_state = vcn_v5_0_1_set_clockgating_state, + .set_powergating_state = vcn_set_powergating_state, + .dump_ip_state = vcn_v5_0_0_dump_ip_state, + .print_ip_state = vcn_v5_0_0_print_ip_state, +}; + +const struct amdgpu_ip_block_version vcn_v5_0_1_ip_block = { + .type = AMD_IP_BLOCK_TYPE_VCN, + .major = 5, + .minor = 0, + .rev = 1, + .funcs = &vcn_v5_0_1_ip_funcs, +}; + +static uint32_t vcn_v5_0_1_query_poison_by_instance(struct amdgpu_device *adev, + uint32_t instance, uint32_t sub_block) +{ + uint32_t poison_stat = 0, reg_value = 0; + + switch (sub_block) { + case AMDGPU_VCN_V5_0_1_VCPU_VCODEC: + reg_value = RREG32_SOC15(VCN, instance, regUVD_RAS_VCPU_VCODEC_STATUS); + poison_stat = REG_GET_FIELD(reg_value, UVD_RAS_VCPU_VCODEC_STATUS, POISONED_PF); + break; + default: + break; + } + + if (poison_stat) + dev_info(adev->dev, "Poison detected in VCN%d, sub_block%d\n", + instance, sub_block); + + return poison_stat; +} + +static bool vcn_v5_0_1_query_poison_status(struct amdgpu_device *adev) +{ + uint32_t inst, sub; + uint32_t poison_stat = 0; + + for (inst = 0; inst < adev->vcn.num_vcn_inst; inst++) + for (sub = 0; sub < AMDGPU_VCN_V5_0_1_MAX_SUB_BLOCK; sub++) + poison_stat += + vcn_v5_0_1_query_poison_by_instance(adev, inst, sub); + + return !!poison_stat; +} + +static const struct amdgpu_ras_block_hw_ops vcn_v5_0_1_ras_hw_ops = { + .query_poison_status = vcn_v5_0_1_query_poison_status, +}; + +static int vcn_v5_0_1_aca_bank_parser(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) +{ + struct aca_bank_info info; + u64 misc0; + int ret; + + ret = aca_bank_info_decode(bank, &info); + if (ret) + return ret; + + misc0 = bank->regs[ACA_REG_IDX_MISC0]; + switch (type) { + case ACA_SMU_TYPE_UE: + bank->aca_err_type = ACA_ERROR_TYPE_UE; + ret = aca_error_cache_log_bank_error(handle, &info, ACA_ERROR_TYPE_UE, + 1ULL); + break; + case ACA_SMU_TYPE_CE: + bank->aca_err_type = ACA_ERROR_TYPE_CE; + ret = aca_error_cache_log_bank_error(handle, &info, bank->aca_err_type, + ACA_REG__MISC0__ERRCNT(misc0)); + break; + default: + return -EINVAL; + } + + return ret; +} + +/* reference to smu driver if header file */ +static int 
vcn_v5_0_1_err_codes[] = { + 14, 15, /* VCN */ +}; + +static bool vcn_v5_0_1_aca_bank_is_valid(struct aca_handle *handle, struct aca_bank *bank, + enum aca_smu_type type, void *data) +{ + u32 instlo; + + instlo = ACA_REG__IPID__INSTANCEIDLO(bank->regs[ACA_REG_IDX_IPID]); + instlo &= GENMASK(31, 1); + + if (instlo != mmSMNAID_AID0_MCA_SMU) + return false; + + if (aca_bank_check_error_codes(handle->adev, bank, + vcn_v5_0_1_err_codes, + ARRAY_SIZE(vcn_v5_0_1_err_codes))) + return false; + + return true; +} + +static const struct aca_bank_ops vcn_v5_0_1_aca_bank_ops = { + .aca_bank_parser = vcn_v5_0_1_aca_bank_parser, + .aca_bank_is_valid = vcn_v5_0_1_aca_bank_is_valid, +}; + +static const struct aca_info vcn_v5_0_1_aca_info = { + .hwip = ACA_HWIP_TYPE_SMU, + .mask = ACA_ERROR_UE_MASK, + .bank_ops = &vcn_v5_0_1_aca_bank_ops, +}; + +static int vcn_v5_0_1_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) +{ + int r; + + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + + r = amdgpu_ras_bind_aca(adev, AMDGPU_RAS_BLOCK__VCN, + &vcn_v5_0_1_aca_info, NULL); + if (r) + goto late_fini; + + return 0; + +late_fini: + amdgpu_ras_block_late_fini(adev, ras_block); + + return r; +} + +static struct amdgpu_vcn_ras vcn_v5_0_1_ras = { + .ras_block = { + .hw_ops = &vcn_v5_0_1_ras_hw_ops, + .ras_late_init = vcn_v5_0_1_ras_late_init, + }, +}; + +static void vcn_v5_0_1_set_ras_funcs(struct amdgpu_device *adev) +{ + adev->vcn.ras = &vcn_v5_0_1_ras; +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index f00d41be7fca..3e9e0f36cd3f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1170,13 +1170,12 @@ svm_range_split_head(struct svm_range *prange, uint64_t new_start, } static void -svm_range_add_child(struct svm_range *prange, struct mm_struct *mm, - struct svm_range *pchild, enum svm_work_list_ops op) +svm_range_add_child(struct svm_range *prange, struct svm_range *pchild, enum svm_work_list_ops op) { pr_debug("add child 0x%p [0x%lx 0x%lx] to prange 0x%p child list %d\n", pchild, pchild->start, pchild->last, prange, op); - pchild->work_item.mm = mm; + pchild->work_item.mm = NULL; pchild->work_item.op = op; list_add_tail(&pchild->child_list, &prange->child_list); } @@ -2384,15 +2383,17 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, prange->work_item.op != SVM_OP_UNMAP_RANGE) prange->work_item.op = op; } else { - prange->work_item.op = op; - - /* Pairs with mmput in deferred_list_work */ - mmget(mm); - prange->work_item.mm = mm; - list_add_tail(&prange->deferred_list, - &prange->svms->deferred_range_list); - pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n", - prange, prange->start, prange->last, op); + /* Pairs with mmput in deferred_list_work. + * If process is exiting and mm is gone, don't update mmu notifier. 
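+	 * mmget_not_zero() only succeeds while mm_users is still non-zero,
+	 * so the deferred worker either gets a usable mm reference here or
+	 * the update is dropped entirely.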
+ */ + if (mmget_not_zero(mm)) { + prange->work_item.mm = mm; + prange->work_item.op = op; + list_add_tail(&prange->deferred_list, + &prange->svms->deferred_range_list); + pr_debug("add prange 0x%p [0x%lx 0x%lx] to work list op %d\n", + prange, prange->start, prange->last, op); + } } spin_unlock(&svms->deferred_list_lock); } @@ -2406,8 +2407,7 @@ void schedule_deferred_list_work(struct svm_range_list *svms) } static void -svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent, - struct svm_range *prange, unsigned long start, +svm_range_unmap_split(struct svm_range *parent, struct svm_range *prange, unsigned long start, unsigned long last) { struct svm_range *head; @@ -2428,12 +2428,12 @@ svm_range_unmap_split(struct mm_struct *mm, struct svm_range *parent, svm_range_split(tail, last + 1, tail->last, &head); if (head != prange && tail != prange) { - svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); - svm_range_add_child(parent, mm, tail, SVM_OP_ADD_RANGE); + svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE); + svm_range_add_child(parent, tail, SVM_OP_ADD_RANGE); } else if (tail != prange) { - svm_range_add_child(parent, mm, tail, SVM_OP_UNMAP_RANGE); + svm_range_add_child(parent, tail, SVM_OP_UNMAP_RANGE); } else if (head != prange) { - svm_range_add_child(parent, mm, head, SVM_OP_UNMAP_RANGE); + svm_range_add_child(parent, head, SVM_OP_UNMAP_RANGE); } else if (parent != prange) { prange->work_item.op = SVM_OP_UNMAP_RANGE; } @@ -2510,14 +2510,14 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange, l = min(last, pchild->last); if (l >= s) svm_range_unmap_from_gpus(pchild, s, l, trigger); - svm_range_unmap_split(mm, prange, pchild, start, last); + svm_range_unmap_split(prange, pchild, start, last); mutex_unlock(&pchild->lock); } s = max(start, prange->start); l = min(last, prange->last); if (l >= s) svm_range_unmap_from_gpus(prange, s, l, trigger); - svm_range_unmap_split(mm, prange, prange, start, last); + svm_range_unmap_split(prange, prange, start, last); if (unmap_parent) svm_range_add_list_work(svms, prange, mm, SVM_OP_UNMAP_RANGE); @@ -2560,8 +2560,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, if (range->event == MMU_NOTIFY_RELEASE) return true; - if (!mmget_not_zero(mni->mm)) - return true; start = mni->interval_tree.start; last = mni->interval_tree.last; @@ -2588,7 +2586,6 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, } svm_range_unlock(prange); - mmput(mni->mm); return true; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 2ac56e79df05..9a31e5da3687 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -731,7 +731,16 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, * support programmable degamma anywhere. */ is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch; - drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0, + /* Dont't enable DRM CRTC degamma property for DCN401 since the + * pre-blending degamma LUT doesn't apply to cursor, and therefore + * can't work similar to a post-blending degamma LUT as in other hw + * versions. + * TODO: revisit it once KMS plane color API is merged. + */ + drm_crtc_enable_color_mgmt(&acrtc->base, + (is_dcn && + dm->adev->dm.dc->ctx->dce_version != DCN_VERSION_4_01) ? 
+ MAX_COLOR_LUT_ENTRIES : 0, true, MAX_COLOR_LUT_ENTRIES); drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c index 313e52997596..2ee034879f9f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c @@ -1700,7 +1700,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct( clk_mgr->base.bw_params = kzalloc(sizeof(*clk_mgr->base.bw_params), GFP_KERNEL); if (!clk_mgr->base.bw_params) { BREAK_TO_DEBUGGER(); - kfree(clk_mgr); + kfree(clk_mgr401); return NULL; } @@ -1711,6 +1711,7 @@ struct clk_mgr_internal *dcn401_clk_mgr_construct( if (!clk_mgr->wm_range_table) { BREAK_TO_DEBUGGER(); kfree(clk_mgr->base.bw_params); + kfree(clk_mgr401); return NULL; } diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c index ca446e08f6a2..21aff7fa6375 100644 --- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c @@ -1019,8 +1019,22 @@ void dcn35_calc_blocks_to_gate(struct dc *dc, struct dc_state *context, if (pipe_ctx->plane_res.dpp || pipe_ctx->stream_res.opp) update_state->pg_pipe_res_update[PG_MPCC][pipe_ctx->plane_res.mpcc_inst] = false; - if (pipe_ctx->stream_res.dsc) + if (pipe_ctx->stream_res.dsc) { update_state->pg_pipe_res_update[PG_DSC][pipe_ctx->stream_res.dsc->inst] = false; + if (dc->caps.sequential_ono) { + update_state->pg_pipe_res_update[PG_HUBP][pipe_ctx->stream_res.dsc->inst] = false; + update_state->pg_pipe_res_update[PG_DPP][pipe_ctx->stream_res.dsc->inst] = false; + + /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */ + if (!pipe_ctx->top_pipe && pipe_ctx->plane_res.hubp && + pipe_ctx->plane_res.hubp->inst != pipe_ctx->stream_res.dsc->inst) { + for (j = 0; j < dc->res_pool->pipe_count; ++j) { + update_state->pg_pipe_res_update[PG_HUBP][j] = false; + update_state->pg_pipe_res_update[PG_DPP][j] = false; + } + } + } + } if (pipe_ctx->stream_res.opp) update_state->pg_pipe_res_update[PG_OPP][pipe_ctx->stream_res.opp->inst] = false; @@ -1165,6 +1179,25 @@ void dcn35_calc_blocks_to_ungate(struct dc *dc, struct dc_state *context, update_state->pg_pipe_res_update[PG_HDMISTREAM][0] = true; if (dc->caps.sequential_ono) { + for (i = 0; i < dc->res_pool->pipe_count; i++) { + struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i]; + + if (new_pipe->stream_res.dsc && !new_pipe->top_pipe && + update_state->pg_pipe_res_update[PG_DSC][new_pipe->stream_res.dsc->inst]) { + update_state->pg_pipe_res_update[PG_HUBP][new_pipe->stream_res.dsc->inst] = true; + update_state->pg_pipe_res_update[PG_DPP][new_pipe->stream_res.dsc->inst] = true; + + /* All HUBP/DPP instances must be powered if the DSC inst != HUBP inst */ + if (new_pipe->plane_res.hubp && + new_pipe->plane_res.hubp->inst != new_pipe->stream_res.dsc->inst) { + for (j = 0; j < dc->res_pool->pipe_count; ++j) { + update_state->pg_pipe_res_update[PG_HUBP][j] = true; + update_state->pg_pipe_res_update[PG_DPP][j] = true; + } + } + } + } + for (i = dc->res_pool->pipe_count - 1; i >= 0; i--) { if (update_state->pg_pipe_res_update[PG_HUBP][i] && update_state->pg_pipe_res_update[PG_DPP][i]) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 4f78c84da780..c5bca3019de0 100644 --- 
a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -58,6 +58,7 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin"); +MODULE_FIRMWARE("amdgpu/smu_13_0_0_kicker.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin"); MODULE_FIRMWARE("amdgpu/smu_13_0_10.bin"); @@ -92,7 +93,7 @@ const int pmfw_decoded_link_width[7] = {0, 1, 2, 4, 8, 12, 16}; int smu_v13_0_init_microcode(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; - char ucode_prefix[15]; + char ucode_prefix[30]; int err = 0; const struct smc_firmware_header_v1_0 *hdr; const struct common_firmware_header *header; @@ -103,7 +104,10 @@ int smu_v13_0_init_microcode(struct smu_context *smu) return 0; amdgpu_ucode_ip_version_decode(adev, MP1_HWIP, ucode_prefix, sizeof(ucode_prefix)); - err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix); + if (amdgpu_is_kicker_fw(adev)) + err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s_kicker.bin", ucode_prefix); + else + err = amdgpu_ucode_request(adev, &adev->pm.fw, "amdgpu/%s.bin", ucode_prefix); if (err) goto out; diff --git a/drivers/gpu/drm/bridge/aux-hpd-bridge.c b/drivers/gpu/drm/bridge/aux-hpd-bridge.c index 6886db2d9e00..8e889a38fad0 100644 --- a/drivers/gpu/drm/bridge/aux-hpd-bridge.c +++ b/drivers/gpu/drm/bridge/aux-hpd-bridge.c @@ -64,10 +64,11 @@ struct auxiliary_device *devm_drm_dp_hpd_bridge_alloc(struct device *parent, str adev->id = ret; adev->name = "dp_hpd_bridge"; adev->dev.parent = parent; - adev->dev.of_node = of_node_get(parent->of_node); adev->dev.release = drm_aux_hpd_bridge_release; adev->dev.platform_data = of_node_get(np); + device_set_of_node_from_dev(&adev->dev, parent); + ret = auxiliary_device_init(adev); if (ret) { of_node_put(adev->dev.platform_data); diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c index 5500767cda7e..4d17d1e1c38b 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c @@ -1352,7 +1352,7 @@ static int ti_sn_bridge_probe(struct auxiliary_device *adev, regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE, 0); mutex_unlock(&pdata->comms_mutex); - }; + } drm_bridge_add(&pdata->bridge); diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c index ca42e6081d27..16dea3f2fb11 100644 --- a/drivers/gpu/drm/drm_buddy.c +++ b/drivers/gpu/drm/drm_buddy.c @@ -401,6 +401,49 @@ drm_get_buddy(struct drm_buddy_block *block) EXPORT_SYMBOL(drm_get_buddy); /** + * drm_buddy_reset_clear - reset blocks clear state + * + * @mm: DRM buddy manager + * @is_clear: blocks clear state + * + * Reset the clear state based on @is_clear value for each block + * in the freelist. 
+ */ +void drm_buddy_reset_clear(struct drm_buddy *mm, bool is_clear) +{ + u64 root_size, size, start; + unsigned int order; + int i; + + size = mm->size; + for (i = 0; i < mm->n_roots; ++i) { + order = ilog2(size) - ilog2(mm->chunk_size); + start = drm_buddy_block_offset(mm->roots[i]); + __force_merge(mm, start, start + size, order); + + root_size = mm->chunk_size << order; + size -= root_size; + } + + for (i = 0; i <= mm->max_order; ++i) { + struct drm_buddy_block *block; + + list_for_each_entry_reverse(block, &mm->free_list[i], link) { + if (is_clear != drm_buddy_block_is_clear(block)) { + if (is_clear) { + mark_cleared(block); + mm->clear_avail += drm_buddy_block_size(mm, block); + } else { + clear_reset(block); + mm->clear_avail -= drm_buddy_block_size(mm, block); + } + } + } + } +} +EXPORT_SYMBOL(drm_buddy_reset_clear); + +/** * drm_buddy_free_block - free a block * * @mm: DRM buddy manager diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 888aadb6a4ac..d6550b54fac1 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -860,11 +860,23 @@ void drm_framebuffer_free(struct kref *kref) int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, const struct drm_framebuffer_funcs *funcs) { + unsigned int i; int ret; + bool exists; if (WARN_ON_ONCE(fb->dev != dev || !fb->format)) return -EINVAL; + for (i = 0; i < fb->format->num_planes; i++) { + if (drm_WARN_ON_ONCE(dev, fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))) + fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + if (fb->obj[i]) { + exists = drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]); + if (exists) + fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + } + } + INIT_LIST_HEAD(&fb->filp_head); fb->funcs = funcs; @@ -873,7 +885,7 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, ret = __drm_mode_object_add(dev, &fb->base, DRM_MODE_OBJECT_FB, false, drm_framebuffer_free); if (ret) - goto out; + goto err; mutex_lock(&dev->mode_config.fb_lock); dev->mode_config.num_fb++; @@ -881,7 +893,16 @@ int drm_framebuffer_init(struct drm_device *dev, struct drm_framebuffer *fb, mutex_unlock(&dev->mode_config.fb_lock); drm_mode_object_register(dev, &fb->base); -out: + + return 0; + +err: + for (i = 0; i < fb->format->num_planes; i++) { + if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) { + drm_gem_object_handle_put_unlocked(fb->obj[i]); + fb->internal_flags &= ~DRM_FRAMEBUFFER_HAS_HANDLE_REF(i); + } + } return ret; } EXPORT_SYMBOL(drm_framebuffer_init); @@ -958,6 +979,12 @@ EXPORT_SYMBOL(drm_framebuffer_unregister_private); void drm_framebuffer_cleanup(struct drm_framebuffer *fb) { struct drm_device *dev = fb->dev; + unsigned int i; + + for (i = 0; i < fb->format->num_planes; i++) { + if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i)) + drm_gem_object_handle_put_unlocked(fb->obj[i]); + } mutex_lock(&dev->mode_config.fb_lock); list_del(&fb->head); diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 426d0867882d..9e8a4da313a0 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -186,6 +186,46 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj) } EXPORT_SYMBOL(drm_gem_private_object_fini); +static void drm_gem_object_handle_get(struct drm_gem_object *obj) +{ + struct drm_device *dev = obj->dev; + + drm_WARN_ON(dev, !mutex_is_locked(&dev->object_name_lock)); + + if (obj->handle_count++ == 0) + drm_gem_object_get(obj); +} + 
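[For orientation, a minimal sketch, not part of the patch: it shows how the new handle-reference helpers are meant to pair up in a framebuffer-style caller. The helper names and the DRM_FRAMEBUFFER_HAS_HANDLE_REF() flag come from the hunks above; the two sketch functions themselves are hypothetical.]

/* Sketch only: take a handle reference per plane, mirroring the
 * drm_framebuffer_init() hunk above. Assumes the internal declarations
 * from drm_internal.h are visible.
 */
static void fb_take_handle_refs_sketch(struct drm_framebuffer *fb)
{
	unsigned int i;

	for (i = 0; i < fb->format->num_planes; i++) {
		/* Take a ref only while userspace still holds a handle. */
		if (fb->obj[i] &&
		    drm_gem_object_handle_get_if_exists_unlocked(fb->obj[i]))
			fb->internal_flags |= DRM_FRAMEBUFFER_HAS_HANDLE_REF(i);
	}
}

/* Sketch only: the matching release path, as done in
 * drm_framebuffer_cleanup() and the error path of drm_framebuffer_init().
 */
static void fb_drop_handle_refs_sketch(struct drm_framebuffer *fb)
{
	unsigned int i;

	for (i = 0; i < fb->format->num_planes; i++) {
		if (fb->internal_flags & DRM_FRAMEBUFFER_HAS_HANDLE_REF(i))
			drm_gem_object_handle_put_unlocked(fb->obj[i]);
	}
}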
+/**
+ * drm_gem_object_handle_get_if_exists_unlocked - acquire reference on user-space handle, if any
+ * @obj: GEM object
+ *
+ * Acquires a reference on the GEM buffer object's handle. Required to keep
+ * the GEM object alive. Call drm_gem_object_handle_put_unlocked()
+ * to release the reference. Does nothing if the buffer object has no handle.
+ *
+ * Returns:
+ * True if a handle exists, or false otherwise
+ */
+bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+
+ guard(mutex)(&dev->object_name_lock);
+
+ /*
+ * First ref taken during GEM object creation, if any. Some
+ * drivers set up internal framebuffers with GEM objects that
+ * do not have a GEM handle. Hence, this counter can be zero.
+ */
+ if (!obj->handle_count)
+ return false;
+
+ drm_gem_object_handle_get(obj);
+
+ return true;
+}
+
/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
@@ -216,20 +256,26 @@ static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
 }
}

-static void
-drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
+/**
+ * drm_gem_object_handle_put_unlocked - releases reference on user-space handle
+ * @obj: GEM object
+ *
+ * Releases a reference on the GEM buffer object's handle. Possibly releases
+ * the GEM buffer object and associated dma-buf objects.
+ */
+void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
 struct drm_device *dev = obj->dev;
 bool final = false;

- if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
+ if (drm_WARN_ON(dev, READ_ONCE(obj->handle_count) == 0))
 return;

 /*
- * Must bump handle count first as this may be the last
- * ref, in which case the object would disappear before we
- * checked for a name
- */
+ * Must bump handle count first as this may be the last
+ * ref, in which case the object would disappear before
+ * we checked for a name.
+ */

 mutex_lock(&dev->object_name_lock);
 if (--obj->handle_count == 0) {
@@ -253,6 +299,9 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 struct drm_file *file_priv = data;
 struct drm_gem_object *obj = ptr;

+ if (drm_WARN_ON(obj->dev, !data))
+ return 0;
+
 if (obj->funcs->close)
 obj->funcs->close(obj, file_priv);

@@ -363,8 +412,8 @@ drm_gem_handle_create_tail(struct drm_file *file_priv,
 int ret;

 WARN_ON(!mutex_is_locked(&dev->object_name_lock));
- if (obj->handle_count++ == 0)
- drm_gem_object_get(obj);
+
+ drm_gem_object_handle_get(obj);

 /*
 * Get the user-visible handle using idr. 
Preload and perform @@ -373,7 +422,7 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, idr_preload(GFP_KERNEL); spin_lock(&file_priv->table_lock); - ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT); + ret = idr_alloc(&file_priv->object_idr, NULL, 1, 0, GFP_NOWAIT); spin_unlock(&file_priv->table_lock); idr_preload_end(); @@ -394,6 +443,11 @@ drm_gem_handle_create_tail(struct drm_file *file_priv, goto err_revoke; } + /* mirrors drm_gem_handle_delete to avoid races */ + spin_lock(&file_priv->table_lock); + obj = idr_replace(&file_priv->object_idr, obj, handle); + WARN_ON(obj != NULL); + spin_unlock(&file_priv->table_lock); *handlep = handle; return 0; diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 1705bfc90b1e..98b73c581c42 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -153,6 +153,8 @@ void drm_sysfs_lease_event(struct drm_device *dev); /* drm_gem.c */ int drm_gem_init(struct drm_device *dev); +bool drm_gem_object_handle_get_if_exists_unlocked(struct drm_gem_object *obj); +void drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj); int drm_gem_handle_create_tail(struct drm_file *file_priv, struct drm_gem_object *obj, u32 *handlep); diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c index 0d185c0564b9..9eeba254cf45 100644 --- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c +++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c @@ -601,6 +601,10 @@ static irqreturn_t decon_irq_handler(int irq, void *dev_id) if (!ctx->drm_dev) goto out; + /* check if crtc and vblank have been initialized properly */ + if (!drm_dev_has_vblank(ctx->drm_dev)) + goto out; + if (!ctx->i80_if) { drm_crtc_handle_vblank(&ctx->crtc->base); diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c index f57df8c48139..05e4a5a63f5d 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c @@ -187,6 +187,7 @@ struct fimd_context { u32 i80ifcon; bool i80_if; bool suspended; + bool dp_clk_enabled; wait_queue_head_t wait_vsync_queue; atomic_t wait_vsync_event; atomic_t win_updated; @@ -1047,7 +1048,18 @@ static void fimd_dp_clock_enable(struct exynos_drm_clk *clk, bool enable) struct fimd_context *ctx = container_of(clk, struct fimd_context, dp_clk); u32 val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE; + + if (enable == ctx->dp_clk_enabled) + return; + + if (enable) + pm_runtime_resume_and_get(ctx->dev); + + ctx->dp_clk_enabled = enable; writel(val, ctx->regs + DP_MIE_CLKCON); + + if (!enable) + pm_runtime_put(ctx->dev); } static const struct exynos_drm_crtc_ops fimd_crtc_ops = { diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 45cca965c11b..af80f1ac8880 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -1506,6 +1506,12 @@ int intel_dp_rate_select(struct intel_dp *intel_dp, int rate) void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock, u8 *link_bw, u8 *rate_select) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + /* FIXME g4x can't generate an exact 2.7GHz with the 96MHz non-SSC refclk */ + if (IS_G4X(i915) && port_clock == 268800) + port_clock = 270000; + /* eDP 1.4 rate select method. 
*/ if (intel_dp->use_rate_select) { *link_bw = 0; @@ -4300,6 +4306,24 @@ intel_dp_mst_disconnect(struct intel_dp *intel_dp) static bool intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi) { + struct intel_display *display = to_intel_display(intel_dp); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + /* + * Display WA for HSD #13013007775: mtl/arl/lnl + * Read the sink count and link service IRQ registers in separate + * transactions to prevent disconnecting the sink on a TBT link + * inadvertently. + */ + if (IS_DISPLAY_VER(display, 14, 20) && !IS_BATTLEMAGE(i915)) { + if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 3) != 3) + return false; + + /* DP_SINK_COUNT_ESI + 3 == DP_LINK_SERVICE_IRQ_VECTOR_ESI0 */ + return drm_dp_dpcd_readb(&intel_dp->aux, DP_LINK_SERVICE_IRQ_VECTOR_ESI0, + &esi[3]) == 1; + } + return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4; } diff --git a/drivers/gpu/drm/i915/gt/intel_gsc.c b/drivers/gpu/drm/i915/gt/intel_gsc.c index 1e925c75fb08..c43febc862dc 100644 --- a/drivers/gpu/drm/i915/gt/intel_gsc.c +++ b/drivers/gpu/drm/i915/gt/intel_gsc.c @@ -284,7 +284,7 @@ static void gsc_irq_handler(struct intel_gt *gt, unsigned int intf_id) if (gt->gsc.intf[intf_id].irq < 0) return; - ret = generic_handle_irq(gt->gsc.intf[intf_id].irq); + ret = generic_handle_irq_safe(gt->gsc.intf[intf_id].irq); if (ret) gt_err_ratelimited(gt, "error handling GSC irq: %d\n", ret); } diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 72277bc8322e..f84fa09cdb33 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -575,7 +575,6 @@ static int ring_context_alloc(struct intel_context *ce) /* One ringbuffer to rule them all */ GEM_BUG_ON(!engine->legacy.ring); ce->ring = engine->legacy.ring; - ce->timeline = intel_timeline_get(engine->legacy.timeline); GEM_BUG_ON(ce->state); if (engine->context_size) { @@ -588,6 +587,8 @@ static int ring_context_alloc(struct intel_context *ce) ce->state = vma; } + ce->timeline = intel_timeline_get(engine->legacy.timeline); + return 0; } diff --git a/drivers/gpu/drm/i915/selftests/i915_request.c b/drivers/gpu/drm/i915/selftests/i915_request.c index acae30a04a94..0122719ee921 100644 --- a/drivers/gpu/drm/i915/selftests/i915_request.c +++ b/drivers/gpu/drm/i915/selftests/i915_request.c @@ -73,8 +73,8 @@ static int igt_add_request(void *arg) /* Basic preliminary test to create a request and let it loose! 
*/ request = mock_request(rcs0(i915)->kernel_context, HZ / 10); - if (!request) - return -ENOMEM; + if (IS_ERR(request)) + return PTR_ERR(request); i915_request_add(request); @@ -91,8 +91,8 @@ static int igt_wait_request(void *arg) /* Submit a request, then wait upon it */ request = mock_request(rcs0(i915)->kernel_context, T); - if (!request) - return -ENOMEM; + if (IS_ERR(request)) + return PTR_ERR(request); i915_request_get(request); @@ -160,8 +160,8 @@ static int igt_fence_wait(void *arg) /* Submit a request, treat it as a fence and wait upon it */ request = mock_request(rcs0(i915)->kernel_context, T); - if (!request) - return -ENOMEM; + if (IS_ERR(request)) + return PTR_ERR(request); if (dma_fence_wait_timeout(&request->fence, false, T) != -ETIME) { pr_err("fence wait success before submit (expected timeout)!\n"); @@ -219,8 +219,8 @@ static int igt_request_rewind(void *arg) GEM_BUG_ON(IS_ERR(ce)); request = mock_request(ce, 2 * HZ); intel_context_put(ce); - if (!request) { - err = -ENOMEM; + if (IS_ERR(request)) { + err = PTR_ERR(request); goto err_context_0; } @@ -237,8 +237,8 @@ static int igt_request_rewind(void *arg) GEM_BUG_ON(IS_ERR(ce)); vip = mock_request(ce, 0); intel_context_put(ce); - if (!vip) { - err = -ENOMEM; + if (IS_ERR(vip)) { + err = PTR_ERR(vip); goto err_context_1; } diff --git a/drivers/gpu/drm/i915/selftests/mock_request.c b/drivers/gpu/drm/i915/selftests/mock_request.c index 09f747228dff..1b0cf073e964 100644 --- a/drivers/gpu/drm/i915/selftests/mock_request.c +++ b/drivers/gpu/drm/i915/selftests/mock_request.c @@ -35,7 +35,7 @@ mock_request(struct intel_context *ce, unsigned long delay) /* NB the i915->requests slab cache is enlarged to fit mock_request */ request = intel_context_create_request(ce); if (IS_ERR(request)) - return NULL; + return request; request->mock.delay = delay; return request; diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c index ba7816fd28ec..850b318605da 100644 --- a/drivers/gpu/drm/imagination/pvr_power.c +++ b/drivers/gpu/drm/imagination/pvr_power.c @@ -363,13 +363,13 @@ pvr_power_reset(struct pvr_device *pvr_dev, bool hard_reset) if (!err) { if (hard_reset) { pvr_dev->fw_dev.booted = false; - WARN_ON(pm_runtime_force_suspend(from_pvr_device(pvr_dev)->dev)); + WARN_ON(pvr_power_device_suspend(from_pvr_device(pvr_dev)->dev)); err = pvr_fw_hard_reset(pvr_dev); if (err) goto err_device_lost; - err = pm_runtime_force_resume(from_pvr_device(pvr_dev)->dev); + err = pvr_power_device_resume(from_pvr_device(pvr_dev)->dev); pvr_dev->fw_dev.booted = true; if (err) goto err_device_lost; diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c index 8f6fba4217ec..bc7527542fdc 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.c +++ b/drivers/gpu/drm/mediatek/mtk_crtc.c @@ -719,6 +719,39 @@ int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, return 0; } +void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane) +{ +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc); + struct mtk_plane_state *plane_state = to_mtk_plane_state(plane->state); + int i; + + /* no need to wait for disabling the plane by CPU */ + if (!mtk_crtc->cmdq_client.chan) + return; + + if (!mtk_crtc->enabled) + return; + + /* set pending plane state to disabled */ + for (i = 0; i < mtk_crtc->layer_nr; i++) { + struct drm_plane *mtk_plane = &mtk_crtc->planes[i]; + struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(mtk_plane->state); + + 
if (mtk_plane->index == plane->index) { + memcpy(mtk_plane_state, plane_state, sizeof(*plane_state)); + break; + } + } + mtk_crtc_update_config(mtk_crtc, false); + + /* wait for planes to be disabled by CMDQ */ + wait_event_timeout(mtk_crtc->cb_blocking_queue, + mtk_crtc->cmdq_vblank_cnt == 0, + msecs_to_jiffies(500)); +#endif +} + void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, struct drm_atomic_state *state) { @@ -930,7 +963,8 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev, mtk_ddp_comp_supported_rotations(comp), mtk_ddp_comp_get_blend_modes(comp), mtk_ddp_comp_get_formats(comp), - mtk_ddp_comp_get_num_formats(comp), i); + mtk_ddp_comp_get_num_formats(comp), + mtk_ddp_comp_is_afbc_supported(comp), i); if (ret) return ret; diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.h b/drivers/gpu/drm/mediatek/mtk_crtc.h index 388e900b6f4d..828f109b83e7 100644 --- a/drivers/gpu/drm/mediatek/mtk_crtc.h +++ b/drivers/gpu/drm/mediatek/mtk_crtc.h @@ -21,6 +21,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path, unsigned int num_conn_routes); int mtk_crtc_plane_check(struct drm_crtc *crtc, struct drm_plane *plane, struct mtk_plane_state *state); +void mtk_crtc_plane_disable(struct drm_crtc *crtc, struct drm_plane *plane); void mtk_crtc_async_update(struct drm_crtc *crtc, struct drm_plane *plane, struct drm_atomic_state *plane_state); struct device *mtk_crtc_dma_dev_get(struct drm_crtc *crtc); diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c index edc6417639e6..ac6620e10262 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.c +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.c @@ -366,6 +366,7 @@ static const struct mtk_ddp_comp_funcs ddp_ovl = { .get_blend_modes = mtk_ovl_get_blend_modes, .get_formats = mtk_ovl_get_formats, .get_num_formats = mtk_ovl_get_num_formats, + .is_afbc_supported = mtk_ovl_is_afbc_supported, }; static const struct mtk_ddp_comp_funcs ddp_postmask = { diff --git a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h index 39720b27f4e9..7289b3dcf22f 100644 --- a/drivers/gpu/drm/mediatek/mtk_ddp_comp.h +++ b/drivers/gpu/drm/mediatek/mtk_ddp_comp.h @@ -83,6 +83,7 @@ struct mtk_ddp_comp_funcs { u32 (*get_blend_modes)(struct device *dev); const u32 *(*get_formats)(struct device *dev); size_t (*get_num_formats)(struct device *dev); + bool (*is_afbc_supported)(struct device *dev); void (*connect)(struct device *dev, struct device *mmsys_dev, unsigned int next); void (*disconnect)(struct device *dev, struct device *mmsys_dev, unsigned int next); void (*add)(struct device *dev, struct mtk_mutex *mutex); @@ -294,6 +295,14 @@ size_t mtk_ddp_comp_get_num_formats(struct mtk_ddp_comp *comp) return 0; } +static inline bool mtk_ddp_comp_is_afbc_supported(struct mtk_ddp_comp *comp) +{ + if (comp->funcs && comp->funcs->is_afbc_supported) + return comp->funcs->is_afbc_supported(comp->dev); + + return false; +} + static inline bool mtk_ddp_comp_add(struct mtk_ddp_comp *comp, struct mtk_mutex *mutex) { if (comp->funcs && comp->funcs->add) { diff --git a/drivers/gpu/drm/mediatek/mtk_disp_drv.h b/drivers/gpu/drm/mediatek/mtk_disp_drv.h index 04154db9085c..c0f7f77e0574 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_drv.h +++ b/drivers/gpu/drm/mediatek/mtk_disp_drv.h @@ -106,6 +106,7 @@ void mtk_ovl_disable_vblank(struct device *dev); u32 mtk_ovl_get_blend_modes(struct device *dev); const u32 *mtk_ovl_get_formats(struct device *dev); size_t 
mtk_ovl_get_num_formats(struct device *dev); +bool mtk_ovl_is_afbc_supported(struct device *dev); void mtk_ovl_adaptor_add_comp(struct device *dev, struct mtk_mutex *mutex); void mtk_ovl_adaptor_remove_comp(struct device *dev, struct mtk_mutex *mutex); diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c index 19b0d5083981..ca4a9a60b890 100644 --- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c +++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c @@ -236,6 +236,13 @@ size_t mtk_ovl_get_num_formats(struct device *dev) return ovl->data->num_formats; } +bool mtk_ovl_is_afbc_supported(struct device *dev) +{ + struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); + + return ovl->data->supports_afbc; +} + int mtk_ovl_clk_enable(struct device *dev) { struct mtk_disp_ovl *ovl = dev_get_drvdata(dev); diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c index 8a48b3b0a956..74c2704efb66 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.c +++ b/drivers/gpu/drm/mediatek/mtk_plane.c @@ -285,9 +285,14 @@ static void mtk_plane_atomic_disable(struct drm_plane *plane, struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, plane); struct mtk_plane_state *mtk_plane_state = to_mtk_plane_state(new_state); + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(state, + plane); + mtk_plane_state->pending.enable = false; wmb(); /* Make sure the above parameter is set before update */ mtk_plane_state->pending.dirty = true; + + mtk_crtc_plane_disable(old_state->crtc, plane); } static void mtk_plane_atomic_update(struct drm_plane *plane, @@ -321,7 +326,8 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = { int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, unsigned int supported_rotations, const u32 blend_modes, - const u32 *formats, size_t num_formats, unsigned int plane_idx) + const u32 *formats, size_t num_formats, + bool supports_afbc, unsigned int plane_idx) { int err; @@ -332,7 +338,9 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, err = drm_universal_plane_init(dev, plane, possible_crtcs, &mtk_plane_funcs, formats, - num_formats, modifiers, type, NULL); + num_formats, + supports_afbc ? modifiers : NULL, + type, NULL); if (err) { DRM_ERROR("failed to initialize plane\n"); return err; diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h index 3b13b89989c7..95c5fa5295d8 100644 --- a/drivers/gpu/drm/mediatek/mtk_plane.h +++ b/drivers/gpu/drm/mediatek/mtk_plane.h @@ -49,5 +49,6 @@ to_mtk_plane_state(struct drm_plane_state *state) int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane, unsigned long possible_crtcs, enum drm_plane_type type, unsigned int supported_rotations, const u32 blend_modes, - const u32 *formats, size_t num_formats, unsigned int plane_idx); + const u32 *formats, size_t num_formats, + bool supports_afbc, unsigned int plane_idx); #endif diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index f775638d239a..4b3a8ee8e278 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -85,6 +85,15 @@ void __msm_gem_submit_destroy(struct kref *kref) container_of(kref, struct msm_gem_submit, ref); unsigned i; + /* + * In error paths, we could unref the submit without calling + * drm_sched_entity_push_job(), so msm_job_free() will never + * get called. 
Since drm_sched_job_cleanup() will NULL out + * s_fence, we can use that to detect this case. + */ + if (submit->base.s_fence) + drm_sched_job_cleanup(&submit->base); + if (submit->fence_id) { spin_lock(&submit->queue->idr_lock); idr_remove(&submit->queue->fence_idr, submit->fence_id); @@ -658,6 +667,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_ringbuffer *ring; struct msm_submit_post_dep *post_deps = NULL; struct drm_syncobj **syncobjs_to_reset = NULL; + struct sync_file *sync_file = NULL; int out_fence_fd = -1; unsigned i; int ret; @@ -868,7 +878,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, } if (ret == 0 && args->flags & MSM_SUBMIT_FENCE_FD_OUT) { - struct sync_file *sync_file = sync_file_create(submit->user_fence); + sync_file = sync_file_create(submit->user_fence); if (!sync_file) { ret = -ENOMEM; } else { @@ -902,8 +912,11 @@ out: out_unlock: mutex_unlock(&queue->lock); out_post_unlock: - if (ret && (out_fence_fd >= 0)) + if (ret && (out_fence_fd >= 0)) { put_unused_fd(out_fence_fd); + if (sync_file) + fput(sync_file->file); + } if (!IS_ERR_OR_NULL(submit)) { msm_gem_submit_put(submit); diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c index fc84ca214f24..3ad4f6e9a8ac 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/r535.c @@ -1454,7 +1454,6 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = 4, - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), }, *obj; caps->status = 0xffff; @@ -1462,17 +1461,22 @@ r535_gsp_acpi_caps(acpi_handle handle, CAPS_METHOD_DATA *caps) if (!acpi_check_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, BIT_ULL(0x1a))) return; + argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL); + if (!argv4.buffer.pointer) + return; + obj = acpi_evaluate_dsm(handle, &NVOP_DSM_GUID, NVOP_DSM_REV, 0x1a, &argv4); if (!obj) - return; + goto done; if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) - return; + goto done; caps->status = 0; caps->optimusCaps = *(u32 *)obj->buffer.pointer; +done: ACPI_FREE(obj); kfree(argv4.buffer.pointer); @@ -1489,24 +1493,28 @@ r535_gsp_acpi_jt(acpi_handle handle, JT_METHOD_DATA *jt) union acpi_object argv4 = { .buffer.type = ACPI_TYPE_BUFFER, .buffer.length = sizeof(caps), - .buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL), }, *obj; jt->status = 0xffff; + argv4.buffer.pointer = kmalloc(argv4.buffer.length, GFP_KERNEL); + if (!argv4.buffer.pointer) + return; + obj = acpi_evaluate_dsm(handle, &JT_DSM_GUID, JT_DSM_REV, 0x1, &argv4); if (!obj) - return; + goto done; if (WARN_ON(obj->type != ACPI_TYPE_BUFFER) || WARN_ON(obj->buffer.length != 4)) - return; + goto done; jt->status = 0; jt->jtCaps = *(u32 *)obj->buffer.pointer; jt->jtRevId = (jt->jtCaps & 0xfff00000) >> 20; jt->bSBIOSCaps = 0; +done: ACPI_FREE(obj); kfree(argv4.buffer.pointer); diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index c9c50e3b18a2..3e75fc1f6607 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -368,17 +368,6 @@ void drm_sched_entity_destroy(struct drm_sched_entity *entity) } EXPORT_SYMBOL(drm_sched_entity_destroy); -/* drm_sched_entity_clear_dep - callback to clear the entities dependency */ -static void drm_sched_entity_clear_dep(struct dma_fence *f, - struct 
dma_fence_cb *cb) -{ - struct drm_sched_entity *entity = - container_of(cb, struct drm_sched_entity, cb); - - entity->dependency = NULL; - dma_fence_put(f); -} - /* * drm_sched_entity_clear_dep - callback to clear the entities dependency and * wake up scheduler @@ -389,7 +378,8 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct drm_sched_entity *entity = container_of(cb, struct drm_sched_entity, cb); - drm_sched_entity_clear_dep(f, cb); + entity->dependency = NULL; + dma_fence_put(f); drm_sched_wakeup(entity->rq->sched); } @@ -442,13 +432,6 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) fence = dma_fence_get(&s_fence->scheduled); dma_fence_put(entity->dependency); entity->dependency = fence; - if (!dma_fence_add_callback(fence, &entity->cb, - drm_sched_entity_clear_dep)) - return true; - - /* Ignore it when it is already scheduled */ - dma_fence_put(fence); - return false; } if (!dma_fence_add_callback(entity->dependency, &entity->cb, diff --git a/drivers/gpu/drm/tegra/nvdec.c b/drivers/gpu/drm/tegra/nvdec.c index 4860790666af..14ef61b44f47 100644 --- a/drivers/gpu/drm/tegra/nvdec.c +++ b/drivers/gpu/drm/tegra/nvdec.c @@ -261,10 +261,8 @@ static int nvdec_load_falcon_firmware(struct nvdec *nvdec) if (!client->group) { virt = dma_alloc_coherent(nvdec->dev, size, &iova, GFP_KERNEL); - - err = dma_mapping_error(nvdec->dev, iova); - if (err < 0) - return err; + if (!virt) + return -ENOMEM; } else { virt = tegra_drm_alloc(tegra, size, &iova); if (IS_ERR(virt)) diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c index d19e10289428..07abaf27315f 100644 --- a/drivers/gpu/drm/tiny/simpledrm.c +++ b/drivers/gpu/drm/tiny/simpledrm.c @@ -284,7 +284,7 @@ static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev) static void simpledrm_device_release_clocks(void *res) { - struct simpledrm_device *sdev = simpledrm_device_of_dev(res); + struct simpledrm_device *sdev = res; unsigned int i; for (i = 0; i < sdev->clk_count; ++i) { @@ -382,7 +382,7 @@ static int simpledrm_device_init_clocks(struct simpledrm_device *sdev) static void simpledrm_device_release_regulators(void *res) { - struct simpledrm_device *sdev = simpledrm_device_of_dev(res); + struct simpledrm_device *sdev = res; unsigned int i; for (i = 0; i < sdev->regulator_count; ++i) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 3c07f4712d5c..b600be2a5c84 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -254,6 +254,13 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, ret = dma_resv_trylock(&fbo->base.base._resv); WARN_ON(!ret); + ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); + if (ret) { + dma_resv_unlock(&fbo->base.base._resv); + kfree(fbo); + return ret; + } + if (fbo->base.resource) { ttm_resource_set_bo(fbo->base.resource, &fbo->base); bo->resource = NULL; @@ -262,12 +269,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo, fbo->base.bulk_move = NULL; } - ret = dma_resv_reserve_fences(&fbo->base.base._resv, 1); - if (ret) { - kfree(fbo); - return ret; - } - ttm_bo_get(bo); fbo->bo = bo; diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 75b4725d49c7..d4b0549205c2 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -95,6 +95,12 @@ struct v3d_perfmon { u64 values[] __counted_by(ncounters); }; +enum v3d_irq { + V3D_CORE_IRQ, + V3D_HUB_IRQ, + V3D_MAX_IRQS, 
+}; + struct v3d_dev { struct drm_device drm; @@ -106,6 +112,8 @@ struct v3d_dev { bool single_irq_line; + int irq[V3D_MAX_IRQS]; + struct v3d_perfmon_info perfmon_info; void __iomem *hub_regs; diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index da8faf3b9011..6b6ba7a68fcb 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -118,6 +118,8 @@ v3d_reset(struct v3d_dev *v3d) if (false) v3d_idle_axi(v3d, 0); + v3d_irq_disable(v3d); + v3d_idle_gca(v3d); v3d_reset_v3d(v3d); diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index 72b6a119412f..b98e1a4b33c7 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -228,7 +228,7 @@ v3d_hub_irq(int irq, void *arg) int v3d_irq_init(struct v3d_dev *v3d) { - int irq1, ret, core; + int irq, ret, core; INIT_WORK(&v3d->overflow_mem_work, v3d_overflow_mem_work); @@ -239,17 +239,24 @@ v3d_irq_init(struct v3d_dev *v3d) V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver)); V3D_WRITE(V3D_HUB_INT_CLR, V3D_HUB_IRQS(v3d->ver)); - irq1 = platform_get_irq_optional(v3d_to_pdev(v3d), 1); - if (irq1 == -EPROBE_DEFER) - return irq1; - if (irq1 > 0) { - ret = devm_request_irq(v3d->drm.dev, irq1, + irq = platform_get_irq_optional(v3d_to_pdev(v3d), 1); + if (irq == -EPROBE_DEFER) + return irq; + if (irq > 0) { + v3d->irq[V3D_CORE_IRQ] = irq; + + ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ], v3d_irq, IRQF_SHARED, "v3d_core0", v3d); if (ret) goto fail; - ret = devm_request_irq(v3d->drm.dev, - platform_get_irq(v3d_to_pdev(v3d), 0), + + irq = platform_get_irq(v3d_to_pdev(v3d), 0); + if (irq < 0) + return irq; + v3d->irq[V3D_HUB_IRQ] = irq; + + ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_HUB_IRQ], v3d_hub_irq, IRQF_SHARED, "v3d_hub", v3d); if (ret) @@ -257,8 +264,12 @@ v3d_irq_init(struct v3d_dev *v3d) } else { v3d->single_irq_line = true; - ret = devm_request_irq(v3d->drm.dev, - platform_get_irq(v3d_to_pdev(v3d), 0), + irq = platform_get_irq(v3d_to_pdev(v3d), 0); + if (irq < 0) + return irq; + v3d->irq[V3D_CORE_IRQ] = irq; + + ret = devm_request_irq(v3d->drm.dev, v3d->irq[V3D_CORE_IRQ], v3d_irq, IRQF_SHARED, "v3d", v3d); if (ret) @@ -299,6 +310,12 @@ v3d_irq_disable(struct v3d_dev *v3d) V3D_CORE_WRITE(core, V3D_CTL_INT_MSK_SET, ~0); V3D_WRITE(V3D_HUB_INT_MSK_SET, ~0); + /* Finish any interrupt handler still in flight. */ + for (int i = 0; i < V3D_MAX_IRQS; i++) { + if (v3d->irq[i]) + synchronize_irq(v3d->irq[i]); + } + /* Clear any pending interrupts we might have left. 
*/ for (core = 0; core < v3d->cores; core++) V3D_CORE_WRITE(core, V3D_CTL_INT_CLR, V3D_CORE_IRQS(v3d->ver)); diff --git a/drivers/gpu/drm/xe/Kconfig b/drivers/gpu/drm/xe/Kconfig index 7bbe46a98ff1..93e742c1f21e 100644 --- a/drivers/gpu/drm/xe/Kconfig +++ b/drivers/gpu/drm/xe/Kconfig @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_XE tristate "Intel Xe Graphics" - depends on DRM && PCI && MMU && (m || (y && KUNIT=y)) + depends on DRM && PCI && MMU + depends on KUNIT || !KUNIT select INTERVAL_TREE # we need shmfs for the swappable backing store, and in particular # the shmem_readpage() which depends upon tmpfs diff --git a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h index 8f86a16dc577..f58198cf2cf6 100644 --- a/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h +++ b/drivers/gpu/drm/xe/abi/guc_communication_ctb_abi.h @@ -52,6 +52,7 @@ struct guc_ct_buffer_desc { #define GUC_CTB_STATUS_OVERFLOW (1 << 0) #define GUC_CTB_STATUS_UNDERFLOW (1 << 1) #define GUC_CTB_STATUS_MISMATCH (1 << 2) +#define GUC_CTB_STATUS_DISABLED (1 << 3) u32 reserved[13]; } __packed; static_assert(sizeof(struct guc_ct_buffer_desc) == 64); diff --git a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h index cb6c7598824b..9c4cf050059a 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_stolen.h @@ -29,7 +29,7 @@ static inline int i915_gem_stolen_insert_node_in_range(struct xe_device *xe, bo = xe_bo_create_locked_range(xe, xe_device_get_root_tile(xe), NULL, size, start, end, - ttm_bo_type_kernel, flags); + ttm_bo_type_kernel, flags, 0); if (IS_ERR(bo)) { err = PTR_ERR(bo); bo = NULL; diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c index f99d901a3214..9f941fc2e36b 100644 --- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c +++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c @@ -17,10 +17,7 @@ u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf) void intel_dsb_buffer_write(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val) { - struct xe_device *xe = dsb_buf->vma->bo->tile->xe; - iosys_map_wr(&dsb_buf->vma->bo->vmap, idx * 4, u32, val); - xe_device_l2_flush(xe); } u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx) @@ -30,12 +27,9 @@ u32 intel_dsb_buffer_read(struct intel_dsb_buffer *dsb_buf, u32 idx) void intel_dsb_buffer_memset(struct intel_dsb_buffer *dsb_buf, u32 idx, u32 val, size_t size) { - struct xe_device *xe = dsb_buf->vma->bo->tile->xe; - WARN_ON(idx > (dsb_buf->buf_size - size) / sizeof(*dsb_buf->cmd_buf)); iosys_map_memset(&dsb_buf->vma->bo->vmap, idx * 4, val, size); - xe_device_l2_flush(xe); } bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *dsb_buf, size_t size) @@ -48,11 +42,12 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d if (!vma) return false; + /* Set scanout flag for WC mapping */ obj = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_ALIGN(size), ttm_bo_type_kernel, XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(xe)) | - XE_BO_FLAG_GGTT); + XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT); if (IS_ERR(obj)) { kfree(vma); return false; @@ -73,5 +68,12 @@ void intel_dsb_buffer_cleanup(struct intel_dsb_buffer *dsb_buf) void intel_dsb_buffer_flush_map(struct intel_dsb_buffer *dsb_buf) { - /* TODO: add xe specific flush_map() for 
dsb buffer object. */ + struct xe_device *xe = dsb_buf->vma->bo->tile->xe; + + /* + * The memory barrier here is to ensure coherency of DSB vs MMIO, + * both for weak ordering archs and discrete cards. + */ + xe_device_wmb(xe); + xe_device_l2_flush(xe); } diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c index b58fc4ba2aac..0558b106f8b6 100644 --- a/drivers/gpu/drm/xe/display/xe_fb_pin.c +++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c @@ -153,7 +153,10 @@ static int __xe_pin_fb_vma_dpt(const struct intel_framebuffer *fb, } vma->dpt = dpt; - vma->node = dpt->ggtt_node; + vma->node = dpt->ggtt_node[tile0->id]; + + /* Ensure DPT writes are flushed */ + xe_device_l2_flush(xe); return 0; } @@ -203,8 +206,8 @@ static int __xe_pin_fb_vma_ggtt(const struct intel_framebuffer *fb, if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) align = max_t(u32, align, SZ_64K); - if (bo->ggtt_node && view->type == I915_GTT_VIEW_NORMAL) { - vma->node = bo->ggtt_node; + if (bo->ggtt_node[ggtt->tile->id] && view->type == I915_GTT_VIEW_NORMAL) { + vma->node = bo->ggtt_node[ggtt->tile->id]; } else if (view->type == I915_GTT_VIEW_NORMAL) { u32 x, size = bo->ttm.base.size; @@ -318,8 +321,6 @@ static struct i915_vma *__xe_pin_fb_vma(const struct intel_framebuffer *fb, if (ret) goto err_unpin; - /* Ensure DPT writes are flushed */ - xe_device_l2_flush(xe); return vma; err_unpin: @@ -333,10 +334,12 @@ err: static void __xe_unpin_fb_vma(struct i915_vma *vma) { + u8 tile_id = vma->node->ggtt->tile->id; + if (vma->dpt) xe_bo_unpin_map_no_vm(vma->dpt); - else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node) || - vma->bo->ggtt_node->base.start != vma->node->base.start) + else if (!xe_ggtt_node_allocated(vma->bo->ggtt_node[tile_id]) || + vma->bo->ggtt_node[tile_id]->base.start != vma->node->base.start) xe_ggtt_node_remove(vma->node, false); ttm_bo_reserve(&vma->bo->ttm, false, false, NULL); diff --git a/drivers/gpu/drm/xe/regs/xe_reg_defs.h b/drivers/gpu/drm/xe/regs/xe_reg_defs.h index 23f7dc5bbe99..51fd40ffafcb 100644 --- a/drivers/gpu/drm/xe/regs/xe_reg_defs.h +++ b/drivers/gpu/drm/xe/regs/xe_reg_defs.h @@ -128,7 +128,7 @@ struct xe_reg_mcr { * options. */ #define XE_REG_MCR(r_, ...) 
((const struct xe_reg_mcr){ \ - .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \ + .__reg = XE_REG_INITIALIZER(r_, ##__VA_ARGS__, .mcr = 1) \ }) static inline bool xe_reg_is_valid(struct xe_reg r) diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c index 61a7d20ce42b..bf3f97d0c9c7 100644 --- a/drivers/gpu/drm/xe/tests/xe_mocs.c +++ b/drivers/gpu/drm/xe/tests/xe_mocs.c @@ -43,14 +43,12 @@ static void read_l3cc_table(struct xe_gt *gt, { struct kunit *test = kunit_get_current_test(); u32 l3cc, l3cc_expected; - unsigned int fw_ref, i; + unsigned int i; u32 reg_val; + u32 ret; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { - xe_force_wake_put(gt_to_fw(gt), fw_ref); - KUNIT_ASSERT_TRUE_MSG(test, true, "Forcewake Failed.\n"); - } + ret = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n"); for (i = 0; i < info->num_mocs_regs; i++) { if (!(i & 1)) { @@ -74,7 +72,7 @@ static void read_l3cc_table(struct xe_gt *gt, KUNIT_EXPECT_EQ_MSG(test, l3cc_expected, l3cc, "l3cc idx=%u has incorrect val.\n", i); } - xe_force_wake_put(gt_to_fw(gt), fw_ref); + xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); } static void read_mocs_table(struct xe_gt *gt, @@ -82,14 +80,15 @@ static void read_mocs_table(struct xe_gt *gt, { struct kunit *test = kunit_get_current_test(); u32 mocs, mocs_expected; - unsigned int fw_ref, i; + unsigned int i; u32 reg_val; + u32 ret; KUNIT_EXPECT_TRUE_MSG(test, info->unused_entries_index, "Unused entries index should have been defined\n"); - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - KUNIT_ASSERT_NE_MSG(test, fw_ref, 0, "Forcewake Failed.\n"); + ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n"); for (i = 0; i < info->num_mocs_regs; i++) { if (regs_are_mcr(gt)) @@ -107,7 +106,7 @@ static void read_mocs_table(struct xe_gt *gt, "mocs reg 0x%x has incorrect val.\n", i); } - xe_force_wake_put(gt_to_fw(gt), fw_ref); + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); } static int mocs_kernel_test_run_device(struct xe_device *xe) diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c index 8acc4640f0a2..5f745d9ed6cc 100644 --- a/drivers/gpu/drm/xe/xe_bo.c +++ b/drivers/gpu/drm/xe/xe_bo.c @@ -1130,6 +1130,8 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo) { struct xe_bo *bo = ttm_to_xe_bo(ttm_bo); struct xe_device *xe = ttm_to_xe_device(ttm_bo->bdev); + struct xe_tile *tile; + u8 id; if (bo->ttm.base.import_attach) drm_prime_gem_destroy(&bo->ttm.base, NULL); @@ -1137,8 +1139,9 @@ static void xe_ttm_bo_destroy(struct ttm_buffer_object *ttm_bo) xe_assert(xe, list_empty(&ttm_bo->base.gpuva.list)); - if (bo->ggtt_node && bo->ggtt_node->base.size) - xe_ggtt_remove_bo(bo->tile->mem.ggtt, bo); + for_each_tile(tile, xe, id) + if (bo->ggtt_node[id] && bo->ggtt_node[id]->base.size) + xe_ggtt_remove_bo(tile->mem.ggtt, bo); #ifdef CONFIG_PROC_FS if (bo->client) @@ -1308,6 +1311,10 @@ struct xe_bo *___xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo, return ERR_PTR(-EINVAL); } + /* XE_BO_FLAG_GGTTx requires XE_BO_FLAG_GGTT also be set */ + if ((flags & XE_BO_FLAG_GGTT_ALL) && !(flags & XE_BO_FLAG_GGTT)) + return ERR_PTR(-EINVAL); + if (flags & (XE_BO_FLAG_VRAM_MASK | XE_BO_FLAG_STOLEN) && !(flags & XE_BO_FLAG_IGNORE_MIN_PAGE_SIZE) && ((xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) || @@ -1454,7 +1461,8 @@ static struct xe_bo * 
__xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 start, u64 end, - u16 cpu_caching, enum ttm_bo_type type, u32 flags) + u16 cpu_caching, enum ttm_bo_type type, u32 flags, + u64 alignment) { struct xe_bo *bo = NULL; int err; @@ -1483,6 +1491,8 @@ __xe_bo_create_locked(struct xe_device *xe, if (IS_ERR(bo)) return bo; + bo->min_align = alignment; + /* * Note that instead of taking a reference no the drm_gpuvm_resv_bo(), * to ensure the shared resv doesn't disappear under the bo, the bo @@ -1495,19 +1505,29 @@ __xe_bo_create_locked(struct xe_device *xe, bo->vm = vm; if (bo->flags & XE_BO_FLAG_GGTT) { - if (!tile && flags & XE_BO_FLAG_STOLEN) - tile = xe_device_get_root_tile(xe); + struct xe_tile *t; + u8 id; - xe_assert(xe, tile); + if (!(bo->flags & XE_BO_FLAG_GGTT_ALL)) { + if (!tile && flags & XE_BO_FLAG_STOLEN) + tile = xe_device_get_root_tile(xe); - if (flags & XE_BO_FLAG_FIXED_PLACEMENT) { - err = xe_ggtt_insert_bo_at(tile->mem.ggtt, bo, - start + bo->size, U64_MAX); - } else { - err = xe_ggtt_insert_bo(tile->mem.ggtt, bo); + xe_assert(xe, tile); + } + + for_each_tile(t, xe, id) { + if (t != tile && !(bo->flags & XE_BO_FLAG_GGTTx(t))) + continue; + + if (flags & XE_BO_FLAG_FIXED_PLACEMENT) { + err = xe_ggtt_insert_bo_at(t->mem.ggtt, bo, + start + bo->size, U64_MAX); + } else { + err = xe_ggtt_insert_bo(t->mem.ggtt, bo); + } + if (err) + goto err_unlock_put_bo; } - if (err) - goto err_unlock_put_bo; } return bo; @@ -1523,16 +1543,18 @@ struct xe_bo * xe_bo_create_locked_range(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 start, u64 end, - enum ttm_bo_type type, u32 flags) + enum ttm_bo_type type, u32 flags, u64 alignment) { - return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, flags); + return __xe_bo_create_locked(xe, tile, vm, size, start, end, 0, type, + flags, alignment); } struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, enum ttm_bo_type type, u32 flags) { - return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, flags); + return __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, 0, type, + flags, 0); } struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, @@ -1542,7 +1564,7 @@ struct xe_bo *xe_bo_create_user(struct xe_device *xe, struct xe_tile *tile, { struct xe_bo *bo = __xe_bo_create_locked(xe, tile, vm, size, 0, ~0ULL, cpu_caching, ttm_bo_type_device, - flags | XE_BO_FLAG_USER); + flags | XE_BO_FLAG_USER, 0); if (!IS_ERR(bo)) xe_bo_unlock_vm_held(bo); @@ -1566,6 +1588,17 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile size_t size, u64 offset, enum ttm_bo_type type, u32 flags) { + return xe_bo_create_pin_map_at_aligned(xe, tile, vm, size, offset, + type, flags, 0); +} + +struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, + struct xe_tile *tile, + struct xe_vm *vm, + size_t size, u64 offset, + enum ttm_bo_type type, u32 flags, + u64 alignment) +{ struct xe_bo *bo; int err; u64 start = offset == ~0ull ? 
0 : offset; @@ -1576,7 +1609,8 @@ struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile flags |= XE_BO_FLAG_GGTT; bo = xe_bo_create_locked_range(xe, tile, vm, size, start, end, type, - flags | XE_BO_FLAG_NEEDS_CPU_ACCESS); + flags | XE_BO_FLAG_NEEDS_CPU_ACCESS, + alignment); if (IS_ERR(bo)) return bo; @@ -2355,14 +2389,18 @@ void xe_bo_put_commit(struct llist_head *deferred) void xe_bo_put(struct xe_bo *bo) { + struct xe_tile *tile; + u8 id; + might_sleep(); if (bo) { #ifdef CONFIG_PROC_FS if (bo->client) might_lock(&bo->client->bos_lock); #endif - if (bo->ggtt_node && bo->ggtt_node->ggtt) - might_lock(&bo->ggtt_node->ggtt->lock); + for_each_tile(tile, xe_bo_device(bo), id) + if (bo->ggtt_node[id] && bo->ggtt_node[id]->ggtt) + might_lock(&bo->ggtt_node[id]->ggtt->lock); drm_gem_object_put(&bo->ttm.base); } } diff --git a/drivers/gpu/drm/xe/xe_bo.h b/drivers/gpu/drm/xe/xe_bo.h index d22269a230aa..d04159c59846 100644 --- a/drivers/gpu/drm/xe/xe_bo.h +++ b/drivers/gpu/drm/xe/xe_bo.h @@ -39,10 +39,22 @@ #define XE_BO_FLAG_NEEDS_64K BIT(15) #define XE_BO_FLAG_NEEDS_2M BIT(16) #define XE_BO_FLAG_GGTT_INVALIDATE BIT(17) +#define XE_BO_FLAG_GGTT0 BIT(18) +#define XE_BO_FLAG_GGTT1 BIT(19) +#define XE_BO_FLAG_GGTT2 BIT(20) +#define XE_BO_FLAG_GGTT3 BIT(21) +#define XE_BO_FLAG_GGTT_ALL (XE_BO_FLAG_GGTT0 | \ + XE_BO_FLAG_GGTT1 | \ + XE_BO_FLAG_GGTT2 | \ + XE_BO_FLAG_GGTT3) + /* this one is trigger internally only */ #define XE_BO_FLAG_INTERNAL_TEST BIT(30) #define XE_BO_FLAG_INTERNAL_64K BIT(31) +#define XE_BO_FLAG_GGTTx(tile) \ + (XE_BO_FLAG_GGTT0 << (tile)->id) + #define XE_PTE_SHIFT 12 #define XE_PAGE_SIZE (1 << XE_PTE_SHIFT) #define XE_PTE_MASK (XE_PAGE_SIZE - 1) @@ -77,7 +89,7 @@ struct xe_bo * xe_bo_create_locked_range(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 start, u64 end, - enum ttm_bo_type type, u32 flags); + enum ttm_bo_type type, u32 flags, u64 alignment); struct xe_bo *xe_bo_create_locked(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, enum ttm_bo_type type, u32 flags); @@ -94,6 +106,12 @@ struct xe_bo *xe_bo_create_pin_map(struct xe_device *xe, struct xe_tile *tile, struct xe_bo *xe_bo_create_pin_map_at(struct xe_device *xe, struct xe_tile *tile, struct xe_vm *vm, size_t size, u64 offset, enum ttm_bo_type type, u32 flags); +struct xe_bo *xe_bo_create_pin_map_at_aligned(struct xe_device *xe, + struct xe_tile *tile, + struct xe_vm *vm, + size_t size, u64 offset, + enum ttm_bo_type type, u32 flags, + u64 alignment); struct xe_bo *xe_bo_create_from_data(struct xe_device *xe, struct xe_tile *tile, const void *data, size_t size, enum ttm_bo_type type, u32 flags); @@ -188,14 +206,24 @@ xe_bo_main_addr(struct xe_bo *bo, size_t page_size) } static inline u32 -xe_bo_ggtt_addr(struct xe_bo *bo) +__xe_bo_ggtt_addr(struct xe_bo *bo, u8 tile_id) { - if (XE_WARN_ON(!bo->ggtt_node)) + struct xe_ggtt_node *ggtt_node = bo->ggtt_node[tile_id]; + + if (XE_WARN_ON(!ggtt_node)) return 0; - XE_WARN_ON(bo->ggtt_node->base.size > bo->size); - XE_WARN_ON(bo->ggtt_node->base.start + bo->ggtt_node->base.size > (1ull << 32)); - return bo->ggtt_node->base.start; + XE_WARN_ON(ggtt_node->base.size > bo->size); + XE_WARN_ON(ggtt_node->base.start + ggtt_node->base.size > (1ull << 32)); + return ggtt_node->base.start; +} + +static inline u32 +xe_bo_ggtt_addr(struct xe_bo *bo) +{ + xe_assert(xe_bo_device(bo), bo->tile); + + return __xe_bo_ggtt_addr(bo, bo->tile->id); } int xe_bo_vmap(struct xe_bo *bo); diff --git 
a/drivers/gpu/drm/xe/xe_bo_evict.c b/drivers/gpu/drm/xe/xe_bo_evict.c index 8fb2be061003..6a40eedd9db1 100644 --- a/drivers/gpu/drm/xe/xe_bo_evict.c +++ b/drivers/gpu/drm/xe/xe_bo_evict.c @@ -152,11 +152,17 @@ int xe_bo_restore_kernel(struct xe_device *xe) } if (bo->flags & XE_BO_FLAG_GGTT) { - struct xe_tile *tile = bo->tile; + struct xe_tile *tile; + u8 id; - mutex_lock(&tile->mem.ggtt->lock); - xe_ggtt_map_bo(tile->mem.ggtt, bo); - mutex_unlock(&tile->mem.ggtt->lock); + for_each_tile(tile, xe, id) { + if (tile != bo->tile && !(bo->flags & XE_BO_FLAG_GGTTx(tile))) + continue; + + mutex_lock(&tile->mem.ggtt->lock); + xe_ggtt_map_bo(tile->mem.ggtt, bo); + mutex_unlock(&tile->mem.ggtt->lock); + } } /* diff --git a/drivers/gpu/drm/xe/xe_bo_types.h b/drivers/gpu/drm/xe/xe_bo_types.h index 2ed558ac2264..aa298d33c250 100644 --- a/drivers/gpu/drm/xe/xe_bo_types.h +++ b/drivers/gpu/drm/xe/xe_bo_types.h @@ -13,6 +13,7 @@ #include <drm/ttm/ttm_execbuf_util.h> #include <drm/ttm/ttm_placement.h> +#include "xe_device_types.h" #include "xe_ggtt_types.h" struct xe_device; @@ -39,8 +40,8 @@ struct xe_bo { struct ttm_place placements[XE_BO_MAX_PLACEMENTS]; /** @placement: current placement for this BO */ struct ttm_placement placement; - /** @ggtt_node: GGTT node if this BO is mapped in the GGTT */ - struct xe_ggtt_node *ggtt_node; + /** @ggtt_node: Array of GGTT nodes if this BO is mapped in the GGTTs */ + struct xe_ggtt_node *ggtt_node[XE_MAX_TILES_PER_DEVICE]; /** @vmap: iosys map of this buffer */ struct iosys_map vmap; /** @ttm_kmap: TTM bo kmap object for internal use only. Keep off. */ @@ -76,6 +77,11 @@ struct xe_bo { /** @vram_userfault_link: Link into @mem_access.vram_userfault.list */ struct list_head vram_userfault_link; + + /** @min_align: minimum alignment needed for this BO if different + * from default + */ + u64 min_align; }; #define intel_bo_to_drm_bo(bo) (&(bo)->ttm.base) diff --git a/drivers/gpu/drm/xe/xe_devcoredump.c b/drivers/gpu/drm/xe/xe_devcoredump.c index 8050938389b6..e412a70323cc 100644 --- a/drivers/gpu/drm/xe/xe_devcoredump.c +++ b/drivers/gpu/drm/xe/xe_devcoredump.c @@ -197,7 +197,6 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work) struct xe_devcoredump_snapshot *ss = container_of(work, typeof(*ss), work); struct xe_devcoredump *coredump = container_of(ss, typeof(*coredump), snapshot); struct xe_device *xe = coredump_to_xe(coredump); - unsigned int fw_ref; /* * NB: Despite passing a GFP_ flags parameter here, more allocations are done @@ -211,12 +210,11 @@ static void xe_devcoredump_deferred_snap_work(struct work_struct *work) xe_pm_runtime_get(xe); /* keep going if fw fails as we still want to save the memory and SW data */ - fw_ref = xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) + if (xe_force_wake_get(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL)) xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n"); xe_vm_snapshot_capture_delayed(ss->vm); xe_guc_exec_queue_snapshot_capture_delayed(ss->ge); - xe_force_wake_put(gt_to_fw(ss->gt), fw_ref); + xe_force_wake_put(gt_to_fw(ss->gt), XE_FORCEWAKE_ALL); xe_pm_runtime_put(xe); @@ -243,9 +241,8 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump, u32 width_mask = (0x1 << q->width) - 1; const char *process_name = "no process"; - unsigned int fw_ref; - bool cookie; int i; + bool cookie; ss->snapshot_time = ktime_get_real(); ss->boot_time = ktime_get_boottime(); @@ -268,7 +265,8 @@ static void 
devcoredump_snapshot(struct xe_devcoredump *coredump,
 }

 /* keep going if fw fails as we still want to save the memory and SW data */
- fw_ref = xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
+ if (xe_force_wake_get(gt_to_fw(q->gt), XE_FORCEWAKE_ALL))
+ xe_gt_info(ss->gt, "failed to get forcewake for coredump capture\n");

 ss->ct = xe_guc_ct_snapshot_capture(&guc->ct, true);
 ss->ge = xe_guc_exec_queue_snapshot_capture(q);
@@ -286,7 +284,7 @@ static void devcoredump_snapshot(struct xe_devcoredump *coredump,

 queue_work(system_unbound_wq, &ss->work);

- xe_force_wake_put(gt_to_fw(q->gt), fw_ref);
+ xe_force_wake_put(gt_to_fw(q->gt), XE_FORCEWAKE_ALL);
 dma_fence_end_signalling(cookie);
}
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index 0c3db53b93d8..82da51a6616a 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -37,6 +37,7 @@
 #include "xe_gt_printk.h"
 #include "xe_gt_sriov_vf.h"
 #include "xe_guc.h"
+#include "xe_guc_pc.h"
 #include "xe_hw_engine_group.h"
 #include "xe_hwmon.h"
 #include "xe_irq.h"
@@ -871,31 +872,37 @@ void xe_device_td_flush(struct xe_device *xe)
 if (!IS_DGFX(xe) || GRAPHICS_VER(xe) < 20)
 return;

- if (XE_WA(xe_root_mmio_gt(xe), 16023588340)) {
+ gt = xe_root_mmio_gt(xe);
+ if (XE_WA(gt, 16023588340)) {
+ /* A transient flush is not sufficient: flush the L2 */
 xe_device_l2_flush(xe);
- return;
- }
-
- for_each_gt(gt, xe, id) {
- if (xe_gt_is_media_type(gt))
- continue;
-
- if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
- return;
-
- xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
- /*
- * FIXME: We can likely do better here with our choice of
- * timeout. Currently we just assume the worst case, i.e. 150us,
- * which is believed to be sufficient to cover the worst case
- * scenario on current platforms if all cache entries are
- * transient and need to be flushed..
- */
- if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
- 150, NULL, false))
- xe_gt_err_once(gt, "TD flush timeout\n");
-
- xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ } else {
+ xe_guc_pc_apply_flush_freq_limit(&gt->uc.guc.pc);
+
+ /* Execute TDF flush on all graphics GTs */
+ for_each_gt(gt, xe, id) {
+ if (xe_gt_is_media_type(gt))
+ continue;
+
+ if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GT))
+ return;
+
+ xe_mmio_write32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST);
+ /*
+ * FIXME: We can likely do better here with our choice of
+ * timeout. Currently we just assume the worst case, i.e. 150us,
+ * which is believed to be sufficient to cover the worst case
+ * scenario on current platforms if all cache entries are
+ * transient and need to be flushed..
+ */
+ if (xe_mmio_wait32(gt, XE2_TDF_CTRL, TRANSIENT_FLUSH_REQUEST, 0,
+ 150, NULL, false))
+ xe_gt_err_once(gt, "TD flush timeout\n");
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GT);
+ }
+
+ xe_guc_pc_remove_flush_freq_limit(&xe_root_mmio_gt(xe)->uc.guc.pc);
 }
}
diff --git a/drivers/gpu/drm/xe/xe_force_wake.h b/drivers/gpu/drm/xe/xe_force_wake.h
index 1608a55edc84..a2577672f4e3 100644
--- a/drivers/gpu/drm/xe/xe_force_wake.h
+++ b/drivers/gpu/drm/xe/xe_force_wake.h
@@ -46,20 +46,4 @@ xe_force_wake_assert_held(struct xe_force_wake *fw,
 xe_gt_assert(fw->gt, fw->awake_domains & domain);
}

-/**
- * xe_force_wake_ref_has_domain - verifies if the domains are in fw_ref
- * @fw_ref : the force_wake reference
- * @domain : forcewake domain to verify
- *
- * This function confirms whether the @fw_ref includes a reference to the
- * specified @domain. 
- * - * Return: true if domain is refcounted. - */ -static inline bool -xe_force_wake_ref_has_domain(unsigned int fw_ref, enum xe_force_wake_domains domain) -{ - return fw_ref & domain; -} - #endif diff --git a/drivers/gpu/drm/xe/xe_ggtt.c b/drivers/gpu/drm/xe/xe_ggtt.c index e9820126feb9..76e1092f51d9 100644 --- a/drivers/gpu/drm/xe/xe_ggtt.c +++ b/drivers/gpu/drm/xe/xe_ggtt.c @@ -605,10 +605,10 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) u64 start; u64 offset, pte; - if (XE_WARN_ON(!bo->ggtt_node)) + if (XE_WARN_ON(!bo->ggtt_node[ggtt->tile->id])) return; - start = bo->ggtt_node->base.start; + start = bo->ggtt_node[ggtt->tile->id]->base.start; for (offset = 0; offset < bo->size; offset += XE_PAGE_SIZE) { pte = ggtt->pt_ops->pte_encode_bo(bo, offset, pat_index); @@ -619,15 +619,16 @@ void xe_ggtt_map_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, u64 start, u64 end) { + u64 alignment = bo->min_align > 0 ? bo->min_align : XE_PAGE_SIZE; + u8 tile_id = ggtt->tile->id; int err; - u64 alignment = XE_PAGE_SIZE; if (xe_bo_is_vram(bo) && ggtt->flags & XE_GGTT_FLAGS_64K) alignment = SZ_64K; - if (XE_WARN_ON(bo->ggtt_node)) { + if (XE_WARN_ON(bo->ggtt_node[tile_id])) { /* Someone's already inserted this BO in the GGTT */ - xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size); + xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size); return 0; } @@ -637,19 +638,19 @@ static int __xe_ggtt_insert_bo_at(struct xe_ggtt *ggtt, struct xe_bo *bo, xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile)); - bo->ggtt_node = xe_ggtt_node_init(ggtt); - if (IS_ERR(bo->ggtt_node)) { - err = PTR_ERR(bo->ggtt_node); - bo->ggtt_node = NULL; + bo->ggtt_node[tile_id] = xe_ggtt_node_init(ggtt); + if (IS_ERR(bo->ggtt_node[tile_id])) { + err = PTR_ERR(bo->ggtt_node[tile_id]); + bo->ggtt_node[tile_id] = NULL; goto out; } mutex_lock(&ggtt->lock); - err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node->base, bo->size, - alignment, 0, start, end, 0); + err = drm_mm_insert_node_in_range(&ggtt->mm, &bo->ggtt_node[tile_id]->base, + bo->size, alignment, 0, start, end, 0); if (err) { - xe_ggtt_node_fini(bo->ggtt_node); - bo->ggtt_node = NULL; + xe_ggtt_node_fini(bo->ggtt_node[tile_id]); + bo->ggtt_node[tile_id] = NULL; } else { xe_ggtt_map_bo(ggtt, bo); } @@ -698,13 +699,15 @@ int xe_ggtt_insert_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) */ void xe_ggtt_remove_bo(struct xe_ggtt *ggtt, struct xe_bo *bo) { - if (XE_WARN_ON(!bo->ggtt_node)) + u8 tile_id = ggtt->tile->id; + + if (XE_WARN_ON(!bo->ggtt_node[tile_id])) return; /* This BO is not currently in the GGTT */ - xe_tile_assert(ggtt->tile, bo->ggtt_node->base.size == bo->size); + xe_tile_assert(ggtt->tile, bo->ggtt_node[tile_id]->base.size == bo->size); - xe_ggtt_node_remove(bo->ggtt_node, + xe_ggtt_node_remove(bo->ggtt_node[tile_id], bo->flags & XE_BO_FLAG_GGTT_INVALIDATE); } diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c index 231ed53cf907..3b53d46aad54 100644 --- a/drivers/gpu/drm/xe/xe_gt.c +++ b/drivers/gpu/drm/xe/xe_gt.c @@ -98,14 +98,14 @@ void xe_gt_sanitize(struct xe_gt *gt) static void xe_gt_enable_host_l2_vram(struct xe_gt *gt) { - unsigned int fw_ref; u32 reg; + int err; if (!XE_WA(gt, 16023588340)) return; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (!fw_ref) + err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (WARN_ON(err)) return; if (!xe_gt_is_media_type(gt)) { @@ -115,13 +115,13 @@ static void 
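With bo->ggtt_node now an array indexed by tile id (see the xe_ggtt.c hunks above), any GGTT address lookup has to name the tile it means. A hypothetical accessor, not part of the patch, showing the indexing convention:

static u64 bo_ggtt_start_on_tile(struct xe_bo *bo, u8 tile_id)
{
        struct xe_ggtt_node *node = bo->ggtt_node[tile_id];

        /* NULL means the BO is not mapped in this tile's GGTT */
        return node ? node->base.start : 0;
}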
xe_gt_enable_host_l2_vram(struct xe_gt *gt) } xe_gt_mcr_multicast_write(gt, XEHPC_L3CLOS_MASK(3), 0xF); - xe_force_wake_put(gt_to_fw(gt), fw_ref); + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); } static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) { - unsigned int fw_ref; u32 reg; + int err; if (!XE_WA(gt, 16023588340)) return; @@ -129,15 +129,15 @@ static void xe_gt_disable_host_l2_vram(struct xe_gt *gt) if (xe_gt_is_media_type(gt)) return; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (!fw_ref) + err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (WARN_ON(err)) return; reg = xe_gt_mcr_unicast_read_any(gt, XE2_GAMREQSTRM_CTRL); reg &= ~CG_DIS_CNTLBUS; xe_gt_mcr_multicast_write(gt, XE2_GAMREQSTRM_CTRL, reg); - xe_force_wake_put(gt_to_fw(gt), fw_ref); + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); } /** @@ -389,6 +389,8 @@ int xe_gt_init_early(struct xe_gt *gt) if (err) return err; + xe_mocs_init_early(gt); + return 0; } @@ -405,14 +407,11 @@ static void dump_pat_on_error(struct xe_gt *gt) static int gt_fw_domain_init(struct xe_gt *gt) { - unsigned int fw_ref; int err, i; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (!fw_ref) { - err = -ETIMEDOUT; + err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (err) goto err_hw_fence_irq; - } if (!xe_gt_is_media_type(gt)) { err = xe_ggtt_init(gt_to_tile(gt)->mem.ggtt); @@ -447,12 +446,14 @@ static int gt_fw_domain_init(struct xe_gt *gt) */ gt->info.gmdid = xe_mmio_read32(gt, GMD_ID); - xe_force_wake_put(gt_to_fw(gt), fw_ref); + err = xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + XE_WARN_ON(err); + return 0; err_force_wake: dump_pat_on_error(gt); - xe_force_wake_put(gt_to_fw(gt), fw_ref); + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); err_hw_fence_irq: for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) xe_hw_fence_irq_finish(&gt->fence_irq[i]); @@ -462,14 +463,11 @@ err_hw_fence_irq: static int all_fw_domain_init(struct xe_gt *gt) { - unsigned int fw_ref; int err, i; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { - err = -ETIMEDOUT; - goto err_force_wake; - } + err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (err) + goto err_hw_fence_irq; xe_gt_mcr_set_implicit_defaults(gt); xe_wa_process_gt(gt); @@ -535,12 +533,14 @@ static int all_fw_domain_init(struct xe_gt *gt) if (IS_SRIOV_PF(gt_to_xe(gt))) xe_gt_sriov_pf_init_hw(gt); - xe_force_wake_put(gt_to_fw(gt), fw_ref); + err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + XE_WARN_ON(err); return 0; err_force_wake: - xe_force_wake_put(gt_to_fw(gt), fw_ref); + xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); +err_hw_fence_irq: for (i = 0; i < XE_ENGINE_CLASS_MAX; ++i) xe_hw_fence_irq_finish(&gt->fence_irq[i]); @@ -553,12 +553,11 @@ err_force_wake: */ int xe_gt_init_hwconfig(struct xe_gt *gt) { - unsigned int fw_ref; int err; - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); - if (!fw_ref) - return -ETIMEDOUT; + err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + if (err) + goto out; xe_gt_mcr_init_early(gt); xe_pat_init(gt); @@ -576,7 +575,8 @@ int xe_gt_init_hwconfig(struct xe_gt *gt) xe_gt_enable_host_l2_vram(gt); out_fw: - xe_force_wake_put(gt_to_fw(gt), fw_ref); + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); +out: return err; } @@ -592,17 +592,15 @@ int xe_gt_init(struct xe_gt *gt) xe_hw_fence_irq_init(&gt->fence_irq[i]); } - err = xe_gt_pagefault_init(gt); + err = xe_gt_sysfs_init(gt); if (err) return err; - xe_mocs_init_early(gt); - - err = xe_gt_sysfs_init(gt); + err =
gt_fw_domain_init(gt); if (err) return err; - err = gt_fw_domain_init(gt); + err = xe_gt_pagefault_init(gt); if (err) return err; @@ -746,7 +744,6 @@ static int do_gt_restart(struct xe_gt *gt) static int gt_reset(struct xe_gt *gt) { - unsigned int fw_ref; int err; if (xe_device_wedged(gt_to_xe(gt))) @@ -767,11 +764,12 @@ static int gt_reset(struct xe_gt *gt) xe_gt_sanitize(gt); - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) { - err = -ETIMEDOUT; - goto err_out; - } + err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (err) + goto err_msg; + + if (IS_SRIOV_PF(gt_to_xe(gt))) + xe_gt_sriov_pf_stop_prepare(gt); xe_uc_gucrc_disable(&gt->uc); xe_uc_stop_prepare(&gt->uc); @@ -789,7 +787,8 @@ static int gt_reset(struct xe_gt *gt) if (err) goto err_out; - xe_force_wake_put(gt_to_fw(gt), fw_ref); + err = xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); + XE_WARN_ON(err); xe_pm_runtime_put(gt_to_xe(gt)); xe_gt_info(gt, "reset done\n"); @@ -797,7 +796,8 @@ static int gt_reset(struct xe_gt *gt) return 0; err_out: - xe_force_wake_put(gt_to_fw(gt), fw_ref); + XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); +err_msg: XE_WARN_ON(xe_uc_start(&gt->uc)); err_fail: xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err)); @@ -829,25 +829,22 @@ void xe_gt_reset_async(struct xe_gt *gt) void xe_gt_suspend_prepare(struct xe_gt *gt) { - unsigned int fw_ref; - - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + XE_WARN_ON(xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL)); xe_uc_suspend_prepare(&gt->uc); - xe_force_wake_put(gt_to_fw(gt), fw_ref); + XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); } int xe_gt_suspend(struct xe_gt *gt) { - unsigned int fw_ref; int err; xe_gt_dbg(gt, "suspending\n"); xe_gt_sanitize(gt); - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) + err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (err) goto err_msg; err = xe_uc_suspend(&gt->uc); @@ -858,15 +855,14 @@ int xe_gt_suspend(struct xe_gt *gt) xe_gt_disable_host_l2_vram(gt); - xe_force_wake_put(gt_to_fw(gt), fw_ref); + XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); xe_gt_dbg(gt, "suspended\n"); return 0; -err_msg: - err = -ETIMEDOUT; err_force_wake: - xe_force_wake_put(gt_to_fw(gt), fw_ref); + XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); +err_msg: xe_gt_err(gt, "suspend failed (%pe)\n", ERR_PTR(err)); return err; @@ -874,11 +870,9 @@ err_force_wake: void xe_gt_shutdown(struct xe_gt *gt) { - unsigned int fw_ref; - - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); do_gt_reset(gt); - xe_force_wake_put(gt_to_fw(gt), fw_ref); + xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL); } /** @@ -903,12 +897,11 @@ int xe_gt_sanitize_freq(struct xe_gt *gt) int xe_gt_resume(struct xe_gt *gt) { - unsigned int fw_ref; int err; xe_gt_dbg(gt, "resuming\n"); - fw_ref = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); - if (!xe_force_wake_ref_has_domain(fw_ref, XE_FORCEWAKE_ALL)) + err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL); + if (err) goto err_msg; err = do_gt_restart(gt); @@ -917,15 +910,14 @@ int xe_gt_resume(struct xe_gt *gt) xe_gt_idle_enable_pg(gt); - xe_force_wake_put(gt_to_fw(gt), fw_ref); + XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); xe_gt_dbg(gt, "resumed\n"); return 0; -err_msg: - err = -ETIMEDOUT; err_force_wake: - xe_force_wake_put(gt_to_fw(gt), fw_ref); + XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL)); +err_msg: xe_gt_err(gt, "resume failed (%pe)\n", ERR_PTR(err)); return err;
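The xe_gt.c conversion above replaces the returned forcewake reference with a plain error code: xe_force_wake_get() now yields 0 or a negative error, and xe_force_wake_put() names the domains again. A condensed sketch of the resulting call pattern (the wrapper function is illustrative only):

static int with_gt_awake(struct xe_gt *gt)
{
        int err;

        err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
        if (err)
                return err;

        /* ... access registers that require the GT to be awake ... */

        XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
        return 0;
}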
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c index db540c8be6c7..656c2ab6ca9f 100644 --- a/drivers/gpu/drm/xe/xe_gt_pagefault.c +++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c @@ -432,6 +432,7 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue) #define PF_MULTIPLIER 8 pf_queue->num_dw = (num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW * PF_MULTIPLIER; + pf_queue->num_dw = roundup_pow_of_two(pf_queue->num_dw); #undef PF_MULTIPLIER pf_queue->gt = gt; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c index 905f409db74b..57e9eddc092e 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c @@ -5,14 +5,20 @@ #include <drm/drm_managed.h> +#include "regs/xe_guc_regs.h" #include "regs/xe_regs.h" +#include "xe_gt.h" #include "xe_gt_sriov_pf.h" #include "xe_gt_sriov_pf_config.h" #include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_helpers.h" #include "xe_gt_sriov_pf_service.h" +#include "xe_gt_sriov_printk.h" #include "xe_mmio.h" +#include "xe_pm.h" + +static void pf_worker_restart_func(struct work_struct *w); /* * VF's metadata is maintained in the flexible array where: @@ -38,6 +44,11 @@ static int pf_alloc_metadata(struct xe_gt *gt) return 0; } +static void pf_init_workers(struct xe_gt *gt) +{ + INIT_WORK(&gt->sriov.pf.workers.restart, pf_worker_restart_func); +} + /** * xe_gt_sriov_pf_init_early - Prepare SR-IOV PF data structures on PF. * @gt: the &xe_gt to initialize @@ -62,6 +73,8 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt) if (err) return err; + pf_init_workers(gt); + return 0; } @@ -89,14 +102,111 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt) xe_gt_sriov_pf_service_update(gt); } +static u32 pf_get_vf_regs_stride(struct xe_device *xe) +{ + return GRAPHICS_VERx100(xe) > 1200 ? 0x400 : 0x1000; +} + +static struct xe_reg xe_reg_vf_to_pf(struct xe_reg vf_reg, unsigned int vfid, u32 stride) +{ + struct xe_reg pf_reg = vf_reg; + + pf_reg.vf = 0; + pf_reg.addr += stride * vfid; + + return pf_reg; +} + +static void pf_clear_vf_scratch_regs(struct xe_gt *gt, unsigned int vfid) +{ + u32 stride = pf_get_vf_regs_stride(gt_to_xe(gt)); + struct xe_reg scratch; + int n, count; + + if (xe_gt_is_media_type(gt)) { + count = MED_VF_SW_FLAG_COUNT; + for (n = 0; n < count; n++) { + scratch = xe_reg_vf_to_pf(MED_VF_SW_FLAG(n), vfid, stride); + xe_mmio_write32(gt, scratch, 0); + } + } else { + count = VF_SW_FLAG_COUNT; + for (n = 0; n < count; n++) { + scratch = xe_reg_vf_to_pf(VF_SW_FLAG(n), vfid, stride); + xe_mmio_write32(gt, scratch, 0); + } + } +} + /** - * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset. + * xe_gt_sriov_pf_sanitize_hw() - Reset hardware state related to a VF. * @gt: the &xe_gt + * @vfid: the VF identifier * * This function can only be called on PF. */ -void xe_gt_sriov_pf_restart(struct xe_gt *gt) +void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + + pf_clear_vf_scratch_regs(gt, vfid); +} + +static void pf_cancel_restart(struct xe_gt *gt) +{ + xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt))); + + if (cancel_work_sync(&gt->sriov.pf.workers.restart)) + xe_gt_sriov_dbg_verbose(gt, "pending restart canceled!\n"); +} +
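The PF restart worker added here follows the usual deferred-work lifecycle. A condensed usage sketch of the three entry points the patch wires up (call sites abbreviated; all names are from the patch):

/* once, at early init */
pf_init_workers(gt);             /* INIT_WORK(&gt->sriov.pf.workers.restart, ...) */

/* after each GT reset */
xe_gt_sriov_pf_restart(gt);      /* pf_queue_restart() -> queue_work(xe->sriov.wq, ...) */

/* before stopping the GT, so no stale restart can run */
xe_gt_sriov_pf_stop_prepare(gt); /* pf_cancel_restart() -> cancel_work_sync() */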
+/** + * xe_gt_sriov_pf_stop_prepare() - Prepare to stop SR-IOV support. + * @gt: the &xe_gt + * + * This function can only be called on the PF. + */ +void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt) { + pf_cancel_restart(gt); +} + +static void pf_restart(struct xe_gt *gt) +{ + struct xe_device *xe = gt_to_xe(gt); + + xe_pm_runtime_get(xe); xe_gt_sriov_pf_config_restart(gt); xe_gt_sriov_pf_control_restart(gt); + xe_pm_runtime_put(xe); + + xe_gt_sriov_dbg(gt, "restart completed\n"); +} + +static void pf_worker_restart_func(struct work_struct *w) +{ + struct xe_gt *gt = container_of(w, typeof(*gt), sriov.pf.workers.restart); + + pf_restart(gt); +} + +static void pf_queue_restart(struct xe_gt *gt) +{ + struct xe_device *xe = gt_to_xe(gt); + + xe_gt_assert(gt, IS_SRIOV_PF(xe)); + + if (!queue_work(xe->sriov.wq, &gt->sriov.pf.workers.restart)) + xe_gt_sriov_dbg(gt, "restart already in queue!\n"); +} + +/** + * xe_gt_sriov_pf_restart - Restart SR-IOV support after a GT reset. + * @gt: the &xe_gt + * + * This function can only be called on PF. + */ +void xe_gt_sriov_pf_restart(struct xe_gt *gt) +{ + pf_queue_restart(gt); } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h index f0cb726a6919..165ba31d0391 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.h @@ -11,6 +11,8 @@ struct xe_gt; #ifdef CONFIG_PCI_IOV int xe_gt_sriov_pf_init_early(struct xe_gt *gt); void xe_gt_sriov_pf_init_hw(struct xe_gt *gt); +void xe_gt_sriov_pf_sanitize_hw(struct xe_gt *gt, unsigned int vfid); +void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt); void xe_gt_sriov_pf_restart(struct xe_gt *gt); #else static inline int xe_gt_sriov_pf_init_early(struct xe_gt *gt) @@ -22,6 +24,10 @@ static inline void xe_gt_sriov_pf_init_hw(struct xe_gt *gt) { } +static inline void xe_gt_sriov_pf_stop_prepare(struct xe_gt *gt) +{ +} + static inline void xe_gt_sriov_pf_restart(struct xe_gt *gt) { } diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c index 02f7328bd6ce..b4fd5a81aff1 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c @@ -9,6 +9,7 @@ #include "xe_device.h" #include "xe_gt.h" +#include "xe_gt_sriov_pf.h" #include "xe_gt_sriov_pf_config.h" #include "xe_gt_sriov_pf_control.h" #include "xe_gt_sriov_pf_helpers.h" @@ -1008,7 +1009,7 @@ static bool pf_exit_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid) if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO)) return false; - /* XXX: placeholder */ + xe_gt_sriov_pf_sanitize_hw(gt, vfid); pf_enter_vf_flr_send_finish(gt, vfid); return true; diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h index 28e1b130bf87..a69d128c4f45 100644 --- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h +++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h @@ -32,7 +32,16 @@ struct xe_gt_sriov_metadata { }; /** + * struct xe_gt_sriov_pf_workers - GT level workers used by the PF. + */ +struct xe_gt_sriov_pf_workers { + /** @restart: worker that executes actions post GT reset */ + struct work_struct restart; +}; + +/** * struct xe_gt_sriov_pf - GT level PF virtualization data. + * @workers: workers data. * @service: service data. * @control: control data. * @policy: policy data. @@ -40,6 +49,7 @@ struct xe_gt_sriov_metadata { * @vfs: metadata for all VFs.
*/ struct xe_gt_sriov_pf { + struct xe_gt_sriov_pf_workers workers; struct xe_gt_sriov_pf_service service; struct xe_gt_sriov_pf_control control; struct xe_gt_sriov_pf_policy policy; diff --git a/drivers/gpu/drm/xe/xe_guc.c b/drivers/gpu/drm/xe/xe_guc.c index 52df28032a6f..96373cdb366b 100644 --- a/drivers/gpu/drm/xe/xe_guc.c +++ b/drivers/gpu/drm/xe/xe_guc.c @@ -985,7 +985,7 @@ timeout: BUILD_BUG_ON(FIELD_MAX(GUC_HXG_MSG_0_TYPE) != GUC_HXG_TYPE_RESPONSE_SUCCESS); BUILD_BUG_ON((GUC_HXG_TYPE_RESPONSE_SUCCESS ^ GUC_HXG_TYPE_RESPONSE_FAILURE) != 1); - ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask, + ret = xe_mmio_wait32(gt, reply_reg, resp_mask, resp_mask, 1000000, &header, false); if (unlikely(FIELD_GET(GUC_HXG_MSG_0_ORIGIN, header) != @@ -1175,7 +1175,7 @@ void xe_guc_print_info(struct xe_guc *guc, struct drm_printer *p) xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); - xe_guc_ct_print(&guc->ct, p, false); + xe_guc_ct_print(&guc->ct, p); xe_guc_submit_print(guc, p); } diff --git a/drivers/gpu/drm/xe/xe_guc_ct.c b/drivers/gpu/drm/xe/xe_guc_ct.c index 1f74f6bd50f3..f1ce4e14dcb5 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.c +++ b/drivers/gpu/drm/xe/xe_guc_ct.c @@ -25,12 +25,53 @@ #include "xe_gt_sriov_pf_monitor.h" #include "xe_gt_tlb_invalidation.h" #include "xe_guc.h" +#include "xe_guc_log.h" #include "xe_guc_relay.h" #include "xe_guc_submit.h" #include "xe_map.h" #include "xe_pm.h" #include "xe_trace_guc.h" +static void receive_g2h(struct xe_guc_ct *ct); +static void g2h_worker_func(struct work_struct *w); +static void safe_mode_worker_func(struct work_struct *w); +static void ct_exit_safe_mode(struct xe_guc_ct *ct); + +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +enum { + /* Internal states, not error conditions */ + CT_DEAD_STATE_REARM, /* 0x0001 */ + CT_DEAD_STATE_CAPTURE, /* 0x0002 */ + + /* Error conditions */ + CT_DEAD_SETUP, /* 0x0004 */ + CT_DEAD_H2G_WRITE, /* 0x0008 */ + CT_DEAD_H2G_HAS_ROOM, /* 0x0010 */ + CT_DEAD_G2H_READ, /* 0x0020 */ + CT_DEAD_G2H_RECV, /* 0x0040 */ + CT_DEAD_G2H_RELEASE, /* 0x0080 */ + CT_DEAD_DEADLOCK, /* 0x0100 */ + CT_DEAD_PROCESS_FAILED, /* 0x0200 */ + CT_DEAD_FAST_G2H, /* 0x0400 */ + CT_DEAD_PARSE_G2H_RESPONSE, /* 0x0800 */ + CT_DEAD_PARSE_G2H_UNKNOWN, /* 0x1000 */ + CT_DEAD_PARSE_G2H_ORIGIN, /* 0x2000 */ + CT_DEAD_PARSE_G2H_TYPE, /* 0x4000 */ +}; + +static void ct_dead_worker_func(struct work_struct *w); +static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code); + +#define CT_DEAD(ct, ctb, reason_code) ct_dead_capture((ct), (ctb), CT_DEAD_##reason_code) +#else +#define CT_DEAD(ct, ctb, reason) \ + do { \ + struct guc_ctb *_ctb = (ctb); \ + if (_ctb) \ + _ctb->info.broken = true; \ + } while (0) +#endif + /* Used when a CT send wants to block and / or receive data */ struct g2h_fence { u32 *response_buffer; @@ -147,14 +188,11 @@ static void guc_ct_fini(struct drm_device *drm, void *arg) { struct xe_guc_ct *ct = arg; + ct_exit_safe_mode(ct); destroy_workqueue(ct->g2h_wq); xa_destroy(&ct->fence_lookup); } -static void receive_g2h(struct xe_guc_ct *ct); -static void g2h_worker_func(struct work_struct *w); -static void safe_mode_worker_func(struct work_struct *w); - static void primelockdep(struct xe_guc_ct *ct) { if (!IS_ENABLED(CONFIG_LOCKDEP)) @@ -182,7 +220,11 @@ int xe_guc_ct_init(struct xe_guc_ct *ct) spin_lock_init(&ct->fast_lock); xa_init(&ct->fence_lookup); INIT_WORK(&ct->g2h_worker, g2h_worker_func); - INIT_DELAYED_WORK(&ct->safe_mode_worker, safe_mode_worker_func); + INIT_DELAYED_WORK(&ct->safe_mode_worker, 
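The CT_DEAD_* enumerators above are bit positions, so ct->dead.reason accumulates a mask (the hex comments give the resulting bit values, and the codes only exist under CONFIG_DRM_XE_DEBUG). A small hypothetical decoder for the "reason=0x%X" value that ct_dead_print() later emits:

static bool ct_dead_reason_has(unsigned int reason, int code)
{
        return reason & (1 << code);
}

/*
 * Example: a logged reason of 0x0108 decodes as CT_DEAD_DEADLOCK (0x0100)
 * plus CT_DEAD_H2G_WRITE (0x0008).
 */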
safe_mode_worker_func); +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) + spin_lock_init(&ct->dead.lock); + INIT_WORK(&ct->dead.worker, ct_dead_worker_func); +#endif init_waitqueue_head(&ct->wq); init_waitqueue_head(&ct->g2h_fence_wq); @@ -419,10 +461,22 @@ int xe_guc_ct_enable(struct xe_guc_ct *ct) if (ct_needs_safe_mode(ct)) ct_enter_safe_mode(ct); +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) + /* + * The CT has now been reset so the dumper can be re-armed + * after any existing dead state has been dumped. + */ + spin_lock_irq(&ct->dead.lock); + if (ct->dead.reason) + ct->dead.reason |= (1 << CT_DEAD_STATE_REARM); + spin_unlock_irq(&ct->dead.lock); +#endif + return 0; err_out: xe_gt_err(gt, "Failed to enable GuC CT (%pe)\n", ERR_PTR(err)); + CT_DEAD(ct, NULL, SETUP); return err; } @@ -469,6 +523,19 @@ static bool h2g_has_room(struct xe_guc_ct *ct, u32 cmd_len) if (cmd_len > h2g->info.space) { h2g->info.head = desc_read(ct_to_xe(ct), h2g, head); + + if (h2g->info.head > h2g->info.size) { + struct xe_device *xe = ct_to_xe(ct); + u32 desc_status = desc_read(xe, h2g, status); + + desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + + xe_gt_err(ct_to_gt(ct), "CT: invalid head offset %u >= %u)\n", + h2g->info.head, h2g->info.size); + CT_DEAD(ct, h2g, H2G_HAS_ROOM); + return false; + } + h2g->info.space = CIRC_SPACE(h2g->info.tail, h2g->info.head, h2g->info.size) - h2g->info.resv_space; @@ -524,10 +591,24 @@ static void __g2h_reserve_space(struct xe_guc_ct *ct, u32 g2h_len, u32 num_g2h) static void __g2h_release_space(struct xe_guc_ct *ct, u32 g2h_len) { + bool bad = false; + lockdep_assert_held(&ct->fast_lock); - xe_gt_assert(ct_to_gt(ct), ct->ctbs.g2h.info.space + g2h_len <= - ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space); - xe_gt_assert(ct_to_gt(ct), ct->g2h_outstanding); + + bad = ct->ctbs.g2h.info.space + g2h_len > + ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space; + bad |= !ct->g2h_outstanding; + + if (bad) { + xe_gt_err(ct_to_gt(ct), "Invalid G2H release: %d + %d vs %d - %d -> %d vs %d, outstanding = %d!\n", + ct->ctbs.g2h.info.space, g2h_len, + ct->ctbs.g2h.info.size, ct->ctbs.g2h.info.resv_space, + ct->ctbs.g2h.info.space + g2h_len, + ct->ctbs.g2h.info.size - ct->ctbs.g2h.info.resv_space, + ct->g2h_outstanding); + CT_DEAD(ct, &ct->ctbs.g2h, G2H_RELEASE); + return; + } ct->ctbs.g2h.info.space += g2h_len; if (!--ct->g2h_outstanding) @@ -554,12 +635,43 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, u32 full_len; struct iosys_map map = IOSYS_MAP_INIT_OFFSET(&h2g->cmds, tail * sizeof(u32)); + u32 desc_status; full_len = len + GUC_CTB_HDR_LEN; lockdep_assert_held(&ct->lock); xe_gt_assert(gt, full_len <= GUC_CTB_MSG_MAX_LEN); - xe_gt_assert(gt, tail <= h2g->info.size); + + desc_status = desc_read(xe, h2g, status); + if (desc_status) { + xe_gt_err(gt, "CT write: non-zero status: %u\n", desc_status); + goto corrupted; + } + + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { + u32 desc_tail = desc_read(xe, h2g, tail); + u32 desc_head = desc_read(xe, h2g, head); + + if (tail != desc_tail) { + desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_MISMATCH); + xe_gt_err(gt, "CT write: tail was modified %u != %u\n", desc_tail, tail); + goto corrupted; + } + + if (tail > h2g->info.size) { + desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + xe_gt_err(gt, "CT write: tail out of range: %u vs %u\n", + tail, h2g->info.size); + goto corrupted; + } + + if (desc_head >= h2g->info.size) { + desc_write(xe, h2g, status, desc_status | GUC_CTB_STATUS_OVERFLOW); 
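The h2g_has_room() hardening above re-reads the ring head from the shared descriptor, so the value must be bounds-checked before it feeds the circular-buffer math. A self-contained sketch of that check (helper name hypothetical; the CIRC_SPACE() argument order mirrors the driver's use, with the driver-owned tail as the producer index):

#include <linux/circ_buf.h>

static bool h2g_space_ok(u32 tail, u32 head, u32 size, u32 resv_space, u32 cmd_len)
{
        /* a head beyond the ring size means the descriptor is corrupt */
        if (head > size)
                return false;

        return cmd_len <= CIRC_SPACE(tail, head, size) - resv_space;
}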
+ xe_gt_err(gt, "CT write: invalid head offset %u >= %u)\n", + desc_head, h2g->info.size); + goto corrupted; + } + } /* Command will wrap, zero fill (NOPs), return and check credits again */ if (tail + full_len > h2g->info.size) { @@ -612,6 +724,10 @@ static int h2g_write(struct xe_guc_ct *ct, const u32 *action, u32 len, desc_read(xe, h2g, head), h2g->info.tail); return 0; + +corrupted: + CT_DEAD(ct, &ct->ctbs.h2g, H2G_WRITE); + return -EPIPE; } /* @@ -719,7 +835,6 @@ static int guc_ct_send_locked(struct xe_guc_ct *ct, const u32 *action, u32 len, { struct xe_device *xe = ct_to_xe(ct); struct xe_gt *gt = ct_to_gt(ct); - struct drm_printer p = xe_gt_info_printer(gt); unsigned int sleep_period_ms = 1; int ret; @@ -772,8 +887,13 @@ try_again: goto broken; #undef g2h_avail - if (dequeue_one_g2h(ct) < 0) + ret = dequeue_one_g2h(ct); + if (ret < 0) { + if (ret != -ECANCELED) + xe_gt_err(ct_to_gt(ct), "CTB receive failed (%pe)", + ERR_PTR(ret)); goto broken; + } goto try_again; } @@ -782,8 +902,7 @@ try_again: broken: xe_gt_err(gt, "No forward process on H2G, reset required\n"); - xe_guc_ct_print(ct, &p, true); - ct->ctbs.h2g.info.broken = true; + CT_DEAD(ct, &ct->ctbs.h2g, DEADLOCK); return -EDEADLK; } @@ -851,7 +970,7 @@ static bool retry_failure(struct xe_guc_ct *ct, int ret) #define ct_alive(ct) \ (xe_guc_ct_enabled(ct) && !ct->ctbs.h2g.info.broken && \ !ct->ctbs.g2h.info.broken) - if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5)) + if (!wait_event_interruptible_timeout(ct->wq, ct_alive(ct), HZ * 5)) return false; #undef ct_alive @@ -1049,6 +1168,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) else xe_gt_err(gt, "unexpected response %u for FAST_REQ H2G fence 0x%x!\n", type, fence); + CT_DEAD(ct, NULL, PARSE_G2H_RESPONSE); return -EPROTO; } @@ -1056,6 +1176,7 @@ static int parse_g2h_response(struct xe_guc_ct *ct, u32 *msg, u32 len) g2h_fence = xa_erase(&ct->fence_lookup, fence); if (unlikely(!g2h_fence)) { /* Don't tear down channel, as send could've timed out */ + /* CT_DEAD(ct, NULL, PARSE_G2H_UNKNOWN); */ xe_gt_warn(gt, "G2H fence (%u) not found!\n", fence); g2h_release_space(ct, GUC_CTB_HXG_MSG_MAX_LEN); return 0; @@ -1100,7 +1221,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) if (unlikely(origin != GUC_HXG_ORIGIN_GUC)) { xe_gt_err(gt, "G2H channel broken on read, origin=%u, reset required\n", origin); - ct->ctbs.g2h.info.broken = true; + CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_ORIGIN); return -EPROTO; } @@ -1118,7 +1239,7 @@ static int parse_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) default: xe_gt_err(gt, "G2H channel broken on read, type=%u, reset required\n", type); - ct->ctbs.g2h.info.broken = true; + CT_DEAD(ct, &ct->ctbs.g2h, PARSE_G2H_TYPE); ret = -EOPNOTSUPP; } @@ -1195,9 +1316,11 @@ static int process_g2h_msg(struct xe_guc_ct *ct, u32 *msg, u32 len) xe_gt_err(gt, "unexpected G2H action 0x%04x\n", action); } - if (ret) + if (ret) { xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n", action, ERR_PTR(ret)); + CT_DEAD(ct, NULL, PROCESS_FAILED); + } return 0; } @@ -1207,7 +1330,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) struct xe_device *xe = ct_to_xe(ct); struct xe_gt *gt = ct_to_gt(ct); struct guc_ctb *g2h = &ct->ctbs.g2h; - u32 tail, head, len; + u32 tail, head, len, desc_status; s32 avail; u32 action; u32 *hxg; @@ -1226,6 +1349,63 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) xe_gt_assert(gt, xe_guc_ct_enabled(ct)); + desc_status = desc_read(xe, g2h, 
status); + if (desc_status) { + if (desc_status & GUC_CTB_STATUS_DISABLED) { + /* + * Potentially valid if a CLIENT_RESET request resulted in + * contexts/engines being reset. But should never happen as + * no contexts should be active when CLIENT_RESET is sent. + */ + xe_gt_err(gt, "CT read: unexpected G2H after GuC has stopped!\n"); + desc_status &= ~GUC_CTB_STATUS_DISABLED; + } + + if (desc_status) { + xe_gt_err(gt, "CT read: non-zero status: %u\n", desc_status); + goto corrupted; + } + } + + if (IS_ENABLED(CONFIG_DRM_XE_DEBUG)) { + u32 desc_tail = desc_read(xe, g2h, tail); + /* + u32 desc_head = desc_read(xe, g2h, head); + + * info.head and desc_head are updated back-to-back at the end of + * this function and nowhere else. Hence, they cannot be different + * unless two g2h_read calls are running concurrently. Which is not + * possible because it is guarded by ct->fast_lock. And yet, some + * discrete platforms are regularly hitting this error :(. + * + * desc_head rolling backwards shouldn't cause any noticeable + * problems - just a delay in GuC being allowed to proceed past that + * point in the queue. So for now, just disable the error until it + * can be root caused. + * + if (g2h->info.head != desc_head) { + desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_MISMATCH); + xe_gt_err(gt, "CT read: head was modified %u != %u\n", + desc_head, g2h->info.head); + goto corrupted; + } + */ + + if (g2h->info.head > g2h->info.size) { + desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + xe_gt_err(gt, "CT read: head out of range: %u vs %u\n", + g2h->info.head, g2h->info.size); + goto corrupted; + } + + if (desc_tail >= g2h->info.size) { + desc_write(xe, g2h, status, desc_status | GUC_CTB_STATUS_OVERFLOW); + xe_gt_err(gt, "CT read: invalid tail offset %u >= %u)\n", + desc_tail, g2h->info.size); + goto corrupted; + } + } + /* Calculate DW available to read */ tail = desc_read(xe, g2h, tail); avail = tail - g2h->info.head; @@ -1242,9 +1422,7 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) if (len > avail) { xe_gt_err(gt, "G2H channel broken on read, avail=%d, len=%d, reset required\n", avail, len); - g2h->info.broken = true; - - return -EPROTO; + goto corrupted; } head = (g2h->info.head + 1) % g2h->info.size; @@ -1290,6 +1468,10 @@ static int g2h_read(struct xe_guc_ct *ct, u32 *msg, bool fast_path) action, len, g2h->info.head, tail); return len; + +corrupted: + CT_DEAD(ct, &ct->ctbs.g2h, G2H_READ); + return -EPROTO; } static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len) @@ -1316,9 +1498,11 @@ static void g2h_fast_path(struct xe_guc_ct *ct, u32 *msg, u32 len) xe_gt_warn(gt, "NOT_POSSIBLE"); } - if (ret) + if (ret) { xe_gt_err(gt, "G2H action 0x%04x failed (%pe)\n", action, ERR_PTR(ret)); + CT_DEAD(ct, NULL, FAST_G2H); + } } /** @@ -1378,7 +1562,6 @@ static int dequeue_one_g2h(struct xe_guc_ct *ct) static void receive_g2h(struct xe_guc_ct *ct) { - struct xe_gt *gt = ct_to_gt(ct); bool ongoing; int ret; @@ -1415,9 +1598,8 @@ static void receive_g2h(struct xe_guc_ct *ct) mutex_unlock(&ct->lock); if (unlikely(ret == -EPROTO || ret == -EOPNOTSUPP)) { - struct drm_printer p = xe_gt_info_printer(gt); - - xe_guc_ct_print(ct, &p, false); + xe_gt_err(ct_to_gt(ct), "CT dequeue failed: %d", ret); + CT_DEAD(ct, NULL, G2H_RECV); kick_reset(ct); } } while (ret == 1); @@ -1445,9 +1627,8 @@ static void guc_ctb_snapshot_capture(struct xe_device *xe, struct guc_ctb *ctb, snapshot->cmds = kmalloc_array(ctb->info.size, sizeof(u32), atomic ?
GFP_ATOMIC : GFP_KERNEL); - if (!snapshot->cmds) { - drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CTB info will be available.\n"); + drm_err(&xe->drm, "Skipping CTB commands snapshot. Only CT info will be available.\n"); return; } @@ -1528,7 +1709,7 @@ struct xe_guc_ct_snapshot *xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, atomic ? GFP_ATOMIC : GFP_KERNEL); if (!snapshot) { - drm_err(&xe->drm, "Skipping CTB snapshot entirely.\n"); + xe_gt_err(ct_to_gt(ct), "Skipping CTB snapshot entirely.\n"); return NULL; } @@ -1592,16 +1773,119 @@ void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot) * xe_guc_ct_print - GuC CT Print. * @ct: GuC CT. * @p: drm_printer where it will be printed out. - * @atomic: Boolean to indicate if this is called from atomic context like - * reset or CTB handler or from some regular path like debugfs. * * This function quickly capture a snapshot and immediately print it out. */ -void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic) +void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p) { struct xe_guc_ct_snapshot *snapshot; - snapshot = xe_guc_ct_snapshot_capture(ct, atomic); + snapshot = xe_guc_ct_snapshot_capture(ct, false); xe_guc_ct_snapshot_print(snapshot, p); xe_guc_ct_snapshot_free(snapshot); } + +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +static void ct_dead_capture(struct xe_guc_ct *ct, struct guc_ctb *ctb, u32 reason_code) +{ + struct xe_guc_log_snapshot *snapshot_log; + struct xe_guc_ct_snapshot *snapshot_ct; + struct xe_guc *guc = ct_to_guc(ct); + unsigned long flags; + bool have_capture; + + if (ctb) + ctb->info.broken = true; + + /* Ignore further errors after the first dump until a reset */ + if (ct->dead.reported) + return; + + spin_lock_irqsave(&ct->dead.lock, flags); + + /* And only capture one dump at a time */ + have_capture = ct->dead.reason & (1 << CT_DEAD_STATE_CAPTURE); + ct->dead.reason |= (1 << reason_code) | + (1 << CT_DEAD_STATE_CAPTURE); + + spin_unlock_irqrestore(&ct->dead.lock, flags); + + if (have_capture) + return; + + snapshot_log = xe_guc_log_snapshot_capture(&guc->log, true); + snapshot_ct = xe_guc_ct_snapshot_capture((ct), true); + + spin_lock_irqsave(&ct->dead.lock, flags); + + if (ct->dead.snapshot_log || ct->dead.snapshot_ct) { + xe_gt_err(ct_to_gt(ct), "Got unexpected dead CT capture!\n"); + xe_guc_log_snapshot_free(snapshot_log); + xe_guc_ct_snapshot_free(snapshot_ct); + } else { + ct->dead.snapshot_log = snapshot_log; + ct->dead.snapshot_ct = snapshot_ct; + } + + spin_unlock_irqrestore(&ct->dead.lock, flags); + + queue_work(system_unbound_wq, &(ct)->dead.worker); +} + +static void ct_dead_print(struct xe_dead_ct *dead) +{ + struct xe_guc_ct *ct = container_of(dead, struct xe_guc_ct, dead); + struct xe_device *xe = ct_to_xe(ct); + struct xe_gt *gt = ct_to_gt(ct); + static int g_count; + struct drm_printer ip = xe_gt_info_printer(gt); + struct drm_printer lp = drm_line_printer(&ip, "Capture", ++g_count); + + if (!dead->reason) { + xe_gt_err(gt, "CTB is dead for no reason!?\n"); + return; + } + + drm_printf(&lp, "CTB is dead - reason=0x%X\n", dead->reason); + + /* Can't generate a genuine core dump at this point, so just do the good bits */ + drm_puts(&lp, "**** Xe Device Coredump ****\n"); + xe_device_snapshot_print(xe, &lp); + + drm_printf(&lp, "**** GT #%d ****\n", gt->info.id); + drm_printf(&lp, "\tTile: %d\n", gt->tile->id); + + drm_puts(&lp, "**** GuC Log ****\n"); + xe_guc_log_snapshot_print(dead->snapshot_log, &lp); + + drm_puts(&lp, "**** GuC CT ****\n"); + 
xe_guc_ct_snapshot_print(dead->snapshot_ct, &lp); + + drm_puts(&lp, "Done.\n"); +} + +static void ct_dead_worker_func(struct work_struct *w) +{ + struct xe_guc_ct *ct = container_of(w, struct xe_guc_ct, dead.worker); + + if (!ct->dead.reported) { + ct->dead.reported = true; + ct_dead_print(&ct->dead); + } + + spin_lock_irq(&ct->dead.lock); + + xe_guc_log_snapshot_free(ct->dead.snapshot_log); + ct->dead.snapshot_log = NULL; + xe_guc_ct_snapshot_free(ct->dead.snapshot_ct); + ct->dead.snapshot_ct = NULL; + + if (ct->dead.reason & (1 << CT_DEAD_STATE_REARM)) { + /* A reset has occurred so re-arm the error reporting */ + ct->dead.reason = 0; + ct->dead.reported = false; + } + + spin_unlock_irq(&ct->dead.lock); +} +#endif diff --git a/drivers/gpu/drm/xe/xe_guc_ct.h b/drivers/gpu/drm/xe/xe_guc_ct.h index 13e316668e90..c7ac9407b861 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct.h +++ b/drivers/gpu/drm/xe/xe_guc_ct.h @@ -21,7 +21,7 @@ xe_guc_ct_snapshot_capture(struct xe_guc_ct *ct, bool atomic); void xe_guc_ct_snapshot_print(struct xe_guc_ct_snapshot *snapshot, struct drm_printer *p); void xe_guc_ct_snapshot_free(struct xe_guc_ct_snapshot *snapshot); -void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p, bool atomic); +void xe_guc_ct_print(struct xe_guc_ct *ct, struct drm_printer *p); static inline bool xe_guc_ct_initialized(struct xe_guc_ct *ct) { diff --git a/drivers/gpu/drm/xe/xe_guc_ct_types.h b/drivers/gpu/drm/xe/xe_guc_ct_types.h index 761cb9031298..85e127ec91d7 100644 --- a/drivers/gpu/drm/xe/xe_guc_ct_types.h +++ b/drivers/gpu/drm/xe/xe_guc_ct_types.h @@ -86,6 +86,24 @@ enum xe_guc_ct_state { XE_GUC_CT_STATE_ENABLED, }; +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) +/** struct xe_dead_ct - Information for debugging a dead CT */ +struct xe_dead_ct { + /** @lock: protects memory allocation/free operations, and @reason updates */ + spinlock_t lock; + /** @reason: bit mask of CT_DEAD_* reason codes */ + unsigned int reason; + /** @reported: for preventing multiple dumps per error sequence */ + bool reported; + /** @worker: worker thread to get out of interrupt context before dumping */ + struct work_struct worker; + /** snapshot_ct: copy of CT state and CTB content at point of error */ + struct xe_guc_ct_snapshot *snapshot_ct; + /** snapshot_log: copy of GuC log at point of error */ + struct xe_guc_log_snapshot *snapshot_log; +}; +#endif + /** * struct xe_guc_ct - GuC command transport (CT) layer * @@ -128,6 +146,11 @@ struct xe_guc_ct { u32 msg[GUC_CTB_MSG_MAX_LEN]; /** @fast_msg: Message buffer */ u32 fast_msg[GUC_CTB_MSG_MAX_LEN]; + +#if IS_ENABLED(CONFIG_DRM_XE_DEBUG) + /** @dead: information for debugging dead CTs */ + struct xe_dead_ct dead; +#endif }; #endif diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c index f978da8be35c..af02803c145b 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.c +++ b/drivers/gpu/drm/xe/xe_guc_pc.c @@ -6,6 +6,9 @@ #include "xe_guc_pc.h" #include <linux/delay.h> +#include <linux/jiffies.h> +#include <linux/ktime.h> +#include <linux/wait_bit.h> #include <drm/drm_managed.h> #include <generated/xe_wa_oob.h> @@ -47,6 +50,12 @@ #define LNL_MERT_FREQ_CAP 800 #define BMG_MERT_FREQ_CAP 2133 +#define BMG_MIN_FREQ 1200 +#define BMG_MERT_FLUSH_FREQ_CAP 2600 + +#define SLPC_RESET_TIMEOUT_MS 5 /* roughly 5ms, but no need for precision */ +#define SLPC_RESET_EXTENDED_TIMEOUT_MS 1000 /* To be used only at pc_start */ +#define SLPC_ACT_FREQ_TIMEOUT_MS 100 /** * DOC: GuC Power Conservation (PC) @@ -133,6 +142,36 @@ static int wait_for_pc_state(struct 
xe_guc_pc *pc, return -ETIMEDOUT; } +static int wait_for_flush_complete(struct xe_guc_pc *pc) +{ + const unsigned long timeout = msecs_to_jiffies(30); + + if (!wait_var_event_timeout(&pc->flush_freq_limit, + !atomic_read(&pc->flush_freq_limit), + timeout)) + return -ETIMEDOUT; + + return 0; +} + +static int wait_for_act_freq_limit(struct xe_guc_pc *pc, u32 freq) +{ + int timeout_us = SLPC_ACT_FREQ_TIMEOUT_MS * USEC_PER_MSEC; + int slept, wait = 10; + + for (slept = 0; slept < timeout_us;) { + if (xe_guc_pc_get_act_freq(pc) <= freq) + return 0; + + usleep_range(wait, wait << 1); + slept += wait; + wait <<= 1; + if (slept + wait > timeout_us) + wait = timeout_us - slept; + } + + return -ETIMEDOUT; +} static int pc_action_reset(struct xe_guc_pc *pc) { struct xe_guc_ct *ct = pc_to_ct(pc); @@ -584,6 +623,11 @@ int xe_guc_pc_set_max_freq(struct xe_guc_pc *pc, u32 freq) { int ret; + if (XE_WA(pc_to_gt(pc), 22019338487)) { + if (wait_for_flush_complete(pc) != 0) + return -EAGAIN; + } + mutex_lock(&pc->freq_lock); if (!pc->freq_ready) { /* Might be in the middle of a gt reset */ @@ -793,6 +837,106 @@ static int pc_adjust_requested_freq(struct xe_guc_pc *pc) return ret; } +static bool needs_flush_freq_limit(struct xe_guc_pc *pc) +{ + struct xe_gt *gt = pc_to_gt(pc); + + return XE_WA(gt, 22019338487) && + pc->rp0_freq > BMG_MERT_FLUSH_FREQ_CAP; +} + +/** + * xe_guc_pc_apply_flush_freq_limit() - Limit max GT freq during L2 flush + * @pc: the xe_guc_pc object + * + * As per the WA, reduce max GT frequency during L2 cache flush + */ +void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc) +{ + struct xe_gt *gt = pc_to_gt(pc); + u32 max_freq; + int ret; + + if (!needs_flush_freq_limit(pc)) + return; + + mutex_lock(&pc->freq_lock); + + if (!pc->freq_ready) { + mutex_unlock(&pc->freq_lock); + return; + } + + ret = pc_action_query_task_state(pc); + if (ret) { + mutex_unlock(&pc->freq_lock); + return; + } + + max_freq = pc_get_max_freq(pc); + if (max_freq > BMG_MERT_FLUSH_FREQ_CAP) { + ret = pc_set_max_freq(pc, BMG_MERT_FLUSH_FREQ_CAP); + if (ret) { + xe_gt_err_once(gt, "Failed to cap max freq on flush to %u, %pe\n", + BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret)); + mutex_unlock(&pc->freq_lock); + return; + } + + atomic_set(&pc->flush_freq_limit, 1); + + /* + * If user has previously changed max freq, stash that value to + * restore later, otherwise use the current max. New user + * requests wait on flush. + */ + if (pc->user_requested_max != 0) + pc->stashed_max_freq = pc->user_requested_max; + else + pc->stashed_max_freq = max_freq; + } + + mutex_unlock(&pc->freq_lock); + + /* + * Wait for actual freq to go below the flush cap: even if the previous + * max was below cap, the current one might still be above it + */ + ret = wait_for_act_freq_limit(pc, BMG_MERT_FLUSH_FREQ_CAP); + if (ret) + xe_gt_err_once(gt, "Actual freq did not reduce to %u, %pe\n", + BMG_MERT_FLUSH_FREQ_CAP, ERR_PTR(ret)); +} + +/** + * xe_guc_pc_remove_flush_freq_limit() - Remove max GT freq limit after L2 flush completes. + * @pc: the xe_guc_pc object + * + * Retrieve the previous GT max frequency value. 
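The flush_freq_limit flag introduced in this series gates user max-frequency writes against an in-progress L2 flush; the waiter and the releaser pair up through wait_var_event_timeout()/wake_up_var() on the same variable. Condensed from the patch (fragments, not complete functions):

/* waiter (xe_guc_pc_set_max_freq): give up after roughly 30ms */
if (!wait_var_event_timeout(&pc->flush_freq_limit,
                            !atomic_read(&pc->flush_freq_limit),
                            msecs_to_jiffies(30)))
        return -EAGAIN;

/* releaser (xe_guc_pc_remove_flush_freq_limit): clear the flag, then wake */
atomic_set(&pc->flush_freq_limit, 0);
wake_up_var(&pc->flush_freq_limit);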
+ */ +void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc) +{ + struct xe_gt *gt = pc_to_gt(pc); + int ret = 0; + + if (!needs_flush_freq_limit(pc)) + return; + + if (!atomic_read(&pc->flush_freq_limit)) + return; + + mutex_lock(&pc->freq_lock); + + ret = pc_set_max_freq(&gt->uc.guc.pc, pc->stashed_max_freq); + if (ret) + xe_gt_err_once(gt, "Failed to restore max freq %u:%d", + pc->stashed_max_freq, ret); + + atomic_set(&pc->flush_freq_limit, 0); + mutex_unlock(&pc->freq_lock); + wake_up_var(&pc->flush_freq_limit); +} + static int pc_set_mert_freq_cap(struct xe_guc_pc *pc) { int ret = 0; diff --git a/drivers/gpu/drm/xe/xe_guc_pc.h b/drivers/gpu/drm/xe/xe_guc_pc.h index efda432fadfc..7154b3aab0d8 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc.h +++ b/drivers/gpu/drm/xe/xe_guc_pc.h @@ -34,5 +34,7 @@ u64 xe_guc_pc_mc6_residency(struct xe_guc_pc *pc); void xe_guc_pc_init_early(struct xe_guc_pc *pc); int xe_guc_pc_restore_stashed_freq(struct xe_guc_pc *pc); void xe_guc_pc_raise_unslice(struct xe_guc_pc *pc); +void xe_guc_pc_apply_flush_freq_limit(struct xe_guc_pc *pc); +void xe_guc_pc_remove_flush_freq_limit(struct xe_guc_pc *pc); #endif /* _XE_GUC_PC_H_ */ diff --git a/drivers/gpu/drm/xe/xe_guc_pc_types.h b/drivers/gpu/drm/xe/xe_guc_pc_types.h index 13810be015db..5b86d91296cb 100644 --- a/drivers/gpu/drm/xe/xe_guc_pc_types.h +++ b/drivers/gpu/drm/xe/xe_guc_pc_types.h @@ -15,6 +15,8 @@ struct xe_guc_pc { /** @bo: GGTT buffer object that is shared with GuC PC */ struct xe_bo *bo; + /** @flush_freq_limit: 1 when max freq changes are limited by driver */ + atomic_t flush_freq_limit; /** @rp0_freq: HW RP0 frequency - The Maximum one */ u32 rp0_freq; /** @rpe_freq: HW RPe frequency - The Efficient one */ diff --git a/drivers/gpu/drm/xe/xe_irq.c b/drivers/gpu/drm/xe/xe_irq.c index 5f2c368c35ad..14c3a476597a 100644 --- a/drivers/gpu/drm/xe/xe_irq.c +++ b/drivers/gpu/drm/xe/xe_irq.c @@ -173,7 +173,7 @@ void xe_irq_enable_hwe(struct xe_gt *gt) if (ccs_mask & (BIT(0)|BIT(1))) xe_mmio_write32(gt, CCS0_CCS1_INTR_MASK, ~dmask); if (ccs_mask & (BIT(2)|BIT(3))) - xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask); + xe_mmio_write32(gt, CCS2_CCS3_INTR_MASK, ~dmask); } if (xe_gt_is_media_type(gt) || MEDIA_VER(xe) < 13) { @@ -504,7 +504,7 @@ static void gt_irq_reset(struct xe_tile *tile) if (ccs_mask & (BIT(0)|BIT(1))) xe_mmio_write32(mmio, CCS0_CCS1_INTR_MASK, ~0); if (ccs_mask & (BIT(2)|BIT(3))) - xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0); + xe_mmio_write32(mmio, CCS2_CCS3_INTR_MASK, ~0); if ((tile->media_gt && xe_hw_engine_mask_per_class(tile->media_gt, XE_ENGINE_CLASS_OTHER)) || diff --git a/drivers/gpu/drm/xe/xe_lmtt.c b/drivers/gpu/drm/xe/xe_lmtt.c index 8999ac511555..485658f69fba 100644 --- a/drivers/gpu/drm/xe/xe_lmtt.c +++ b/drivers/gpu/drm/xe/xe_lmtt.c @@ -78,6 +78,9 @@ static struct xe_lmtt_pt *lmtt_pt_alloc(struct xe_lmtt *lmtt, unsigned int level } lmtt_assert(lmtt, xe_bo_is_vram(bo)); + lmtt_debug(lmtt, "level=%u addr=%#llx\n", level, (u64)xe_bo_main_addr(bo, XE_PAGE_SIZE)); + + xe_map_memset(lmtt_to_xe(lmtt), &bo->vmap, 0, 0, bo->size); pt->level = level; pt->bo = bo; @@ -91,6 +94,9 @@ out: static void lmtt_pt_free(struct xe_lmtt_pt *pt) { + lmtt_debug(&pt->bo->tile->sriov.pf.lmtt, "level=%u addr=%llx\n", + pt->level, (u64)xe_bo_main_addr(pt->bo, XE_PAGE_SIZE)); + xe_bo_unpin_map_no_vm(pt->bo); kfree(pt); } @@ -226,9 +232,14 @@ static void lmtt_write_pte(struct xe_lmtt *lmtt, struct xe_lmtt_pt *pt, switch (lmtt->ops->lmtt_pte_size(level)) { case sizeof(u32): + lmtt_assert(lmtt,
!overflows_type(pte, u32)); + lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u32), u32)); + xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u32), u32, pte); break; case sizeof(u64): + lmtt_assert(lmtt, !pte || !iosys_map_rd(&pt->bo->vmap, idx * sizeof(u64), u64)); + xe_map_wr(lmtt_to_xe(lmtt), &pt->bo->vmap, idx * sizeof(u64), u64, pte); break; default: diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c index 6431697c6169..c2da2691fd2b 100644 --- a/drivers/gpu/drm/xe/xe_migrate.c +++ b/drivers/gpu/drm/xe/xe_migrate.c @@ -860,7 +860,7 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m, if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it)) xe_res_next(&src_it, src_L0); else - emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs, + emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs || use_comp_pat, &src_it, src_L0, src); if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it)) diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 23028afbbe1d..da09c26249f5 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -164,7 +164,6 @@ static const struct xe_graphics_desc graphics_xelpg = { .has_asid = 1, \ .has_atomic_enable_pte_bit = 1, \ .has_flat_ccs = 1, \ - .has_indirect_ring_state = 1, \ .has_range_tlb_invalidation = 1, \ .has_usm = 1, \ .va_bits = 48, \ diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c index 06f50aa31326..46c73ff10c74 100644 --- a/drivers/gpu/drm/xe/xe_pm.c +++ b/drivers/gpu/drm/xe/xe_pm.c @@ -682,11 +682,13 @@ void xe_pm_assert_unbounded_bridge(struct xe_device *xe) } /** - * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold + * xe_pm_set_vram_threshold - Set a VRAM threshold for allowing/blocking D3Cold * @xe: xe device instance - * @threshold: VRAM size in bites for the D3cold threshold + * @threshold: VRAM size in MiB for the D3cold threshold * - * Returns 0 for success, negative error code otherwise. 
+ * Return: + * * 0 - success + * * -EINVAL - invalid argument */ int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold) { diff --git a/drivers/gpu/drm/xe/xe_trace_bo.h b/drivers/gpu/drm/xe/xe_trace_bo.h index ba0f61e7d2d6..4ff023b5d040 100644 --- a/drivers/gpu/drm/xe/xe_trace_bo.h +++ b/drivers/gpu/drm/xe/xe_trace_bo.h @@ -189,7 +189,7 @@ DECLARE_EVENT_CLASS(xe_vm, ), TP_printk("dev=%s, vm=%p, asid=0x%05x", __get_str(dev), - __entry->vm, __entry->asid) + __entry->vm, __entry->asid) ); DEFINE_EVENT(xe_vm, xe_vm_kill, diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 155deef867ac..c2783d04c6e0 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1873,9 +1873,12 @@ u8 *hid_alloc_report_buf(struct hid_report *report, gfp_t flags) /* * 7 extra bytes are necessary to achieve proper functionality * of implement() working on 8 byte chunks + * 1 extra byte for the report ID if it is null (not used) so + * we can reserve that extra byte in the first position of the buffer + * when sending it to .raw_request() */ - u32 len = hid_report_len(report) + 7; + u32 len = hid_report_len(report) + 7 + (report->id == 0); return kzalloc(len, flags); } @@ -1963,7 +1966,7 @@ static struct hid_report *hid_get_report(struct hid_report_enum *report_enum, int __hid_request(struct hid_device *hid, struct hid_report *report, enum hid_class_request reqtype) { - char *buf; + char *buf, *data_buf; int ret; u32 len; @@ -1971,13 +1974,19 @@ int __hid_request(struct hid_device *hid, struct hid_report *report, if (!buf) return -ENOMEM; + data_buf = buf; len = hid_report_len(report); + if (report->id == 0) { + /* reserve the first byte for the report ID */ + data_buf++; + len++; + } + if (reqtype == HID_REQ_SET_REPORT) - hid_output_report(report, buf); + hid_output_report(report, data_buf); - ret = hid->ll_driver->raw_request(hid, report->id, buf, len, - report->type, reqtype); + ret = hid_hw_raw_request(hid, report->id, buf, len, report->type, reqtype); if (ret < 0) { dbg_hid("unable to complete request: %d\n", ret); goto out; diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index c6424f625948..b472140421f5 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -311,6 +311,8 @@ #define USB_DEVICE_ID_ASUS_AK1D 0x1125 #define USB_DEVICE_ID_CHICONY_TOSHIBA_WT10A 0x1408 #define USB_DEVICE_ID_CHICONY_ACER_SWITCH12 0x1421 +#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA 0xb824 +#define USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2 0xb82c #define USB_VENDOR_ID_CHUNGHWAT 0x2247 #define USB_DEVICE_ID_CHUNGHWAT_MULTITOUCH 0x0001 @@ -814,6 +816,7 @@ #define USB_DEVICE_ID_LENOVO_TPPRODOCK 0x6067 #define USB_DEVICE_ID_LENOVO_X1_COVER 0x6085 #define USB_DEVICE_ID_LENOVO_X1_TAB 0x60a3 +#define USB_DEVICE_ID_LENOVO_X1_TAB2 0x60a4 #define USB_DEVICE_ID_LENOVO_X1_TAB3 0x60b5 #define USB_DEVICE_ID_LENOVO_X12_TAB 0x60fe #define USB_DEVICE_ID_LENOVO_X12_TAB2 0x61ae @@ -1518,4 +1521,7 @@ #define USB_VENDOR_ID_SIGNOTEC 0x2133 #define USB_DEVICE_ID_SIGNOTEC_VIEWSONIC_PD1011 0x0018 +#define USB_VENDOR_ID_SMARTLINKTECHNOLOGY 0x4c4a +#define USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155 0x4155 + #endif diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 56e530860cae..8482852c662d 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -473,6 +473,7 @@ static int lenovo_input_mapping(struct hid_device *hdev, return lenovo_input_mapping_tp10_ultrabook_kbd(hdev, hi, field, usage, bit, max); case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB2: case 
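The hid-core change above reserves one leading byte whenever a report has no ID, so .raw_request() always sees the (zero) report ID in front of the payload. The resulting buffer handling, condensed from the __hid_request() hunk:

char *buf = hid_alloc_report_buf(report, GFP_KERNEL); /* zeroed; +1 byte when id == 0 */
char *data_buf = buf;
u32 len = hid_report_len(report);

if (report->id == 0) {
        data_buf++;     /* byte 0 stays 0: the "no report ID" marker */
        len++;          /* account for the reserved byte */
}
/* hid_output_report() fills data_buf; buf is what reaches raw_request() */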
USB_DEVICE_ID_LENOVO_X1_TAB3: return lenovo_input_mapping_x1_tab_kbd(hdev, hi, field, usage, bit, max); default: @@ -587,6 +588,7 @@ static ssize_t attr_fn_lock_store(struct device *dev, break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB2: case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_led_set_tp10ubkbd(hdev, TP10UBKBD_FN_LOCK_LED, value); if (ret) @@ -781,6 +783,7 @@ static int lenovo_event(struct hid_device *hdev, struct hid_field *field, return lenovo_event_cptkbd(hdev, field, usage, value); case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB2: case USB_DEVICE_ID_LENOVO_X1_TAB3: return lenovo_event_tp10ubkbd(hdev, field, usage, value); default: @@ -1062,6 +1065,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev, break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB2: case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_led_set_tp10ubkbd(hdev, tp10ubkbd_led[led_nr], value); break; @@ -1293,6 +1297,7 @@ static int lenovo_probe(struct hid_device *hdev, break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB2: case USB_DEVICE_ID_LENOVO_X1_TAB3: ret = lenovo_probe_tp10ubkbd(hdev); break; @@ -1380,6 +1385,7 @@ static void lenovo_remove(struct hid_device *hdev) break; case USB_DEVICE_ID_LENOVO_TP10UBKBD: case USB_DEVICE_ID_LENOVO_X1_TAB: + case USB_DEVICE_ID_LENOVO_X1_TAB2: case USB_DEVICE_ID_LENOVO_X1_TAB3: lenovo_remove_tp10ubkbd(hdev); break; @@ -1431,6 +1437,8 @@ static const struct hid_device_id lenovo_devices[] = { { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) }, { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, + USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB2) }, + { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB3) }, { } }; diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c index 93b5c648ef82..641292cfdaa6 100644 --- a/drivers/hid/hid-multitouch.c +++ b/drivers/hid/hid-multitouch.c @@ -2116,12 +2116,18 @@ static const struct hid_device_id mt_devices[] = { HID_DEVICE(BUS_I2C, HID_GROUP_GENERIC, USB_VENDOR_ID_LG, I2C_DEVICE_ID_LG_7010) }, - /* Lenovo X1 TAB Gen 2 */ + /* Lenovo X1 TAB Gen 1 */ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_TAB) }, + /* Lenovo X1 TAB Gen 2 */ + { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, + HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, + USB_VENDOR_ID_LENOVO, + USB_DEVICE_ID_LENOVO_X1_TAB2) }, + /* Lenovo X1 TAB Gen 3 */ { .driver_data = MT_CLS_WIN_8_FORCE_MULTI_INPUT, HID_DEVICE(BUS_USB, HID_GROUP_MULTITOUCH_WIN_8, diff --git a/drivers/hid/hid-nintendo.c b/drivers/hid/hid-nintendo.c index 55153a2f7988..2a3ae1068739 100644 --- a/drivers/hid/hid-nintendo.c +++ b/drivers/hid/hid-nintendo.c @@ -308,6 +308,7 @@ enum joycon_ctlr_state { JOYCON_CTLR_STATE_INIT, JOYCON_CTLR_STATE_READ, JOYCON_CTLR_STATE_REMOVED, + JOYCON_CTLR_STATE_SUSPENDED, }; /* Controller type received as part of device info */ @@ -2754,14 +2755,46 @@ static void nintendo_hid_remove(struct hid_device *hdev) static int nintendo_hid_resume(struct hid_device *hdev) { - int ret = joycon_init(hdev); + struct joycon_ctlr *ctlr = hid_get_drvdata(hdev); + int ret; + + hid_dbg(hdev, "resume\n"); + if (!joycon_using_usb(ctlr)) { + hid_dbg(hdev, "no-op 
resume for bt ctlr\n"); + ctlr->ctlr_state = JOYCON_CTLR_STATE_READ; + return 0; + } + ret = joycon_init(hdev); if (ret) - hid_err(hdev, "Failed to restore controller after resume"); + hid_err(hdev, + "Failed to restore controller after resume: %d\n", + ret); + else + ctlr->ctlr_state = JOYCON_CTLR_STATE_READ; return ret; } +static int nintendo_hid_suspend(struct hid_device *hdev, pm_message_t message) +{ + struct joycon_ctlr *ctlr = hid_get_drvdata(hdev); + + hid_dbg(hdev, "suspend: %d\n", message.event); + /* + * Avoid any blocking loops in suspend/resume transitions. + * + * joycon_enforce_subcmd_rate() can result in repeated retries if for + * whatever reason the controller stops providing input reports. + * + * This has been observed with bluetooth controllers which lose + * connectivity prior to suspend (but not long enough to result in + * complete disconnection). + */ + ctlr->ctlr_state = JOYCON_CTLR_STATE_SUSPENDED; + return 0; +} + #endif static const struct hid_device_id nintendo_hid_devices[] = { @@ -2800,6 +2833,7 @@ static struct hid_driver nintendo_hid_driver = { #ifdef CONFIG_PM .resume = nintendo_hid_resume, + .suspend = nintendo_hid_suspend, #endif }; static int __init nintendo_init(void) diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 73979643315b..80372342c176 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -747,6 +747,8 @@ static const struct hid_device_id hid_ignore_list[] = { { HID_USB_DEVICE(USB_VENDOR_ID_AVERMEDIA, USB_DEVICE_ID_AVER_FM_MR800) }, { HID_USB_DEVICE(USB_VENDOR_ID_AXENTIA, USB_DEVICE_ID_AXENTIA_FM_RADIO) }, { HID_USB_DEVICE(USB_VENDOR_ID_BERKSHIRE, USB_DEVICE_ID_BERKSHIRE_PCWD) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA) }, + { HID_USB_DEVICE(USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_HP_5MP_CAMERA2) }, { HID_USB_DEVICE(USB_VENDOR_ID_CIDC, 0x0103) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI470X) }, { HID_USB_DEVICE(USB_VENDOR_ID_CYGNAL, USB_DEVICE_ID_CYGNAL_RADIO_SI4713) }, @@ -894,6 +896,7 @@ static const struct hid_device_id hid_ignore_list[] = { #endif { HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) }, { HID_USB_DEVICE(USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_QUANTA_HP_5MP_CAMERA_5473) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SMARTLINKTECHNOLOGY, USB_DEVICE_ID_SMARTLINKTECHNOLOGY_4155) }, { } }; diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 1f519e925f06..616e63fb2f15 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1810,7 +1810,6 @@ static struct bin_attribute chan_attr_ring_buffer = { .name = "ring", .mode = 0600, }, - .size = 2 * SZ_2M, .mmap = hv_mmap_ring_buffer_wrapper, }; static struct attribute *vmbus_chan_attrs[] = { @@ -1866,6 +1865,7 @@ static umode_t vmbus_chan_bin_attr_is_visible(struct kobject *kobj, /* Hide ring attribute if channel's ring_sysfs_visible is set to false */ if (attr == &chan_attr_ring_buffer && !channel->ring_sysfs_visible) return 0; + attr->size = channel->ringbuffer_pagecount << PAGE_SHIFT; return attr->attr.mode; } diff --git a/drivers/hwmon/corsair-cpro.c b/drivers/hwmon/corsair-cpro.c index e1a7f7aa7f80..b7b911f8359c 100644 --- a/drivers/hwmon/corsair-cpro.c +++ b/drivers/hwmon/corsair-cpro.c @@ -89,6 +89,7 @@ struct ccp_device { struct mutex mutex; /* whenever buffer is used, lock before send_usb_cmd */ u8 *cmd_buffer; u8 *buffer; + int buffer_recv_size; /* number of received bytes in buffer */ int target[6]; DECLARE_BITMAP(temp_cnct, 
NUM_TEMP_SENSORS); DECLARE_BITMAP(fan_cnct, NUM_FANS); @@ -146,6 +147,9 @@ static int send_usb_cmd(struct ccp_device *ccp, u8 command, u8 byte1, u8 byte2, if (!t) return -ETIMEDOUT; + if (ccp->buffer_recv_size != IN_BUFFER_SIZE) + return -EPROTO; + return ccp_get_errno(ccp); } @@ -157,6 +161,7 @@ static int ccp_raw_event(struct hid_device *hdev, struct hid_report *report, u8 spin_lock(&ccp->wait_input_report_lock); if (!completion_done(&ccp->wait_input_report)) { memcpy(ccp->buffer, data, min(IN_BUFFER_SIZE, size)); + ccp->buffer_recv_size = size; complete_all(&ccp->wait_input_report); } spin_unlock(&ccp->wait_input_report_lock); diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 2254abda5c46..0e679cc50148 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -937,6 +937,7 @@ config I2C_OMAP tristate "OMAP I2C adapter" depends on ARCH_OMAP || ARCH_K3 || COMPILE_TEST default MACH_OMAP_OSK + select MULTIPLEXER help If you say yes to this option, support will be included for the I2C interface on the Texas Instruments OMAP1/2 family of processors. diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index 28188c6d0555..52dc666c3ef4 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -346,6 +346,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs, dev->msgs = msgs; dev->msgs_num = num_msgs; + dev->msg_write_idx = 0; i2c_dw_xfer_init(dev); /* Initiate messages read/write transaction */ diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c index 8c9cf08ad45e..0bdee43dc134 100644 --- a/drivers/i2c/busses/i2c-omap.c +++ b/drivers/i2c/busses/i2c-omap.c @@ -24,6 +24,7 @@ #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> +#include <linux/mux/consumer.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/platform_data/i2c-omap.h> @@ -211,6 +212,7 @@ struct omap_i2c_dev { u16 syscstate; u16 westate; u16 errata; + struct mux_state *mux_state; }; static const u8 reg_map_ip_v1[] = { @@ -1452,8 +1454,27 @@ omap_i2c_probe(struct platform_device *pdev) (1000 * omap->speed / 8); } + if (of_property_present(node, "mux-states")) { + struct mux_state *mux_state; + + mux_state = devm_mux_state_get(&pdev->dev, NULL); + if (IS_ERR(mux_state)) { + r = PTR_ERR(mux_state); + dev_dbg(&pdev->dev, "failed to get I2C mux: %d\n", r); + goto err_put_pm; + } + omap->mux_state = mux_state; + r = mux_state_select(omap->mux_state); + if (r) { + dev_err(&pdev->dev, "failed to select I2C mux: %d\n", r); + goto err_put_pm; + } + } + /* reset ASAP, clearing any IRQs */ - omap_i2c_init(omap); + r = omap_i2c_init(omap); + if (r) + goto err_mux_state_deselect; if (omap->rev < OMAP_I2C_OMAP1_REV_2) r = devm_request_irq(&pdev->dev, omap->irq, omap_i2c_omap1_isr, @@ -1496,6 +1517,10 @@ omap_i2c_probe(struct platform_device *pdev) err_unuse_clocks: omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0); +err_mux_state_deselect: + if (omap->mux_state) + mux_state_deselect(omap->mux_state); +err_put_pm: pm_runtime_dont_use_autosuspend(omap->dev); pm_runtime_put_sync(omap->dev); err_disable_pm: @@ -1511,6 +1536,9 @@ static void omap_i2c_remove(struct platform_device *pdev) i2c_del_adapter(&omap->adapter); + if (omap->mux_state) + mux_state_deselect(omap->mux_state); + ret = pm_runtime_get_sync(&pdev->dev); if (ret < 0) dev_err(omap->dev, "Failed to resume hardware, skip disable\n"); diff --git 
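[Editor's note on the corsair-cpro hunks above: the driver now records how many bytes the HID raw event actually delivered and fails the command with -EPROTO unless a full IN_BUFFER_SIZE report arrived. A minimal userspace sketch of that defensive pattern; the buffer size and the "first byte is an error code" convention are illustrative assumptions, not taken from the driver:

#include <errno.h>
#include <stdio.h>

#define IN_BUFFER_SIZE 64	/* expected report length (assumption) */

/* Parse a response only if the transport delivered a complete report. */
static int parse_response(const unsigned char *buf, int recv_size)
{
	if (recv_size != IN_BUFFER_SIZE)
		return -EPROTO;	/* short or oversized report: protocol error */
	/* safe: all IN_BUFFER_SIZE bytes are valid to read */
	return buf[0];		/* pretend byte 0 carries a status code */
}

int main(void)
{
	unsigned char report[IN_BUFFER_SIZE] = { 0 };

	printf("full report:  %d\n", parse_response(report, sizeof(report)));
	printf("short report: %d\n", parse_response(report, 16));
	return 0;
}
]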
a/drivers/i2c/busses/i2c-qup.c b/drivers/i2c/busses/i2c-qup.c index eb97abcb4cd3..592b754f48e9 100644 --- a/drivers/i2c/busses/i2c-qup.c +++ b/drivers/i2c/busses/i2c-qup.c @@ -452,8 +452,10 @@ static int qup_i2c_bus_active(struct qup_i2c_dev *qup, int len) if (!(status & I2C_STATUS_BUS_ACTIVE)) break; - if (time_after(jiffies, timeout)) + if (time_after(jiffies, timeout)) { ret = -ETIMEDOUT; + break; + } usleep_range(len, len * 2); } diff --git a/drivers/i2c/busses/i2c-stm32.c b/drivers/i2c/busses/i2c-stm32.c index 157c64e27d0b..f84ec056e36d 100644 --- a/drivers/i2c/busses/i2c-stm32.c +++ b/drivers/i2c/busses/i2c-stm32.c @@ -102,7 +102,6 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma, void *dma_async_param) { struct dma_async_tx_descriptor *txdesc; - struct device *chan_dev; int ret; if (rd_wr) { @@ -116,11 +115,10 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma, } dma->dma_len = len; - chan_dev = dma->chan_using->device->dev; - dma->dma_buf = dma_map_single(chan_dev, buf, dma->dma_len, + dma->dma_buf = dma_map_single(dev, buf, dma->dma_len, dma->dma_data_dir); - if (dma_mapping_error(chan_dev, dma->dma_buf)) { + if (dma_mapping_error(dev, dma->dma_buf)) { dev_err(dev, "DMA mapping failed\n"); return -EINVAL; } @@ -150,7 +148,7 @@ int stm32_i2c_prep_dma_xfer(struct device *dev, struct stm32_i2c_dma *dma, return 0; err: - dma_unmap_single(chan_dev, dma->dma_buf, dma->dma_len, + dma_unmap_single(dev, dma->dma_buf, dma->dma_len, dma->dma_data_dir); return ret; } diff --git a/drivers/i2c/busses/i2c-stm32f7.c b/drivers/i2c/busses/i2c-stm32f7.c index 0174ead99de6..a4587f281216 100644 --- a/drivers/i2c/busses/i2c-stm32f7.c +++ b/drivers/i2c/busses/i2c-stm32f7.c @@ -739,12 +739,13 @@ static void stm32f7_i2c_disable_dma_req(struct stm32f7_i2c_dev *i2c_dev) static void stm32f7_i2c_dma_callback(void *arg) { - struct stm32f7_i2c_dev *i2c_dev = (struct stm32f7_i2c_dev *)arg; + struct stm32f7_i2c_dev *i2c_dev = arg; struct stm32_i2c_dma *dma = i2c_dev->dma; - struct device *dev = dma->chan_using->device->dev; stm32f7_i2c_disable_dma_req(i2c_dev); - dma_unmap_single(dev, dma->dma_buf, dma->dma_len, dma->dma_data_dir); + dmaengine_terminate_async(dma->chan_using); + dma_unmap_single(i2c_dev->dev, dma->dma_buf, dma->dma_len, + dma->dma_data_dir); complete(&dma->dma_complete); } @@ -1510,7 +1511,6 @@ static irqreturn_t stm32f7_i2c_handle_isr_errs(struct stm32f7_i2c_dev *i2c_dev, u16 addr = f7_msg->addr; void __iomem *base = i2c_dev->base; struct device *dev = i2c_dev->dev; - struct stm32_i2c_dma *dma = i2c_dev->dma; /* Bus error */ if (status & STM32F7_I2C_ISR_BERR) { @@ -1551,10 +1551,8 @@ static irqreturn_t stm32f7_i2c_handle_isr_errs(struct stm32f7_i2c_dev *i2c_dev, } /* Disable dma */ - if (i2c_dev->use_dma) { - stm32f7_i2c_disable_dma_req(i2c_dev); - dmaengine_terminate_async(dma->chan_using); - } + if (i2c_dev->use_dma) + stm32f7_i2c_dma_callback(i2c_dev); i2c_dev->master_mode = false; complete(&i2c_dev->complete); @@ -1600,7 +1598,6 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data) { struct stm32f7_i2c_dev *i2c_dev = data; struct stm32f7_i2c_msg *f7_msg = &i2c_dev->f7_msg; - struct stm32_i2c_dma *dma = i2c_dev->dma; void __iomem *base = i2c_dev->base; u32 status, mask; int ret; @@ -1619,10 +1616,8 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data) dev_dbg(i2c_dev->dev, "<%s>: Receive NACK (addr %x)\n", __func__, f7_msg->addr); writel_relaxed(STM32F7_I2C_ICR_NACKCF, base + STM32F7_I2C_ICR); - if 
(i2c_dev->use_dma) { - stm32f7_i2c_disable_dma_req(i2c_dev); - dmaengine_terminate_async(dma->chan_using); - } + if (i2c_dev->use_dma) + stm32f7_i2c_dma_callback(i2c_dev); f7_msg->result = -ENXIO; } @@ -1640,8 +1635,7 @@ static irqreturn_t stm32f7_i2c_isr_event_thread(int irq, void *data) ret = wait_for_completion_timeout(&i2c_dev->dma->dma_complete, HZ); if (!ret) { dev_dbg(i2c_dev->dev, "<%s>: Timed out\n", __func__); - stm32f7_i2c_disable_dma_req(i2c_dev); - dmaengine_terminate_async(dma->chan_using); + stm32f7_i2c_dma_callback(i2c_dev); f7_msg->result = -ETIMEDOUT; } } diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 89ce8a62b37c..fbab82d457fb 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -607,7 +607,6 @@ static int tegra_i2c_wait_for_config_load(struct tegra_i2c_dev *i2c_dev) static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) { u32 val, clk_divisor, clk_multiplier, tsu_thd, tlow, thigh, non_hs_mode; - acpi_handle handle = ACPI_HANDLE(i2c_dev->dev); struct i2c_timings *t = &i2c_dev->timings; int err; @@ -619,11 +618,7 @@ static int tegra_i2c_init(struct tegra_i2c_dev *i2c_dev) * emit a noisy warning on error, which won't stay unnoticed and * won't hose machine entirely. */ - if (handle) - err = acpi_evaluate_object(handle, "_RST", NULL, NULL); - else - err = reset_control_reset(i2c_dev->rst); - + err = device_reset(i2c_dev->dev); WARN_ON_ONCE(err); if (IS_DVC(i2c_dev)) @@ -1666,19 +1661,6 @@ static void tegra_i2c_parse_dt(struct tegra_i2c_dev *i2c_dev) i2c_dev->is_vi = true; } -static int tegra_i2c_init_reset(struct tegra_i2c_dev *i2c_dev) -{ - if (ACPI_HANDLE(i2c_dev->dev)) - return 0; - - i2c_dev->rst = devm_reset_control_get_exclusive(i2c_dev->dev, "i2c"); - if (IS_ERR(i2c_dev->rst)) - return dev_err_probe(i2c_dev->dev, PTR_ERR(i2c_dev->rst), - "failed to get reset control\n"); - - return 0; -} - static int tegra_i2c_init_clocks(struct tegra_i2c_dev *i2c_dev) { int err; @@ -1788,10 +1770,6 @@ static int tegra_i2c_probe(struct platform_device *pdev) tegra_i2c_parse_dt(i2c_dev); - err = tegra_i2c_init_reset(i2c_dev); - if (err) - return err; - err = tegra_i2c_init_clocks(i2c_dev); if (err) return err; diff --git a/drivers/i2c/busses/i2c-virtio.c b/drivers/i2c/busses/i2c-virtio.c index 2a351f961b89..c8c40ff9765d 100644 --- a/drivers/i2c/busses/i2c-virtio.c +++ b/drivers/i2c/busses/i2c-virtio.c @@ -116,15 +116,16 @@ static int virtio_i2c_complete_reqs(struct virtqueue *vq, for (i = 0; i < num; i++) { struct virtio_i2c_req *req = &reqs[i]; - wait_for_completion(&req->completion); - - if (!failed && req->in_hdr.status != VIRTIO_I2C_MSG_OK) - failed = true; + if (!failed) { + if (wait_for_completion_interruptible(&req->completion)) + failed = true; + else if (req->in_hdr.status != VIRTIO_I2C_MSG_OK) + failed = true; + else + j++; + } i2c_put_dma_safe_msg_buf(reqs[i].buf, &msgs[i], !failed); - - if (!failed) - j++; } return j; diff --git a/drivers/iio/accel/fxls8962af-core.c b/drivers/iio/accel/fxls8962af-core.c index 5e17c1e6d2c7..a4e4e7964a1a 100644 --- a/drivers/iio/accel/fxls8962af-core.c +++ b/drivers/iio/accel/fxls8962af-core.c @@ -865,6 +865,8 @@ static int fxls8962af_buffer_predisable(struct iio_dev *indio_dev) if (ret) return ret; + synchronize_irq(data->irq); + ret = __fxls8962af_fifo_set_mode(data, false); if (data->enable_event) diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c index 0e371efbda70..7394ea72948b 100644 --- a/drivers/iio/accel/st_accel_core.c +++ 
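[Editor's note on the i2c-qup hunk above: it fixes a classic polling bug, where the loop set ret = -ETIMEDOUT but kept polling because the break was missing. A standalone sketch of the corrected loop shape; the status check and iteration budget are stand-ins for the driver's jiffies-based timeout, not its actual code:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static bool bus_active(int *countdown)
{
	return --(*countdown) > 0;	/* fake "bus busy" bit for the demo */
}

static int wait_bus_idle(int budget_iters)
{
	int ret = 0, fake = 1000000;	/* never goes idle in this run */

	while (bus_active(&fake)) {
		if (--budget_iters <= 0) {
			ret = -ETIMEDOUT;
			break;		/* without this break we spin forever */
		}
		/* a usleep_range() equivalent would go here */
	}
	return ret;
}

int main(void)
{
	printf("ret = %d\n", wait_bus_idle(10));
	return 0;
}
]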
b/drivers/iio/accel/st_accel_core.c @@ -1353,6 +1353,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev) union acpi_object *ont; union acpi_object *elements; acpi_status status; + struct device *parent = indio_dev->dev.parent; int ret = -EINVAL; unsigned int val; int i, j; @@ -1371,7 +1372,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev) }; - adev = ACPI_COMPANION(indio_dev->dev.parent); + adev = ACPI_COMPANION(parent); if (!adev) return -ENXIO; @@ -1380,8 +1381,7 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev) if (status == AE_NOT_FOUND) { return -ENXIO; } else if (ACPI_FAILURE(status)) { - dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n", - status); + dev_warn(parent, "failed to execute _ONT: %d\n", status); return status; } @@ -1457,12 +1457,12 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev) } ret = 0; - dev_info(&indio_dev->dev, "computed mount matrix from ACPI\n"); + dev_info(parent, "computed mount matrix from ACPI\n"); out: kfree(buffer.pointer); if (ret) - dev_dbg(&indio_dev->dev, + dev_dbg(parent, "failed to apply ACPI orientation data: %d\n", ret); return ret; diff --git a/drivers/iio/adc/ad7949.c b/drivers/iio/adc/ad7949.c index edd0c3a35ab7..202561cad401 100644 --- a/drivers/iio/adc/ad7949.c +++ b/drivers/iio/adc/ad7949.c @@ -308,7 +308,6 @@ static void ad7949_disable_reg(void *reg) static int ad7949_spi_probe(struct spi_device *spi) { - u32 spi_ctrl_mask = spi->controller->bits_per_word_mask; struct device *dev = &spi->dev; const struct ad7949_adc_spec *spec; struct ad7949_adc_chip *ad7949_adc; @@ -337,11 +336,11 @@ static int ad7949_spi_probe(struct spi_device *spi) ad7949_adc->resolution = spec->resolution; /* Set SPI bits per word */ - if (spi_ctrl_mask & SPI_BPW_MASK(ad7949_adc->resolution)) { + if (spi_is_bpw_supported(spi, ad7949_adc->resolution)) { spi->bits_per_word = ad7949_adc->resolution; - } else if (spi_ctrl_mask == SPI_BPW_MASK(16)) { + } else if (spi_is_bpw_supported(spi, 16)) { spi->bits_per_word = 16; - } else if (spi_ctrl_mask == SPI_BPW_MASK(8)) { + } else if (spi_is_bpw_supported(spi, 8)) { spi->bits_per_word = 8; } else { dev_err(dev, "unable to find common BPW with spi controller\n"); diff --git a/drivers/iio/adc/axp20x_adc.c b/drivers/iio/adc/axp20x_adc.c index 6c1a5d1b0a83..0226dfbcf4ae 100644 --- a/drivers/iio/adc/axp20x_adc.c +++ b/drivers/iio/adc/axp20x_adc.c @@ -217,6 +217,7 @@ static struct iio_map axp717_maps[] = { .consumer_channel = "batt_chrg_i", .adc_channel_label = "batt_chrg_i", }, + { } }; /* diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c index d0c6e94f7204..c9d531f233eb 100644 --- a/drivers/iio/adc/max1363.c +++ b/drivers/iio/adc/max1363.c @@ -504,10 +504,10 @@ static const struct iio_event_spec max1363_events[] = { MAX1363_CHAN_U(1, _s1, 1, bits, ev_spec, num_ev_spec), \ MAX1363_CHAN_U(2, _s2, 2, bits, ev_spec, num_ev_spec), \ MAX1363_CHAN_U(3, _s3, 3, bits, ev_spec, num_ev_spec), \ - MAX1363_CHAN_B(0, 1, d0m1, 4, bits, ev_spec, num_ev_spec), \ - MAX1363_CHAN_B(2, 3, d2m3, 5, bits, ev_spec, num_ev_spec), \ - MAX1363_CHAN_B(1, 0, d1m0, 6, bits, ev_spec, num_ev_spec), \ - MAX1363_CHAN_B(3, 2, d3m2, 7, bits, ev_spec, num_ev_spec), \ + MAX1363_CHAN_B(0, 1, d0m1, 12, bits, ev_spec, num_ev_spec), \ + MAX1363_CHAN_B(2, 3, d2m3, 13, bits, ev_spec, num_ev_spec), \ + MAX1363_CHAN_B(1, 0, d1m0, 18, bits, ev_spec, num_ev_spec), \ + MAX1363_CHAN_B(3, 2, d3m2, 19, bits, ev_spec, num_ev_spec), \ IIO_CHAN_SOFT_TIMESTAMP(8) \ } @@ -525,23 +525,23 @@ static const 
struct iio_chan_spec max1363_channels[] = /* Applies to max1236, max1237 */ static const enum max1363_modes max1236_mode_list[] = { _s0, _s1, _s2, _s3, - s0to1, s0to2, s0to3, + s0to1, s0to2, s2to3, s0to3, d0m1, d2m3, d1m0, d3m2, d0m1to2m3, d1m0to3m2, - s2to3, }; /* Applies to max1238, max1239 */ static const enum max1363_modes max1238_mode_list[] = { _s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, _s8, _s9, _s10, _s11, s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, + s6to7, s6to8, s6to9, s6to10, s6to11, s0to7, s0to8, s0to9, s0to10, s0to11, d0m1, d2m3, d4m5, d6m7, d8m9, d10m11, d1m0, d3m2, d5m4, d7m6, d9m8, d11m10, - d0m1to2m3, d0m1to4m5, d0m1to6m7, d0m1to8m9, d0m1to10m11, - d1m0to3m2, d1m0to5m4, d1m0to7m6, d1m0to9m8, d1m0to11m10, - s6to7, s6to8, s6to9, s6to10, s6to11, - d6m7to8m9, d6m7to10m11, d7m6to9m8, d7m6to11m10, + d0m1to2m3, d0m1to4m5, d0m1to6m7, d6m7to8m9, + d0m1to8m9, d6m7to10m11, d0m1to10m11, d1m0to3m2, + d1m0to5m4, d1m0to7m6, d7m6to9m8, d1m0to9m8, + d7m6to11m10, d1m0to11m10, }; #define MAX1363_12X_CHANS(bits) { \ @@ -577,16 +577,15 @@ static const struct iio_chan_spec max1238_channels[] = MAX1363_12X_CHANS(12); static const enum max1363_modes max11607_mode_list[] = { _s0, _s1, _s2, _s3, - s0to1, s0to2, s0to3, - s2to3, + s0to1, s0to2, s2to3, + s0to3, d0m1, d2m3, d1m0, d3m2, d0m1to2m3, d1m0to3m2, }; static const enum max1363_modes max11608_mode_list[] = { _s0, _s1, _s2, _s3, _s4, _s5, _s6, _s7, - s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s0to7, - s6to7, + s0to1, s0to2, s0to3, s0to4, s0to5, s0to6, s6to7, s0to7, d0m1, d2m3, d4m5, d6m7, d1m0, d3m2, d5m4, d7m6, d0m1to2m3, d0m1to4m5, d0m1to6m7, @@ -602,14 +601,14 @@ static const enum max1363_modes max11608_mode_list[] = { MAX1363_CHAN_U(5, _s5, 5, bits, NULL, 0), \ MAX1363_CHAN_U(6, _s6, 6, bits, NULL, 0), \ MAX1363_CHAN_U(7, _s7, 7, bits, NULL, 0), \ - MAX1363_CHAN_B(0, 1, d0m1, 8, bits, NULL, 0), \ - MAX1363_CHAN_B(2, 3, d2m3, 9, bits, NULL, 0), \ - MAX1363_CHAN_B(4, 5, d4m5, 10, bits, NULL, 0), \ - MAX1363_CHAN_B(6, 7, d6m7, 11, bits, NULL, 0), \ - MAX1363_CHAN_B(1, 0, d1m0, 12, bits, NULL, 0), \ - MAX1363_CHAN_B(3, 2, d3m2, 13, bits, NULL, 0), \ - MAX1363_CHAN_B(5, 4, d5m4, 14, bits, NULL, 0), \ - MAX1363_CHAN_B(7, 6, d7m6, 15, bits, NULL, 0), \ + MAX1363_CHAN_B(0, 1, d0m1, 12, bits, NULL, 0), \ + MAX1363_CHAN_B(2, 3, d2m3, 13, bits, NULL, 0), \ + MAX1363_CHAN_B(4, 5, d4m5, 14, bits, NULL, 0), \ + MAX1363_CHAN_B(6, 7, d6m7, 15, bits, NULL, 0), \ + MAX1363_CHAN_B(1, 0, d1m0, 18, bits, NULL, 0), \ + MAX1363_CHAN_B(3, 2, d3m2, 19, bits, NULL, 0), \ + MAX1363_CHAN_B(5, 4, d5m4, 20, bits, NULL, 0), \ + MAX1363_CHAN_B(7, 6, d7m6, 21, bits, NULL, 0), \ IIO_CHAN_SOFT_TIMESTAMP(16) \ } static const struct iio_chan_spec max11602_channels[] = MAX1363_8X_CHANS(8); diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c index 616dd729666a..97ea15cba9f7 100644 --- a/drivers/iio/adc/stm32-adc-core.c +++ b/drivers/iio/adc/stm32-adc-core.c @@ -429,10 +429,9 @@ static int stm32_adc_irq_probe(struct platform_device *pdev, return -ENOMEM; } - for (i = 0; i < priv->cfg->num_irqs; i++) { - irq_set_chained_handler(priv->irq[i], stm32_adc_irq_handler); - irq_set_handler_data(priv->irq[i], priv); - } + for (i = 0; i < priv->cfg->num_irqs; i++) + irq_set_chained_handler_and_data(priv->irq[i], + stm32_adc_irq_handler, priv); return 0; } diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c index 1b4287991d00..48a194b8e060 100644 --- a/drivers/iio/common/st_sensors/st_sensors_core.c +++ 
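[Editor's note on the ad7949 hunk above: the old code compared the controller's bits_per_word_mask for equality with a single SPI_BPW_MASK() value, which mishandles controllers that advertise more than one word width; spi_is_bpw_supported() tests the individual bit instead. The underlying mask arithmetic, sketched standalone (SPI_BPW_MASK is reproduced conceptually: bit N-1 set means N-bit words are supported; the widths tried are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define BIT(n)		(1u << (n))
#define SPI_BPW_MASK(b)	BIT((b) - 1)

static bool bpw_supported(unsigned int mask, unsigned int bits)
{
	return mask & SPI_BPW_MASK(bits);
}

int main(void)
{
	/* a controller supporting both 8- and 16-bit words */
	unsigned int mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
	unsigned int want[] = { 14, 16, 8 };	/* resolution, then fallbacks */

	for (unsigned int i = 0; i < 3; i++)
		if (bpw_supported(mask, want[i])) {
			printf("using %u bits per word\n", want[i]);
			break;
		}
	return 0;
}
]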
b/drivers/iio/common/st_sensors/st_sensors_core.c @@ -154,7 +154,7 @@ static int st_sensors_set_fullscale(struct iio_dev *indio_dev, unsigned int fs) return err; st_accel_set_fullscale_error: - dev_err(&indio_dev->dev, "failed to set new fullscale.\n"); + dev_err(indio_dev->dev.parent, "failed to set new fullscale.\n"); return err; } @@ -231,8 +231,7 @@ int st_sensors_power_enable(struct iio_dev *indio_dev) ARRAY_SIZE(regulator_names), regulator_names); if (err) - return dev_err_probe(&indio_dev->dev, err, - "unable to enable supplies\n"); + return dev_err_probe(parent, err, "unable to enable supplies\n"); return 0; } @@ -241,13 +240,14 @@ EXPORT_SYMBOL_NS(st_sensors_power_enable, IIO_ST_SENSORS); static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev, struct st_sensors_platform_data *pdata) { + struct device *parent = indio_dev->dev.parent; struct st_sensor_data *sdata = iio_priv(indio_dev); /* Sensor does not support interrupts */ if (!sdata->sensor_settings->drdy_irq.int1.addr && !sdata->sensor_settings->drdy_irq.int2.addr) { if (pdata->drdy_int_pin) - dev_info(&indio_dev->dev, + dev_info(parent, "DRDY on pin INT%d specified, but sensor does not support interrupts\n", pdata->drdy_int_pin); return 0; @@ -256,29 +256,27 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev, switch (pdata->drdy_int_pin) { case 1: if (!sdata->sensor_settings->drdy_irq.int1.mask) { - dev_err(&indio_dev->dev, - "DRDY on INT1 not available.\n"); + dev_err(parent, "DRDY on INT1 not available.\n"); return -EINVAL; } sdata->drdy_int_pin = 1; break; case 2: if (!sdata->sensor_settings->drdy_irq.int2.mask) { - dev_err(&indio_dev->dev, - "DRDY on INT2 not available.\n"); + dev_err(parent, "DRDY on INT2 not available.\n"); return -EINVAL; } sdata->drdy_int_pin = 2; break; default: - dev_err(&indio_dev->dev, "DRDY on pdata not valid.\n"); + dev_err(parent, "DRDY on pdata not valid.\n"); return -EINVAL; } if (pdata->open_drain) { if (!sdata->sensor_settings->drdy_irq.int1.addr_od && !sdata->sensor_settings->drdy_irq.int2.addr_od) - dev_err(&indio_dev->dev, + dev_err(parent, "open drain requested but unsupported.\n"); else sdata->int_pin_open_drain = true; @@ -336,6 +334,7 @@ EXPORT_SYMBOL_NS(st_sensors_dev_name_probe, IIO_ST_SENSORS); int st_sensors_init_sensor(struct iio_dev *indio_dev, struct st_sensors_platform_data *pdata) { + struct device *parent = indio_dev->dev.parent; struct st_sensor_data *sdata = iio_priv(indio_dev); struct st_sensors_platform_data *of_pdata; int err = 0; @@ -343,7 +342,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, mutex_init(&sdata->odr_lock); /* If OF/DT pdata exists, it will take precedence of anything else */ - of_pdata = st_sensors_dev_probe(indio_dev->dev.parent, pdata); + of_pdata = st_sensors_dev_probe(parent, pdata); if (IS_ERR(of_pdata)) return PTR_ERR(of_pdata); if (of_pdata) @@ -370,7 +369,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, if (err < 0) return err; } else - dev_info(&indio_dev->dev, "Full-scale not possible\n"); + dev_info(parent, "Full-scale not possible\n"); err = st_sensors_set_odr(indio_dev, sdata->odr); if (err < 0) @@ -405,7 +404,7 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev, mask = sdata->sensor_settings->drdy_irq.int2.mask_od; } - dev_info(&indio_dev->dev, + dev_info(parent, "set interrupt line to open drain mode on pin %d\n", sdata->drdy_int_pin); err = st_sensors_write_data_with_mask(indio_dev, addr, @@ -594,21 +593,20 @@ EXPORT_SYMBOL_NS(st_sensors_get_settings_index, IIO_ST_SENSORS); int 
st_sensors_verify_id(struct iio_dev *indio_dev) { struct st_sensor_data *sdata = iio_priv(indio_dev); + struct device *parent = indio_dev->dev.parent; int wai, err; if (sdata->sensor_settings->wai_addr) { err = regmap_read(sdata->regmap, sdata->sensor_settings->wai_addr, &wai); if (err < 0) { - dev_err(&indio_dev->dev, - "failed to read Who-Am-I register.\n"); - return err; + return dev_err_probe(parent, err, + "failed to read Who-Am-I register.\n"); } if (sdata->sensor_settings->wai != wai) { - dev_warn(&indio_dev->dev, - "%s: WhoAmI mismatch (0x%x).\n", - indio_dev->name, wai); + dev_warn(parent, "%s: WhoAmI mismatch (0x%x).\n", + indio_dev->name, wai); } } diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c index a0df9250a69f..b900acd471bd 100644 --- a/drivers/iio/common/st_sensors/st_sensors_trigger.c +++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c @@ -127,7 +127,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, sdata->trig = devm_iio_trigger_alloc(parent, "%s-trigger", indio_dev->name); if (sdata->trig == NULL) { - dev_err(&indio_dev->dev, "failed to allocate iio trigger.\n"); + dev_err(parent, "failed to allocate iio trigger.\n"); return -ENOMEM; } @@ -143,7 +143,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, case IRQF_TRIGGER_FALLING: case IRQF_TRIGGER_LOW: if (!sdata->sensor_settings->drdy_irq.addr_ihl) { - dev_err(&indio_dev->dev, + dev_err(parent, "falling/low specified for IRQ but hardware supports only rising/high: will request rising/high\n"); if (irq_trig == IRQF_TRIGGER_FALLING) irq_trig = IRQF_TRIGGER_RISING; @@ -156,21 +156,19 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, sdata->sensor_settings->drdy_irq.mask_ihl, 1); if (err < 0) return err; - dev_info(&indio_dev->dev, + dev_info(parent, "interrupts on the falling edge or active low level\n"); } break; case IRQF_TRIGGER_RISING: - dev_info(&indio_dev->dev, - "interrupts on the rising edge\n"); + dev_info(parent, "interrupts on the rising edge\n"); break; case IRQF_TRIGGER_HIGH: - dev_info(&indio_dev->dev, - "interrupts active high level\n"); + dev_info(parent, "interrupts active high level\n"); break; default: /* This is the most preferred mode, if possible */ - dev_err(&indio_dev->dev, + dev_err(parent, "unsupported IRQ trigger specified (%lx), enforce rising edge\n", irq_trig); irq_trig = IRQF_TRIGGER_RISING; } @@ -179,7 +177,7 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, if (irq_trig == IRQF_TRIGGER_FALLING || irq_trig == IRQF_TRIGGER_RISING) { if (!sdata->sensor_settings->drdy_irq.stat_drdy.addr) { - dev_err(&indio_dev->dev, + dev_err(parent, "edge IRQ not supported w/o stat register.\n"); return -EOPNOTSUPP; } @@ -214,13 +212,13 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev, sdata->trig->name, sdata->trig); if (err) { - dev_err(&indio_dev->dev, "failed to request trigger IRQ.\n"); + dev_err(parent, "failed to request trigger IRQ.\n"); return err; } err = devm_iio_trigger_register(parent, sdata->trig); if (err < 0) { - dev_err(&indio_dev->dev, "failed to register iio trigger.\n"); + dev_err(parent, "failed to register iio trigger.\n"); return err; } indio_dev->trig = iio_trigger_get(sdata->trig); diff --git a/drivers/iio/industrialio-backend.c b/drivers/iio/industrialio-backend.c index 42e0ee683ef6..a3abcdeb6281 100644 --- a/drivers/iio/industrialio-backend.c +++ b/drivers/iio/industrialio-backend.c @@ -155,11 +155,14 @@ static ssize_t 
iio_backend_debugfs_write_reg(struct file *file, ssize_t rc; int ret; + if (count >= sizeof(buf)) + return -ENOSPC; + rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count); if (rc < 0) return rc; - buf[count] = '\0'; + buf[rc] = '\0'; ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val); diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c index 26c481d2998c..25901d91a613 100644 --- a/drivers/iio/light/hid-sensor-prox.c +++ b/drivers/iio/light/hid-sensor-prox.c @@ -102,8 +102,7 @@ static int prox_read_raw(struct iio_dev *indio_dev, ret_type = prox_state->scale_precision; break; case IIO_CHAN_INFO_OFFSET: - *val = hid_sensor_convert_exponent( - prox_state->prox_attr.unit_expo); + *val = 0; ret_type = IIO_VAL_INT; break; case IIO_CHAN_INFO_SAMP_FREQ: @@ -227,6 +226,11 @@ static int prox_parse_report(struct platform_device *pdev, dev_dbg(&pdev->dev, "prox %x:%x\n", st->prox_attr.index, st->prox_attr.report_id); + st->scale_precision = hid_sensor_format_scale(hsdev->usage, + &st->prox_attr, + &st->scale_pre_decml, + &st->scale_post_decml); + return ret; } diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index b7c078b7f7cf..a1291f475466 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -582,8 +582,8 @@ static int __ib_cache_gid_add(struct ib_device *ib_dev, u32 port, out_unlock: mutex_unlock(&table->lock); if (ret) - pr_warn("%s: unable to add gid %pI6 error=%d\n", - __func__, gid->raw, ret); + pr_warn_ratelimited("%s: unable to add gid %pI6 error=%d\n", + __func__, gid->raw, ret); return ret; } diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c index 81cfa74147a1..ad6c195d077b 100644 --- a/drivers/infiniband/hw/mlx5/counters.c +++ b/drivers/infiniband/hw/mlx5/counters.c @@ -391,7 +391,7 @@ static int do_get_hw_stats(struct ib_device *ibdev, return ret; /* We don't expose device counters over Vports */ - if (is_mdev_switchdev_mode(dev->mdev) && port_num != 0) + if (is_mdev_switchdev_mode(dev->mdev) && dev->is_rep && port_num != 0) goto done; if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { @@ -411,7 +411,7 @@ static int do_get_hw_stats(struct ib_device *ibdev, */ goto done; } - ret = mlx5_lag_query_cong_counters(dev->mdev, + ret = mlx5_lag_query_cong_counters(mdev, stats->value + cnts->num_q_counters, cnts->num_cong_counters, diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index 69999d8d24f3..f49f78b69ab9 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -1914,6 +1914,7 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table, /* Level1 is valid for future use, no need to free */ return -ENOMEM; + INIT_LIST_HEAD(&obj_event->obj_sub_list); err = xa_insert(&event->object_ids, key_level2, obj_event, @@ -1922,7 +1923,6 @@ subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table, kfree(obj_event); return err; } - INIT_LIST_HEAD(&obj_event->obj_sub_list); } return 0; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8c47cb4edd0a..435c456a4fd5 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1766,6 +1766,33 @@ static void deallocate_uars(struct mlx5_ib_dev *dev, context->devx_uid); } +static int mlx5_ib_enable_lb_mp(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave) +{ + int err; + + err = 
mlx5_nic_vport_update_local_lb(master, true); + if (err) + return err; + + err = mlx5_nic_vport_update_local_lb(slave, true); + if (err) + goto out; + + return 0; + +out: + mlx5_nic_vport_update_local_lb(master, false); + return err; +} + +static void mlx5_ib_disable_lb_mp(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave) +{ + mlx5_nic_vport_update_local_lb(slave, false); + mlx5_nic_vport_update_local_lb(master, false); +} + int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) { int err = 0; @@ -3448,6 +3475,8 @@ static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, lockdep_assert_held(&mlx5_ib_multiport_mutex); + mlx5_ib_disable_lb_mp(ibdev->mdev, mpi->mdev); + mlx5_core_mp_event_replay(ibdev->mdev, MLX5_DRIVER_EVENT_AFFILIATION_REMOVED, NULL); @@ -3543,6 +3572,10 @@ static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, MLX5_DRIVER_EVENT_AFFILIATION_DONE, &key); + err = mlx5_ib_enable_lb_mp(ibdev->mdev, mpi->mdev); + if (err) + goto unbind; + return true; unbind: diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 068eac3bdb50..726b81b6330c 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -1968,7 +1968,6 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev, if (mr->mmkey.cache_ent) { spin_lock_irq(&mr->mmkey.cache_ent->mkeys_queue.lock); - mr->mmkey.cache_ent->in_use--; goto end; } @@ -2029,32 +2028,62 @@ void mlx5_ib_revoke_data_direct_mrs(struct mlx5_ib_dev *dev) } } -static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) +static int mlx5_umr_revoke_mr_with_lock(struct mlx5_ib_mr *mr) { - struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); - struct mlx5_cache_ent *ent = mr->mmkey.cache_ent; - bool is_odp = is_odp_mr(mr); bool is_odp_dma_buf = is_dmabuf_mr(mr) && - !to_ib_umem_dmabuf(mr->umem)->pinned; - int ret = 0; + !to_ib_umem_dmabuf(mr->umem)->pinned; + bool is_odp = is_odp_mr(mr); + int ret; if (is_odp) mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex); if (is_odp_dma_buf) - dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, NULL); + dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, + NULL); + + ret = mlx5r_umr_revoke_mr(mr); - if (mr->mmkey.cacheable && !mlx5r_umr_revoke_mr(mr) && !cache_ent_find_and_store(dev, mr)) { + if (is_odp) { + if (!ret) + to_ib_umem_odp(mr->umem)->private = NULL; + mutex_unlock(&to_ib_umem_odp(mr->umem)->umem_mutex); + } + + if (is_odp_dma_buf) { + if (!ret) + to_ib_umem_dmabuf(mr->umem)->private = NULL; + dma_resv_unlock( + to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv); + } + + return ret; +} + +static int mlx5r_handle_mkey_cleanup(struct mlx5_ib_mr *mr) +{ + bool is_odp_dma_buf = is_dmabuf_mr(mr) && + !to_ib_umem_dmabuf(mr->umem)->pinned; + struct mlx5_ib_dev *dev = to_mdev(mr->ibmr.device); + struct mlx5_cache_ent *ent = mr->mmkey.cache_ent; + bool is_odp = is_odp_mr(mr); + bool from_cache = !!ent; + int ret; + + if (mr->mmkey.cacheable && !mlx5_umr_revoke_mr_with_lock(mr) && + !cache_ent_find_and_store(dev, mr)) { ent = mr->mmkey.cache_ent; /* upon storing to a clean temp entry - schedule its cleanup */ spin_lock_irq(&ent->mkeys_queue.lock); + if (from_cache) + ent->in_use--; if (ent->is_tmp && !ent->tmp_cleanup_scheduled) { mod_delayed_work(ent->dev->cache.wq, &ent->dwork, msecs_to_jiffies(30 * 1000)); ent->tmp_cleanup_scheduled = true; } spin_unlock_irq(&ent->mkeys_queue.lock); - goto out; + return 0; } if (ent) { @@ -2063,8 +2092,14 @@ static int mlx5_revoke_mr(struct mlx5_ib_mr *mr) 
mr->mmkey.cache_ent = NULL; spin_unlock_irq(&ent->mkeys_queue.lock); } + + if (is_odp) + mutex_lock(&to_ib_umem_odp(mr->umem)->umem_mutex); + + if (is_odp_dma_buf) + dma_resv_lock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv, + NULL); ret = destroy_mkey(dev, mr); -out: if (is_odp) { if (!ret) to_ib_umem_odp(mr->umem)->private = NULL; @@ -2074,9 +2109,9 @@ out: if (is_odp_dma_buf) { if (!ret) to_ib_umem_dmabuf(mr->umem)->private = NULL; - dma_resv_unlock(to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv); + dma_resv_unlock( + to_ib_umem_dmabuf(mr->umem)->attach->dmabuf->resv); } - return ret; } @@ -2125,7 +2160,7 @@ static int __mlx5_ib_dereg_mr(struct ib_mr *ibmr) } /* Stop DMA */ - rc = mlx5_revoke_mr(mr); + rc = mlx5r_handle_mkey_cleanup(mr); if (rc) return rc; diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index e158d5b1ab17..98a76c9db7ab 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -247,8 +247,8 @@ static void destroy_unused_implicit_child_mr(struct mlx5_ib_mr *mr) } if (MLX5_CAP_ODP(mr_to_mdev(mr)->mdev, mem_page_fault)) - __xa_erase(&mr_to_mdev(mr)->odp_mkeys, - mlx5_base_mkey(mr->mmkey.key)); + xa_erase(&mr_to_mdev(mr)->odp_mkeys, + mlx5_base_mkey(mr->mmkey.key)); xa_unlock(&imr->implicit_children); /* Freeing a MR is a sleeping operation, so bounce to a work queue */ @@ -521,8 +521,8 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr, } if (MLX5_CAP_ODP(dev->mdev, mem_page_fault)) { - ret = __xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key), - &mr->mmkey, GFP_KERNEL); + ret = xa_store(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key), + &mr->mmkey, GFP_KERNEL); if (xa_is_err(ret)) { ret = ERR_PTR(xa_err(ret)); __xa_erase(&imr->implicit_children, idx); diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 91d329e90308..8b805b16136e 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -811,7 +811,12 @@ static void rxe_qp_do_cleanup(struct work_struct *work) spin_unlock_irqrestore(&qp->state_lock, flags); qp->qp_timeout_jiffies = 0; - if (qp_type(qp) == IB_QPT_RC) { + /* In the function timer_setup, .function is initialized. If .function + * is NULL, it indicates the function timer_setup is not called, the + * timer is not initialized. Or else, the timer is initialized. 
+ */ + if (qp_type(qp) == IB_QPT_RC && qp->retrans_timer.function && + qp->rnr_nak_timer.function) { del_timer_sync(&qp->retrans_timer); del_timer_sync(&qp->rnr_nak_timer); } diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c index da5b14602a76..f0cab6870404 100644 --- a/drivers/input/joystick/xpad.c +++ b/drivers/input/joystick/xpad.c @@ -169,6 +169,7 @@ static const struct xpad_device { { 0x046d, 0xca88, "Logitech Compact Controller for Xbox", 0, XTYPE_XBOX }, { 0x046d, 0xca8a, "Logitech Precision Vibration Feedback Wheel", 0, XTYPE_XBOX }, { 0x046d, 0xcaa3, "Logitech DriveFx Racing Wheel", 0, XTYPE_XBOX360 }, + { 0x0502, 0x1305, "Acer NGR200", 0, XTYPE_XBOX360 }, { 0x056e, 0x2004, "Elecom JC-U3613M", 0, XTYPE_XBOX360 }, { 0x05fd, 0x1007, "Mad Catz Controller (unverified)", 0, XTYPE_XBOX }, { 0x05fd, 0x107a, "InterAct 'PowerPad Pro' X-Box pad (Germany)", 0, XTYPE_XBOX }, @@ -515,6 +516,7 @@ static const struct usb_device_id xpad_table[] = { XPAD_XBOX360_VENDOR(0x045e), /* Microsoft Xbox 360 controllers */ XPAD_XBOXONE_VENDOR(0x045e), /* Microsoft Xbox One controllers */ XPAD_XBOX360_VENDOR(0x046d), /* Logitech Xbox 360-style controllers */ + XPAD_XBOX360_VENDOR(0x0502), /* Acer Inc. Xbox 360 style controllers */ XPAD_XBOX360_VENDOR(0x056e), /* Elecom JC-U3613M */ XPAD_XBOX360_VENDOR(0x06a3), /* Saitek P3600 */ XPAD_XBOX360_VENDOR(0x0738), /* Mad Catz Xbox 360 controllers */ diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c index 9514f577995f..cd14017e7df6 100644 --- a/drivers/input/keyboard/gpio_keys.c +++ b/drivers/input/keyboard/gpio_keys.c @@ -488,7 +488,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id) if (bdata->release_delay) hrtimer_start(&bdata->release_timer, ms_to_ktime(bdata->release_delay), - HRTIMER_MODE_REL_HARD); + HRTIMER_MODE_REL); out: return IRQ_HANDLED; } @@ -633,7 +633,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev, bdata->release_delay = button->debounce_interval; hrtimer_init(&bdata->release_timer, - CLOCK_REALTIME, HRTIMER_MODE_REL_HARD); + CLOCK_REALTIME, HRTIMER_MODE_REL); bdata->release_timer.function = gpio_keys_irq_timer; isr = gpio_keys_irq_isr; diff --git a/drivers/input/misc/cs40l50-vibra.c b/drivers/input/misc/cs40l50-vibra.c index dce3b0ec8cf3..330f09123631 100644 --- a/drivers/input/misc/cs40l50-vibra.c +++ b/drivers/input/misc/cs40l50-vibra.c @@ -238,6 +238,8 @@ static int cs40l50_upload_owt(struct cs40l50_work *work_data) header.data_words = len / sizeof(u32); new_owt_effect_data = kmalloc(sizeof(header) + len, GFP_KERNEL); + if (!new_owt_effect_data) + return -ENOMEM; memcpy(new_owt_effect_data, &header, sizeof(header)); memcpy(new_owt_effect_data + sizeof(header), work_data->custom_data, len); diff --git a/drivers/input/misc/iqs7222.c b/drivers/input/misc/iqs7222.c index 01c4009fd53e..846aac9a5c9d 100644 --- a/drivers/input/misc/iqs7222.c +++ b/drivers/input/misc/iqs7222.c @@ -301,6 +301,7 @@ struct iqs7222_dev_desc { int allow_offset; int event_offset; int comms_offset; + int ext_chan; bool legacy_gesture; struct iqs7222_reg_grp_desc reg_grps[IQS7222_NUM_REG_GRPS]; }; @@ -315,6 +316,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { .allow_offset = 9, .event_offset = 10, .comms_offset = 12, + .ext_chan = 10, .reg_grps = { [IQS7222_REG_GRP_STAT] = { .base = IQS7222_SYS_STATUS, @@ -373,6 +375,7 @@ static const struct iqs7222_dev_desc iqs7222_devs[] = { .allow_offset = 9, .event_offset = 10, .comms_offset = 12, + .ext_chan = 10, .legacy_gesture = 
true, .reg_grps = { [IQS7222_REG_GRP_STAT] = { @@ -2244,7 +2247,7 @@ static int iqs7222_parse_chan(struct iqs7222_private *iqs7222, const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; struct i2c_client *client = iqs7222->client; int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row; - int ext_chan = rounddown(num_chan, 10); + int ext_chan = dev_desc->ext_chan ? : num_chan; int error, i; u16 *chan_setup = iqs7222->chan_setup[chan_index]; u16 *sys_setup = iqs7222->sys_setup; @@ -2448,7 +2451,7 @@ static int iqs7222_parse_sldr(struct iqs7222_private *iqs7222, const struct iqs7222_dev_desc *dev_desc = iqs7222->dev_desc; struct i2c_client *client = iqs7222->client; int num_chan = dev_desc->reg_grps[IQS7222_REG_GRP_CHAN].num_row; - int ext_chan = rounddown(num_chan, 10); + int ext_chan = dev_desc->ext_chan ? : num_chan; int count, error, reg_offset, i; u16 *event_mask = &iqs7222->sys_setup[dev_desc->event_offset]; u16 *sldr_setup = iqs7222->sldr_setup[sldr_index]; diff --git a/drivers/interconnect/qcom/sc7280.c b/drivers/interconnect/qcom/sc7280.c index 167971f8e8be..fdb02a87e312 100644 --- a/drivers/interconnect/qcom/sc7280.c +++ b/drivers/interconnect/qcom/sc7280.c @@ -238,6 +238,7 @@ static struct qcom_icc_node xm_pcie3_1 = { .id = SC7280_MASTER_PCIE_1, .channels = 1, .buswidth = 8, + .num_links = 1, .links = { SC7280_SLAVE_ANOC_PCIE_GEM_NOC }, }; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 56e9f125cda9..af4e6c1e55db 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -4414,9 +4414,6 @@ static int device_set_dirty_tracking(struct list_head *devices, bool enable) break; } - if (!ret) - info->domain_attached = true; - return ret; } @@ -4600,6 +4597,9 @@ static int identity_domain_attach_dev(struct iommu_domain *domain, struct device ret = device_setup_pass_through(dev); } + if (!ret) + info->domain_attached = true; + return ret; } diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index ff55b8c30712..ae69691471e9 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -1087,7 +1087,7 @@ static int ipmmu_probe(struct platform_device *pdev) * - R-Car Gen3 IPMMU (leaf devices only - skip root IPMMU-MM device) */ if (!mmu->features->has_cache_leaf_nodes || !ipmmu_is_root(mmu)) { - ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, + ret = iommu_device_sysfs_add(&mmu->iommu, &pdev->dev, NULL, "%s", dev_name(&pdev->dev)); if (ret) return ret; diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 4b369419b32c..4c7f470a4752 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1154,7 +1154,6 @@ static int rk_iommu_of_xlate(struct device *dev, iommu_dev = of_find_device_by_node(args->np); data->iommu = platform_get_drvdata(iommu_dev); - data->iommu->domain = &rk_identity_domain; dev_iommu_priv_set(dev, data); platform_device_put(iommu_dev); @@ -1192,6 +1191,8 @@ static int rk_iommu_probe(struct platform_device *pdev) if (!iommu) return -ENOMEM; + iommu->domain = &rk_identity_domain; + platform_set_drvdata(pdev, iommu); iommu->dev = dev; iommu->num_mmu = 0; diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index c1f304836008..a799a89195c5 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -71,6 +71,7 @@ config ARM_VIC_NR config IRQ_MSI_LIB bool + select GENERIC_MSI_IRQ config ARMADA_370_XP_IRQ bool diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index ca60ef209df8..aaa21fe295f2 
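[Editor's note on the iqs7222 hunks above: they replace rounddown(num_chan, 10) with a per-device ext_chan field, selected via GCC's binary conditional "a ? : b", which yields a when a is nonzero and b otherwise, evaluating a only once. A compilable illustration of that GNU extension; the values are made up:

#include <stdio.h>

struct dev_desc {
	int ext_chan;	/* 0 means "no per-device override" */
};

static int effective_ext_chan(const struct dev_desc *d, int num_chan)
{
	return d->ext_chan ? : num_chan;	/* GNU extension: x ? : y */
}

int main(void)
{
	struct dev_desc with = { .ext_chan = 10 }, without = { 0 };

	printf("%d %d\n", effective_ext_chan(&with, 20),
	       effective_ext_chan(&without, 20));	/* prints "10 20" */
	return 0;
}
]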
100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -2741,7 +2741,11 @@ static unsigned long __evict_many(struct dm_bufio_client *c, __make_buffer_clean(b); __free_buffer_wake(b); - cond_resched(); + if (need_resched()) { + dm_bufio_unlock(c); + cond_resched(); + dm_bufio_lock(c); + } } return count; diff --git a/drivers/md/md-bitmap.c b/drivers/md/md-bitmap.c index c12359fd3a42..0da1d0723f88 100644 --- a/drivers/md/md-bitmap.c +++ b/drivers/md/md-bitmap.c @@ -2355,8 +2355,7 @@ static int bitmap_get_stats(void *data, struct md_bitmap_stats *stats) if (!bitmap) return -ENOENT; - if (!bitmap->mddev->bitmap_info.external && - !bitmap->storage.sb_page) + if (!bitmap->storage.sb_page) return -EINVAL; sb = kmap_local_page(bitmap->storage.sb_page); stats->sync_size = le64_to_cpu(sb->sync_size); diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 6b6cd753d61a..fe1599db69c8 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -3380,6 +3380,7 @@ static int raid1_reshape(struct mddev *mddev) /* ok, everything is stopped */ oldpool = conf->r1bio_pool; conf->r1bio_pool = newpool; + init_waitqueue_head(&conf->r1bio_pool.wait); for (d = d2 = 0; d < conf->raid_disks; d++) { struct md_rdev *rdev = conf->mirrors[d].rdev; diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index cc194f6ec18d..5cdc599fcad3 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1181,8 +1181,11 @@ static void raid10_read_request(struct mddev *mddev, struct bio *bio, } } - if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) + if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) { + raid_end_bio_io(r10_bio); return; + } + rdev = read_balance(conf, r10_bio, &max_sectors); if (!rdev) { if (err_rdev) { @@ -1368,8 +1371,11 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio, } sectors = r10_bio->sectors; - if (!regular_request_wait(mddev, conf, bio, sectors)) + if (!regular_request_wait(mddev, conf, bio, sectors)) { + raid_end_bio_io(r10_bio); return; + } + if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && (mddev->reshape_backwards ? 
(bio->bi_iter.bi_sector < conf->reshape_safe && diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index 9a3a784054cc..e6801ad14318 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -322,7 +322,7 @@ EXPORT_SYMBOL(memstick_init_req); static int h_memstick_read_dev_id(struct memstick_dev *card, struct memstick_request **mrq) { - struct ms_id_register id_reg; + struct ms_id_register id_reg = {}; if (!(*mrq)) { memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg, diff --git a/drivers/mfd/exynos-lpass.c b/drivers/mfd/exynos-lpass.c index e36805f07282..8b5fed476039 100644 --- a/drivers/mfd/exynos-lpass.c +++ b/drivers/mfd/exynos-lpass.c @@ -104,11 +104,22 @@ static const struct regmap_config exynos_lpass_reg_conf = { .fast_io = true, }; +static void exynos_lpass_disable_lpass(void *data) +{ + struct platform_device *pdev = data; + struct exynos_lpass *lpass = platform_get_drvdata(pdev); + + pm_runtime_disable(&pdev->dev); + if (!pm_runtime_status_suspended(&pdev->dev)) + exynos_lpass_disable(lpass); +} + static int exynos_lpass_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct exynos_lpass *lpass; void __iomem *base_top; + int ret; lpass = devm_kzalloc(dev, sizeof(*lpass), GFP_KERNEL); if (!lpass) @@ -134,16 +145,11 @@ static int exynos_lpass_probe(struct platform_device *pdev) pm_runtime_enable(dev); exynos_lpass_enable(lpass); - return devm_of_platform_populate(dev); -} - -static void exynos_lpass_remove(struct platform_device *pdev) -{ - struct exynos_lpass *lpass = platform_get_drvdata(pdev); + ret = devm_add_action_or_reset(dev, exynos_lpass_disable_lpass, pdev); + if (ret) + return ret; - pm_runtime_disable(&pdev->dev); - if (!pm_runtime_status_suspended(&pdev->dev)) - exynos_lpass_disable(lpass); + return devm_of_platform_populate(dev); } static int __maybe_unused exynos_lpass_suspend(struct device *dev) @@ -183,7 +189,6 @@ static struct platform_driver exynos_lpass_driver = { .of_match_table = exynos_lpass_of_match, }, .probe = exynos_lpass_probe, - .remove_new = exynos_lpass_remove, }; module_platform_driver(exynos_lpass_driver); diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h index 7f893bafaa60..c417ed34c057 100644 --- a/drivers/mmc/core/quirks.h +++ b/drivers/mmc/core/quirks.h @@ -44,6 +44,12 @@ static const struct mmc_fixup __maybe_unused mmc_sd_fixups[] = { 0, -1ull, SDIO_ANY_ID, SDIO_ANY_ID, add_quirk_sd, MMC_QUIRK_NO_UHS_DDR50_TUNING, EXT_CSD_REV_ANY), + /* + * Some SD cards reports discard support while they don't + */ + MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd, + MMC_QUIRK_BROKEN_SD_DISCARD), + END_FIXUP }; @@ -147,12 +153,6 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = { MMC_FIXUP("M62704", CID_MANFID_KINGSTON, 0x0100, add_quirk_mmc, MMC_QUIRK_TRIM_BROKEN), - /* - * Some SD cards reports discard support while they don't - */ - MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd, - MMC_QUIRK_BROKEN_SD_DISCARD), - END_FIXUP }; diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 35d8fdea668b..f923447ed2ce 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -502,7 +502,8 @@ void bcm2835_prepare_dma(struct bcm2835_host *host, struct mmc_data *data) DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { - dma_unmap_sg(dma_chan->device->dev, data->sg, sg_len, dir_data); + dma_unmap_sg(dma_chan->device->dev, data->sg, data->sg_len, + dir_data); return; } diff 
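[Editor's note on the dm-bufio hunk above: the evictor now drops the client lock before rescheduling (and only when need_resched() says so), so a preempted evictor cannot stall other lockers. A userspace analog with a pthread mutex; the yield is unconditional here for simplicity, and the eviction body is a stub:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void evict_many(int count)
{
	pthread_mutex_lock(&lock);
	for (int i = 0; i < count; i++) {
		/* ... evict one buffer while holding the lock ... */

		/* Yield only after releasing the lock, so waiters can run. */
		pthread_mutex_unlock(&lock);
		sched_yield();		/* stands in for cond_resched() */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	evict_many(4);
	puts("done");
	return 0;
}
]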
--git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index d5d868cb4edc..be9954f5bc0a 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -776,12 +776,18 @@ static inline void msdc_dma_setup(struct msdc_host *host, struct msdc_dma *dma, static void msdc_prepare_data(struct msdc_host *host, struct mmc_data *data) { if (!(data->host_cookie & MSDC_PREPARE_FLAG)) { - data->host_cookie |= MSDC_PREPARE_FLAG; data->sg_count = dma_map_sg(host->dev, data->sg, data->sg_len, mmc_get_dma_dir(data)); + if (data->sg_count) + data->host_cookie |= MSDC_PREPARE_FLAG; } } +static bool msdc_data_prepared(struct mmc_data *data) +{ + return data->host_cookie & MSDC_PREPARE_FLAG; +} + static void msdc_unprepare_data(struct msdc_host *host, struct mmc_data *data) { if (data->host_cookie & MSDC_ASYNC_FLAG) @@ -1345,8 +1351,19 @@ static void msdc_ops_request(struct mmc_host *mmc, struct mmc_request *mrq) WARN_ON(host->mrq); host->mrq = mrq; - if (mrq->data) + if (mrq->data) { msdc_prepare_data(host, mrq->data); + if (!msdc_data_prepared(mrq->data)) { + host->mrq = NULL; + /* + * Failed to prepare DMA area, fail fast before + * starting any commands. + */ + mrq->cmd->error = -ENOSPC; + mmc_request_done(mmc_from_priv(host), mrq); + return; + } + } /* if SBC is required, we have HW option and SW option. * if HW option is enabled, and SBC does not have "special" flags, diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index b0b1d403f352..76ea0e892d4e 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -912,7 +912,8 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) { return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && (dmi_match(DMI_BIOS_VENDOR, "LENOVO") || - dmi_match(DMI_SYS_VENDOR, "IRBIS")); + dmi_match(DMI_SYS_VENDOR, "IRBIS") || + dmi_match(DMI_SYS_VENDOR, "Positivo Tecnologia SA")); } static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot) diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 8ae76300d157..4b91c9e96635 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2035,15 +2035,10 @@ void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) host->mmc->actual_clock = 0; - clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); - if (clk & SDHCI_CLOCK_CARD_EN) - sdhci_writew(host, clk & ~SDHCI_CLOCK_CARD_EN, - SDHCI_CLOCK_CONTROL); + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); - if (clock == 0) { - sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); + if (clock == 0) return; - } clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); sdhci_enable_clk(host, clk); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index f531b617f28d..da1b0a46b1d9 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -825,4 +825,20 @@ void sdhci_switch_external_dma(struct sdhci_host *host, bool en); void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable); void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd); +#if defined(CONFIG_DYNAMIC_DEBUG) || \ + (defined(CONFIG_DYNAMIC_DEBUG_CORE) && defined(DYNAMIC_DEBUG_MODULE)) +#define SDHCI_DBG_ANYWAY 0 +#elif defined(DEBUG) +#define SDHCI_DBG_ANYWAY 1 +#else +#define SDHCI_DBG_ANYWAY 0 +#endif + +#define sdhci_dbg_dumpregs(host, fmt) \ +do { \ + DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ + if (DYNAMIC_DEBUG_BRANCH(descriptor) || SDHCI_DBG_ANYWAY) \ + sdhci_dumpregs(host); \ +} while (0) + #endif /* __SDHCI_HW_H */ diff --git 
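[Editor's note on the new sdhci_dbg_dumpregs() macro above: it gates an expensive register dump on either dynamic debug being enabled for that call site or a build-time DEBUG define. A reduced userspace model of the same gating idea; DYNAMIC_DEBUG_BRANCH is faked with a runtime flag, so this is an analogy, not the kernel mechanism:

#include <stdbool.h>
#include <stdio.h>

static bool dyndbg_enabled;	/* stands in for DYNAMIC_DEBUG_BRANCH() */

#ifdef DEBUG
#define DBG_ANYWAY 1		/* DEBUG builds always dump */
#else
#define DBG_ANYWAY 0
#endif

static void dumpregs(void) { puts("SDHCI register dump ..."); }

#define dbg_dumpregs()				\
do {						\
	if (dyndbg_enabled || DBG_ANYWAY)	\
		dumpregs();			\
} while (0)

int main(void)
{
	dbg_dumpregs();		/* silent unless DEBUG or flag is set */
	dyndbg_enabled = true;
	dbg_dumpregs();		/* dumps */
	return 0;
}
]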
a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index 0aa3c40ea6ed..8e0eb0acf442 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -588,7 +588,8 @@ static const struct sdhci_ops sdhci_am654_ops = { static const struct sdhci_pltfm_data sdhci_am654_pdata = { .ops = &sdhci_am654_ops, .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_DISABLE_HW_TIMEOUT, }; static const struct sdhci_am654_driver_data sdhci_am654_sr1_drvdata = { @@ -618,7 +619,8 @@ static const struct sdhci_ops sdhci_j721e_8bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = { .ops = &sdhci_j721e_8bit_ops, .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_DISABLE_HW_TIMEOUT, }; static const struct sdhci_am654_driver_data sdhci_j721e_8bit_drvdata = { @@ -642,7 +644,8 @@ static const struct sdhci_ops sdhci_j721e_4bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = { .ops = &sdhci_j721e_4bit_ops, .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_DISABLE_HW_TIMEOUT, }; static const struct sdhci_am654_driver_data sdhci_j721e_4bit_drvdata = { diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index beafca6ba0df..275d34119acd 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -2858,7 +2858,12 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ const struct nand_op_instr *instr = NULL; unsigned int op_id = 0; unsigned int len = 0; - int ret; + int ret, reg_base; + + reg_base = NAND_READ_LOCATION_0; + + if (nandc->props->qpic_v2) + reg_base = NAND_READ_LOCATION_LAST_CW_0; ret = qcom_parse_instructions(chip, subop, &q_op); if (ret) @@ -2910,7 +2915,10 @@ static int qcom_param_page_type_exec(struct nand_chip *chip, const struct nand_ op_id = q_op.data_instr_idx; len = nand_subop_get_data_len(subop, op_id); - nandc_set_read_loc(chip, 0, 0, 0, len, 1); + if (nandc->props->qpic_v2) + nandc_set_read_loc_last(chip, reg_base, 0, len, 1); + else + nandc_set_read_loc_first(chip, reg_base, 0, len, 1); if (!nandc->props->qpic_v2) { write_reg_dma(nandc, NAND_DEV_CMD_VLD, 1, 0); diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c index 4d76f9f71a0e..241f6a4df16c 100644 --- a/drivers/mtd/nand/spi/core.c +++ b/drivers/mtd/nand/spi/core.c @@ -1496,6 +1496,7 @@ static void spinand_cleanup(struct spinand_device *spinand) { struct nand_device *nand = spinand_to_nand(spinand); + nanddev_ecc_engine_cleanup(nand); nanddev_cleanup(nand); spinand_manufacturer_cleanup(spinand); kfree(spinand->databuf); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 2a513dbbd975..52ff0f9e04e0 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -497,9 +497,9 @@ static int bond_ipsec_add_sa(struct xfrm_state *xs, goto out; } - xs->xso.real_dev = real_dev; err = real_dev->xfrmdev_ops->xdo_dev_state_add(xs, extack); if (!err) { + xs->xso.real_dev = real_dev; ipsec->xs = xs; INIT_LIST_HEAD(&ipsec->list); mutex_lock(&bond->ipsec_lock); @@ -541,11 +541,11 @@ static void bond_ipsec_add_sa_all(struct bonding *bond) if (ipsec->xs->xso.real_dev == real_dev) continue; - ipsec->xs->xso.real_dev = 
real_dev; if (real_dev->xfrmdev_ops->xdo_dev_state_add(ipsec->xs, NULL)) { slave_warn(bond_dev, real_dev, "%s: failed to add SA\n", __func__); - ipsec->xs->xso.real_dev = NULL; + continue; } + ipsec->xs->xso.real_dev = real_dev; } out: mutex_unlock(&bond->ipsec_lock); @@ -627,6 +627,7 @@ static void bond_ipsec_del_sa_all(struct bonding *bond) "%s: no slave xdo_dev_state_delete\n", __func__); } else { + ipsec->xs->xso.real_dev = NULL; real_dev->xfrmdev_ops->xdo_dev_state_delete(ipsec->xs); if (real_dev->xfrmdev_ops->xdo_dev_state_free) real_dev->xfrmdev_ops->xdo_dev_state_free(ipsec->xs); @@ -661,6 +662,7 @@ static void bond_ipsec_free_sa(struct xfrm_state *xs) WARN_ON(xs->xso.real_dev != real_dev); + xs->xso.real_dev = NULL; if (real_dev && real_dev->xfrmdev_ops && real_dev->xfrmdev_ops->xdo_dev_state_free) real_dev->xfrmdev_ops->xdo_dev_state_free(xs); diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index 681643ab3780..63e4a495b137 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -147,13 +147,16 @@ void can_change_state(struct net_device *dev, struct can_frame *cf, EXPORT_SYMBOL_GPL(can_change_state); /* CAN device restart for bus-off recovery */ -static void can_restart(struct net_device *dev) +static int can_restart(struct net_device *dev) { struct can_priv *priv = netdev_priv(dev); struct sk_buff *skb; struct can_frame *cf; int err; + if (!priv->do_set_mode) + return -EOPNOTSUPP; + if (netif_carrier_ok(dev)) netdev_err(dev, "Attempt to restart for bus-off recovery, but carrier is OK?\n"); @@ -175,10 +178,14 @@ static void can_restart(struct net_device *dev) if (err) { netdev_err(dev, "Restart failed, error %pe\n", ERR_PTR(err)); netif_carrier_off(dev); + + return err; } else { netdev_dbg(dev, "Restarted\n"); priv->can_stats.restarts++; } + + return 0; } static void can_restart_work(struct work_struct *work) @@ -203,9 +210,8 @@ int can_restart_now(struct net_device *dev) return -EBUSY; cancel_delayed_work_sync(&priv->restart_work); - can_restart(dev); - return 0; + return can_restart(dev); } /* CAN bus-off diff --git a/drivers/net/can/dev/netlink.c b/drivers/net/can/dev/netlink.c index 01aacdcda260..abe8dc051d94 100644 --- a/drivers/net/can/dev/netlink.c +++ b/drivers/net/can/dev/netlink.c @@ -285,6 +285,12 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], } if (data[IFLA_CAN_RESTART_MS]) { + if (!priv->do_set_mode) { + NL_SET_ERR_MSG(extack, + "Device doesn't support restart from Bus Off"); + return -EOPNOTSUPP; + } + /* Do not allow changing restart delay while running */ if (dev->flags & IFF_UP) return -EBUSY; @@ -292,6 +298,12 @@ static int can_changelink(struct net_device *dev, struct nlattr *tb[], } if (data[IFLA_CAN_RESTART]) { + if (!priv->do_set_mode) { + NL_SET_ERR_MSG(extack, + "Device doesn't support restart from Bus Off"); + return -EOPNOTSUPP; + } + /* Do not allow a restart while not running */ if (!(dev->flags & IFF_UP)) return -EINVAL; diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index dbd4d8796f9b..dbcf17fb3ef2 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -665,7 +665,7 @@ static int m_can_handle_lost_msg(struct net_device *dev) struct can_frame *frame; u32 timestamp = 0; - netdev_err(dev, "msg lost in rxf0\n"); + netdev_dbg(dev, "msg lost in rxf0\n"); stats->rx_errors++; stats->rx_over_errors++; diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c index b6c5c8bab739..e8995738cf99 100644 --- 
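[Editor's note on the bonding IPsec hunks above: xs->xso.real_dev is now assigned only after xdo_dev_state_add() succeeds, and cleared before the device-side state is deleted or freed, so no stale device pointer survives a failed or torn-down offload. That commit-then-publish ordering as a standalone sketch; the state layout and helpers are invented for the demo:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

struct state { void *real_dev; };

static int dev_state_add(void *dev) { return dev ? 0 : -ENODEV; }

static int offload_add(struct state *st, void *dev)
{
	int err = dev_state_add(dev);

	if (err)
		return err;	/* st->real_dev never pointed at dev */
	st->real_dev = dev;	/* publish only after success */
	return 0;
}

static void offload_free(struct state *st)
{
	st->real_dev = NULL;	/* unpublish before releasing resources */
	/* ... free the device-side state ... */
}

int main(void)
{
	struct state st = { NULL };
	int dummy;

	printf("add: %d, real_dev=%p\n", offload_add(&st, &dummy), st.real_dev);
	offload_free(&st);
	return 0;
}
]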
a/drivers/net/can/m_can/tcan4x5x-core.c +++ b/drivers/net/can/m_can/tcan4x5x-core.c @@ -92,6 +92,8 @@ #define TCAN4X5X_MODE_STANDBY BIT(6) #define TCAN4X5X_MODE_NORMAL BIT(7) +#define TCAN4X5X_NWKRQ_VOLTAGE_VIO BIT(19) + #define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30)) #define TCAN4X5X_DISABLE_INH_MSK BIT(9) @@ -267,6 +269,13 @@ static int tcan4x5x_init(struct m_can_classdev *cdev) if (ret) return ret; + if (tcan4x5x->nwkrq_voltage_vio) { + ret = regmap_set_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG, + TCAN4X5X_NWKRQ_VOLTAGE_VIO); + if (ret) + return ret; + } + return ret; } @@ -318,21 +327,27 @@ static const struct tcan4x5x_version_info return &tcan4x5x_versions[TCAN4X5X]; } -static int tcan4x5x_get_gpios(struct m_can_classdev *cdev, - const struct tcan4x5x_version_info *version_info) +static void tcan4x5x_get_dt_data(struct m_can_classdev *cdev) +{ + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); + + tcan4x5x->nwkrq_voltage_vio = + of_property_read_bool(cdev->dev->of_node, "ti,nwkrq-voltage-vio"); +} + +static int tcan4x5x_get_gpios(struct m_can_classdev *cdev) { struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); int ret; - if (version_info->has_wake_pin) { - tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev, "device-wake", - GPIOD_OUT_HIGH); - if (IS_ERR(tcan4x5x->device_wake_gpio)) { - if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER) - return -EPROBE_DEFER; + tcan4x5x->device_wake_gpio = devm_gpiod_get_optional(cdev->dev, + "device-wake", + GPIOD_OUT_HIGH); + if (IS_ERR(tcan4x5x->device_wake_gpio)) { + if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER) + return -EPROBE_DEFER; - tcan4x5x_disable_wake(cdev); - } + tcan4x5x->device_wake_gpio = NULL; } tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset", @@ -344,14 +359,31 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev, if (ret) return ret; - if (version_info->has_state_pin) { - tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev, - "device-state", - GPIOD_IN); - if (IS_ERR(tcan4x5x->device_state_gpio)) { - tcan4x5x->device_state_gpio = NULL; - tcan4x5x_disable_state(cdev); - } + tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev, + "device-state", + GPIOD_IN); + if (IS_ERR(tcan4x5x->device_state_gpio)) + tcan4x5x->device_state_gpio = NULL; + + return 0; +} + +static int tcan4x5x_check_gpios(struct m_can_classdev *cdev, + const struct tcan4x5x_version_info *version_info) +{ + struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev); + int ret; + + if (version_info->has_wake_pin && !tcan4x5x->device_wake_gpio) { + ret = tcan4x5x_disable_wake(cdev); + if (ret) + return ret; + } + + if (version_info->has_state_pin && !tcan4x5x->device_state_gpio) { + ret = tcan4x5x_disable_state(cdev); + if (ret) + return ret; } return 0; @@ -442,18 +474,26 @@ static int tcan4x5x_can_probe(struct spi_device *spi) goto out_m_can_class_free_dev; } + ret = tcan4x5x_get_gpios(mcan_class); + if (ret) { + dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret)); + goto out_power; + } + version_info = tcan4x5x_find_version(priv); if (IS_ERR(version_info)) { ret = PTR_ERR(version_info); goto out_power; } - ret = tcan4x5x_get_gpios(mcan_class, version_info); + ret = tcan4x5x_check_gpios(mcan_class, version_info); if (ret) { - dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret)); + dev_err(&spi->dev, "Checking gpios failed %pe\n", ERR_PTR(ret)); goto out_power; } + tcan4x5x_get_dt_data(mcan_class); + tcan4x5x_check_wake(priv); ret = tcan4x5x_write_tcan_reg(mcan_class, 
TCAN4X5X_INT_EN, 0); diff --git a/drivers/net/can/m_can/tcan4x5x.h b/drivers/net/can/m_can/tcan4x5x.h index e62c030d3e1e..203399d5e8cc 100644 --- a/drivers/net/can/m_can/tcan4x5x.h +++ b/drivers/net/can/m_can/tcan4x5x.h @@ -42,6 +42,8 @@ struct tcan4x5x_priv { struct tcan4x5x_map_buf map_buf_rx; struct tcan4x5x_map_buf map_buf_tx; + + bool nwkrq_voltage_vio; }; static inline void diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 3b70f6737633..aa25a8a0a106 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1373,6 +1373,8 @@ #define MDIO_VEND2_CTRL1_SS13 BIT(13) #endif +#define XGBE_VEND2_MAC_AUTO_SW BIT(9) + /* MDIO mask values */ #define XGBE_AN_CL73_INT_CMPLT BIT(0) #define XGBE_AN_CL73_INC_LINK BIT(1) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 07f4f3418d01..ed76a8df6ec6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -375,6 +375,10 @@ static void xgbe_an37_set(struct xgbe_prv_data *pdata, bool enable, reg |= MDIO_VEND2_CTRL1_AN_RESTART; XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg); + + reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL); + reg |= XGBE_VEND2_MAC_AUTO_SW; + XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_PCS_DIG_CTRL, reg); } static void xgbe_an37_restart(struct xgbe_prv_data *pdata) @@ -1003,6 +1007,11 @@ static void xgbe_an37_init(struct xgbe_prv_data *pdata) netif_dbg(pdata, link, pdata->netdev, "CL37 AN (%s) initialized\n", (pdata->an_mode == XGBE_AN_MODE_CL37) ? "BaseX" : "SGMII"); + + reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1); + reg &= ~MDIO_AN_CTRL1_ENABLE; + XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg); + } static void xgbe_an73_init(struct xgbe_prv_data *pdata) @@ -1404,6 +1413,10 @@ static void xgbe_phy_status(struct xgbe_prv_data *pdata) pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata, &an_restart); + /* bail out if the link status register read fails */ + if (pdata->phy.link < 0) + return; + if (an_restart) { xgbe_phy_config_aneg(pdata); goto adjust_link; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 268399dfcf22..32e633d11348 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -2855,8 +2855,7 @@ static bool xgbe_phy_valid_speed(struct xgbe_prv_data *pdata, int speed) static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) { struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int reg; - int ret; + int reg, ret; *an_restart = 0; @@ -2890,11 +2889,20 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) return 0; } - /* Link status is latched low, so read once to clear - * and then read again to get current state - */ - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + if (reg < 0) + return reg; + + /* Link status is latched low so that momentary link drops + * can be detected. If link was already down, read again + * to get the latest state.
+ */ + + if (!pdata->phy.link && !(reg & MDIO_STAT1_LSTATUS)) { + reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); + if (reg < 0) + return reg; + } if (pdata->en_rx_adap) { /* if the link is available and adaptation is done, @@ -2913,9 +2921,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) xgbe_phy_set_mode(pdata, phy_data->cur_mode); } - /* check again for the link and adaptation status */ - reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1); - if ((reg & MDIO_STAT1_LSTATUS) && pdata->rx_adapt_done) + if (pdata->rx_adapt_done) return 1; } else if (reg & MDIO_STAT1_LSTATUS) return 1; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index ed5d43c16d0e..7526a0906b39 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -292,12 +292,12 @@ #define XGBE_LINK_TIMEOUT 5 #define XGBE_KR_TRAINING_WAIT_ITER 50 -#define XGBE_SGMII_AN_LINK_STATUS BIT(1) +#define XGBE_SGMII_AN_LINK_DUPLEX BIT(1) #define XGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3)) #define XGBE_SGMII_AN_LINK_SPEED_10 0x00 #define XGBE_SGMII_AN_LINK_SPEED_100 0x04 #define XGBE_SGMII_AN_LINK_SPEED_1000 0x08 -#define XGBE_SGMII_AN_LINK_DUPLEX BIT(4) +#define XGBE_SGMII_AN_LINK_STATUS BIT(4) /* ECC correctable error notification window (seconds) */ #define XGBE_ECC_LIMIT 60 diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index 3afd3627ce48..9c5d61990904 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -1861,14 +1861,21 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) break; } - buffer_info->alloced = 1; - buffer_info->skb = skb; - buffer_info->length = (u16) adapter->rx_buffer_len; page = virt_to_page(skb->data); offset = offset_in_page(skb->data); buffer_info->dma = dma_map_page(&pdev->dev, page, offset, adapter->rx_buffer_len, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, buffer_info->dma)) { + kfree_skb(skb); + adapter->soft_stats.rx_dropped++; + break; + } + + buffer_info->alloced = 1; + buffer_info->skb = skb; + buffer_info->length = (u16)adapter->rx_buffer_len; + rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); rfd_desc->coalese = 0; @@ -2183,8 +2190,8 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb, return 0; } -static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, - struct tx_packet_desc *ptpd) +static bool atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, + struct tx_packet_desc *ptpd) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_buffer *buffer_info; @@ -2194,6 +2201,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, unsigned int nr_frags; unsigned int f; int retval; + u16 first_mapped; u16 next_to_use; u16 data_len; u8 hdr_len; @@ -2201,6 +2209,7 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, buf_len -= skb->data_len; nr_frags = skb_shinfo(skb)->nr_frags; next_to_use = atomic_read(&tpd_ring->next_to_use); + first_mapped = next_to_use; buffer_info = &tpd_ring->buffer_info[next_to_use]; BUG_ON(buffer_info->skb); /* put skb in last TPD */ @@ -2216,6 +2225,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, buffer_info->dma = dma_map_page(&adapter->pdev->dev, page, offset, hdr_len, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) + 
goto dma_err; if (++next_to_use == tpd_ring->count) next_to_use = 0; @@ -2242,6 +2253,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, page, offset, buffer_info->length, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + buffer_info->dma)) + goto dma_err; if (++next_to_use == tpd_ring->count) next_to_use = 0; } @@ -2254,6 +2268,8 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, buffer_info->dma = dma_map_page(&adapter->pdev->dev, page, offset, buf_len, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, buffer_info->dma)) + goto dma_err; if (++next_to_use == tpd_ring->count) next_to_use = 0; } @@ -2277,6 +2293,9 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, buffer_info->dma = skb_frag_dma_map(&adapter->pdev->dev, frag, i * ATL1_MAX_TX_BUF_LEN, buffer_info->length, DMA_TO_DEVICE); + if (dma_mapping_error(&adapter->pdev->dev, + buffer_info->dma)) + goto dma_err; if (++next_to_use == tpd_ring->count) next_to_use = 0; @@ -2285,6 +2304,22 @@ static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb, /* last tpd's buffer-info */ buffer_info->skb = skb; + + return true; + + dma_err: + while (first_mapped != next_to_use) { + buffer_info = &tpd_ring->buffer_info[first_mapped]; + dma_unmap_page(&adapter->pdev->dev, + buffer_info->dma, + buffer_info->length, + DMA_TO_DEVICE); + buffer_info->dma = 0; + + if (++first_mapped == tpd_ring->count) + first_mapped = 0; + } + return false; } static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count, @@ -2355,10 +2390,8 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, len = skb_headlen(skb); - if (unlikely(skb->len <= 0)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } + if (unlikely(skb->len <= 0)) + goto drop_packet; nr_frags = skb_shinfo(skb)->nr_frags; for (f = 0; f < nr_frags; f++) { @@ -2371,10 +2404,9 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, if (mss) { if (skb->protocol == htons(ETH_P_IP)) { proto_hdr_len = skb_tcp_all_headers(skb); - if (unlikely(proto_hdr_len > len)) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } + if (unlikely(proto_hdr_len > len)) + goto drop_packet; + /* need additional TPD ? 
*/ if (proto_hdr_len != len) count += (len - proto_hdr_len + @@ -2406,23 +2438,26 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb, } tso = atl1_tso(adapter, skb, ptpd); - if (tso < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } + if (tso < 0) + goto drop_packet; if (!tso) { ret_val = atl1_tx_csum(adapter, skb, ptpd); - if (ret_val < 0) { - dev_kfree_skb_any(skb); - return NETDEV_TX_OK; - } + if (ret_val < 0) + goto drop_packet; } - atl1_tx_map(adapter, skb, ptpd); + if (!atl1_tx_map(adapter, skb, ptpd)) + goto drop_packet; + atl1_tx_queue(adapter, count, ptpd); atl1_update_mailbox(adapter); return NETDEV_TX_OK; + +drop_packet: + adapter->soft_stats.tx_errors++; + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; } static int atl1_rings_clean(struct napi_struct *napi, int budget) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index ad4aec522f4f..f4bafc71a739 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -11061,11 +11061,9 @@ static void bnxt_free_irq(struct bnxt *bp) static int bnxt_request_irq(struct bnxt *bp) { + struct cpu_rmap *rmap = NULL; int i, j, rc = 0; unsigned long flags = 0; -#ifdef CONFIG_RFS_ACCEL - struct cpu_rmap *rmap; -#endif rc = bnxt_setup_int_mode(bp); if (rc) { @@ -11080,15 +11078,15 @@ static int bnxt_request_irq(struct bnxt *bp) int map_idx = bnxt_cp_num_to_irq_num(bp, i); struct bnxt_irq *irq = &bp->irq_tbl[map_idx]; -#ifdef CONFIG_RFS_ACCEL - if (rmap && bp->bnapi[i]->rx_ring) { + if (IS_ENABLED(CONFIG_RFS_ACCEL) && + rmap && bp->bnapi[i]->rx_ring) { rc = irq_cpu_rmap_add(rmap, irq->vector); if (rc) netdev_warn(bp->dev, "failed adding irq rmap for ring %d\n", j); j++; } -#endif + rc = request_irq(irq->vector, irq->handler, flags, irq->name, bp->bnapi[i]); if (rc) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 0dbb880a7aa0..71e14be2507e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -487,7 +487,9 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) && i > bp->max_tc) return -EINVAL; + } + for (i = 0; i < max_tc; i++) { switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: break; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index 8726657f5cb9..844812bd6536 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -115,7 +115,7 @@ static void __bnxt_xmit_xdp_redirect(struct bnxt *bp, tx_buf->action = XDP_REDIRECT; tx_buf->xdpf = xdpf; dma_unmap_addr_set(tx_buf, mapping, mapping); - dma_unmap_len_set(tx_buf, len, 0); + dma_unmap_len_set(tx_buf, len, len); } void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index ffed14b63d41..a432783756d8 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -2127,10 +2127,10 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu) if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) return -EOPNOTSUPP; - if (netdev->mtu > enic->port_mtu) + if (new_mtu > enic->port_mtu) netdev_warn(netdev, "interface MTU (%d) set higher than port MTU (%d)\n", - netdev->mtu, enic->port_mtu); + new_mtu, enic->port_mtu); 
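
The atl1 hunks above (and the niu hunks further down) add the same guard: every dma_map_page()/dma_map_single() result is checked with dma_mapping_error(), and on failure the descriptors mapped so far are unwound before the packet is dropped. A minimal sketch of that pattern follows; struct tx_slot and tx_map_frags() are illustrative stand-ins, not the drivers' own types:

        #include <linux/dma-mapping.h>

        /* One DMA mapping per fragment (hypothetical ring slot). */
        struct tx_slot {
                dma_addr_t dma;
                unsigned int len;
        };

        static bool tx_map_frags(struct device *dev, struct page **pages,
                                 const unsigned int *lens, unsigned int n,
                                 struct tx_slot *slots)
        {
                unsigned int i;

                for (i = 0; i < n; i++) {
                        slots[i].dma = dma_map_page(dev, pages[i], 0, lens[i],
                                                    DMA_TO_DEVICE);
                        /* Never hand an unchecked address to the hardware. */
                        if (dma_mapping_error(dev, slots[i].dma))
                                goto unwind;
                        slots[i].len = lens[i];
                }
                return true;

        unwind:
                /* Undo only the mappings that succeeded, newest first. */
                while (i--)
                        dma_unmap_page(dev, slots[i].dma, slots[i].len,
                                       DMA_TO_DEVICE);
                return false;
        }

Unwinding only what succeeded keeps the error path symmetric with the map loop; atl1 gets the same effect by replaying the ring from first_mapped up to the failure point.
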
return _enic_change_mtu(netdev, new_mtu); } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 29886a8ba73f..c744e10e6403 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -3928,6 +3928,7 @@ static int dpaa2_eth_setup_rx_flow(struct dpaa2_eth_priv *priv, MEM_TYPE_PAGE_ORDER0, NULL); if (err) { dev_err(dev, "xdp_rxq_info_reg_mem_model failed\n"); + xdp_rxq_info_unreg(&fq->channel->xdp_rxq); return err; } @@ -4421,17 +4422,25 @@ static int dpaa2_eth_bind_dpni(struct dpaa2_eth_priv *priv) return -EINVAL; } if (err) - return err; + goto out; } err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, DPNI_QUEUE_TX, &priv->tx_qdid); if (err) { dev_err(dev, "dpni_get_qdid() failed\n"); - return err; + goto out; } return 0; + +out: + while (i--) { + if (priv->fq[i].type == DPAA2_RX_FQ && + xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq)) + xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq); + } + return err; } /* Allocate rings for storing incoming frame descriptors */ @@ -4646,12 +4655,19 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) return PTR_ERR(dpmac_dev); } - if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) + if (IS_ERR(dpmac_dev)) return 0; + if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) { + err = 0; + goto out_put_device; + } + mac = kzalloc(sizeof(struct dpaa2_mac), GFP_KERNEL); - if (!mac) - return -ENOMEM; + if (!mac) { + err = -ENOMEM; + goto out_put_device; + } mac->mc_dev = dpmac_dev; mac->mc_io = priv->mc_io; @@ -4685,6 +4701,8 @@ err_close_mac: dpaa2_mac_close(mac); err_free_mac: kfree(mac); +out_put_device: + put_device(&dpmac_dev->dev); return err; } @@ -4814,6 +4832,17 @@ static void dpaa2_eth_del_ch_napi(struct dpaa2_eth_priv *priv) } } +static void dpaa2_eth_free_rx_xdp_rxq(struct dpaa2_eth_priv *priv) +{ + int i; + + for (i = 0; i < priv->num_fqs; i++) { + if (priv->fq[i].type == DPAA2_RX_FQ && + xdp_rxq_info_is_reg(&priv->fq[i].channel->xdp_rxq)) + xdp_rxq_info_unreg(&priv->fq[i].channel->xdp_rxq); + } +} + static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) { struct device *dev; @@ -5017,6 +5046,7 @@ err_alloc_percpu_extras: free_percpu(priv->percpu_stats); err_alloc_percpu_stats: dpaa2_eth_del_ch_napi(priv); + dpaa2_eth_free_rx_xdp_rxq(priv); err_bind: dpaa2_eth_free_dpbps(priv); err_dpbp_setup: @@ -5069,6 +5099,7 @@ static void dpaa2_eth_remove(struct fsl_mc_device *ls_dev) free_percpu(priv->percpu_extras); dpaa2_eth_del_ch_napi(priv); + dpaa2_eth_free_rx_xdp_rxq(priv); dpaa2_eth_free_dpbps(priv); dpaa2_eth_free_dpio(priv); dpaa2_eth_free_dpni(priv); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index a293b08f36d4..cbd3859ea475 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -1447,12 +1447,19 @@ static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv) if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) return PTR_ERR(dpmac_dev); - if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) + if (IS_ERR(dpmac_dev)) return 0; + if (dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) { + err = 0; + goto out_put_device; + } + mac = kzalloc(sizeof(*mac), GFP_KERNEL); - if (!mac) - return -ENOMEM; + if (!mac) { + err = -ENOMEM; + goto out_put_device; + } mac->mc_dev = dpmac_dev; mac->mc_io = port_priv->ethsw_data->mc_io; @@ 
-1482,6 +1489,8 @@ err_close_mac: dpaa2_mac_close(mac); err_free_mac: kfree(mac); +out_put_device: + put_device(&dpmac_dev->dev); return err; } diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 14f39d1f59d3..8ea3c7493663 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1972,49 +1972,56 @@ static void gve_turnup_and_check_status(struct gve_priv *priv) gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status); } -static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue) +static struct gve_notify_block *gve_get_tx_notify_block(struct gve_priv *priv, + unsigned int txqueue) { - struct gve_notify_block *block; - struct gve_tx_ring *tx = NULL; - struct gve_priv *priv; - u32 last_nic_done; - u32 current_time; u32 ntfy_idx; - netdev_info(dev, "Timeout on tx queue, %d", txqueue); - priv = netdev_priv(dev); if (txqueue > priv->tx_cfg.num_queues) - goto reset; + return NULL; ntfy_idx = gve_tx_idx_to_ntfy(priv, txqueue); if (ntfy_idx >= priv->num_ntfy_blks) - goto reset; + return NULL; + + return &priv->ntfy_blocks[ntfy_idx]; +} + +static bool gve_tx_timeout_try_q_kick(struct gve_priv *priv, + unsigned int txqueue) +{ + struct gve_notify_block *block; + u32 current_time; - block = &priv->ntfy_blocks[ntfy_idx]; - tx = block->tx; + block = gve_get_tx_notify_block(priv, txqueue); + + if (!block) + return false; current_time = jiffies_to_msecs(jiffies); - if (tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time) - goto reset; + if (block->tx->last_kick_msec + MIN_TX_TIMEOUT_GAP > current_time) + return false; - /* Check to see if there are missed completions, which will allow us to - * kick the queue. - */ - last_nic_done = gve_tx_load_event_counter(priv, tx); - if (last_nic_done - tx->done) { - netdev_info(dev, "Kicking queue %d", txqueue); - iowrite32be(GVE_IRQ_MASK, gve_irq_doorbell(priv, block)); - napi_schedule(&block->napi); - tx->last_kick_msec = current_time; - goto out; - } // Else reset. 
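
The gve rework here splits the timeout handler into helpers so the cheap recovery (rescheduling NAPI on the stalled queue) is tried first and rate-limited, and only a failed kick escalates to a full device reset. A compact sketch of that kick-before-reset shape; struct txq_state, txq_try_kick() and MIN_KICK_GAP_MS are assumed names, not gve's:

        #include <linux/jiffies.h>
        #include <linux/netdevice.h>

        #define MIN_KICK_GAP_MS 1000    /* assumed rate limit for this sketch */

        struct txq_state {
                struct napi_struct napi;
                u32 last_kick_msec;
        };

        /* Returns true if the queue was kicked; false tells the caller
         * to fall back to a full device reset.
         */
        static bool txq_try_kick(struct txq_state *q)
        {
                u32 now = jiffies_to_msecs(jiffies);

                if (q->last_kick_msec + MIN_KICK_GAP_MS > now)
                        return false;

                napi_schedule(&q->napi);
                q->last_kick_msec = now;
                return true;
        }

Rate-limiting the kick matters: a queue that times out again within the window is genuinely stuck, and repeating the kick would only postpone the reset.
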
+ netdev_info(priv->dev, "Kicking queue %d", txqueue); + napi_schedule(&block->napi); + block->tx->last_kick_msec = current_time; + return true; +} -reset: - gve_schedule_reset(priv); +static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue) +{ + struct gve_notify_block *block; + struct gve_priv *priv; -out: - if (tx) - tx->queue_timeout++; + netdev_info(dev, "Timeout on tx queue, %d", txqueue); + priv = netdev_priv(dev); + + if (!gve_tx_timeout_try_q_kick(priv, txqueue)) + gve_schedule_reset(priv); + + block = gve_get_tx_notify_block(priv, txqueue); + if (block) + block->tx->queue_timeout++; priv->tx_timeo_cnt++; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 24062a40a779..94432e237640 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -11,6 +11,7 @@ #include <linux/irq.h> #include <linux/ip.h> #include <linux/ipv6.h> +#include <linux/iommu.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/skbuff.h> @@ -1039,6 +1040,8 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring, static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) { u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_tx_spare *tx_spare; struct page *page; dma_addr_t dma; @@ -1080,6 +1083,7 @@ static void hns3_init_tx_spare_buffer(struct hns3_enet_ring *ring) tx_spare->buf = page_address(page); tx_spare->len = PAGE_SIZE << order; ring->tx_spare = tx_spare; + ring->tx_copybreak = priv->tx_copybreak; return; dma_mapping_error: @@ -4874,6 +4878,30 @@ static void hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv) devm_kfree(&pdev->dev, priv->tqp_vector); } +static void hns3_update_tx_spare_buf_config(struct hns3_nic_priv *priv) +{ +#define HNS3_MIN_SPARE_BUF_SIZE (2 * 1024 * 1024) +#define HNS3_MAX_PACKET_SIZE (64 * 1024) + + struct iommu_domain *domain = iommu_get_domain_for_dev(priv->dev); + struct hnae3_ae_dev *ae_dev = hns3_get_ae_dev(priv->ae_handle); + struct hnae3_handle *handle = priv->ae_handle; + + if (ae_dev->dev_version < HNAE3_DEVICE_VERSION_V3) + return; + + if (!(domain && iommu_is_dma_domain(domain))) + return; + + priv->min_tx_copybreak = HNS3_MAX_PACKET_SIZE; + priv->min_tx_spare_buf_size = HNS3_MIN_SPARE_BUF_SIZE; + + if (priv->tx_copybreak < priv->min_tx_copybreak) + priv->tx_copybreak = priv->min_tx_copybreak; + if (handle->kinfo.tx_spare_buf_size < priv->min_tx_spare_buf_size) + handle->kinfo.tx_spare_buf_size = priv->min_tx_spare_buf_size; +} + static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv, unsigned int ring_type) { @@ -5107,6 +5135,7 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv) int i, j; int ret; + hns3_update_tx_spare_buf_config(priv); for (i = 0; i < ring_num; i++) { ret = hns3_alloc_ring_memory(&priv->ring[i]); if (ret) { @@ -5311,6 +5340,8 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->ae_handle = handle; priv->tx_timeout_count = 0; priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; + priv->min_tx_copybreak = 0; + priv->min_tx_spare_buf_size = 0; set_bit(HNS3_NIC_STATE_DOWN, &priv->state); handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index d36c4ed16d8d..caf7a4df8585 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -596,6 +596,8 @@ struct hns3_nic_priv { struct hns3_enet_coalesce rx_coal; u32 tx_copybreak; u32 rx_copybreak; + u32 min_tx_copybreak; + u32 min_tx_spare_buf_size; }; union l3_hdr_info { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 06eedf80cfac..407ad0b985b4 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -9586,33 +9586,36 @@ static bool hclge_need_enable_vport_vlan_filter(struct hclge_vport *vport) return false; } -int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) +static int __hclge_enable_vport_vlan_filter(struct hclge_vport *vport, + bool request_en) { - struct hclge_dev *hdev = vport->back; bool need_en; int ret; - mutex_lock(&hdev->vport_lock); - - vport->req_vlan_fltr_en = request_en; - need_en = hclge_need_enable_vport_vlan_filter(vport); - if (need_en == vport->cur_vlan_fltr_en) { - mutex_unlock(&hdev->vport_lock); + if (need_en == vport->cur_vlan_fltr_en) return 0; - } ret = hclge_set_vport_vlan_filter(vport, need_en); - if (ret) { - mutex_unlock(&hdev->vport_lock); + if (ret) return ret; - } vport->cur_vlan_fltr_en = need_en; + return 0; +} + +int hclge_enable_vport_vlan_filter(struct hclge_vport *vport, bool request_en) +{ + struct hclge_dev *hdev = vport->back; + int ret; + + mutex_lock(&hdev->vport_lock); + vport->req_vlan_fltr_en = request_en; + ret = __hclge_enable_vport_vlan_filter(vport, request_en); mutex_unlock(&hdev->vport_lock); - return 0; + return ret; } static int hclge_enable_vlan_filter(struct hnae3_handle *handle, bool enable) @@ -10633,16 +10636,19 @@ static void hclge_sync_vlan_fltr_state(struct hclge_dev *hdev) &vport->state)) continue; - ret = hclge_enable_vport_vlan_filter(vport, - vport->req_vlan_fltr_en); + mutex_lock(&hdev->vport_lock); + ret = __hclge_enable_vport_vlan_filter(vport, + vport->req_vlan_fltr_en); if (ret) { dev_err(&hdev->pdev->dev, "failed to sync vlan filter state for vport%u, ret = %d\n", vport->vport_id, ret); set_bit(HCLGE_VPORT_STATE_VLAN_FLTR_CHANGE, &vport->state); + mutex_unlock(&hdev->vport_lock); return; } + mutex_unlock(&hdev->vport_lock); } } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index 0ffda5146bae..16ef5a584af3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -496,14 +496,14 @@ int hclge_ptp_init(struct hclge_dev *hdev) if (ret) { dev_err(&hdev->pdev->dev, "failed to init freq, ret = %d\n", ret); - goto out; + goto out_clear_int; } ret = hclge_ptp_set_ts_mode(hdev, &hdev->ptp->ts_cfg); if (ret) { dev_err(&hdev->pdev->dev, "failed to init ts mode, ret = %d\n", ret); - goto out; + goto out_clear_int; } ktime_get_real_ts64(&ts); @@ -511,7 +511,7 @@ int hclge_ptp_init(struct hclge_dev *hdev) if (ret) { dev_err(&hdev->pdev->dev, "failed to init ts time, ret = %d\n", ret); - goto out; + goto out_clear_int; } set_bit(HCLGE_STATE_PTP_EN, &hdev->state); @@ -519,6 +519,9 @@ int hclge_ptp_init(struct hclge_dev *hdev) return 0; +out_clear_int: + clear_bit(HCLGE_PTP_FLAG_EN, &hdev->ptp->flags); + hclge_ptp_int_en(hdev, false); out: hclge_ptp_destroy_clock(hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c 
b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 8f5a85b97ac0..e8573358309c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -3096,11 +3096,7 @@ static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) static u32 hclgevf_get_max_channels(struct hclgevf_dev *hdev) { - struct hnae3_handle *nic = &hdev->nic; - struct hnae3_knic_private_info *kinfo = &nic->kinfo; - - return min_t(u32, hdev->rss_size_max, - hdev->num_tqps / kinfo->tc_info.num_tc); + return min(hdev->rss_size_max, hdev->num_tqps); } /** diff --git a/drivers/net/ethernet/ibm/ibmvnic.h b/drivers/net/ethernet/ibm/ibmvnic.h index a189038d88df..246ddce753f9 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.h +++ b/drivers/net/ethernet/ibm/ibmvnic.h @@ -211,7 +211,6 @@ struct ibmvnic_statistics { u8 reserved[72]; } __packed __aligned(8); -#define NUM_TX_STATS 3 struct ibmvnic_tx_queue_stats { u64 batched_packets; u64 direct_packets; @@ -219,13 +218,18 @@ struct ibmvnic_tx_queue_stats { u64 dropped_packets; }; -#define NUM_RX_STATS 3 +#define NUM_TX_STATS \ + (sizeof(struct ibmvnic_tx_queue_stats) / sizeof(u64)) + struct ibmvnic_rx_queue_stats { u64 packets; u64 bytes; u64 interrupts; }; +#define NUM_RX_STATS \ + (sizeof(struct ibmvnic_rx_queue_stats) / sizeof(u64)) + struct ibmvnic_acl_buffer { __be32 len; __be32 version; diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 8294a7c4f122..ba331899d186 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -638,6 +638,9 @@ /* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ #define NVM_SUM 0xBABA +/* Uninitialized ("empty") checksum word value */ +#define NVM_CHECKSUM_UNINITIALIZED 0xFFFF + /* PBA (printed board assembly) number words */ #define NVM_PBA_OFFSET_0 8 #define NVM_PBA_OFFSET_1 9 diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 364378133526..df4e7d781cb1 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -4274,6 +4274,8 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) ret_val = e1000e_update_nvm_checksum(hw); if (ret_val) return ret_val; + } else if (hw->mac.type == e1000_pch_tgp) { + return 0; } } diff --git a/drivers/net/ethernet/intel/e1000e/nvm.c b/drivers/net/ethernet/intel/e1000e/nvm.c index e609f4df86f4..16369e6d245a 100644 --- a/drivers/net/ethernet/intel/e1000e/nvm.c +++ b/drivers/net/ethernet/intel/e1000e/nvm.c @@ -558,6 +558,12 @@ s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw) checksum += nvm_data; } + if (hw->mac.type == e1000_pch_tgp && + nvm_data == NVM_CHECKSUM_UNINITIALIZED) { + e_dbg("Uninitialized NVM Checksum on TGP platform - ignoring\n"); + return 0; + } + if (checksum != (u16)NVM_SUM) { e_dbg("NVM Checksum Invalid\n"); return -E1000_ERR_NVM; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 625fa93fc18b..97f32a0c68d0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -3137,10 +3137,10 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg) const u8 *addr = al->list[i].addr; /* Allow to delete VF primary MAC only if it was not set - * administratively by PF or if VF is trusted. + * administratively by PF. 
*/ if (ether_addr_equal(addr, vf->default_lan_addr.addr)) { - if (i40e_can_vf_change_mac(vf)) + if (!vf->pf_set_mac) was_unimac_deleted = true; else continue; @@ -5006,7 +5006,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id, vf_stats->broadcast = stats->rx_broadcast; vf_stats->multicast = stats->rx_multicast; vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other; - vf_stats->tx_dropped = stats->tx_discards; + vf_stats->tx_dropped = stats->tx_errors; return 0; } diff --git a/drivers/net/ethernet/intel/ice/ice_ddp.c b/drivers/net/ethernet/intel/ice/ice_ddp.c index 272fd823a825..e4c8cd12a41d 100644 --- a/drivers/net/ethernet/intel/ice/ice_ddp.c +++ b/drivers/net/ethernet/intel/ice/ice_ddp.c @@ -2277,6 +2277,8 @@ enum ice_ddp_state ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, return ICE_DDP_PKG_ERR; buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL); + if (!buf_copy) + return ICE_DDP_PKG_ERR; state = ice_init_pkg(hw, buf_copy, len); if (!ice_is_init_pkg_successful(state)) { diff --git a/drivers/net/ethernet/intel/ice/ice_debugfs.c b/drivers/net/ethernet/intel/ice/ice_debugfs.c index 9fc0fd95a13d..cb71eca6a85b 100644 --- a/drivers/net/ethernet/intel/ice/ice_debugfs.c +++ b/drivers/net/ethernet/intel/ice/ice_debugfs.c @@ -606,7 +606,7 @@ void ice_debugfs_fwlog_init(struct ice_pf *pf) pf->ice_debugfs_pf_fwlog = debugfs_create_dir("fwlog", pf->ice_debugfs_pf); - if (IS_ERR(pf->ice_debugfs_pf)) + if (IS_ERR(pf->ice_debugfs_pf_fwlog)) goto err_create_module_files; fw_modules_dir = debugfs_create_dir("modules", diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c index 2410aee59fb2..d132eb477551 100644 --- a/drivers/net/ethernet/intel/ice/ice_lag.c +++ b/drivers/net/ethernet/intel/ice/ice_lag.c @@ -2226,7 +2226,8 @@ bool ice_lag_is_switchdev_running(struct ice_pf *pf) struct ice_lag *lag = pf->lag; struct net_device *tmp_nd; - if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || !lag) + if (!ice_is_feature_supported(pf, ICE_F_SRIOV_LAG) || + !lag || !lag->upper_netdev) return false; rcu_read_lock(); diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c index b28991dd1870..48b8e184f3db 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c @@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq) */ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq) { - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); /* free ring buffers and the ring itself */ idpf_ctlq_dealloc_ring_res(hw, cq); @@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq) /* Set ring_size to 0 to indicate uninitialized queue */ cq->ring_size = 0; - mutex_unlock(&cq->cq_lock); - mutex_destroy(&cq->cq_lock); + spin_unlock(&cq->cq_lock); } /** @@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw, idpf_ctlq_init_regs(hw, cq, is_rxq); - mutex_init(&cq->cq_lock); + spin_lock_init(&cq->cq_lock); list_add(&cq->cq_list, &hw->cq_list_head); @@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, int err = 0; int i; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); /* Ensure there are enough descriptors to send all messages */ num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq); @@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq, wr32(hw, cq->reg.tail, cq->next_to_use); 
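
The idpf hunks around this point convert cq_lock from a mutex to a spinlock so the control queue can be posted, cleaned, and shut down from atomic context; the idpf_lib.c hunk below pairs this with DMA_ATTR_FORCE_CONTIGUOUS so that freeing the ring memory never reaches vunmap(). A minimal sketch of the resulting locking shape, with struct mini_ctlq standing in for idpf_ctlq_info:

        #include <linux/spinlock.h>
        #include <linux/types.h>

        struct mini_ctlq {
                spinlock_t lock;        /* protects the ring indexes */
                u16 next_to_use;
                u16 ring_size;
        };

        static void mini_ctlq_init(struct mini_ctlq *cq, u16 ring_size)
        {
                spin_lock_init(&cq->lock);
                cq->ring_size = ring_size;
                cq->next_to_use = 0;
        }

        /* Unlike a mutex section, this may run in atomic context, so
         * nothing inside may sleep: no GFP_KERNEL allocation, no
         * vunmap(), no blocking waits.
         */
        static void mini_ctlq_advance(struct mini_ctlq *cq)
        {
                spin_lock(&cq->lock);
                if (++cq->next_to_use == cq->ring_size)
                        cq->next_to_use = 0;
                spin_unlock(&cq->lock);
        }

The cost is that every critical section must stay short and non-sleeping, which is why the allocation side switches to forced-contiguous DMA memory instead of vmap-backed buffers.
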
err_unlock: - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); return err; } @@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, if (*clean_count > cq->ring_size) return -EBADR; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); ntc = cq->next_to_clean; @@ -397,7 +396,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count, cq->next_to_clean = ntc; - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); /* Return number of descriptors actually cleaned */ *clean_count = i; @@ -435,7 +434,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq, if (*buff_count > 0) buffs_avail = true; - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); if (tbp >= cq->ring_size) tbp = 0; @@ -524,7 +523,7 @@ post_buffs_out: wr32(hw, cq->reg.tail, cq->next_to_post); } - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); /* return the number of buffers that were not posted */ *buff_count = *buff_count - i; @@ -552,7 +551,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, u16 i; /* take the lock before we start messing with the ring */ - mutex_lock(&cq->cq_lock); + spin_lock(&cq->cq_lock); ntc = cq->next_to_clean; @@ -614,7 +613,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg, cq->next_to_clean = ntc; - mutex_unlock(&cq->cq_lock); + spin_unlock(&cq->cq_lock); *num_q_msg = i; if (*num_q_msg == 0) diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h index e8e046ef2f0d..5890d8adca4a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h +++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h @@ -99,7 +99,7 @@ struct idpf_ctlq_info { enum idpf_ctlq_type cq_type; int q_id; - struct mutex cq_lock; /* control queue lock */ + spinlock_t cq_lock; /* control queue lock */ /* used for interrupt processing */ u16 next_to_use; u16 next_to_clean; diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 59b1a1a09996..f72420cf6821 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -46,7 +46,7 @@ static u32 idpf_get_rxfh_key_size(struct net_device *netdev) struct idpf_vport_user_config_data *user_config; if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) - return -EOPNOTSUPP; + return 0; user_config = &np->adapter->vport_config[np->vport_idx]->user_config; @@ -65,7 +65,7 @@ static u32 idpf_get_rxfh_indir_size(struct net_device *netdev) struct idpf_vport_user_config_data *user_config; if (!idpf_is_cap_ena_all(np->adapter, IDPF_RSS_CAPS, IDPF_CAP_RSS)) - return -EOPNOTSUPP; + return 0; user_config = &np->adapter->vport_config[np->vport_idx]->user_config; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index ba645ab22d39..746b65533727 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -2315,8 +2315,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size) struct idpf_adapter *adapter = hw->back; size_t sz = ALIGN(size, 4096); - mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz, - &mem->pa, GFP_KERNEL); + /* The control queue resources are freed under a spinlock. Contiguous + * pages avoid IOMMU remapping, and with them dma_free_*() skips the + * vunmap() path, which may sleep.
+ */ + mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa, + GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS); mem->size = sz; return mem->va; @@ -2331,8 +2335,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem) { struct idpf_adapter *adapter = hw->back; - dma_free_coherent(&adapter->pdev->dev, mem->size, - mem->va, mem->pa); + dma_free_attrs(&adapter->pdev->dev, mem->size, + mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS); mem->size = 0; mem->va = NULL; mem->pa = 0; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 082b0baf5d37..2a0c5a343e47 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -6987,6 +6987,10 @@ static int igc_probe(struct pci_dev *pdev, adapter->port_num = hw->bus.func; adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); + /* Disable ASPM L1.2 on I226 devices to avoid packet loss */ + if (igc_is_device_id_i226(hw)) + pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); + err = pci_save_state(pdev); if (err) goto err_ioremap; @@ -7368,6 +7372,9 @@ static int igc_resume(struct device *dev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); + if (igc_is_device_id_i226(hw)) + pci_disable_link_state(pdev, PCIE_LINK_STATE_L1_2); + if (igc_init_interrupt_scheme(adapter, true)) { netdev_err(netdev, "Unable to allocate memory for queues\n"); return -ENOMEM; @@ -7480,6 +7487,9 @@ static pci_ers_result_t igc_io_slot_reset(struct pci_dev *pdev) pci_enable_wake(pdev, PCI_D3hot, 0); pci_enable_wake(pdev, PCI_D3cold, 0); + if (igc_is_device_id_i226(hw)) + pci_disable_link_state_locked(pdev, PCIE_LINK_STATE_L1_2); + /* In case of PCI error, adapter loses its HW address * so we should re-assign it here. 
*/ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index e733b81e18a2..5bb4940da59d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1925,8 +1925,8 @@ static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context, pages_queue, token, force_polling); - if (callback) - return err; + if (callback && !err) + return 0; if (err > 0) /* Failed in FW, command didn't execute */ err = deliv_status_to_err(err); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 1e8b7d330701..b5aac0e1a68e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -18,7 +18,8 @@ enum { enum { MLX5E_TC_PRIO = 0, - MLX5E_NIC_PRIO + MLX5E_PROMISC_PRIO, + MLX5E_NIC_PRIO, }; struct mlx5e_flow_table { @@ -68,9 +69,13 @@ struct mlx5e_l2_table { MLX5_HASH_FIELD_SEL_DST_IP |\ MLX5_HASH_FIELD_SEL_IPSEC_SPI) -/* NIC prio FTS */ +/* NIC promisc FT level */ enum { MLX5E_PROMISC_FT_LEVEL, +}; + +/* NIC prio FTS */ +enum { MLX5E_VLAN_FT_LEVEL, MLX5E_L2_FT_LEVEL, MLX5E_TTC_FT_LEVEL, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c index 298bb74ec5e9..d1d629697e28 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dim.c @@ -113,7 +113,7 @@ int mlx5e_dim_rx_change(struct mlx5e_rq *rq, bool enable) __set_bit(MLX5E_RQ_STATE_DIM, &rq->state); } else { __clear_bit(MLX5E_RQ_STATE_DIM, &rq->state); - + synchronize_net(); mlx5e_dim_disable(rq->dim); rq->dim = NULL; } @@ -140,7 +140,7 @@ int mlx5e_dim_tx_change(struct mlx5e_txqsq *sq, bool enable) __set_bit(MLX5E_SQ_STATE_DIM, &sq->state); } else { __clear_bit(MLX5E_SQ_STATE_DIM, &sq->state); - + synchronize_net(); mlx5e_dim_disable(sq->dim); sq->dim = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 05058710d2c7..537e732085b2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -776,7 +776,7 @@ static int mlx5e_create_promisc_table(struct mlx5e_flow_steering *fs) ft_attr.max_fte = MLX5E_PROMISC_TABLE_SIZE; ft_attr.autogroup.max_num_groups = 1; ft_attr.level = MLX5E_PROMISC_FT_LEVEL; - ft_attr.prio = MLX5E_NIC_PRIO; + ft_attr.prio = MLX5E_PROMISC_PRIO; ft->t = mlx5_create_auto_grouped_flow_table(fs->ns, &ft_attr); if (IS_ERR(ft->t)) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 8e24ba96c779..8ed47e7a7515 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1156,8 +1156,9 @@ static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp) } } -static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, - u32 cqe_bcnt) +static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb, + struct mlx5_cqe64 *cqe, + u32 cqe_bcnt) { struct ethhdr *eth = (struct ethhdr *)(skb->data); struct tcphdr *tcp; @@ -1207,6 +1208,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, tcp->check = tcp_v6_check(payload_len, &ipv6->saddr, &ipv6->daddr, check); } + + return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - 
skb->data); } static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index) @@ -1563,8 +1566,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe); if (lro_num_seg > 1) { - mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); - skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt, lro_num_seg); + unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt); + + skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg); /* Subtract one since we already counted this as one * "regular" packet in mlx5e_complete_rx_cqe() */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 988df7047b01..558962423521 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1181,19 +1181,19 @@ static void esw_set_peer_miss_rule_source_port(struct mlx5_eswitch *esw, static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, struct mlx5_core_dev *peer_dev) { + struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch; struct mlx5_flow_destination dest = {}; struct mlx5_flow_act flow_act = {0}; struct mlx5_flow_handle **flows; - /* total vports is the same for both e-switches */ - int nvports = esw->total_vports; struct mlx5_flow_handle *flow; + struct mlx5_vport *peer_vport; struct mlx5_flow_spec *spec; - struct mlx5_vport *vport; int err, pfindex; unsigned long i; void *misc; - if (!MLX5_VPORT_MANAGER(esw->dev) && !mlx5_core_is_ecpf_esw_manager(esw->dev)) + if (!MLX5_VPORT_MANAGER(peer_dev) && + !mlx5_core_is_ecpf_esw_manager(peer_dev)) return 0; spec = kvzalloc(sizeof(*spec), GFP_KERNEL); @@ -1202,7 +1202,7 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, peer_miss_rules_setup(esw, peer_dev, spec, &dest); - flows = kvcalloc(nvports, sizeof(*flows), GFP_KERNEL); + flows = kvcalloc(peer_esw->total_vports, sizeof(*flows), GFP_KERNEL); if (!flows) { err = -ENOMEM; goto alloc_flows_err; @@ -1212,10 +1212,10 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); - esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch, - spec, MLX5_VPORT_PF); + if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); + esw_set_peer_miss_rule_source_port(esw, peer_esw, spec, + MLX5_VPORT_PF); flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), spec, &flow_act, &dest, 1); @@ -1223,11 +1223,11 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, err = PTR_ERR(flow); goto add_pf_flow_err; } - flows[vport->index] = flow; + flows[peer_vport->index] = flow; } - if (mlx5_ecpf_vport_exists(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); + if (mlx5_ecpf_vport_exists(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_ECPF); flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), spec, &flow_act, &dest, 1); @@ -1235,13 +1235,14 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, err = PTR_ERR(flow); goto add_ecpf_flow_err; } - flows[vport->index] = flow; + flows[peer_vport->index] = flow; } - mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) { + 
mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_vfs(peer_dev)) { esw_set_peer_miss_rule_source_port(esw, - peer_dev->priv.eswitch, - spec, vport->vport); + peer_esw, + spec, peer_vport->vport); flow = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(esw), spec, &flow_act, &dest, 1); @@ -1249,22 +1250,22 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, err = PTR_ERR(flow); goto add_vf_flow_err; } - flows[vport->index] = flow; + flows[peer_vport->index] = flow; } - if (mlx5_core_ec_sriov_enabled(esw->dev)) { - mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) { - if (i >= mlx5_core_max_ec_vfs(peer_dev)) - break; - esw_set_peer_miss_rule_source_port(esw, peer_dev->priv.eswitch, - spec, vport->vport); + if (mlx5_core_ec_sriov_enabled(peer_dev)) { + mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_ec_vfs(peer_dev)) { + esw_set_peer_miss_rule_source_port(esw, peer_esw, + spec, + peer_vport->vport); flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow)) { err = PTR_ERR(flow); goto add_ec_vf_flow_err; } - flows[vport->index] = flow; + flows[peer_vport->index] = flow; } } @@ -1281,25 +1282,27 @@ static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, return 0; add_ec_vf_flow_err: - mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) { - if (!flows[vport->index]) + mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_ec_vfs(peer_dev)) { + if (!flows[peer_vport->index]) continue; - mlx5_del_flow_rules(flows[vport->index]); + mlx5_del_flow_rules(flows[peer_vport->index]); } add_vf_flow_err: - mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) { - if (!flows[vport->index]) + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_vfs(peer_dev)) { + if (!flows[peer_vport->index]) continue; - mlx5_del_flow_rules(flows[vport->index]); + mlx5_del_flow_rules(flows[peer_vport->index]); } - if (mlx5_ecpf_vport_exists(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); - mlx5_del_flow_rules(flows[vport->index]); + if (mlx5_ecpf_vport_exists(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); + mlx5_del_flow_rules(flows[peer_vport->index]); } add_ecpf_flow_err: - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); - mlx5_del_flow_rules(flows[vport->index]); + if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); + mlx5_del_flow_rules(flows[peer_vport->index]); } add_pf_flow_err: esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); @@ -1312,37 +1315,34 @@ alloc_flows_err: static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw, struct mlx5_core_dev *peer_dev) { + struct mlx5_eswitch *peer_esw = peer_dev->priv.eswitch; u16 peer_index = mlx5_get_dev_index(peer_dev); struct mlx5_flow_handle **flows; - struct mlx5_vport *vport; + struct mlx5_vport *peer_vport; unsigned long i; flows = esw->fdb_table.offloads.peer_miss_rules[peer_index]; if (!flows) return; - if (mlx5_core_ec_sriov_enabled(esw->dev)) { - mlx5_esw_for_each_ec_vf_vport(esw, i, vport, mlx5_core_max_ec_vfs(esw->dev)) { - /* The flow for a particular vport could be NULL if the other ECPF - * has fewer or no VFs enabled - */ - if (!flows[vport->index]) - continue; - mlx5_del_flow_rules(flows[vport->index]); - } + if (mlx5_core_ec_sriov_enabled(peer_dev)) { + 
mlx5_esw_for_each_ec_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_ec_vfs(peer_dev)) + mlx5_del_flow_rules(flows[peer_vport->index]); } - mlx5_esw_for_each_vf_vport(esw, i, vport, mlx5_core_max_vfs(esw->dev)) - mlx5_del_flow_rules(flows[vport->index]); + mlx5_esw_for_each_vf_vport(peer_esw, i, peer_vport, + mlx5_core_max_vfs(peer_dev)) + mlx5_del_flow_rules(flows[peer_vport->index]); - if (mlx5_ecpf_vport_exists(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_ECPF); - mlx5_del_flow_rules(flows[vport->index]); + if (mlx5_ecpf_vport_exists(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_ECPF); + mlx5_del_flow_rules(flows[peer_vport->index]); } - if (mlx5_core_is_ecpf_esw_manager(esw->dev)) { - vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_PF); - mlx5_del_flow_rules(flows[vport->index]); + if (mlx5_core_is_ecpf_esw_manager(peer_dev)) { + peer_vport = mlx5_eswitch_get_vport(peer_esw, MLX5_VPORT_PF); + mlx5_del_flow_rules(flows[peer_vport->index]); } kvfree(flows); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 1bc88743d2df..7ef0a4af89e4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -113,13 +113,16 @@ #define ETHTOOL_PRIO_NUM_LEVELS 1 #define ETHTOOL_NUM_PRIOS 11 #define ETHTOOL_MIN_LEVEL (KERNEL_MIN_LEVEL + ETHTOOL_NUM_PRIOS) -/* Promiscuous, Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy, +/* Vlan, mac, ttc, inner ttc, {UDP/ANY/aRFS/accel/{esp, esp_err}}, IPsec policy, * {IPsec RoCE MPV,Alias table},IPsec RoCE policy */ -#define KERNEL_NIC_PRIO_NUM_LEVELS 11 +#define KERNEL_NIC_PRIO_NUM_LEVELS 10 #define KERNEL_NIC_NUM_PRIOS 1 -/* One more level for tc */ -#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 1) +/* One more level for tc, and one more for promisc */ +#define KERNEL_MIN_LEVEL (KERNEL_NIC_PRIO_NUM_LEVELS + 2) + +#define KERNEL_NIC_PROMISC_NUM_PRIOS 1 +#define KERNEL_NIC_PROMISC_NUM_LEVELS 1 #define KERNEL_NIC_TC_NUM_PRIOS 1 #define KERNEL_NIC_TC_NUM_LEVELS 3 @@ -187,6 +190,8 @@ static struct init_tree_node { ADD_NS(MLX5_FLOW_TABLE_MISS_ACTION_DEF, ADD_MULTIPLE_PRIO(KERNEL_NIC_TC_NUM_PRIOS, KERNEL_NIC_TC_NUM_LEVELS), + ADD_MULTIPLE_PRIO(KERNEL_NIC_PROMISC_NUM_PRIOS, + KERNEL_NIC_PROMISC_NUM_LEVELS), ADD_MULTIPLE_PRIO(KERNEL_NIC_NUM_PRIOS, KERNEL_NIC_PRIO_NUM_LEVELS))), ADD_PRIO(0, BY_PASS_MIN_LEVEL, 0, FS_CHAINING_CAPS, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 220a9ac75c8b..5bc947f703b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -2241,6 +2241,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ { PCI_VDEVICE(MELLANOX, 0x1023) }, /* ConnectX-8 */ { PCI_VDEVICE(MELLANOX, 0x1025) }, /* ConnectX-9 */ + { PCI_VDEVICE(MELLANOX, 0x1027) }, /* ConnectX-10 */ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* BlueField-2 integrated ConnectX-6 Dx network controller */ diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 9bac4083d8a0..876de6db63c4 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ 
b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -28,6 +28,9 @@ static void mana_gd_init_pf_regs(struct pci_dev *pdev) gc->db_page_base = gc->bar0_va + mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF); + gc->phys_db_page_base = gc->bar0_pa + + mana_gd_r64(gc, GDMA_PF_REG_DB_PAGE_OFF); + sriov_base_off = mana_gd_r64(gc, GDMA_SRIOV_REG_CFG_BASE_OFF); sriov_base_va = gc->bar0_va + sriov_base_off; diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c index 6b3f7fca8d15..05c4b6c8c9c3 100644 --- a/drivers/net/ethernet/renesas/rtsn.c +++ b/drivers/net/ethernet/renesas/rtsn.c @@ -1259,7 +1259,12 @@ static int rtsn_probe(struct platform_device *pdev) priv = netdev_priv(ndev); priv->pdev = pdev; priv->ndev = ndev; + priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); + if (!priv->ptp_priv) { + ret = -ENOMEM; + goto error_free; + } spin_lock_init(&priv->lock); platform_set_drvdata(pdev, priv); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 83ad7c7935e3..23d9ece46d9c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -379,6 +379,12 @@ static int intel_crosststamp(ktime_t *device, return -ETIMEDOUT; } + *system = (struct system_counterval_t) { + .cycles = 0, + .cs_id = CSID_X86_ART, + .use_nsecs = false, + }; + num_snapshot = (readl(ioaddr + GMAC_TIMESTAMP_STATUS) & GMAC_TIMESTAMP_ATSNS_MASK) >> GMAC_TIMESTAMP_ATSNS_SHIFT; @@ -394,7 +400,7 @@ static int intel_crosststamp(ktime_t *device, } system->cycles *= intel_priv->crossts_adj; - system->cs_id = CSID_X86_ART; + priv->plat->flags &= ~STMMAC_FLAG_INT_SNAPSHOT_EN; return 0; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c index 7840bc403788..5dcc95bc0ad2 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_dma.c @@ -364,19 +364,17 @@ static int dwxgmac2_dma_interrupt(struct stmmac_priv *priv, } /* TX/RX NORMAL interrupts */ - if (likely(intr_status & XGMAC_NIS)) { - if (likely(intr_status & XGMAC_RI)) { - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->rx_normal_irq_n[chan]); - u64_stats_update_end(&stats->syncp); - ret |= handle_rx; - } - if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { - u64_stats_update_begin(&stats->syncp); - u64_stats_inc(&stats->tx_normal_irq_n[chan]); - u64_stats_update_end(&stats->syncp); - ret |= handle_tx; - } + if (likely(intr_status & XGMAC_RI)) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->rx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_rx; + } + if (likely(intr_status & (XGMAC_TI | XGMAC_TBU))) { + u64_stats_update_begin(&stats->syncp); + u64_stats_inc(&stats->tx_normal_irq_n[chan]); + u64_stats_update_end(&stats->syncp); + ret |= handle_tx; } /* Clear interrupts */ diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index f5449b73b9a7..1e4cf89bd79a 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -3336,7 +3336,7 @@ static int niu_rbr_add_page(struct niu *np, struct rx_ring_info *rp, addr = np->ops->map_page(np->device, page, 0, PAGE_SIZE, DMA_FROM_DEVICE); - if (!addr) { + if (np->ops->mapping_error(np->device, addr)) { __free_page(page); return -ENOMEM; } @@ -6672,6 +6672,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, len = skb_headlen(skb); mapping = np->ops->map_single(np->device, 
skb->data, len, DMA_TO_DEVICE); + if (np->ops->mapping_error(np->device, mapping)) + goto out_drop; prod = rp->prod; @@ -6713,6 +6715,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, mapping = np->ops->map_page(np->device, skb_frag_page(frag), skb_frag_off(frag), len, DMA_TO_DEVICE); + if (np->ops->mapping_error(np->device, mapping)) + goto out_unmap; rp->tx_buffs[prod].skb = NULL; rp->tx_buffs[prod].mapping = mapping; @@ -6737,6 +6741,19 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb, out: return NETDEV_TX_OK; +out_unmap: + while (i--) { + const skb_frag_t *frag; + + prod = PREVIOUS_TX(rp, prod); + frag = &skb_shinfo(skb)->frags[i]; + np->ops->unmap_page(np->device, rp->tx_buffs[prod].mapping, + skb_frag_size(frag), DMA_TO_DEVICE); + } + + np->ops->unmap_single(np->device, rp->tx_buffs[rp->prod].mapping, + skb_headlen(skb), DMA_TO_DEVICE); + out_drop: rp->tx_errors++; kfree_skb(skb); @@ -9638,6 +9655,11 @@ static void niu_pci_unmap_single(struct device *dev, u64 dma_address, dma_unmap_single(dev, dma_address, size, direction); } +static int niu_pci_mapping_error(struct device *dev, u64 addr) +{ + return dma_mapping_error(dev, addr); +} + static const struct niu_ops niu_pci_ops = { .alloc_coherent = niu_pci_alloc_coherent, .free_coherent = niu_pci_free_coherent, @@ -9645,6 +9667,7 @@ static const struct niu_ops niu_pci_ops = { .unmap_page = niu_pci_unmap_page, .map_single = niu_pci_map_single, .unmap_single = niu_pci_unmap_single, + .mapping_error = niu_pci_mapping_error, }; static void niu_driver_version(void) @@ -10011,6 +10034,11 @@ static void niu_phys_unmap_single(struct device *dev, u64 dma_address, /* Nothing to do. */ } +static int niu_phys_mapping_error(struct device *dev, u64 dma_address) +{ + return false; +} + static const struct niu_ops niu_phys_ops = { .alloc_coherent = niu_phys_alloc_coherent, .free_coherent = niu_phys_free_coherent, @@ -10018,6 +10046,7 @@ static const struct niu_ops niu_phys_ops = { .unmap_page = niu_phys_unmap_page, .map_single = niu_phys_map_single, .unmap_single = niu_phys_unmap_single, + .mapping_error = niu_phys_mapping_error, }; static int niu_of_probe(struct platform_device *op) diff --git a/drivers/net/ethernet/sun/niu.h b/drivers/net/ethernet/sun/niu.h index 04c215f91fc0..0b169c08b0f2 100644 --- a/drivers/net/ethernet/sun/niu.h +++ b/drivers/net/ethernet/sun/niu.h @@ -2879,6 +2879,9 @@ struct tx_ring_info { #define NEXT_TX(tp, index) \ (((index) + 1) < (tp)->pending ? ((index) + 1) : 0) +#define PREVIOUS_TX(tp, index) \ + (((index) - 1) >= 0 ? 
((index) - 1) : (((tp)->pending) - 1)) + static inline u32 niu_tx_avail(struct tx_ring_info *tp) { return (tp->pending - @@ -3140,6 +3143,7 @@ struct niu_ops { enum dma_data_direction direction); void (*unmap_single)(struct device *dev, u64 dma_address, size_t size, enum dma_data_direction direction); + int (*mapping_error)(struct device *dev, u64 dma_address); }; struct niu_link_config { diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 393cc5192e90..6b5cff087686 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -612,8 +612,6 @@ static struct sk_buff *am65_cpsw_build_skb(void *page_addr, { struct sk_buff *skb; - len += AM65_CPSW_HEADROOM; - skb = build_skb(page_addr, len); if (unlikely(!skb)) return NULL; @@ -1217,7 +1215,7 @@ static int am65_cpsw_nuss_rx_packets(struct am65_cpsw_rx_flow *flow, } skb = am65_cpsw_build_skb(page_addr, ndev, - AM65_CPSW_MAX_PACKET_SIZE, headroom); + PAGE_SIZE, headroom); if (unlikely(!skb)) { new_page = page; goto requeue; diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c index ddfd1c02a885..da53eb04b0a4 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.c +++ b/drivers/net/ethernet/ti/icssg/icssg_config.c @@ -288,8 +288,12 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac) int i; addr = lower_32_bits(prueth->msmcram.pa); - if (slice) - addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + if (slice) { + if (prueth->pdata.banked_ms_ram) + addr += MSMC_RAM_BANK_SIZE; + else + addr += PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE; + } if (addr % SZ_64K) { dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n"); @@ -297,43 +301,66 @@ static int prueth_fw_offload_buffer_setup(struct prueth_emac *emac) } bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET; - /* workaround for f/w bug. 
bpool 0 needs to be initialized */ - for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) { + + /* Configure buffer pools for forwarding buffers + * - used by firmware to store packets to be forwarded to other port + * - 8 total pools per slice + */ + for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) { writel(addr, &bpool_cfg[i].addr); - writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len); - addr += PRUETH_EMAC_BUF_POOL_SIZE; + writel(PRUETH_SW_FWD_BUF_POOL_SIZE, &bpool_cfg[i].len); + addr += PRUETH_SW_FWD_BUF_POOL_SIZE; } - if (!slice) - addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; - else - addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST; - - for (i = PRUETH_NUM_BUF_POOLS; - i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS; - i++) { - /* The driver only uses first 4 queues per PRU so only initialize them */ - if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) { - writel(addr, &bpool_cfg[i].addr); - writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len); - addr += PRUETH_SW_BUF_POOL_SIZE_HOST; + /* Configure buffer pools for Local Injection buffers + * - used by firmware to store packets received from host core + * - 16 total pools per slice + */ + for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) { + int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; + + /* The driver only uses first 4 queues per PRU, + * so only initialize buffer for them + */ + if ((i % PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE) + < PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) { + writel(addr, &bpool_cfg[cfg_idx].addr); + writel(PRUETH_SW_LI_BUF_POOL_SIZE, + &bpool_cfg[cfg_idx].len); + addr += PRUETH_SW_LI_BUF_POOL_SIZE; } else { - writel(0, &bpool_cfg[i].addr); - writel(0, &bpool_cfg[i].len); + writel(0, &bpool_cfg[cfg_idx].addr); + writel(0, &bpool_cfg[cfg_idx].len); } } - if (!slice) - addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST; - else - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + /* Express RX buffer queue + * - used by firmware to store express packets to be transmitted + * to the host core + */ + rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET; + for (i = 0; i < 3; i++) + writel(addr, &rxq_ctx->start[i]); + + addr += PRUETH_SW_HOST_EXP_BUF_POOL_SIZE; + writel(addr, &rxq_ctx->end); + /* Pre-emptible RX buffer queue + * - used by firmware to store preemptible packets to be transmitted + * to the host core + */ rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET; for (i = 0; i < 3; i++) writel(addr, &rxq_ctx->start[i]); - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; - writel(addr - SZ_2K, &rxq_ctx->end); + addr += PRUETH_SW_HOST_PRE_BUF_POOL_SIZE; + writel(addr, &rxq_ctx->end); + + /* Set pointer for default dropped packet write + * - used by firmware to temporarily store packet to be dropped + */ + rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET; + writel(addr, &rxq_ctx->start[0]); return 0; } @@ -347,13 +374,13 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac) u32 addr; int i; - /* Layout to have 64KB aligned buffer pool - * |BPOOL0|BPOOL1|RX_CTX0|RX_CTX1| - */ - addr = lower_32_bits(prueth->msmcram.pa); - if (slice) - addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + if (slice) { + if (prueth->pdata.banked_ms_ram) + addr += MSMC_RAM_BANK_SIZE; + else + addr += PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE; + } if (addr % SZ_64K) { dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n"); @@ -361,39 +388,66 @@ static int prueth_emac_buffer_setup(struct prueth_emac *emac) } bpool_cfg = 
emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET; - /* workaround for f/w bug. bpool 0 needs to be initilalized */ - writel(addr, &bpool_cfg[0].addr); - writel(0, &bpool_cfg[0].len); - for (i = PRUETH_EMAC_BUF_POOL_START; - i < PRUETH_EMAC_BUF_POOL_START + PRUETH_NUM_BUF_POOLS; - i++) { - writel(addr, &bpool_cfg[i].addr); - writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len); - addr += PRUETH_EMAC_BUF_POOL_SIZE; + /* Configure buffer pools for forwarding buffers + * - in mac mode - no forwarding so initialize all pools to 0 + * - 8 total pools per slice + */ + for (i = 0; i < PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; i++) { + writel(0, &bpool_cfg[i].addr); + writel(0, &bpool_cfg[i].len); } - if (!slice) - addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; - else - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE * 2; + /* Configure buffer pools for Local Injection buffers + * - used by firmware to store packets received from host core + * - 16 total pools per slice + */ + bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET; + for (i = 0; i < PRUETH_NUM_LI_BUF_POOLS_PER_SLICE; i++) { + int cfg_idx = i + PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE; + + /* In EMAC mode, only first 4 buffers are used, + * as 1 slice needs to handle only 1 port + */ + if (i < PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE) { + writel(addr, &bpool_cfg[cfg_idx].addr); + writel(PRUETH_EMAC_LI_BUF_POOL_SIZE, + &bpool_cfg[cfg_idx].len); + addr += PRUETH_EMAC_LI_BUF_POOL_SIZE; + } else { + writel(0, &bpool_cfg[cfg_idx].addr); + writel(0, &bpool_cfg[cfg_idx].len); + } + } - /* Pre-emptible RX buffer queue */ - rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET; + /* Express RX buffer queue + * - used by firmware to store express packets to be transmitted + * to host core + */ + rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET; for (i = 0; i < 3; i++) writel(addr, &rxq_ctx->start[i]); - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + addr += PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE; writel(addr, &rxq_ctx->end); - /* Express RX buffer queue */ - rxq_ctx = emac->dram.va + HOST_RX_Q_EXP_CONTEXT_OFFSET; + /* Pre-emptible RX buffer queue + * - used by firmware to store preemptible packets to be transmitted + * to host core + */ + rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET; for (i = 0; i < 3; i++) writel(addr, &rxq_ctx->start[i]); - addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + addr += PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE; writel(addr, &rxq_ctx->end); + /* Set pointer for default dropped packet write + * - used by firmware to temporarily store packet to be dropped + */ + rxq_ctx = emac->dram.va + DEFAULT_MSMC_Q_OFFSET; + writel(addr, &rxq_ctx->start[0]); + return 0; } diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h index c884e9fa099e..60d69744ffae 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.h +++ b/drivers/net/ethernet/ti/icssg/icssg_config.h @@ -26,21 +26,71 @@ struct icssg_flow_cfg { #define PRUETH_MAX_RX_FLOWS 1 /* excluding default flow */ #define PRUETH_RX_FLOW_DATA 0 -#define PRUETH_EMAC_BUF_POOL_SIZE SZ_8K -#define PRUETH_EMAC_POOLS_PER_SLICE 24 -#define PRUETH_EMAC_BUF_POOL_START 8 -#define PRUETH_NUM_BUF_POOLS 8 -#define PRUETH_EMAC_RX_CTX_BUF_SIZE SZ_16K /* per slice */ -#define MSMC_RAM_SIZE \ - (2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \ - PRUETH_EMAC_RX_CTX_BUF_SIZE * 2)) - -#define PRUETH_SW_BUF_POOL_SIZE_HOST SZ_4K -#define PRUETH_SW_NUM_BUF_POOLS_HOST 8 -#define PRUETH_SW_NUM_BUF_POOLS_PER_PRU 4 -#define MSMC_RAM_SIZE_SWITCH_MODE \ - (MSMC_RAM_SIZE + 
\ - (2 * PRUETH_SW_BUF_POOL_SIZE_HOST * PRUETH_SW_NUM_BUF_POOLS_HOST)) +/* Defines for forwarding path buffer pools: + * - used by firmware to store packets to be forwarded to other port + * - 8 total pools per slice + * - only used in switch mode (as no forwarding in mac mode) + */ +#define PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE 8 +#define PRUETH_SW_FWD_BUF_POOL_SIZE (SZ_8K) + +/* Defines for local injection path buffer pools: + * - used by firmware to store packets received from host core + * - 16 total pools per slice + * - 8 pools per port per slice and each slice handles both ports + * - only 4 out of 8 pools used per port (as only 4 real QoS levels in ICSSG) + * - switch mode: 8 total pools used + * - mac mode: 4 total pools used + */ +#define PRUETH_NUM_LI_BUF_POOLS_PER_SLICE 16 +#define PRUETH_NUM_LI_BUF_POOLS_PER_PORT_PER_SLICE 8 +#define PRUETH_SW_LI_BUF_POOL_SIZE SZ_4K +#define PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE 8 +#define PRUETH_SW_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4 +#define PRUETH_EMAC_LI_BUF_POOL_SIZE SZ_8K +#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE 4 +#define PRUETH_EMAC_USED_LI_BUF_POOLS_PER_PORT_PER_SLICE 4 + +/* Defines for host egress path - express and preemptible buffers + * - used by firmware to store express and preemptible packets + * to be transmitted to host core + * - used by both mac/switch modes + */ +#define PRUETH_SW_HOST_EXP_BUF_POOL_SIZE SZ_16K +#define PRUETH_SW_HOST_PRE_BUF_POOL_SIZE (SZ_16K - SZ_2K) +#define PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE PRUETH_SW_HOST_EXP_BUF_POOL_SIZE +#define PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE PRUETH_SW_HOST_PRE_BUF_POOL_SIZE + +/* Buffer used by firmware to temporarily store packet to be dropped */ +#define PRUETH_SW_DROP_PKT_BUF_SIZE SZ_2K +#define PRUETH_EMAC_DROP_PKT_BUF_SIZE PRUETH_SW_DROP_PKT_BUF_SIZE + +/* Total switch mode memory usage for buffers per slice */ +#define PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE \ + (PRUETH_SW_FWD_BUF_POOL_SIZE * PRUETH_NUM_FWD_BUF_POOLS_PER_SLICE + \ + PRUETH_SW_LI_BUF_POOL_SIZE * PRUETH_SW_USED_LI_BUF_POOLS_PER_SLICE + \ + PRUETH_SW_HOST_EXP_BUF_POOL_SIZE + \ + PRUETH_SW_HOST_PRE_BUF_POOL_SIZE + \ + PRUETH_SW_DROP_PKT_BUF_SIZE) + +/* Total switch mode memory usage for all buffers */ +#define PRUETH_SW_TOTAL_BUF_SIZE \ + (2 * PRUETH_SW_TOTAL_BUF_SIZE_PER_SLICE) + +/* Total mac mode memory usage for buffers per slice */ +#define PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE \ + (PRUETH_EMAC_LI_BUF_POOL_SIZE * \ + PRUETH_EMAC_USED_LI_BUF_POOLS_PER_SLICE + \ + PRUETH_EMAC_HOST_EXP_BUF_POOL_SIZE + \ + PRUETH_EMAC_HOST_PRE_BUF_POOL_SIZE + \ + PRUETH_EMAC_DROP_PKT_BUF_SIZE) + +/* Total mac mode memory usage for all buffers */ +#define PRUETH_EMAC_TOTAL_BUF_SIZE \ + (2 * PRUETH_EMAC_TOTAL_BUF_SIZE_PER_SLICE) + +/* Size of 1 bank of MSMC/OC_SRAM memory */ +#define MSMC_RAM_BANK_SIZE SZ_256K #define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1) diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 6f0700d156e7..0769e1ade30b 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -1510,10 +1510,15 @@ static int prueth_probe(struct platform_device *pdev) goto put_mem; } - msmc_ram_size = MSMC_RAM_SIZE; prueth->is_switchmode_supported = prueth->pdata.switch_mode; - if (prueth->is_switchmode_supported) - msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE; + if (prueth->pdata.banked_ms_ram) { + /* Reserve 2 MSMC RAM banks for buffers to avoid arbitration */ + msmc_ram_size = (2 * 
MSMC_RAM_BANK_SIZE); + } else { + msmc_ram_size = PRUETH_EMAC_TOTAL_BUF_SIZE; + if (prueth->is_switchmode_supported) + msmc_ram_size = PRUETH_SW_TOTAL_BUF_SIZE; + } /* NOTE: FW bug needs buffer base to be 64KB aligned */ prueth->msmcram.va = @@ -1670,7 +1675,8 @@ put_iep0: free_pool: gen_pool_free(prueth->sram_pool, - (unsigned long)prueth->msmcram.va, msmc_ram_size); + (unsigned long)prueth->msmcram.va, + prueth->msmcram.size); put_mem: pruss_release_mem_region(prueth->pruss, &prueth->shram); @@ -1722,8 +1728,8 @@ static void prueth_remove(struct platform_device *pdev) icss_iep_put(prueth->iep0); gen_pool_free(prueth->sram_pool, - (unsigned long)prueth->msmcram.va, - MSMC_RAM_SIZE); + (unsigned long)prueth->msmcram.va, + prueth->msmcram.size); pruss_release_mem_region(prueth->pruss, &prueth->shram); @@ -1740,12 +1746,14 @@ static const struct prueth_pdata am654_icssg_pdata = { .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, .quirk_10m_link_issue = 1, .switch_mode = 1, + .banked_ms_ram = 0, }; static const struct prueth_pdata am64x_icssg_pdata = { .fdqring_mode = K3_RINGACC_RING_MODE_RING, .quirk_10m_link_issue = 1, .switch_mode = 1, + .banked_ms_ram = 1, }; static const struct of_device_id prueth_dt_match[] = { diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index e456a11c5d4e..693c8731c094 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -207,11 +207,13 @@ struct prueth_emac { * @fdqring_mode: Free desc queue mode * @quirk_10m_link_issue: 10M link detect errata * @switch_mode: switch firmware support + * @banked_ms_ram: banked memory support */ struct prueth_pdata { enum k3_ring_mode fdqring_mode; u32 quirk_10m_link_issue:1; u32 switch_mode:1; + u32 banked_ms_ram:1; }; struct icssg_firmwares { diff --git a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h index 424a7e945ea8..12541a12ebd6 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_switch_map.h +++ b/drivers/net/ethernet/ti/icssg/icssg_switch_map.h @@ -180,6 +180,9 @@ /* Used to notify the FW of the current link speed */ #define PORT_LINK_SPEED_OFFSET 0x00A8 +/* 2k memory pointer reserved for default writes by PRU0*/ +#define DEFAULT_MSMC_Q_OFFSET 0x00AC + /* TAS gate mask for windows list0 */ #define TAS_GATE_MASK_LIST0 0x0100 diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index deaf670c160e..e79220cb725b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1531,7 +1531,6 @@ static void wx_configure_rx_ring(struct wx *wx, struct wx_ring *ring) { u16 reg_idx = ring->reg_idx; - union wx_rx_desc *rx_desc; u64 rdba = ring->dma; u32 rxdctl; @@ -1561,9 +1560,9 @@ static void wx_configure_rx_ring(struct wx *wx, memset(ring->rx_buffer_info, 0, sizeof(struct wx_rx_buffer) * ring->count); - /* initialize Rx descriptor 0 */ - rx_desc = WX_RX_DESC(ring, 0); - rx_desc->wb.upper.length = 0; + /* reset ntu and ntc to place SW in sync with hardware */ + ring->next_to_clean = 0; + ring->next_to_use = 0; /* enable receive descriptor ring */ wr32m(wx, WX_PX_RR_CFG(reg_idx), @@ -2356,6 +2355,8 @@ void wx_update_stats(struct wx *wx) hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS); } + /* qmprc is not cleared on read, manual reset it */ + hwstats->qmprc = 0; for (i = 0; i < wx->mac.max_rx_queues; i++) hwstats->qmprc += rd32(wx, WX_PX_MPRC(i)); } diff --git 
a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index d8a6fea961c0..4c203f4afd68 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -172,10 +172,6 @@ static void wx_dma_sync_frag(struct wx_ring *rx_ring, skb_frag_off(frag), skb_frag_size(frag), DMA_FROM_DEVICE); - - /* If the page was released, just unmap it. */ - if (unlikely(WX_CB(skb)->page_released)) - page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); } static struct wx_rx_buffer *wx_get_rx_buffer(struct wx_ring *rx_ring, @@ -225,10 +221,6 @@ static void wx_put_rx_buffer(struct wx_ring *rx_ring, struct sk_buff *skb, int rx_buffer_pgcnt) { - if (!IS_ERR(skb) && WX_CB(skb)->dma == rx_buffer->dma) - /* the page has been released from the ring */ - WX_CB(skb)->page_released = true; - /* clear contents of rx_buffer */ rx_buffer->page = NULL; rx_buffer->skb = NULL; @@ -313,7 +305,7 @@ static bool wx_alloc_mapped_page(struct wx_ring *rx_ring, return false; dma = page_pool_get_dma_addr(page); - bi->page_dma = dma; + bi->dma = dma; bi->page = page; bi->page_offset = 0; @@ -350,7 +342,7 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count) DMA_FROM_DEVICE); rx_desc->read.pkt_addr = - cpu_to_le64(bi->page_dma + bi->page_offset); + cpu_to_le64(bi->dma + bi->page_offset); rx_desc++; bi++; @@ -363,6 +355,8 @@ void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count) /* clear the status bits for the next_to_use descriptor */ rx_desc->wb.upper.status_error = 0; + /* clear the length for the next_to_use descriptor */ + rx_desc->wb.upper.length = 0; cleaned_count--; } while (cleaned_count); @@ -1585,6 +1579,7 @@ static void wx_set_rss_queues(struct wx *wx) clear_bit(WX_FLAG_FDIR_HASH, wx->flags); + wx->ring_feature[RING_F_FDIR].indices = 1; /* Use Flow Director in addition to RSS to ensure the best * distribution of flows across cores, even when an FDIR flow * isn't matched. 
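The wx_lib.c hunks above move RX buffering onto the page_pool API: the pool hands out pages it has already DMA-mapped, so the driver only records that address in its buffer-info array, points the descriptor at it, and zeroes the descriptor's writeback fields so a stale status or length from the ring's previous lap cannot be mistaken for a fresh completion. A minimal sketch of that refill step, using hypothetical demo_* types rather than the libwx structures:

    #include <net/page_pool/helpers.h> /* <net/page_pool.h> on older kernels */

    /* Illustrative descriptor layout: pkt_addr is driver-written, the other
     * fields are written back by hardware on completion. Real NICs (libwx
     * included) overlay these in a union; kept separate here for clarity.
     */
    struct demo_rx_desc {
            __le64 pkt_addr;
            __le32 status_error;
            __le16 length;
            __le16 rsvd;
    };

    struct demo_rx_buf {
            struct page *page;
            dma_addr_t dma;
            unsigned int offset;
    };

    struct demo_ring {
            struct page_pool *pool;
            struct demo_rx_desc *desc;
            struct demo_rx_buf *buf_info;
            u16 count;
            u16 next_to_use;
    };

    static bool demo_refill_one(struct demo_ring *ring)
    {
            struct demo_rx_desc *rxd = &ring->desc[ring->next_to_use];
            struct demo_rx_buf *buf = &ring->buf_info[ring->next_to_use];
            struct page *page = page_pool_dev_alloc_pages(ring->pool);

            if (!page)
                    return false;

            buf->page = page;
            buf->dma = page_pool_get_dma_addr(page); /* mapped by the pool */
            buf->offset = 0;

            rxd->pkt_addr = cpu_to_le64(buf->dma + buf->offset);
            /* Clear the writeback fields: only hardware may set them, so
             * leftovers from this slot's previous use must not survive.
             */
            rxd->status_error = 0;
            rxd->length = 0;

            if (++ring->next_to_use == ring->count)
                    ring->next_to_use = 0;
            return true;
    }

The matching release call is page_pool_put_full_page(), which hands the page back to the pool rather than to the page allocator, giving the driver a single owner for mapping and recycling.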
@@ -1623,7 +1618,7 @@ static void wx_set_num_queues(struct wx *wx) */ static int wx_acquire_msix_vectors(struct wx *wx) { - struct irq_affinity affd = { .pre_vectors = 1 }; + struct irq_affinity affd = { .post_vectors = 1 }; int nvecs, i; /* We start by asking for one vector per queue pair */ @@ -1660,16 +1655,17 @@ static int wx_acquire_msix_vectors(struct wx *wx) return nvecs; } - wx->msix_entry->entry = 0; - wx->msix_entry->vector = pci_irq_vector(wx->pdev, 0); nvecs -= 1; for (i = 0; i < nvecs; i++) { wx->msix_q_entries[i].entry = i; - wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i + 1); + wx->msix_q_entries[i].vector = pci_irq_vector(wx->pdev, i); } wx->num_q_vectors = nvecs; + wx->msix_entry->entry = nvecs; + wx->msix_entry->vector = pci_irq_vector(wx->pdev, nvecs); + return 0; } @@ -2119,7 +2115,6 @@ static void wx_set_ivar(struct wx *wx, s8 direction, wr32(wx, WX_PX_MISC_IVAR, ivar); } else { /* tx or rx causes */ - msix_vector += 1; /* offset for queue vectors */ msix_vector |= WX_PX_IVAR_ALLOC_VAL; index = ((16 * (queue & 1)) + (8 * direction)); ivar = rd32(wx, WX_PX_IVAR(queue >> 1)); @@ -2150,7 +2145,7 @@ void wx_write_eitr(struct wx_q_vector *q_vector) itr_reg |= WX_PX_ITR_CNT_WDIS; - wr32(wx, WX_PX_ITR(v_idx + 1), itr_reg); + wr32(wx, WX_PX_ITR(v_idx), itr_reg); } /** @@ -2196,9 +2191,9 @@ void wx_configure_vectors(struct wx *wx) wx_write_eitr(q_vector); } - wx_set_ivar(wx, -1, 0, 0); + wx_set_ivar(wx, -1, 0, v_idx); if (pdev->msix_enabled) - wr32(wx, WX_PX_ITR(0), 1950); + wr32(wx, WX_PX_ITR(v_idx), 1950); } EXPORT_SYMBOL(wx_configure_vectors); @@ -2218,9 +2213,6 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring) if (rx_buffer->skb) { struct sk_buff *skb = rx_buffer->skb; - if (WX_CB(skb)->page_released) - page_pool_put_full_page(rx_ring->page_pool, rx_buffer->page, false); - dev_kfree_skb(skb); } @@ -2244,6 +2236,9 @@ static void wx_clean_rx_ring(struct wx_ring *rx_ring) } } + /* Zero out the descriptor ring */ + memset(rx_ring->desc, 0, rx_ring->size); + rx_ring->next_to_alloc = 0; rx_ring->next_to_clean = 0; rx_ring->next_to_use = 0; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index b54bffda027b..950cacaf095a 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -787,7 +787,6 @@ enum wx_reset_type { struct wx_cb { dma_addr_t dma; u16 append_cnt; /* number of skb's appended */ - bool page_released; bool dma_released; }; @@ -875,7 +874,6 @@ struct wx_tx_buffer { struct wx_rx_buffer { struct sk_buff *skb; dma_addr_t dma; - dma_addr_t page_dma; struct page *page; unsigned int page_offset; }; @@ -1136,7 +1134,7 @@ struct wx { }; #define WX_INTR_ALL (~0ULL) -#define WX_INTR_Q(i) BIT((i) + 1) +#define WX_INTR_Q(i) BIT((i)) /* register operations */ #define wr32(a, reg, value) writel((value), ((a)->hw_addr + (reg))) diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index 1be2a5cc4a83..d2fb77f1d876 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -154,7 +154,7 @@ static void ngbe_irq_enable(struct wx *wx, bool queues) if (queues) wx_intr_enable(wx, NGBE_INTR_ALL); else - wx_intr_enable(wx, NGBE_INTR_MISC); + wx_intr_enable(wx, NGBE_INTR_MISC(wx)); } /** diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h index f48ed7fc1805..f4dc4acbedae 100644 --- 
a/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_type.h @@ -80,7 +80,7 @@ NGBE_PX_MISC_IEN_GPIO) #define NGBE_INTR_ALL 0x1FF -#define NGBE_INTR_MISC BIT(0) +#define NGBE_INTR_MISC(A) BIT((A)->num_q_vectors) #define NGBE_PHY_CONFIG(reg_offset) (0x14000 + ((reg_offset) * 4)) #define NGBE_CFG_LAN_SPEED 0x14440 diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c index 0ee73a265545..76d33c042eee 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_irq.c @@ -21,7 +21,7 @@ void txgbe_irq_enable(struct wx *wx, bool queues) wr32(wx, WX_PX_MISC_IEN, TXGBE_PX_MISC_IEN_MASK); /* unmask interrupt */ - wx_intr_enable(wx, TXGBE_INTR_MISC); + wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); if (queues) wx_intr_enable(wx, TXGBE_INTR_QALL(wx)); } @@ -68,7 +68,6 @@ free_queue_irqs: free_irq(wx->msix_q_entries[vector].vector, wx->q_vector[vector]); } - wx_reset_interrupt_capability(wx); return err; } @@ -148,7 +147,7 @@ static irqreturn_t txgbe_misc_irq_thread_fn(int irq, void *data) nhandled++; } - wx_intr_enable(wx, TXGBE_INTR_MISC); + wx_intr_enable(wx, TXGBE_INTR_MISC(wx)); return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE); } @@ -169,6 +168,7 @@ void txgbe_free_misc_irq(struct txgbe *txgbe) free_irq(txgbe->link_irq, txgbe); free_irq(txgbe->misc.irq, txgbe); txgbe_del_irq_domain(txgbe); + txgbe->wx->misc_irq_domain = false; } int txgbe_setup_misc_irq(struct txgbe *txgbe) diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index 7e352837184f..9ede260b85dc 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -308,10 +308,14 @@ static int txgbe_open(struct net_device *netdev) wx_configure(wx); - err = txgbe_request_queue_irqs(wx); + err = txgbe_setup_misc_irq(wx->priv); if (err) goto err_free_resources; + err = txgbe_request_queue_irqs(wx); + if (err) + goto err_free_misc_irq; + /* Notify the stack of the actual queue counts. */ err = netif_set_real_num_tx_queues(netdev, wx->num_tx_queues); if (err) @@ -327,6 +331,9 @@ static int txgbe_open(struct net_device *netdev) err_free_irq: wx_free_irq(wx); +err_free_misc_irq: + txgbe_free_misc_irq(wx->priv); + wx_reset_interrupt_capability(wx); err_free_resources: wx_free_resources(wx); err_reset: @@ -365,6 +372,7 @@ static int txgbe_close(struct net_device *netdev) txgbe_down(wx); wx_free_irq(wx); + txgbe_free_misc_irq(wx->priv); wx_free_resources(wx); txgbe_fdir_filter_exit(wx); wx_control_hw(wx, false); @@ -410,7 +418,6 @@ static void txgbe_shutdown(struct pci_dev *pdev) int txgbe_setup_tc(struct net_device *dev, u8 tc) { struct wx *wx = netdev_priv(dev); - struct txgbe *txgbe = wx->priv; /* Hardware has to reinitialize queues and interrupts to * match packet buffer alignment. 
Unfortunately, the @@ -421,7 +428,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc) else txgbe_reset(wx); - txgbe_free_misc_irq(txgbe); wx_clear_interrupt_scheme(wx); if (tc) @@ -430,7 +436,6 @@ int txgbe_setup_tc(struct net_device *dev, u8 tc) netdev_reset_tc(dev); wx_init_interrupt_scheme(wx); - txgbe_setup_misc_irq(txgbe); if (netif_running(dev)) txgbe_open(dev); @@ -677,13 +682,9 @@ static int txgbe_probe(struct pci_dev *pdev, txgbe_init_fdir(txgbe); - err = txgbe_setup_misc_irq(txgbe); - if (err) - goto err_release_hw; - err = txgbe_init_phy(txgbe); if (err) - goto err_free_misc_irq; + goto err_release_hw; err = register_netdev(netdev); if (err) @@ -711,8 +712,6 @@ static int txgbe_probe(struct pci_dev *pdev, err_remove_phy: txgbe_remove_phy(txgbe); -err_free_misc_irq: - txgbe_free_misc_irq(txgbe); err_release_hw: wx_clear_interrupt_scheme(wx); wx_control_hw(wx, false); @@ -746,7 +745,6 @@ static void txgbe_remove(struct pci_dev *pdev) unregister_netdev(netdev); txgbe_remove_phy(txgbe); - txgbe_free_misc_irq(txgbe); wx_free_isb_resources(wx); pci_release_selected_regions(pdev, diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index 8ea413a7abe9..5fe415f3f2ca 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -264,8 +264,8 @@ struct txgbe_fdir_filter { #define TXGBE_DEFAULT_RX_WORK 128 #endif -#define TXGBE_INTR_MISC BIT(0) -#define TXGBE_INTR_QALL(A) GENMASK((A)->num_q_vectors, 1) +#define TXGBE_INTR_MISC(A) BIT((A)->num_q_vectors) +#define TXGBE_INTR_QALL(A) (TXGBE_INTR_MISC(A) - 1) #define TXGBE_MAX_EITR GENMASK(11, 3) diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 1072e2210aed..6b93418224e7 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1309,7 +1309,7 @@ ll_temac_ethtools_set_ringparam(struct net_device *ndev, if (ering->rx_pending > RX_BD_NUM_MAX || ering->rx_mini_pending || ering->rx_jumbo_pending || - ering->rx_pending > TX_BD_NUM_MAX) + ering->tx_pending > TX_BD_NUM_MAX) return -EINVAL; if (netif_running(ndev)) diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 940452d0a4d2..258096543b08 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -285,7 +285,7 @@ static void xemaclite_aligned_read(u32 *src_ptr, u8 *dest_ptr, /* Read the remaining data */ for (; length > 0; length--) - *to_u8_ptr = *from_u8_ptr; + *to_u8_ptr++ = *from_u8_ptr++; } } diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 8ec497023224..4376e116eb9f 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -2316,8 +2316,11 @@ static int netvsc_prepare_bonding(struct net_device *vf_netdev) if (!ndev) return NOTIFY_DONE; - /* set slave flag before open to prevent IPv6 addrconf */ + /* Set slave flag and no addrconf flag before open + * to prevent IPv6 addrconf. 
+ */ vf_netdev->flags |= IFF_SLAVE; + vf_netdev->priv_flags |= IFF_NO_ADDRCONF; return NOTIFY_DONE; } diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index e3a5961dced9..ffca1cec4ec9 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -332,7 +332,7 @@ static void lan88xx_link_change_notify(struct phy_device *phydev) * As workaround, set to 10 before setting to 100 * at forced 100 F/H mode. */ - if (!phydev->autoneg && phydev->speed == 100) { + if (phydev->state == PHY_NOLINK && !phydev->autoneg && phydev->speed == 100) { /* disable phy interrupt */ temp = phy_read(phydev, LAN88XX_INT_MASK); temp &= ~LAN88XX_INT_MASK_MDINTPIN_EN_; @@ -486,6 +486,7 @@ static struct phy_driver microchip_phy_driver[] = { .config_init = lan88xx_config_init, .config_aneg = lan88xx_config_aneg, .link_change_notify = lan88xx_link_change_notify, + .soft_reset = genphy_soft_reset, /* Interrupt handling is broken, do not define related * functions to force polling. diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 13dea33d86ff..834624a61060 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3663,7 +3663,8 @@ static int phy_probe(struct device *dev) /* Get the LEDs from the device tree, and instantiate standard * LEDs for them. */ - if (IS_ENABLED(CONFIG_PHYLIB_LEDS)) + if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev) && + !phy_driver_is_genphy_10g(phydev)) err = of_phy_leds(phydev); out: @@ -3680,7 +3681,8 @@ static int phy_remove(struct device *dev) cancel_delayed_work_sync(&phydev->state_queue); - if (IS_ENABLED(CONFIG_PHYLIB_LEDS)) + if (IS_ENABLED(CONFIG_PHYLIB_LEDS) && !phy_driver_is_genphy(phydev) && + !phy_driver_is_genphy_10g(phydev)) phy_leds_unregister(phydev); phydev->state = PHY_DOWN; diff --git a/drivers/net/phy/qcom/at803x.c b/drivers/net/phy/qcom/at803x.c index 105602581a03..ac909ad8a87b 100644 --- a/drivers/net/phy/qcom/at803x.c +++ b/drivers/net/phy/qcom/at803x.c @@ -26,9 +26,6 @@ #define AT803X_LED_CONTROL 0x18 -#define AT803X_PHY_MMD3_WOL_CTRL 0x8012 -#define AT803X_WOL_EN BIT(5) - #define AT803X_REG_CHIP_CONFIG 0x1f #define AT803X_BT_BX_REG_SEL 0x8000 @@ -866,30 +863,6 @@ static int at8031_config_init(struct phy_device *phydev) return at803x_config_init(phydev); } -static int at8031_set_wol(struct phy_device *phydev, - struct ethtool_wolinfo *wol) -{ - int ret; - - /* First setup MAC address and enable WOL interrupt */ - ret = at803x_set_wol(phydev, wol); - if (ret) - return ret; - - if (wol->wolopts & WAKE_MAGIC) - /* Enable WOL function for 1588 */ - ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, - AT803X_PHY_MMD3_WOL_CTRL, - 0, AT803X_WOL_EN); - else - /* Disable WoL function for 1588 */ - ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, - AT803X_PHY_MMD3_WOL_CTRL, - AT803X_WOL_EN, 0); - - return ret; -} - static int at8031_config_intr(struct phy_device *phydev) { struct at803x_priv *priv = phydev->priv; diff --git a/drivers/net/phy/qcom/qca808x.c b/drivers/net/phy/qcom/qca808x.c index 5048304ccc9e..c3aad0e6b700 100644 --- a/drivers/net/phy/qcom/qca808x.c +++ b/drivers/net/phy/qcom/qca808x.c @@ -633,7 +633,7 @@ static struct phy_driver qca808x_driver[] = { .handle_interrupt = at803x_handle_interrupt, .get_tunable = at803x_get_tunable, .set_tunable = at803x_set_tunable, - .set_wol = at803x_set_wol, + .set_wol = at8031_set_wol, .get_wol = at803x_get_wol, .get_features = qca808x_get_features, .config_aneg = qca808x_config_aneg, diff --git a/drivers/net/phy/qcom/qcom-phy-lib.c 
b/drivers/net/phy/qcom/qcom-phy-lib.c index d28815ef56bb..af7d0d8e81be 100644 --- a/drivers/net/phy/qcom/qcom-phy-lib.c +++ b/drivers/net/phy/qcom/qcom-phy-lib.c @@ -115,6 +115,31 @@ int at803x_set_wol(struct phy_device *phydev, } EXPORT_SYMBOL_GPL(at803x_set_wol); +int at8031_set_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol) +{ + int ret; + + /* First setup MAC address and enable WOL interrupt */ + ret = at803x_set_wol(phydev, wol); + if (ret) + return ret; + + if (wol->wolopts & WAKE_MAGIC) + /* Enable WOL function for 1588 */ + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, + AT803X_PHY_MMD3_WOL_CTRL, + 0, AT803X_WOL_EN); + else + /* Disable WoL function for 1588 */ + ret = phy_modify_mmd(phydev, MDIO_MMD_PCS, + AT803X_PHY_MMD3_WOL_CTRL, + AT803X_WOL_EN, 0); + + return ret; +} +EXPORT_SYMBOL_GPL(at8031_set_wol); + void at803x_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol) { diff --git a/drivers/net/phy/qcom/qcom.h b/drivers/net/phy/qcom/qcom.h index 4bb541728846..7f7151c8baca 100644 --- a/drivers/net/phy/qcom/qcom.h +++ b/drivers/net/phy/qcom/qcom.h @@ -172,6 +172,9 @@ #define AT803X_LOC_MAC_ADDR_16_31_OFFSET 0x804B #define AT803X_LOC_MAC_ADDR_32_47_OFFSET 0x804A +#define AT803X_PHY_MMD3_WOL_CTRL 0x8012 +#define AT803X_WOL_EN BIT(5) + #define AT803X_DEBUG_ADDR 0x1D #define AT803X_DEBUG_DATA 0x1E @@ -215,6 +218,8 @@ int at803x_debug_reg_mask(struct phy_device *phydev, u16 reg, int at803x_debug_reg_write(struct phy_device *phydev, u16 reg, u16 data); int at803x_set_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); +int at8031_set_wol(struct phy_device *phydev, + struct ethtool_wolinfo *wol); void at803x_get_wol(struct phy_device *phydev, struct ethtool_wolinfo *wol); int at803x_ack_interrupt(struct phy_device *phydev); diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 150aea7c9c36..6a43f6d6e85c 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -155,10 +155,29 @@ static int smsc_phy_reset(struct phy_device *phydev) static int lan87xx_config_aneg(struct phy_device *phydev) { - int rc; + u8 mdix_ctrl; int val; + int rc; + + /* When auto-negotiation is disabled (forced mode), the PHY's + * Auto-MDIX will continue toggling the TX/RX pairs. + * + * To establish a stable link, we must select a fixed MDI mode. + * If the user has not specified a fixed MDI mode (i.e., mdix_ctrl is + * 'auto'), we default to ETH_TP_MDI. This choice of a ETH_TP_MDI mode + * mirrors the behavior the hardware would exhibit if the AUTOMDIX_EN + * strap were configured for a fixed MDI connection. 
+ */ + if (phydev->autoneg == AUTONEG_DISABLE) { + if (phydev->mdix_ctrl == ETH_TP_MDI_AUTO) + mdix_ctrl = ETH_TP_MDI; + else + mdix_ctrl = phydev->mdix_ctrl; + } else { + mdix_ctrl = phydev->mdix_ctrl; + } - switch (phydev->mdix_ctrl) { + switch (mdix_ctrl) { case ETH_TP_MDI: val = SPECIAL_CTRL_STS_OVRRD_AMDIX_; break; @@ -167,7 +186,8 @@ static int lan87xx_config_aneg(struct phy_device *phydev) SPECIAL_CTRL_STS_AMDIX_STATE_; break; case ETH_TP_MDI_AUTO: - val = SPECIAL_CTRL_STS_AMDIX_ENABLE_; + val = SPECIAL_CTRL_STS_OVRRD_AMDIX_ | + SPECIAL_CTRL_STS_AMDIX_ENABLE_; break; default: return genphy_config_aneg(phydev); @@ -183,7 +203,7 @@ static int lan87xx_config_aneg(struct phy_device *phydev) rc |= val; phy_write(phydev, SPECIAL_CTRL_STS, rc); - phydev->mdix = phydev->mdix_ctrl; + phydev->mdix = mdix_ctrl; return genphy_config_aneg(phydev); } @@ -261,6 +281,33 @@ int lan87xx_read_status(struct phy_device *phydev) } EXPORT_SYMBOL_GPL(lan87xx_read_status); +static int lan87xx_phy_config_init(struct phy_device *phydev) +{ + int rc; + + /* The LAN87xx PHY's initial MDI-X mode is determined by the AUTOMDIX_EN + * hardware strap, but the driver cannot read the strap's status. This + * creates an unpredictable initial state. + * + * To ensure consistent and reliable behavior across all boards, + * override the strap configuration on initialization and force the PHY + * into a known state with Auto-MDIX enabled, which is the expected + * default for modern hardware. + */ + rc = phy_modify(phydev, SPECIAL_CTRL_STS, + SPECIAL_CTRL_STS_OVRRD_AMDIX_ | + SPECIAL_CTRL_STS_AMDIX_ENABLE_ | + SPECIAL_CTRL_STS_AMDIX_STATE_, + SPECIAL_CTRL_STS_OVRRD_AMDIX_ | + SPECIAL_CTRL_STS_AMDIX_ENABLE_); + if (rc < 0) + return rc; + + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + + return smsc_phy_config_init(phydev); +} + static int lan874x_phy_config_init(struct phy_device *phydev) { u16 val; @@ -694,7 +741,7 @@ static struct phy_driver smsc_phy_driver[] = { /* basic functions */ .read_status = lan87xx_read_status, - .config_init = smsc_phy_config_init, + .config_init = lan87xx_phy_config_init, .soft_reset = smsc_phy_reset, .config_aneg = lan87xx_config_aneg, diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 531b1b6a37d1..2f8637224b69 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -4229,8 +4229,6 @@ static void lan78xx_disconnect(struct usb_interface *intf) if (!dev) return; - netif_napi_del(&dev->napi); - udev = interface_to_usbdev(intf); net = dev->net; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 944a33361dae..7e0608f56835 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1426,6 +1426,7 @@ static const struct usb_device_id products[] = { {QMI_QUIRK_SET_DTR(0x22de, 0x9051, 2)}, /* Hucom Wireless HM-211S/K */ {QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */ {QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */ + {QMI_QUIRK_SET_DTR(0x1e0e, 0x9071, 3)}, /* SIMCom 8230C ++ */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0195, 4)}, /* Quectel EG95 */ diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c index 3d239b8d1a1b..52e9fd8116f9 100644 --- a/drivers/net/usb/sierra_net.c +++ b/drivers/net/usb/sierra_net.c @@ -689,6 +689,10 @@ static int sierra_net_bind(struct usbnet *dev, struct usb_interface *intf) status); return -ENODEV; } + if (!dev->status) { + 
dev_err(&dev->udev->dev, "No status endpoint found"); + return -ENODEV; + } /* Initialize sierra private data */ priv = kzalloc(sizeof *priv, GFP_KERNEL); if (!priv) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 6d36cb204f9b..fb3908798458 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -765,6 +765,26 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) return (unsigned long)mrg_ctx & ((1 << MRG_CTX_HEADER_SHIFT) - 1); } +static int check_mergeable_len(struct net_device *dev, void *mrg_ctx, + unsigned int len) +{ + unsigned int headroom, tailroom, room, truesize; + + truesize = mergeable_ctx_to_truesize(mrg_ctx); + headroom = mergeable_ctx_to_headroom(mrg_ctx); + tailroom = headroom ? sizeof(struct skb_shared_info) : 0; + room = SKB_DATA_ALIGN(headroom + tailroom); + + if (len > truesize - room) { + pr_debug("%s: rx error: len %u exceeds truesize %lu\n", + dev->name, len, (unsigned long)(truesize - room)); + DEV_STATS_INC(dev, rx_length_errors); + return -1; + } + + return 0; +} + static struct sk_buff *virtnet_build_skb(void *buf, unsigned int buflen, unsigned int headroom, unsigned int len) @@ -1098,15 +1118,29 @@ static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len) sg->length = len; } +/* Note that @len is the length of received data without virtio header */ static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi, - struct receive_queue *rq, void *buf, u32 len) + struct receive_queue *rq, void *buf, + u32 len, bool first_buf) { struct xdp_buff *xdp; u32 bufsize; xdp = (struct xdp_buff *)buf; - bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len; + /* In virtnet_add_recvbuf_xsk, we use part of XDP_PACKET_HEADROOM for + * virtio header and ask the vhost to fill data from + * hard_start + XDP_PACKET_HEADROOM - vi->hdr_len + * The first buffer has virtio header so the remaining region for frame + * data is + * xsk_pool_get_rx_frame_size() + * While other buffers than the first one do not have virtio header, so + * the maximum frame data's length can be + * xsk_pool_get_rx_frame_size() + vi->hdr_len + */ + bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool); + if (!first_buf) + bufsize += vi->hdr_len; if (unlikely(len > bufsize)) { pr_debug("%s: rx error: len %u exceeds truesize %u\n", @@ -1231,7 +1265,7 @@ static int xsk_append_merge_buffer(struct virtnet_info *vi, u64_stats_add(&stats->bytes, len); - xdp = buf_to_xdp(vi, rq, buf, len); + xdp = buf_to_xdp(vi, rq, buf, len, false); if (!xdp) goto err; @@ -1329,7 +1363,7 @@ static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queu u64_stats_add(&stats->bytes, len); - xdp = buf_to_xdp(vi, rq, buf, len); + xdp = buf_to_xdp(vi, rq, buf, len, true); if (!xdp) return; @@ -1649,7 +1683,8 @@ static unsigned int virtnet_get_headroom(struct virtnet_info *vi) * across multiple buffers (num_buf > 1), and we make sure buffers * have enough headroom. */ -static struct page *xdp_linearize_page(struct receive_queue *rq, +static struct page *xdp_linearize_page(struct net_device *dev, + struct receive_queue *rq, int *num_buf, struct page *p, int offset, @@ -1669,18 +1704,27 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, memcpy(page_address(page) + page_off, page_address(p) + offset, *len); page_off += *len; + /* Only mergeable mode can go inside this while loop. In small mode, + * *num_buf == 1, so it cannot go inside. 
+ */ while (--*num_buf) { unsigned int buflen; void *buf; + void *ctx; int off; - buf = virtnet_rq_get_buf(rq, &buflen, NULL); + buf = virtnet_rq_get_buf(rq, &buflen, &ctx); if (unlikely(!buf)) goto err_buf; p = virt_to_head_page(buf); off = buf - page_address(p); + if (check_mergeable_len(dev, ctx, buflen)) { + put_page(p); + goto err_buf; + } + /* guard against a misconfigured or uncooperative backend that * is sending packet larger than the MTU. */ @@ -1769,7 +1813,7 @@ static struct sk_buff *receive_small_xdp(struct net_device *dev, headroom = vi->hdr_len + header_offset; buflen = SKB_DATA_ALIGN(GOOD_PACKET_LEN + headroom) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - xdp_page = xdp_linearize_page(rq, &num_buf, page, + xdp_page = xdp_linearize_page(dev, rq, &num_buf, page, offset, header_offset, &tlen); if (!xdp_page) @@ -2104,7 +2148,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi, */ if (!xdp_prog->aux->xdp_has_frags) { /* linearize data for XDP */ - xdp_page = xdp_linearize_page(rq, num_buf, + xdp_page = xdp_linearize_page(vi->dev, rq, num_buf, *page, offset, XDP_PACKET_HEADROOM, len); @@ -3231,6 +3275,12 @@ static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq, { int qindex, err; + if (ring_num <= MAX_SKB_FRAGS + 2) { + netdev_err(vi->dev, "tx size (%d) cannot be smaller than %d\n", + ring_num, MAX_SKB_FRAGS + 2); + return -EINVAL; + } + qindex = sq - vi->sq; virtnet_tx_pause(vi, sq); @@ -6758,7 +6808,7 @@ static int virtnet_probe(struct virtio_device *vdev) otherwise get link status from config. */ netif_carrier_off(dev); if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) { - virtnet_config_changed_work(&vi->config_work); + virtio_config_changed(vi->vdev); } else { vi->status = VIRTIO_NET_S_LINK_UP; virtnet_update_settings(vi); diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index 1623298ba2c4..eebdcc16e8fc 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -1868,8 +1868,7 @@ static void ath12k_dp_rx_h_csum_offload(struct ath12k *ar, struct sk_buff *msdu) CHECKSUM_NONE : CHECKSUM_UNNECESSARY; } -static int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, - enum hal_encrypt_type enctype) +int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype) { switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h index eb1f92559179..4232091d9e32 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.h +++ b/drivers/net/wireless/ath/ath12k/dp_rx.h @@ -143,4 +143,7 @@ int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, const void *ptr, void *data), void *data); + +int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype); + #endif /* ATH12K_DP_RX_H */ diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index 734e3da4cbf1..21e07b5cee57 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -227,7 +227,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb); struct hal_tcl_data_cmd *hal_tcl_desc; struct hal_tx_msdu_ext_desc *msg; - struct sk_buff *skb_ext_desc; + struct sk_buff *skb_ext_desc = NULL; struct hal_srng *tcl_ring; struct ieee80211_hdr *hdr = (void *)skb->data; struct dp_tx_ring *tx_ring; 
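Setting skb_ext_desc to NULL at its declaration is what makes the reworked error labels in the next hunk safe: kfree_skb() treats a NULL pointer as a no-op, so a single fail_free_ext_skb label can serve both the path that allocated the extension descriptor and the paths that never did, while each label below it unwinds exactly one earlier acquisition. A generic sketch of this NULL-init, fall-through-label unwind, with demo_* names that are illustrative rather than the ath12k API:

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>

    struct demo_dev {
            struct device *dev;
            bool needs_ext;
    };

    /* Stand-in for the real hardware submit step. */
    static int demo_ring_submit(struct demo_dev *dd, dma_addr_t paddr,
                                struct sk_buff *ext)
    {
            return 0;
    }

    static int demo_tx(struct demo_dev *dd, struct sk_buff *skb)
    {
            struct sk_buff *ext = NULL; /* optional resource: starts unowned */
            dma_addr_t paddr;
            int ret;

            paddr = dma_map_single(dd->dev, skb->data, skb->len, DMA_TO_DEVICE);
            ret = dma_mapping_error(dd->dev, paddr);
            if (ret)
                    return ret; /* nothing acquired yet, nothing to unwind */

            if (dd->needs_ext) {
                    ext = alloc_skb(64, GFP_ATOMIC);
                    if (!ext) {
                            ret = -ENOMEM;
                            goto err_unmap; /* skip the free; ext is NULL */
                    }
            }

            ret = demo_ring_submit(dd, paddr, ext);
            if (ret)
                    goto err_free_ext;

            return 0;

    err_free_ext:
            kfree_skb(ext); /* no-op when ext == NULL */
    err_unmap:
            dma_unmap_single(dd->dev, paddr, skb->len, DMA_TO_DEVICE);
            return ret;
    }

The ordering rule is the usual kernel one: labels appear in reverse order of acquisition, so jumping to the label for the last successful step releases everything acquired so far and nothing more.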
@@ -397,17 +397,15 @@ map: if (ret < 0) { ath12k_dbg(ab, ATH12K_DBG_DP_TX, "Failed to add HTT meta data, dropping packet\n"); - goto fail_unmap_dma; + goto fail_free_ext_skb; } } ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data, skb_ext_desc->len, DMA_TO_DEVICE); ret = dma_mapping_error(ab->dev, ti.paddr); - if (ret) { - kfree_skb(skb_ext_desc); - goto fail_unmap_dma; - } + if (ret) + goto fail_free_ext_skb; ti.data_len = skb_ext_desc->len; ti.type = HAL_TCL_DESC_TYPE_EXT_DESC; @@ -443,7 +441,7 @@ map: ring_selector++; } - goto fail_unmap_dma; + goto fail_unmap_dma_ext; } ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti); @@ -459,13 +457,16 @@ map: return 0; -fail_unmap_dma: - dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE); - +fail_unmap_dma_ext: if (skb_cb->paddr_ext_desc) dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE); +fail_free_ext_skb: + kfree_skb(skb_ext_desc); + +fail_unmap_dma: + dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE); fail_remove_tx_buf: ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id); diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index fbf5d5728357..4ca684278c36 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -3864,8 +3864,8 @@ static int ath12k_install_key(struct ath12k_vif *arvif, switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: arg.key_cipher = WMI_CIPHER_AES_CCM; - /* TODO: Re-check if flag is valid */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; case WLAN_CIPHER_SUITE_TKIP: @@ -3873,12 +3873,10 @@ static int ath12k_install_key(struct ath12k_vif *arvif, arg.key_txmic_len = 8; arg.key_rxmic_len = 8; break; - case WLAN_CIPHER_SUITE_CCMP_256: - arg.key_cipher = WMI_CIPHER_AES_CCM; - break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: arg.key_cipher = WMI_CIPHER_AES_GCM; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; default: ath12k_warn(ar->ab, "cipher %d is not supported\n", key->cipher); @@ -5725,6 +5723,8 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif, struct ath12k_base *ab = ar->ab; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info *info; + enum hal_encrypt_type enctype; + unsigned int mic_len; dma_addr_t paddr; int buf_id; int ret; @@ -5738,12 +5738,16 @@ static int ath12k_mac_mgmt_tx_wmi(struct ath12k *ar, struct ath12k_vif *arvif, return -ENOSPC; info = IEEE80211_SKB_CB(skb); - if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) { + if ((ATH12K_SKB_CB(skb)->flags & ATH12K_SKB_CIPHER_SET) && + !(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) { if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { - skb_put(skb, IEEE80211_CCMP_MIC_LEN); + enctype = + ath12k_dp_tx_get_encrypt_type(ATH12K_SKB_CB(skb)->cipher); + mic_len = ath12k_dp_rx_crypto_mic_len(ar, enctype); + skb_put(skb, mic_len); } } diff --git a/drivers/net/wireless/ath/ath6kl/bmi.c b/drivers/net/wireless/ath/ath6kl/bmi.c index af98e871199d..5a9e93fd1ef4 100644 --- a/drivers/net/wireless/ath/ath6kl/bmi.c +++ b/drivers/net/wireless/ath/ath6kl/bmi.c @@ -87,7 +87,9 @@ int ath6kl_bmi_get_target_info(struct ath6kl *ar, * We need to do some backwards compatibility to make this work. 
*/ if (le32_to_cpu(targ_info->byte_count) != sizeof(*targ_info)) { - WARN_ON(1); + ath6kl_err("mismatched byte count %d vs. expected %zd\n", + le32_to_cpu(targ_info->byte_count), + sizeof(*targ_info)); return -EINVAL; } diff --git a/drivers/net/wireless/marvell/mwifiex/util.c b/drivers/net/wireless/marvell/mwifiex/util.c index 1f1f6280a0f2..86e20edb593b 100644 --- a/drivers/net/wireless/marvell/mwifiex/util.c +++ b/drivers/net/wireless/marvell/mwifiex/util.c @@ -477,7 +477,9 @@ mwifiex_process_mgmt_packet(struct mwifiex_private *priv, "auth: receive authentication from %pM\n", ieee_hdr->addr3); } else { - if (!priv->wdev.connected) + if (!priv->wdev.connected || + !ether_addr_equal(ieee_hdr->addr3, + priv->curr_bss_params.bss_descriptor.mac_address)) return 0; if (ieee80211_is_deauth(ieee_hdr->frame_control)) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 452579ccc492..a6324f6ead78 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -1696,8 +1696,8 @@ int mt76_connac_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, if (!sreq->ssids[i].ssid_len) continue; - req->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); - memcpy(req->ssids[i].ssid, sreq->ssids[i].ssid, + req->ssids[n_ssids].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); + memcpy(req->ssids[n_ssids].ssid, sreq->ssids[i].ssid, sreq->ssids[i].ssid_len); n_ssids++; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 9c245c23a2d7..5b832f1aa00d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -1173,6 +1173,9 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw, struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); + if (!msta->deflink.wcid.sta) + return; + mt792x_mutex_acquire(dev); if (enabled) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c index 14553dcc61c5..02899320da5c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c @@ -52,6 +52,8 @@ static int mt7925_thermal_init(struct mt792x_phy *phy) name = devm_kasprintf(&wiphy->dev, GFP_KERNEL, "mt7925_%s", wiphy_name(wiphy)); + if (!name) + return -ENOMEM; hwmon = devm_hwmon_device_register_with_groups(&wiphy->dev, name, phy, mt7925_hwmon_groups); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c index d2a98c92e114..a635b223dab1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -1155,7 +1155,12 @@ static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev, struct mt792x_bss_conf *mconf; mconf = mt792x_link_conf_to_mconf(link_conf); - mt792x_mac_link_bss_remove(dev, mconf, mlink); + + if (ieee80211_vif_is_mld(vif)) + mt792x_mac_link_bss_remove(dev, mconf, mlink); + else + mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf, + link_sta, false); } spin_lock_bh(&mdev->sta_poll_lock); @@ -1175,6 +1180,31 @@ mt7925_mac_sta_remove_links(struct mt792x_dev *dev, struct ieee80211_vif *vif, struct mt76_wcid *wcid; unsigned int link_id; + /* clean up bss before starec */ + for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct 
ieee80211_link_sta *link_sta; + struct ieee80211_bss_conf *link_conf; + struct mt792x_bss_conf *mconf; + struct mt792x_link_sta *mlink; + + link_sta = mt792x_sta_to_link_sta(vif, sta, link_id); + if (!link_sta) + continue; + + mlink = mt792x_sta_to_link(msta, link_id); + if (!mlink) + continue; + + link_conf = mt792x_vif_to_bss_conf(vif, link_id); + if (!link_conf) + continue; + + mconf = mt792x_link_conf_to_mconf(link_conf); + + mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf, + link_sta, false); + } + for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) { struct ieee80211_link_sta *link_sta; struct mt792x_link_sta *mlink; @@ -1213,44 +1243,14 @@ void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, { struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - struct { - struct { - u8 omac_idx; - u8 band_idx; - __le16 pad; - } __packed hdr; - struct req_tlv { - __le16 tag; - __le16 len; - u8 active; - u8 link_idx; /* hw link idx */ - u8 omac_addr[ETH_ALEN]; - } __packed tlv; - } dev_req = { - .hdr = { - .omac_idx = 0, - .band_idx = 0, - }, - .tlv = { - .tag = cpu_to_le16(DEV_INFO_ACTIVE), - .len = cpu_to_le16(sizeof(struct req_tlv)), - .active = true, - }, - }; unsigned long rem; rem = ieee80211_vif_is_mld(vif) ? msta->valid_links : BIT(0); mt7925_mac_sta_remove_links(dev, vif, sta, rem); - if (ieee80211_vif_is_mld(vif)) { - mt7925_mcu_set_dbdc(&dev->mphy, false); - - /* recovery omac address for the legacy interface */ - memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN); - mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE), - &dev_req, sizeof(dev_req), true); - } + if (ieee80211_vif_is_mld(vif)) + mt7925_mcu_del_dev(mdev, vif); if (vif->type == NL80211_IFTYPE_STATION) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; @@ -1296,22 +1296,22 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_RX_START: mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn, params->buf_size); - mt7925_mcu_uni_rx_ba(dev, vif, params, true); + mt7925_mcu_uni_rx_ba(dev, params, true); break; case IEEE80211_AMPDU_RX_STOP: mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid); - mt7925_mcu_uni_rx_ba(dev, vif, params, false); + mt7925_mcu_uni_rx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_OPERATIONAL: mtxq->aggr = true; mtxq->send_bar = false; - mt7925_mcu_uni_tx_ba(dev, vif, params, true); + mt7925_mcu_uni_tx_ba(dev, params, true); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; clear_bit(tid, &msta->deflink.wcid.ampdu_state); - mt7925_mcu_uni_tx_ba(dev, vif, params, false); + mt7925_mcu_uni_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: set_bit(tid, &msta->deflink.wcid.ampdu_state); @@ -1320,7 +1320,7 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; clear_bit(tid, &msta->deflink.wcid.ampdu_state); - mt7925_mcu_uni_tx_ba(dev, vif, params, false); + mt7925_mcu_uni_tx_ba(dev, params, false); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } @@ -1565,6 +1565,9 @@ static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw, unsigned long valid = mvif->valid_links; u8 i; + if (!msta->vif) + return; + mt792x_mutex_acquire(dev); valid = ieee80211_vif_is_mld(vif) ? 
mvif->valid_links : BIT(0); @@ -1579,6 +1582,9 @@ static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw, else clear_bit(MT_WCID_FLAG_HDR_TRANS, &mlink->wcid.flags); + if (!mlink->wcid.sta) + continue; + mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta, i); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index 57a1db394dda..e42b4f0abbe7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -529,10 +529,10 @@ void mt7925_mcu_rx_event(struct mt792x_dev *dev, struct sk_buff *skb) static int mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, - struct mt76_wcid *wcid, struct ieee80211_ampdu_params *params, bool enable, bool tx) { + struct mt76_wcid *wcid = (struct mt76_wcid *)params->sta->drv_priv; struct sta_rec_ba_uni *ba; struct sk_buff *skb; struct tlv *tlv; @@ -560,60 +560,28 @@ mt7925_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, /** starec & wtbl **/ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev, - struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params, bool enable) { struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct mt792x_link_sta *mlink; - struct mt792x_bss_conf *mconf; - unsigned long usable_links = ieee80211_vif_usable_links(vif); - struct mt76_wcid *wcid; - u8 link_id, ret; - - for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { - mconf = mt792x_vif_to_link(mvif, link_id); - mlink = mt792x_sta_to_link(msta, link_id); - wcid = &mlink->wcid; - - if (enable && !params->amsdu) - mlink->wcid.amsdu = false; + struct mt792x_vif *mvif = msta->vif; - ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params, - enable, true); - if (ret < 0) - break; - } + if (enable && !params->amsdu) + msta->deflink.wcid.amsdu = false; - return ret; + return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params, + enable, true); } int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev, - struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params, bool enable) { struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct mt792x_link_sta *mlink; - struct mt792x_bss_conf *mconf; - unsigned long usable_links = ieee80211_vif_usable_links(vif); - struct mt76_wcid *wcid; - u8 link_id, ret; - - for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { - mconf = mt792x_vif_to_link(mvif, link_id); - mlink = mt792x_sta_to_link(msta, link_id); - wcid = &mlink->wcid; - - ret = mt7925_mcu_sta_ba(&dev->mt76, &mconf->mt76, wcid, params, - enable, false); - if (ret < 0) - break; - } + struct mt792x_vif *mvif = msta->vif; - return ret; + return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params, + enable, false); } static int mt7925_mcu_read_eeprom(struct mt792x_dev *dev, u32 offset, u8 *val) @@ -2694,6 +2662,62 @@ int mt7925_mcu_set_timing(struct mt792x_phy *phy, MCU_UNI_CMD(BSS_INFO_UPDATE), true); } +void mt7925_mcu_del_dev(struct mt76_dev *mdev, + struct ieee80211_vif *vif) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct { + struct { + u8 omac_idx; + u8 band_idx; + __le16 pad; + } __packed hdr; + struct req_tlv { + __le16 tag; + __le16 len; + u8 active; + u8 link_idx; /* hw link idx */ + u8 omac_addr[ETH_ALEN]; + } __packed tlv; + } dev_req = { + .tlv = { + .tag = cpu_to_le16(DEV_INFO_ACTIVE), + .len = 
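/*
 * [Editor's note.] The requests built in this file are little-endian
 * wire structures, so every multi-byte field goes through
 * cpu_to_le16()/cpu_to_le32(). A minimal TLV header of this shape
 * (names illustrative):
 *
 *	struct example_tlv {
 *		__le16 tag;
 *		__le16 len;
 *	} __packed tlv = {
 *		.tag = cpu_to_le16(EXAMPLE_TAG),
 *		.len = cpu_to_le16(sizeof(tlv)),
 *	};
 */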
cpu_to_le16(sizeof(struct req_tlv)), + .active = true, + }, + }; + struct { + struct { + u8 bss_idx; + u8 pad[3]; + } __packed hdr; + struct mt76_connac_bss_basic_tlv basic; + } basic_req = { + .basic = { + .tag = cpu_to_le16(UNI_BSS_INFO_BASIC), + .len = cpu_to_le16(sizeof(struct mt76_connac_bss_basic_tlv)), + .active = true, + .conn_state = 1, + }, + }; + + dev_req.hdr.omac_idx = mvif->bss_conf.mt76.omac_idx; + dev_req.hdr.band_idx = mvif->bss_conf.mt76.band_idx; + + basic_req.hdr.bss_idx = mvif->bss_conf.mt76.idx; + basic_req.basic.omac_idx = mvif->bss_conf.mt76.omac_idx; + basic_req.basic.band_idx = mvif->bss_conf.mt76.band_idx; + basic_req.basic.link_idx = mvif->bss_conf.mt76.link_idx; + + mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), + &basic_req, sizeof(basic_req), true); + + /* recovery omac address for the legacy interface */ + memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN); + mt76_mcu_send_msg(mdev, MCU_UNI_CMD(DEV_INFO_UPDATE), + &dev_req, sizeof(dev_req), true); +} + int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, struct ieee80211_chanctx_conf *ctx, struct ieee80211_bss_conf *link_conf, @@ -2823,8 +2847,8 @@ int mt7925_mcu_hw_scan(struct mt76_phy *phy, struct ieee80211_vif *vif, if (!sreq->ssids[i].ssid_len) continue; - ssid->ssids[i].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); - memcpy(ssid->ssids[i].ssid, sreq->ssids[i].ssid, + ssid->ssids[n_ssids].ssid_len = cpu_to_le32(sreq->ssids[i].ssid_len); + memcpy(ssid->ssids[n_ssids].ssid, sreq->ssids[i].ssid, sreq->ssids[i].ssid_len); n_ssids++; } diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h index 780c5921679a..ee89d5778adf 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h @@ -627,6 +627,8 @@ int mt7925_mcu_sched_scan_req(struct mt76_phy *phy, int mt7925_mcu_sched_scan_enable(struct mt76_phy *phy, struct ieee80211_vif *vif, bool enable); +void mt7925_mcu_del_dev(struct mt76_dev *mdev, + struct ieee80211_vif *vif); int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, struct ieee80211_chanctx_conf *ctx, struct ieee80211_bss_conf *link_conf, diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h index 4ad779329b8f..c83b8a210498 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h @@ -245,11 +245,9 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev, struct ieee80211_vif *vif, bool enable); int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev, - struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params, bool enable); int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev, - struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params, bool enable); void mt7925_scan_work(struct work_struct *work); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h index 547489092c29..341987e47f67 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/regs.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/regs.h @@ -58,7 +58,7 @@ #define MT_INT_TX_DONE_MCU (MT_INT_TX_DONE_MCU_WM | \ MT_INT_TX_DONE_FWDL) -#define MT_INT_TX_DONE_ALL (MT_INT_TX_DONE_MCU_WM | \ +#define MT_INT_TX_DONE_ALL (MT_INT_TX_DONE_MCU | \ MT_INT_TX_DONE_BAND0 | \ GENMASK(18, 4)) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c index eface610178d..f7f3a2340c39 100644 --- 
a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.c @@ -108,7 +108,7 @@ exit_free_device: } EXPORT_SYMBOL_GPL(rt2x00soc_probe); -int rt2x00soc_remove(struct platform_device *pdev) +void rt2x00soc_remove(struct platform_device *pdev) { struct ieee80211_hw *hw = platform_get_drvdata(pdev); struct rt2x00_dev *rt2x00dev = hw->priv; @@ -119,8 +119,6 @@ int rt2x00soc_remove(struct platform_device *pdev) rt2x00lib_remove_dev(rt2x00dev); rt2x00soc_free_reg(rt2x00dev); ieee80211_free_hw(hw); - - return 0; } EXPORT_SYMBOL_GPL(rt2x00soc_remove); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h index 021fd06b3627..d6226b8a10e0 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00soc.h @@ -17,7 +17,7 @@ * SoC driver handlers. */ int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops); -int rt2x00soc_remove(struct platform_device *pdev); +void rt2x00soc_remove(struct platform_device *pdev); #ifdef CONFIG_PM int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state); int rt2x00soc_resume(struct platform_device *pdev); diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c index f90c33d19b39..8fd7be37e209 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c @@ -583,7 +583,11 @@ void zd_mac_tx_to_dev(struct sk_buff *skb, int error) skb_queue_tail(q, skb); while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) { - zd_mac_tx_status(hw, skb_dequeue(q), + skb = skb_dequeue(q); + if (!skb) + break; + + zd_mac_tx_status(hw, skb, mac->ack_pending ? mac->ack_signal : 0, NULL); mac->ack_pending = 0; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index eca764fede48..9e223574db7f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -375,12 +375,12 @@ static void nvme_log_err_passthru(struct request *req) nr->status & NVME_SC_MASK, /* Status Code */ nr->status & NVME_STATUS_MORE ? "MORE " : "", nr->status & NVME_STATUS_DNR ? 
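/*
 * [Editor's note.] The nvme hunk below fixes two distinct bugs: the
 * command dwords are __le32 on the wire and must be converted before
 * logging, and the final line printed cdw14 twice instead of cdw15.
 * With the fields annotated as __le32, sparse flags the missing
 * conversion automatically:
 *
 *	__le32 cdw10;
 *	...
 *	pr_info("cdw10=0x%x\n", le32_to_cpu(cdw10));
 */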
"DNR " : "", - nr->cmd->common.cdw10, - nr->cmd->common.cdw11, - nr->cmd->common.cdw12, - nr->cmd->common.cdw13, - nr->cmd->common.cdw14, - nr->cmd->common.cdw14); + le32_to_cpu(nr->cmd->common.cdw10), + le32_to_cpu(nr->cmd->common.cdw11), + le32_to_cpu(nr->cmd->common.cdw12), + le32_to_cpu(nr->cmd->common.cdw13), + le32_to_cpu(nr->cmd->common.cdw14), + le32_to_cpu(nr->cmd->common.cdw15)); } enum nvme_disposition { @@ -757,6 +757,10 @@ blk_status_t nvme_fail_nonready_command(struct nvme_ctrl *ctrl, !test_bit(NVME_CTRL_FAILFAST_EXPIRED, &ctrl->flags) && !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH)) return BLK_STS_RESOURCE; + + if (!(rq->rq_flags & RQF_DONTPREP)) + nvme_clear_nvme_request(rq); + return nvme_host_path_error(rq); } EXPORT_SYMBOL_GPL(nvme_fail_nonready_command); @@ -3854,7 +3858,7 @@ static void nvme_ns_add_to_ctrl_list(struct nvme_ns *ns) return; } } - list_add(&ns->list, &ns->ctrl->namespaces); + list_add_rcu(&ns->list, &ns->ctrl->namespaces); } static void nvme_alloc_ns(struct nvme_ctrl *ctrl, struct nvme_ns_info *info) diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h index 190f55e6d753..3062562c096a 100644 --- a/drivers/nvme/target/nvmet.h +++ b/drivers/nvme/target/nvmet.h @@ -714,6 +714,8 @@ static inline void nvmet_req_bio_put(struct nvmet_req *req, struct bio *bio) { if (bio != &req->b.inline_bio) bio_put(bio); + else + bio_uninit(bio); } #ifdef CONFIG_NVME_TARGET_AUTH diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 259ad77c03c5..6268b18d2456 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -1941,10 +1941,10 @@ static void nvmet_tcp_alloc_queue(struct nvmet_tcp_port *port, struct sock *sk = queue->sock->sk; /* Restore the default callbacks before starting upcall */ - read_lock_bh(&sk->sk_callback_lock); + write_lock_bh(&sk->sk_callback_lock); sk->sk_user_data = NULL; sk->sk_data_ready = port->data_ready; - read_unlock_bh(&sk->sk_callback_lock); + write_unlock_bh(&sk->sk_callback_lock); if (!nvmet_tcp_try_peek_pdu(queue)) { if (!nvmet_tcp_tls_handshake(queue)) return; diff --git a/drivers/nvmem/imx-ocotp-ele.c b/drivers/nvmem/imx-ocotp-ele.c index ca6dd71d8a2e..7807ec0e2d18 100644 --- a/drivers/nvmem/imx-ocotp-ele.c +++ b/drivers/nvmem/imx-ocotp-ele.c @@ -12,6 +12,7 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/slab.h> +#include <linux/if_ether.h> /* ETH_ALEN */ enum fuse_type { FUSE_FSB = BIT(0), @@ -118,9 +119,11 @@ static int imx_ocotp_cell_pp(void *context, const char *id, int index, int i; /* Deal with some post processing of nvmem cell data */ - if (id && !strcmp(id, "mac-address")) + if (id && !strcmp(id, "mac-address")) { + bytes = min(bytes, ETH_ALEN); for (i = 0; i < bytes / 2; i++) swap(buf[i], buf[bytes - i - 1]); + } return 0; } diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c index 79dd4fda0329..7bf7656d4f96 100644 --- a/drivers/nvmem/imx-ocotp.c +++ b/drivers/nvmem/imx-ocotp.c @@ -23,6 +23,7 @@ #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/delay.h> +#include <linux/if_ether.h> /* ETH_ALEN */ #define IMX_OCOTP_OFFSET_B0W0 0x400 /* Offset from base address of the * OTP Bank0 Word0 @@ -227,9 +228,11 @@ static int imx_ocotp_cell_pp(void *context, const char *id, int index, int i; /* Deal with some post processing of nvmem cell data */ - if (id && !strcmp(id, "mac-address")) + if (id && !strcmp(id, "mac-address")) { + bytes = min(bytes, ETH_ALEN); for (i = 0; i < bytes / 2; i++) swap(buf[i], 
buf[bytes - i - 1]); + } return 0; } diff --git a/drivers/nvmem/layouts/u-boot-env.c b/drivers/nvmem/layouts/u-boot-env.c index 731e6f4f12b2..21f6dcf905dd 100644 --- a/drivers/nvmem/layouts/u-boot-env.c +++ b/drivers/nvmem/layouts/u-boot-env.c @@ -92,7 +92,7 @@ int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem, size_t crc32_data_offset; size_t crc32_data_len; size_t crc32_offset; - __le32 *crc32_addr; + uint32_t *crc32_addr; size_t data_offset; size_t data_len; size_t dev_size; @@ -143,8 +143,8 @@ int u_boot_env_parse(struct device *dev, struct nvmem_device *nvmem, goto err_kfree; } - crc32_addr = (__le32 *)(buf + crc32_offset); - crc32 = le32_to_cpu(*crc32_addr); + crc32_addr = (uint32_t *)(buf + crc32_offset); + crc32 = *crc32_addr; crc32_data_len = dev_size - crc32_data_offset; data_len = dev_size - data_offset; diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c index b78e0e417324..af370628e583 100644 --- a/drivers/pci/pci-acpi.c +++ b/drivers/pci/pci-acpi.c @@ -1676,19 +1676,24 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) return NULL; root_ops = kzalloc(sizeof(*root_ops), GFP_KERNEL); - if (!root_ops) - goto free_ri; + if (!root_ops) { + kfree(ri); + return NULL; + } ri->cfg = pci_acpi_setup_ecam_mapping(root); - if (!ri->cfg) - goto free_root_ops; + if (!ri->cfg) { + kfree(ri); + kfree(root_ops); + return NULL; + } root_ops->release_info = pci_acpi_generic_release_info; root_ops->prepare_resources = pci_acpi_root_prepare_resources; root_ops->pci_ops = (struct pci_ops *)&ri->cfg->ops->pci_ops; bus = acpi_pci_root_create(root, root_ops, &ri->common, ri->cfg); if (!bus) - goto free_cfg; + return NULL; /* If we must preserve the resource configuration, claim now */ host = pci_find_host_bridge(bus); @@ -1705,14 +1710,6 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root) pcie_bus_configure_settings(child); return bus; - -free_cfg: - pci_ecam_free(ri->cfg); -free_root_ops: - kfree(root_ops); -free_ri: - kfree(ri); - return NULL; } void pcibios_add_bus(struct pci_bus *bus) diff --git a/drivers/phy/tegra/xusb-tegra186.c b/drivers/phy/tegra/xusb-tegra186.c index 23a23f2d64e5..e818f6c3980e 100644 --- a/drivers/phy/tegra/xusb-tegra186.c +++ b/drivers/phy/tegra/xusb-tegra186.c @@ -648,14 +648,15 @@ static void tegra186_utmi_bias_pad_power_on(struct tegra_xusb_padctl *padctl) udelay(100); } - if (padctl->soc->trk_hw_mode) { - value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2); - value |= USB2_TRK_HW_MODE; + value = padctl_readl(padctl, XUSB_PADCTL_USB2_BIAS_PAD_CTL2); + if (padctl->soc->trk_update_on_idle) value &= ~CYA_TRK_CODE_UPDATE_ON_IDLE; - padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2); - } else { + if (padctl->soc->trk_hw_mode) + value |= USB2_TRK_HW_MODE; + padctl_writel(padctl, value, XUSB_PADCTL_USB2_BIAS_PAD_CTL2); + + if (!padctl->soc->trk_hw_mode) clk_disable_unprepare(priv->usb2_trk_clk); - } } static void tegra186_utmi_bias_pad_power_off(struct tegra_xusb_padctl *padctl) @@ -782,13 +783,15 @@ static int tegra186_xusb_padctl_vbus_override(struct tegra_xusb_padctl *padctl, } static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl, - bool status) + struct tegra_xusb_usb2_port *port, bool status) { - u32 value; + u32 value, id_override; + int err = 0; dev_dbg(padctl->dev, "%s id override\n", status ? 
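/*
 * [Editor's note.] The rework below keys regulator_enable() and
 * regulator_disable() to actual ID-override transitions, keeping the
 * regulator's reference count balanced -- the usual shape for any
 * refcounted resource tied to a state change (sketch):
 *
 *	if (!was_grounded && ground)
 *		err = regulator_enable(supply);
 *	else if (was_grounded && !ground)
 *		err = regulator_disable(supply);
 */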
"set" : "clear"); value = padctl_readl(padctl, USB2_VBUS_ID); + id_override = value & ID_OVERRIDE(~0); if (status) { if (value & VBUS_OVERRIDE) { @@ -799,15 +802,35 @@ static int tegra186_xusb_padctl_id_override(struct tegra_xusb_padctl *padctl, value = padctl_readl(padctl, USB2_VBUS_ID); } - value &= ~ID_OVERRIDE(~0); - value |= ID_OVERRIDE_GROUNDED; + if (id_override != ID_OVERRIDE_GROUNDED) { + value &= ~ID_OVERRIDE(~0); + value |= ID_OVERRIDE_GROUNDED; + padctl_writel(padctl, value, USB2_VBUS_ID); + + err = regulator_enable(port->supply); + if (err) { + dev_err(padctl->dev, "Failed to enable regulator: %d\n", err); + return err; + } + } } else { - value &= ~ID_OVERRIDE(~0); - value |= ID_OVERRIDE_FLOATING; + if (id_override == ID_OVERRIDE_GROUNDED) { + /* + * The regulator is disabled only when the role transitions + * from USB_ROLE_HOST to USB_ROLE_NONE. + */ + err = regulator_disable(port->supply); + if (err) { + dev_err(padctl->dev, "Failed to disable regulator: %d\n", err); + return err; + } + + value &= ~ID_OVERRIDE(~0); + value |= ID_OVERRIDE_FLOATING; + padctl_writel(padctl, value, USB2_VBUS_ID); + } } - padctl_writel(padctl, value, USB2_VBUS_ID); - return 0; } @@ -826,27 +849,20 @@ static int tegra186_utmi_phy_set_mode(struct phy *phy, enum phy_mode mode, if (mode == PHY_MODE_USB_OTG) { if (submode == USB_ROLE_HOST) { - tegra186_xusb_padctl_id_override(padctl, true); - - err = regulator_enable(port->supply); + err = tegra186_xusb_padctl_id_override(padctl, port, true); + if (err) + goto out; } else if (submode == USB_ROLE_DEVICE) { tegra186_xusb_padctl_vbus_override(padctl, true); } else if (submode == USB_ROLE_NONE) { - /* - * When port is peripheral only or role transitions to - * USB_ROLE_NONE from USB_ROLE_DEVICE, regulator is not - * enabled. - */ - if (regulator_is_enabled(port->supply)) - regulator_disable(port->supply); - - tegra186_xusb_padctl_id_override(padctl, false); + err = tegra186_xusb_padctl_id_override(padctl, port, false); + if (err) + goto out; tegra186_xusb_padctl_vbus_override(padctl, false); } } - +out: mutex_unlock(&padctl->lock); - return err; } @@ -1710,7 +1726,8 @@ const struct tegra_xusb_padctl_soc tegra234_xusb_padctl_soc = { .num_supplies = ARRAY_SIZE(tegra194_xusb_padctl_supply_names), .supports_gen2 = true, .poll_trk_completed = true, - .trk_hw_mode = true, + .trk_hw_mode = false, + .trk_update_on_idle = true, .supports_lp_cfg_en = true, }; EXPORT_SYMBOL_GPL(tegra234_xusb_padctl_soc); diff --git a/drivers/phy/tegra/xusb.h b/drivers/phy/tegra/xusb.h index 6e45d194c689..d2b5f9565132 100644 --- a/drivers/phy/tegra/xusb.h +++ b/drivers/phy/tegra/xusb.h @@ -434,6 +434,7 @@ struct tegra_xusb_padctl_soc { bool need_fake_usb3_port; bool poll_trk_completed; bool trk_hw_mode; + bool trk_update_on_idle; bool supports_lp_cfg_en; }; diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c index a12766b3bc8a..debf36ce5785 100644 --- a/drivers/pinctrl/pinctrl-amd.c +++ b/drivers/pinctrl/pinctrl-amd.c @@ -933,6 +933,17 @@ static int amd_gpio_suspend_hibernate_common(struct device *dev, bool is_suspend pin, is_suspend ? "suspend" : "hibernate"); } + /* + * debounce enabled over suspend has shown issues with a GPIO + * being unable to wake the system, as we're only interested in + * the actual wakeup event, clear it. + */ + if (gpio_dev->saved_regs[i] & (DB_CNTRl_MASK << DB_CNTRL_OFF)) { + amd_gpio_set_debounce(gpio_dev, pin, 0); + pm_pr_dbg("Clearing debounce for GPIO #%d during %s.\n", + pin, is_suspend ? 
"suspend" : "hibernate"); + } + raw_spin_unlock_irqrestore(&gpio_dev->lock, flags); } diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index 018e96d921c0..553232809789 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -1035,6 +1035,25 @@ static bool msm_gpio_needs_dual_edge_parent_workaround(struct irq_data *d, test_bit(d->hwirq, pctrl->skip_wake_irqs); } +static void msm_gpio_irq_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, + unsigned int ngpios) +{ + struct msm_pinctrl *pctrl = gpiochip_get_data(gc); + const struct msm_pingroup *g; + int i; + + bitmap_fill(valid_mask, ngpios); + + for (i = 0; i < ngpios; i++) { + g = &pctrl->soc->groups[i]; + + if (g->intr_detection_width != 1 && + g->intr_detection_width != 2) + clear_bit(i, valid_mask); + } +} + static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int type) { struct gpio_chip *gc = irq_data_get_irq_chip_data(d); @@ -1438,6 +1457,7 @@ static int msm_gpio_init(struct msm_pinctrl *pctrl) girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; girq->parents[0] = pctrl->irq; + girq->init_valid_mask = msm_gpio_irq_init_valid_mask; ret = gpiochip_add_data(&pctrl->chip, pctrl); if (ret) { diff --git a/drivers/platform/mellanox/mlxbf-pmc.c b/drivers/platform/mellanox/mlxbf-pmc.c index 9ff7b487dc48..9a0220b4de3c 100644 --- a/drivers/platform/mellanox/mlxbf-pmc.c +++ b/drivers/platform/mellanox/mlxbf-pmc.c @@ -15,6 +15,7 @@ #include <linux/hwmon.h> #include <linux/platform_device.h> #include <linux/string.h> +#include <linux/string_helpers.h> #include <uapi/linux/psci.h> #define MLXBF_PMC_WRITE_REG_32 0x82000009 @@ -710,7 +711,7 @@ static const struct mlxbf_pmc_events mlxbf_pmc_llt_events[] = { {101, "GDC_BANK0_HIT_DCL_PARTIAL"}, {102, "GDC_BANK0_EVICT_DCL"}, {103, "GDC_BANK0_G_RSE_PIPE_CACHE_DATA0"}, - {103, "GDC_BANK0_G_RSE_PIPE_CACHE_DATA1"}, + {104, "GDC_BANK0_G_RSE_PIPE_CACHE_DATA1"}, {105, "GDC_BANK0_ARB_STRB"}, {106, "GDC_BANK0_ARB_WAIT"}, {107, "GDC_BANK0_GGA_STRB"}, @@ -1067,7 +1068,7 @@ static int mlxbf_pmc_get_event_num(const char *blk, const char *evt) return -ENODEV; } -/* Get the event number given the name */ +/* Get the event name given the number */ static char *mlxbf_pmc_get_event_name(const char *blk, u32 evt) { const struct mlxbf_pmc_events *events; @@ -1625,6 +1626,7 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev, attr, struct mlxbf_pmc_attribute, dev_attr); unsigned int blk_num, cnt_num; bool is_l3 = false; + char *evt_name; int evt_num; int err; @@ -1632,14 +1634,23 @@ static ssize_t mlxbf_pmc_event_store(struct device *dev, cnt_num = attr_event->index; if (isalpha(buf[0])) { + /* Remove the trailing newline character if present */ + evt_name = kstrdup_and_replace(buf, '\n', '\0', GFP_KERNEL); + if (!evt_name) + return -ENOMEM; + evt_num = mlxbf_pmc_get_event_num(pmc->block_name[blk_num], - buf); + evt_name); + kfree(evt_name); if (evt_num < 0) return -EINVAL; } else { err = kstrtouint(buf, 0, &evt_num); if (err < 0) return err; + + if (!mlxbf_pmc_get_event_name(pmc->block_name[blk_num], evt_num)) + return -EINVAL; } if (strstr(pmc->block_name[blk_num], "l3cache")) @@ -1720,13 +1731,14 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev, { struct mlxbf_pmc_attribute *attr_enable = container_of( attr, struct mlxbf_pmc_attribute, dev_attr); - unsigned int en, blk_num; + unsigned int blk_num; u32 word; int err; + bool en; blk_num = attr_enable->nr; - err = kstrtouint(buf, 0, &en); + err = 
kstrtobool(buf, &en); if (err < 0) return err; @@ -1746,14 +1758,11 @@ static ssize_t mlxbf_pmc_enable_store(struct device *dev, MLXBF_PMC_CRSPACE_PERFMON_CTL(pmc->block[blk_num].counters), MLXBF_PMC_WRITE_REG_32, word); } else { - if (en && en != 1) - return -EINVAL; - err = mlxbf_pmc_config_l3_counters(blk_num, false, !!en); if (err) return err; - if (en == 1) { + if (en) { err = mlxbf_pmc_config_l3_counters(blk_num, true, false); if (err) return err; diff --git a/drivers/platform/mellanox/mlxbf-tmfifo.c b/drivers/platform/mellanox/mlxbf-tmfifo.c index 6c834e39352d..d2c27cc0733b 100644 --- a/drivers/platform/mellanox/mlxbf-tmfifo.c +++ b/drivers/platform/mellanox/mlxbf-tmfifo.c @@ -281,7 +281,8 @@ static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo, vring->align = SMP_CACHE_BYTES; vring->index = i; vring->vdev_id = tm_vdev->vdev.id.device; - vring->drop_desc.len = VRING_DROP_DESC_MAX_LEN; + vring->drop_desc.len = cpu_to_virtio32(&tm_vdev->vdev, + VRING_DROP_DESC_MAX_LEN); dev = &tm_vdev->vdev.dev; size = vring_size(vring->num, vring->align); diff --git a/drivers/platform/mellanox/mlxreg-lc.c b/drivers/platform/mellanox/mlxreg-lc.c index 43d119e3a473..99152676dbd2 100644 --- a/drivers/platform/mellanox/mlxreg-lc.c +++ b/drivers/platform/mellanox/mlxreg-lc.c @@ -688,7 +688,7 @@ static int mlxreg_lc_completion_notify(void *handle, struct i2c_adapter *parent, if (regval & mlxreg_lc->data->mask) { mlxreg_lc->state |= MLXREG_LC_SYNCED; mlxreg_lc_state_update_locked(mlxreg_lc, MLXREG_LC_SYNCED, 1); - if (mlxreg_lc->state & ~MLXREG_LC_POWERED) { + if (!(mlxreg_lc->state & MLXREG_LC_POWERED)) { err = mlxreg_lc_power_on_off(mlxreg_lc, 1); if (err) goto mlxreg_lc_regmap_power_on_off_fail; diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c index abe7be602f84..e708521e5274 100644 --- a/drivers/platform/mellanox/nvsw-sn2201.c +++ b/drivers/platform/mellanox/nvsw-sn2201.c @@ -1088,7 +1088,7 @@ static int nvsw_sn2201_i2c_completion_notify(void *handle, int id) if (!nvsw_sn2201->main_mux_devs->adapter) { err = -ENODEV; dev_err(nvsw_sn2201->dev, "Failed to get adapter for bus %d\n", - nvsw_sn2201->cpld_devs->nr); + nvsw_sn2201->main_mux_devs->nr); goto i2c_get_adapter_main_fail; } diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index e1b142947067..4631c7bb22cd 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -58,6 +58,8 @@ obj-$(CONFIG_X86_PLATFORM_DRIVERS_HP) += hp/ # Hewlett Packard Enterprise obj-$(CONFIG_UV_SYSFS) += uv_sysfs.o +obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o + # IBM Thinkpad and Lenovo obj-$(CONFIG_IBM_RTL) += ibm_rtl.o obj-$(CONFIG_IDEAPAD_LAPTOP) += ideapad-laptop.o @@ -120,7 +122,6 @@ obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o obj-$(CONFIG_TOPSTAR_LAPTOP) += topstar-laptop.o # Platform drivers -obj-$(CONFIG_FW_ATTR_CLASS) += firmware_attributes_class.o obj-$(CONFIG_SERIAL_MULTI_INSTANTIATE) += serial-multi-instantiate.o obj-$(CONFIG_MLX_PLATFORM) += mlx-platform.o obj-$(CONFIG_TOUCHSCREEN_DMI) += touchscreen_dmi.o diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c index 2e3f6fc67c56..7ed12c1d3b34 100644 --- a/drivers/platform/x86/amd/pmc/pmc-quirks.c +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -224,6 +224,15 @@ static const struct dmi_system_id fwbug_list[] = { DMI_MATCH(DMI_BOARD_NAME, "WUJIE14-GX4HRXL"), } }, + /* https://bugzilla.kernel.org/show_bug.cgi?id=220116 */ + { + .ident = "PCSpecialist 
Lafite Pro V 14M", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PCSpecialist"), + DMI_MATCH(DMI_PRODUCT_NAME, "Lafite Pro V 14M"), + } + }, {} }; diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index a5933980ade3..90ad0045fec5 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -529,6 +529,15 @@ static const struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_zenbook_duo_kbd, }, + { + .callback = dmi_matched, + .ident = "ASUS Zenbook Duo UX8406CA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "UX8406CA"), + }, + .driver_data = &quirk_asus_zenbook_duo_kbd, + }, {}, }; diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h index 3ad33a094588..817ee7ba07ca 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h +++ b/drivers/platform/x86/dell/dell-wmi-sysman/dell-wmi-sysman.h @@ -89,6 +89,11 @@ extern struct wmi_sysman_priv wmi_priv; enum { ENUM, INT, STR, PO }; +#define ENUM_MIN_ELEMENTS 8 +#define INT_MIN_ELEMENTS 9 +#define STR_MIN_ELEMENTS 8 +#define PO_MIN_ELEMENTS 4 + enum { ATTR_NAME, DISPL_NAME_LANG_CODE, diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c index 8cc212c85266..fc2f58b4cbc6 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/enum-attributes.c @@ -23,9 +23,10 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_ENUMERATION_ATTRIBUTE_GUID); if (!obj) return -EIO; - if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) { + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < ENUM_MIN_ELEMENTS || + obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) { kfree(obj); - return -EINVAL; + return -EIO; } ret = snprintf(buf, PAGE_SIZE, "%s\n", obj->package.elements[CURRENT_VAL].string.pointer); kfree(obj); diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c index 951e75b538fa..735248064239 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/int-attributes.c @@ -25,9 +25,10 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_INTEGER_ATTRIBUTE_GUID); if (!obj) return -EIO; - if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_INTEGER) { + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < INT_MIN_ELEMENTS || + obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_INTEGER) { kfree(obj); - return -EINVAL; + return -EIO; } ret = snprintf(buf, PAGE_SIZE, "%lld\n", obj->package.elements[CURRENT_VAL].integer.value); kfree(obj); diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c index d8f1bf5e58a0..3167e06d416e 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/passobj-attributes.c @@ -26,9 +26,10 @@ static ssize_t is_enabled_show(struct kobject *kobj, struct kobj_attribute *attr obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_PASSOBJ_ATTRIBUTE_GUID); 
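/*
 * [Editor's note.] The four dell-wmi-sysman fixes in this series share
 * one rule: never index into a WMI-returned ACPI object before checking
 * that it is a package, that it has enough elements, and that the
 * element of interest has the expected type. A hypothetical helper
 * capturing the check:
 *
 *	static bool wmi_pkg_valid(const union acpi_object *obj,
 *				  u32 min_elems, int idx,
 *				  acpi_object_type type)
 *	{
 *		return obj->type == ACPI_TYPE_PACKAGE &&
 *		       obj->package.count >= min_elems &&
 *		       obj->package.elements[idx].type == type;
 *	}
 */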
if (!obj) return -EIO; - if (obj->package.elements[IS_PASS_SET].type != ACPI_TYPE_INTEGER) { + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < PO_MIN_ELEMENTS || + obj->package.elements[IS_PASS_SET].type != ACPI_TYPE_INTEGER) { kfree(obj); - return -EINVAL; + return -EIO; } ret = snprintf(buf, PAGE_SIZE, "%lld\n", obj->package.elements[IS_PASS_SET].integer.value); kfree(obj); diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c index c392f0ecf8b5..0d2c74f8d1aa 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/string-attributes.c @@ -25,9 +25,10 @@ static ssize_t current_value_show(struct kobject *kobj, struct kobj_attribute *a obj = get_wmiobj_pointer(instance_id, DELL_WMI_BIOS_STRING_ATTRIBUTE_GUID); if (!obj) return -EIO; - if (obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) { + if (obj->type != ACPI_TYPE_PACKAGE || obj->package.count < STR_MIN_ELEMENTS || + obj->package.elements[CURRENT_VAL].type != ACPI_TYPE_STRING) { kfree(obj); - return -EINVAL; + return -EIO; } ret = snprintf(buf, PAGE_SIZE, "%s\n", obj->package.elements[CURRENT_VAL].string.pointer); kfree(obj); diff --git a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c index 40ddc6eb7562..f5402b714657 100644 --- a/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c +++ b/drivers/platform/x86/dell/dell-wmi-sysman/sysman.c @@ -25,7 +25,6 @@ struct wmi_sysman_priv wmi_priv = { /* reset bios to defaults */ static const char * const reset_types[] = {"builtinsafe", "lastknowngood", "factory", "custom"}; static int reset_option = -1; -static const struct class *fw_attr_class; /** @@ -408,10 +407,10 @@ static int init_bios_attributes(int attr_type, const char *guid) return retval; switch (attr_type) { - case ENUM: min_elements = 8; break; - case INT: min_elements = 9; break; - case STR: min_elements = 8; break; - case PO: min_elements = 4; break; + case ENUM: min_elements = ENUM_MIN_ELEMENTS; break; + case INT: min_elements = INT_MIN_ELEMENTS; break; + case STR: min_elements = STR_MIN_ELEMENTS; break; + case PO: min_elements = PO_MIN_ELEMENTS; break; default: pr_err("Error: Unknown attr_type: %d\n", attr_type); return -EINVAL; @@ -541,15 +540,11 @@ static int __init sysman_init(void) goto err_exit_bios_attr_pass_interface; } - ret = fw_attributes_class_get(&fw_attr_class); - if (ret) - goto err_exit_bios_attr_pass_interface; - - wmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0), + wmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0), NULL, "%s", DRIVER_NAME); if (IS_ERR(wmi_priv.class_dev)) { ret = PTR_ERR(wmi_priv.class_dev); - goto err_unregister_class; + goto err_exit_bios_attr_pass_interface; } wmi_priv.main_dir_kset = kset_create_and_add("attributes", NULL, @@ -602,10 +597,7 @@ err_release_attributes_data: release_attributes_data(); err_destroy_classdev: - device_destroy(fw_attr_class, MKDEV(0, 0)); - -err_unregister_class: - fw_attributes_class_put(); + device_unregister(wmi_priv.class_dev); err_exit_bios_attr_pass_interface: exit_bios_attr_pass_interface(); @@ -619,8 +611,7 @@ err_exit_bios_attr_set_interface: static void __exit sysman_exit(void) { release_attributes_data(); - device_destroy(fw_attr_class, MKDEV(0, 0)); - fw_attributes_class_put(); + device_unregister(wmi_priv.class_dev); exit_bios_attr_set_interface(); exit_bios_attr_pass_interface(); 
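/*
 * [Editor's note.] With firmware_attributes_class registered once at
 * module init and exported (see the helper-module rewrite below), each
 * consumer collapses to a device_create()/device_unregister() pair.
 * Sketch with an illustrative device name:
 *
 *	dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0),
 *			    NULL, "%s", "mydriver");
 *	if (IS_ERR(dev))
 *		return PTR_ERR(dev);
 *	...
 *	device_unregister(dev);
 */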
} diff --git a/drivers/platform/x86/firmware_attributes_class.c b/drivers/platform/x86/firmware_attributes_class.c index 182a07d8ae3d..87672c49e86a 100644 --- a/drivers/platform/x86/firmware_attributes_class.c +++ b/drivers/platform/x86/firmware_attributes_class.c @@ -2,48 +2,35 @@ /* Firmware attributes class helper module */ -#include <linux/mutex.h> -#include <linux/device/class.h> #include <linux/module.h> #include "firmware_attributes_class.h" -static DEFINE_MUTEX(fw_attr_lock); -static int fw_attr_inuse; - -static const struct class firmware_attributes_class = { +const struct class firmware_attributes_class = { .name = "firmware-attributes", }; +EXPORT_SYMBOL_GPL(firmware_attributes_class); + +static __init int fw_attributes_class_init(void) +{ + return class_register(&firmware_attributes_class); +} +module_init(fw_attributes_class_init); + +static __exit void fw_attributes_class_exit(void) +{ + class_unregister(&firmware_attributes_class); +} +module_exit(fw_attributes_class_exit); int fw_attributes_class_get(const struct class **fw_attr_class) { - int err; - - mutex_lock(&fw_attr_lock); - if (!fw_attr_inuse) { /*first time class is being used*/ - err = class_register(&firmware_attributes_class); - if (err) { - mutex_unlock(&fw_attr_lock); - return err; - } - } - fw_attr_inuse++; *fw_attr_class = &firmware_attributes_class; - mutex_unlock(&fw_attr_lock); return 0; } EXPORT_SYMBOL_GPL(fw_attributes_class_get); int fw_attributes_class_put(void) { - mutex_lock(&fw_attr_lock); - if (!fw_attr_inuse) { - mutex_unlock(&fw_attr_lock); - return -EINVAL; - } - fw_attr_inuse--; - if (!fw_attr_inuse) /* No more consumers */ - class_unregister(&firmware_attributes_class); - mutex_unlock(&fw_attr_lock); return 0; } EXPORT_SYMBOL_GPL(fw_attributes_class_put); diff --git a/drivers/platform/x86/firmware_attributes_class.h b/drivers/platform/x86/firmware_attributes_class.h index 363c75f1ac1b..ef6c3764a834 100644 --- a/drivers/platform/x86/firmware_attributes_class.h +++ b/drivers/platform/x86/firmware_attributes_class.h @@ -5,6 +5,9 @@ #ifndef FW_ATTR_CLASS_H #define FW_ATTR_CLASS_H +#include <linux/device/class.h> + +extern const struct class firmware_attributes_class; int fw_attributes_class_get(const struct class **fw_attr_class); int fw_attributes_class_put(void); diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c index 2dc50152158a..00b04adb4f19 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c +++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c @@ -24,8 +24,6 @@ struct bioscfg_priv bioscfg_drv = { .mutex = __MUTEX_INITIALIZER(bioscfg_drv.mutex), }; -static const struct class *fw_attr_class; - ssize_t display_name_language_code_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf) @@ -972,11 +970,7 @@ static int __init hp_init(void) if (ret) return ret; - ret = fw_attributes_class_get(&fw_attr_class); - if (ret) - goto err_unregister_class; - - bioscfg_drv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0), + bioscfg_drv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0), NULL, "%s", DRIVER_NAME); if (IS_ERR(bioscfg_drv.class_dev)) { ret = PTR_ERR(bioscfg_drv.class_dev); @@ -1043,10 +1037,9 @@ err_release_attributes_data: release_attributes_data(); err_destroy_classdev: - device_destroy(fw_attr_class, MKDEV(0, 0)); + device_unregister(bioscfg_drv.class_dev); err_unregister_class: - fw_attributes_class_put(); hp_exit_attr_set_interface(); return ret; @@ -1055,9 +1048,8 @@ err_unregister_class: 
static void __exit hp_exit(void) { release_attributes_data(); - device_destroy(fw_attr_class, MKDEV(0, 0)); + device_unregister(bioscfg_drv.class_dev); - fw_attributes_class_put(); hp_exit_attr_set_interface(); } diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index 93aa72bff3f0..c7e8bcf3d623 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -1672,7 +1672,7 @@ static int ideapad_kbd_bl_init(struct ideapad_private *priv) priv->kbd_bl.led.name = "platform::" LED_FUNCTION_KBD_BACKLIGHT; priv->kbd_bl.led.brightness_get = ideapad_kbd_bl_led_cdev_brightness_get; priv->kbd_bl.led.brightness_set_blocking = ideapad_kbd_bl_led_cdev_brightness_set; - priv->kbd_bl.led.flags = LED_BRIGHT_HW_CHANGED; + priv->kbd_bl.led.flags = LED_BRIGHT_HW_CHANGED | LED_RETAIN_AT_SHUTDOWN; err = led_classdev_register(&priv->platform_device->dev, &priv->kbd_bl.led); if (err) @@ -1731,7 +1731,7 @@ static int ideapad_fn_lock_led_init(struct ideapad_private *priv) priv->fn_lock.led.name = "platform::" LED_FUNCTION_FNLOCK; priv->fn_lock.led.brightness_get = ideapad_fn_lock_led_cdev_get; priv->fn_lock.led.brightness_set_blocking = ideapad_fn_lock_led_cdev_set; - priv->fn_lock.led.flags = LED_BRIGHT_HW_CHANGED; + priv->fn_lock.led.flags = LED_BRIGHT_HW_CHANGED | LED_RETAIN_AT_SHUTDOWN; err = led_classdev_register(&priv->platform_device->dev, &priv->fn_lock.led); if (err) diff --git a/drivers/platform/x86/think-lmi.c b/drivers/platform/x86/think-lmi.c index 1abd8378f158..6ad2af46248b 100644 --- a/drivers/platform/x86/think-lmi.c +++ b/drivers/platform/x86/think-lmi.c @@ -192,7 +192,6 @@ static const char * const level_options[] = { [TLMI_LEVEL_MASTER] = "master", }; static struct think_lmi tlmi_priv; -static const struct class *fw_attr_class; static DEFINE_MUTEX(tlmi_mutex); static inline struct tlmi_pwd_setting *to_tlmi_pwd_setting(struct kobject *kobj) @@ -907,6 +906,7 @@ static const struct attribute_group auth_attr_group = { .is_visible = auth_attr_is_visible, .attrs = auth_attrs, }; +__ATTRIBUTE_GROUPS(auth_attr); /* ---- Attributes sysfs --------------------------------------------------------- */ static ssize_t display_name_show(struct kobject *kobj, struct kobj_attribute *attr, @@ -1122,6 +1122,7 @@ static const struct attribute_group tlmi_attr_group = { .is_visible = attr_is_visible, .attrs = tlmi_attrs, }; +__ATTRIBUTE_GROUPS(tlmi_attr); static void tlmi_attr_setting_release(struct kobject *kobj) { @@ -1141,11 +1142,13 @@ static void tlmi_pwd_setting_release(struct kobject *kobj) static const struct kobj_type tlmi_attr_setting_ktype = { .release = &tlmi_attr_setting_release, .sysfs_ops = &kobj_sysfs_ops, + .default_groups = tlmi_attr_groups, }; static const struct kobj_type tlmi_pwd_setting_ktype = { .release = &tlmi_pwd_setting_release, .sysfs_ops = &kobj_sysfs_ops, + .default_groups = auth_attr_groups, }; static ssize_t pending_reboot_show(struct kobject *kobj, struct kobj_attribute *attr, @@ -1314,21 +1317,18 @@ static struct kobj_attribute debug_cmd = __ATTR_WO(debug_cmd); /* ---- Initialisation --------------------------------------------------------- */ static void tlmi_release_attr(void) { - int i; + struct kobject *pos, *n; /* Attribute structures */ - for (i = 0; i < TLMI_SETTINGS_COUNT; i++) { - if (tlmi_priv.setting[i]) { - sysfs_remove_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group); - kobject_put(&tlmi_priv.setting[i]->kobj); - } - } sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &pending_reboot.attr); 
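/*
 * [Editor's note.] The think-lmi rework below hangs the attribute groups
 * off the kobj_type via default_groups, so kobject_init_and_add()
 * creates the sysfs files and the final kobject_put() removes them;
 * teardown then only has to walk the kset, as this function now does.
 * Sketch (example_groups comes from __ATTRIBUTE_GROUPS(example)):
 *
 *	static const struct kobj_type example_ktype = {
 *		.release	= example_release,
 *		.sysfs_ops	= &kobj_sysfs_ops,
 *		.default_groups	= example_groups,
 *	};
 *
 *	list_for_each_entry_safe(pos, n, &kset->list, entry)
 *		kobject_put(pos);
 *	kset_unregister(kset);
 */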
sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &save_settings.attr); if (tlmi_priv.can_debug_cmd && debug_support) sysfs_remove_file(&tlmi_priv.attribute_kset->kobj, &debug_cmd.attr); + list_for_each_entry_safe(pos, n, &tlmi_priv.attribute_kset->list, entry) + kobject_put(pos); + kset_unregister(tlmi_priv.attribute_kset); /* Free up any saved signatures */ @@ -1336,19 +1336,8 @@ static void tlmi_release_attr(void) kfree(tlmi_priv.pwd_admin->save_signature); /* Authentication structures */ - sysfs_remove_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group); - kobject_put(&tlmi_priv.pwd_admin->kobj); - sysfs_remove_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group); - kobject_put(&tlmi_priv.pwd_power->kobj); - - if (tlmi_priv.opcode_support) { - sysfs_remove_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group); - kobject_put(&tlmi_priv.pwd_system->kobj); - sysfs_remove_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group); - kobject_put(&tlmi_priv.pwd_hdd->kobj); - sysfs_remove_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group); - kobject_put(&tlmi_priv.pwd_nvme->kobj); - } + list_for_each_entry_safe(pos, n, &tlmi_priv.authentication_kset->list, entry) + kobject_put(pos); kset_unregister(tlmi_priv.authentication_kset); } @@ -1375,11 +1364,7 @@ static int tlmi_sysfs_init(void) { int i, ret; - ret = fw_attributes_class_get(&fw_attr_class); - if (ret) - return ret; - - tlmi_priv.class_dev = device_create(fw_attr_class, NULL, MKDEV(0, 0), + tlmi_priv.class_dev = device_create(&firmware_attributes_class, NULL, MKDEV(0, 0), NULL, "%s", "thinklmi"); if (IS_ERR(tlmi_priv.class_dev)) { ret = PTR_ERR(tlmi_priv.class_dev); @@ -1393,6 +1378,14 @@ static int tlmi_sysfs_init(void) goto fail_device_created; } + tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL, + &tlmi_priv.class_dev->kobj); + if (!tlmi_priv.authentication_kset) { + kset_unregister(tlmi_priv.attribute_kset); + ret = -ENOMEM; + goto fail_device_created; + } + for (i = 0; i < TLMI_SETTINGS_COUNT; i++) { /* Check if index is a valid setting - skip if it isn't */ if (!tlmi_priv.setting[i]) @@ -1409,12 +1402,8 @@ static int tlmi_sysfs_init(void) /* Build attribute */ tlmi_priv.setting[i]->kobj.kset = tlmi_priv.attribute_kset; - ret = kobject_add(&tlmi_priv.setting[i]->kobj, NULL, - "%s", tlmi_priv.setting[i]->display_name); - if (ret) - goto fail_create_attr; - - ret = sysfs_create_group(&tlmi_priv.setting[i]->kobj, &tlmi_attr_group); + ret = kobject_init_and_add(&tlmi_priv.setting[i]->kobj, &tlmi_attr_setting_ktype, + NULL, "%s", tlmi_priv.setting[i]->display_name); if (ret) goto fail_create_attr; } @@ -1434,55 +1423,34 @@ static int tlmi_sysfs_init(void) } /* Create authentication entries */ - tlmi_priv.authentication_kset = kset_create_and_add("authentication", NULL, - &tlmi_priv.class_dev->kobj); - if (!tlmi_priv.authentication_kset) { - ret = -ENOMEM; - goto fail_create_attr; - } tlmi_priv.pwd_admin->kobj.kset = tlmi_priv.authentication_kset; - ret = kobject_add(&tlmi_priv.pwd_admin->kobj, NULL, "%s", "Admin"); - if (ret) - goto fail_create_attr; - - ret = sysfs_create_group(&tlmi_priv.pwd_admin->kobj, &auth_attr_group); + ret = kobject_init_and_add(&tlmi_priv.pwd_admin->kobj, &tlmi_pwd_setting_ktype, + NULL, "%s", "Admin"); if (ret) goto fail_create_attr; tlmi_priv.pwd_power->kobj.kset = tlmi_priv.authentication_kset; - ret = kobject_add(&tlmi_priv.pwd_power->kobj, NULL, "%s", "Power-on"); - if (ret) - goto fail_create_attr; - - ret = sysfs_create_group(&tlmi_priv.pwd_power->kobj, &auth_attr_group); + ret = 
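/*
 * [Editor's note.] kobject_init_and_add() replaces the former
 * kobject_add() + sysfs_create_group() pair. Mind its error contract:
 * the kobject is initialised even on failure, so the unwind path must
 * still drop the reference -- here that happens via tlmi_release_attr():
 *
 *	ret = kobject_init_and_add(&obj->kobj, &example_ktype,
 *				   NULL, "%s", "name");
 *	if (ret)
 *		kobject_put(&obj->kobj);
 */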
kobject_init_and_add(&tlmi_priv.pwd_power->kobj, &tlmi_pwd_setting_ktype, + NULL, "%s", "Power-on"); if (ret) goto fail_create_attr; if (tlmi_priv.opcode_support) { tlmi_priv.pwd_system->kobj.kset = tlmi_priv.authentication_kset; - ret = kobject_add(&tlmi_priv.pwd_system->kobj, NULL, "%s", "System"); - if (ret) - goto fail_create_attr; - - ret = sysfs_create_group(&tlmi_priv.pwd_system->kobj, &auth_attr_group); + ret = kobject_init_and_add(&tlmi_priv.pwd_system->kobj, &tlmi_pwd_setting_ktype, + NULL, "%s", "System"); if (ret) goto fail_create_attr; tlmi_priv.pwd_hdd->kobj.kset = tlmi_priv.authentication_kset; - ret = kobject_add(&tlmi_priv.pwd_hdd->kobj, NULL, "%s", "HDD"); - if (ret) - goto fail_create_attr; - - ret = sysfs_create_group(&tlmi_priv.pwd_hdd->kobj, &auth_attr_group); + ret = kobject_init_and_add(&tlmi_priv.pwd_hdd->kobj, &tlmi_pwd_setting_ktype, + NULL, "%s", "HDD"); if (ret) goto fail_create_attr; tlmi_priv.pwd_nvme->kobj.kset = tlmi_priv.authentication_kset; - ret = kobject_add(&tlmi_priv.pwd_nvme->kobj, NULL, "%s", "NVMe"); - if (ret) - goto fail_create_attr; - - ret = sysfs_create_group(&tlmi_priv.pwd_nvme->kobj, &auth_attr_group); + ret = kobject_init_and_add(&tlmi_priv.pwd_nvme->kobj, &tlmi_pwd_setting_ktype, + NULL, "%s", "NVMe"); if (ret) goto fail_create_attr; } @@ -1492,9 +1460,8 @@ static int tlmi_sysfs_init(void) fail_create_attr: tlmi_release_attr(); fail_device_created: - device_destroy(fw_attr_class, MKDEV(0, 0)); + device_unregister(tlmi_priv.class_dev); fail_class_created: - fw_attributes_class_put(); return ret; } @@ -1516,8 +1483,6 @@ static struct tlmi_pwd_setting *tlmi_create_auth(const char *pwd_type, new_pwd->maxlen = tlmi_priv.pwdcfg.core.max_length; new_pwd->index = 0; - kobject_init(&new_pwd->kobj, &tlmi_pwd_setting_ktype); - return new_pwd; } @@ -1621,7 +1586,6 @@ static int tlmi_analyze(void) if (setting->possible_values) strreplace(setting->possible_values, ',', ';'); - kobject_init(&setting->kobj, &tlmi_attr_setting_ktype); tlmi_priv.setting[i] = setting; kfree(item); } @@ -1717,8 +1681,7 @@ fail_clear_attr: static void tlmi_remove(struct wmi_device *wdev) { tlmi_release_attr(); - device_destroy(fw_attr_class, MKDEV(0, 0)); - fw_attributes_class_put(); + device_unregister(tlmi_priv.class_dev); } static int tlmi_probe(struct wmi_device *wdev, const void *context) diff --git a/drivers/pmdomain/governor.c b/drivers/pmdomain/governor.c index d1a10eeebd16..600592f19669 100644 --- a/drivers/pmdomain/governor.c +++ b/drivers/pmdomain/governor.c @@ -8,6 +8,7 @@ #include <linux/pm_domain.h> #include <linux/pm_qos.h> #include <linux/hrtimer.h> +#include <linux/cpu.h> #include <linux/cpuidle.h> #include <linux/cpumask.h> #include <linux/ktime.h> @@ -349,6 +350,8 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) struct cpuidle_device *dev; ktime_t domain_wakeup, next_hrtimer; ktime_t now = ktime_get(); + struct device *cpu_dev; + s64 cpu_constraint, global_constraint; s64 idle_duration_ns; int cpu, i; @@ -359,6 +362,7 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) if (!(genpd->flags & GENPD_FLAG_CPU_DOMAIN)) return true; + global_constraint = cpu_latency_qos_limit(); /* * Find the next wakeup for any of the online CPUs within the PM domain * and its subdomains. 
Note, we only need the genpd->cpus, as it already @@ -372,8 +376,16 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) if (ktime_before(next_hrtimer, domain_wakeup)) domain_wakeup = next_hrtimer; } + + cpu_dev = get_cpu_device(cpu); + if (cpu_dev) { + cpu_constraint = dev_pm_qos_raw_resume_latency(cpu_dev); + if (cpu_constraint < global_constraint) + global_constraint = cpu_constraint; + } } + global_constraint *= NSEC_PER_USEC; /* The minimum idle duration is from now - until the next wakeup. */ idle_duration_ns = ktime_to_ns(ktime_sub(domain_wakeup, now)); if (idle_duration_ns <= 0) @@ -389,8 +401,10 @@ static bool cpu_power_down_ok(struct dev_pm_domain *pd) */ i = genpd->state_idx; do { - if (idle_duration_ns >= (genpd->states[i].residency_ns + - genpd->states[i].power_off_latency_ns)) { + if ((idle_duration_ns >= (genpd->states[i].residency_ns + + genpd->states[i].power_off_latency_ns)) && + (global_constraint >= (genpd->states[i].power_on_latency_ns + + genpd->states[i].power_off_latency_ns))) { genpd->state_idx = i; return true; } diff --git a/drivers/powercap/intel_rapl_common.c b/drivers/powercap/intel_rapl_common.c index 5e793b80fd6b..c3de52e78e01 100644 --- a/drivers/powercap/intel_rapl_common.c +++ b/drivers/powercap/intel_rapl_common.c @@ -340,12 +340,28 @@ static int set_domain_enable(struct powercap_zone *power_zone, bool mode) { struct rapl_domain *rd = power_zone_to_rapl_domain(power_zone); struct rapl_defaults *defaults = get_defaults(rd->rp); + u64 val; int ret; cpus_read_lock(); ret = rapl_write_pl_data(rd, POWER_LIMIT1, PL_ENABLE, mode); - if (!ret && defaults->set_floor_freq) + if (ret) + goto end; + + ret = rapl_read_pl_data(rd, POWER_LIMIT1, PL_ENABLE, false, &val); + if (ret) + goto end; + + if (mode != val) { + pr_debug("%s cannot be %s\n", power_zone->name, + str_enabled_disabled(mode)); + goto end; + } + + if (defaults->set_floor_freq) defaults->set_floor_freq(rd, mode); + +end: cpus_read_unlock(); return ret; diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c index 174939359ae3..3697781c0179 100644 --- a/drivers/pwm/core.c +++ b/drivers/pwm/core.c @@ -148,7 +148,7 @@ static bool pwm_state_valid(const struct pwm_state *state) * and supposed to be ignored. So also ignore any strange values and * consider the state ok. 
*/ - if (state->enabled) + if (!state->enabled) return true; if (!state->period) diff --git a/drivers/pwm/pwm-mediatek.c b/drivers/pwm/pwm-mediatek.c index 7eaab5831499..33d3554b9197 100644 --- a/drivers/pwm/pwm-mediatek.c +++ b/drivers/pwm/pwm-mediatek.c @@ -130,8 +130,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, return ret; clk_rate = clk_get_rate(pc->clk_pwms[pwm->hwpwm]); - if (!clk_rate) - return -EINVAL; + if (!clk_rate) { + ret = -EINVAL; + goto out; + } /* Make sure we use the bus clock and not the 26MHz clock */ if (pc->soc->has_ck_26m_sel) @@ -150,9 +152,9 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, } if (clkdiv > PWM_CLK_DIV_MAX) { - pwm_mediatek_clk_disable(chip, pwm); dev_err(pwmchip_parent(chip), "period of %d ns not supported\n", period_ns); - return -EINVAL; + ret = -EINVAL; + goto out; } if (pc->soc->pwm45_fixup && pwm->hwpwm > 2) { @@ -169,9 +171,10 @@ static int pwm_mediatek_config(struct pwm_chip *chip, struct pwm_device *pwm, pwm_mediatek_writel(pc, pwm->hwpwm, reg_width, cnt_period); pwm_mediatek_writel(pc, pwm->hwpwm, reg_thres, cnt_duty); +out: pwm_mediatek_clk_disable(chip, pwm); - return 0; + return ret; } static int pwm_mediatek_enable(struct pwm_chip *chip, struct pwm_device *pwm) diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 1f4698d724bb..e7f2a8b65947 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -5536,6 +5536,7 @@ static void regulator_remove_coupling(struct regulator_dev *rdev) ERR_PTR(err)); } + rdev->coupling_desc.n_coupled = 0; kfree(rdev->coupling_desc.coupled_rdevs); rdev->coupling_desc.coupled_rdevs = NULL; } diff --git a/drivers/regulator/fan53555.c b/drivers/regulator/fan53555.c index bd9447dac596..c282236959b1 100644 --- a/drivers/regulator/fan53555.c +++ b/drivers/regulator/fan53555.c @@ -147,6 +147,7 @@ struct fan53555_device_info { unsigned int slew_mask; const unsigned int *ramp_delay_table; unsigned int n_ramp_values; + unsigned int enable_time; unsigned int slew_rate; }; @@ -282,6 +283,7 @@ static int fan53526_voltages_setup_fairchild(struct fan53555_device_info *di) di->slew_mask = CTL_SLEW_MASK; di->ramp_delay_table = slew_rates; di->n_ramp_values = ARRAY_SIZE(slew_rates); + di->enable_time = 250; di->vsel_count = FAN53526_NVOLTAGES; return 0; @@ -296,10 +298,12 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di) case FAN53555_CHIP_REV_00: di->vsel_min = 600000; di->vsel_step = 10000; + di->enable_time = 400; break; case FAN53555_CHIP_REV_13: di->vsel_min = 800000; di->vsel_step = 10000; + di->enable_time = 400; break; default: dev_err(di->dev, @@ -311,13 +315,19 @@ static int fan53555_voltages_setup_fairchild(struct fan53555_device_info *di) case FAN53555_CHIP_ID_01: case FAN53555_CHIP_ID_03: case FAN53555_CHIP_ID_05: + di->vsel_min = 600000; + di->vsel_step = 10000; + di->enable_time = 400; + break; case FAN53555_CHIP_ID_08: di->vsel_min = 600000; di->vsel_step = 10000; + di->enable_time = 175; break; case FAN53555_CHIP_ID_04: di->vsel_min = 603000; di->vsel_step = 12826; + di->enable_time = 400; break; default: dev_err(di->dev, @@ -350,6 +360,7 @@ static int fan53555_voltages_setup_rockchip(struct fan53555_device_info *di) di->slew_mask = CTL_SLEW_MASK; di->ramp_delay_table = slew_rates; di->n_ramp_values = ARRAY_SIZE(slew_rates); + di->enable_time = 360; di->vsel_count = FAN53555_NVOLTAGES; return 0; @@ -372,6 +383,7 @@ static int rk8602_voltages_setup_rockchip(struct 
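/*
 * [Editor's note.] The per-revision enable_time values added in the
 * fan53555 hunks feed regulator_desc.enable_time (in microseconds); the
 * regulator core then waits that long after enabling before treating
 * the rail as up, roughly:
 *
 *	rdesc->enable_time = di->enable_time;
 */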
fan53555_device_info *di) di->slew_mask = CTL_SLEW_MASK; di->ramp_delay_table = slew_rates; di->n_ramp_values = ARRAY_SIZE(slew_rates); + di->enable_time = 360; di->vsel_count = RK8602_NVOLTAGES; return 0; @@ -395,6 +407,7 @@ static int fan53555_voltages_setup_silergy(struct fan53555_device_info *di) di->slew_mask = CTL_SLEW_MASK; di->ramp_delay_table = slew_rates; di->n_ramp_values = ARRAY_SIZE(slew_rates); + di->enable_time = 400; di->vsel_count = FAN53555_NVOLTAGES; return 0; @@ -594,6 +607,7 @@ static int fan53555_regulator_register(struct fan53555_device_info *di, rdesc->ramp_mask = di->slew_mask; rdesc->ramp_delay_table = di->ramp_delay_table; rdesc->n_ramp_values = di->n_ramp_values; + rdesc->enable_time = di->enable_time; rdesc->owner = THIS_MODULE; rdev = devm_regulator_register(di->dev, &di->desc, config); diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c index 65927fa2ef16..1bdd494cf882 100644 --- a/drivers/regulator/gpio-regulator.c +++ b/drivers/regulator/gpio-regulator.c @@ -260,8 +260,10 @@ static int gpio_regulator_probe(struct platform_device *pdev) return -ENOMEM; } - drvdata->gpiods = devm_kzalloc(dev, sizeof(struct gpio_desc *), - GFP_KERNEL); + drvdata->gpiods = devm_kcalloc(dev, config->ngpios, + sizeof(struct gpio_desc *), GFP_KERNEL); + if (!drvdata->gpiods) + return -ENOMEM; if (config->input_supply) { drvdata->desc.supply_name = devm_kstrdup(&pdev->dev, @@ -274,8 +276,6 @@ static int gpio_regulator_probe(struct platform_device *pdev) } } - if (!drvdata->gpiods) - return -ENOMEM; for (i = 0; i < config->ngpios; i++) { drvdata->gpiods[i] = devm_gpiod_get_index(dev, NULL, diff --git a/drivers/remoteproc/ti_k3_dsp_remoteproc.c b/drivers/remoteproc/ti_k3_dsp_remoteproc.c index 2ae0655ddf1d..73be3d216791 100644 --- a/drivers/remoteproc/ti_k3_dsp_remoteproc.c +++ b/drivers/remoteproc/ti_k3_dsp_remoteproc.c @@ -568,11 +568,9 @@ static int k3_dsp_reserved_mem_init(struct k3_dsp_rproc *kproc) return -EINVAL; rmem = of_reserved_mem_lookup(rmem_np); - if (!rmem) { - of_node_put(rmem_np); - return -EINVAL; - } of_node_put(rmem_np); + if (!rmem) + return -EINVAL; kproc->rmem[i].bus_addr = rmem->base; /* 64-bit address regions currently not supported */ diff --git a/drivers/remoteproc/ti_k3_m4_remoteproc.c b/drivers/remoteproc/ti_k3_m4_remoteproc.c index fba6e393635e..6cd50b16a8e8 100644 --- a/drivers/remoteproc/ti_k3_m4_remoteproc.c +++ b/drivers/remoteproc/ti_k3_m4_remoteproc.c @@ -433,11 +433,9 @@ static int k3_m4_reserved_mem_init(struct k3_m4_rproc *kproc) return -EINVAL; rmem = of_reserved_mem_lookup(rmem_np); - if (!rmem) { - of_node_put(rmem_np); - return -EINVAL; - } of_node_put(rmem_np); + if (!rmem) + return -EINVAL; kproc->rmem[i].bus_addr = rmem->base; /* 64-bit address regions currently not supported */ diff --git a/drivers/remoteproc/ti_k3_r5_remoteproc.c b/drivers/remoteproc/ti_k3_r5_remoteproc.c index 4894461aa65f..941bb130c85c 100644 --- a/drivers/remoteproc/ti_k3_r5_remoteproc.c +++ b/drivers/remoteproc/ti_k3_r5_remoteproc.c @@ -440,13 +440,36 @@ static int k3_r5_rproc_prepare(struct rproc *rproc) { struct k3_r5_rproc *kproc = rproc->priv; struct k3_r5_cluster *cluster = kproc->cluster; - struct k3_r5_core *core = kproc->core; + struct k3_r5_core *core = kproc->core, *core0, *core1; struct device *dev = kproc->dev; u32 ctrl = 0, cfg = 0, stat = 0; u64 boot_vec = 0; bool mem_init_dis; int ret; + /* + * R5 cores require to be powered on sequentially, core0 should be in + * higher power state than core1 in a cluster. 
So, wait for core0 to + * power up before proceeding to core1 and put timeout of 2sec. This + * waiting mechanism is necessary because rproc_auto_boot_callback() for + * core1 can be called before core0 due to thread execution order. + * + * By placing the wait mechanism here in .prepare() ops, this condition + * is enforced for rproc boot requests from sysfs as well. + */ + core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); + core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem); + if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1 && + !core0->released_from_reset) { + ret = wait_event_interruptible_timeout(cluster->core_transition, + core0->released_from_reset, + msecs_to_jiffies(2000)); + if (ret <= 0) { + dev_err(dev, "can not power up core1 before core0"); + return -EPERM; + } + } + ret = ti_sci_proc_get_status(core->tsp, &boot_vec, &cfg, &ctrl, &stat); if (ret < 0) return ret; @@ -463,6 +486,14 @@ static int k3_r5_rproc_prepare(struct rproc *rproc) } /* + * Notify all threads in the wait queue when core0 state has changed so + * that threads waiting for this condition can be executed. + */ + core->released_from_reset = true; + if (core == core0) + wake_up_interruptible(&cluster->core_transition); + + /* * Newer IP revisions like on J7200 SoCs support h/w auto-initialization * of TCMs, so there is no need to perform the s/w memzero. This bit is * configurable through System Firmware, the default value does perform @@ -507,10 +538,30 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc) { struct k3_r5_rproc *kproc = rproc->priv; struct k3_r5_cluster *cluster = kproc->cluster; - struct k3_r5_core *core = kproc->core; + struct k3_r5_core *core = kproc->core, *core0, *core1; struct device *dev = kproc->dev; int ret; + /* + * Ensure power-down of cores is sequential in split mode. Core1 must + * power down before Core0 to maintain the expected state. By placing + * the wait mechanism here in .unprepare() ops, this condition is + * enforced for rproc stop or shutdown requests from sysfs and device + * removal as well. + */ + core0 = list_first_entry(&cluster->cores, struct k3_r5_core, elem); + core1 = list_last_entry(&cluster->cores, struct k3_r5_core, elem); + if (cluster->mode == CLUSTER_MODE_SPLIT && core == core0 && + core1->released_from_reset) { + ret = wait_event_interruptible_timeout(cluster->core_transition, + !core1->released_from_reset, + msecs_to_jiffies(2000)); + if (ret <= 0) { + dev_err(dev, "can not power down core0 before core1"); + return -EPERM; + } + } + /* Re-use LockStep-mode reset logic for Single-CPU mode */ ret = (cluster->mode == CLUSTER_MODE_LOCKSTEP || cluster->mode == CLUSTER_MODE_SINGLECPU) ? @@ -518,6 +569,14 @@ static int k3_r5_rproc_unprepare(struct rproc *rproc) if (ret) dev_err(dev, "unable to disable cores, ret = %d\n", ret); + /* + * Notify all threads in the wait queue when core1 state has changed so + * that threads waiting for this condition can be executed. 
+ */ + core->released_from_reset = false; + if (core == core1) + wake_up_interruptible(&cluster->core_transition); + return ret; } @@ -543,7 +602,7 @@ static int k3_r5_rproc_start(struct rproc *rproc) struct k3_r5_rproc *kproc = rproc->priv; struct k3_r5_cluster *cluster = kproc->cluster; struct device *dev = kproc->dev; - struct k3_r5_core *core0, *core; + struct k3_r5_core *core; u32 boot_addr; int ret; @@ -565,21 +624,9 @@ static int k3_r5_rproc_start(struct rproc *rproc) goto unroll_core_run; } } else { - /* do not allow core 1 to start before core 0 */ - core0 = list_first_entry(&cluster->cores, struct k3_r5_core, - elem); - if (core != core0 && core0->rproc->state == RPROC_OFFLINE) { - dev_err(dev, "%s: can not start core 1 before core 0\n", - __func__); - return -EPERM; - } - ret = k3_r5_core_run(core); if (ret) return ret; - - core->released_from_reset = true; - wake_up_interruptible(&cluster->core_transition); } return 0; @@ -620,8 +667,7 @@ static int k3_r5_rproc_stop(struct rproc *rproc) { struct k3_r5_rproc *kproc = rproc->priv; struct k3_r5_cluster *cluster = kproc->cluster; - struct device *dev = kproc->dev; - struct k3_r5_core *core1, *core = kproc->core; + struct k3_r5_core *core = kproc->core; int ret; /* halt all applicable cores */ @@ -634,16 +680,6 @@ static int k3_r5_rproc_stop(struct rproc *rproc) } } } else { - /* do not allow core 0 to stop before core 1 */ - core1 = list_last_entry(&cluster->cores, struct k3_r5_core, - elem); - if (core != core1 && core1->rproc->state != RPROC_OFFLINE) { - dev_err(dev, "%s: can not stop core 0 before core 1\n", - __func__); - ret = -EPERM; - goto out; - } - ret = k3_r5_core_halt(core); if (ret) goto out; @@ -947,6 +983,13 @@ out: return ret; } +static void k3_r5_mem_release(void *data) +{ + struct device *dev = data; + + of_reserved_mem_device_release(dev); +} + static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc) { struct device *dev = kproc->dev; @@ -977,28 +1020,25 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc) return ret; } + ret = devm_add_action_or_reset(dev, k3_r5_mem_release, dev); + if (ret) + return ret; + num_rmems--; - kproc->rmem = kcalloc(num_rmems, sizeof(*kproc->rmem), GFP_KERNEL); - if (!kproc->rmem) { - ret = -ENOMEM; - goto release_rmem; - } + kproc->rmem = devm_kcalloc(dev, num_rmems, sizeof(*kproc->rmem), GFP_KERNEL); + if (!kproc->rmem) + return -ENOMEM; /* use remaining reserved memory regions for static carveouts */ for (i = 0; i < num_rmems; i++) { rmem_np = of_parse_phandle(np, "memory-region", i + 1); - if (!rmem_np) { - ret = -EINVAL; - goto unmap_rmem; - } + if (!rmem_np) + return -EINVAL; rmem = of_reserved_mem_lookup(rmem_np); - if (!rmem) { - of_node_put(rmem_np); - ret = -EINVAL; - goto unmap_rmem; - } of_node_put(rmem_np); + if (!rmem) + return -EINVAL; kproc->rmem[i].bus_addr = rmem->base; /* @@ -1013,12 +1053,11 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc) */ kproc->rmem[i].dev_addr = (u32)rmem->base; kproc->rmem[i].size = rmem->size; - kproc->rmem[i].cpu_addr = ioremap_wc(rmem->base, rmem->size); + kproc->rmem[i].cpu_addr = devm_ioremap_wc(dev, rmem->base, rmem->size); if (!kproc->rmem[i].cpu_addr) { dev_err(dev, "failed to map reserved memory#%d at %pa of size %pa\n", i + 1, &rmem->base, &rmem->size); - ret = -ENOMEM; - goto unmap_rmem; + return -ENOMEM; } dev_dbg(dev, "reserved memory%d: bus addr %pa size 0x%zx va %pK da 0x%x\n", @@ -1029,25 +1068,6 @@ static int k3_r5_reserved_mem_init(struct k3_r5_rproc *kproc) kproc->num_rmems = num_rmems; 
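[Editor's note: the ti_k3_r5 hunks above replace hand-rolled "unmap_rmem:"/"release_rmem:" unwind labels and the _exit() helper with device-managed resources. A minimal sketch of that pattern follows, assuming hypothetical names (example_rmem_init, example_rmem_release) and an illustrative mapping address; it is not the driver's actual code.]

#include <linux/device.h>
#include <linux/io.h>
#include <linux/of_reserved_mem.h>
#include <linux/sizes.h>

/* Hypothetical release callback, registered with devres below. */
static void example_rmem_release(void *data)
{
	of_reserved_mem_device_release(data);
}

static int example_rmem_init(struct device *dev)
{
	void __iomem *va;
	int ret;

	ret = of_reserved_mem_device_init_by_idx(dev, dev->of_node, 0);
	if (ret)
		return ret;

	/*
	 * Registering the release action immediately means every later
	 * error return (and eventual device removal) unwinds
	 * automatically, so no explicit cleanup labels are needed.
	 */
	ret = devm_add_action_or_reset(dev, example_rmem_release, dev);
	if (ret)
		return ret;

	/* devm_ioremap_wc() unmaps on detach, replacing iounmap() loops. */
	va = devm_ioremap_wc(dev, 0xa0000000, SZ_1M); /* address illustrative */
	if (!va)
		return -ENOMEM;

	return 0;
}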
return 0; - -unmap_rmem: - for (i--; i >= 0; i--) - iounmap(kproc->rmem[i].cpu_addr); - kfree(kproc->rmem); -release_rmem: - of_reserved_mem_device_release(dev); - return ret; -} - -static void k3_r5_reserved_mem_exit(struct k3_r5_rproc *kproc) -{ - int i; - - for (i = 0; i < kproc->num_rmems; i++) - iounmap(kproc->rmem[i].cpu_addr); - kfree(kproc->rmem); - - of_reserved_mem_device_release(kproc->dev); } /* @@ -1274,10 +1294,10 @@ init_rmem: goto out; } - ret = rproc_add(rproc); + ret = devm_rproc_add(dev, rproc); if (ret) { - dev_err(dev, "rproc_add failed, ret = %d\n", ret); - goto err_add; + dev_err_probe(dev, ret, "rproc_add failed\n"); + goto out; } /* create only one rproc in lockstep, single-cpu or @@ -1287,26 +1307,6 @@ init_rmem: cluster->mode == CLUSTER_MODE_SINGLECPU || cluster->mode == CLUSTER_MODE_SINGLECORE) break; - - /* - * R5 cores require to be powered on sequentially, core0 - * should be in higher power state than core1 in a cluster - * So, wait for current core to power up before proceeding - * to next core and put timeout of 2sec for each core. - * - * This waiting mechanism is necessary because - * rproc_auto_boot_callback() for core1 can be called before - * core0 due to thread execution order. - */ - ret = wait_event_interruptible_timeout(cluster->core_transition, - core->released_from_reset, - msecs_to_jiffies(2000)); - if (ret <= 0) { - dev_err(dev, - "Timed out waiting for %s core to power up!\n", - rproc->name); - goto err_powerup; - } } return 0; @@ -1321,10 +1321,6 @@ err_split: } } -err_powerup: - rproc_del(rproc); -err_add: - k3_r5_reserved_mem_exit(kproc); out: /* undo core0 upon any failures on core1 in split-mode */ if (cluster->mode == CLUSTER_MODE_SPLIT && core == core1) { @@ -1367,10 +1363,6 @@ static void k3_r5_cluster_rproc_exit(void *data) } mbox_free_channel(kproc->mbox); - - rproc_del(rproc); - - k3_r5_reserved_mem_exit(kproc); } } diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 5849d2970bba..095de4e0e4f3 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -697,8 +697,12 @@ static irqreturn_t cmos_interrupt(int irq, void *p) { u8 irqstat; u8 rtc_control; + unsigned long flags; - spin_lock(&rtc_lock); + /* We cannot use spin_lock() here, as cmos_interrupt() is also called + * in a non-irq context. + */ + spin_lock_irqsave(&rtc_lock, flags); /* When the HPET interrupt handler calls us, the interrupt * status is passed as arg1 instead of the irq number. 
But @@ -732,7 +736,7 @@ static irqreturn_t cmos_interrupt(int irq, void *p) hpet_mask_rtc_irq_bit(RTC_AIE); CMOS_READ(RTC_INTR_FLAGS); } - spin_unlock(&rtc_lock); + spin_unlock_irqrestore(&rtc_lock, flags); if (is_intr(irqstat)) { rtc_update_irq(p, 1, irqstat); @@ -1300,9 +1304,7 @@ static void cmos_check_wkalrm(struct device *dev) * ACK the rtc irq here */ if (t_now >= cmos->alarm_expires && cmos_use_acpi_alarm()) { - local_irq_disable(); cmos_interrupt(0, (void *)cmos->rtc); - local_irq_enable(); return; } diff --git a/drivers/rtc/rtc-pcf2127.c b/drivers/rtc/rtc-pcf2127.c index 9c04c4e1a49c..fc079b9dcf71 100644 --- a/drivers/rtc/rtc-pcf2127.c +++ b/drivers/rtc/rtc-pcf2127.c @@ -1383,6 +1383,11 @@ static int pcf2127_i2c_probe(struct i2c_client *client) variant = &pcf21xx_cfg[type]; } + if (variant->type == PCF2131) { + config.read_flag_mask = 0x0; + config.write_flag_mask = 0x0; + } + config.max_register = variant->max_register, regmap = devm_regmap_init(&client->dev, &pcf2127_i2c_regmap, @@ -1456,7 +1461,7 @@ static int pcf2127_spi_probe(struct spi_device *spi) variant = &pcf21xx_cfg[type]; } - config.max_register = variant->max_register, + config.max_register = variant->max_register; regmap = devm_regmap_init_spi(spi, &config); if (IS_ERR(regmap)) { diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c index 2f34761e6413..7cfc4f986297 100644 --- a/drivers/s390/net/ism_drv.c +++ b/drivers/s390/net/ism_drv.c @@ -131,6 +131,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd) struct ism_req_hdr *req = cmd; struct ism_resp_hdr *resp = cmd; + spin_lock(&ism->cmd_lock); __ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req)); __ism_write_cmd(ism, req, 0, sizeof(*req)); @@ -144,6 +145,7 @@ static int ism_cmd(struct ism_dev *ism, void *cmd) } __ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp)); out: + spin_unlock(&ism->cmd_lock); return resp->ret; } @@ -607,6 +609,7 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENOMEM; spin_lock_init(&ism->lock); + spin_lock_init(&ism->cmd_lock); dev_set_drvdata(&pdev->dev, ism); ism->pdev = pdev; ism->dev.parent = &pdev->dev; diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index 85059b83ea6b..1c6b024160da 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -398,7 +398,11 @@ lpfc_bsg_send_mgmt_cmd(struct bsg_job *job) /* in case no data is transferred */ bsg_reply->reply_payload_rcv_len = 0; - if (ndlp->nlp_flag & NLP_ELS_SND_MASK) + if (test_bit(NLP_PLOGI_SND, &ndlp->nlp_flag) || + test_bit(NLP_PRLI_SND, &ndlp->nlp_flag) || + test_bit(NLP_ADISC_SND, &ndlp->nlp_flag) || + test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) || + test_bit(NLP_RNID_SND, &ndlp->nlp_flag)) return -ENODEV; /* allocate our bsg tracking structure */ diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index d4e46a08f94d..36470bd71617 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -571,7 +571,7 @@ int lpfc_issue_reg_vfi(struct lpfc_vport *); int lpfc_issue_unreg_vfi(struct lpfc_vport *); int lpfc_selective_reset(struct lpfc_hba *); int lpfc_sli4_read_config(struct lpfc_hba *); -void lpfc_sli4_node_prep(struct lpfc_hba *); +void lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba); int lpfc_sli4_els_sgl_update(struct lpfc_hba *phba); int lpfc_sli4_nvmet_sgl_update(struct lpfc_hba *phba); int lpfc_io_buf_flush(struct lpfc_hba *phba, struct list_head *sglist); diff --git a/drivers/scsi/lpfc/lpfc_ct.c 
b/drivers/scsi/lpfc/lpfc_ct.c index ce3a1f42713d..30891ad17e2a 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -735,7 +735,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0238 Process x%06x NameServer Rsp " - "Data: x%x x%x x%x x%lx x%x\n", Did, + "Data: x%lx x%x x%x x%lx x%x\n", Did, ndlp->nlp_flag, ndlp->nlp_fc4_type, ndlp->nlp_state, vport->fc_flag, vport->fc_rscn_id_cnt); @@ -744,7 +744,7 @@ lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) * state of ndlp hit devloss, change state to * allow rediscovery. */ - if (ndlp->nlp_flag & NLP_NPR_2B_DISC && + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && ndlp->nlp_state == NLP_STE_UNUSED_NODE) { lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -832,12 +832,10 @@ lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) if (ndlp->nlp_type != NLP_NVME_INITIATOR || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) continue; - spin_lock_irq(&ndlp->lock); if (ndlp->nlp_DID == Did) - ndlp->nlp_flag &= ~NLP_NVMET_RECOV; + clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); else - ndlp->nlp_flag |= NLP_NVMET_RECOV; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); } } } @@ -894,13 +892,11 @@ lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, */ if (vport->phba->nvmet_support) { list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { - if (!(ndlp->nlp_flag & NLP_NVMET_RECOV)) + if (!test_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag)) continue; lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NVMET_RECOV; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); } } @@ -1440,7 +1436,7 @@ lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (ndlp) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0242 Process x%x GFF " - "NameServer Rsp Data: x%x x%lx x%x\n", + "NameServer Rsp Data: x%lx x%lx x%x\n", did, ndlp->nlp_flag, vport->fc_flag, vport->fc_rscn_id_cnt); } else { diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c index a2d2b02b3418..3fd1aa5cc78c 100644 --- a/drivers/scsi/lpfc/lpfc_debugfs.c +++ b/drivers/scsi/lpfc/lpfc_debugfs.c @@ -870,8 +870,8 @@ lpfc_debugfs_nodelist_data(struct lpfc_vport *vport, char *buf, int size) wwn_to_u64(ndlp->nlp_nodename.u.wwn)); len += scnprintf(buf+len, size-len, "RPI:x%04x ", ndlp->nlp_rpi); - len += scnprintf(buf+len, size-len, "flag:x%08x ", - ndlp->nlp_flag); + len += scnprintf(buf+len, size-len, "flag:x%08lx ", + ndlp->nlp_flag); if (!ndlp->nlp_type) len += scnprintf(buf+len, size-len, "UNKNOWN_TYPE "); if (ndlp->nlp_type & NLP_FC_NODE) diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h index f5ae8cc15820..af5d5bd75642 100644 --- a/drivers/scsi/lpfc/lpfc_disc.h +++ b/drivers/scsi/lpfc/lpfc_disc.h @@ -102,7 +102,7 @@ struct lpfc_nodelist { spinlock_t lock; /* Node management lock */ - uint32_t nlp_flag; /* entry flags */ + unsigned long nlp_flag; /* entry flags */ uint32_t nlp_DID; /* FC D_ID of entry */ uint32_t nlp_last_elscmd; /* Last ELS cmd sent */ uint16_t nlp_type; @@ -182,37 +182,37 @@ struct lpfc_node_rrq { #define lpfc_ndlp_check_qdepth(phba, ndlp) \ (ndlp->cmd_qdepth < phba->sli4_hba.max_cfg_param.max_xri) -/* Defines for nlp_flag (uint32) */ -#define NLP_IGNR_REG_CMPL 0x00000001 /* Rcvd rscn before we cmpl reg login 
*/ -#define NLP_REG_LOGIN_SEND 0x00000002 /* sent reglogin to adapter */ -#define NLP_RELEASE_RPI 0x00000004 /* Release RPI to free pool */ -#define NLP_SUPPRESS_RSP 0x00000010 /* Remote NPort supports suppress rsp */ -#define NLP_PLOGI_SND 0x00000020 /* sent PLOGI request for this entry */ -#define NLP_PRLI_SND 0x00000040 /* sent PRLI request for this entry */ -#define NLP_ADISC_SND 0x00000080 /* sent ADISC request for this entry */ -#define NLP_LOGO_SND 0x00000100 /* sent LOGO request for this entry */ -#define NLP_RNID_SND 0x00000400 /* sent RNID request for this entry */ -#define NLP_ELS_SND_MASK 0x000007e0 /* sent ELS request for this entry */ -#define NLP_NVMET_RECOV 0x00001000 /* NVMET auditing node for recovery. */ -#define NLP_UNREG_INP 0x00008000 /* UNREG_RPI cmd is in progress */ -#define NLP_DROPPED 0x00010000 /* Init ref count has been dropped */ -#define NLP_DELAY_TMO 0x00020000 /* delay timeout is running for node */ -#define NLP_NPR_2B_DISC 0x00040000 /* node is included in num_disc_nodes */ -#define NLP_RCV_PLOGI 0x00080000 /* Rcv'ed PLOGI from remote system */ -#define NLP_LOGO_ACC 0x00100000 /* Process LOGO after ACC completes */ -#define NLP_TGT_NO_SCSIID 0x00200000 /* good PRLI but no binding for scsid */ -#define NLP_ISSUE_LOGO 0x00400000 /* waiting to issue a LOGO */ -#define NLP_IN_DEV_LOSS 0x00800000 /* devloss in progress */ -#define NLP_ACC_REGLOGIN 0x01000000 /* Issue Reg Login after successful +/* nlp_flag mask bits */ +enum lpfc_nlp_flag { + NLP_IGNR_REG_CMPL = 0, /* Rcvd rscn before we cmpl reg login */ + NLP_REG_LOGIN_SEND = 1, /* sent reglogin to adapter */ + NLP_SUPPRESS_RSP = 4, /* Remote NPort supports suppress rsp */ + NLP_PLOGI_SND = 5, /* sent PLOGI request for this entry */ + NLP_PRLI_SND = 6, /* sent PRLI request for this entry */ + NLP_ADISC_SND = 7, /* sent ADISC request for this entry */ + NLP_LOGO_SND = 8, /* sent LOGO request for this entry */ + NLP_RNID_SND = 10, /* sent RNID request for this entry */ + NLP_NVMET_RECOV = 12, /* NVMET auditing node for recovery. 
*/ + NLP_UNREG_INP = 15, /* UNREG_RPI cmd is in progress */ + NLP_DROPPED = 16, /* Init ref count has been dropped */ + NLP_DELAY_TMO = 17, /* delay timeout is running for node */ + NLP_NPR_2B_DISC = 18, /* node is included in num_disc_nodes */ + NLP_RCV_PLOGI = 19, /* Rcv'ed PLOGI from remote system */ + NLP_LOGO_ACC = 20, /* Process LOGO after ACC completes */ + NLP_TGT_NO_SCSIID = 21, /* good PRLI but no binding for scsid */ + NLP_ISSUE_LOGO = 22, /* waiting to issue a LOGO */ + NLP_IN_DEV_LOSS = 23, /* devloss in progress */ + NLP_ACC_REGLOGIN = 24, /* Issue Reg Login after successful ACC */ -#define NLP_NPR_ADISC 0x02000000 /* Issue ADISC when dq'ed from + NLP_NPR_ADISC = 25, /* Issue ADISC when dq'ed from NPR list */ -#define NLP_RM_DFLT_RPI 0x04000000 /* need to remove leftover dflt RPI */ -#define NLP_NODEV_REMOVE 0x08000000 /* Defer removal till discovery ends */ -#define NLP_TARGET_REMOVE 0x10000000 /* Target remove in process */ -#define NLP_SC_REQ 0x20000000 /* Target requires authentication */ -#define NLP_FIRSTBURST 0x40000000 /* Target supports FirstBurst */ -#define NLP_RPI_REGISTERED 0x80000000 /* nlp_rpi is valid */ + NLP_RM_DFLT_RPI = 26, /* need to remove leftover dflt RPI */ + NLP_NODEV_REMOVE = 27, /* Defer removal till discovery ends */ + NLP_TARGET_REMOVE = 28, /* Target remove in process */ + NLP_SC_REQ = 29, /* Target requires authentication */ + NLP_FIRSTBURST = 30, /* Target supports FirstBurst */ + NLP_RPI_REGISTERED = 31 /* nlp_rpi is valid */ +}; /* There are 4 different double linked lists nodelist entries can reside on. * The Port Login (PLOGI) list and Address Discovery (ADISC) list are used diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index d737b897ddd8..b5fa5054e952 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -725,11 +725,9 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { if ((np->nlp_state != NLP_STE_NPR_NODE) || - !(np->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &np->nlp_flag)) continue; - spin_lock_irq(&np->lock); - np->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&np->lock); + clear_bit(NLP_NPR_ADISC, &np->nlp_flag); lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); @@ -864,9 +862,7 @@ lpfc_cmpl_els_flogi_nport(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, sizeof(struct lpfc_name)); /* Set state will put ndlp onto node list if not already done */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!mbox) @@ -1018,7 +1014,7 @@ stop_rr_fcf_flogi: * registered with the SCSI transport, remove the initial * reference to trigger node release. */ - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS) && + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag) && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) lpfc_nlp_put(ndlp); @@ -1548,7 +1544,7 @@ lpfc_initial_flogi(struct lpfc_vport *vport) * Otherwise, decrement node reference to trigger release. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && - !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); return 0; } @@ -1597,7 +1593,7 @@ lpfc_initial_fdisc(struct lpfc_vport *vport) * Otherwise, decrement node reference to trigger release. 
*/ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && - !(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + !test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); return 0; } @@ -1675,9 +1671,9 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, struct lpfc_nodelist *new_ndlp; struct serv_parm *sp; uint8_t name[sizeof(struct lpfc_name)]; - uint32_t keepDID = 0, keep_nlp_flag = 0; + uint32_t keepDID = 0; int rc; - uint32_t keep_new_nlp_flag = 0; + unsigned long keep_nlp_flag = 0, keep_new_nlp_flag = 0; uint16_t keep_nlp_state; u32 keep_nlp_fc4_type = 0; struct lpfc_nvme_rport *keep_nrport = NULL; @@ -1704,8 +1700,8 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, } lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, - "3178 PLOGI confirm: ndlp x%x x%x x%x: " - "new_ndlp x%x x%x x%x\n", + "3178 PLOGI confirm: ndlp x%x x%lx x%x: " + "new_ndlp x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_fc4_type, (new_ndlp ? new_ndlp->nlp_DID : 0), (new_ndlp ? new_ndlp->nlp_flag : 0), @@ -1769,48 +1765,48 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, new_ndlp->nlp_flag = ndlp->nlp_flag; /* if new_ndlp had NLP_UNREG_INP set, keep it */ - if (keep_new_nlp_flag & NLP_UNREG_INP) - new_ndlp->nlp_flag |= NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &keep_new_nlp_flag)) + set_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &new_ndlp->nlp_flag); /* if new_ndlp had NLP_RPI_REGISTERED set, keep it */ - if (keep_new_nlp_flag & NLP_RPI_REGISTERED) - new_ndlp->nlp_flag |= NLP_RPI_REGISTERED; + if (test_bit(NLP_RPI_REGISTERED, &keep_new_nlp_flag)) + set_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; + clear_bit(NLP_RPI_REGISTERED, &new_ndlp->nlp_flag); /* * Retain the DROPPED flag. This will take care of the init * refcount when affecting the state change */ - if (keep_new_nlp_flag & NLP_DROPPED) - new_ndlp->nlp_flag |= NLP_DROPPED; + if (test_bit(NLP_DROPPED, &keep_new_nlp_flag)) + set_bit(NLP_DROPPED, &new_ndlp->nlp_flag); else - new_ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &new_ndlp->nlp_flag); ndlp->nlp_flag = keep_new_nlp_flag; /* if ndlp had NLP_UNREG_INP set, keep it */ - if (keep_nlp_flag & NLP_UNREG_INP) - ndlp->nlp_flag |= NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &keep_nlp_flag)) + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); /* if ndlp had NLP_RPI_REGISTERED set, keep it */ - if (keep_nlp_flag & NLP_RPI_REGISTERED) - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + if (test_bit(NLP_RPI_REGISTERED, &keep_nlp_flag)) + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; + clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); /* * Retain the DROPPED flag. 
This will take care of the init * refcount when affecting the state change */ - if (keep_nlp_flag & NLP_DROPPED) - ndlp->nlp_flag |= NLP_DROPPED; + if (test_bit(NLP_DROPPED, &keep_nlp_flag)) + set_bit(NLP_DROPPED, &ndlp->nlp_flag); else - ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &ndlp->nlp_flag); spin_unlock_irq(&new_ndlp->lock); spin_unlock_irq(&ndlp->lock); @@ -1888,7 +1884,7 @@ lpfc_plogi_confirm_nport(struct lpfc_hba *phba, uint32_t *prsp, phba->active_rrq_pool); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE, - "3173 PLOGI confirm exit: new_ndlp x%x x%x x%x\n", + "3173 PLOGI confirm exit: new_ndlp x%x x%lx x%x\n", new_ndlp->nlp_DID, new_ndlp->nlp_flag, new_ndlp->nlp_fc4_type); @@ -2009,7 +2005,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, IOCB_t *irsp; struct lpfc_nodelist *ndlp, *free_ndlp; struct lpfc_dmabuf *prsp; - int disc; + bool disc; struct serv_parm *sp = NULL; u32 ulp_status, ulp_word4, did, iotag; bool release_node = false; @@ -2044,10 +2040,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. */ - spin_lock_irq(&ndlp->lock); - disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* PLOGI completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, @@ -2060,9 +2053,7 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } @@ -2070,11 +2061,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check for retry */ if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ - if (disc) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); - } + if (disc) + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } /* Warn PLOGI status Don't print the vport to vport rjts */ @@ -2097,7 +2085,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * with the reglogin process. */ spin_lock_irq(&ndlp->lock); - if ((ndlp->nlp_flag & (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI)) && + if ((test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag) || + test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)) && ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { spin_unlock_irq(&ndlp->lock); goto out; @@ -2108,8 +2097,8 @@ lpfc_cmpl_els_plogi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * start the device remove process. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2212,12 +2201,13 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry) * outstanding UNREG_RPI mbox command completes, unless we * are going offline. 
This logic does not apply for Fabric DIDs */ - if ((ndlp->nlp_flag & (NLP_IGNR_REG_CMPL | NLP_UNREG_INP)) && + if ((test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) || + test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) && ((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && !test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4110 Issue PLOGI x%x deferred " - "on NPort x%x rpi x%x flg x%x Data:" + "on NPort x%x rpi x%x flg x%lx Data:" " x%px\n", ndlp->nlp_defer_did, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, ndlp); @@ -2335,10 +2325,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, ulp_status = get_job_ulpstatus(phba, rspiocb); ulp_word4 = get_job_word4(phba, rspiocb); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_PRLI_SND; + clear_bit(NLP_PRLI_SND, &ndlp->nlp_flag); /* Driver supports multiple FC4 types. Counters matter. */ + spin_lock_irq(&ndlp->lock); vport->fc_prli_sent--; ndlp->fc4_prli_sent--; spin_unlock_irq(&ndlp->lock); @@ -2379,7 +2369,7 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Warn PRLI status */ lpfc_printf_vlog(vport, mode, LOG_ELS, "2754 PRLI DID:%06X Status:x%x/x%x, " - "data: x%x x%x x%x\n", + "data: x%x x%x x%lx\n", ndlp->nlp_DID, ulp_status, ulp_word4, ndlp->nlp_state, ndlp->fc4_prli_sent, ndlp->nlp_flag); @@ -2396,10 +2386,10 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if ((ndlp->nlp_state >= NLP_STE_PLOGI_ISSUE && ndlp->nlp_state <= NLP_STE_REG_LOGIN_ISSUE) || (ndlp->nlp_state == NLP_STE_NPR_NODE && - ndlp->nlp_flag & NLP_DELAY_TMO)) { - lpfc_printf_vlog(vport, KERN_WARNING, LOG_NODE, + test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag))) { + lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "2784 PRLI cmpl: Allow Node recovery " - "DID x%06x nstate x%x nflag x%x\n", + "DID x%06x nstate x%x nflag x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag); goto out; @@ -2420,8 +2410,8 @@ lpfc_cmpl_els_prli(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, spin_lock_irq(&ndlp->lock); if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD)) && !ndlp->fc4_prli_sent) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2496,7 +2486,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_FCP_TARGET | NLP_FCP_INITIATOR); ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag &= ~(NLP_FIRSTBURST | NLP_NPR_2B_DISC); + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); ndlp->nvme_fb_size = 0; send_next_prli: @@ -2627,8 +2618,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * the ndlp is used to track outstanding PRLIs for different * FC4 types. 
*/ + set_bit(NLP_PRLI_SND, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_PRLI_SND; vport->fc_prli_sent++; ndlp->fc4_prli_sent++; spin_unlock_irq(&ndlp->lock); @@ -2789,7 +2780,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_vport *vport = cmdiocb->vport; IOCB_t *irsp; struct lpfc_nodelist *ndlp; - int disc; + bool disc; u32 ulp_status, ulp_word4, tmo, iotag; bool release_node = false; @@ -2818,10 +2809,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Since ndlp can be freed in the disc state machine, note if this node * is being used during discovery. */ - spin_lock_irq(&ndlp->lock); - disc = (ndlp->nlp_flag & NLP_NPR_2B_DISC); - ndlp->nlp_flag &= ~(NLP_ADISC_SND | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + disc = test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag); /* ADISC completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0104 ADISC completes to NPort x%x " @@ -2832,9 +2821,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Check to see if link went down during discovery */ if (lpfc_els_chk_latt(vport)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); goto out; } @@ -2843,9 +2830,7 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, if (lpfc_els_retry(phba, cmdiocb, rspiocb)) { /* ELS command is being retried */ if (disc) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_set_disctmo(vport); } goto out; @@ -2864,8 +2849,8 @@ lpfc_cmpl_els_adisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, */ spin_lock_irq(&ndlp->lock); if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - if (!(ndlp->nlp_flag & NLP_IN_DEV_LOSS)) + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + if (!test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) release_node = true; } spin_unlock_irq(&ndlp->lock); @@ -2938,9 +2923,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->fc_stat.elsXmitADISC++; elsiocb->cmd_cmpl = lpfc_cmpl_els_adisc; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_ADISC_SND; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_ADISC_SND, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -2961,9 +2944,7 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_ADISC_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_ADISC_SND, &ndlp->nlp_flag); return 1; } @@ -2985,7 +2966,6 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_nodelist *ndlp = cmdiocb->ndlp; struct lpfc_vport *vport = ndlp->vport; IOCB_t *irsp; - unsigned long flags; uint32_t skip_recovery = 0; int wake_up_waiter = 0; u32 ulp_status; @@ -3007,8 +2987,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, iotag = irsp->ulpIoTag; } + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; if (ndlp->save_flags & NLP_WAIT_FOR_LOGO) { wake_up_waiter = 1; ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; @@ -3023,7 +3003,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* LOGO 
completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0105 LOGO completes to NPort x%x " - "IoTag x%x refcnt %d nflags x%x xflags x%x " + "IoTag x%x refcnt %d nflags x%lx xflags x%x " "Data: x%x x%x x%x x%x\n", ndlp->nlp_DID, iotag, kref_read(&ndlp->kref), ndlp->nlp_flag, @@ -3061,12 +3041,8 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* The driver sets this flag for an NPIV instance that doesn't want to * log into the remote port. */ - if (ndlp->nlp_flag & NLP_TARGET_REMOVE) { - spin_lock_irq(&ndlp->lock); - if (phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_flag |= NLP_RELEASE_RPI; - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag)) { + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); goto out_rsrc_free; @@ -3089,9 +3065,7 @@ out: if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET) && skip_recovery == 0) { lpfc_cancel_retry_delay_tmo(vport, ndlp); - spin_lock_irqsave(&ndlp->lock, flags); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irqrestore(&ndlp->lock, flags); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "3187 LOGO completes to NPort x%x: Start " @@ -3113,9 +3087,7 @@ out: * register with the transport. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); } @@ -3156,12 +3128,8 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, uint16_t cmdsize; int rc; - spin_lock_irq(&ndlp->lock); - if (ndlp->nlp_flag & NLP_LOGO_SND) { - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_LOGO_SND, &ndlp->nlp_flag)) return 0; - } - spin_unlock_irq(&ndlp->lock); cmdsize = (2 * sizeof(uint32_t)) + sizeof(struct lpfc_name); elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp, @@ -3180,10 +3148,8 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, phba->fc_stat.elsXmitLOGO++; elsiocb->cmd_cmpl = lpfc_cmpl_els_logo; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_SND; - ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_SND, &ndlp->nlp_flag); + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { lpfc_els_free_iocb(phba, elsiocb); @@ -3208,9 +3174,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); return 1; } @@ -3286,13 +3250,13 @@ lpfc_cmpl_els_cmd(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, static int lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) { - int rc = 0; + int rc; struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ns_ndlp; LPFC_MBOXQ_t *mbox; - if (fc_ndlp->nlp_flag & NLP_RPI_REGISTERED) - return rc; + if (test_bit(NLP_RPI_REGISTERED, &fc_ndlp->nlp_flag)) + return 0; ns_ndlp = lpfc_findnode_did(vport, NameServer_DID); if (!ns_ndlp) @@ -3309,7 +3273,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0936 %s: no memory for reg_login " - "Data: x%x x%x x%x x%x\n", __func__, + 
"Data: x%x x%x x%lx x%x\n", __func__, fc_ndlp->nlp_DID, fc_ndlp->nlp_state, fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); return -ENOMEM; @@ -3321,7 +3285,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) goto out; } - fc_ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &fc_ndlp->nlp_flag); mbox->mbox_cmpl = lpfc_mbx_cmpl_fc_reg_login; mbox->ctx_ndlp = lpfc_nlp_get(fc_ndlp); if (!mbox->ctx_ndlp) { @@ -3345,7 +3309,7 @@ lpfc_reg_fab_ctrl_node(struct lpfc_vport *vport, struct lpfc_nodelist *fc_ndlp) lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE, "0938 %s: failed to format reg_login " - "Data: x%x x%x x%x x%x\n", __func__, + "Data: x%x x%x x%lx x%x\n", __func__, fc_ndlp->nlp_DID, fc_ndlp->nlp_state, fc_ndlp->nlp_flag, fc_ndlp->nlp_rpi); return rc; @@ -4384,11 +4348,8 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) { struct lpfc_work_evt *evtp; - if (!(nlp->nlp_flag & NLP_DELAY_TMO)) + if (!test_and_clear_bit(NLP_DELAY_TMO, &nlp->nlp_flag)) return; - spin_lock_irq(&nlp->lock); - nlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&nlp->lock); del_timer_sync(&nlp->nlp_delayfunc); nlp->nlp_last_elscmd = 0; if (!list_empty(&nlp->els_retry_evt.evt_listp)) { @@ -4397,10 +4358,7 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) evtp = &nlp->els_retry_evt; lpfc_nlp_put((struct lpfc_nodelist *)evtp->evt_arg1); } - if (nlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&nlp->lock); - nlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&nlp->lock); + if (test_and_clear_bit(NLP_NPR_2B_DISC, &nlp->nlp_flag)) { if (vport->num_disc_nodes) { if (vport->port_state < LPFC_VPORT_READY) { /* Check if there are more ADISCs to be sent */ @@ -4480,14 +4438,11 @@ lpfc_els_retry_delay_handler(struct lpfc_nodelist *ndlp) spin_lock_irq(&ndlp->lock); cmd = ndlp->nlp_last_elscmd; ndlp->nlp_last_elscmd = 0; + spin_unlock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { - spin_unlock_irq(&ndlp->lock); + if (!test_and_clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) return; - } - ndlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); /* * If a discovery event readded nlp_delayfunc after timer * firing and before processing the timer, cancel the @@ -5010,9 +4965,7 @@ out_retry: /* delay is specified in milliseconds */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(delay)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_prev_state = ndlp->nlp_state; if ((cmd == ELS_CMD_PRLI) || @@ -5072,7 +5025,7 @@ out_retry: lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0108 No retry ELS command x%x to remote " "NPORT x%x Retried:%d Error:x%x/%x " - "IoTag x%x nflags x%x\n", + "IoTag x%x nflags x%lx\n", cmd, did, cmdiocb->retry, ulp_status, ulp_word4, cmdiocb->iotag, (ndlp ? 
ndlp->nlp_flag : 0)); @@ -5239,7 +5192,7 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ACC to LOGO completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0109 ACC to LOGO completes to NPort x%x refcnt %d " - "last els x%x Data: x%x x%x x%x\n", + "last els x%x Data: x%lx x%x x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp->nlp_last_elscmd, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -5254,16 +5207,14 @@ lpfc_cmpl_els_logo_acc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out; if (ndlp->nlp_state == NLP_STE_NPR_NODE) { - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) lpfc_unreg_rpi(vport, ndlp); /* If came from PRLO, then PRLO_ACC is done. * Start rediscovery now. */ if (ndlp->nlp_last_elscmd == ELS_CMD_PRLO) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -5300,7 +5251,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (ndlp) { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "0006 rpi x%x DID:%x flg:%x %d x%px " + "0006 rpi x%x DID:%x flg:%lx %d x%px " "mbx_cmd x%x mbx_flag x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp, mbx_cmd, @@ -5311,11 +5262,9 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * first on an UNREG_LOGIN and then release the final * references. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); if (mbx_cmd == MBX_UNREG_LOGIN) - ndlp->nlp_flag &= ~NLP_UNREG_INP; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); lpfc_drop_node(ndlp->vport, ndlp); } @@ -5381,23 +5330,23 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* ELS response tag <ulpIoTag> completes */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0110 ELS response tag x%x completes " - "Data: x%x x%x x%x x%x x%x x%x x%x x%x %p %p\n", + "Data: x%x x%x x%x x%x x%lx x%x x%x x%x %p %p\n", iotag, ulp_status, ulp_word4, tmo, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi, kref_read(&ndlp->kref), mbox, ndlp); if (mbox) { - if (ulp_status == 0 - && (ndlp->nlp_flag & NLP_ACC_REGLOGIN)) { + if (ulp_status == 0 && + test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) { if (!lpfc_unreg_rpi(vport, ndlp) && !test_bit(FC_PT2PT, &vport->fc_flag)) { - if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || + if (ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0314 PLOGI recov " "DID x%x " - "Data: x%x x%x x%x\n", + "Data: x%x x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_rpi, @@ -5414,18 +5363,17 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, goto out_free_mbox; mbox->vport = vport; - if (ndlp->nlp_flag & NLP_RM_DFLT_RPI) { + if (test_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag)) { mbox->mbox_flag |= LPFC_MBX_IMED_UNREG; mbox->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; - } - else { + } else { mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE); } - ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); if 
(lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT) != MBX_NOT_FINISHED) goto out; @@ -5434,12 +5382,12 @@ lpfc_cmpl_els_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, * set for this failed mailbox command. */ lpfc_nlp_put(ndlp); - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); /* ELS rsp: Cannot issue reg_login for <NPortid> */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0138 ELS rsp: Cannot issue reg_login for x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } @@ -5448,32 +5396,20 @@ out_free_mbox: } out: if (ndlp && shost) { - spin_lock_irq(&ndlp->lock); if (mbox) - ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; - ndlp->nlp_flag &= ~NLP_RM_DFLT_RPI; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + clear_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag); } /* An SLI4 NPIV instance wants to drop the node at this point under - * these conditions and release the RPI. + * these conditions because it doesn't need the login. */ if (phba->sli_rev == LPFC_SLI_REV4 && vport && vport->port_type == LPFC_NPIV_PORT && !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && - ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { - lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - spin_unlock_irq(&ndlp->lock); - } - lpfc_drop_node(vport, ndlp); - } else if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && - ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && - ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { + if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE && + ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE && + ndlp->nlp_state != NLP_STE_PRLI_ISSUE) { /* Drop ndlp if there is no planned or outstanding * issued PRLI. 
* @@ -5540,9 +5476,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp, ndlp->nlp_DID, ELS_CMD_ACC); if (!elsiocb) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 1; } @@ -5570,7 +5504,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, pcmd += sizeof(uint32_t); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC: did:x%x flg:x%x", + "Issue ACC: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_FLOGI: @@ -5649,7 +5583,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC FLOGI/PLOGI: did:x%x flg:x%x", + "Issue ACC FLOGI/PLOGI: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_PRLO: @@ -5687,7 +5621,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, els_pkt_ptr->un.prlo.acceptRspCode = PRLO_REQ_EXECUTED; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC PRLO: did:x%x flg:x%x", + "Issue ACC PRLO: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); break; case ELS_CMD_RDF: @@ -5732,12 +5666,10 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, default: return 1; } - if (ndlp->nlp_flag & NLP_LOGO_ACC) { - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED || - ndlp->nlp_flag & NLP_REG_LOGIN_SEND)) - ndlp->nlp_flag &= ~NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) { + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) && + !test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); elsiocb->cmd_cmpl = lpfc_cmpl_els_logo_acc; } else { elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; @@ -5760,7 +5692,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag, /* Xmit ELS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0128 Xmit ELS ACC response Status: x%x, IoTag: x%x, " - "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x " "RPI: x%x, fc_flag x%lx refcnt %d\n", rc, elsiocb->iotag, elsiocb->sli4_xritag, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -5835,13 +5767,13 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, /* Xmit ELS RJT <err> response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0129 Xmit ELS RJT x%x response tag x%x " - "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, " "rpi x%x\n", rejectError, elsiocb->iotag, get_job_ulpcontext(phba, elsiocb), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue LS_RJT: did:x%x flg:x%x err:x%x", + "Issue LS_RJT: did:x%x flg:x%lx err:x%x", ndlp->nlp_DID, ndlp->nlp_flag, rejectError); phba->fc_stat.elsXmitLSRJT++; @@ -5852,18 +5784,6 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError, return 1; } - /* The NPIV instance is rejecting this unsolicited ELS. Make sure the - * node's assigned RPI gets released provided this node is not already - * registered with the transport. 
- */ - if (phba->sli_rev == LPFC_SLI_REV4 && - vport->port_type == LPFC_NPIV_PORT && - !(ndlp->fc4_xpt_flags & SCSI_XPT_REGD)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_RELEASE_RPI; - spin_unlock_irq(&ndlp->lock); - } - rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0); if (rc == IOCB_ERROR) { lpfc_els_free_iocb(phba, elsiocb); @@ -5944,7 +5864,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, lpfc_format_edc_lft_desc(phba, tlv); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue EDC ACC: did:x%x flg:x%x refcnt %d", + "Issue EDC ACC: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); elsiocb->cmd_cmpl = lpfc_cmpl_els_rsp; @@ -5966,7 +5886,7 @@ lpfc_issue_els_edc_rsp(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* Xmit ELS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0152 Xmit EDC ACC response Status: x%x, IoTag: x%x, " - "XRI: x%x, DID: x%x, nlp_flag: x%x nlp_state: x%x " + "XRI: x%x, DID: x%x, nlp_flag: x%lx nlp_state: x%x " "RPI: x%x, fc_flag x%lx\n", rc, elsiocb->iotag, elsiocb->sli4_xritag, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -6035,7 +5955,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, /* Xmit ADISC ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0130 Xmit ADISC ACC response iotag x%x xri: " - "x%x, did x%x, nlp_flag x%x, nlp_state x%x rpi x%x\n", + "x%x, did x%x, nlp_flag x%lx, nlp_state x%x rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -6051,7 +5971,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, ap->DID = be32_to_cpu(vport->fc_myDID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC ADISC: did:x%x flg:x%x refcnt %d", + "Issue ACC ADISC: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6157,7 +6077,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, /* Xmit PRLI ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0131 Xmit PRLI ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -6228,7 +6148,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6015 NVME issue PRLI ACC word1 x%08x " - "word4 x%08x word5 x%08x flag x%x, " + "word4 x%08x word5 x%08x flag x%lx, " "fcp_info x%x nlp_type x%x\n", npr_nvme->word1, npr_nvme->word4, npr_nvme->word5, ndlp->nlp_flag, @@ -6243,7 +6163,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb, ndlp->nlp_DID); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC PRLI: did:x%x flg:x%x", + "Issue ACC PRLI: did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6357,7 +6277,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format, } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC RNID: did:x%x flg:x%x refcnt %d", + "Issue ACC RNID: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6414,7 +6334,7 @@ lpfc_els_clear_rrq(struct lpfc_vport *vport, get_job_ulpcontext(phba, iocb)); 
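[Editor's note: the lpfc hunks in this series convert nlp_flag from a spinlock-protected uint32_t bitmask to an unsigned long manipulated with set_bit()/clear_bit()/test_bit()/test_and_clear_bit(). A minimal sketch of the conversion, with hypothetical names, follows.]

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical node mirroring the nlp_flag conversion. */
enum example_node_flag {
	EX_NPR_2B_DISC = 0,
	EX_ADISC_SND   = 1,
};

struct example_node {
	spinlock_t lock;	/* still serializes multi-field updates */
	unsigned long flags;	/* was: uint32_t flags, guarded by lock */
};

static bool example_adisc_cmpl(struct example_node *n)
{
	/*
	 * One atomic read-modify-write replaces the old four-step
	 * sequence: spin_lock(); disc = flags & BIT; flags &= ~BIT;
	 * spin_unlock();
	 */
	bool disc = test_and_clear_bit(EX_NPR_2B_DISC, &n->flags);

	clear_bit(EX_ADISC_SND, &n->flags);
	return disc;
}

Per-bit atomicity is why so many spin_lock_irq(&ndlp->lock)/spin_unlock_irq() pairs disappear in these hunks: a lock is only retained where several fields must change together.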
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Clear RRQ: did:x%x flg:x%x exchg:x%.08x", + "Clear RRQ: did:x%x flg:x%lx exchg:x%.08x", ndlp->nlp_DID, ndlp->nlp_flag, rrq->rrq_exchg); if (vport->fc_myDID == be32_to_cpu(bf_get(rrq_did, rrq))) xri = bf_get(rrq_oxid, rrq); @@ -6491,7 +6411,7 @@ lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data, memcpy(pcmd, data, cmdsize - sizeof(uint32_t)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP, - "Issue ACC ECHO: did:x%x flg:x%x refcnt %d", + "Issue ACC ECHO: did:x%x flg:x%lx refcnt %d", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); phba->fc_stat.elsXmitACC++; @@ -6541,14 +6461,12 @@ lpfc_els_disc_adisc(struct lpfc_vport *vport) list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state != NLP_STE_NPR_NODE || - !(ndlp->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) continue; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); - if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + if (!test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* This node was marked for ADISC but was not picked * for discovery. This is possible if the node was * missing in gidft response. @@ -6606,9 +6524,9 @@ lpfc_els_disc_plogi(struct lpfc_vport *vport) /* go thru NPR nodes and issue any remaining ELS PLOGIs */ list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_state == NLP_STE_NPR_NODE && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) != 0 && - (ndlp->nlp_flag & NLP_DELAY_TMO) == 0 && - (ndlp->nlp_flag & NLP_NPR_ADISC) == 0) { + test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && + !test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag) && + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -7104,7 +7022,7 @@ lpfc_els_rdp_cmpl(struct lpfc_hba *phba, struct lpfc_rdp_context *rdp_context, lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2171 Xmit RDP response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -8078,7 +7996,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, */ if (vport->port_state <= LPFC_NS_QRY) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN ignore: did:x%x/ste:x%x flg:x%x", + "RCV RSCN ignore: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); @@ -8108,7 +8026,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, vport->fc_flag, payload_len, *lp, vport->fc_rscn_id_cnt); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN vport: did:x%x/ste:x%x flg:x%x", + "RCV RSCN vport: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); @@ -8145,7 +8063,7 @@ lpfc_els_rcv_rscn(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, if (test_bit(FC_RSCN_MODE, &vport->fc_flag) || test_bit(FC_NDISC_ACTIVE, &vport->fc_flag)) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN defer: did:x%x/ste:x%x flg:x%x", + "RCV RSCN defer: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); set_bit(FC_RSCN_DEFERRED, &vport->fc_flag); @@ -8201,7 +8119,7 @@ lpfc_els_rcv_rscn(struct 
lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, return 0; } lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RSCN: did:x%x/ste:x%x flg:x%x", + "RCV RSCN: did:x%x/ste:x%x flg:x%lx", ndlp->nlp_DID, vport->port_state, ndlp->nlp_flag); set_bit(FC_RSCN_MODE, &vport->fc_flag); @@ -8707,7 +8625,7 @@ lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) /* Xmit ELS RLS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "2874 Xmit ELS RLS ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n", + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); @@ -8869,7 +8787,7 @@ lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, /* Xmit ELS RLS ACC response tag <ulpIoTag> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS, "2875 Xmit ELS RTV ACC response tag x%x xri x%x, " - "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, " + "did x%x, nlp_flag x%lx, nlp_state x%x, rpi x%x, " "Data: x%x x%x x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -9066,7 +8984,7 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize, /* Xmit ELS RPL ACC response tag <ulpIoTag> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0120 Xmit ELS RPL ACC response tag x%x " - "xri x%x, did x%x, nlp_flag x%x, nlp_state x%x, " + "xri x%x, did x%x, nlp_flag x%lx, nlp_state x%x, " "rpi x%x\n", elsiocb->iotag, ulp_context, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -10411,14 +10329,11 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, * Do not process any unsolicited ELS commands * if the ndlp is in DEV_LOSS */ - spin_lock_irq(&ndlp->lock); - if (ndlp->nlp_flag & NLP_IN_DEV_LOSS) { - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag)) { if (newnode) lpfc_nlp_put(ndlp); goto dropit; } - spin_unlock_irq(&ndlp->lock); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) @@ -10447,7 +10362,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, switch (cmd) { case ELS_CMD_PLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PLOGI: did:x%x/ste:x%x flg:x%x", + "RCV PLOGI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPLOGI++; @@ -10486,9 +10401,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_TARGET_REMOVE; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, elsiocb, NLP_EVT_RCV_PLOGI); @@ -10496,7 +10409,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FLOGI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FLOGI: did:x%x/ste:x%x flg:x%x", + "RCV FLOGI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFLOGI++; @@ -10523,7 +10436,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_LOGO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV LOGO: did:x%x/ste:x%x flg:x%x", + "RCV LOGO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvLOGO++; @@ -10540,7 +10453,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_PRLO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PRLO: 
did:x%x/ste:x%x flg:x%x", + "RCV PRLO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPRLO++; @@ -10569,7 +10482,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_ADISC: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV ADISC: did:x%x/ste:x%x flg:x%x", + "RCV ADISC: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); lpfc_send_els_event(vport, ndlp, payload); @@ -10584,7 +10497,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_PDISC: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PDISC: did:x%x/ste:x%x flg:x%x", + "RCV PDISC: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPDISC++; @@ -10598,7 +10511,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FARPR: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FARPR: did:x%x/ste:x%x flg:x%x", + "RCV FARPR: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFARPR++; @@ -10606,7 +10519,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FARP: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FARP: did:x%x/ste:x%x flg:x%x", + "RCV FARP: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFARP++; @@ -10614,7 +10527,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FAN: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FAN: did:x%x/ste:x%x flg:x%x", + "RCV FAN: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvFAN++; @@ -10623,7 +10536,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, case ELS_CMD_PRLI: case ELS_CMD_NVMEPRLI: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV PRLI: did:x%x/ste:x%x flg:x%x", + "RCV PRLI: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvPRLI++; @@ -10637,7 +10550,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_LIRR: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV LIRR: did:x%x/ste:x%x flg:x%x", + "RCV LIRR: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvLIRR++; @@ -10648,7 +10561,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RLS: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RLS: did:x%x/ste:x%x flg:x%x", + "RCV RLS: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRLS++; @@ -10659,7 +10572,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RPL: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RPL: did:x%x/ste:x%x flg:x%x", + "RCV RPL: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRPL++; @@ -10670,7 +10583,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RNID: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RNID: did:x%x/ste:x%x flg:x%x", + "RCV RNID: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRNID++; @@ -10681,7 +10594,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RTV: 
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RTV: did:x%x/ste:x%x flg:x%x", + "RCV RTV: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRTV++; lpfc_els_rcv_rtv(vport, elsiocb, ndlp); @@ -10691,7 +10604,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_RRQ: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV RRQ: did:x%x/ste:x%x flg:x%x", + "RCV RRQ: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvRRQ++; @@ -10702,7 +10615,7 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_ECHO: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV ECHO: did:x%x/ste:x%x flg:x%x", + "RCV ECHO: did:x%x/ste:x%x flg:x%lx", did, vport->port_state, ndlp->nlp_flag); phba->fc_stat.elsRcvECHO++; @@ -10718,7 +10631,8 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, break; case ELS_CMD_FPIN: lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL, - "RCV FPIN: did:x%x/ste:x%x flg:x%x", + "RCV FPIN: did:x%x/ste:x%x " + "flg:x%lx", did, vport->port_state, ndlp->nlp_flag); lpfc_els_rcv_fpin(vport, (struct fc_els_fpin *)payload, @@ -11226,9 +11140,7 @@ lpfc_retry_pport_discovery(struct lpfc_hba *phba) return; mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FLOGI; phba->pport->port_state = LPFC_FLOGI; return; @@ -11359,11 +11271,9 @@ lpfc_cmpl_els_fdisc(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, list_for_each_entry_safe(np, next_np, &vport->fc_nodes, nlp_listp) { if ((np->nlp_state != NLP_STE_NPR_NODE) || - !(np->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &np->nlp_flag)) continue; - spin_lock_irq(&ndlp->lock); - np->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &np->nlp_flag); lpfc_unreg_rpi(vport, np); } lpfc_cleanup_pending_mbox(vport); @@ -11566,7 +11476,7 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* NPIV LOGO completes to NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "2928 NPIV LOGO completes to NPort x%x " - "Data: x%x x%x x%x x%x x%x x%x x%x\n", + "Data: x%x x%x x%x x%x x%x x%lx x%x\n", ndlp->nlp_DID, ulp_status, ulp_word4, tmo, vport->num_disc_nodes, kref_read(&ndlp->kref), ndlp->nlp_flag, @@ -11582,8 +11492,9 @@ lpfc_cmpl_els_npiv_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, /* Wake up lpfc_vport_delete if waiting...*/ if (ndlp->logo_waitq) wake_up(ndlp->logo_waitq); + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_ISSUE_LOGO | NLP_LOGO_SND); ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; spin_unlock_irq(&ndlp->lock); } @@ -11633,13 +11544,11 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) memcpy(pcmd, &vport->fc_portname, sizeof(struct lpfc_name)); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_CMD, - "Issue LOGO npiv did:x%x flg:x%x", + "Issue LOGO npiv did:x%x flg:x%lx", ndlp->nlp_DID, ndlp->nlp_flag, 0); elsiocb->cmd_cmpl = lpfc_cmpl_els_npiv_logo; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_SND, &ndlp->nlp_flag); elsiocb->ndlp = lpfc_nlp_get(ndlp); if (!elsiocb->ndlp) { 
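/*
 * One subtlety visible in the NPIV LOGO completion hunk above: a single
 * masked clear of two flags under the lock becomes two clear_bit()
 * calls. Each call is individually atomic, but the pair is no longer
 * one indivisible update, so a concurrent reader may briefly observe
 * the in-between state; this is only a safe swap when nothing depends
 * on the two bits changing together. Sketch with illustrative bits:
 */
#include <linux/bitops.h>

#define ISSUE_LOGO_BIT  1       /* illustrative */
#define LOGO_SND_BIT    2       /* illustrative */

static void clear_logo_bits(unsigned long *flags)
{
        /* Two independent atomic RMWs, not one combined one. */
        clear_bit(ISSUE_LOGO_BIT, flags);
        clear_bit(LOGO_SND_BIT, flags);
}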
lpfc_els_free_iocb(phba, elsiocb); @@ -11655,9 +11564,7 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) return 0; err: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_LOGO_SND; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_LOGO_SND, &ndlp->nlp_flag); return 1; } @@ -12138,7 +12045,7 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport, lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT, "3094 Start rport recovery on shost id 0x%x " "fc_id 0x%06x vpi 0x%x rpi 0x%x state 0x%x " - "flags 0x%x\n", + "flag 0x%lx\n", shost->host_no, ndlp->nlp_DID, vport->vpi, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag); @@ -12148,8 +12055,8 @@ lpfc_sli_abts_recover_port(struct lpfc_vport *vport, */ spin_lock_irqsave(&ndlp->lock, flags); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag |= NLP_ISSUE_LOGO; spin_unlock_irqrestore(&ndlp->lock, flags); + set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); } diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 34f77b250387..b5dd17eecf82 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -137,7 +137,7 @@ lpfc_terminate_rport_io(struct fc_rport *rport) ndlp = rdata->pnode; vport = ndlp->vport; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport terminate: sid:x%x did:x%x flg:x%x", + "rport terminate: sid:x%x did:x%x flg:x%lx", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); if (ndlp->nlp_sid != NLP_NO_SID) @@ -155,7 +155,7 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) struct lpfc_hba *phba; struct lpfc_work_evt *evtp; unsigned long iflags; - bool nvme_reg = false; + bool drop_initial_node_ref = false; ndlp = ((struct lpfc_rport_data *)rport->dd_data)->pnode; if (!ndlp) @@ -165,11 +165,11 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) phba = vport->phba; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport devlosscb: sid:x%x did:x%x flg:x%x", + "rport devlosscb: sid:x%x did:x%x flg:x%lx", ndlp->nlp_sid, ndlp->nlp_DID, ndlp->nlp_flag); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "3181 dev_loss_callbk x%06x, rport x%px flg x%x " + "3181 dev_loss_callbk x%06x, rport x%px flg x%lx " "load_flag x%lx refcnt %u state %d xpt x%x\n", ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, vport->load_flag, kref_read(&ndlp->kref), @@ -182,8 +182,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) spin_lock_irqsave(&ndlp->lock, iflags); ndlp->rport = NULL; - if (ndlp->fc4_xpt_flags & NVME_XPT_REGD) - nvme_reg = true; + /* Only 1 thread can drop the initial node reference. + * If not registered for NVME and NLP_DROPPED flag is + * clear, remove the initial reference. + */ + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) + if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag)) + drop_initial_node_ref = true; /* The scsi_transport is done with the rport so lpfc cannot * call to unregister. @@ -194,13 +199,16 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) /* If NLP_XPT_REGD was cleared in lpfc_nlp_unreg_node, * unregister calls were made to the scsi and nvme * transports and refcnt was already decremented. Clear - * the NLP_XPT_REGD flag only if the NVME Rport is + * the NLP_XPT_REGD flag only if the NVME nrport is * confirmed unregistered. 
*/ - if (!nvme_reg && ndlp->fc4_xpt_flags & NLP_XPT_REGD) { - ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; + if (ndlp->fc4_xpt_flags & NLP_XPT_REGD) { + if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) + ndlp->fc4_xpt_flags &= ~NLP_XPT_REGD; spin_unlock_irqrestore(&ndlp->lock, iflags); - lpfc_nlp_put(ndlp); /* may free ndlp */ + + /* Release scsi transport reference */ + lpfc_nlp_put(ndlp); } else { spin_unlock_irqrestore(&ndlp->lock, iflags); } @@ -208,19 +216,8 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) spin_unlock_irqrestore(&ndlp->lock, iflags); } - spin_lock_irqsave(&ndlp->lock, iflags); - - /* Only 1 thread can drop the initial node reference. If - * another thread has set NLP_DROPPED, this thread is done. - */ - if (nvme_reg || (ndlp->nlp_flag & NLP_DROPPED)) { - spin_unlock_irqrestore(&ndlp->lock, iflags); - return; - } - - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irqrestore(&ndlp->lock, iflags); - lpfc_nlp_put(ndlp); + if (drop_initial_node_ref) + lpfc_nlp_put(ndlp); return; } @@ -253,14 +250,14 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) return; } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_IN_DEV_LOSS; + set_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); + spin_lock_irqsave(&ndlp->lock, iflags); /* If there is a PLOGI in progress, and we are in a * NLP_NPR_2B_DISC state, don't turn off the flag. */ if (ndlp->nlp_state != NLP_STE_PLOGI_ISSUE) - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* * The backend does not expect any more calls associated with this @@ -289,15 +286,13 @@ lpfc_dev_loss_tmo_callbk(struct fc_rport *rport) } else { lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, "3188 worker thread is stopped %s x%06x, " - " rport x%px flg x%x load_flag x%lx refcnt " + " rport x%px flg x%lx load_flag x%lx refcnt " "%d\n", __func__, ndlp->nlp_DID, ndlp->rport, ndlp->nlp_flag, vport->load_flag, kref_read(&ndlp->kref)); if (!(ndlp->fc4_xpt_flags & NVME_XPT_REGD)) { - spin_lock_irqsave(&ndlp->lock, iflags); /* Node is in dev loss. No further transaction. 
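/*
 * The dev_loss callback rework above hinges on test_and_set_bit(): the
 * NLP_DROPPED bit can transition 0->1 exactly once, so only the first
 * thread through performs the lpfc_nlp_put() of the initial node
 * reference, replacing the old lock-and-check sequence. A generic
 * sketch of the exactly-once idiom on a kref-counted object (all names
 * here are illustrative):
 */
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

#define OBJ_DROPPED_BIT 0

struct obj_sketch {
        struct kref ref;
        unsigned long flags;
};

static void obj_sketch_free(struct kref *kref)
{
        kfree(container_of(kref, struct obj_sketch, ref));
}

static void obj_sketch_drop_initial_ref(struct obj_sketch *o)
{
        /* test_and_set_bit() returns the bit's previous value, so no
         * matter how many threads race here, exactly one sees 0 and
         * drops the reference. */
        if (!test_and_set_bit(OBJ_DROPPED_BIT, &o->flags))
                kref_put(&o->ref, obj_sketch_free);
}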
*/ - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM); } @@ -430,7 +425,7 @@ lpfc_check_nlp_post_devloss(struct lpfc_vport *vport, lpfc_nlp_get(ndlp); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8438 Devloss timeout reversed on DID x%x " - "refcnt %d ndlp %p flag x%x " + "refcnt %d ndlp %p flag x%lx " "port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); @@ -473,7 +468,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) ndlp->nlp_DID, ndlp->nlp_type, ndlp->nlp_sid); lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE, - "3182 %s x%06x, nflag x%x xflags x%x refcnt %d\n", + "3182 %s x%06x, nflag x%lx xflags x%x refcnt %d\n", __func__, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags, kref_read(&ndlp->kref)); @@ -487,9 +482,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID); - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); return fcf_inuse; } @@ -517,7 +510,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) } break; case Fabric_Cntl_DID: - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) recovering = true; break; case FDMI_DID: @@ -545,15 +538,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) * the following lpfc_nlp_put is necessary after fabric node is * recovered. */ - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); if (recovering) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_NODE, "8436 Devloss timeout marked on " "DID x%x refcnt %d ndlp %p " - "flag x%x port_state = x%x\n", + "flag x%lx port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); @@ -570,7 +561,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) LOG_DISCOVERY | LOG_NODE, "8437 Devloss timeout ignored on " "DID x%x refcnt %d ndlp %p " - "flag x%x port_state = x%x\n", + "flag x%lx port_state = x%x\n", ndlp->nlp_DID, kref_read(&ndlp->kref), ndlp, ndlp->nlp_flag, vport->port_state); @@ -590,7 +581,7 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0203 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " - "NPort x%06x Data: x%x x%x x%x refcnt %d\n", + "NPort x%06x Data: x%lx x%x x%x refcnt %d\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, @@ -600,15 +591,13 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_INFO, LOG_TRACE_EVENT, "0204 Devloss timeout on " "WWPN %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x " - "NPort x%06x Data: x%x x%x x%x\n", + "NPort x%06x Data: x%lx x%x x%x\n", *name, *(name+1), *(name+2), *(name+3), *(name+4), *(name+5), *(name+6), *(name+7), ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); } - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_IN_DEV_LOSS; - spin_unlock_irqrestore(&ndlp->lock, iflags); + clear_bit(NLP_IN_DEV_LOSS, &ndlp->nlp_flag); /* If we are devloss, but we are in the process of rediscovering the * ndlp, don't issue a NLP_EVT_DEVICE_RM 
event. @@ -1373,7 +1362,7 @@ lpfc_linkup_cleanup_nodes(struct lpfc_vport *vport) if (ndlp->nlp_DID != Fabric_DID) lpfc_unreg_rpi(vport, ndlp); lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { /* Fail outstanding IO now since device is * marked for PLOGI. */ @@ -3882,14 +3871,13 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) pmb->ctx_ndlp = NULL; lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NODE | LOG_DISCOVERY, - "0002 rpi:%x DID:%x flg:%x %d x%px\n", + "0002 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); - if (ndlp->nlp_flag & NLP_IGNR_REG_CMPL || + if (test_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag) || ndlp->nlp_state != NLP_STE_REG_LOGIN_ISSUE) { /* We rcvd a rscn after issuing this * mbox reg login, we may have cycled @@ -3899,16 +3887,14 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) * there is another reg login in * process. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); /* * We cannot leave the RPI registered because * if we go thru discovery again for this ndlp * a subsequent REG_RPI will fail. */ - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); } @@ -4221,7 +4207,7 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -4352,9 +4338,7 @@ out: * reference. */ if (!(ndlp->fc4_xpt_flags & (SCSI_XPT_REGD | NVME_XPT_REGD))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } @@ -4375,11 +4359,11 @@ out: if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0003 rpi:%x DID:%x flg:%x %d x%px\n", + "0003 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); @@ -4471,8 +4455,8 @@ lpfc_mbx_cmpl_fc_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) __func__, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_state); - ndlp->nlp_flag |= NLP_RPI_REGISTERED; - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); @@ -4506,7 +4490,7 @@ lpfc_register_remote_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport add: did:x%x flg:x%x type x%x", + "rport add: did:x%x flg:x%lx type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); /* Don't add the remote port if unloading. 
*/ @@ -4574,7 +4558,7 @@ lpfc_unregister_remote_port(struct lpfc_nodelist *ndlp) return; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport delete: did:x%x flg:x%x type x%x", + "rport delete: did:x%x flg:x%lx type x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, @@ -4690,7 +4674,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, "0999 %s Not regd: ndlp x%px rport x%px DID " - "x%x FLG x%x XPT x%x\n", + "x%x FLG x%lx XPT x%x\n", __func__, ndlp, ndlp->rport, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags); return; @@ -4706,7 +4690,7 @@ lpfc_nlp_unreg_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } else if (!ndlp->rport) { lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS | LOG_NODE | LOG_DISCOVERY, - "1999 %s NDLP in devloss x%px DID x%x FLG x%x" + "1999 %s NDLP in devloss x%px DID x%x FLG x%lx" " XPT x%x refcnt %u\n", __func__, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->fc4_xpt_flags, @@ -4751,7 +4735,7 @@ lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type |= NLP_FC_NODE; fallthrough; case NLP_STE_MAPPED_NODE: - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); lpfc_nlp_reg_node(vport, ndlp); break; @@ -4762,7 +4746,7 @@ lpfc_handle_adisc_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * backend, attempt it now */ case NLP_STE_NPR_NODE: - ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); fallthrough; default: lpfc_nlp_unreg_node(vport, ndlp); @@ -4783,13 +4767,13 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } if (new_state == NLP_STE_UNMAPPED_NODE) { - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FC_NODE; } if (new_state == NLP_STE_MAPPED_NODE) - ndlp->nlp_flag &= ~NLP_NODEV_REMOVE; + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); if (new_state == NLP_STE_NPR_NODE) - ndlp->nlp_flag &= ~NLP_RCV_PLOGI; + clear_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); /* Reg/Unreg for FCP and NVME Transport interface */ if ((old_state == NLP_STE_MAPPED_NODE || @@ -4797,7 +4781,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* For nodes marked for ADISC, Handle unreg in ADISC cmpl * if linkup. 
In linkdown do unreg_node */ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC) || + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag) || !lpfc_is_link_up(vport->phba)) lpfc_nlp_unreg_node(vport, ndlp); } @@ -4817,9 +4801,7 @@ lpfc_nlp_state_cleanup(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, (!ndlp->rport || ndlp->rport->scsi_target_id == -1 || ndlp->rport->scsi_target_id >= LPFC_MAX_TARGET)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_TGT_NO_SCSIID; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag); lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); } } @@ -4851,7 +4833,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int state) { int old_state = ndlp->nlp_state; - int node_dropped = ndlp->nlp_flag & NLP_DROPPED; + bool node_dropped = test_bit(NLP_DROPPED, &ndlp->nlp_flag); char name1[16], name2[16]; unsigned long iflags; @@ -4867,7 +4849,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (node_dropped && old_state == NLP_STE_UNUSED_NODE && state != NLP_STE_UNUSED_NODE) { - ndlp->nlp_flag &= ~NLP_DROPPED; + clear_bit(NLP_DROPPED, &ndlp->nlp_flag); lpfc_nlp_get(ndlp); } @@ -4875,7 +4857,7 @@ lpfc_nlp_set_state(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, state != NLP_STE_NPR_NODE) lpfc_cancel_retry_delay_tmo(vport, ndlp); if (old_state == NLP_STE_UNMAPPED_NODE) { - ndlp->nlp_flag &= ~NLP_TGT_NO_SCSIID; + clear_bit(NLP_TGT_NO_SCSIID, &ndlp->nlp_flag); ndlp->nlp_type &= ~NLP_FC_NODE; } @@ -4972,14 +4954,8 @@ lpfc_drop_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * reference from lpfc_nlp_init. If set, don't drop it again and * introduce an imbalance. */ - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DROPPED)) { - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irq(&ndlp->lock); + if (!test_and_set_bit(NLP_DROPPED, &ndlp->nlp_flag)) lpfc_nlp_put(ndlp); - return; - } - spin_unlock_irq(&ndlp->lock); } /* @@ -5094,9 +5070,9 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba, } else if (pring->ringno == LPFC_FCP_RING) { /* Skip match check if waiting to relogin to FCP target */ if ((ndlp->nlp_type & NLP_FCP_TARGET) && - (ndlp->nlp_flag & NLP_DELAY_TMO)) { + test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) return 0; - } + if (ulp_context == ndlp->nlp_rpi) return 1; } @@ -5166,7 +5142,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) * Everything that matches on txcmplq will be returned * by firmware with a no rpi error. */ - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { if (phba->sli_rev != LPFC_SLI_REV4) lpfc_sli3_dequeue_nport_iocbs(phba, ndlp, &completions); else @@ -5200,29 +5176,19 @@ lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) lpfc_issue_els_logo(vport, ndlp, 0); /* Check to see if there are any deferred events to process */ - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) && + ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1434 UNREG cmpl deferred logo x%x " "on NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } else { - /* NLP_RELEASE_RPI is only set for SLI4 ports. 
*/ - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - spin_unlock_irq(&ndlp->lock); - } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_UNREG_INP; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); } /* The node has an outstanding reference for the unreg. Now @@ -5242,8 +5208,6 @@ static void lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, LPFC_MBOXQ_t *mbox) { - unsigned long iflags; - /* Driver always gets a reference on the mailbox job * in support of async jobs. */ @@ -5251,9 +5215,8 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, if (!mbox->ctx_ndlp) return; - if (ndlp->nlp_flag & NLP_ISSUE_LOGO) { + if (test_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag)) { mbox->mbox_cmpl = lpfc_nlp_logo_unreg; - } else if (phba->sli_rev == LPFC_SLI_REV4 && !test_bit(FC_UNLOADING, &vport->load_flag) && (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >= @@ -5261,13 +5224,6 @@ lpfc_set_unreg_login_mbx_cmpl(struct lpfc_hba *phba, struct lpfc_vport *vport, (kref_read(&ndlp->kref) > 0)) { mbox->mbox_cmpl = lpfc_sli4_unreg_rpi_cmpl_clr; } else { - if (test_bit(FC_UNLOADING, &vport->load_flag)) { - if (phba->sli_rev == LPFC_SLI_REV4) { - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_RELEASE_RPI; - spin_unlock_irqrestore(&ndlp->lock, iflags); - } - } mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl; } } @@ -5289,13 +5245,13 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) int rc, acc_plogi = 1; uint16_t rpi; - if (ndlp->nlp_flag & NLP_RPI_REGISTERED || - ndlp->nlp_flag & NLP_REG_LOGIN_SEND) { - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) || + test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) { + if (test_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag)) lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "3366 RPI x%x needs to be " - "unregistered nlp_flag x%x " + "unregistered nlp_flag x%lx " "did x%x\n", ndlp->nlp_rpi, ndlp->nlp_flag, ndlp->nlp_DID); @@ -5303,11 +5259,11 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* If there is already an UNREG in progress for this ndlp, * no need to queue up another one. */ - if (ndlp->nlp_flag & NLP_UNREG_INP) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "1436 unreg_rpi SKIP UNREG x%x on " - "NPort x%x deferred x%x flg x%x " + "NPort x%x deferred x%x flg x%lx " "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, @@ -5330,27 +5286,24 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) return 1; } + /* Accept PLOGIs after unreg_rpi_cmpl. 
*/ if (mbox->mbox_cmpl == lpfc_sli4_unreg_rpi_cmpl_clr) - /* - * accept PLOGIs after unreg_rpi_cmpl - */ acc_plogi = 0; - if (((ndlp->nlp_DID & Fabric_DID_MASK) != - Fabric_DID_MASK) && - (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))) - ndlp->nlp_flag |= NLP_UNREG_INP; + + if (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag)) + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "1433 unreg_rpi UNREG x%x on " - "NPort x%x deferred flg x%x " + "NPort x%x deferred flg x%lx " "Data:x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp); rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT); if (rc == MBX_NOT_FINISHED) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); mempool_free(mbox, phba->mbox_mem_pool); acc_plogi = 1; lpfc_nlp_put(ndlp); @@ -5360,7 +5313,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) LOG_NODE | LOG_DISCOVERY, "1444 Failed to allocate mempool " "unreg_rpi UNREG x%x, " - "DID x%x, flag x%x, " + "DID x%x, flag x%lx, " "ndlp x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp); @@ -5370,7 +5323,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * not unloading. */ if (!test_bit(FC_UNLOADING, &vport->load_flag)) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, @@ -5383,13 +5336,13 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) out: if (phba->sli_rev != LPFC_SLI_REV4) ndlp->nlp_rpi = 0; - ndlp->nlp_flag &= ~NLP_RPI_REGISTERED; - ndlp->nlp_flag &= ~NLP_NPR_ADISC; + clear_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); if (acc_plogi) - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 1; } - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); return 0; } @@ -5417,7 +5370,7 @@ lpfc_unreg_hba_rpis(struct lpfc_hba *phba) for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { spin_lock_irqsave(&vports[i]->fc_nodes_list_lock, iflags); list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) { - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { /* The mempool_alloc might sleep */ spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags); @@ -5505,7 +5458,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) /* Cleanup node for NPort <nlp_DID> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, "0900 Cleanup node for NPort x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); lpfc_dequeue_node(vport, ndlp); @@ -5550,9 +5503,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) lpfc_els_abort(phba, ndlp); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = 0; del_timer_sync(&ndlp->nlp_delayfunc); @@ -5561,10 +5512,6 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) list_del_init(&ndlp->dev_loss_evt.evt_listp); list_del_init(&ndlp->recovery_evt.evt_listp); lpfc_cleanup_vports_rrqs(vport, ndlp); - - if (phba->sli_rev == LPFC_SLI_REV4) - ndlp->nlp_flag |= NLP_RELEASE_RPI; - return 0; } @@ -5639,7 +5586,7 @@ __lpfc_findnode_did(struct lpfc_vport *vport, uint32_t did) ); 
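/*
 * The unreg_rpi hunk above shows the conversion's error-path shape: the
 * "UNREG in flight" bit is tested up front to avoid queuing a second
 * unreg, set with set_bit() just before the mailbox is issued, and
 * rolled back with clear_bit() when the issue fails, all without the
 * node spinlock. Condensed sketch (issue_mbox_sketch() stands in for
 * lpfc_sli_issue_mbox(); bit number illustrative):
 */
#include <linux/bitops.h>
#include <linux/errno.h>

#define UNREG_INP_BIT   3       /* illustrative */

static int issue_mbox_sketch(void)
{
        return 0;       /* stand-in: 0 on success, nonzero on failure */
}

static int unreg_rpi_sketch(unsigned long *flags)
{
        int rc;

        if (test_bit(UNREG_INP_BIT, flags))
                return -EBUSY;  /* an UNREG is already in progress */

        set_bit(UNREG_INP_BIT, flags);
        rc = issue_mbox_sketch();
        if (rc) {
                /* Could not queue the mailbox: undo the state change. */
                clear_bit(UNREG_INP_BIT, flags);
                return rc;
        }
        return 0;
}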
lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "0929 FIND node DID " - "Data: x%px x%x x%x x%x x%x x%px\n", + "Data: x%px x%x x%lx x%x x%x x%px\n", ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->nlp_rpi, ndlp->active_rrqs_xri_bitmap); @@ -5692,7 +5639,7 @@ lpfc_findnode_mapped(struct lpfc_vport *vport) iflags); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "2025 FIND node DID MAPPED " - "Data: x%px x%x x%x x%x x%px\n", + "Data: x%px x%x x%lx x%x x%px\n", ndlp, ndlp->nlp_DID, ndlp->nlp_flag, data1, ndlp->active_rrqs_xri_bitmap); @@ -5726,13 +5673,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6453 Setup New Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); return ndlp; } @@ -5751,7 +5696,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6455 Setup RSCN Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); @@ -5769,13 +5714,11 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) NLP_EVT_DEVICE_RECOVERY); } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6456 Skip Setup RSCN Node x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); ndlp = NULL; @@ -5783,7 +5726,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6457 Setup Active Node 2B_DISC x%x " - "Data:x%x x%x x%lx\n", + "Data:x%lx x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, vport->fc_flag); @@ -5794,7 +5737,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) if (ndlp->nlp_state == NLP_STE_ADISC_ISSUE || ndlp->nlp_state == NLP_STE_PLOGI_ISSUE || (!vport->phba->nvmet_support && - ndlp->nlp_flag & NLP_RCV_PLOGI)) + test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag))) return NULL; if (vport->phba->nvmet_support) @@ -5804,10 +5747,7 @@ lpfc_setup_disc_node(struct lpfc_vport *vport, uint32_t did) * allows for rediscovery */ lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); } return ndlp; } @@ -6178,7 +6118,7 @@ lpfc_disc_timeout_handler(struct lpfc_vport *vport) /* Clean up the ndlp on Fabric connections */ lpfc_drop_node(vport, ndlp); - } else if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + } else if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { /* Fail outstanding IO now since device * is marked for PLOGI. 
*/ @@ -6391,11 +6331,11 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); ndlp->nlp_type |= NLP_FABRIC; lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0004 rpi:%x DID:%x flg:%x %d x%px\n", + "0004 rpi:%x DID:%x flg:%lx %d x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref), ndlp); @@ -6445,7 +6385,7 @@ __lpfc_find_node(struct lpfc_vport *vport, node_filter filter, void *param) if (filter(ndlp, param)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE_VERBOSE, "3185 FIND node filter %ps DID " - "ndlp x%px did x%x flg x%x st x%x " + "ndlp x%px did x%x flg x%lx st x%x " "xri x%x type x%x rpi x%x\n", filter, ndlp, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, @@ -6580,9 +6520,10 @@ lpfc_nlp_init(struct lpfc_vport *vport, uint32_t did) INIT_LIST_HEAD(&ndlp->nlp_listp); if (vport->phba->sli_rev == LPFC_SLI_REV4) { ndlp->nlp_rpi = rpi; - lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "0007 Init New ndlp x%px, rpi:x%x DID:%x " - "flg:x%x refcnt:%d\n", + lpfc_printf_vlog(vport, KERN_INFO, + LOG_ELS | LOG_NODE | LOG_DISCOVERY, + "0007 Init New ndlp x%px, rpi:x%x DID:x%x " + "flg:x%lx refcnt:%d\n", ndlp, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); @@ -6614,7 +6555,7 @@ lpfc_nlp_release(struct kref *kref) struct lpfc_vport *vport = ndlp->vport; lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node release: did:x%x flg:x%x type:x%x", + "node release: did:x%x flg:x%lx type:x%x", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_type); lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE, @@ -6626,19 +6567,12 @@ lpfc_nlp_release(struct kref *kref) lpfc_cancel_retry_delay_tmo(vport, ndlp); lpfc_cleanup_node(vport, ndlp); - /* Not all ELS transactions have registered the RPI with the port. - * In these cases the rpi usage is temporary and the node is - * released when the WQE is completed. Catch this case to free the - * RPI to the pool. Because this node is in the release path, a lock - * is unnecessary. All references are gone and the node has been - * dequeued. + /* All nodes are initialized with an RPI that needs to be released + * now. All references are gone and the node has been dequeued. 
*/ - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - if (ndlp->nlp_rpi != LPFC_RPI_ALLOC_ERROR && - !(ndlp->nlp_flag & (NLP_RPI_REGISTERED | NLP_UNREG_INP))) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - } + if (vport->phba->sli_rev == LPFC_SLI_REV4) { + lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); + ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; } /* The node is not freed back to memory, it is released to a pool so @@ -6667,7 +6601,7 @@ lpfc_nlp_get(struct lpfc_nodelist *ndlp) if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node get: did:x%x flg:x%x refcnt:x%x", + "node get: did:x%x flg:x%lx refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); @@ -6699,7 +6633,7 @@ lpfc_nlp_put(struct lpfc_nodelist *ndlp) { if (ndlp) { lpfc_debugfs_disc_trc(ndlp->vport, LPFC_DISC_TRC_NODE, - "node put: did:x%x flg:x%x refcnt:x%x", + "node put: did:x%x flg:x%lx refcnt:x%x", ndlp->nlp_DID, ndlp->nlp_flag, kref_read(&ndlp->kref)); } else { @@ -6752,11 +6686,12 @@ lpfc_fcf_inuse(struct lpfc_hba *phba) spin_unlock_irqrestore(&vports[i]->fc_nodes_list_lock, iflags); goto out; - } else if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + } else if (test_bit(NLP_RPI_REGISTERED, + &ndlp->nlp_flag)) { ret = 1; lpfc_printf_log(phba, KERN_INFO, LOG_NODE | LOG_DISCOVERY, - "2624 RPI %x DID %x flag %x " + "2624 RPI %x DID %x flag %lx " "still logged in\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 50c761991191..3ddcaa864f07 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -3092,7 +3092,8 @@ lpfc_cleanup(struct lpfc_vport *vport) lpfc_printf_vlog(ndlp->vport, KERN_ERR, LOG_DISCOVERY, "0282 did:x%x ndlp:x%px " - "refcnt:%d xflags x%x nflag x%x\n", + "refcnt:%d xflags x%x " + "nflag x%lx\n", ndlp->nlp_DID, (void *)ndlp, kref_read(&ndlp->kref), ndlp->fc4_xpt_flags, @@ -3379,7 +3380,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) } /** - * lpfc_sli4_node_prep - Assign RPIs for active nodes. + * lpfc_sli4_node_rpi_restore - Recover assigned RPIs for active nodes. * @phba: pointer to lpfc hba data structure. * * Allocate RPIs for all active remote nodes. This is needed whenever @@ -3387,7 +3388,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action) * is to fixup the temporary rpi assignments. **/ void -lpfc_sli4_node_prep(struct lpfc_hba *phba) +lpfc_sli4_node_rpi_restore(struct lpfc_hba *phba) { struct lpfc_nodelist *ndlp, *next_ndlp; struct lpfc_vport **vports; @@ -3397,10 +3398,10 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) return; vports = lpfc_create_vport_work_array(phba); - if (vports == NULL) + if (!vports) return; - for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) { + for (i = 0; i <= phba->max_vports && vports[i]; i++) { if (test_bit(FC_UNLOADING, &vports[i]->load_flag)) continue; @@ -3409,14 +3410,20 @@ lpfc_sli4_node_prep(struct lpfc_hba *phba) nlp_listp) { rpi = lpfc_sli4_alloc_rpi(phba); if (rpi == LPFC_RPI_ALLOC_ERROR) { - /* TODO print log? 
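/*
 * The lpfc_nlp_release() hunk above retires the NLP_RELEASE_RPI
 * bookkeeping: because every SLI4 node now starts life with an
 * allocated RPI (re-acquired by lpfc_sli4_node_rpi_restore() after a
 * reset), the final release path can free the RPI unconditionally
 * instead of threading a "needs freeing" flag through the unreg paths.
 * The shape of that simplification, sketched with an IDA standing in
 * for the driver's RPI allocator (an assumption made purely for
 * illustration):
 */
#include <linux/idr.h>

static DEFINE_IDA(rpi_sketch_ida);

static int node_sketch_init(int *rpi)
{
        /* Allocation happens once, at node creation. */
        *rpi = ida_alloc(&rpi_sketch_ida, GFP_KERNEL);
        return *rpi < 0 ? *rpi : 0;
}

static void node_sketch_release(int rpi)
{
        /* Final kref release: always return the id, no conditional. */
        ida_free(&rpi_sketch_ida, rpi);
}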
*/ + lpfc_printf_vlog(ndlp->vport, KERN_INFO, + LOG_NODE | LOG_DISCOVERY, + "0099 RPI alloc error for " + "ndlp x%px DID:x%06x " + "flg:x%lx\n", + ndlp, ndlp->nlp_DID, + ndlp->nlp_flag); continue; } ndlp->nlp_rpi = rpi; lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_NODE | LOG_DISCOVERY, "0009 Assign RPI x%x to ndlp x%px " - "DID:x%06x flg:x%x\n", + "DID:x%06x flg:x%lx\n", ndlp->nlp_rpi, ndlp, ndlp->nlp_DID, ndlp->nlp_flag); } @@ -3820,35 +3827,12 @@ lpfc_offline_prep(struct lpfc_hba *phba, int mbx_action) &vports[i]->fc_nodes, nlp_listp) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); - + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); if (offline || hba_pci_err) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_UNREG_INP | - NLP_RPI_REGISTERED); - spin_unlock_irq(&ndlp->lock); - if (phba->sli_rev == LPFC_SLI_REV4) - lpfc_sli_rpi_release(vports[i], - ndlp); - } else { - lpfc_unreg_rpi(vports[i], ndlp); - } - /* - * Whenever an SLI4 port goes offline, free the - * RPI. Get a new RPI when the adapter port - * comes back online. - */ - if (phba->sli_rev == LPFC_SLI_REV4) { - lpfc_printf_vlog(vports[i], KERN_INFO, - LOG_NODE | LOG_DISCOVERY, - "0011 Free RPI x%x on " - "ndlp: x%px did x%x\n", - ndlp->nlp_rpi, ndlp, - ndlp->nlp_DID); - lpfc_sli4_free_rpi(phba, ndlp->nlp_rpi); - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; + clear_bit(NLP_UNREG_INP, + &ndlp->nlp_flag); + clear_bit(NLP_RPI_REGISTERED, + &ndlp->nlp_flag); } if (ndlp->nlp_type & NLP_FABRIC) { @@ -6925,9 +6909,7 @@ lpfc_sli4_async_fip_evt(struct lpfc_hba *phba, */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FDISC; vport->port_state = LPFC_FDISC; } else { diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 4574716c8764..4d88cfe71cae 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -65,7 +65,7 @@ lpfc_check_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_name *nn, struct lpfc_name *pn) { /* First, we MUST have a RPI registered */ - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) return 0; /* Compare the ADISC rsp WWNN / WWPN matches our internal node @@ -239,7 +239,7 @@ lpfc_els_abort(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp) /* Abort outstanding I/O on NPort <nlp_DID> */ lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_DISCOVERY, "2819 Abort outstanding I/O on NPort x%x " - "Data: x%x x%x x%x\n", + "Data: x%lx x%x x%x\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state, ndlp->nlp_rpi); /* Clean up all fabric IOs first.*/ @@ -340,7 +340,7 @@ lpfc_defer_plogi_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *login_mbox) /* Now process the REG_RPI cmpl */ lpfc_mbx_cmpl_reg_login(phba, login_mbox); - ndlp->nlp_flag &= ~NLP_ACC_REGLOGIN; + clear_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); kfree(save_iocb); } @@ -404,7 +404,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0114 PLOGI chkparm OK Data: x%x x%x x%x " + "0114 PLOGI chkparm OK Data: x%x x%x x%lx " "x%x x%x x%lx\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi, vport->port_state, @@ -429,7 +429,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* if already logged 
in, do implicit logout */ switch (ndlp->nlp_state) { case NLP_STE_NPR_NODE: - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) break; fallthrough; case NLP_STE_REG_LOGIN_ISSUE: @@ -449,7 +449,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; - ndlp->nlp_flag &= ~NLP_FIRSTBURST; + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_PLOGI, cmdiocb, ndlp, NULL); @@ -480,7 +480,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_type &= ~(NLP_NVME_TARGET | NLP_NVME_INITIATOR); ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; ndlp->nlp_nvme_info &= ~NLP_NVME_NSLER; - ndlp->nlp_flag &= ~NLP_FIRSTBURST; + clear_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); login_mbox = NULL; link_mbox = NULL; @@ -552,13 +552,13 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_can_disctmo(vport); } - ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && sp->cmn.valid_vendor_ver_level) { vid = be32_to_cpu(sp->un.vv.vid); flag = be32_to_cpu(sp->un.vv.flags); if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) - ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + set_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); } login_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); @@ -627,10 +627,9 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * this ELS request. The only way to do this is * to register, then unregister the RPI. */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= (NLP_RM_DFLT_RPI | NLP_ACC_REGLOGIN | - NLP_RCV_PLOGI); - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_RM_DFLT_RPI, &ndlp->nlp_flag); + set_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + set_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); } stat.un.b.lsRjtRsnCode = LSRJT_INVALID_CMD; @@ -665,9 +664,8 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, login_mbox->ctx_u.save_iocb = save_iocb; /* For PLOGI ACC */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= (NLP_ACC_REGLOGIN | NLP_RCV_PLOGI); - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag); + set_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag); /* Start the ball rolling by issuing REG_LOGIN here */ rc = lpfc_sli_issue_mbox(phba, login_mbox, MBX_NOWAIT); @@ -797,7 +795,7 @@ out: */ if (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET)) { if ((ndlp->nlp_state != NLP_STE_MAPPED_NODE) && - !(ndlp->nlp_flag & NLP_NPR_ADISC)) + !test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE); } @@ -814,9 +812,7 @@ out: /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -835,9 +831,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Only call LOGO ACC for first LOGO, this avoids sending unnecessary * PLOGIs during LOGO storms from a device. 
*/ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); if (els_cmd == ELS_CMD_PRLO) lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); else @@ -890,9 +884,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_FDISC; vport->port_state = LPFC_FDISC; } else { @@ -915,14 +907,12 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) { mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY, "3204 Start nlpdelay on DID x%06x " - "nflag x%x lastels x%x ref cnt %u", + "nflag x%lx lastels x%x ref cnt %u", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_last_elscmd, kref_read(&ndlp->kref)); @@ -935,9 +925,7 @@ out: ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); /* The driver has to wait until the ACC completes before it continues * processing the LOGO. The action will resume in * lpfc_cmpl_els_logo_acc routine. Since part of processing includes an @@ -978,7 +966,7 @@ lpfc_rcv_prli_support_check(struct lpfc_vport *vport, out: lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, "6115 Rcv PRLI (%x) check failed: ndlp rpi %d " - "state x%x flags x%x port_type: x%x " + "state x%x flags x%lx port_type: x%x " "npr->initfcn: x%x npr->tgtfcn: x%x\n", cmd, ndlp->nlp_rpi, ndlp->nlp_state, ndlp->nlp_flag, vport->port_type, @@ -1020,7 +1008,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (npr->prliType == PRLI_NVME_TYPE) ndlp->nlp_type |= NLP_NVME_TARGET; if (npr->writeXferRdyDis) - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); } if (npr->Retry && ndlp->nlp_type & (NLP_FCP_INITIATOR | NLP_FCP_TARGET)) @@ -1057,7 +1045,7 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, roles |= FC_RPORT_ROLE_FCP_TARGET; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT, - "rport rolechg: role:x%x did:x%x flg:x%x", + "rport rolechg: role:x%x did:x%x flg:x%lx", roles, ndlp->nlp_DID, ndlp->nlp_flag); if (vport->cfg_enable_fc4_type != LPFC_ENABLE_NVME) @@ -1068,10 +1056,8 @@ lpfc_rcv_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, static uint32_t lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) { - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); return 0; } @@ -1081,16 +1067,12 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) (test_bit(FC_RSCN_MODE, &vport->fc_flag) || ((ndlp->nlp_fcp_info & NLP_FCP_2_DEVICE) && (ndlp->nlp_type & NLP_FCP_TARGET)))) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + 
set_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); return 1; } } - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); lpfc_unreg_rpi(vport, ndlp); return 0; } @@ -1115,10 +1097,10 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, /* If there is already an UNREG in progress for this ndlp, * no need to queue up another one. */ - if (ndlp->nlp_flag & NLP_UNREG_INP) { + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1435 release_rpi SKIP UNREG x%x on " - "NPort x%x deferred x%x flg x%x " + "NPort x%x deferred x%x flg x%lx " "Data: x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, @@ -1143,11 +1125,11 @@ lpfc_release_rpi(struct lpfc_hba *phba, struct lpfc_vport *vport, if (((ndlp->nlp_DID & Fabric_DID_MASK) != Fabric_DID_MASK) && (!test_bit(FC_OFFLINE_MODE, &vport->fc_flag))) - ndlp->nlp_flag |= NLP_UNREG_INP; + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "1437 release_rpi UNREG x%x " - "on NPort x%x flg x%x\n", + "on NPort x%x flg x%lx\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag); rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT); @@ -1175,7 +1157,7 @@ lpfc_disc_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, } lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0271 Illegal State Transition: node x%x " - "event x%x, state x%x Data: x%x x%x\n", + "event x%x, state x%x Data: x%x x%lx\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); return ndlp->nlp_state; @@ -1190,13 +1172,12 @@ lpfc_cmpl_plogi_illegal(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * working on the same NPortID, do nothing for this thread * to stop it. 
*/ - if (!(ndlp->nlp_flag & NLP_RCV_PLOGI)) { + if (!test_bit(NLP_RCV_PLOGI, &ndlp->nlp_flag)) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0272 Illegal State Transition: node x%x " - "event x%x, state x%x Data: x%x x%x\n", + "event x%x, state x%x Data: x%x x%lx\n", ndlp->nlp_DID, evt, ndlp->nlp_state, ndlp->nlp_rpi, ndlp->nlp_flag); - } return ndlp->nlp_state; } @@ -1230,9 +1211,7 @@ lpfc_rcv_logo_unused_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return ndlp->nlp_state; @@ -1290,11 +1269,9 @@ lpfc_rcv_plogi_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, NULL); } else { if (lpfc_rcv_plogi(vport, ndlp, cmdiocb) && - (ndlp->nlp_flag & NLP_NPR_2B_DISC) && - (vport->num_disc_nodes)) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && + vport->num_disc_nodes) { + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); /* Check if there are more PLOGIs to be sent */ lpfc_more_plogi(vport); if (vport->num_disc_nodes == 0) { @@ -1356,9 +1333,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); @@ -1389,7 +1364,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ndlp->nlp_flag & NLP_ACC_REGLOGIN) { + if (test_bit(NLP_ACC_REGLOGIN, &ndlp->nlp_flag)) { /* Recovery from PLOGI collision logic */ return ndlp->nlp_state; } @@ -1418,7 +1393,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, goto out; /* PLOGI chkparm OK */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, - "0121 PLOGI chkparm OK Data: x%x x%x x%x x%x\n", + "0121 PLOGI chkparm OK Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); if (vport->cfg_fcp_class == 2 && (sp->cls2.classValid)) @@ -1446,14 +1421,14 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, ed_tov = (phba->fc_edtov + 999999) / 1000000; } - ndlp->nlp_flag &= ~NLP_SUPPRESS_RSP; + clear_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); if ((phba->sli.sli_flag & LPFC_SLI_SUPPRESS_RSP) && sp->cmn.valid_vendor_ver_level) { vid = be32_to_cpu(sp->un.vv.vid); flag = be32_to_cpu(sp->un.vv.flags); if ((vid == LPFC_VV_EMLX_ID) && (flag & LPFC_VV_SUPPRESS_RSP)) - ndlp->nlp_flag |= NLP_SUPPRESS_RSP; + set_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag); } /* @@ -1476,7 +1451,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, LOG_TRACE_EVENT, "0133 PLOGI: no memory " "for config_link " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); goto out; @@ -1500,7 +1475,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, if (!mbox) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0018 PLOGI: no memory for reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, 
ndlp->nlp_flag, ndlp->nlp_rpi); goto out; @@ -1520,7 +1495,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, mbox->mbox_cmpl = lpfc_mbx_cmpl_fdmi_reg_login; break; default: - ndlp->nlp_flag |= NLP_REG_LOGIN_SEND; + set_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); mbox->mbox_cmpl = lpfc_mbx_cmpl_reg_login; } @@ -1535,8 +1510,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, NLP_STE_REG_LOGIN_ISSUE); return ndlp->nlp_state; } - if (ndlp->nlp_flag & NLP_REG_LOGIN_SEND) - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); /* decrement node reference count to the failed mbox * command */ @@ -1544,7 +1518,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0134 PLOGI: cannot issue reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); } else { @@ -1552,7 +1526,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport, lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0135 PLOGI: cannot format reg_login " - "Data: x%x x%x x%x x%x\n", + "Data: x%x x%x x%lx x%x\n", ndlp->nlp_DID, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); } @@ -1605,18 +1579,15 @@ static uint32_t lpfc_device_rm_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding PLOGI */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -1636,9 +1607,8 @@ lpfc_device_recov_plogi_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); return ndlp->nlp_state; } @@ -1656,10 +1626,7 @@ lpfc_rcv_plogi_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, cmdiocb = (struct lpfc_iocbq *) arg; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_2B_DISC; - spin_unlock_irq(&ndlp->lock); + if (test_and_clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { if (vport->num_disc_nodes) lpfc_more_adisc(vport); } @@ -1748,9 +1715,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport, /* 1 sec timeout */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; @@ -1789,18 +1754,15 @@ static uint32_t lpfc_device_rm_adisc_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if 
(test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding ADISC */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding ADISC */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -1820,9 +1782,8 @@ lpfc_device_recov_adisc_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -1856,7 +1817,7 @@ lpfc_rcv_prli_reglogin_issue(struct lpfc_vport *vport, * transition to UNMAPPED provided the RPI has completed * registration. */ - if (ndlp->nlp_flag & NLP_RPI_REGISTERED) { + if (test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag)) { lpfc_rcv_prli(vport, ndlp, cmdiocb); lpfc_els_rsp_prli_acc(vport, cmdiocb, ndlp); } else { @@ -1895,7 +1856,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, if ((mb = phba->sli.mbox_active)) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == mb->ctx_ndlp)) { - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); mb->ctx_ndlp = NULL; mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl; @@ -1906,7 +1867,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport, list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) { if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) && (ndlp == mb->ctx_ndlp)) { - ndlp->nlp_flag &= ~NLP_REG_LOGIN_SEND; + clear_bit(NLP_REG_LOGIN_SEND, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); list_del(&mb->list); phba->sli.mboxq_cnt--; @@ -1976,9 +1937,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, /* Put ndlp in npr state set plogi timer for 1 sec */ mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; lpfc_issue_els_logo(vport, ndlp, 0); @@ -1989,7 +1948,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport, if (phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); /* Only if we are not a fabric nport do we issue PRLI */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, @@ -2061,15 +2020,12 @@ lpfc_device_rm_reglogin_issue(struct lpfc_vport *vport, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } static uint32_t @@ -2084,17 +2040,16 @@ lpfc_device_recov_reglogin_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); /* If we are a target we won't immediately transition into PRLI, * so if REG_LOGIN already completed 
we don't need to ignore it. */ - if (!(ndlp->nlp_flag & NLP_RPI_REGISTERED) || + if (!test_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag) || !vport->phba->nvmet_support) - ndlp->nlp_flag |= NLP_IGNR_REG_CMPL; + set_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2228,7 +2183,8 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, if (npr->targetFunc) { ndlp->nlp_type |= NLP_FCP_TARGET; if (npr->writeXferRdyDis) - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, + &ndlp->nlp_flag); } if (npr->Retry) ndlp->nlp_fcp_info |= NLP_FCP_2_DEVICE; @@ -2272,7 +2228,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* Both sides support FB. The target's first * burst size is a 512 byte encoded value. */ - ndlp->nlp_flag |= NLP_FIRSTBURST; + set_bit(NLP_FIRSTBURST, &ndlp->nlp_flag); ndlp->nvme_fb_size = bf_get_be32(prli_fb_sz, nvpr); @@ -2287,7 +2243,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_DISC, "6029 NVME PRLI Cmpl w1 x%08x " - "w4 x%08x w5 x%08x flag x%x, " + "w4 x%08x w5 x%08x flag x%lx, " "fcp_info x%x nlp_type x%x\n", be32_to_cpu(nvpr->word1), be32_to_cpu(nvpr->word4), @@ -2299,9 +2255,7 @@ lpfc_cmpl_prli_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, (vport->port_type == LPFC_NPIV_PORT) && vport->cfg_restrict_login) { out: - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_TARGET_REMOVE; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_TARGET_REMOVE, &ndlp->nlp_flag); lpfc_issue_els_logo(vport, ndlp, 0); ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; @@ -2353,18 +2307,15 @@ static uint32_t lpfc_device_rm_prli_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; - } else { - /* software abort outstanding PLOGI */ - lpfc_els_abort(vport->phba, ndlp); - - lpfc_drop_node(vport, ndlp); - return NLP_STE_FREED_NODE; } + /* software abort outstanding PLOGI */ + lpfc_els_abort(vport->phba, ndlp); + + lpfc_drop_node(vport, ndlp); + return NLP_STE_FREED_NODE; } @@ -2401,9 +2352,8 @@ lpfc_device_recov_prli_issue(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_PRLI_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2442,9 +2392,7 @@ lpfc_rcv_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); return ndlp->nlp_state; } @@ -2483,9 +2431,8 @@ lpfc_cmpl_logo_logo_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { ndlp->nlp_prev_state = 
NLP_STE_LOGO_ISSUE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); lpfc_disc_set_adisc(vport, ndlp); return ndlp->nlp_state; } @@ -2591,8 +2538,9 @@ lpfc_device_recov_unmap_node(struct lpfc_vport *vport, { ndlp->nlp_prev_state = NLP_STE_UNMAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); lpfc_disc_set_adisc(vport, ndlp); @@ -2653,9 +2601,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_sli_abort_iocb(vport, ndlp->nlp_sid, 0, LPFC_CTX_TGT); /* Send PRLO_ACC */ - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL); /* Save ELS_CMD_PRLO as the last elscmd and then set to NPR. @@ -2665,7 +2611,7 @@ lpfc_rcv_prlo_mapped_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ndlp->nlp_prev_state = ndlp->nlp_state; lpfc_printf_vlog(vport, KERN_INFO, LOG_NODE | LOG_ELS | LOG_DISCOVERY, - "3422 DID x%06x nflag x%x lastels x%x ref cnt %u\n", + "3422 DID x%06x nflag x%lx lastels x%x ref cnt %u\n", ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_last_elscmd, kref_read(&ndlp->kref)); @@ -2685,8 +2631,9 @@ lpfc_device_recov_mapped_node(struct lpfc_vport *vport, ndlp->nlp_prev_state = NLP_STE_MAPPED_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; @@ -2699,16 +2646,16 @@ lpfc_rcv_plogi_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; /* Ignore PLOGI if we have an outstanding LOGO */ - if (ndlp->nlp_flag & (NLP_LOGO_SND | NLP_LOGO_ACC)) + if (test_bit(NLP_LOGO_SND, &ndlp->nlp_flag) || + test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) return ndlp->nlp_state; if (lpfc_rcv_plogi(vport, ndlp, cmdiocb)) { lpfc_cancel_retry_delay_tmo(vport, ndlp); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NPR_ADISC | NLP_NPR_2B_DISC); - spin_unlock_irq(&ndlp->lock); - } else if (!(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); + } else if (!test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* send PLOGI immediately, move to PLOGI issue state */ - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2729,14 +2676,14 @@ lpfc_rcv_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, stat.un.b.lsRjtRsnCodeExp = LSEXP_NOTHING_MORE; lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL); - if (!(ndlp->nlp_flag & NLP_DELAY_TMO)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { /* * ADISC nodes will be handled in 
regular discovery path after * receiving response from NS. * * For other nodes, Send PLOGI to trigger an implicit LOGO. */ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2767,15 +2714,15 @@ lpfc_rcv_padisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, * or discovery in progress for this node. Starting discovery * here will affect the counting of discovery threads. */ - if (!(ndlp->nlp_flag & NLP_DELAY_TMO) && - !(ndlp->nlp_flag & NLP_NPR_2B_DISC)) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag) && + !test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { /* * ADISC nodes will be handled in regular discovery path after * receiving response from NS. * * For other nodes, Send PLOGI to trigger an implicit LOGO. */ - if (!(ndlp->nlp_flag & NLP_NPR_ADISC)) { + if (!test_bit(NLP_NPR_ADISC, &ndlp->nlp_flag)) { ndlp->nlp_prev_state = NLP_STE_NPR_NODE; lpfc_nlp_set_state(vport, ndlp, NLP_STE_PLOGI_ISSUE); lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); @@ -2790,24 +2737,18 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *) arg; - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_LOGO_ACC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL); - if ((ndlp->nlp_flag & NLP_DELAY_TMO) == 0) { + if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) { mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1)); - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_DELAY_TMO; - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); ndlp->nlp_last_elscmd = ELS_CMD_PLOGI; } else { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_NPR_ADISC; - spin_unlock_irq(&ndlp->lock); + clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag); } return ndlp->nlp_state; } @@ -2844,7 +2785,7 @@ lpfc_cmpl_prli_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + if (ulp_status && test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2877,7 +2818,7 @@ lpfc_cmpl_adisc_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ulp_status = get_job_ulpstatus(phba, rspiocb); - if (ulp_status && (ndlp->nlp_flag & NLP_NODEV_REMOVE)) { + if (ulp_status && test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2896,12 +2837,11 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport, /* SLI4 ports have preallocated logical rpis. 
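The recurring transformation in these lpfc hunks: ndlp->nlp_flag was a u32 bitmask whose every read-modify-write needed ndlp->lock, and it becomes an unsigned long driven by the atomic bitops, with the flag constants redefined from masks to bit numbers (which is why test_bit(NLP_RCV_PLOGI, ...) replaces nlp_flag & NLP_RCV_PLOGI). Fields that are not simple flag bits, such as nlp_fc4_type, still take the lock. A minimal sketch of both shapes; xnode and XFLAG_DELAY are illustrative names, not lpfc's:

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct xnode {
        spinlock_t lock;
        unsigned long flags;            /* was: u32, guarded by ->lock */
        u32 fc4_type;                   /* still guarded by ->lock */
};

#define XFLAG_DELAY     3               /* a bit number now, not BIT(3) */

static void set_flag_before(struct xnode *n)
{
        spin_lock_irq(&n->lock);
        n->flags |= BIT(XFLAG_DELAY);   /* locked read-modify-write */
        spin_unlock_irq(&n->lock);
}

static void set_flag_after(struct xnode *n)
{
        set_bit(XFLAG_DELAY, &n->flags);        /* atomic, lock-free */

        /* Non-flag fields keep the lock, as the recov handlers do. */
        spin_lock_irq(&n->lock);
        n->fc4_type &= ~0x3;
        spin_unlock_irq(&n->lock);
}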
*/ if (vport->phba->sli_rev < LPFC_SLI_REV4) ndlp->nlp_rpi = mb->un.varWords[0]; - ndlp->nlp_flag |= NLP_RPI_REGISTERED; - if (ndlp->nlp_flag & NLP_LOGO_ACC) { + set_bit(NLP_RPI_REGISTERED, &ndlp->nlp_flag); + if (test_bit(NLP_LOGO_ACC, &ndlp->nlp_flag)) lpfc_unreg_rpi(vport, ndlp); - } } else { - if (ndlp->nlp_flag & NLP_NODEV_REMOVE) { + if (test_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag)) { lpfc_drop_node(vport, ndlp); return NLP_STE_FREED_NODE; } @@ -2913,10 +2853,8 @@ static uint32_t lpfc_device_rm_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, void *arg, uint32_t evt) { - if (ndlp->nlp_flag & NLP_NPR_2B_DISC) { - spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag |= NLP_NODEV_REMOVE; - spin_unlock_irq(&ndlp->lock); + if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag)) { + set_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); return ndlp->nlp_state; } lpfc_drop_node(vport, ndlp); @@ -2932,8 +2870,9 @@ lpfc_device_recov_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, return ndlp->nlp_state; lpfc_cancel_retry_delay_tmo(vport, ndlp); + clear_bit(NLP_NODEV_REMOVE, &ndlp->nlp_flag); + clear_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~(NLP_NODEV_REMOVE | NLP_NPR_2B_DISC); ndlp->nlp_fc4_type &= ~(NLP_FC4_FCP | NLP_FC4_NVME); spin_unlock_irq(&ndlp->lock); return ndlp->nlp_state; @@ -3146,7 +3085,7 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, /* DSM in event <evt> on NPort <nlp_DID> in state <cur_state> */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0211 DSM in event x%x on NPort x%x in " - "state %d rpi x%x Data: x%x x%x\n", + "state %d rpi x%x Data: x%lx x%x\n", evt, ndlp->nlp_DID, cur_state, ndlp->nlp_rpi, ndlp->nlp_flag, data1); @@ -3163,12 +3102,12 @@ lpfc_disc_state_machine(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, ((uint32_t)ndlp->nlp_type)); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0212 DSM out state %d on NPort x%x " - "rpi x%x Data: x%x x%x\n", + "rpi x%x Data: x%lx x%x\n", rc, ndlp->nlp_DID, ndlp->nlp_rpi, ndlp->nlp_flag, data1); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_DSM, - "DSM out: ste:%d did:x%x flg:x%x", + "DSM out: ste:%d did:x%x flg:x%lx", rc, ndlp->nlp_DID, ndlp->nlp_flag); /* Decrement the ndlp reference count held for this function */ lpfc_nlp_put(ndlp); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index fec23c723730..e9d9884830f3 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1232,7 +1232,7 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, /* Word 5 */ if ((phba->cfg_nvme_enable_fb) && - (pnode->nlp_flag & NLP_FIRSTBURST)) { + test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) { req_len = lpfc_ncmd->nvmeCmd->payload_length; if (req_len < pnode->nvme_fb_size) wqe->fcp_iwrite.initial_xfer_len = @@ -2644,14 +2644,11 @@ lpfc_nvme_unregister_port(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * reference. Check if another thread has set * NLP_DROPPED. 
*/ - spin_lock_irq(&ndlp->lock); - if (!(ndlp->nlp_flag & NLP_DROPPED)) { - ndlp->nlp_flag |= NLP_DROPPED; - spin_unlock_irq(&ndlp->lock); + if (!test_and_set_bit(NLP_DROPPED, + &ndlp->nlp_flag)) { lpfc_nlp_put(ndlp); return; } - spin_unlock_irq(&ndlp->lock); } } } diff --git a/drivers/scsi/lpfc/lpfc_nvmet.c b/drivers/scsi/lpfc/lpfc_nvmet.c index 55c3e2c2bf8f..e6c9112a8862 100644 --- a/drivers/scsi/lpfc/lpfc_nvmet.c +++ b/drivers/scsi/lpfc/lpfc_nvmet.c @@ -2854,7 +2854,7 @@ lpfc_nvmet_prep_fcp_wqe(struct lpfc_hba *phba, /* In template ar=1 wqes=0 sup=0 irsp=0 irsplen=0 */ if (rsp->rsplen == LPFC_NVMET_SUCCESS_LEN) { - if (ndlp->nlp_flag & NLP_SUPPRESS_RSP) + if (test_bit(NLP_SUPPRESS_RSP, &ndlp->nlp_flag)) bf_set(wqe_sup, &wqe->fcp_tsend.wqe_com, 1); } else { diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 11c974bffa72..905026a4782c 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -4629,7 +4629,7 @@ static int lpfc_scsi_prep_cmnd_buf_s3(struct lpfc_vport *vport, iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR; iocb_cmd->ulpPU = PARM_READ_CHECK; if (vport->cfg_first_burst_size && - (pnode->nlp_flag & NLP_FIRSTBURST)) { + test_bit(NLP_FIRSTBURST, &pnode->nlp_flag)) { u32 xrdy_len; fcpdl = scsi_bufflen(scsi_cmnd); @@ -5829,7 +5829,7 @@ lpfc_send_taskmgmt(struct lpfc_vport *vport, struct fc_rport *rport, lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP, "0702 Issue %s to TGT %d LUN %llu " - "rpi x%x nlp_flag x%x Data: x%x x%x\n", + "rpi x%x nlp_flag x%lx Data: x%x x%x\n", lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id, pnode->nlp_rpi, pnode->nlp_flag, iocbq->sli4_xritag, iocbq->cmd_flag); @@ -6094,8 +6094,8 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0722 Target Reset rport failure: rdata x%px\n", rdata); if (pnode) { + clear_bit(NLP_NPR_ADISC, &pnode->nlp_flag); spin_lock_irqsave(&pnode->lock, flags); - pnode->nlp_flag &= ~NLP_NPR_ADISC; pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; spin_unlock_irqrestore(&pnode->lock, flags); } @@ -6124,7 +6124,7 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) !pnode->logo_waitq) { pnode->logo_waitq = &waitq; pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - pnode->nlp_flag |= NLP_ISSUE_LOGO; + set_bit(NLP_ISSUE_LOGO, &pnode->nlp_flag); pnode->save_flags |= NLP_WAIT_FOR_LOGO; spin_unlock_irqrestore(&pnode->lock, flags); lpfc_unreg_rpi(vport, pnode); diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 4dccbaeb6328..c4acf594286e 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -2842,27 +2842,6 @@ lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq) return; } -static void -__lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) -{ - unsigned long iflags; - - if (ndlp->nlp_flag & NLP_RELEASE_RPI) { - lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi); - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag &= ~NLP_RELEASE_RPI; - ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR; - spin_unlock_irqrestore(&ndlp->lock, iflags); - } - ndlp->nlp_flag &= ~NLP_UNREG_INP; -} - -void -lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) -{ - __lpfc_sli_rpi_release(vport, ndlp); -} - /** * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler * @phba: Pointer to HBA context object. 
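The NLP_DROPPED hunk above is the classic once-only drop: test_and_set_bit() atomically sets the bit and returns its previous value, so the lock/test/set/unlock dance collapses into one call and only the first of two racing threads performs the final reference drop. A sketch of that shape; DROPPED is an illustrative bit number and the kref is a stand-in for the node reference:

#include <linux/bitops.h>
#include <linux/kref.h>

#define DROPPED         0               /* illustrative bit number */

static void drop_once(unsigned long *flags, struct kref *ref,
                      void (*release)(struct kref *))
{
        /*
         * Only the caller that flips the bit 0 -> 1 sees a prior
         * value of 0 and drops the reference; a concurrent caller
         * finds the bit already set and cannot double-put.
         */
        if (!test_and_set_bit(DROPPED, flags))
                kref_put(ref, release);
}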
@@ -2932,18 +2911,18 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport, KERN_INFO, LOG_MBOX | LOG_DISCOVERY, "1438 UNREG cmpl deferred mbox x%x " - "on NPort x%x Data: x%x x%x x%px x%lx x%x\n", + "on NPort x%x Data: x%lx x%x x%px x%lx x%x\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_defer_did, ndlp, vport->load_flag, kref_read(&ndlp->kref)); - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) { - ndlp->nlp_flag &= ~NLP_UNREG_INP; + if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) && + ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) { + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0); } else { - __lpfc_sli_rpi_release(vport, ndlp); + clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag); } /* The unreg_login mailbox is complete and had a @@ -2991,6 +2970,7 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; struct lpfc_nodelist *ndlp; + bool unreg_inp; ndlp = pmb->ctx_ndlp; if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) { @@ -3003,20 +2983,26 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) vport, KERN_INFO, LOG_MBOX | LOG_SLI | LOG_NODE, "0010 UNREG_LOGIN vpi:x%x " - "rpi:%x DID:%x defer x%x flg x%x " + "rpi:%x DID:%x defer x%x flg x%lx " "x%px\n", vport->vpi, ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp->nlp_flag, ndlp); - ndlp->nlp_flag &= ~NLP_LOGO_ACC; + + /* Cleanup the nlp_flag now that the UNREG RPI + * has completed. + */ + unreg_inp = test_and_clear_bit(NLP_UNREG_INP, + &ndlp->nlp_flag); + clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag); /* Check to see if there are any deferred * events to process */ - if ((ndlp->nlp_flag & NLP_UNREG_INP) && - (ndlp->nlp_defer_did != - NLP_EVT_NOTHING_PENDING)) { + if (unreg_inp && + ndlp->nlp_defer_did != + NLP_EVT_NOTHING_PENDING) { lpfc_printf_vlog( vport, KERN_INFO, LOG_MBOX | LOG_SLI | LOG_NODE, @@ -3025,14 +3011,12 @@ lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) "NPort x%x Data: x%x x%px\n", ndlp->nlp_rpi, ndlp->nlp_DID, ndlp->nlp_defer_did, ndlp); - ndlp->nlp_flag &= ~NLP_UNREG_INP; ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING; lpfc_issue_els_plogi( vport, ndlp->nlp_DID, 0); - } else { - __lpfc_sli_rpi_release(vport, ndlp); } + lpfc_nlp_put(ndlp); } } @@ -8750,6 +8734,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) lpfc_sli_config_mbox_opcode_get( phba, mboxq), rc, dd); + /* * Allocate all resources (xri,rpi,vpi,vfi) now. Subsequent * calls depends on these resources to complete port setup. @@ -8762,6 +8747,8 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) goto out_free_mbox; } + lpfc_sli4_node_rpi_restore(phba); + lpfc_set_host_data(phba, mboxq); rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL); @@ -8949,7 +8936,6 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba) rc = -ENODEV; goto out_free_iocblist; } - lpfc_sli4_node_prep(phba); if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) { if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) { @@ -14354,9 +14340,7 @@ lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe) * an unsolicited PLOGI from the same NPortId from * starting another mailbox transaction. 
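Earlier in this lpfc_sli.c diff, lpfc_sli4_unreg_rpi_cmpl_clr() reads and clears NLP_UNREG_INP in one step: test_and_clear_bit() returns the old value, so the "was an unreg in flight?" decision and the clear cannot be separated by a racing completion. Roughly, with illustrative names:

#include <linux/bitops.h>
#include <linux/printk.h>

#define UNREG_INP       1               /* illustrative bit number */

static void unreg_login_cmpl(unsigned long *flags, bool deferred_pending)
{
        /* Atomically observe and clear the in-progress marker. */
        bool was_unregging = test_and_clear_bit(UNREG_INP, flags);

        /*
         * A separate test_bit() + clear_bit() pair would leave a
         * window where a second completion still sees the bit set
         * and replays the deferred login twice.
         */
        if (was_unregging && deferred_pending)
                pr_info("replaying deferred PLOGI\n");
}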
*/ - spin_lock_irqsave(&ndlp->lock, iflags); - ndlp->nlp_flag |= NLP_UNREG_INP; - spin_unlock_irqrestore(&ndlp->lock, iflags); + set_bit(NLP_UNREG_INP, &ndlp->nlp_flag); lpfc_unreg_login(phba, vport->vpi, pmbox->un.varWords[0], pmb); pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi; @@ -19105,9 +19089,9 @@ lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport, * to free ndlp when transmit completes */ if (ndlp->nlp_state == NLP_STE_UNUSED_NODE && - !(ndlp->nlp_flag & NLP_DROPPED) && + !test_bit(NLP_DROPPED, &ndlp->nlp_flag) && !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) { - ndlp->nlp_flag |= NLP_DROPPED; + set_bit(NLP_DROPPED, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } } @@ -21125,11 +21109,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) /* Unregister the RPI when mailbox complete */ mb->mbox_flag |= LPFC_MBX_IMED_UNREG; restart_loop = 1; - spin_unlock_irq(&phba->hbalock); - spin_lock(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&ndlp->lock); - spin_lock_irq(&phba->hbalock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); break; } } @@ -21144,9 +21124,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) ndlp = mb->ctx_ndlp; mb->ctx_ndlp = NULL; if (ndlp) { - spin_lock(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag); lpfc_nlp_put(ndlp); } } @@ -21155,9 +21133,7 @@ lpfc_cleanup_pending_mbox(struct lpfc_vport *vport) /* Release the ndlp with the cleaned-up active mailbox command */ if (act_mbx_ndlp) { - spin_lock(&act_mbx_ndlp->lock); - act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL; - spin_unlock(&act_mbx_ndlp->lock); + clear_bit(NLP_IGNR_REG_CMPL, &act_mbx_ndlp->nlp_flag); lpfc_nlp_put(act_mbx_ndlp); } } diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c index 7a4d4d8e2ad5..9e0e35763377 100644 --- a/drivers/scsi/lpfc/lpfc_vport.c +++ b/drivers/scsi/lpfc/lpfc_vport.c @@ -496,7 +496,7 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) !ndlp->logo_waitq) { ndlp->logo_waitq = &waitq; ndlp->nlp_fcp_info &= ~NLP_FCP_2_DEVICE; - ndlp->nlp_flag |= NLP_ISSUE_LOGO; + set_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); ndlp->save_flags |= NLP_WAIT_FOR_LOGO; } spin_unlock_irq(&ndlp->lock); @@ -515,8 +515,8 @@ lpfc_send_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) } /* Error - clean up node flags. */ + clear_bit(NLP_ISSUE_LOGO, &ndlp->nlp_flag); spin_lock_irq(&ndlp->lock); - ndlp->nlp_flag &= ~NLP_ISSUE_LOGO; ndlp->save_flags &= ~NLP_WAIT_FOR_LOGO; spin_unlock_irq(&ndlp->lock); @@ -708,7 +708,7 @@ lpfc_vport_delete(struct fc_vport *fc_vport) lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT | LOG_ELS, "1829 DA_ID issue status %d. 
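The long tail of "x%x" -> "x%lx" edits across these log strings follows directly from the type change: once nlp_flag is an unsigned long, printing it through a %x conversion is mismatched varargs usage and trips -Wformat. For instance:

#include <linux/printk.h>

static void log_flags(unsigned int did, unsigned long flags)
{
        /* %lx matches the unsigned long argument; %x would not. */
        pr_info("DID x%x flag x%lx\n", did, flags);
}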
" - "SFlag x%x NState x%x, NFlag x%x " + "SFlag x%x NState x%x, NFlag x%lx " "Rpi x%x\n", rc, ndlp->save_flags, ndlp->nlp_state, ndlp->nlp_flag, ndlp->nlp_rpi); diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c index 0cd6f3e14882..13b6cb1b93ac 100644 --- a/drivers/scsi/qla2xxx/qla_mbx.c +++ b/drivers/scsi/qla2xxx/qla_mbx.c @@ -2147,7 +2147,7 @@ qla24xx_get_port_database(scsi_qla_host_t *vha, u16 nport_handle, pdb_dma = dma_map_single(&vha->hw->pdev->dev, pdb, sizeof(*pdb), DMA_FROM_DEVICE); - if (!pdb_dma) { + if (dma_mapping_error(&vha->hw->pdev->dev, pdb_dma)) { ql_log(ql_log_warn, vha, 0x1116, "Failed to map dma buffer.\n"); return QLA_MEMORY_ALLOC_FAILED; } diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index d91f54a6e752..97e9ca5a2a02 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -3420,6 +3420,8 @@ static int qla4xxx_alloc_pdu(struct iscsi_task *task, uint8_t opcode) task_data->data_dma = dma_map_single(&ha->pdev->dev, task->data, task->data_count, DMA_TO_DEVICE); + if (dma_mapping_error(&ha->pdev->dev, task_data->data_dma)) + return -ENOMEM; } DEBUG2(ql4_printk(KERN_INFO, ha, "%s: MaxRecvLen %u, iscsi hrd %d\n", diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 8947dab132d7..86dde3e7debb 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3388,7 +3388,7 @@ static void sd_read_block_limits_ext(struct scsi_disk *sdkp) rcu_read_lock(); vpd = rcu_dereference(sdkp->device->vpd_pgb7); - if (vpd && vpd->len >= 2) + if (vpd && vpd->len >= 6) sdkp->rscs = vpd->data[5] & 1; rcu_read_unlock(); } diff --git a/drivers/soc/aspeed/aspeed-lpc-snoop.c b/drivers/soc/aspeed/aspeed-lpc-snoop.c index d2e63277f0aa..54db2abc2e2a 100644 --- a/drivers/soc/aspeed/aspeed-lpc-snoop.c +++ b/drivers/soc/aspeed/aspeed-lpc-snoop.c @@ -58,6 +58,7 @@ struct aspeed_lpc_snoop_model_data { }; struct aspeed_lpc_snoop_channel { + bool enabled; struct kfifo fifo; wait_queue_head_t wq; struct miscdevice miscdev; @@ -190,6 +191,9 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop, const struct aspeed_lpc_snoop_model_data *model_data = of_device_get_match_data(dev); + if (WARN_ON(lpc_snoop->chan[channel].enabled)) + return -EBUSY; + init_waitqueue_head(&lpc_snoop->chan[channel].wq); /* Create FIFO datastructure */ rc = kfifo_alloc(&lpc_snoop->chan[channel].fifo, @@ -236,6 +240,8 @@ static int aspeed_lpc_enable_snoop(struct aspeed_lpc_snoop *lpc_snoop, regmap_update_bits(lpc_snoop->regmap, HICRB, hicrb_en, hicrb_en); + lpc_snoop->chan[channel].enabled = true; + return 0; err_misc_deregister: @@ -248,6 +254,9 @@ err_free_fifo: static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop, int channel) { + if (!lpc_snoop->chan[channel].enabled) + return; + switch (channel) { case 0: regmap_update_bits(lpc_snoop->regmap, HICR5, @@ -263,8 +272,10 @@ static void aspeed_lpc_disable_snoop(struct aspeed_lpc_snoop *lpc_snoop, return; } - kfifo_free(&lpc_snoop->chan[channel].fifo); + lpc_snoop->chan[channel].enabled = false; + /* Consider improving safety wrt concurrent reader(s) */ misc_deregister(&lpc_snoop->chan[channel].miscdev); + kfifo_free(&lpc_snoop->chan[channel].fifo); } static int aspeed_lpc_snoop_probe(struct platform_device *pdev) diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c index e3d5e6c1d582..1895fba5e70b 100644 --- a/drivers/soundwire/amd_manager.c +++ b/drivers/soundwire/amd_manager.c @@ -187,7 +187,7 @@ static u64 amd_sdw_send_cmd_get_resp(struct 
amd_sdw_manager *amd_manager, u32 lo if (sts & AMD_SDW_IMM_RES_VALID) { dev_err(amd_manager->dev, "SDW%x manager is in bad state\n", amd_manager->instance); - writel(0x00, amd_manager->mmio + ACP_SW_IMM_CMD_STS); + writel(AMD_SDW_IMM_RES_VALID, amd_manager->mmio + ACP_SW_IMM_CMD_STS); } writel(upper_data, amd_manager->mmio + ACP_SW_IMM_CMD_UPPER_WORD); writel(lower_data, amd_manager->mmio + ACP_SW_IMM_CMD_LOWER_QWORD); @@ -1107,9 +1107,11 @@ static int __maybe_unused amd_suspend(struct device *dev) } if (amd_manager->power_mode_mask & AMD_SDW_CLK_STOP_MODE) { + cancel_work_sync(&amd_manager->amd_sdw_work); amd_sdw_wake_enable(amd_manager, false); return amd_sdw_clock_stop(amd_manager); } else if (amd_manager->power_mode_mask & AMD_SDW_POWER_OFF_MODE) { + cancel_work_sync(&amd_manager->amd_sdw_work); amd_sdw_wake_enable(amd_manager, false); /* * As per hardware programming sequence on AMD platforms, diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c index 12f8073cb596..56be0b6901a8 100644 --- a/drivers/spi/spi-cadence-quadspi.c +++ b/drivers/spi/spi-cadence-quadspi.c @@ -1931,11 +1931,6 @@ static int cqspi_probe(struct platform_device *pdev) pm_runtime_enable(dev); - if (cqspi->rx_chan) { - dma_release_channel(cqspi->rx_chan); - goto probe_setup_failed; - } - pm_runtime_set_autosuspend_delay(dev, CQSPI_AUTOSUSPEND_TIMEOUT); pm_runtime_use_autosuspend(dev); pm_runtime_get_noresume(dev); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 7c43df252328..e26363ae7489 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -983,11 +983,20 @@ static int dspi_transfer_one_message(struct spi_controller *ctlr, if (dspi->devtype_data->trans_mode == DSPI_DMA_MODE) { status = dspi_dma_xfer(dspi); } else { + /* + * Reinitialize the completion before transferring data + * to avoid the case where it might remain in the done + * state due to a spurious interrupt from a previous + * transfer. This could falsely signal that the current + * transfer has completed. 
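The spi-fsl-dspi comment here motivates a general completion idiom: re-arm with reinit_completion() before the hardware is kicked, not after the wait, so a stale complete() from a previous transfer's spurious interrupt cannot satisfy the new wait. In outline, with start_hw() as a hypothetical stand-in for the FIFO write:

#include <linux/completion.h>

void start_hw(void);                    /* hypothetical: kicks the xfer */

static void one_transfer(struct completion *done)
{
        /* Re-arm first; the IRQ handler calls complete(done). */
        reinit_completion(done);
        start_hw();
        wait_for_completion(done);
}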
+ */ + if (dspi->irq) + reinit_completion(&dspi->xfer_done); + dspi_fifo_write(dspi); if (dspi->irq) { wait_for_completion(&dspi->xfer_done); - reinit_completion(&dspi->xfer_done); } else { do { status = dspi_poll(dspi); diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 0f3e6e2c2474..8d6341b0d866 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -4141,10 +4141,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) xfer->tx_nbits != SPI_NBITS_OCTAL) return -EINVAL; if ((xfer->tx_nbits == SPI_NBITS_DUAL) && - !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD))) + !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) return -EINVAL; if ((xfer->tx_nbits == SPI_NBITS_QUAD) && - !(spi->mode & SPI_TX_QUAD)) + !(spi->mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) + return -EINVAL; + if ((xfer->tx_nbits == SPI_NBITS_OCTAL) && + !(spi->mode & SPI_TX_OCTAL)) return -EINVAL; } /* Check transfer rx_nbits */ @@ -4157,10 +4160,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message) xfer->rx_nbits != SPI_NBITS_OCTAL) return -EINVAL; if ((xfer->rx_nbits == SPI_NBITS_DUAL) && - !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD))) + !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))) return -EINVAL; if ((xfer->rx_nbits == SPI_NBITS_QUAD) && - !(spi->mode & SPI_RX_QUAD)) + !(spi->mode & (SPI_RX_QUAD | SPI_RX_OCTAL))) + return -EINVAL; + if ((xfer->rx_nbits == SPI_NBITS_OCTAL) && + !(spi->mode & SPI_RX_OCTAL)) return -EINVAL; } diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 1a9432646b70..20ad6b1e44bc 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -588,6 +588,29 @@ static int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state return 0; } +int +vchiq_platform_init_state(struct vchiq_state *state) +{ + struct vchiq_arm_state *platform_state; + + platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL); + if (!platform_state) + return -ENOMEM; + + rwlock_init(&platform_state->susp_res_lock); + + init_completion(&platform_state->ka_evt); + atomic_set(&platform_state->ka_use_count, 0); + atomic_set(&platform_state->ka_use_ack_count, 0); + atomic_set(&platform_state->ka_release_count, 0); + + platform_state->state = state; + + state->platform_state = (struct opaque_platform_state *)platform_state; + + return 0; +} + static struct vchiq_arm_state *vchiq_platform_get_arm_state(struct vchiq_state *state) { return (struct vchiq_arm_state *)state->platform_state; @@ -716,8 +739,7 @@ int vchiq_shutdown(struct vchiq_instance *instance) struct vchiq_state *state = instance->state; int ret = 0; - if (mutex_lock_killable(&state->mutex)) - return -EAGAIN; + mutex_lock(&state->mutex); /* Remove all services */ vchiq_shutdown_internal(state, instance); @@ -1336,39 +1358,6 @@ exit: } int -vchiq_platform_init_state(struct vchiq_state *state) -{ - struct vchiq_arm_state *platform_state; - char threadname[16]; - - platform_state = devm_kzalloc(state->dev, sizeof(*platform_state), GFP_KERNEL); - if (!platform_state) - return -ENOMEM; - - snprintf(threadname, sizeof(threadname), "vchiq-keep/%d", - state->id); - platform_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func, - (void *)state, threadname); - if (IS_ERR(platform_state->ka_thread)) { - dev_err(state->dev, "couldn't create thread %s\n", threadname); - return 
PTR_ERR(platform_state->ka_thread); - } - - rwlock_init(&platform_state->susp_res_lock); - - init_completion(&platform_state->ka_evt); - atomic_set(&platform_state->ka_use_count, 0); - atomic_set(&platform_state->ka_use_ack_count, 0); - atomic_set(&platform_state->ka_release_count, 0); - - platform_state->state = state; - - state->platform_state = (struct opaque_platform_state *)platform_state; - - return 0; -} - -int vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service, enum USE_TYPE_E use_type) { @@ -1688,6 +1677,7 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state, enum vchiq_connstate newstate) { struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state); + char threadname[16]; dev_dbg(state->dev, "suspend: %d: %s->%s\n", state->id, get_conn_state_name(oldstate), get_conn_state_name(newstate)); @@ -1702,7 +1692,17 @@ void vchiq_platform_conn_state_changed(struct vchiq_state *state, arm_state->first_connect = 1; write_unlock_bh(&arm_state->susp_res_lock); - wake_up_process(arm_state->ka_thread); + snprintf(threadname, sizeof(threadname), "vchiq-keep/%d", + state->id); + arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func, + (void *)state, + threadname); + if (IS_ERR(arm_state->ka_thread)) { + dev_err(state->dev, "suspend: Couldn't create thread %s\n", + threadname); + } else { + wake_up_process(arm_state->ka_thread); + } } static const struct of_device_id vchiq_of_match[] = { diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 4f4ad6af416c..47fe50b80c22 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -1842,7 +1842,9 @@ out: } kmem_cache_free(t10_pr_reg_cache, dest_pr_reg); - core_scsi3_lunacl_undepend_item(dest_se_deve); + + if (dest_se_deve) + core_scsi3_lunacl_undepend_item(dest_se_deve); if (is_local) continue; diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index f3af5666bb11..f9ef7d94cebd 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -728,12 +728,21 @@ static bool optee_ffa_exchange_caps(struct ffa_device *ffa_dev, return true; } +static void notif_work_fn(struct work_struct *work) +{ + struct optee_ffa *optee_ffa = container_of(work, struct optee_ffa, + notif_work); + struct optee *optee = container_of(optee_ffa, struct optee, ffa); + + optee_do_bottom_half(optee->ctx); +} + static void notif_callback(int notify_id, void *cb_data) { struct optee *optee = cb_data; if (notify_id == optee->ffa.bottom_half_value) - optee_do_bottom_half(optee->ctx); + queue_work(optee->ffa.notif_wq, &optee->ffa.notif_work); else optee_notif_send(optee, notify_id); } @@ -817,9 +826,11 @@ static void optee_ffa_remove(struct ffa_device *ffa_dev) struct optee *optee = ffa_dev_get_drvdata(ffa_dev); u32 bottom_half_id = optee->ffa.bottom_half_value; - if (bottom_half_id != U32_MAX) + if (bottom_half_id != U32_MAX) { ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev, bottom_half_id); + destroy_workqueue(optee->ffa.notif_wq); + } optee_remove_common(optee); mutex_destroy(&optee->ffa.mutex); @@ -835,6 +846,13 @@ static int optee_ffa_async_notif_init(struct ffa_device *ffa_dev, u32 notif_id = 0; int rc; + INIT_WORK(&optee->ffa.notif_work, notif_work_fn); + optee->ffa.notif_wq = create_workqueue("optee_notification"); + if (!optee->ffa.notif_wq) { + rc = -EINVAL; + goto err; + } + while (true) { rc = ffa_dev->ops->notifier_ops->notify_request(ffa_dev, is_per_vcpu, @@ -851,19 +869,24 @@ static int 
optee_ffa_async_notif_init(struct ffa_device *ffa_dev, * notifications in that case. */ if (rc != -EACCES) - return rc; + goto err_wq; notif_id++; if (notif_id >= OPTEE_FFA_MAX_ASYNC_NOTIF_VALUE) - return rc; + goto err_wq; } optee->ffa.bottom_half_value = notif_id; rc = enable_async_notif(optee); - if (rc < 0) { - ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev, - notif_id); - optee->ffa.bottom_half_value = U32_MAX; - } + if (rc < 0) + goto err_rel; + + return 0; +err_rel: + ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev, notif_id); +err_wq: + destroy_workqueue(optee->ffa.notif_wq); +err: + optee->ffa.bottom_half_value = U32_MAX; return rc; } diff --git a/drivers/tee/optee/optee_private.h b/drivers/tee/optee/optee_private.h index dc0f355ef72a..9526087f0e68 100644 --- a/drivers/tee/optee/optee_private.h +++ b/drivers/tee/optee/optee_private.h @@ -165,6 +165,8 @@ struct optee_ffa { /* Serializes access to @global_ids */ struct mutex mutex; struct rhashtable global_ids; + struct workqueue_struct *notif_wq; + struct work_struct notif_work; }; struct optee; diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 6a2116cbb06f..60818c1bec48 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -1450,7 +1450,7 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video, return ret; data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK; - data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK; + data[1] &= ~ADP_DP_CS_1_AUX_TX_HOPID_MASK; data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK; data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) & @@ -3437,7 +3437,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw) } } -static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) +static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime) { if (flags) tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags); @@ -3445,7 +3445,7 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) tb_sw_dbg(sw, "disabling wakeup\n"); if (tb_switch_is_usb4(sw)) - return usb4_switch_set_wake(sw, flags); + return usb4_switch_set_wake(sw, flags, runtime); return tb_lc_set_wake(sw, flags); } @@ -3521,7 +3521,7 @@ int tb_switch_resume(struct tb_switch *sw, bool runtime) tb_switch_check_wakes(sw); /* Disable wakes */ - tb_switch_set_wake(sw, 0); + tb_switch_set_wake(sw, 0, true); err = tb_switch_tmu_init(sw); if (err) @@ -3602,7 +3602,7 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime) flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE; } - tb_switch_set_wake(sw, flags); + tb_switch_set_wake(sw, flags, runtime); if (tb_switch_is_usb4(sw)) usb4_switch_set_sleep(sw); diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 6737188f2581..2a701f94af12 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -1299,7 +1299,7 @@ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid); int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size); bool usb4_switch_lane_bonding_possible(struct tb_switch *sw); -int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags); +int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime); int usb4_switch_set_sleep(struct tb_switch *sw); int usb4_switch_nvm_sector_size(struct tb_switch *sw); int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 57821b6f4e46..9eacde552f81 100644 --- 
a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -403,12 +403,12 @@ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) * usb4_switch_set_wake() - Enabled/disable wake * @sw: USB4 router * @flags: Wakeup flags (%0 to disable) + * @runtime: Wake is being programmed during system runtime * * Enables/disables router to wake up from sleep. */ -int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags) +int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime) { - struct usb4_port *usb4; struct tb_port *port; u64 route = tb_route(sw); u32 val; @@ -438,13 +438,11 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags) val |= PORT_CS_19_WOU4; } else { bool configured = val & PORT_CS_19_PC; - usb4 = port->usb4; + bool wakeup = runtime || device_may_wakeup(&port->usb4->dev); - if (((flags & TB_WAKE_ON_CONNECT) && - device_may_wakeup(&usb4->dev)) && !configured) + if ((flags & TB_WAKE_ON_CONNECT) && wakeup && !configured) val |= PORT_CS_19_WOC; - if (((flags & TB_WAKE_ON_DISCONNECT) && - device_may_wakeup(&usb4->dev)) && configured) + if ((flags & TB_WAKE_ON_DISCONNECT) && wakeup && configured) val |= PORT_CS_19_WOD; if ((flags & TB_WAKE_ON_USB4) && configured) val |= PORT_CS_19_WOU4; diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index c7cee5fee603..70676e3247ab 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -954,7 +954,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv) __func__); return 0; } - dma_sync_sg_for_device(port->dev, priv->sg_tx_p, nent, DMA_TO_DEVICE); + dma_sync_sg_for_device(port->dev, priv->sg_tx_p, num, DMA_TO_DEVICE); priv->desc_tx = desc; desc->callback = pch_dma_tx_complete; desc->callback_param = priv; diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index be5564ed8c01..5b09ce71345b 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -4566,6 +4566,7 @@ void do_unblank_screen(int leaving_gfx) set_palette(vc); set_cursor(vc); vt_event_post(VT_EVENT_UNBLANK, vc->vc_num, vc->vc_num); + notify_update(vc); } EXPORT_SYMBOL(do_unblank_screen); diff --git a/drivers/ufs/core/ufs-sysfs.c b/drivers/ufs/core/ufs-sysfs.c index 796e37a1d859..f8397ef3cf8d 100644 --- a/drivers/ufs/core/ufs-sysfs.c +++ b/drivers/ufs/core/ufs-sysfs.c @@ -1608,7 +1608,7 @@ UFS_UNIT_DESC_PARAM(logical_block_size, _LOGICAL_BLK_SIZE, 1); UFS_UNIT_DESC_PARAM(logical_block_count, _LOGICAL_BLK_COUNT, 8); UFS_UNIT_DESC_PARAM(erase_block_size, _ERASE_BLK_SIZE, 4); UFS_UNIT_DESC_PARAM(provisioning_type, _PROVISIONING_TYPE, 1); -UFS_UNIT_DESC_PARAM(physical_memory_resourse_count, _PHY_MEM_RSRC_CNT, 8); +UFS_UNIT_DESC_PARAM(physical_memory_resource_count, _PHY_MEM_RSRC_CNT, 8); UFS_UNIT_DESC_PARAM(context_capabilities, _CTX_CAPABILITIES, 2); UFS_UNIT_DESC_PARAM(large_unit_granularity, _LARGE_UNIT_SIZE_M1, 1); UFS_UNIT_DESC_PARAM(wb_buf_alloc_units, _WB_BUF_ALLOC_UNITS, 4); @@ -1625,7 +1625,7 @@ static struct attribute *ufs_sysfs_unit_descriptor[] = { &dev_attr_logical_block_count.attr, &dev_attr_erase_block_size.attr, &dev_attr_provisioning_type.attr, - &dev_attr_physical_memory_resourse_count.attr, + &dev_attr_physical_memory_resource_count.attr, &dev_attr_context_capabilities.attr, &dev_attr_large_unit_granularity.attr, &dev_attr_wb_buf_alloc_units.attr, diff --git a/drivers/usb/cdns3/cdnsp-debug.h b/drivers/usb/cdns3/cdnsp-debug.h index cd138acdcce1..86860686d836 100644 --- a/drivers/usb/cdns3/cdnsp-debug.h +++ b/drivers/usb/cdns3/cdnsp-debug.h @@ -327,12 +327,13 
@@ static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0, case TRB_RESET_EP: case TRB_HALT_ENDPOINT: ret = scnprintf(str, size, - "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c", + "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c %c", cdnsp_trb_type_string(type), ep_num, ep_id % 2 ? "out" : "in", TRB_TO_EP_INDEX(field3), field1, field0, TRB_TO_SLOT_ID(field3), - field3 & TRB_CYCLE ? 'C' : 'c'); + field3 & TRB_CYCLE ? 'C' : 'c', + field3 & TRB_ESP ? 'P' : 'p'); break; case TRB_STOP_RING: ret = scnprintf(str, size, diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c index f317d3c84781..5cd9b898ce97 100644 --- a/drivers/usb/cdns3/cdnsp-ep0.c +++ b/drivers/usb/cdns3/cdnsp-ep0.c @@ -414,6 +414,7 @@ static int cdnsp_ep0_std_request(struct cdnsp_device *pdev, void cdnsp_setup_analyze(struct cdnsp_device *pdev) { struct usb_ctrlrequest *ctrl = &pdev->setup; + struct cdnsp_ep *pep; int ret = -EINVAL; u16 len; @@ -427,10 +428,21 @@ void cdnsp_setup_analyze(struct cdnsp_device *pdev) goto out; } + pep = &pdev->eps[0]; + /* Restore the ep0 to Stopped/Running state. */ - if (pdev->eps[0].ep_state & EP_HALTED) { - trace_cdnsp_ep0_halted("Restore to normal state"); - cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0); + if (pep->ep_state & EP_HALTED) { + if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_HALTED) + cdnsp_halt_endpoint(pdev, pep, 0); + + /* + * Halt Endpoint Command for SSP2 for ep0 preserve current + * endpoint state and driver has to synchronize the + * software endpoint state with endpoint output context + * state. + */ + pep->ep_state &= ~EP_HALTED; + pep->ep_state |= EP_STOPPED; } /* diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h index 2afa3e558f85..a91cca509db0 100644 --- a/drivers/usb/cdns3/cdnsp-gadget.h +++ b/drivers/usb/cdns3/cdnsp-gadget.h @@ -987,6 +987,12 @@ enum cdnsp_setup_dev { #define STREAM_ID_FOR_TRB(p) ((((p)) << 16) & GENMASK(31, 16)) #define SCT_FOR_TRB(p) (((p) << 1) & 0x7) +/* + * Halt Endpoint Command TRB field. + * The ESP bit only exists in the SSP2 controller. + */ +#define TRB_ESP BIT(9) + /* Link TRB specific fields. */ #define TRB_TC BIT(1) diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index fd06cb85c4ea..0758f171f73e 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -772,7 +772,9 @@ static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id) } if (port_id != old_port) { - cdnsp_disable_slot(pdev); + if (pdev->slot_id) + cdnsp_disable_slot(pdev); + pdev->active_port = port; cdnsp_enable_slot(pdev); } @@ -2483,7 +2485,8 @@ void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index) { cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) | SLOT_ID_FOR_TRB(pdev->slot_id) | - EP_ID_FOR_TRB(ep_index)); + EP_ID_FOR_TRB(ep_index) | + (!ep_index ? 
TRB_ESP : 0)); } void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num) diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index fd6032874bf3..8f73bd5057a6 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -2362,6 +2362,10 @@ static void udc_suspend(struct ci_hdrc *ci) */ if (hw_read(ci, OP_ENDPTLISTADDR, ~0) == 0) hw_write(ci, OP_ENDPTLISTADDR, ~0, ~0); + + if (ci->gadget.connected && + (!ci->suspended || !device_may_wakeup(ci->dev))) + usb_gadget_disconnect(&ci->gadget); } static void udc_resume(struct ci_hdrc *ci, bool power_lost) @@ -2372,6 +2376,9 @@ static void udc_resume(struct ci_hdrc *ci, bool power_lost) OTGSC_BSVIS | OTGSC_BSVIE); if (ci->vbus_active) usb_gadget_vbus_disconnect(&ci->gadget); + } else if (ci->vbus_active && ci->driver && + !ci->gadget.connected) { + usb_gadget_connect(&ci->gadget); } /* Restore value 0 if it was set for power lost check */ diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index da3d0e525b64..090b3a757112 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -67,6 +67,12 @@ */ #define USB_SHORT_SET_ADDRESS_REQ_TIMEOUT 500 /* ms */ +/* + * Give SS hubs 200ms time after wake to train downstream links before + * assuming no port activity and allowing hub to runtime suspend back. + */ +#define USB_SS_PORT_U0_WAKE_TIME 200 /* ms */ + /* Protect struct usb_device->state and ->children members * Note: Both are also protected by ->dev.sem, except that ->state can * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */ @@ -1094,6 +1100,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) goto init2; goto init3; } + hub_get(hub); /* The superspeed hub except for root hub has to use Hub Depth @@ -1342,6 +1349,17 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) device_unlock(&hdev->dev); } + if (type == HUB_RESUME && hub_is_superspeed(hub->hdev)) { + /* give usb3 downstream links training time after hub resume */ + usb_autopm_get_interface_no_resume( + to_usb_interface(hub->intfdev)); + + queue_delayed_work(system_power_efficient_wq, + &hub->post_resume_work, + msecs_to_jiffies(USB_SS_PORT_U0_WAKE_TIME)); + return; + } + hub_put(hub); } @@ -1360,6 +1378,14 @@ static void hub_init_func3(struct work_struct *ws) hub_activate(hub, HUB_INIT3); } +static void hub_post_resume(struct work_struct *ws) +{ + struct usb_hub *hub = container_of(ws, struct usb_hub, post_resume_work.work); + + usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); + hub_put(hub); +} + enum hub_quiescing_type { HUB_DISCONNECT, HUB_PRE_RESET, HUB_SUSPEND }; @@ -1385,6 +1411,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) /* Stop hub_wq and related activity */ del_timer_sync(&hub->irq_urb_retry); + flush_delayed_work(&hub->post_resume_work); usb_kill_urb(hub->urb); if (hub->has_indicators) cancel_delayed_work_sync(&hub->leds); @@ -1943,6 +1970,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id) hub->hdev = hdev; INIT_DELAYED_WORK(&hub->leds, led_work); INIT_DELAYED_WORK(&hub->init_work, NULL); + INIT_DELAYED_WORK(&hub->post_resume_work, hub_post_resume); INIT_WORK(&hub->events, hub_event); INIT_LIST_HEAD(&hub->onboard_devs); spin_lock_init(&hub->irq_urb_lock); @@ -2336,6 +2364,9 @@ void usb_disconnect(struct usb_device **pdev) usb_remove_ep_devs(&udev->ep0); usb_unlock_device(udev); + if (udev->usb4_link) + device_link_del(udev->usb4_link); + /* Unregister the 
device. The device driver is responsible * for de-configuring the device and invoking the remove-device * notifier chain (used by usbfs and possibly others). @@ -5718,6 +5749,7 @@ static void port_event(struct usb_hub *hub, int port1) struct usb_device *hdev = hub->hdev; u16 portstatus, portchange; int i = 0; + int err; connect_change = test_bit(port1, hub->change_bits); clear_bit(port1, hub->event_bits); @@ -5814,8 +5846,11 @@ static void port_event(struct usb_hub *hub, int port1) } else if (!udev || !(portstatus & USB_PORT_STAT_CONNECTION) || udev->state == USB_STATE_NOTATTACHED) { dev_dbg(&port_dev->dev, "do warm reset, port only\n"); - if (hub_port_reset(hub, port1, NULL, - HUB_BH_RESET_TIME, true) < 0) + err = hub_port_reset(hub, port1, NULL, + HUB_BH_RESET_TIME, true); + if (!udev && err == -ENOTCONN) + connect_change = 0; + else if (err < 0) hub_port_disable(hub, port1, 1); } else { dev_dbg(&port_dev->dev, "do warm reset, full device\n"); diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h index e6ae73f8a95d..9ebc5ef54a32 100644 --- a/drivers/usb/core/hub.h +++ b/drivers/usb/core/hub.h @@ -70,6 +70,7 @@ struct usb_hub { u8 indicator[USB_MAXCHILDREN]; struct delayed_work leds; struct delayed_work init_work; + struct delayed_work post_resume_work; struct work_struct events; spinlock_t irq_urb_lock; struct timer_list irq_urb_retry; diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index c979ecd0169a..46db600fdd82 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -227,7 +227,8 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x046a, 0x0023), .driver_info = USB_QUIRK_RESET_RESUME }, /* Logitech HD Webcam C270 */ - { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME }, + { USB_DEVICE(0x046d, 0x0825), .driver_info = USB_QUIRK_RESET_RESUME | + USB_QUIRK_NO_LPM}, /* Logitech HD Pro Webcams C920, C920-C, C922, C925e and C930e */ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, diff --git a/drivers/usb/core/usb-acpi.c b/drivers/usb/core/usb-acpi.c index 494e21a11cd2..3bc68534dbcd 100644 --- a/drivers/usb/core/usb-acpi.c +++ b/drivers/usb/core/usb-acpi.c @@ -157,7 +157,7 @@ EXPORT_SYMBOL_GPL(usb_acpi_set_power_state); */ static int usb_acpi_add_usb4_devlink(struct usb_device *udev) { - const struct device_link *link; + struct device_link *link; struct usb_port *port_dev; struct usb_hub *hub; @@ -188,6 +188,8 @@ static int usb_acpi_add_usb4_devlink(struct usb_device *udev) dev_dbg(&port_dev->dev, "Created device link from %s to %s\n", dev_name(&port_dev->child->dev), dev_name(nhi_fwnode->dev)); + udev->usb4_link = link; + return 0; } diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index d3d0d75ab1f5..834fc02610a2 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -5352,20 +5352,34 @@ int dwc2_gadget_enter_hibernation(struct dwc2_hsotg *hsotg) if (gusbcfg & GUSBCFG_ULPI_UTMI_SEL) { /* ULPI interface */ gpwrdn |= GPWRDN_ULPI_LATCH_EN_DURING_HIB_ENTRY; - } - dwc2_writel(hsotg, gpwrdn, GPWRDN); - udelay(10); + dwc2_writel(hsotg, gpwrdn, GPWRDN); + udelay(10); - /* Suspend the Phy Clock */ - pcgcctl = dwc2_readl(hsotg, PCGCTL); - pcgcctl |= PCGCTL_STOPPCLK; - dwc2_writel(hsotg, pcgcctl, PCGCTL); - udelay(10); + /* Suspend the Phy Clock */ + pcgcctl = dwc2_readl(hsotg, PCGCTL); + pcgcctl |= PCGCTL_STOPPCLK; + dwc2_writel(hsotg, pcgcctl, PCGCTL); + udelay(10); - gpwrdn = dwc2_readl(hsotg, GPWRDN); - gpwrdn |= GPWRDN_PMUACTV; - dwc2_writel(hsotg, gpwrdn, 
GPWRDN); - udelay(10); + gpwrdn = dwc2_readl(hsotg, GPWRDN); + gpwrdn |= GPWRDN_PMUACTV; + dwc2_writel(hsotg, gpwrdn, GPWRDN); + udelay(10); + } else { + /* UTMI+ Interface */ + dwc2_writel(hsotg, gpwrdn, GPWRDN); + udelay(10); + + gpwrdn = dwc2_readl(hsotg, GPWRDN); + gpwrdn |= GPWRDN_PMUACTV; + dwc2_writel(hsotg, gpwrdn, GPWRDN); + udelay(10); + + pcgcctl = dwc2_readl(hsotg, PCGCTL); + pcgcctl |= PCGCTL_STOPPCLK; + dwc2_writel(hsotg, pcgcctl, PCGCTL); + udelay(10); + } /* Set flag to indicate that we are in hibernation */ hsotg->hibernated = 1; diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 7820d6815bed..f4209726f8ec 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -2364,6 +2364,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) { u32 reg; int i; + int ret; if (!pm_runtime_suspended(dwc->dev) && !PMSG_IS_AUTO(msg)) { dwc->susphy_state = (dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)) & @@ -2382,7 +2383,9 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) case DWC3_GCTL_PRTCAP_DEVICE: if (pm_runtime_suspended(dwc->dev)) break; - dwc3_gadget_suspend(dwc); + ret = dwc3_gadget_suspend(dwc); + if (ret) + return ret; synchronize_irq(dwc->irq_gadget); dwc3_core_exit(dwc); break; @@ -2417,7 +2420,9 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) break; if (dwc->current_otg_role == DWC3_OTG_ROLE_DEVICE) { - dwc3_gadget_suspend(dwc); + ret = dwc3_gadget_suspend(dwc); + if (ret) + return ret; synchronize_irq(dwc->irq_gadget); } diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index c1d4b52f25b0..6c79303891d1 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -763,13 +763,13 @@ static int dwc3_qcom_probe(struct platform_device *pdev) ret = reset_control_deassert(qcom->resets); if (ret) { dev_err(&pdev->dev, "failed to deassert resets, err=%d\n", ret); - goto reset_assert; + return ret; } ret = dwc3_qcom_clk_init(qcom, of_clk_get_parent_count(np)); if (ret) { dev_err_probe(dev, ret, "failed to get clocks\n"); - goto reset_assert; + return ret; } qcom->qscratch_base = devm_platform_ioremap_resource(pdev, 0); @@ -835,8 +835,6 @@ clk_disable: clk_disable_unprepare(qcom->clks[i]); clk_put(qcom->clks[i]); } -reset_assert: - reset_control_assert(qcom->resets); return ret; } @@ -857,8 +855,6 @@ static void dwc3_qcom_remove(struct platform_device *pdev) qcom->num_clocks = 0; dwc3_qcom_interconnect_exit(qcom); - reset_control_assert(qcom->resets); - pm_runtime_allow(dev); pm_runtime_disable(dev); } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 76e6000c65c7..37ae1dd3345d 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -4788,26 +4788,22 @@ int dwc3_gadget_suspend(struct dwc3 *dwc) int ret; ret = dwc3_gadget_soft_disconnect(dwc); - if (ret) - goto err; - - spin_lock_irqsave(&dwc->lock, flags); - if (dwc->gadget_driver) - dwc3_disconnect_gadget(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); - - return 0; - -err: /* * Attempt to reset the controller's state. Likely no * communication can be established until the host * performs a port reset. 
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 76e6000c65c7..37ae1dd3345d 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -4788,26 +4788,22 @@ int dwc3_gadget_suspend(struct dwc3 *dwc)
     int ret;

     ret = dwc3_gadget_soft_disconnect(dwc);
-    if (ret)
-        goto err;
-
-    spin_lock_irqsave(&dwc->lock, flags);
-    if (dwc->gadget_driver)
-        dwc3_disconnect_gadget(dwc);
-    spin_unlock_irqrestore(&dwc->lock, flags);
-
-    return 0;
-
-err:
     /*
      * Attempt to reset the controller's state. Likely no
      * communication can be established until the host
      * performs a port reset.
      */
-    if (dwc->softconnect)
+    if (ret && dwc->softconnect) {
         dwc3_gadget_soft_connect(dwc);
+        return -EAGAIN;
+    }

-    return ret;
+    spin_lock_irqsave(&dwc->lock, flags);
+    if (dwc->gadget_driver)
+        dwc3_disconnect_gadget(dwc);
+    spin_unlock_irqrestore(&dwc->lock, flags);
+
+    return 0;
 }

 int dwc3_gadget_resume(struct dwc3 *dwc)
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 29390d573e23..1b4d0056f1d0 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1065,6 +1065,8 @@ static ssize_t webusb_landingPage_store(struct config_item *item, const char *pa
     unsigned int bytes_to_strip = 0;
     int l = len;

+    if (!len)
+        return len;
     if (page[l - 1] == '\n') {
         --l;
         ++bytes_to_strip;
@@ -1188,6 +1190,8 @@ static ssize_t os_desc_qw_sign_store(struct config_item *item, const char *page,
     struct gadget_info *gi = os_desc_item_to_gadget_info(item);
     int res, l;

+    if (!len)
+        return len;
     l = min((int)len, OS_STRING_QW_SIGN_LEN >> 1);
     if (page[l - 1] == '\n')
         --l;
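Both configfs guards above close the same off-by-one: a zero-length write makes page[len - 1] index one byte before the buffer. A small sketch of the hazard and the guard, with a hypothetical store callback:

#include <linux/types.h>

/*
 * Sketch of the zero-length-write hazard (hypothetical callback):
 * with len == 0, page[len - 1] reads page[-1], so the length must
 * be checked before the trailing-newline strip.
 */
static ssize_t example_store(const char *page, size_t len)
{
    size_t l = len;

    if (!len)                   /* nothing to parse, avoids page[-1] */
        return len;

    if (page[l - 1] == '\n')    /* strip one trailing newline */
        --l;

    return l;                   /* parse the first l bytes here */
}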
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 53d9fc41acc5..2412f81f4412 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -294,8 +294,8 @@ __acquires(&port->port_lock)
         break;
     }

-    if (do_tty_wake && port->port.tty)
-        tty_wakeup(port->port.tty);
+    if (do_tty_wake)
+        tty_port_tty_wakeup(&port->port);
     return status;
 }

@@ -543,20 +543,16 @@ static int gs_alloc_requests(struct usb_ep *ep, struct list_head *head,
 static int gs_start_io(struct gs_port *port)
 {
     struct list_head *head = &port->read_pool;
-    struct usb_ep    *ep;
+    struct usb_ep    *ep = port->port_usb->out;
     int              status;
     unsigned         started;

-    if (!port->port_usb || !port->port.tty)
-        return -EIO;
-
     /* Allocate RX and TX I/O buffers. We can't easily do this much
      * earlier (with GFP_KERNEL) because the requests are coupled to
      * endpoints, as are the packet sizes we'll be using. Different
      * configurations may use different endpoints with a given port;
      * and high speed vs full speed changes packet sizes too.
      */
-    ep = port->port_usb->out;
     status = gs_alloc_requests(ep, head, gs_read_complete,
         &port->read_allocated);
     if (status)
@@ -577,7 +573,7 @@ static int gs_start_io(struct gs_port *port)
         gs_start_tx(port);
         /* Unblock any pending writes into our circular buffer, in case
          * we didn't in gs_start_tx() */
-        tty_wakeup(port->port.tty);
+        tty_port_tty_wakeup(&port->port);
     } else {
         /* Free reqs only if we are still connected */
         if (port->port_usb) {
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index d35f3a18dd13..bdc664ad6a93 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -651,6 +651,10 @@ static void xhci_dbc_stop(struct xhci_dbc *dbc)
     case DS_DISABLED:
         return;
     case DS_CONFIGURED:
+        spin_lock(&dbc->lock);
+        xhci_dbc_flush_requests(dbc);
+        spin_unlock(&dbc->lock);
+
         if (dbc->driver->disconnect)
             dbc->driver->disconnect(dbc);
         break;
diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c
index d719c16ea30b..2b8558005cbb 100644
--- a/drivers/usb/host/xhci-dbgtty.c
+++ b/drivers/usb/host/xhci-dbgtty.c
@@ -585,6 +585,7 @@ int dbc_tty_init(void)
     dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
     dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
     dbc_tty_driver->init_termios = tty_std_termios;
+    dbc_tty_driver->init_termios.c_lflag &= ~ECHO;
     dbc_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD |
                                            HUPCL | CLOCAL;
     dbc_tty_driver->init_termios.c_ispeed = 9600;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index f9c51e0f2e37..91178b8dbbf0 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1426,6 +1426,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
     /* Periodic endpoint bInterval limit quirk */
     if (usb_endpoint_xfer_int(&ep->desc) ||
         usb_endpoint_xfer_isoc(&ep->desc)) {
+        if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_9) &&
+            interval >= 9) {
+            interval = 8;
+        }
         if ((xhci->quirks & XHCI_LIMIT_ENDPOINT_INTERVAL_7) &&
             udev->speed >= USB_SPEED_HIGH &&
             interval >= 7) {
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 1b033c8ce188..234efb9731b2 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -71,12 +71,22 @@
 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI    0x15ec
 #define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI    0x15f0

+#define PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI         0x13ed
+#define PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI         0x13ee
+#define PCI_DEVICE_ID_AMD_STARSHIP_XHCI            0x148c
+#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI     0x15d4
+#define PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI     0x15d5
+#define PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI          0x15e0
+#define PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI          0x15e1
+#define PCI_DEVICE_ID_AMD_RAVEN2_XHCI              0x15e5
 #define PCI_DEVICE_ID_AMD_RENOIR_XHCI              0x1639
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4            0x43b9
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3            0x43ba
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_2            0x43bb
 #define PCI_DEVICE_ID_AMD_PROMONTORYA_1            0x43bc

+#define PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI         0x7316
+
 #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI            0x1042
 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI           0x1142
 #define PCI_DEVICE_ID_ASMEDIA_1142_XHCI            0x1242
@@ -286,6 +296,21 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
     if (pdev->vendor == PCI_VENDOR_ID_NEC)
         xhci->quirks |= XHCI_NEC_HOST;

+    if (pdev->vendor == PCI_VENDOR_ID_AMD &&
+        (pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEC_XHCI ||
+         pdev->device == PCI_DEVICE_ID_AMD_ARIEL_TYPEA_XHCI ||
+         pdev->device == PCI_DEVICE_ID_AMD_STARSHIP_XHCI ||
+         pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D4_XHCI ||
+         pdev->device == PCI_DEVICE_ID_AMD_FIREFLIGHT_15D5_XHCI ||
+         pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E0_XHCI ||
+         pdev->device == PCI_DEVICE_ID_AMD_RAVEN_15E1_XHCI ||
+         pdev->device == PCI_DEVICE_ID_AMD_RAVEN2_XHCI))
+        xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9;
+
+    if (pdev->vendor == PCI_VENDOR_ID_ATI &&
+        pdev->device == PCI_DEVICE_ID_ATI_NAVI10_7316_XHCI)
+        xhci->quirks |= XHCI_LIMIT_ENDPOINT_INTERVAL_9;
+
     if (pdev->vendor == PCI_VENDOR_ID_AMD && xhci->hci_version == 0x96)
         xhci->quirks |= XHCI_AMD_0x96_HOST;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 2379a67e34e1..3a9bdf916755 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -326,7 +326,8 @@ int xhci_plat_probe(struct platform_device *pdev, struct device *sysdev, const s
     }

     usb3_hcd = xhci_get_usb3_hcd(xhci);
-    if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4)
+    if (usb3_hcd && HCC_MAX_PSA(xhci->hcc_params) >= 4 &&
+        !(xhci->quirks & XHCI_BROKEN_STREAMS))
         usb3_hcd->can_do_streams = 1;

     if (xhci->shared_hcd) {
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index fbc8419a5473..2ff8787f753c 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -461,9 +461,8 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci, unsigned long flags)
      * In the future we should distinguish between -ENODEV and -ETIMEDOUT
      * and try to recover a -ETIMEDOUT with a host controller reset.
      */
-    ret = xhci_handshake_check_state(xhci, &xhci->op_regs->cmd_ring,
-            CMD_RING_RUNNING, 0, 5 * 1000 * 1000,
-            XHCI_STATE_REMOVING);
+    ret = xhci_handshake(&xhci->op_regs->cmd_ring,
+            CMD_RING_RUNNING, 0, 5 * 1000 * 1000);
     if (ret < 0) {
         xhci_err(xhci, "Abort failed to stop command ring: %d\n", ret);
         xhci_halt(xhci);
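The xhci-ring hunk above drops the state-checking handshake in favor of plain xhci_handshake(), which already treats an all-ones register read as controller removal. A sketch of that polling idiom, assuming the readl_poll_timeout_atomic() helper from <linux/iopoll.h>; example_handshake() is a hypothetical stand-in for the real xhci_handshake():

#include <linux/iopoll.h>
#include <linux/limits.h>
#include <linux/errno.h>

/*
 * Poll an MMIO register until (value & mask) == done, giving up after
 * timeout_us. An all-ones read usually means the device fell off the
 * bus, so report that as -ENODEV rather than -ETIMEDOUT.
 */
static int example_handshake(void __iomem *ptr, u32 mask, u32 done,
                             u64 timeout_us)
{
    u32 result;
    int ret;

    ret = readl_poll_timeout_atomic(ptr, result,
                                    (result & mask) == done ||
                                    result == U32_MAX,
                                    1, timeout_us);
    if (result == U32_MAX)      /* controller is gone */
        return -ENODEV;

    return ret;                 /* 0 on success, -ETIMEDOUT otherwise */
}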
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 799941b6ad6c..09a5a6604962 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -83,29 +83,6 @@ int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us)
 }

 /*
- * xhci_handshake_check_state - same as xhci_handshake but takes an additional
- * exit_state parameter, and bails out with an error immediately when xhc_state
- * has exit_state flag set.
- */
-int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
-        u32 mask, u32 done, int usec, unsigned int exit_state)
-{
-    u32 result;
-    int ret;
-
-    ret = readl_poll_timeout_atomic(ptr, result,
-                    (result & mask) == done ||
-                    result == U32_MAX ||
-                    xhci->xhc_state & exit_state,
-                    1, usec);
-
-    if (result == U32_MAX || xhci->xhc_state & exit_state)
-        return -ENODEV;
-
-    return ret;
-}
-
-/*
  * Disable interrupts and begin the xHCI halting process.
  */
 void xhci_quiesce(struct xhci_hcd *xhci)
@@ -225,8 +202,7 @@ int xhci_reset(struct xhci_hcd *xhci, u64 timeout_us)
     if (xhci->quirks & XHCI_INTEL_HOST)
         udelay(1000);

-    ret = xhci_handshake_check_state(xhci, &xhci->op_regs->command,
-            CMD_RESET, 0, timeout_us, XHCI_STATE_REMOVING);
+    ret = xhci_handshake(&xhci->op_regs->command, CMD_RESET, 0, timeout_us);
     if (ret)
         return ret;

@@ -1094,7 +1070,10 @@ int xhci_resume(struct xhci_hcd *xhci, pm_message_t msg)
         xhci_dbg(xhci, "Stop HCD\n");
         xhci_halt(xhci);
         xhci_zero_64b_regs(xhci);
-        retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
+        if (xhci->xhc_state & XHCI_STATE_REMOVING)
+            retval = -ENODEV;
+        else
+            retval = xhci_reset(xhci, XHCI_RESET_LONG_USEC);
         spin_unlock_irq(&xhci->lock);
         if (retval)
             return retval;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index c4d5b90ef90a..11580495e09c 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1626,6 +1626,7 @@ struct xhci_hcd {
 #define XHCI_WRITE_64_HI_LO             BIT_ULL(47)
 #define XHCI_CDNS_SCTX_QUIRK            BIT_ULL(48)
 #define XHCI_ETRON_HOST                 BIT_ULL(49)
+#define XHCI_LIMIT_ENDPOINT_INTERVAL_9  BIT_ULL(50)

     unsigned int        num_active_eps;
     unsigned int        limit_active_eps;
@@ -1846,8 +1847,6 @@ void xhci_remove_secondary_interrupter(struct usb_hcd
 /* xHCI host controller glue */
 typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
 int xhci_handshake(void __iomem *ptr, u32 mask, u32 done, u64 timeout_us);
-int xhci_handshake_check_state(struct xhci_hcd *xhci, void __iomem *ptr,
-        u32 mask, u32 done, int usec, unsigned int exit_state);
 void xhci_quiesce(struct xhci_hcd *xhci);
 int xhci_halt(struct xhci_hcd *xhci);
 int xhci_start(struct xhci_hcd *xhci);
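XHCI_LIMIT_ENDPOINT_INTERVAL_9 above follows the usual xHCI quirk pattern: a BIT_ULL() flag set per PCI ID in xhci_pci_quirks() and tested at endpoint setup. Assuming the xHCI encoding where an endpoint interval field of N means a service interval of 2^N x 125 us, the clamp added in xhci-mem.c shortens the period from 64 ms (N = 9) to 32 ms (N = 8) on the affected hosts. A hypothetical sketch of the same check:

#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical quirk flag, mirroring the BIT_ULL(50) definition above. */
#define EXAMPLE_LIMIT_INTERVAL_9    BIT_ULL(50)

/* Clamp a periodic endpoint's interval exponent for quirky hosts. */
static unsigned int example_clamp_interval(u64 quirks, unsigned int interval)
{
    if ((quirks & EXAMPLE_LIMIT_INTERVAL_9) && interval >= 9)
        interval = 8;       /* 2^8 * 125 us = 32 ms instead of >= 64 ms */
    return interval;
}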
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index c6076df0d50c..da2b864fdadf 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1912,6 +1912,7 @@ static int musb_gadget_stop(struct usb_gadget *g)
      * gadget driver here and have everything work;
      * that currently misbehaves.
      */
+    usb_gadget_set_state(g, USB_STATE_NOTATTACHED);

     /* Force check of devctl register for PM runtime */
     pm_runtime_mark_last_busy(musb->controller);
@@ -2018,6 +2019,7 @@ void musb_g_disconnect(struct musb *musb)
     case OTG_STATE_B_PERIPHERAL:
     case OTG_STATE_B_IDLE:
         musb_set_state(musb, OTG_STATE_B_IDLE);
+        usb_gadget_set_state(&musb->g, USB_STATE_NOTATTACHED);
         break;
     case OTG_STATE_B_SRP_INIT:
         break;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index eef614be7db5..4f21d75f5877 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -803,6 +803,8 @@ static const struct usb_device_id id_table_combined[] = {
         .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
     { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID),
         .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
+    { USB_DEVICE(FTDI_NDI_VID, FTDI_NDI_EMGUIDE_GEMINI_PID),
+        .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk },
     { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
     { USB_DEVICE(NOVITUS_VID, NOVITUS_BONO_E_PID) },
     { USB_DEVICE(FTDI_VID, RTSYSTEMS_USB_VX8_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 9acb6f837327..4cc1fae8acb9 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -204,6 +204,9 @@
 #define FTDI_NDI_FUTURE_3_PID       0xDA73  /* NDI future device #3 */
 #define FTDI_NDI_AURORA_SCU_PID     0xDA74  /* NDI Aurora SCU */

+#define FTDI_NDI_VID                0x23F2
+#define FTDI_NDI_EMGUIDE_GEMINI_PID 0x0003  /* NDI Emguide Gemini */
+
 /*
  * ChamSys Limited (www.chamsys.co.uk) USB wing/interface product IDs
  */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 27879cc57536..147ca50c94be 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1415,6 +1415,9 @@ static const struct usb_device_id option_ids[] = {
       .driver_info = NCTRL(5) },
     { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x40) },
     { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d0, 0xff, 0xff, 0x60) },
+    { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10c7, 0xff, 0xff, 0x30),    /* Telit FE910C04 (ECM) */
+      .driver_info = NCTRL(4) },
+    { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10c7, 0xff, 0xff, 0x40) },
     { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x30),    /* Telit FN990B (MBIM) */
       .driver_info = NCTRL(6) },
     { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x10d1, 0xff, 0xff, 0x40) },
@@ -2343,6 +2346,8 @@ static const struct usb_device_id option_ids[] = {
       .driver_info = RSVD(3) },
     { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe145, 0xff),    /* Foxconn T99W651 RNDIS */
       .driver_info = RSVD(5) | RSVD(6) },
+    { USB_DEVICE_INTERFACE_CLASS(0x0489, 0xe167, 0xff),    /* Foxconn T99W640 MBIM */
+      .driver_info = RSVD(3) },
     { USB_DEVICE(0x1508, 0x1001),                          /* Fibocom NL668 (IOT version) */
       .driver_info = RSVD(4) | RSVD(5) | RSVD(6) },
     { USB_DEVICE(0x1782, 0x4d10) },                        /* Fibocom L610 (AT mode) */
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index 4976a7238b28..6964f403a2d5 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -394,8 +394,7 @@ static int dp_altmode_vdm(struct typec_altmode *alt,
     case CMDT_RSP_NAK:
         switch (cmd) {
         case DP_CMD_STATUS_UPDATE:
-            if (typec_altmode_exit(alt))
-                dev_err(&dp->alt->dev, "Exit Mode Failed!\n");
+            dp->state = DP_STATE_EXIT;
             break;
         case DP_CMD_CONFIGURE:
             dp->data.conf = 0;
@@ -677,7 +676,7 @@ static ssize_t pin_assignment_show(struct device *dev,

     assignments = get_current_pin_assignments(dp);

-    for (i = 0; assignments; assignments >>= 1, i++) {
+    for (i = 0; assignments && i < DP_PIN_ASSIGN_MAX; assignments >>= 1, i++) {
         if (assignments & 1) {
             if (i == cur)
                 len += sprintf(buf + len, "[%s] ",
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 9838a2c8c1b8..aa2fa720af15 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -1132,7 +1132,7 @@ static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
                  port->data_role);
 }

-static int tcpm_set_roles(struct tcpm_port *port, bool attached,
+static int tcpm_set_roles(struct tcpm_port *port, bool attached, int state,
               enum typec_role role, enum typec_data_role data)
 {
     enum typec_orientation orientation;
@@ -1169,7 +1169,7 @@ static int tcpm_set_roles(struct tcpm_port *port, bool attached,
         }
     }

-    ret = tcpm_mux_set(port, TYPEC_STATE_USB, usb_role, orientation);
+    ret = tcpm_mux_set(port, state, usb_role, orientation);
     if (ret < 0)
         return ret;

@@ -4339,16 +4339,6 @@ static int tcpm_src_attach(struct tcpm_port *port)

     tcpm_enable_auto_vbus_discharge(port, true);

-    ret = tcpm_set_roles(port, true, TYPEC_SOURCE, tcpm_data_role_for_source(port));
-    if (ret < 0)
-        return ret;
-
-    if (port->pd_supported) {
-        ret = port->tcpc->set_pd_rx(port->tcpc, true);
-        if (ret < 0)
-            goto out_disable_mux;
-    }
-
     /*
      * USB Type-C specification, version 1.2,
      * chapter 4.5.2.2.8.1 (Attached.SRC Requirements)
@@ -4358,13 +4348,24 @@ static int tcpm_src_attach(struct tcpm_port *port)
         (polarity == TYPEC_POLARITY_CC2 && port->cc1 == TYPEC_CC_RA)) {
         ret = tcpm_set_vconn(port, true);
         if (ret < 0)
-            goto out_disable_pd;
+            return ret;
     }

     ret = tcpm_set_vbus(port, true);
     if (ret < 0)
         goto out_disable_vconn;

+    ret = tcpm_set_roles(port, true, TYPEC_STATE_USB, TYPEC_SOURCE,
+                 tcpm_data_role_for_source(port));
+    if (ret < 0)
+        goto out_disable_vbus;
+
+    if (port->pd_supported) {
+        ret = port->tcpc->set_pd_rx(port->tcpc, true);
+        if (ret < 0)
+            goto out_disable_mux;
+    }
+
     port->pd_capable = false;

     port->partner = NULL;
@@ -4375,14 +4376,14 @@ static int tcpm_src_attach(struct tcpm_port *port)

     return 0;

-out_disable_vconn:
-    tcpm_set_vconn(port, false);
-out_disable_pd:
-    if (port->pd_supported)
-        port->tcpc->set_pd_rx(port->tcpc, false);
 out_disable_mux:
     tcpm_mux_set(port, TYPEC_STATE_SAFE, USB_ROLE_NONE,
              TYPEC_ORIENTATION_NONE);
+out_disable_vbus:
+    tcpm_set_vbus(port, false);
+out_disable_vconn:
+    tcpm_set_vconn(port, false);
+
     return ret;
 }

@@ -4514,7 +4515,8 @@ static int tcpm_snk_attach(struct tcpm_port *port)

     tcpm_enable_auto_vbus_discharge(port, true);

-    ret = tcpm_set_roles(port, true, TYPEC_SINK, tcpm_data_role_for_sink(port));
+    ret = tcpm_set_roles(port, true, TYPEC_STATE_USB,
+                 TYPEC_SINK, tcpm_data_role_for_sink(port));
     if (ret < 0)
         return ret;

@@ -4537,12 +4539,24 @@ static void tcpm_snk_detach(struct tcpm_port *port)
 static int tcpm_acc_attach(struct tcpm_port *port)
 {
     int ret;
+    enum typec_role role;
+    enum typec_data_role data;
+    int state = TYPEC_STATE_USB;

     if (port->attached)
         return 0;

-    ret = tcpm_set_roles(port, true, TYPEC_SOURCE,
-                 tcpm_data_role_for_source(port));
+    role = tcpm_port_is_sink(port) ? TYPEC_SINK : TYPEC_SOURCE;
+    data = tcpm_port_is_sink(port) ? tcpm_data_role_for_sink(port)
+                   : tcpm_data_role_for_source(port);
+
+    if (tcpm_port_is_audio(port))
+        state = TYPEC_MODE_AUDIO;
+
+    if (tcpm_port_is_debug(port))
+        state = TYPEC_MODE_DEBUG;
+
+    ret = tcpm_set_roles(port, true, state, role, data);
     if (ret < 0)
         return ret;

@@ -5301,7 +5315,7 @@ static void run_state_machine(struct tcpm_port *port)
          */
         tcpm_set_vconn(port, false);
         tcpm_set_vbus(port, false);
-        tcpm_set_roles(port, port->self_powered, TYPEC_SOURCE,
+        tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SOURCE,
                    tcpm_data_role_for_source(port));
         /*
          * If tcpc fails to notify vbus off, TCPM will wait for PD_T_SAFE_0V +
@@ -5333,7 +5347,7 @@ static void run_state_machine(struct tcpm_port *port)
         tcpm_set_vconn(port, false);
         if (port->pd_capable)
             tcpm_set_charge(port, false);
-        tcpm_set_roles(port, port->self_powered, TYPEC_SINK,
+        tcpm_set_roles(port, port->self_powered, TYPEC_STATE_USB, TYPEC_SINK,
                    tcpm_data_role_for_sink(port));
         /*
          * VBUS may or may not toggle, depending on the adapter.
@@ -5457,10 +5471,10 @@ static void run_state_machine(struct tcpm_port *port)
     case DR_SWAP_CHANGE_DR:
         tcpm_unregister_altmodes(port);
         if (port->data_role == TYPEC_HOST)
-            tcpm_set_roles(port, true, port->pwr_role,
+            tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role,
                        TYPEC_DEVICE);
         else
-            tcpm_set_roles(port, true, port->pwr_role,
+            tcpm_set_roles(port, true, TYPEC_STATE_USB, port->pwr_role,
                        TYPEC_HOST);
         tcpm_ams_finish(port);
         tcpm_set_state(port, ready_state(port), 0);
diff --git a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
index 68300fcd3c41..dda8cb3262e0 100644
--- a/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
+++ b/drivers/vfio/pci/hisilicon/hisi_acc_vfio_pci.c
@@ -554,12 +554,6 @@ static int vf_qm_state_save(struct hisi_acc_vf_core_device *hisi_acc_vdev,
     vf_data->vf_qm_state = QM_READY;
     hisi_acc_vdev->vf_qm_state = vf_data->vf_qm_state;

-    ret = vf_qm_cache_wb(vf_qm);
-    if (ret) {
-        dev_err(dev, "failed to writeback QM Cache!\n");
-        return ret;
-    }
-
     ret = qm_get_regs(vf_qm, vf_data);
     if (ret)
         return -EINVAL;
@@ -985,6 +979,13 @@ static int hisi_acc_vf_stop_device(struct hisi_acc_vf_core_device *hisi_acc_vdev
         dev_err(dev, "failed to check QM INT state!\n");
         return ret;
     }
+
+    ret = vf_qm_cache_wb(vf_qm);
+    if (ret) {
+        dev_err(dev, "failed to writeback QM cache!\n");
+        return ret;
+    }
+
     return 0;
 }

@@ -1358,6 +1359,7 @@ static void hisi_acc_vfio_pci_close_device(struct vfio_device *core_vdev)
             struct hisi_acc_vf_core_device, core_device.vdev);
     struct hisi_qm *vf_qm = &hisi_acc_vdev->vf_qm;

+    hisi_acc_vf_disable_fds(hisi_acc_vdev);
     iounmap(vf_qm->io_base);
     vfio_pci_core_close_device(core_vdev);
 }
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 147926c8bae0..c0276979675d 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -2741,7 +2741,7 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
              void (*recycle_done)(struct virtqueue *vq))
 {
     struct vring_virtqueue *vq = to_vvq(_vq);
-    int err;
+    int err, err_reset;

     if (num > vq->vq.num_max)
         return -E2BIG;
@@ -2763,7 +2763,11 @@ int virtqueue_resize(struct virtqueue *_vq, u32 num,
     else
         err = virtqueue_resize_split(_vq, num);

-    return virtqueue_enable_after_reset(_vq);
+    err_reset = virtqueue_enable_after_reset(_vq);
+    if (err_reset)
+        return err_reset;
+
+    return err;
 }
 EXPORT_SYMBOL_GPL(virtqueue_resize);
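The virtio_ring change above fixes error precedence: the queue must be re-enabled after the reset whether or not the resize worked, and a re-enable failure leaves the queue unusable, so it is the error worth returning; otherwise the resize verdict is preserved. A self-contained sketch of the pattern with hypothetical example_* helpers standing in for virtqueue_resize_split()/packed() and virtqueue_enable_after_reset():

#include <linux/types.h>

struct example_vq { u32 num; };

static int example_do_resize(struct example_vq *vq, u32 num)
{
    vq->num = num;      /* pretend the resize succeeded */
    return 0;
}

static int example_enable_after_reset(struct example_vq *vq)
{
    return 0;           /* pretend re-enable succeeded */
}

static int example_resize(struct example_vq *vq, u32 num)
{
    int err, err_reset;

    err = example_do_resize(vq, num);

    /* Re-enable unconditionally; its failure takes precedence because
     * the queue cannot be used at all afterwards. */
    err_reset = example_enable_after_reset(vq);
    if (err_reset)
        return err_reset;

    return err;         /* report the original resize result */
}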