Diffstat (limited to 'drivers')
417 files changed, 8718 insertions, 2371 deletions
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 40e816419f48..9601fa92950a 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -2010,16 +2010,16 @@ retry: return err_mask; } -static bool ata_log_supported(struct ata_device *dev, u8 log) +static int ata_log_supported(struct ata_device *dev, u8 log) { struct ata_port *ap = dev->link->ap; if (dev->horkage & ATA_HORKAGE_NO_LOG_DIR) - return false; + return 0; if (ata_read_log_page(dev, ATA_LOG_DIRECTORY, 0, ap->sector_buf, 1)) - return false; - return get_unaligned_le16(&ap->sector_buf[log * 2]) ? true : false; + return 0; + return get_unaligned_le16(&ap->sector_buf[log * 2]); } static bool ata_identify_page_supported(struct ata_device *dev, u8 page) @@ -2455,15 +2455,20 @@ static void ata_dev_config_cpr(struct ata_device *dev) struct ata_cpr_log *cpr_log = NULL; u8 *desc, *buf = NULL; - if (ata_id_major_version(dev->id) < 11 || - !ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES)) + if (ata_id_major_version(dev->id) < 11) + goto out; + + buf_len = ata_log_supported(dev, ATA_LOG_CONCURRENT_POSITIONING_RANGES); + if (buf_len == 0) goto out; /* * Read the concurrent positioning ranges log (0x47). We can have at - * most 255 32B range descriptors plus a 64B header. + * most 255 32B range descriptors plus a 64B header. This log varies in + * size, so use the size reported in the GPL directory. Reading beyond + * the supported length will result in an error. */ - buf_len = (64 + 255 * 32 + 511) & ~511; + buf_len <<= 9; buf = kzalloc(buf_len, GFP_KERNEL); if (!buf) goto out; @@ -5462,7 +5467,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, const struct ata_port_info * const * ppi, int n_ports) { - const struct ata_port_info *pi; + const struct ata_port_info *pi = &ata_dummy_port_info; struct ata_host *host; int i, j; @@ -5470,7 +5475,7 @@ struct ata_host *ata_host_alloc_pinfo(struct device *dev, if (!host) return NULL; - for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { + for (i = 0, j = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; if (ppi[j]) diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index 42cecf95a4e5..86dbb1cdfabd 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2125,7 +2125,7 @@ static unsigned int ata_scsiop_inq_b9(struct ata_scsi_args *args, u8 *rbuf) /* SCSI Concurrent Positioning Ranges VPD page: SBC-5 rev 1 or later */ rbuf[1] = 0xb9; - put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[3]); + put_unaligned_be16(64 + (int)cpr_log->nr_cpr * 32 - 4, &rbuf[2]); for (i = 0; i < cpr_log->nr_cpr; i++, desc += 32) { desc[0] = cpr_log->cpr[i].num; diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c index ca129854a88c..c38027887499 100644 --- a/drivers/ata/libata-transport.c +++ b/drivers/ata/libata-transport.c @@ -196,7 +196,7 @@ static struct { { XFER_PIO_0, "XFER_PIO_0" }, { XFER_PIO_SLOW, "XFER_PIO_SLOW" } }; -ata_bitfield_name_match(xfer,ata_xfer_names) +ata_bitfield_name_search(xfer, ata_xfer_names) /* * ATA Port attributes diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c index 6b5ed3046b44..35608a0cf552 100644 --- a/drivers/ata/pata_octeon_cf.c +++ b/drivers/ata/pata_octeon_cf.c @@ -856,12 +856,14 @@ static int octeon_cf_probe(struct platform_device *pdev) int i; res_dma = platform_get_resource(dma_dev, IORESOURCE_MEM, 0); if (!res_dma) { + put_device(&dma_dev->dev); of_node_put(dma_node); return -EINVAL; } cf_port->dma_base = 
(u64)devm_ioremap(&pdev->dev, res_dma->start, resource_size(res_dma)); if (!cf_port->dma_base) { + put_device(&dma_dev->dev); of_node_put(dma_node); return -EINVAL; } @@ -871,6 +873,7 @@ static int octeon_cf_probe(struct platform_device *pdev) irq = i; irq_handler = octeon_cf_interrupt; } + put_device(&dma_dev->dev); } of_node_put(dma_node); } diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 2ef23fce0860..a97776ea9d99 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -564,6 +564,12 @@ ssize_t __weak cpu_show_srbds(struct device *dev, return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_mmio_stale_data(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -573,6 +579,7 @@ static DEVICE_ATTR(mds, 0444, cpu_show_mds, NULL); static DEVICE_ATTR(tsx_async_abort, 0444, cpu_show_tsx_async_abort, NULL); static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); +static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -584,6 +591,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_tsx_async_abort.attr, &dev_attr_itlb_multihit.attr, &dev_attr_srbds.attr, + &dev_attr_mmio_stale_data.attr, NULL }; diff --git a/drivers/base/init.c b/drivers/base/init.c index d8d0fe687111..397eb9880cec 100644 --- a/drivers/base/init.c +++ b/drivers/base/init.c @@ -8,6 +8,7 @@ #include <linux/init.h> #include <linux/memory.h> #include <linux/of.h> +#include <linux/backing-dev.h> #include "base.h" @@ -20,6 +21,7 @@ void __init driver_init(void) { /* These are the core pieces */ + bdi_init(&noop_backing_dev_info); devtmpfs_init(); devices_init(); buses_init(); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 084d67fd55cc..bc60c9cd3230 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -558,7 +558,7 @@ static ssize_t hard_offline_page_store(struct device *dev, if (kstrtoull(buf, 0, &pfn) < 0) return -EINVAL; pfn >>= PAGE_SHIFT; - ret = memory_failure(pfn, 0); + ret = memory_failure(pfn, MF_SW_SIMULATED); if (ret == -EOPNOTSUPP) ret = 0; return ret ? ret : count; diff --git a/drivers/base/regmap/regmap-irq.c b/drivers/base/regmap/regmap-irq.c index 400c7412a7dc..a6db605707b0 100644 --- a/drivers/base/regmap/regmap-irq.c +++ b/drivers/base/regmap/regmap-irq.c @@ -252,6 +252,7 @@ static void regmap_irq_enable(struct irq_data *data) struct regmap_irq_chip_data *d = irq_data_get_irq_chip_data(data); struct regmap *map = d->map; const struct regmap_irq *irq_data = irq_to_regmap_irq(d, data->hwirq); + unsigned int reg = irq_data->reg_offset / map->reg_stride; unsigned int mask, type; type = irq_data->type.type_falling_val | irq_data->type.type_rising_val; @@ -268,14 +269,14 @@ static void regmap_irq_enable(struct irq_data *data) * at the corresponding offset in regmap_irq_set_type(). 
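 * Masking with irq_data->mask keeps the update scoped to this
 * interrupt's bits even when several interrupts share one type register.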
*/ if (d->chip->type_in_mask && type) - mask = d->type_buf[irq_data->reg_offset / map->reg_stride]; + mask = d->type_buf[reg] & irq_data->mask; else mask = irq_data->mask; if (d->chip->clear_on_unmask) d->clear_status = true; - d->mask_buf[irq_data->reg_offset / map->reg_stride] &= ~mask; + d->mask_buf[reg] &= ~mask; } static void regmap_irq_disable(struct irq_data *data) @@ -386,6 +387,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, subreg = &chip->sub_reg_offsets[b]; for (i = 0; i < subreg->num_regs; i++) { unsigned int offset = subreg->offset[i]; + unsigned int index = offset / map->reg_stride; if (chip->not_fixed_stride) ret = regmap_read(map, @@ -394,7 +396,7 @@ static inline int read_sub_irq_data(struct regmap_irq_chip_data *data, else ret = regmap_read(map, chip->status_base + offset, - &data->status_buf[offset]); + &data->status_buf[index]); if (ret) break; diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 2221d9863831..c3517ccc3159 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1880,8 +1880,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, */ bool regmap_can_raw_write(struct regmap *map) { - return map->bus && map->bus->write && map->format.format_val && - map->format.format_reg; + return map->write && map->format.format_val && map->format.format_reg; } EXPORT_SYMBOL_GPL(regmap_can_raw_write); @@ -2155,10 +2154,9 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg, size_t write_len; int ret; - if (!map->bus) - return -EINVAL; - if (!map->bus->write) + if (!map->write) return -ENOTSUPP; + if (val_len % map->format.val_bytes) return -EINVAL; if (!IS_ALIGNED(reg, map->reg_stride)) @@ -2278,7 +2276,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val, * Some devices don't support bulk write, for them we have a series of * single write operations. */ - if (!map->bus || !map->format.parse_inplace) { + if (!map->write || !map->format.parse_inplace) { map->lock(map->lock_arg); for (i = 0; i < val_count; i++) { unsigned int ival; @@ -2904,6 +2902,9 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg, size_t read_len; int ret; + if (!map->read) + return -ENOTSUPP; + if (val_len % map->format.val_bytes) return -EINVAL; if (!IS_ALIGNED(reg, map->reg_stride)) @@ -3017,7 +3018,7 @@ int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val, if (val_count == 0) return -EINVAL; - if (map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { + if (map->read && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) { ret = regmap_raw_read(map, reg, val, val_bytes * val_count); if (ret != 0) return ret; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index a88ce4426400..33f04ef78984 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -2114,9 +2114,11 @@ static void blkfront_closing(struct blkfront_info *info) return; /* No more blkif_request(). */ - blk_mq_stop_hw_queues(info->rq); - blk_mark_disk_dead(info->gd); - set_capacity(info->gd, 0); + if (info->rq && info->gd) { + blk_mq_stop_hw_queues(info->rq); + blk_mark_disk_dead(info->gd); + set_capacity(info->gd, 0); + } for_each_rinfo(info, rinfo, i) { /* No more gnttab callback work. 
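 * (info->rq and info->gd may still be NULL here if the device never
 * completed initialization, hence the guard added above.)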
*/ @@ -2457,16 +2459,19 @@ static int blkfront_remove(struct xenbus_device *xbdev) dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); - del_gendisk(info->gd); + if (info->gd) + del_gendisk(info->gd); mutex_lock(&blkfront_mutex); list_del(&info->info_list); mutex_unlock(&blkfront_mutex); blkif_free(info, 0); - xlbd_release_minors(info->gd->first_minor, info->gd->minors); - blk_cleanup_disk(info->gd); - blk_mq_free_tag_set(&info->tag_set); + if (info->gd) { + xlbd_release_minors(info->gd->first_minor, info->gd->minors); + blk_cleanup_disk(info->gd); + blk_mq_free_tag_set(&info->tag_set); + } kfree(info); return 0; diff --git a/drivers/bus/bt1-apb.c b/drivers/bus/bt1-apb.c index b25ff941e7c7..63b1b4a76671 100644 --- a/drivers/bus/bt1-apb.c +++ b/drivers/bus/bt1-apb.c @@ -175,10 +175,9 @@ static int bt1_apb_request_rst(struct bt1_apb *apb) int ret; apb->prst = devm_reset_control_get_optional_exclusive(apb->dev, "prst"); - if (IS_ERR(apb->prst)) { - dev_warn(apb->dev, "Couldn't get reset control line\n"); - return PTR_ERR(apb->prst); - } + if (IS_ERR(apb->prst)) + return dev_err_probe(apb->dev, PTR_ERR(apb->prst), + "Couldn't get reset control line\n"); ret = reset_control_deassert(apb->prst); if (ret) @@ -199,10 +198,9 @@ static int bt1_apb_request_clk(struct bt1_apb *apb) int ret; apb->pclk = devm_clk_get(apb->dev, "pclk"); - if (IS_ERR(apb->pclk)) { - dev_err(apb->dev, "Couldn't get APB clock descriptor\n"); - return PTR_ERR(apb->pclk); - } + if (IS_ERR(apb->pclk)) + return dev_err_probe(apb->dev, PTR_ERR(apb->pclk), + "Couldn't get APB clock descriptor\n"); ret = clk_prepare_enable(apb->pclk); if (ret) { diff --git a/drivers/bus/bt1-axi.c b/drivers/bus/bt1-axi.c index e7a6744acc7b..70e49a6e5374 100644 --- a/drivers/bus/bt1-axi.c +++ b/drivers/bus/bt1-axi.c @@ -135,10 +135,9 @@ static int bt1_axi_request_rst(struct bt1_axi *axi) int ret; axi->arst = devm_reset_control_get_optional_exclusive(axi->dev, "arst"); - if (IS_ERR(axi->arst)) { - dev_warn(axi->dev, "Couldn't get reset control line\n"); - return PTR_ERR(axi->arst); - } + if (IS_ERR(axi->arst)) + return dev_err_probe(axi->dev, PTR_ERR(axi->arst), + "Couldn't get reset control line\n"); ret = reset_control_deassert(axi->arst); if (ret) @@ -159,10 +158,9 @@ static int bt1_axi_request_clk(struct bt1_axi *axi) int ret; axi->aclk = devm_clk_get(axi->dev, "aclk"); - if (IS_ERR(axi->aclk)) { - dev_err(axi->dev, "Couldn't get AXI Interconnect clock\n"); - return PTR_ERR(axi->aclk); - } + if (IS_ERR(axi->aclk)) + return dev_err_probe(axi->dev, PTR_ERR(axi->aclk), + "Couldn't get AXI Interconnect clock\n"); ret = clk_prepare_enable(axi->aclk); if (ret) { diff --git a/drivers/bus/fsl-mc/fsl-mc-bus.c b/drivers/bus/fsl-mc/fsl-mc-bus.c index e81a9700cfd0..6143dbf31f31 100644 --- a/drivers/bus/fsl-mc/fsl-mc-bus.c +++ b/drivers/bus/fsl-mc/fsl-mc-bus.c @@ -1239,14 +1239,14 @@ error_cleanup_mc_io: static int fsl_mc_bus_remove(struct platform_device *pdev) { struct fsl_mc *mc = platform_get_drvdata(pdev); + struct fsl_mc_io *mc_io; if (!fsl_mc_is_root_dprc(&mc->root_mc_bus_dev->dev)) return -EINVAL; + mc_io = mc->root_mc_bus_dev->mc_io; fsl_mc_device_remove(mc->root_mc_bus_dev); - - fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io); - mc->root_mc_bus_dev->mc_io = NULL; + fsl_destroy_mc_io(mc_io); bus_unregister_notifier(&fsl_mc_bus_type, &fsl_mc_nb); diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 69fd31ffb847..0b6c03643ddc 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -429,28 +429,40 @@ config ADI driver include crash and 
makedumpfile. config RANDOM_TRUST_CPU - bool "Trust the CPU manufacturer to initialize Linux's CRNG" + bool "Initialize RNG using CPU RNG instructions" + default y depends on ARCH_RANDOM - default n help - Assume that CPU manufacturer (e.g., Intel or AMD for RDSEED or - RDRAND, IBM for the S390 and Power PC architectures) is trustworthy - for the purposes of initializing Linux's CRNG. Since this is not - something that can be independently audited, this amounts to trusting - that CPU manufacturer (perhaps with the insistence or mandate - of a Nation State's intelligence or law enforcement agencies) - has not installed a hidden back door to compromise the CPU's - random number generation facilities. This can also be configured - at boot with "random.trust_cpu=on/off". + Initialize the RNG using random numbers supplied by the CPU's + RNG instructions (e.g. RDRAND), if supported and available. These + random numbers are never used directly, but are rather hashed into + the main input pool, and this happens regardless of whether or not + this option is enabled. Instead, this option controls whether + they are credited and hence can initialize the RNG. Additionally, + other sources of randomness are always used, regardless of this + setting. Enabling this implies trusting that the CPU can supply high + quality and non-backdoored random numbers. + + Say Y here unless you have reason to mistrust your CPU or believe + its RNG facilities may be faulty. This may also be configured at + boot time with "random.trust_cpu=on/off". config RANDOM_TRUST_BOOTLOADER - bool "Trust the bootloader to initialize Linux's CRNG" - help - Some bootloaders can provide entropy to increase the kernel's initial - device randomness. Say Y here to assume the entropy provided by the - booloader is trustworthy so it will be added to the kernel's entropy - pool. Otherwise, say N here so it will be regarded as device input that - only mixes the entropy pool. This can also be configured at boot with - "random.trust_bootloader=on/off". + bool "Initialize RNG using bootloader-supplied seed" + default y + help + Initialize the RNG using a seed supplied by the bootloader or boot + environment (e.g. EFI or a bootloader-generated device tree). This + seed is not used directly, but is rather hashed into the main input + pool, and this happens regardless of whether or not this option is + enabled. Instead, this option controls whether the seed is credited + and hence can initialize the RNG. Additionally, other sources of + randomness are always used, regardless of this setting. Enabling + this implies trusting that the bootloader can supply high quality and + non-backdoored seeds. + + Say Y here unless you have reason to mistrust your bootloader or + believe its RNG facilities may be faulty. This may also be configured + at boot time with "random.trust_bootloader=on/off". 
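For reference, the "random.trust_cpu" and "random.trust_bootloader" overrides named in both help texts are parsed with kstrtobool() in drivers/char/random.c. A condensed sketch of that pattern follows; the flag declarations and parse_trust_cpu() appear verbatim later in this diff, while parse_trust_bootloader() and the early_param() hooks are assumed symmetric rather than shown here:

static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER);

/* Accepts anything kstrtobool() understands: "1"/"0", "y"/"n", "on"/"off". */
static int __init parse_trust_cpu(char *arg)
{
	return kstrtobool(arg, &trust_cpu);
}

static int __init parse_trust_bootloader(char *arg)
{
	return kstrtobool(arg, &trust_bootloader);
}

early_param("random.trust_cpu", parse_trust_cpu);
early_param("random.trust_bootloader", parse_trust_bootloader);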
endmenu diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c index e856df7e285c..a6f3a8a2aca6 100644 --- a/drivers/char/hw_random/virtio-rng.c +++ b/drivers/char/hw_random/virtio-rng.c @@ -159,6 +159,8 @@ static int probe_common(struct virtio_device *vdev) goto err_find; } + virtio_device_ready(vdev); + /* we always have a pending entropy request */ request_entropy(vi); diff --git a/drivers/char/lp.c b/drivers/char/lp.c index 0e22e3b0a04e..38aad99ebb61 100644 --- a/drivers/char/lp.c +++ b/drivers/char/lp.c @@ -1019,7 +1019,7 @@ static struct parport_driver lp_driver = { static int __init lp_init(void) { - int i, err = 0; + int i, err; if (parport_nr[0] == LP_PARPORT_OFF) return 0; diff --git a/drivers/char/random.c b/drivers/char/random.c index b691b9d59503..e3dd1dd3dd22 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -87,7 +87,7 @@ static struct fasync_struct *fasync; /* Control how we warn userspace. */ static struct ratelimit_state urandom_warning = - RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3); + RATELIMIT_STATE_INIT_FLAGS("urandom_warning", HZ, 3, RATELIMIT_MSG_ON_RELEASE); static int ratelimit_disable __read_mostly = IS_ENABLED(CONFIG_WARN_ALL_UNSEEDED_RANDOM); module_param_named(ratelimit_disable, ratelimit_disable, int, 0644); @@ -408,7 +408,7 @@ static ssize_t get_random_bytes_user(struct iov_iter *iter) /* * Immediately overwrite the ChaCha key at index 4 with random - * bytes, in case userspace causes copy_to_user() below to sleep + * bytes, in case userspace causes copy_to_iter() below to sleep * forever, so that we still retain forward secrecy in that case. */ crng_make_state(chacha_state, (u8 *)&chacha_state[4], CHACHA_KEY_SIZE); @@ -650,7 +650,8 @@ static void __cold _credit_init_bits(size_t bits) if (orig < POOL_READY_BITS && new >= POOL_READY_BITS) { crng_reseed(); /* Sets crng_init to CRNG_READY under base_crng.lock. 
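 * crng_set_ready() enables the crng_is_ready static branch, which is only
 * safe to flip once jump labels have been initialized; if we get here
 * earlier than that, random_init() catches up and enables it later.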
*/ - execute_in_process_context(crng_set_ready, &set_ready); + if (static_key_initialized) + execute_in_process_context(crng_set_ready, &set_ready); wake_up_interruptible(&crng_init_wait); kill_fasync(&fasync, SIGIO, POLL_IN); pr_notice("crng init done\n"); @@ -724,9 +725,8 @@ static void __cold _credit_init_bits(size_t bits) * **********************************************************************/ -static bool used_arch_random; -static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); -static bool trust_bootloader __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER); +static bool trust_cpu __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU); +static bool trust_bootloader __initdata = IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER); static int __init parse_trust_cpu(char *arg) { return kstrtobool(arg, &trust_cpu); @@ -776,7 +776,7 @@ static struct notifier_block pm_notifier = { .notifier_call = random_pm_notifica int __init random_init(const char *command_line) { ktime_t now = ktime_get_real(); - unsigned int i, arch_bytes; + unsigned int i, arch_bits; unsigned long entropy; #if defined(LATENT_ENTROPY_PLUGIN) @@ -784,12 +784,12 @@ int __init random_init(const char *command_line) _mix_pool_bytes(compiletime_seed, sizeof(compiletime_seed)); #endif - for (i = 0, arch_bytes = BLAKE2S_BLOCK_SIZE; + for (i = 0, arch_bits = BLAKE2S_BLOCK_SIZE * 8; i < BLAKE2S_BLOCK_SIZE; i += sizeof(entropy)) { if (!arch_get_random_seed_long_early(&entropy) && !arch_get_random_long_early(&entropy)) { entropy = random_get_entropy(); - arch_bytes -= sizeof(entropy); + arch_bits -= sizeof(entropy) * 8; } _mix_pool_bytes(&entropy, sizeof(entropy)); } @@ -798,11 +798,18 @@ int __init random_init(const char *command_line) _mix_pool_bytes(command_line, strlen(command_line)); add_latent_entropy(); + /* + * If we were initialized by the bootloader before jump labels are + * initialized, then we should enable the static branch here, where + * it's guaranteed that jump labels have been initialized. + */ + if (!static_branch_likely(&crng_is_ready) && crng_init >= CRNG_READY) + crng_set_ready(NULL); + if (crng_ready()) crng_reseed(); else if (trust_cpu) - credit_init_bits(arch_bytes * 8); - used_arch_random = arch_bytes * 8 >= POOL_READY_BITS; + _credit_init_bits(arch_bits); WARN_ON(register_pm_notifier(&pm_notifier)); @@ -812,17 +819,6 @@ int __init random_init(const char *command_line) } /* - * Returns whether arch randomness has been mixed into the initial - * state of the RNG, regardless of whether or not that randomness - * was credited. Knowing this is only good for a very limited set - * of uses, such as early init printk pointer obfuscation. - */ -bool rng_has_arch_random(void) -{ - return used_arch_random; -} - -/* * Add device- or boot-specific data to the input pool to help * initialize it. * @@ -865,13 +861,12 @@ EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); * Handle random seed passed by bootloader, and credit it if * CONFIG_RANDOM_TRUST_BOOTLOADER is set. 
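 * The function is __init now: every caller runs during early boot,
 * trust_bootloader itself lives in __initdata, and the symbol export
 * below is dropped accordingly.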
*/ -void __cold add_bootloader_randomness(const void *buf, size_t len) +void __init add_bootloader_randomness(const void *buf, size_t len) { mix_pool_bytes(buf, len); if (trust_bootloader) credit_init_bits(len * 8); } -EXPORT_SYMBOL_GPL(add_bootloader_randomness); #if IS_ENABLED(CONFIG_VMGENID) static BLOCKING_NOTIFIER_HEAD(vmfork_chain); @@ -1014,7 +1009,7 @@ void add_interrupt_randomness(int irq) if (new_count & MIX_INFLIGHT) return; - if (new_count < 64 && !time_is_before_jiffies(fast_pool->last + HZ)) + if (new_count < 1024 && !time_is_before_jiffies(fast_pool->last + HZ)) return; if (unlikely(!fast_pool->mix.func)) diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c index ff188ab68496..bb47610bbd1c 100644 --- a/drivers/clocksource/hyperv_timer.c +++ b/drivers/clocksource/hyperv_timer.c @@ -565,4 +565,3 @@ void __init hv_init_clocksource(void) hv_sched_clock_offset = hv_read_reference_counter(); hv_setup_sched_clock(read_hv_sched_clock_msr); } -EXPORT_SYMBOL_GPL(hv_init_clocksource); diff --git a/drivers/comedi/drivers/vmk80xx.c b/drivers/comedi/drivers/vmk80xx.c index 46023adc5395..4536ed43f65b 100644 --- a/drivers/comedi/drivers/vmk80xx.c +++ b/drivers/comedi/drivers/vmk80xx.c @@ -684,7 +684,7 @@ static int vmk80xx_alloc_usb_buffers(struct comedi_device *dev) if (!devpriv->usb_rx_buf) return -ENOMEM; - size = max(usb_endpoint_maxp(devpriv->ep_rx), MIN_BUF_SIZE); + size = max(usb_endpoint_maxp(devpriv->ep_tx), MIN_BUF_SIZE); devpriv->usb_tx_buf = kzalloc(size, GFP_KERNEL); if (!devpriv->usb_tx_buf) return -ENOMEM; diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c index e7330684d3b8..9631f2fd2faf 100644 --- a/drivers/dma-buf/udmabuf.c +++ b/drivers/dma-buf/udmabuf.c @@ -32,8 +32,11 @@ static vm_fault_t udmabuf_vm_fault(struct vm_fault *vmf) { struct vm_area_struct *vma = vmf->vma; struct udmabuf *ubuf = vma->vm_private_data; + pgoff_t pgoff = vmf->pgoff; - vmf->page = ubuf->pages[vmf->pgoff]; + if (pgoff >= ubuf->pagecount) + return VM_FAULT_SIGBUS; + vmf->page = ubuf->pages[pgoff]; get_page(vmf->page); return 0; } diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index c9fe5903725a..9c89f7d53e99 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -1211,7 +1211,7 @@ static int ioctl_get_cycle_timer2(struct client *client, union ioctl_arg *arg) struct fw_cdev_get_cycle_timer2 *a = &arg->get_cycle_timer2; struct fw_card *card = client->device->card; struct timespec64 ts = {0, 0}; - u32 cycle_time; + u32 cycle_time = 0; int ret = 0; local_irq_disable(); diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index 90ed8fdaba75..adddd8c45d0c 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -372,8 +372,7 @@ static ssize_t rom_index_show(struct device *dev, struct fw_device *device = fw_device(dev->parent); struct fw_unit *unit = fw_unit(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", - (int)(unit->directory - device->config_rom)); + return sysfs_emit(buf, "%td\n", unit->directory - device->config_rom); } static struct device_attribute fw_unit_attributes[] = { @@ -403,8 +402,7 @@ static ssize_t guid_show(struct device *dev, int ret; down_read(&fw_device_rwsem); - ret = snprintf(buf, PAGE_SIZE, "0x%08x%08x\n", - device->config_rom[3], device->config_rom[4]); + ret = sysfs_emit(buf, "0x%08x%08x\n", device->config_rom[3], device->config_rom[4]); up_read(&fw_device_rwsem); return ret; diff --git a/drivers/firmware/arm_scmi/Kconfig 
b/drivers/firmware/arm_scmi/Kconfig index 1e7b7fec97d9..a14f65444b35 100644 --- a/drivers/firmware/arm_scmi/Kconfig +++ b/drivers/firmware/arm_scmi/Kconfig @@ -149,4 +149,16 @@ config ARM_SCMI_POWER_DOMAIN will be called scmi_pm_domain. Note this may be needed early in boot before rootfs may be available. +config ARM_SCMI_POWER_CONTROL + tristate "SCMI system power control driver" + depends on ARM_SCMI_PROTOCOL || (COMPILE_TEST && OF) + help + This enables System Power control logic which binds system shutdown or + reboot actions to SCMI System Power notifications generated by SCP + firmware. + + This driver can also be built as a module. If so, the module will be + called scmi_power_control. Note this may be needed early in boot to catch + early shutdown/reboot SCMI requests. + endmenu diff --git a/drivers/firmware/arm_scmi/Makefile b/drivers/firmware/arm_scmi/Makefile index 8d4afadda38c..9ea86f8cc8f7 100644 --- a/drivers/firmware/arm_scmi/Makefile +++ b/drivers/firmware/arm_scmi/Makefile @@ -7,11 +7,12 @@ scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_SMC) += smc.o scmi-transport-$(CONFIG_ARM_SCMI_HAVE_MSG) += msg.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_VIRTIO) += virtio.o scmi-transport-$(CONFIG_ARM_SCMI_TRANSPORT_OPTEE) += optee.o -scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o +scmi-protocols-y = base.o clock.o perf.o power.o reset.o sensors.o system.o voltage.o powercap.o scmi-module-objs := $(scmi-bus-y) $(scmi-driver-y) $(scmi-protocols-y) \ $(scmi-transport-y) obj-$(CONFIG_ARM_SCMI_PROTOCOL) += scmi-module.o obj-$(CONFIG_ARM_SCMI_POWER_DOMAIN) += scmi_pm_domain.o +obj-$(CONFIG_ARM_SCMI_POWER_CONTROL) += scmi_power_control.o ifeq ($(CONFIG_THUMB2_KERNEL)$(CONFIG_CC_IS_CLANG),yy) # The use of R7 in the SMCCC conflicts with the compiler's use of R7 as a frame diff --git a/drivers/firmware/arm_scmi/base.c b/drivers/firmware/arm_scmi/base.c index 20fba7370f4e..a52f084a6a87 100644 --- a/drivers/firmware/arm_scmi/base.c +++ b/drivers/firmware/arm_scmi/base.c @@ -36,7 +36,7 @@ struct scmi_msg_resp_base_attributes { struct scmi_msg_resp_base_discover_agent { __le32 agent_id; - u8 name[SCMI_MAX_STR_SIZE]; + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; }; @@ -119,7 +119,7 @@ scmi_base_vendor_id_get(const struct scmi_protocol_handle *ph, bool sub_vendor) ret = ph->xops->do_xfer(ph, t); if (!ret) - memcpy(vendor_id, t->rx.buf, size); + strscpy(vendor_id, t->rx.buf, size); ph->xops->xfer_put(ph, t); @@ -221,11 +221,17 @@ scmi_base_implementation_list_get(const struct scmi_protocol_handle *ph, calc_list_sz = (1 + (loop_num_ret - 1) / sizeof(u32)) * sizeof(u32); if (calc_list_sz != real_list_sz) { - dev_err(dev, - "Malformed reply - real_sz:%zd calc_sz:%u\n", - real_list_sz, calc_list_sz); - ret = -EPROTO; - break; + dev_warn(dev, + "Malformed reply - real_sz:%zd calc_sz:%u (loop_num_ret:%d)\n", + real_list_sz, calc_list_sz, loop_num_ret); + /* + * Bail out if the expected list size is bigger than the + * total payload size of the received reply.
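+ * Oversized replies, on the other hand, are tolerated: any extra
+ * trailing payload is simply ignored after the warning above.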
+ */ + if (calc_list_sz > real_list_sz) { + ret = -EPROTO; + break; + } } for (loop = 0; loop < loop_num_ret; loop++) @@ -270,7 +276,7 @@ static int scmi_base_discover_agent_get(const struct scmi_protocol_handle *ph, ret = ph->xops->do_xfer(ph, t); if (!ret) { agent_info = t->rx.buf; - strlcpy(name, agent_info->name, SCMI_MAX_STR_SIZE); + strscpy(name, agent_info->name, SCMI_SHORT_NAME_MAX_SIZE); } ph->xops->xfer_put(ph, t); @@ -369,7 +375,7 @@ static int scmi_base_protocol_init(const struct scmi_protocol_handle *ph) int id, ret; u8 *prot_imp; u32 version; - char name[SCMI_MAX_STR_SIZE]; + char name[SCMI_SHORT_NAME_MAX_SIZE]; struct device *dev = ph->dev; struct scmi_revision_info *rev = scmi_revision_area_get(ph); diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index f6fe723ab869..d4e23101448a 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -181,7 +181,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol, return NULL; } - id = ida_simple_get(&scmi_bus_id, 1, 0, GFP_KERNEL); + id = ida_alloc_min(&scmi_bus_id, 1, GFP_KERNEL); if (id < 0) { kfree_const(scmi_dev->name); kfree(scmi_dev); @@ -204,7 +204,7 @@ scmi_device_create(struct device_node *np, struct device *parent, int protocol, put_dev: kfree_const(scmi_dev->name); put_device(&scmi_dev->dev); - ida_simple_remove(&scmi_bus_id, id); + ida_free(&scmi_bus_id, id); return NULL; } @@ -212,7 +212,7 @@ void scmi_device_destroy(struct scmi_device *scmi_dev) { kfree_const(scmi_dev->name); scmi_handle_put(scmi_dev->handle); - ida_simple_remove(&scmi_bus_id, scmi_dev->id); + ida_free(&scmi_bus_id, scmi_dev->id); device_unregister(&scmi_dev->dev); } diff --git a/drivers/firmware/arm_scmi/clock.c b/drivers/firmware/arm_scmi/clock.c index 4d36a9a133d1..3ed7ae0d6781 100644 --- a/drivers/firmware/arm_scmi/clock.c +++ b/drivers/firmware/arm_scmi/clock.c @@ -153,7 +153,7 @@ static int scmi_clock_attributes_get(const struct scmi_protocol_handle *ph, if (!ret) { u32 latency = 0; attributes = le32_to_cpu(attr->attributes); - strlcpy(clk->name, attr->name, SCMI_MAX_STR_SIZE); + strscpy(clk->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); /* clock_enable_latency field is present only since SCMI v3.1 */ if (PROTOCOL_REV_MAJOR(version) >= 0x2) latency = le32_to_cpu(attr->clock_enable_latency); @@ -194,6 +194,7 @@ static int rate_cmp_func(const void *_r1, const void *_r2) } struct scmi_clk_ipriv { + struct device *dev; u32 clk_id; struct scmi_clock_info *clk; }; @@ -223,6 +224,29 @@ iter_clk_describe_update_state(struct scmi_iterator_state *st, st->num_returned = NUM_RETURNED(flags); p->clk->rate_discrete = RATE_DISCRETE(flags); + /* Warn about out of spec replies ... */ + if (!p->clk->rate_discrete && + (st->num_returned != 3 || st->num_remaining != 0)) { + dev_warn(p->dev, + "Out-of-spec CLOCK_DESCRIBE_RATES reply for %s - returned:%d remaining:%d rx_len:%zd\n", + p->clk->name, st->num_returned, st->num_remaining, + st->rx_len); + + /* + * A known quirk: a triplet is returned but num_returned != 3 + * Check for a safe payload size and fix. 
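+ * Safe here means a header plus exactly three {lower, upper} 32-bit
+ * rate words; any other out-of-spec layout is rejected as -EPROTO.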
+ */ + if (st->num_returned != 3 && st->num_remaining == 0 && + st->rx_len == sizeof(*r) + sizeof(__le32) * 2 * 3) { + st->num_returned = 3; + st->num_remaining = 0; + } else { + dev_err(p->dev, + "Cannot fix out-of-spec reply !\n"); + return -EPROTO; + } + } + return 0; } @@ -255,7 +279,6 @@ iter_clk_describe_process_response(const struct scmi_protocol_handle *ph, *rate = RATE_TO_U64(r->rate[st->loop_idx]); p->clk->list.num_rates++; - //XXX dev_dbg(ph->dev, "Rate %llu Hz\n", *rate); } return ret; @@ -266,9 +289,7 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id, struct scmi_clock_info *clk) { int ret; - void *iter; - struct scmi_msg_clock_describe_rates *msg; struct scmi_iterator_ops ops = { .prepare_message = iter_clk_describe_prepare_message, .update_state = iter_clk_describe_update_state, @@ -277,11 +298,13 @@ scmi_clock_describe_rates_get(const struct scmi_protocol_handle *ph, u32 clk_id, struct scmi_clk_ipriv cpriv = { .clk_id = clk_id, .clk = clk, + .dev = ph->dev, }; iter = ph->hops->iter_response_init(ph, &ops, SCMI_MAX_NUM_RATES, CLOCK_DESCRIBE_RATES, - sizeof(*msg), &cpriv); + sizeof(struct scmi_msg_clock_describe_rates), + &cpriv); if (IS_ERR(iter)) return PTR_ERR(iter); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index c1922bd650ae..609ebedee9cb 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -19,6 +19,7 @@ #include <linux/export.h> #include <linux/idr.h> #include <linux/io.h> +#include <linux/io-64-nonatomic-hi-lo.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/hashtable.h> @@ -60,6 +61,11 @@ static atomic_t transfer_last_id; static DEFINE_IDR(scmi_requested_devices); static DEFINE_MUTEX(scmi_requested_devices_mtx); +/* Track globally the creation of SCMI SystemPower related devices */ +static bool scmi_syspower_registered; +/* Protect access to scmi_syspower_registered */ +static DEFINE_MUTEX(scmi_syspower_mtx); + struct scmi_requested_dev { const struct scmi_device_id *id_table; struct list_head node; @@ -660,6 +666,11 @@ static void scmi_handle_notification(struct scmi_chan_info *cinfo, smp_store_mb(xfer->priv, priv); info->desc->ops->fetch_notification(cinfo, info->desc->max_msg_size, xfer); + + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "NOTI", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); + scmi_notify(cinfo->handle, xfer->hdr.protocol_id, xfer->hdr.id, xfer->rx.buf, xfer->rx.len, ts); @@ -694,6 +705,12 @@ static void scmi_handle_response(struct scmi_chan_info *cinfo, smp_store_mb(xfer->priv, priv); info->desc->ops->fetch_response(cinfo, xfer); + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, + xfer->hdr.type == MSG_TYPE_DELAYED_RESP ? + "DLYD" : "RESP", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); + trace_scmi_rx_done(xfer->transfer_id, xfer->hdr.id, xfer->hdr.protocol_id, xfer->hdr.seq, xfer->hdr.type); @@ -827,6 +844,12 @@ static int scmi_wait_for_message_response(struct scmi_chan_info *cinfo, xfer->state = SCMI_XFER_RESP_OK; } spin_unlock_irqrestore(&xfer->lock, flags); + + /* Trace polled replies. */ + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, + "RESP", + xfer->hdr.seq, xfer->hdr.status, + xfer->rx.buf, xfer->rx.len); } } else { /* And we wait for the response. 
*/ @@ -903,6 +926,10 @@ static int do_xfer(const struct scmi_protocol_handle *ph, return ret; } + trace_scmi_msg_dump(xfer->hdr.protocol_id, xfer->hdr.id, "CMND", + xfer->hdr.seq, xfer->hdr.status, + xfer->tx.buf, xfer->tx.len); + ret = scmi_wait_for_message_response(cinfo, xfer); if (!ret && xfer->hdr.status) ret = scmi_to_linux_errno(xfer->hdr.status); @@ -1223,6 +1250,7 @@ static int scmi_iterator_run(void *iter) if (ret) break; + st->rx_len = i->t->rx.len; ret = iops->update_state(st, i->resp, i->priv); if (ret) break; @@ -1258,10 +1286,174 @@ out: return ret; } +struct scmi_msg_get_fc_info { + __le32 domain; + __le32 message_id; +}; + +struct scmi_msg_resp_desc_fc { + __le32 attr; +#define SUPPORTS_DOORBELL(x) ((x) & BIT(0)) +#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x)) + __le32 rate_limit; + __le32 chan_addr_low; + __le32 chan_addr_high; + __le32 chan_size; + __le32 db_addr_low; + __le32 db_addr_high; + __le32 db_set_lmask; + __le32 db_set_hmask; + __le32 db_preserve_lmask; + __le32 db_preserve_hmask; +}; + +static void +scmi_common_fastchannel_init(const struct scmi_protocol_handle *ph, + u8 describe_id, u32 message_id, u32 valid_size, + u32 domain, void __iomem **p_addr, + struct scmi_fc_db_info **p_db) +{ + int ret; + u32 flags; + u64 phys_addr; + u8 size; + void __iomem *addr; + struct scmi_xfer *t; + struct scmi_fc_db_info *db = NULL; + struct scmi_msg_get_fc_info *info; + struct scmi_msg_resp_desc_fc *resp; + const struct scmi_protocol_instance *pi = ph_to_pi(ph); + + if (!p_addr) { + ret = -EINVAL; + goto err_out; + } + + ret = ph->xops->xfer_get_init(ph, describe_id, + sizeof(*info), sizeof(*resp), &t); + if (ret) + goto err_out; + + info = t->tx.buf; + info->domain = cpu_to_le32(domain); + info->message_id = cpu_to_le32(message_id); + + /* + * Bail out on error leaving fc_info addresses zeroed; this includes + * the case in which the requested domain/message_id does NOT support + * fastchannels at all. + */ + ret = ph->xops->do_xfer(ph, t); + if (ret) + goto err_xfer; + + resp = t->rx.buf; + flags = le32_to_cpu(resp->attr); + size = le32_to_cpu(resp->chan_size); + if (size != valid_size) { + ret = -EINVAL; + goto err_xfer; + } + + phys_addr = le32_to_cpu(resp->chan_addr_low); + phys_addr |= (u64)le32_to_cpu(resp->chan_addr_high) << 32; + addr = devm_ioremap(ph->dev, phys_addr, size); + if (!addr) { + ret = -EADDRNOTAVAIL; + goto err_xfer; + } + + *p_addr = addr; + + if (p_db && SUPPORTS_DOORBELL(flags)) { + db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); + if (!db) { + ret = -ENOMEM; + goto err_db; + } + + size = 1 << DOORBELL_REG_WIDTH(flags); + phys_addr = le32_to_cpu(resp->db_addr_low); + phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; + addr = devm_ioremap(ph->dev, phys_addr, size); + if (!addr) { + ret = -EADDRNOTAVAIL; + goto err_db_mem; + } + + db->addr = addr; + db->width = size; + db->set = le32_to_cpu(resp->db_set_lmask); + db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; + db->mask = le32_to_cpu(resp->db_preserve_lmask); + db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; + + *p_db = db; + } + + ph->xops->xfer_put(ph, t); + + dev_dbg(ph->dev, + "Using valid FC for protocol %X [MSG_ID:%u / RES_ID:%u]\n", + pi->proto->id, message_id, domain); + + return; + +err_db_mem: + devm_kfree(ph->dev, db); + +err_db: + *p_addr = NULL; + +err_xfer: + ph->xops->xfer_put(ph, t); + +err_out: + dev_warn(ph->dev, + "Failed to get FC for protocol %X [MSG_ID:%u / RES_ID:%u] - ret:%d. 
Using regular messaging.\n", + pi->proto->id, message_id, domain, ret); +} + +#define SCMI_PROTO_FC_RING_DB(w) \ +do { \ + u##w val = 0; \ + \ + if (db->mask) \ + val = ioread##w(db->addr) & db->mask; \ + iowrite##w((u##w)db->set | val, db->addr); \ +} while (0) + +static void scmi_common_fastchannel_db_ring(struct scmi_fc_db_info *db) +{ + if (!db || !db->addr) + return; + + if (db->width == 1) + SCMI_PROTO_FC_RING_DB(8); + else if (db->width == 2) + SCMI_PROTO_FC_RING_DB(16); + else if (db->width == 4) + SCMI_PROTO_FC_RING_DB(32); + else /* db->width == 8 */ +#ifdef CONFIG_64BIT + SCMI_PROTO_FC_RING_DB(64); +#else + { + u64 val = 0; + + if (db->mask) + val = ioread64_hi_lo(db->addr) & db->mask; + iowrite64_hi_lo(db->set | val, db->addr); + } +#endif +} + static const struct scmi_proto_helpers_ops helpers_ops = { .extended_name_get = scmi_common_extended_name_get, .iter_response_init = scmi_iterator_init, .iter_response_run = scmi_iterator_run, + .fastchannel_init = scmi_common_fastchannel_init, + .fastchannel_db_ring = scmi_common_fastchannel_db_ring, }; /** @@ -1496,6 +1688,30 @@ static void scmi_devm_release_protocol(struct device *dev, void *res) scmi_protocol_release(dres->handle, dres->protocol_id); } +static struct scmi_protocol_instance __must_check * +scmi_devres_protocol_instance_get(struct scmi_device *sdev, u8 protocol_id) +{ + struct scmi_protocol_instance *pi; + struct scmi_protocol_devres *dres; + + dres = devres_alloc(scmi_devm_release_protocol, + sizeof(*dres), GFP_KERNEL); + if (!dres) + return ERR_PTR(-ENOMEM); + + pi = scmi_get_protocol_instance(sdev->handle, protocol_id); + if (IS_ERR(pi)) { + devres_free(dres); + return pi; + } + + dres->handle = sdev->handle; + dres->protocol_id = protocol_id; + devres_add(&sdev->dev, dres); + + return pi; +} + /** * scmi_devm_protocol_get - Devres managed get protocol operations and handle * @sdev: A reference to an scmi_device whose embedded struct device is to @@ -1519,32 +1735,47 @@ scmi_devm_protocol_get(struct scmi_device *sdev, u8 protocol_id, struct scmi_protocol_handle **ph) { struct scmi_protocol_instance *pi; - struct scmi_protocol_devres *dres; - struct scmi_handle *handle = sdev->handle; if (!ph) return ERR_PTR(-EINVAL); - dres = devres_alloc(scmi_devm_release_protocol, - sizeof(*dres), GFP_KERNEL); - if (!dres) - return ERR_PTR(-ENOMEM); - - pi = scmi_get_protocol_instance(handle, protocol_id); - if (IS_ERR(pi)) { - devres_free(dres); + pi = scmi_devres_protocol_instance_get(sdev, protocol_id); + if (IS_ERR(pi)) return pi; - } - - dres->handle = handle; - dres->protocol_id = protocol_id; - devres_add(&sdev->dev, dres); *ph = &pi->ph; return pi->proto->ops; } +/** + * scmi_devm_protocol_acquire - Devres managed helper to get hold of a protocol + * @sdev: A reference to an scmi_device whose embedded struct device is to + * be used for devres accounting. + * @protocol_id: The protocol being requested. + * + * Get hold of a protocol accounting for its usage, possibly triggering its + * initialization but without getting access to its protocol specific operations + * and handle. + * + * Being a devres based managed method, protocol hold will be automatically + * released, and possibly de-initialized on last user, once the SCMI driver + * owning the scmi_device is unbound from it. 
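+ * Unlike scmi_devm_protocol_get(), no protocol operations or handle are
+ * returned to the caller; the protocol is only initialized and its usage
+ * reference-counted.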
+ * + * Return: 0 on SUCCESS + */ +static int __must_check scmi_devm_protocol_acquire(struct scmi_device *sdev, + u8 protocol_id) +{ + struct scmi_protocol_instance *pi; + + pi = scmi_devres_protocol_instance_get(sdev, protocol_id); + if (IS_ERR(pi)) + return PTR_ERR(pi); + + return 0; +} + static int scmi_devm_protocol_match(struct device *dev, void *res, void *data) { struct scmi_protocol_devres *dres = res; @@ -1848,21 +2079,39 @@ scmi_get_protocol_device(struct device_node *np, struct scmi_info *info, if (sdev) return sdev; + mutex_lock(&scmi_syspower_mtx); + if (prot_id == SCMI_PROTOCOL_SYSTEM && scmi_syspower_registered) { + dev_warn(info->dev, + "SCMI SystemPower protocol device must be unique !\n"); + mutex_unlock(&scmi_syspower_mtx); + + return NULL; + } + pr_debug("Creating SCMI device (%s) for protocol %x\n", name, prot_id); sdev = scmi_device_create(np, info->dev, prot_id, name); if (!sdev) { dev_err(info->dev, "failed to create %d protocol device\n", prot_id); + mutex_unlock(&scmi_syspower_mtx); + return NULL; } if (scmi_txrx_setup(info, &sdev->dev, prot_id)) { dev_err(&sdev->dev, "failed to setup transport\n"); scmi_device_destroy(sdev); + mutex_unlock(&scmi_syspower_mtx); + return NULL; } + if (prot_id == SCMI_PROTOCOL_SYSTEM) + scmi_syspower_registered = true; + + mutex_unlock(&scmi_syspower_mtx); + return sdev; } @@ -2131,6 +2380,7 @@ static int scmi_probe(struct platform_device *pdev) handle = &info->handle; handle->dev = info->dev; handle->version = &info->version; + handle->devm_protocol_acquire = scmi_devm_protocol_acquire; handle->devm_protocol_get = scmi_devm_protocol_get; handle->devm_protocol_put = scmi_devm_protocol_put; @@ -2400,6 +2650,7 @@ static int __init scmi_driver_init(void) scmi_sensors_register(); scmi_voltage_register(); scmi_system_register(); + scmi_powercap_register(); return platform_driver_register(&scmi_driver); } @@ -2416,6 +2667,7 @@ static void __exit scmi_driver_exit(void) scmi_sensors_unregister(); scmi_voltage_unregister(); scmi_system_unregister(); + scmi_powercap_unregister(); scmi_bus_exit(); diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index b503c22cfd32..8abace56b958 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -117,6 +117,7 @@ struct scmi_optee_channel { u32 channel_id; u32 tee_session; u32 caps; + u32 rx_len; struct mutex mu; struct scmi_chan_info *cinfo; union { @@ -302,6 +303,9 @@ static int invoke_process_msg_channel(struct scmi_optee_channel *channel, size_t return -EIO; } + /* Save response size */ + channel->rx_len = param[2].u.memref.size; + return 0; } @@ -353,6 +357,7 @@ static int setup_dynamic_shmem(struct device *dev, struct scmi_optee_channel *ch shbuf = tee_shm_get_va(channel->tee_shm, 0); memset(shbuf, 0, msg_size); channel->req.msg = shbuf; + channel->rx_len = msg_size; return 0; } @@ -508,7 +513,7 @@ static void scmi_optee_fetch_response(struct scmi_chan_info *cinfo, struct scmi_optee_channel *channel = cinfo->transport_info; if (channel->tee_shm) - msg_fetch_response(channel->req.msg, SCMI_OPTEE_MAX_MSG_SIZE, xfer); + msg_fetch_response(channel->req.msg, channel->rx_len, xfer); else shmem_fetch_response(channel->req.shmem, xfer); } diff --git a/drivers/firmware/arm_scmi/perf.c b/drivers/firmware/arm_scmi/perf.c index 8f4051aca220..64ea2d2f2875 100644 --- a/drivers/firmware/arm_scmi/perf.c +++ b/drivers/firmware/arm_scmi/perf.c @@ -10,13 +10,14 @@ #include <linux/bits.h> #include <linux/of.h> #include <linux/io.h> -#include 
<linux/io-64-nonatomic-hi-lo.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_opp.h> #include <linux/scmi_protocol.h> #include <linux/sort.h> +#include <trace/events/scmi.h> + #include "protocols.h" #include "notify.h" @@ -35,6 +36,12 @@ enum scmi_performance_protocol_cmd { PERF_DOMAIN_NAME_GET = 0xc, }; +enum { + PERF_FC_LEVEL, + PERF_FC_LIMIT, + PERF_FC_MAX, +}; + struct scmi_opp { u32 perf; u32 power; @@ -115,43 +122,6 @@ struct scmi_msg_resp_perf_describe_levels { } opp[]; }; -struct scmi_perf_get_fc_info { - __le32 domain; - __le32 message_id; -}; - -struct scmi_msg_resp_perf_desc_fc { - __le32 attr; -#define SUPPORTS_DOORBELL(x) ((x) & BIT(0)) -#define DOORBELL_REG_WIDTH(x) FIELD_GET(GENMASK(2, 1), (x)) - __le32 rate_limit; - __le32 chan_addr_low; - __le32 chan_addr_high; - __le32 chan_size; - __le32 db_addr_low; - __le32 db_addr_high; - __le32 db_set_lmask; - __le32 db_set_hmask; - __le32 db_preserve_lmask; - __le32 db_preserve_hmask; -}; - -struct scmi_fc_db_info { - int width; - u64 set; - u64 mask; - void __iomem *addr; -}; - -struct scmi_fc_info { - void __iomem *level_set_addr; - void __iomem *limit_set_addr; - void __iomem *level_get_addr; - void __iomem *limit_get_addr; - struct scmi_fc_db_info *level_set_db; - struct scmi_fc_db_info *limit_set_db; -}; - struct perf_dom_info { bool set_limits; bool set_perf; @@ -252,7 +222,7 @@ scmi_perf_domain_attributes_get(const struct scmi_protocol_handle *ph, dom_info->mult_factor = (dom_info->sustained_freq_khz * 1000) / dom_info->sustained_perf_level; - strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); + strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); } ph->xops->xfer_put(ph, t); @@ -332,7 +302,6 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, { int ret; void *iter; - struct scmi_msg_perf_describe_levels *msg; struct scmi_iterator_ops ops = { .prepare_message = iter_perf_levels_prepare_message, .update_state = iter_perf_levels_update_state, @@ -345,7 +314,8 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, iter = ph->hops->iter_response_init(ph, &ops, MAX_OPPS, PERF_DESCRIBE_LEVELS, - sizeof(*msg), &ppriv); + sizeof(struct scmi_msg_perf_describe_levels), + &ppriv); if (IS_ERR(iter)) return PTR_ERR(iter); @@ -360,40 +330,6 @@ scmi_perf_describe_levels_get(const struct scmi_protocol_handle *ph, u32 domain, return ret; } -#define SCMI_PERF_FC_RING_DB(w) \ -do { \ - u##w val = 0; \ - \ - if (db->mask) \ - val = ioread##w(db->addr) & db->mask; \ - iowrite##w((u##w)db->set | val, db->addr); \ -} while (0) - -static void scmi_perf_fc_ring_db(struct scmi_fc_db_info *db) -{ - if (!db || !db->addr) - return; - - if (db->width == 1) - SCMI_PERF_FC_RING_DB(8); - else if (db->width == 2) - SCMI_PERF_FC_RING_DB(16); - else if (db->width == 4) - SCMI_PERF_FC_RING_DB(32); - else /* db->width == 8 */ -#ifdef CONFIG_64BIT - SCMI_PERF_FC_RING_DB(64); -#else - { - u64 val = 0; - - if (db->mask) - val = ioread64_hi_lo(db->addr) & db->mask; - iowrite64_hi_lo(db->set | val, db->addr); - } -#endif -} - static int scmi_perf_mb_limits_set(const struct scmi_protocol_handle *ph, u32 domain, u32 max_perf, u32 min_perf) { @@ -426,10 +362,14 @@ static int scmi_perf_limits_set(const struct scmi_protocol_handle *ph, if (PROTOCOL_REV_MAJOR(pi->version) >= 0x3 && !max_perf && !min_perf) return -EINVAL; - if (dom->fc_info && dom->fc_info->limit_set_addr) { - iowrite32(max_perf, dom->fc_info->limit_set_addr); - iowrite32(min_perf, 
dom->fc_info->limit_set_addr + 4); - scmi_perf_fc_ring_db(dom->fc_info->limit_set_db); + if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].set_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; + + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_SET, + domain, min_perf, max_perf); + iowrite32(max_perf, fci->set_addr); + iowrite32(min_perf, fci->set_addr + 4); + ph->hops->fastchannel_db_ring(fci->set_db); return 0; } @@ -468,9 +408,13 @@ static int scmi_perf_limits_get(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info->limit_get_addr) { - *max_perf = ioread32(dom->fc_info->limit_get_addr); - *min_perf = ioread32(dom->fc_info->limit_get_addr + 4); + if (dom->fc_info && dom->fc_info[PERF_FC_LIMIT].get_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LIMIT]; + + *max_perf = ioread32(fci->get_addr); + *min_perf = ioread32(fci->get_addr + 4); + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LIMITS_GET, + domain, *min_perf, *max_perf); return 0; } @@ -505,9 +449,13 @@ static int scmi_perf_level_set(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info->level_set_addr) { - iowrite32(level, dom->fc_info->level_set_addr); - scmi_perf_fc_ring_db(dom->fc_info->level_set_db); + if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr) { + struct scmi_fc_info *fci = &dom->fc_info[PERF_FC_LEVEL]; + + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_SET, + domain, level, 0); + iowrite32(level, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); return 0; } @@ -542,8 +490,10 @@ static int scmi_perf_level_get(const struct scmi_protocol_handle *ph, struct scmi_perf_info *pi = ph->get_priv(ph); struct perf_dom_info *dom = pi->dom_info + domain; - if (dom->fc_info && dom->fc_info->level_get_addr) { - *level = ioread32(dom->fc_info->level_get_addr); + if (dom->fc_info && dom->fc_info[PERF_FC_LEVEL].get_addr) { + *level = ioread32(dom->fc_info[PERF_FC_LEVEL].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_PERF, PERF_LEVEL_GET, + domain, *level, 0); return 0; } @@ -572,100 +522,33 @@ static int scmi_perf_level_limits_notify(const struct scmi_protocol_handle *ph, return ret; } -static bool scmi_perf_fc_size_is_valid(u32 msg, u32 size) -{ - if ((msg == PERF_LEVEL_GET || msg == PERF_LEVEL_SET) && size == 4) - return true; - if ((msg == PERF_LIMITS_GET || msg == PERF_LIMITS_SET) && size == 8) - return true; - return false; -} - -static void -scmi_perf_domain_desc_fc(const struct scmi_protocol_handle *ph, u32 domain, - u32 message_id, void __iomem **p_addr, - struct scmi_fc_db_info **p_db) -{ - int ret; - u32 flags; - u64 phys_addr; - u8 size; - void __iomem *addr; - struct scmi_xfer *t; - struct scmi_fc_db_info *db; - struct scmi_perf_get_fc_info *info; - struct scmi_msg_resp_perf_desc_fc *resp; - - if (!p_addr) - return; - - ret = ph->xops->xfer_get_init(ph, PERF_DESCRIBE_FASTCHANNEL, - sizeof(*info), sizeof(*resp), &t); - if (ret) - return; - - info = t->tx.buf; - info->domain = cpu_to_le32(domain); - info->message_id = cpu_to_le32(message_id); - - ret = ph->xops->do_xfer(ph, t); - if (ret) - goto err_xfer; - - resp = t->rx.buf; - flags = le32_to_cpu(resp->attr); - size = le32_to_cpu(resp->chan_size); - if (!scmi_perf_fc_size_is_valid(message_id, size)) - goto err_xfer; - - phys_addr = le32_to_cpu(resp->chan_addr_low); - phys_addr |= 
(u64)le32_to_cpu(resp->chan_addr_high) << 32; - addr = devm_ioremap(ph->dev, phys_addr, size); - if (!addr) - goto err_xfer; - *p_addr = addr; - - if (p_db && SUPPORTS_DOORBELL(flags)) { - db = devm_kzalloc(ph->dev, sizeof(*db), GFP_KERNEL); - if (!db) - goto err_xfer; - - size = 1 << DOORBELL_REG_WIDTH(flags); - phys_addr = le32_to_cpu(resp->db_addr_low); - phys_addr |= (u64)le32_to_cpu(resp->db_addr_high) << 32; - addr = devm_ioremap(ph->dev, phys_addr, size); - if (!addr) - goto err_xfer; - - db->addr = addr; - db->width = size; - db->set = le32_to_cpu(resp->db_set_lmask); - db->set |= (u64)le32_to_cpu(resp->db_set_hmask) << 32; - db->mask = le32_to_cpu(resp->db_preserve_lmask); - db->mask |= (u64)le32_to_cpu(resp->db_preserve_hmask) << 32; - *p_db = db; - } -err_xfer: - ph->xops->xfer_put(ph, t); -} - static void scmi_perf_domain_init_fc(const struct scmi_protocol_handle *ph, u32 domain, struct scmi_fc_info **p_fc) { struct scmi_fc_info *fc; - fc = devm_kzalloc(ph->dev, sizeof(*fc), GFP_KERNEL); + fc = devm_kcalloc(ph->dev, PERF_FC_MAX, sizeof(*fc), GFP_KERNEL); if (!fc) return; - scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_SET, - &fc->level_set_addr, &fc->level_set_db); - scmi_perf_domain_desc_fc(ph, domain, PERF_LEVEL_GET, - &fc->level_get_addr, NULL); - scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_SET, - &fc->limit_set_addr, &fc->limit_set_db); - scmi_perf_domain_desc_fc(ph, domain, PERF_LIMITS_GET, - &fc->limit_get_addr, NULL); + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LEVEL_SET, 4, domain, + &fc[PERF_FC_LEVEL].set_addr, + &fc[PERF_FC_LEVEL].set_db); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LEVEL_GET, 4, domain, + &fc[PERF_FC_LEVEL].get_addr, NULL); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LIMITS_SET, 8, domain, + &fc[PERF_FC_LIMIT].set_addr, + &fc[PERF_FC_LIMIT].set_db); + + ph->hops->fastchannel_init(ph, PERF_DESCRIBE_FASTCHANNEL, + PERF_LIMITS_GET, 8, domain, + &fc[PERF_FC_LIMIT].get_addr, NULL); + *p_fc = fc; } @@ -789,7 +672,7 @@ static bool scmi_fast_switch_possible(const struct scmi_protocol_handle *ph, dom = pi->dom_info + scmi_dev_domain_id(dev); - return dom->fc_info && dom->fc_info->level_set_addr; + return dom->fc_info && dom->fc_info[PERF_FC_LEVEL].set_addr; } static bool scmi_power_scale_mw_get(const struct scmi_protocol_handle *ph) diff --git a/drivers/firmware/arm_scmi/power.c b/drivers/firmware/arm_scmi/power.c index 964882cc8747..356e83631664 100644 --- a/drivers/firmware/arm_scmi/power.c +++ b/drivers/firmware/arm_scmi/power.c @@ -122,7 +122,7 @@ scmi_power_domain_attributes_get(const struct scmi_protocol_handle *ph, dom_info->state_set_notify = SUPPORTS_STATE_SET_NOTIFY(flags); dom_info->state_set_async = SUPPORTS_STATE_SET_ASYNC(flags); dom_info->state_set_sync = SUPPORTS_STATE_SET_SYNC(flags); - strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE); + strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE); } ph->xops->xfer_put(ph, t); diff --git a/drivers/firmware/arm_scmi/powercap.c b/drivers/firmware/arm_scmi/powercap.c new file mode 100644 index 000000000000..83b90bde755c --- /dev/null +++ b/drivers/firmware/arm_scmi/powercap.c @@ -0,0 +1,866 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * System Control and Management Interface (SCMI) Powercap Protocol + * + * Copyright (C) 2022 ARM Ltd. 
+ */ + +#define pr_fmt(fmt) "SCMI Notifications POWERCAP - " fmt + +#include <linux/bitfield.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/scmi_protocol.h> + +#include <trace/events/scmi.h> + +#include "protocols.h" +#include "notify.h" + +enum scmi_powercap_protocol_cmd { + POWERCAP_DOMAIN_ATTRIBUTES = 0x3, + POWERCAP_CAP_GET = 0x4, + POWERCAP_CAP_SET = 0x5, + POWERCAP_PAI_GET = 0x6, + POWERCAP_PAI_SET = 0x7, + POWERCAP_DOMAIN_NAME_GET = 0x8, + POWERCAP_MEASUREMENTS_GET = 0x9, + POWERCAP_CAP_NOTIFY = 0xa, + POWERCAP_MEASUREMENTS_NOTIFY = 0xb, + POWERCAP_DESCRIBE_FASTCHANNEL = 0xc, +}; + +enum { + POWERCAP_FC_CAP, + POWERCAP_FC_PAI, + POWERCAP_FC_MAX, +}; + +struct scmi_msg_resp_powercap_domain_attributes { + __le32 attributes; +#define SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(x) ((x) & BIT(31)) +#define SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(x) ((x) & BIT(30)) +#define SUPPORTS_ASYNC_POWERCAP_CAP_SET(x) ((x) & BIT(29)) +#define SUPPORTS_EXTENDED_NAMES(x) ((x) & BIT(28)) +#define SUPPORTS_POWERCAP_CAP_CONFIGURATION(x) ((x) & BIT(27)) +#define SUPPORTS_POWERCAP_MONITORING(x) ((x) & BIT(26)) +#define SUPPORTS_POWERCAP_PAI_CONFIGURATION(x) ((x) & BIT(25)) +#define SUPPORTS_POWERCAP_FASTCHANNELS(x) ((x) & BIT(22)) +#define POWERCAP_POWER_UNIT(x) \ + (FIELD_GET(GENMASK(24, 23), (x))) +#define SUPPORTS_POWER_UNITS_MW(x) \ + (POWERCAP_POWER_UNIT(x) == 0x2) +#define SUPPORTS_POWER_UNITS_UW(x) \ + (POWERCAP_POWER_UNIT(x) == 0x1) + u8 name[SCMI_SHORT_NAME_MAX_SIZE]; + __le32 min_pai; + __le32 max_pai; + __le32 pai_step; + __le32 min_power_cap; + __le32 max_power_cap; + __le32 power_cap_step; + __le32 sustainable_power; + __le32 accuracy; + __le32 parent_id; +}; + +struct scmi_msg_powercap_set_cap_or_pai { + __le32 domain; + __le32 flags; +#define CAP_SET_ASYNC BIT(1) +#define CAP_SET_IGNORE_DRESP BIT(0) + __le32 value; +}; + +struct scmi_msg_resp_powercap_cap_set_complete { + __le32 domain; + __le32 power_cap; +}; + +struct scmi_msg_resp_powercap_meas_get { + __le32 power; + __le32 pai; +}; + +struct scmi_msg_powercap_notify_cap { + __le32 domain; + __le32 notify_enable; +}; + +struct scmi_msg_powercap_notify_thresh { + __le32 domain; + __le32 notify_enable; + __le32 power_thresh_low; + __le32 power_thresh_high; +}; + +struct scmi_powercap_cap_changed_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 power_cap; + __le32 pai; +}; + +struct scmi_powercap_meas_changed_notify_payld { + __le32 agent_id; + __le32 domain_id; + __le32 power; +}; + +struct scmi_powercap_state { + bool meas_notif_enabled; + u64 thresholds; +#define THRESH_LOW(p, id) \ + (lower_32_bits((p)->states[(id)].thresholds)) +#define THRESH_HIGH(p, id) \ + (upper_32_bits((p)->states[(id)].thresholds)) +}; + +struct powercap_info { + u32 version; + int num_domains; + struct scmi_powercap_state *states; + struct scmi_powercap_info *powercaps; +}; + +static enum scmi_powercap_protocol_cmd evt_2_cmd[] = { + POWERCAP_CAP_NOTIFY, + POWERCAP_MEASUREMENTS_NOTIFY, +}; + +static int scmi_powercap_notify(const struct scmi_protocol_handle *ph, + u32 domain, int message_id, bool enable); + +static int +scmi_powercap_attributes_get(const struct scmi_protocol_handle *ph, + struct powercap_info *pi) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, PROTOCOL_ATTRIBUTES, 0, + sizeof(u32), &t); + if (ret) + return ret; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + u32 attributes; + + attributes = get_unaligned_le32(t->rx.buf); + pi->num_domains = FIELD_GET(GENMASK(15, 0), attributes); + 
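+		/* Only bits[15:0] of PROTOCOL_ATTRIBUTES are used here: they
+		 * carry the powercap domain count; the upper bits are reserved. */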
} + + ph->xops->xfer_put(ph, t); + return ret; +} + +static inline int +scmi_powercap_validate(unsigned int min_val, unsigned int max_val, + unsigned int step_val, bool configurable) +{ + if (!min_val || !max_val) + return -EPROTO; + + if ((configurable && min_val == max_val) || + (!configurable && min_val != max_val)) + return -EPROTO; + + if (min_val != max_val && !step_val) + return -EPROTO; + + return 0; +} + +static int +scmi_powercap_domain_attributes_get(const struct scmi_protocol_handle *ph, + struct powercap_info *pinfo, u32 domain) +{ + int ret; + u32 flags; + struct scmi_xfer *t; + struct scmi_powercap_info *dom_info = pinfo->powercaps + domain; + struct scmi_msg_resp_powercap_domain_attributes *resp; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_DOMAIN_ATTRIBUTES, + sizeof(domain), sizeof(*resp), &t); + if (ret) + return ret; + + put_unaligned_le32(domain, t->tx.buf); + resp = t->rx.buf; + + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + flags = le32_to_cpu(resp->attributes); + + dom_info->id = domain; + dom_info->notify_powercap_cap_change = + SUPPORTS_POWERCAP_CAP_CHANGE_NOTIFY(flags); + dom_info->notify_powercap_measurement_change = + SUPPORTS_POWERCAP_MEASUREMENTS_CHANGE_NOTIFY(flags); + dom_info->async_powercap_cap_set = + SUPPORTS_ASYNC_POWERCAP_CAP_SET(flags); + dom_info->powercap_cap_config = + SUPPORTS_POWERCAP_CAP_CONFIGURATION(flags); + dom_info->powercap_monitoring = + SUPPORTS_POWERCAP_MONITORING(flags); + dom_info->powercap_pai_config = + SUPPORTS_POWERCAP_PAI_CONFIGURATION(flags); + dom_info->powercap_scale_mw = + SUPPORTS_POWER_UNITS_MW(flags); + dom_info->powercap_scale_uw = + SUPPORTS_POWER_UNITS_UW(flags); + dom_info->fastchannels = + SUPPORTS_POWERCAP_FASTCHANNELS(flags); + + strscpy(dom_info->name, resp->name, SCMI_SHORT_NAME_MAX_SIZE); + + dom_info->min_pai = le32_to_cpu(resp->min_pai); + dom_info->max_pai = le32_to_cpu(resp->max_pai); + dom_info->pai_step = le32_to_cpu(resp->pai_step); + ret = scmi_powercap_validate(dom_info->min_pai, + dom_info->max_pai, + dom_info->pai_step, + dom_info->powercap_pai_config); + if (ret) { + dev_err(ph->dev, + "Platform reported inconsistent PAI config for domain %d - %s\n", + dom_info->id, dom_info->name); + goto clean; + } + + dom_info->min_power_cap = le32_to_cpu(resp->min_power_cap); + dom_info->max_power_cap = le32_to_cpu(resp->max_power_cap); + dom_info->power_cap_step = le32_to_cpu(resp->power_cap_step); + ret = scmi_powercap_validate(dom_info->min_power_cap, + dom_info->max_power_cap, + dom_info->power_cap_step, + dom_info->powercap_cap_config); + if (ret) { + dev_err(ph->dev, + "Platform reported inconsistent CAP config for domain %d - %s\n", + dom_info->id, dom_info->name); + goto clean; + } + + dom_info->sustainable_power = + le32_to_cpu(resp->sustainable_power); + dom_info->accuracy = le32_to_cpu(resp->accuracy); + + dom_info->parent_id = le32_to_cpu(resp->parent_id); + if (dom_info->parent_id != SCMI_POWERCAP_ROOT_ZONE_ID && + (dom_info->parent_id >= pinfo->num_domains || + dom_info->parent_id == dom_info->id)) { + dev_err(ph->dev, + "Platform reported inconsistent parent ID for domain %d - %s\n", + dom_info->id, dom_info->name); + ret = -ENODEV; + } + } + +clean: + ph->xops->xfer_put(ph, t); + + /* + * If supported overwrite short name with the extended one; + * on error just carry on and use already provided short name. 
+ */ + if (!ret && SUPPORTS_EXTENDED_NAMES(flags)) + ph->hops->extended_name_get(ph, POWERCAP_DOMAIN_NAME_GET, + domain, dom_info->name, + SCMI_MAX_STR_SIZE); + + return ret; +} + +static int scmi_powercap_num_domains_get(const struct scmi_protocol_handle *ph) +{ + struct powercap_info *pi = ph->get_priv(ph); + + return pi->num_domains; +} + +static const struct scmi_powercap_info * +scmi_powercap_dom_info_get(const struct scmi_protocol_handle *ph, u32 domain_id) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (domain_id >= pi->num_domains) + return NULL; + + return pi->powercaps + domain_id; +} + +static int scmi_powercap_xfer_cap_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_cap) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_GET, sizeof(u32), + sizeof(u32), &t); + if (ret) + return ret; + + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *power_cap = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_powercap_cap_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_cap) +{ + struct scmi_powercap_info *dom; + struct powercap_info *pi = ph->get_priv(ph); + + if (!power_cap || domain_id >= pi->num_domains) + return -EINVAL; + + dom = pi->powercaps + domain_id; + if (dom->fc_info && dom->fc_info[POWERCAP_FC_CAP].get_addr) { + *power_cap = ioread32(dom->fc_info[POWERCAP_FC_CAP].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_GET, + domain_id, *power_cap, 0); + return 0; + } + + return scmi_powercap_xfer_cap_get(ph, domain_id, power_cap); +} + +static int scmi_powercap_xfer_cap_set(const struct scmi_protocol_handle *ph, + const struct scmi_powercap_info *pc, + u32 power_cap, bool ignore_dresp) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_powercap_set_cap_or_pai *msg; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_CAP_SET, + sizeof(*msg), 0, &t); + if (ret) + return ret; + + msg = t->tx.buf; + msg->domain = cpu_to_le32(pc->id); + msg->flags = + cpu_to_le32(FIELD_PREP(CAP_SET_ASYNC, !!pc->async_powercap_cap_set) | + FIELD_PREP(CAP_SET_IGNORE_DRESP, !!ignore_dresp)); + msg->value = cpu_to_le32(power_cap); + + if (!pc->async_powercap_cap_set || ignore_dresp) { + ret = ph->xops->do_xfer(ph, t); + } else { + ret = ph->xops->do_xfer_with_response(ph, t); + if (!ret) { + struct scmi_msg_resp_powercap_cap_set_complete *resp; + + resp = t->rx.buf; + if (le32_to_cpu(resp->domain) == pc->id) + dev_dbg(ph->dev, + "Powercap ID %d CAP set async to %u\n", + pc->id, + get_unaligned_le32(&resp->power_cap)); + else + ret = -EPROTO; + } + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_powercap_cap_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 power_cap, + bool ignore_dresp) +{ + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_cap_config || !power_cap || + power_cap < pc->min_power_cap || + power_cap > pc->max_power_cap) + return -EINVAL; + + if (pc->fc_info && pc->fc_info[POWERCAP_FC_CAP].set_addr) { + struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_CAP]; + + iowrite32(power_cap, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_CAP_SET, + domain_id, power_cap, 0); + return 0; + } + + return scmi_powercap_xfer_cap_set(ph, pc, power_cap, ignore_dresp); +} + +static int scmi_powercap_xfer_pai_get(const struct 
scmi_protocol_handle *ph, + u32 domain_id, u32 *pai) +{ + int ret; + struct scmi_xfer *t; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_GET, sizeof(u32), + sizeof(u32), &t); + if (ret) + return ret; + + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) + *pai = get_unaligned_le32(t->rx.buf); + + ph->xops->xfer_put(ph, t); + + return ret; +} + +static int scmi_powercap_pai_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *pai) +{ + struct scmi_powercap_info *dom; + struct powercap_info *pi = ph->get_priv(ph); + + if (!pai || domain_id >= pi->num_domains) + return -EINVAL; + + dom = pi->powercaps + domain_id; + if (dom->fc_info && dom->fc_info[POWERCAP_FC_PAI].get_addr) { + *pai = ioread32(dom->fc_info[POWERCAP_FC_PAI].get_addr); + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_GET, + domain_id, *pai, 0); + return 0; + } + + return scmi_powercap_xfer_pai_get(ph, domain_id, pai); +} + +static int scmi_powercap_xfer_pai_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 pai) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_powercap_set_cap_or_pai *msg; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_PAI_SET, + sizeof(*msg), 0, &t); + if (ret) + return ret; + + msg = t->tx.buf; + msg->domain = cpu_to_le32(domain_id); + msg->flags = cpu_to_le32(0); + msg->value = cpu_to_le32(pai); + + ret = ph->xops->do_xfer(ph, t); + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int scmi_powercap_pai_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 pai) +{ + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_pai_config || !pai || + pai < pc->min_pai || pai > pc->max_pai) + return -EINVAL; + + if (pc->fc_info && pc->fc_info[POWERCAP_FC_PAI].set_addr) { + struct scmi_fc_info *fci = &pc->fc_info[POWERCAP_FC_PAI]; + + trace_scmi_fc_call(SCMI_PROTOCOL_POWERCAP, POWERCAP_PAI_SET, + domain_id, pai, 0); + iowrite32(pai, fci->set_addr); + ph->hops->fastchannel_db_ring(fci->set_db); + return 0; + } + + return scmi_powercap_xfer_pai_set(ph, domain_id, pai); +} + +static int scmi_powercap_measurements_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *average_power, + u32 *pai) +{ + int ret; + struct scmi_xfer *t; + struct scmi_msg_resp_powercap_meas_get *resp; + const struct scmi_powercap_info *pc; + + pc = scmi_powercap_dom_info_get(ph, domain_id); + if (!pc || !pc->powercap_monitoring || !pai || !average_power) + return -EINVAL; + + ret = ph->xops->xfer_get_init(ph, POWERCAP_MEASUREMENTS_GET, + sizeof(u32), sizeof(*resp), &t); + if (ret) + return ret; + + resp = t->rx.buf; + put_unaligned_le32(domain_id, t->tx.buf); + ret = ph->xops->do_xfer(ph, t); + if (!ret) { + *average_power = le32_to_cpu(resp->power); + *pai = le32_to_cpu(resp->pai); + } + + ph->xops->xfer_put(ph, t); + return ret; +} + +static int +scmi_powercap_measurements_threshold_get(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 *power_thresh_low, + u32 *power_thresh_high) +{ + struct powercap_info *pi = ph->get_priv(ph); + + if (!power_thresh_low || !power_thresh_high || + domain_id >= pi->num_domains) + return -EINVAL; + + *power_thresh_low = THRESH_LOW(pi, domain_id); + *power_thresh_high = THRESH_HIGH(pi, domain_id); + + return 0; +} + +static int +scmi_powercap_measurements_threshold_set(const struct scmi_protocol_handle *ph, + u32 domain_id, u32 power_thresh_low, + u32 power_thresh_high) +{ + int ret = 0; + struct powercap_info *pi = ph->get_priv(ph); + 
+ if (domain_id >= pi->num_domains ||
+ power_thresh_low > power_thresh_high)
+ return -EINVAL;
+
+ /* Anything to do? */
+ if (THRESH_LOW(pi, domain_id) == power_thresh_low &&
+ THRESH_HIGH(pi, domain_id) == power_thresh_high)
+ return ret;
+
+ pi->states[domain_id].thresholds =
+ (FIELD_PREP(GENMASK_ULL(31, 0), power_thresh_low) |
+ FIELD_PREP(GENMASK_ULL(63, 32), power_thresh_high));
+
+ /* Update thresholds if notification already enabled */
+ if (pi->states[domain_id].meas_notif_enabled)
+ ret = scmi_powercap_notify(ph, domain_id,
+ POWERCAP_MEASUREMENTS_NOTIFY,
+ true);
+
+ return ret;
+}
+
+static const struct scmi_powercap_proto_ops powercap_proto_ops = {
+ .num_domains_get = scmi_powercap_num_domains_get,
+ .info_get = scmi_powercap_dom_info_get,
+ .cap_get = scmi_powercap_cap_get,
+ .cap_set = scmi_powercap_cap_set,
+ .pai_get = scmi_powercap_pai_get,
+ .pai_set = scmi_powercap_pai_set,
+ .measurements_get = scmi_powercap_measurements_get,
+ .measurements_threshold_set = scmi_powercap_measurements_threshold_set,
+ .measurements_threshold_get = scmi_powercap_measurements_threshold_get,
+};
+
+static void scmi_powercap_domain_init_fc(const struct scmi_protocol_handle *ph,
+ u32 domain, struct scmi_fc_info **p_fc)
+{
+ struct scmi_fc_info *fc;
+
+ fc = devm_kcalloc(ph->dev, POWERCAP_FC_MAX, sizeof(*fc), GFP_KERNEL);
+ if (!fc)
+ return;
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_CAP_SET, 4, domain,
+ &fc[POWERCAP_FC_CAP].set_addr,
+ &fc[POWERCAP_FC_CAP].set_db);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_CAP_GET, 4, domain,
+ &fc[POWERCAP_FC_CAP].get_addr, NULL);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_PAI_SET, 4, domain,
+ &fc[POWERCAP_FC_PAI].set_addr,
+ &fc[POWERCAP_FC_PAI].set_db);
+
+ ph->hops->fastchannel_init(ph, POWERCAP_DESCRIBE_FASTCHANNEL,
+ POWERCAP_PAI_GET, 4, domain,
+ &fc[POWERCAP_FC_PAI].get_addr, NULL);
+
+ *p_fc = fc;
+}
+
+static int scmi_powercap_notify(const struct scmi_protocol_handle *ph,
+ u32 domain, int message_id, bool enable)
+{
+ int ret;
+ struct scmi_xfer *t;
+
+ switch (message_id) {
+ case POWERCAP_CAP_NOTIFY:
+ {
+ struct scmi_msg_powercap_notify_cap *notify;
+
+ ret = ph->xops->xfer_get_init(ph, message_id,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = cpu_to_le32(enable ? BIT(0) : 0);
+ break;
+ }
+ case POWERCAP_MEASUREMENTS_NOTIFY:
+ {
+ u32 low, high;
+ struct scmi_msg_powercap_notify_thresh *notify;
+
+ /*
+ * Note that we have to pick the most recently configured
+ * thresholds to build a proper POWERCAP_MEASUREMENTS_NOTIFY
+ * enable request; we fail, complaining, if no thresholds
+ * were ever set, since that is an indication that the API
+ * has been used wrongly.
+ */
+ ret = scmi_powercap_measurements_threshold_get(ph, domain,
+ &low, &high);
+ if (ret)
+ return ret;
+
+ if (enable && !low && !high) {
+ dev_err(ph->dev,
+ "Invalid Measurements Notify thresholds: %u/%u\n",
+ low, high);
+ return -EINVAL;
+ }
+
+ ret = ph->xops->xfer_get_init(ph, message_id,
+ sizeof(*notify), 0, &t);
+ if (ret)
+ return ret;
+
+ notify = t->tx.buf;
+ notify->domain = cpu_to_le32(domain);
+ notify->notify_enable = cpu_to_le32(enable ?
BIT(0) : 0);
+ notify->power_thresh_low = cpu_to_le32(low);
+ notify->power_thresh_high = cpu_to_le32(high);
+ break;
+ }
+ default:
+ return -EINVAL;
+ }
+
+ ret = ph->xops->do_xfer(ph, t);
+
+ ph->xops->xfer_put(ph, t);
+ return ret;
+}
+
+static int
+scmi_powercap_set_notify_enabled(const struct scmi_protocol_handle *ph,
+ u8 evt_id, u32 src_id, bool enable)
+{
+ int ret, cmd_id;
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (evt_id >= ARRAY_SIZE(evt_2_cmd) || src_id >= pi->num_domains)
+ return -EINVAL;
+
+ cmd_id = evt_2_cmd[evt_id];
+ ret = scmi_powercap_notify(ph, src_id, cmd_id, enable);
+ if (ret)
+ pr_debug("FAIL_ENABLED - evt[%X] dom[%d] - ret:%d\n",
+ evt_id, src_id, ret);
+ else if (cmd_id == POWERCAP_MEASUREMENTS_NOTIFY)
+ /*
+ * On success save the current notification enabled state, so
+ * as to be able to properly update the notification thresholds
+ * when they are modified on a domain for which measurement
+ * notifications are currently enabled.
+ *
+ * This is needed because the SCMI Notification core machinery
+ * and API do not support passing per-notification custom
+ * arguments at callback registration time.
+ *
+ * Note that this can be done here with a simple flag since the
+ * SCMI core Notifications code takes care of keeping proper
+ * per-domain enables refcounting, so that this helper function
+ * will be called only once (for enables) when the first user
+ * registers a callback on this domain and once more (disable)
+ * when the last user de-registers its callback.
+ */
+ pi->states[src_id].meas_notif_enabled = enable;
+
+ return ret;
+}
+
+static void *
+scmi_powercap_fill_custom_report(const struct scmi_protocol_handle *ph,
+ u8 evt_id, ktime_t timestamp,
+ const void *payld, size_t payld_sz,
+ void *report, u32 *src_id)
+{
+ void *rep = NULL;
+
+ switch (evt_id) {
+ case SCMI_EVENT_POWERCAP_CAP_CHANGED:
+ {
+ const struct scmi_powercap_cap_changed_notify_payld *p = payld;
+ struct scmi_powercap_cap_changed_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power_cap = le32_to_cpu(p->power_cap);
+ r->pai = le32_to_cpu(p->pai);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ case SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED:
+ {
+ const struct scmi_powercap_meas_changed_notify_payld *p = payld;
+ struct scmi_powercap_meas_changed_report *r = report;
+
+ if (sizeof(*p) != payld_sz)
+ break;
+
+ r->timestamp = timestamp;
+ r->agent_id = le32_to_cpu(p->agent_id);
+ r->domain_id = le32_to_cpu(p->domain_id);
+ r->power = le32_to_cpu(p->power);
+ *src_id = r->domain_id;
+ rep = r;
+ break;
+ }
+ default:
+ break;
+ }
+
+ return rep;
+}
+
+static int
+scmi_powercap_get_num_sources(const struct scmi_protocol_handle *ph)
+{
+ struct powercap_info *pi = ph->get_priv(ph);
+
+ if (!pi)
+ return -EINVAL;
+
+ return pi->num_domains;
+}
+
+static const struct scmi_event powercap_events[] = {
+ {
+ .id = SCMI_EVENT_POWERCAP_CAP_CHANGED,
+ .max_payld_sz =
+ sizeof(struct scmi_powercap_cap_changed_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_powercap_cap_changed_report),
+ },
+ {
+ .id = SCMI_EVENT_POWERCAP_MEASUREMENTS_CHANGED,
+ .max_payld_sz =
+ sizeof(struct scmi_powercap_meas_changed_notify_payld),
+ .max_report_sz =
+ sizeof(struct scmi_powercap_meas_changed_report),
+ },
+};
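For context, a kernel-side consumer would subscribe to the events wired up above through the SCMI notification core. A minimal sketch, not part of this patch (sdev, handle, src_id and the my_* names are assumptions); the callback receives the report filled in by scmi_powercap_fill_custom_report():

static int my_cap_changed_cb(struct notifier_block *nb,
			     unsigned long event, void *data)
{
	struct scmi_powercap_cap_changed_report *r = data;

	pr_info("powercap domain %u: cap %u PAI %u\n",
		r->domain_id, r->power_cap, r->pai);
	return NOTIFY_OK;
}

static struct notifier_block my_nb = { .notifier_call = my_cap_changed_cb };

/* In probe(), with src_id selecting the powercap domain of interest: */
ret = handle->notify_ops->devm_event_notifier_register(sdev,
					SCMI_PROTOCOL_POWERCAP,
					SCMI_EVENT_POWERCAP_CAP_CHANGED,
					&src_id, &my_nb);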
+
+static const struct scmi_event_ops powercap_event_ops = {
+ .get_num_sources = scmi_powercap_get_num_sources,
+ .set_notify_enabled = scmi_powercap_set_notify_enabled,
+ .fill_custom_report = scmi_powercap_fill_custom_report,
+};
+
+static const struct scmi_protocol_events powercap_protocol_events = {
+ .queue_sz = SCMI_PROTO_QUEUE_SZ,
+ .ops = &powercap_event_ops,
+ .evts = powercap_events,
+ .num_events = ARRAY_SIZE(powercap_events),
+};
+
+static int
+scmi_powercap_protocol_init(const struct scmi_protocol_handle *ph)
+{
+ int domain, ret;
+ u32 version;
+ struct powercap_info *pinfo;
+
+ ret = ph->xops->version_get(ph, &version);
+ if (ret)
+ return ret;
+
+ dev_dbg(ph->dev, "Powercap Version %d.%d\n",
+ PROTOCOL_REV_MAJOR(version), PROTOCOL_REV_MINOR(version));
+
+ pinfo = devm_kzalloc(ph->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return -ENOMEM;
+
+ ret = scmi_powercap_attributes_get(ph, pinfo);
+ if (ret)
+ return ret;
+
+ pinfo->powercaps = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->powercaps),
+ GFP_KERNEL);
+ if (!pinfo->powercaps)
+ return -ENOMEM;
+
+ /*
+ * Note that any failure in retrieving any domain attribute fails the
+ * whole Powercap protocol initialization: this way the reported
+ * Powercap domains are all guaranteed, when accessed, to be well
+ * formed and linked by sane parent-child relationships (if any).
+ */
+ for (domain = 0; domain < pinfo->num_domains; domain++) {
+ ret = scmi_powercap_domain_attributes_get(ph, pinfo, domain);
+ if (ret)
+ return ret;
+
+ if (pinfo->powercaps[domain].fastchannels)
+ scmi_powercap_domain_init_fc(ph, domain,
+ &pinfo->powercaps[domain].fc_info);
+ }
+
+ pinfo->states = devm_kcalloc(ph->dev, pinfo->num_domains,
+ sizeof(*pinfo->states), GFP_KERNEL);
+ if (!pinfo->states)
+ return -ENOMEM;
+
+ pinfo->version = version;
+
+ return ph->set_priv(ph, pinfo);
+}
+
+static const struct scmi_protocol scmi_powercap = {
+ .id = SCMI_PROTOCOL_POWERCAP,
+ .owner = THIS_MODULE,
+ .instance_init = &scmi_powercap_protocol_init,
+ .ops = &powercap_proto_ops,
+ .events = &powercap_protocol_events,
+};
+
+DEFINE_SCMI_PROTOCOL_REGISTER_UNREGISTER(powercap, scmi_powercap)
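Usage-wise, once this protocol module is registered, a driver bound to a powercap-capable SCMI device can grab the ops and operate on a domain; where a fastchannel was advertised, cap_get()/cap_set() above use it transparently. A hedged sketch, assuming sdev/handle come from a probed scmi_device and domain_id/new_cap are in-range values:

const struct scmi_powercap_proto_ops *powercap_ops;
struct scmi_protocol_handle *ph;
u32 cap;
int ret;

powercap_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_POWERCAP, &ph);
if (IS_ERR(powercap_ops))
	return PTR_ERR(powercap_ops);

/* Fastchannel read if available, message-based transfer otherwise */
ret = powercap_ops->cap_get(ph, domain_id, &cap);
if (!ret)
	/* cap_set() itself validates new_cap against [min, max] */
	ret = powercap_ops->cap_set(ph, domain_id, new_cap, false);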
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h index 73304af5ec4a..2f3bf691db7c 100644 --- a/drivers/firmware/arm_scmi/protocols.h +++ b/drivers/firmware/arm_scmi/protocols.h
@@ -24,8 +24,6 @@
#include <asm/unaligned.h>
-#define SCMI_SHORT_NAME_MAX_SIZE 16
-
#define PROTOCOL_REV_MINOR_MASK GENMASK(15, 0)
#define PROTOCOL_REV_MAJOR_MASK GENMASK(31, 16)
#define PROTOCOL_REV_MAJOR(x) ((u16)(FIELD_GET(PROTOCOL_REV_MAJOR_MASK, (x))))
@@ -181,6 +179,8 @@ struct scmi_protocol_handle {
* @max_resources: Maximum acceptable number of items, configured by the caller
* depending on the underlying resources that it is querying.
* @loop_idx: The iterator loop index in the current multi-part reply.
+ * @rx_len: Size in bytes of the currently processed message; it can be used by
+ * the user of the iterator to verify a reply size.
* @priv: Optional pointer to some additional state-related private data setup
* by the caller during the iterations.
*/
@@ -190,6 +190,7 @@ struct scmi_iterator_state {
unsigned int num_remaining;
unsigned int max_resources;
unsigned int loop_idx;
+ size_t rx_len;
void *priv;
};
@@ -214,6 +215,19 @@ struct scmi_iterator_ops {
struct scmi_iterator_state *st, void *priv);
};
+struct scmi_fc_db_info {
+ int width;
+ u64 set;
+ u64 mask;
+ void __iomem *addr;
+};
+
+struct scmi_fc_info {
+ void __iomem *set_addr;
+ void __iomem *get_addr;
+ struct scmi_fc_db_info *set_db;
+};
+
/**
* struct scmi_proto_helpers_ops - References to common protocol helpers
* @extended_name_get: A common helper function to retrieve extended naming
* @iter_response_init: A common helper to initialize a generic iterator to
@@ -229,6 +243,9 @@ struct scmi_iterator_ops {
* provided in @ops.
* @iter_response_run: A common helper to trigger the run of a previously
* initialized iterator.
+ * @fastchannel_init: A common helper used to initialize FC descriptors by
+ * gathering FC descriptions from the SCMI platform server.
+ * @fastchannel_db_ring: A common helper to ring a FC doorbell.
*/
struct scmi_proto_helpers_ops {
int (*extended_name_get)(const struct scmi_protocol_handle *ph,
@@ -238,6 +255,12 @@ struct scmi_proto_helpers_ops {
unsigned int max_resources, u8 msg_id, size_t tx_size, void *priv);
int (*iter_response_run)(void *iter);
+ void (*fastchannel_init)(const struct scmi_protocol_handle *ph,
+ u8 describe_id, u32 message_id,
+ u32 valid_size, u32 domain,
+ void __iomem **p_addr,
+ struct scmi_fc_db_info **p_db);
+ void (*fastchannel_db_ring)(struct scmi_fc_db_info *db);
};
/**
@@ -314,5 +337,6 @@ DECLARE_SCMI_REGISTER_UNREGISTER(reset);
DECLARE_SCMI_REGISTER_UNREGISTER(sensors);
DECLARE_SCMI_REGISTER_UNREGISTER(voltage);
DECLARE_SCMI_REGISTER_UNREGISTER(system);
+DECLARE_SCMI_REGISTER_UNREGISTER(powercap);
#endif /* _SCMI_PROTOCOLS_H */
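The new rx_len state makes reply-size sanity checks possible from within iterator callbacks; a hypothetical process_response user (scmi_msg_resp_foo is an assumed name, not a real message) could look like:

static int
iter_foo_process_response(const struct scmi_protocol_handle *ph,
			  const void *response,
			  struct scmi_iterator_state *st, void *priv)
{
	const struct scmi_msg_resp_foo *r = response;

	/* st->rx_len carries the size of the currently processed reply */
	if (st->rx_len < sizeof(*r))
		return -EPROTO;

	/* ... decode r and advance st->priv as usual ... */
	return 0;
}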
diff --git a/drivers/firmware/arm_scmi/reset.c b/drivers/firmware/arm_scmi/reset.c index a420a9102094..673f3eb498f4 100644 --- a/drivers/firmware/arm_scmi/reset.c +++ b/drivers/firmware/arm_scmi/reset.c
@@ -116,7 +116,7 @@ scmi_reset_domain_attributes_get(const struct scmi_protocol_handle *ph,
dom_info->latency_us = le32_to_cpu(attr->latency);
if (dom_info->latency_us == U32_MAX)
dom_info->latency_us = 0;
- strlcpy(dom_info->name, attr->name, SCMI_MAX_STR_SIZE);
+ strscpy(dom_info->name, attr->name, SCMI_SHORT_NAME_MAX_SIZE);
}
ph->xops->xfer_put(ph, t);
diff --git a/drivers/firmware/arm_scmi/scmi_power_control.c b/drivers/firmware/arm_scmi/scmi_power_control.c new file mode 100644 index 000000000000..6eb7d2a4b6b1 --- /dev/null +++ b/drivers/firmware/arm_scmi/scmi_power_control.c
@@ -0,0 +1,362 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * SCMI Generic SystemPower Control driver.
+ *
+ * Copyright (C) 2020-2022 ARM Ltd.
+ */
+/*
+ * In order to handle platform originated SCMI SystemPower requests (like
+ * shutdowns or cold/warm resets) we register an SCMI Notification notifier
+ * block to react when such SCMI SystemPower events are emitted by the
+ * platform.
+ *
+ * Once such a notification is received we act accordingly to perform the
+ * required system transition depending on the kind of request.
+ *
+ * Graceful requests are routed to userspace through the same API methods
+ * (orderly_poweroff/reboot()) used by ACPI when handling ACPI Shutdown bus
+ * events.
+ *
+ * Direct forceful requests are not supported since they are not meant to be
+ * sent by the SCMI platform to an OSPM like Linux.
+ *
+ * Additionally, graceful request notifications can carry an optional timeout
+ * field stating the maximum amount of time allowed by the platform for
+ * completion after which they are converted to forceful ones: the assumption
+ * here is that even graceful requests can be upper-bounded by a maximum final
+ * timeout strictly enforced by the platform itself which can ultimately cut
+ * the power off at will anytime; in order to avoid such an extreme scenario,
+ * we track progress of graceful requests by means of a reboot notifier
+ * converting timed-out graceful requests to forceful ones, so at least we
+ * try to perform a clean sync and shutdown/restart before the power is cut.
+ *
+ * Given the peculiar nature of the SCMI SystemPower protocol, that is, being
+ * in charge of triggering system-wide shutdown/reboot events, there should be
+ * only one SCMI platform actively emitting SystemPower events.
+ * For this reason the SCMI core takes care to enforce the creation of one
+ * single unique device associated with the SCMI System Power protocol; no
+ * matter how many SCMI platforms are defined on the system, only one can be
+ * designated to support System Power: as a consequence this driver will never
+ * be probed more than once.
+ *
+ * For similar reasons, as soon as the first valid SystemPower notification is
+ * received by this driver and the shutdown/reboot is started, any further
+ * notification possibly emitted by the platform will be ignored.
+ */
+
+#include <linux/math.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/printk.h>
+#include <linux/reboot.h>
+#include <linux/scmi_protocol.h>
+#include <linux/slab.h>
+#include <linux/time64.h>
+#include <linux/timer.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
+#ifndef MODULE
+#include <linux/fs.h>
+#endif
+
+enum scmi_syspower_state {
+ SCMI_SYSPOWER_IDLE,
+ SCMI_SYSPOWER_IN_PROGRESS,
+ SCMI_SYSPOWER_REBOOTING
+};
+
+/**
+ * struct scmi_syspower_conf - Common configuration
+ *
+ * @dev: A reference device
+ * @state: Current SystemPower state
+ * @state_mtx: @state related mutex
+ * @required_transition: The requested transition as described in the received
+ * SCMI SystemPower notification
+ * @userspace_nb: The notifier_block registered against the SCMI SystemPower
+ * notification to start the needed userspace interactions.
+ * @reboot_nb: A notifier_block optionally used to track reboot progress
+ * @forceful_work: A worker used to trigger a forceful transition once a
+ * graceful one has timed out.
+ */
+struct scmi_syspower_conf {
+ struct device *dev;
+ enum scmi_syspower_state state;
+ /* Protect access to state */
+ struct mutex state_mtx;
+ enum scmi_system_events required_transition;
+
+ struct notifier_block userspace_nb;
+ struct notifier_block reboot_nb;
+
+ struct delayed_work forceful_work;
+};
+
+#define userspace_nb_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, userspace_nb)
+
+#define reboot_nb_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, reboot_nb)
+
+#define dwork_to_sconf(x) \
+ container_of(x, struct scmi_syspower_conf, forceful_work)
+
+/**
+ * scmi_reboot_notifier - A reboot notifier to catch an ongoing successful
+ * system transition
+ * @nb: Reference to the related notifier block
+ * @reason: The reason for the ongoing reboot
+ * @__unused: The cmd being executed on a restart request (unused)
+ *
+ * When an ongoing system transition compatible with the one requested by
+ * SCMI is detected, cancel the delayed work.
+ *
+ * Return: NOTIFY_OK in any case
+ */
+static int scmi_reboot_notifier(struct notifier_block *nb,
+ unsigned long reason, void *__unused)
+{
+ struct scmi_syspower_conf *sc = reboot_nb_to_sconf(nb);
+
+ mutex_lock(&sc->state_mtx);
+ switch (reason) {
+ case SYS_HALT:
+ case SYS_POWER_OFF:
+ if (sc->required_transition == SCMI_SYSTEM_SHUTDOWN)
+ sc->state = SCMI_SYSPOWER_REBOOTING;
+ break;
+ case SYS_RESTART:
+ if (sc->required_transition == SCMI_SYSTEM_COLDRESET ||
+ sc->required_transition == SCMI_SYSTEM_WARMRESET)
+ sc->state = SCMI_SYSPOWER_REBOOTING;
+ break;
+ default:
+ break;
+ }
+
+ if (sc->state == SCMI_SYSPOWER_REBOOTING) {
+ dev_dbg(sc->dev, "Reboot in progress...cancel delayed work.\n");
+ cancel_delayed_work_sync(&sc->forceful_work);
+ }
+ mutex_unlock(&sc->state_mtx);
+
+ return NOTIFY_OK;
+}
+
+/**
+ * scmi_request_forceful_transition - Request forceful SystemPower transition
+ * @sc: A reference to the configuration data
+ *
+ * Initiates the required SystemPower transition without involving userspace:
+ * just trigger the action at the kernel level after issuing an emergency
+ * sync, if possible at all.
+ */
+static inline void
+scmi_request_forceful_transition(struct scmi_syspower_conf *sc)
+{
+ dev_dbg(sc->dev, "Serving forceful request:%d\n",
+ sc->required_transition);
+
+#ifndef MODULE
+ emergency_sync();
+#endif
+ switch (sc->required_transition) {
+ case SCMI_SYSTEM_SHUTDOWN:
+ kernel_power_off();
+ break;
+ case SCMI_SYSTEM_COLDRESET:
+ case SCMI_SYSTEM_WARMRESET:
+ kernel_restart(NULL);
+ break;
+ default:
+ break;
+ }
+}
+
+static void scmi_forceful_work_func(struct work_struct *work)
+{
+ struct scmi_syspower_conf *sc;
+ struct delayed_work *dwork;
+
+ if (system_state > SYSTEM_RUNNING)
+ return;
+
+ dwork = to_delayed_work(work);
+ sc = dwork_to_sconf(dwork);
+
+ dev_dbg(sc->dev, "Graceful request timed out...forcing!\n");
+ mutex_lock(&sc->state_mtx);
+ /* avoid deadlock by unregistering reboot notifier first */
+ unregister_reboot_notifier(&sc->reboot_nb);
+ if (sc->state == SCMI_SYSPOWER_IN_PROGRESS)
+ scmi_request_forceful_transition(sc);
+ mutex_unlock(&sc->state_mtx);
+}
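For reference, the bare reboot-notifier kernel API that scmi_reboot_notifier() and scmi_forceful_work_func() above build upon, reduced to a standalone sketch with hypothetical names:

static int my_reboot_cb(struct notifier_block *nb, unsigned long reason,
			void *unused)
{
	/* reason is one of SYS_HALT, SYS_POWER_OFF or SYS_RESTART */
	return NOTIFY_OK;
}

static struct notifier_block my_reboot_nb = {
	.notifier_call = my_reboot_cb,
};

/*
 * register_reboot_notifier(&my_reboot_nb) once a transition is started;
 * unregister_reboot_notifier(&my_reboot_nb) when no longer interested.
 */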
+
+/**
+ * scmi_request_graceful_transition - Request graceful SystemPower transition
+ * @sc: A reference to the configuration data
+ * @timeout_ms: The desired timeout to wait for the shutdown to complete before
+ * the system is forcibly shut down.
+ *
+ * Initiates the required SystemPower transition, requesting userspace
+ * co-operation: it uses the same orderly_ methods used by ACPI Shutdown event
+ * processing.
+ *
+ * It also takes care to register a reboot notifier and to schedule a delayed
+ * work in order to detect if userspace actions are taking too long and, in
+ * such a case, to trigger a forceful transition.
+ */
+static void scmi_request_graceful_transition(struct scmi_syspower_conf *sc,
+ unsigned int timeout_ms)
+{
+ unsigned int adj_timeout_ms = 0;
+
+ if (timeout_ms) {
+ int ret;
+
+ sc->reboot_nb.notifier_call = &scmi_reboot_notifier;
+ ret = register_reboot_notifier(&sc->reboot_nb);
+ if (!ret) {
+ /* Wait only up to 75% of the advertised timeout */
+ adj_timeout_ms = mult_frac(timeout_ms, 3, 4);
+ INIT_DELAYED_WORK(&sc->forceful_work,
+ scmi_forceful_work_func);
+ schedule_delayed_work(&sc->forceful_work,
+ msecs_to_jiffies(adj_timeout_ms));
+ } else {
+ /* Carry on best effort even without a reboot notifier */
+ dev_warn(sc->dev,
+ "Cannot register reboot notifier!\n");
+ }
+ }
+
+ dev_dbg(sc->dev,
+ "Serving graceful req:%d (timeout_ms:%u adj_timeout_ms:%u)\n",
+ sc->required_transition, timeout_ms, adj_timeout_ms);
+
+ switch (sc->required_transition) {
+ case SCMI_SYSTEM_SHUTDOWN:
+ /*
+ * When triggered early at boot-time the 'orderly' call will
+ * partially fail due to the lack of userspace itself, but
+ * the force=true argument will nonetheless start a successful
+ * forced shutdown.
+ */
+ orderly_poweroff(true);
+ break;
+ case SCMI_SYSTEM_COLDRESET:
+ case SCMI_SYSTEM_WARMRESET:
+ orderly_reboot();
+ break;
+ default:
+ break;
+ }
+}
+
+/**
+ * scmi_userspace_notifier - Notifier callback to act on SystemPower
+ * Notifications
+ * @nb: Reference to the related notifier block
+ * @event: The SystemPower notification event id
+ * @data: The SystemPower event report
+ *
+ * This callback is in charge of decoding the received SystemPower report
+ * and of acting accordingly, triggering a graceful or forceful system
+ * transition.
+ *
+ * Note that once a valid SCMI SystemPower event starts being served, any
+ * other following SystemPower notification received from the same SCMI
+ * instance (handle) will be ignored.
+ *
+ * Return: NOTIFY_OK once a valid SystemPower event has been successfully
+ * processed.
+ */
+static int scmi_userspace_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct scmi_system_power_state_notifier_report *er = data;
+ struct scmi_syspower_conf *sc = userspace_nb_to_sconf(nb);
+
+ if (er->system_state >= SCMI_SYSTEM_POWERUP) {
+ dev_err(sc->dev, "Ignoring unsupported system_state: 0x%X\n",
+ er->system_state);
+ return NOTIFY_DONE;
+ }
+
+ if (!SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(er->flags)) {
+ dev_err(sc->dev, "Ignoring forceful notification.\n");
+ return NOTIFY_DONE;
+ }
+
+ /*
+ * Bail out if the system is already shutting down or an SCMI
+ * SystemPower request is already being served.
+ */
+ if (system_state > SYSTEM_RUNNING)
+ return NOTIFY_DONE;
+ mutex_lock(&sc->state_mtx);
+ if (sc->state != SCMI_SYSPOWER_IDLE) {
+ dev_dbg(sc->dev,
+ "Transition already in progress...ignore.\n");
+ mutex_unlock(&sc->state_mtx);
+ return NOTIFY_DONE;
+ }
+ sc->state = SCMI_SYSPOWER_IN_PROGRESS;
+ mutex_unlock(&sc->state_mtx);
+
+ sc->required_transition = er->system_state;
+
+ /* Leave a trace in the logs of who triggered the shutdown/reboot.
*/ + dev_info(sc->dev, "Serving shutdown/reboot request: %d\n", + sc->required_transition); + + scmi_request_graceful_transition(sc, er->timeout); + + return NOTIFY_OK; +} + +static int scmi_syspower_probe(struct scmi_device *sdev) +{ + int ret; + struct scmi_syspower_conf *sc; + struct scmi_handle *handle = sdev->handle; + + if (!handle) + return -ENODEV; + + ret = handle->devm_protocol_acquire(sdev, SCMI_PROTOCOL_SYSTEM); + if (ret) + return ret; + + sc = devm_kzalloc(&sdev->dev, sizeof(*sc), GFP_KERNEL); + if (!sc) + return -ENOMEM; + + sc->state = SCMI_SYSPOWER_IDLE; + mutex_init(&sc->state_mtx); + sc->required_transition = SCMI_SYSTEM_MAX; + sc->userspace_nb.notifier_call = &scmi_userspace_notifier; + sc->dev = &sdev->dev; + + return handle->notify_ops->devm_event_notifier_register(sdev, + SCMI_PROTOCOL_SYSTEM, + SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER, + NULL, &sc->userspace_nb); +} + +static const struct scmi_device_id scmi_id_table[] = { + { SCMI_PROTOCOL_SYSTEM, "syspower" }, + { }, +}; +MODULE_DEVICE_TABLE(scmi, scmi_id_table); + +static struct scmi_driver scmi_system_power_driver = { + .name = "scmi-system-power", + .probe = scmi_syspower_probe, + .id_table = scmi_id_table, +}; +module_scmi_driver(scmi_system_power_driver); + +MODULE_AUTHOR("Cristian Marussi <cristian.marussi@arm.com>"); +MODULE_DESCRIPTION("ARM SCMI SystemPower Control driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index 21e0ce89b153..7288c6117838 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -338,7 +338,6 @@ static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph, struct scmi_sensor_info *s) { void *iter; - struct scmi_msg_sensor_list_update_intervals *msg; struct scmi_iterator_ops ops = { .prepare_message = iter_intervals_prepare_message, .update_state = iter_intervals_update_state, @@ -351,22 +350,28 @@ static int scmi_sensor_update_intervals(const struct scmi_protocol_handle *ph, iter = ph->hops->iter_response_init(ph, &ops, s->intervals.count, SENSOR_LIST_UPDATE_INTERVALS, - sizeof(*msg), &upriv); + sizeof(struct scmi_msg_sensor_list_update_intervals), + &upriv); if (IS_ERR(iter)) return PTR_ERR(iter); return ph->hops->iter_response_run(iter); } +struct scmi_apriv { + bool any_axes_support_extended_names; + struct scmi_sensor_info *s; +}; + static void iter_axes_desc_prepare_message(void *message, const unsigned int desc_index, const void *priv) { struct scmi_msg_sensor_axis_description_get *msg = message; - const struct scmi_sensor_info *s = priv; + const struct scmi_apriv *apriv = priv; /* Set the number of sensors to be skipped/already read */ - msg->id = cpu_to_le32(s->id); + msg->id = cpu_to_le32(apriv->s->id); msg->axis_desc_index = cpu_to_le32(desc_index); } @@ -393,19 +398,21 @@ iter_axes_desc_process_response(const struct scmi_protocol_handle *ph, u32 attrh, attrl; struct scmi_sensor_axis_info *a; size_t dsize = SCMI_MSG_RESP_AXIS_DESCR_BASE_SZ; - struct scmi_sensor_info *s = priv; + struct scmi_apriv *apriv = priv; const struct scmi_axis_descriptor *adesc = st->priv; attrl = le32_to_cpu(adesc->attributes_low); + if (SUPPORTS_EXTENDED_AXIS_NAMES(attrl)) + apriv->any_axes_support_extended_names = true; - a = &s->axis[st->desc_index + st->loop_idx]; + a = &apriv->s->axis[st->desc_index + st->loop_idx]; a->id = le32_to_cpu(adesc->id); a->extended_attrs = SUPPORTS_EXTEND_ATTRS(attrl); attrh = le32_to_cpu(adesc->attributes_high); a->scale = 
S32_EXT(SENSOR_SCALE(attrh)); a->type = SENSOR_TYPE(attrh); - strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE); + strscpy(a->name, adesc->name, SCMI_SHORT_NAME_MAX_SIZE); if (a->extended_attrs) { unsigned int ares = le32_to_cpu(adesc->resolution); @@ -444,10 +451,19 @@ iter_axes_extended_name_process_response(const struct scmi_protocol_handle *ph, void *priv) { struct scmi_sensor_axis_info *a; - const struct scmi_sensor_info *s = priv; + const struct scmi_apriv *apriv = priv; struct scmi_sensor_axis_name_descriptor *adesc = st->priv; + u32 axis_id = le32_to_cpu(adesc->axis_id); - a = &s->axis[st->desc_index + st->loop_idx]; + if (axis_id >= st->max_resources) + return -EPROTO; + + /* + * Pick the corresponding descriptor based on the axis_id embedded + * in the reply since the list of axes supporting extended names + * can be a subset of all the axes. + */ + a = &apriv->s->axis[axis_id]; strscpy(a->name, adesc->name, SCMI_MAX_STR_SIZE); st->priv = ++adesc; @@ -458,21 +474,36 @@ static int scmi_sensor_axis_extended_names_get(const struct scmi_protocol_handle *ph, struct scmi_sensor_info *s) { + int ret; void *iter; - struct scmi_msg_sensor_axis_description_get *msg; struct scmi_iterator_ops ops = { .prepare_message = iter_axes_desc_prepare_message, .update_state = iter_axes_extended_name_update_state, .process_response = iter_axes_extended_name_process_response, }; + struct scmi_apriv apriv = { + .any_axes_support_extended_names = false, + .s = s, + }; iter = ph->hops->iter_response_init(ph, &ops, s->num_axis, SENSOR_AXIS_NAME_GET, - sizeof(*msg), s); + sizeof(struct scmi_msg_sensor_axis_description_get), + &apriv); if (IS_ERR(iter)) return PTR_ERR(iter); - return ph->hops->iter_response_run(iter); + /* + * Do not cause whole protocol initialization failure when failing to + * get extended names for axes. + */ + ret = ph->hops->iter_response_run(iter); + if (ret) + dev_warn(ph->dev, + "Failed to get axes extended names for %s (ret:%d).\n", + s->name, ret); + + return 0; } static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph, @@ -481,12 +512,15 @@ static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph, { int ret; void *iter; - struct scmi_msg_sensor_axis_description_get *msg; struct scmi_iterator_ops ops = { .prepare_message = iter_axes_desc_prepare_message, .update_state = iter_axes_desc_update_state, .process_response = iter_axes_desc_process_response, }; + struct scmi_apriv apriv = { + .any_axes_support_extended_names = false, + .s = s, + }; s->axis = devm_kcalloc(ph->dev, s->num_axis, sizeof(*s->axis), GFP_KERNEL); @@ -495,7 +529,8 @@ static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph, iter = ph->hops->iter_response_init(ph, &ops, s->num_axis, SENSOR_AXIS_DESCRIPTION_GET, - sizeof(*msg), s); + sizeof(struct scmi_msg_sensor_axis_description_get), + &apriv); if (IS_ERR(iter)) return PTR_ERR(iter); @@ -503,7 +538,8 @@ static int scmi_sensor_axis_description(const struct scmi_protocol_handle *ph, if (ret) return ret; - if (PROTOCOL_REV_MAJOR(version) >= 0x3) + if (PROTOCOL_REV_MAJOR(version) >= 0x3 && + apriv.any_axes_support_extended_names) ret = scmi_sensor_axis_extended_names_get(ph, s); return ret; @@ -598,7 +634,7 @@ iter_sens_descr_process_response(const struct scmi_protocol_handle *ph, SUPPORTS_AXIS(attrh) ? 
SENSOR_AXIS_NUMBER(attrh) : 0, SCMI_MAX_NUM_SENSOR_AXIS); - strscpy(s->name, sdesc->name, SCMI_MAX_STR_SIZE); + strscpy(s->name, sdesc->name, SCMI_SHORT_NAME_MAX_SIZE); /* * If supported overwrite short name with the extended diff --git a/drivers/firmware/arm_scmi/system.c b/drivers/firmware/arm_scmi/system.c index 220e399118ad..9383d7584539 100644 --- a/drivers/firmware/arm_scmi/system.c +++ b/drivers/firmware/arm_scmi/system.c @@ -27,10 +27,12 @@ struct scmi_system_power_state_notifier_payld { __le32 agent_id; __le32 flags; __le32 system_state; + __le32 timeout; }; struct scmi_system_info { u32 version; + bool graceful_timeout_supported; }; static int scmi_system_request_notify(const struct scmi_protocol_handle *ph, @@ -72,17 +74,27 @@ scmi_system_fill_custom_report(const struct scmi_protocol_handle *ph, const void *payld, size_t payld_sz, void *report, u32 *src_id) { + size_t expected_sz; const struct scmi_system_power_state_notifier_payld *p = payld; struct scmi_system_power_state_notifier_report *r = report; + struct scmi_system_info *pinfo = ph->get_priv(ph); + expected_sz = pinfo->graceful_timeout_supported ? + sizeof(*p) : sizeof(*p) - sizeof(__le32); if (evt_id != SCMI_EVENT_SYSTEM_POWER_STATE_NOTIFIER || - sizeof(*p) != payld_sz) + payld_sz != expected_sz) return NULL; r->timestamp = timestamp; r->agent_id = le32_to_cpu(p->agent_id); r->flags = le32_to_cpu(p->flags); r->system_state = le32_to_cpu(p->system_state); + if (pinfo->graceful_timeout_supported && + r->system_state == SCMI_SYSTEM_SHUTDOWN && + SCMI_SYSPOWER_IS_REQUEST_GRACEFUL(r->flags)) + r->timeout = le32_to_cpu(p->timeout); + else + r->timeout = 0x00; *src_id = 0; return r; @@ -129,6 +141,9 @@ static int scmi_system_protocol_init(const struct scmi_protocol_handle *ph) return -ENOMEM; pinfo->version = version; + if (PROTOCOL_REV_MAJOR(pinfo->version) >= 0x2) + pinfo->graceful_timeout_supported = true; + return ph->set_priv(ph, pinfo); } diff --git a/drivers/firmware/arm_scmi/voltage.c b/drivers/firmware/arm_scmi/voltage.c index 9d195d8719ab..eaa8d944926a 100644 --- a/drivers/firmware/arm_scmi/voltage.c +++ b/drivers/firmware/arm_scmi/voltage.c @@ -180,7 +180,6 @@ static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph, { int ret; void *iter; - struct scmi_msg_cmd_describe_levels *msg; struct scmi_iterator_ops ops = { .prepare_message = iter_volt_levels_prepare_message, .update_state = iter_volt_levels_update_state, @@ -193,7 +192,8 @@ static int scmi_voltage_levels_get(const struct scmi_protocol_handle *ph, iter = ph->hops->iter_response_init(ph, &ops, v->num_levels, VOLTAGE_DESCRIBE_LEVELS, - sizeof(*msg), &vpriv); + sizeof(struct scmi_msg_cmd_describe_levels), + &vpriv); if (IS_ERR(iter)) return PTR_ERR(iter); @@ -225,15 +225,14 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, /* Retrieve domain attributes at first ... 
*/ put_unaligned_le32(dom, td->tx.buf); - ret = ph->xops->do_xfer(ph, td); /* Skip domain on comms error */ - if (ret) + if (ph->xops->do_xfer(ph, td)) continue; v = vinfo->domains + dom; v->id = dom; attributes = le32_to_cpu(resp_dom->attr); - strlcpy(v->name, resp_dom->name, SCMI_MAX_STR_SIZE); + strscpy(v->name, resp_dom->name, SCMI_SHORT_NAME_MAX_SIZE); /* * If supported overwrite short name with the extended one; @@ -249,12 +248,8 @@ static int scmi_voltage_descriptors_get(const struct scmi_protocol_handle *ph, v->async_level_set = true; } - ret = scmi_voltage_levels_get(ph, v); /* Skip invalid voltage descriptors */ - if (ret) - continue; - - ph->xops->reset_rx_to_maxsz(ph, td); + scmi_voltage_levels_get(ph, v); } ph->xops->xfer_put(ph, td); diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index ddf0b9ff9e15..435d0e2658a4 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -815,7 +815,7 @@ static int scpi_init_versions(struct scpi_drvinfo *info) info->firmware_version = le32_to_cpu(caps.platform_version); } /* Ignore error if not implemented */ - if (scpi_info->is_legacy && ret == -EOPNOTSUPP) + if (info->is_legacy && ret == -EOPNOTSUPP) return 0; return ret; @@ -913,13 +913,14 @@ static int scpi_probe(struct platform_device *pdev) struct resource res; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; + struct scpi_drvinfo *scpi_drvinfo; - scpi_info = devm_kzalloc(dev, sizeof(*scpi_info), GFP_KERNEL); - if (!scpi_info) + scpi_drvinfo = devm_kzalloc(dev, sizeof(*scpi_drvinfo), GFP_KERNEL); + if (!scpi_drvinfo) return -ENOMEM; if (of_match_device(legacy_scpi_of_match, &pdev->dev)) - scpi_info->is_legacy = true; + scpi_drvinfo->is_legacy = true; count = of_count_phandle_with_args(np, "mboxes", "#mbox-cells"); if (count < 0) { @@ -927,19 +928,19 @@ static int scpi_probe(struct platform_device *pdev) return -ENODEV; } - scpi_info->channels = devm_kcalloc(dev, count, sizeof(struct scpi_chan), - GFP_KERNEL); - if (!scpi_info->channels) + scpi_drvinfo->channels = + devm_kcalloc(dev, count, sizeof(struct scpi_chan), GFP_KERNEL); + if (!scpi_drvinfo->channels) return -ENOMEM; - ret = devm_add_action(dev, scpi_free_channels, scpi_info); + ret = devm_add_action(dev, scpi_free_channels, scpi_drvinfo); if (ret) return ret; - for (; scpi_info->num_chans < count; scpi_info->num_chans++) { + for (; scpi_drvinfo->num_chans < count; scpi_drvinfo->num_chans++) { resource_size_t size; - int idx = scpi_info->num_chans; - struct scpi_chan *pchan = scpi_info->channels + idx; + int idx = scpi_drvinfo->num_chans; + struct scpi_chan *pchan = scpi_drvinfo->channels + idx; struct mbox_client *cl = &pchan->cl; struct device_node *shmem = of_parse_phandle(np, "shmem", idx); @@ -986,45 +987,53 @@ static int scpi_probe(struct platform_device *pdev) return ret; } - scpi_info->commands = scpi_std_commands; + scpi_drvinfo->commands = scpi_std_commands; - platform_set_drvdata(pdev, scpi_info); + platform_set_drvdata(pdev, scpi_drvinfo); - if (scpi_info->is_legacy) { + if (scpi_drvinfo->is_legacy) { /* Replace with legacy variants */ scpi_ops.clk_set_val = legacy_scpi_clk_set_val; - scpi_info->commands = scpi_legacy_commands; + scpi_drvinfo->commands = scpi_legacy_commands; /* Fill priority bitmap */ for (idx = 0; idx < ARRAY_SIZE(legacy_hpriority_cmds); idx++) set_bit(legacy_hpriority_cmds[idx], - scpi_info->cmd_priority); + scpi_drvinfo->cmd_priority); } - ret = scpi_init_versions(scpi_info); + scpi_info = scpi_drvinfo; + + ret = 
scpi_init_versions(scpi_drvinfo); if (ret) { dev_err(dev, "incorrect or no SCP firmware found\n"); + scpi_info = NULL; return ret; } - if (scpi_info->is_legacy && !scpi_info->protocol_version && - !scpi_info->firmware_version) + if (scpi_drvinfo->is_legacy && !scpi_drvinfo->protocol_version && + !scpi_drvinfo->firmware_version) dev_info(dev, "SCP Protocol legacy pre-1.0 firmware\n"); else dev_info(dev, "SCP Protocol %lu.%lu Firmware %lu.%lu.%lu version\n", FIELD_GET(PROTO_REV_MAJOR_MASK, - scpi_info->protocol_version), + scpi_drvinfo->protocol_version), FIELD_GET(PROTO_REV_MINOR_MASK, - scpi_info->protocol_version), + scpi_drvinfo->protocol_version), FIELD_GET(FW_REV_MAJOR_MASK, - scpi_info->firmware_version), + scpi_drvinfo->firmware_version), FIELD_GET(FW_REV_MINOR_MASK, - scpi_info->firmware_version), + scpi_drvinfo->firmware_version), FIELD_GET(FW_REV_PATCH_MASK, - scpi_info->firmware_version)); - scpi_info->scpi_ops = &scpi_ops; + scpi_drvinfo->firmware_version)); + + scpi_drvinfo->scpi_ops = &scpi_ops; - return devm_of_platform_populate(dev); + ret = devm_of_platform_populate(dev); + if (ret) + scpi_info = NULL; + + return ret; } static const struct of_device_id scpi_of_match[] = { diff --git a/drivers/firmware/efi/sysfb_efi.c b/drivers/firmware/efi/sysfb_efi.c index 4c7c9dd7733f..7882d4b3f2be 100644 --- a/drivers/firmware/efi/sysfb_efi.c +++ b/drivers/firmware/efi/sysfb_efi.c @@ -26,8 +26,6 @@ #include <linux/sysfb.h> #include <video/vga.h> -#include <asm/efi.h> - enum { OVERRIDE_NONE = 0x0, OVERRIDE_BASE = 0x1, diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c index b55c74a5e064..1ee62cd58582 100644 --- a/drivers/gpio/gpio-crystalcove.c +++ b/drivers/gpio/gpio-crystalcove.c @@ -15,6 +15,7 @@ #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/seq_file.h> +#include <linux/types.h> #define CRYSTALCOVE_GPIO_NUM 16 #define CRYSTALCOVE_VGPIO_NUM 95 @@ -110,8 +111,7 @@ static inline int to_reg(int gpio, enum ctrl_register reg_type) return reg + gpio % 8; } -static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg, - int gpio) +static void crystalcove_update_irq_mask(struct crystalcove_gpio *cg, int gpio) { u8 mirqs0 = gpio < 8 ? 
MGPIO0IRQS0 : MGPIO1IRQS0; int mask = BIT(gpio % 8); @@ -140,8 +140,7 @@ static int crystalcove_gpio_dir_in(struct gpio_chip *chip, unsigned int gpio) return regmap_write(cg->regmap, reg, CTLO_INPUT_SET); } -static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio, - int value) +static int crystalcove_gpio_dir_out(struct gpio_chip *chip, unsigned int gpio, int value) { struct crystalcove_gpio *cg = gpiochip_get_data(chip); int reg = to_reg(gpio, CTRL_OUT); @@ -168,8 +167,7 @@ static int crystalcove_gpio_get(struct gpio_chip *chip, unsigned int gpio) return val & 0x1; } -static void crystalcove_gpio_set(struct gpio_chip *chip, - unsigned int gpio, int value) +static void crystalcove_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) { struct crystalcove_gpio *cg = gpiochip_get_data(chip); int reg = to_reg(gpio, CTRL_OUT); @@ -185,10 +183,10 @@ static void crystalcove_gpio_set(struct gpio_chip *chip, static int crystalcove_irq_type(struct irq_data *data, unsigned int type) { - struct crystalcove_gpio *cg = - gpiochip_get_data(irq_data_get_irq_chip_data(data)); + struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data)); + irq_hw_number_t hwirq = irqd_to_hwirq(data); - if (data->hwirq >= CRYSTALCOVE_GPIO_NUM) + if (hwirq >= CRYSTALCOVE_GPIO_NUM) return 0; switch (type) { @@ -215,22 +213,20 @@ static int crystalcove_irq_type(struct irq_data *data, unsigned int type) static void crystalcove_bus_lock(struct irq_data *data) { - struct crystalcove_gpio *cg = - gpiochip_get_data(irq_data_get_irq_chip_data(data)); + struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data)); mutex_lock(&cg->buslock); } static void crystalcove_bus_sync_unlock(struct irq_data *data) { - struct crystalcove_gpio *cg = - gpiochip_get_data(irq_data_get_irq_chip_data(data)); - int gpio = data->hwirq; + struct crystalcove_gpio *cg = gpiochip_get_data(irq_data_get_irq_chip_data(data)); + irq_hw_number_t hwirq = irqd_to_hwirq(data); if (cg->update & UPDATE_IRQ_TYPE) - crystalcove_update_irq_ctrl(cg, gpio); + crystalcove_update_irq_ctrl(cg, hwirq); if (cg->update & UPDATE_IRQ_MASK) - crystalcove_update_irq_mask(cg, gpio); + crystalcove_update_irq_mask(cg, hwirq); cg->update = 0; mutex_unlock(&cg->buslock); @@ -238,34 +234,43 @@ static void crystalcove_bus_sync_unlock(struct irq_data *data) static void crystalcove_irq_unmask(struct irq_data *data) { - struct crystalcove_gpio *cg = - gpiochip_get_data(irq_data_get_irq_chip_data(data)); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct crystalcove_gpio *cg = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(data); - if (data->hwirq < CRYSTALCOVE_GPIO_NUM) { - cg->set_irq_mask = false; - cg->update |= UPDATE_IRQ_MASK; - } + if (hwirq >= CRYSTALCOVE_GPIO_NUM) + return; + + gpiochip_enable_irq(gc, hwirq); + + cg->set_irq_mask = false; + cg->update |= UPDATE_IRQ_MASK; } static void crystalcove_irq_mask(struct irq_data *data) { - struct crystalcove_gpio *cg = - gpiochip_get_data(irq_data_get_irq_chip_data(data)); + struct gpio_chip *gc = irq_data_get_irq_chip_data(data); + struct crystalcove_gpio *cg = gpiochip_get_data(gc); + irq_hw_number_t hwirq = irqd_to_hwirq(data); - if (data->hwirq < CRYSTALCOVE_GPIO_NUM) { - cg->set_irq_mask = true; - cg->update |= UPDATE_IRQ_MASK; - } + if (hwirq >= CRYSTALCOVE_GPIO_NUM) + return; + + cg->set_irq_mask = true; + cg->update |= UPDATE_IRQ_MASK; + + gpiochip_disable_irq(gc, hwirq); } -static struct irq_chip crystalcove_irqchip = { 
+static const struct irq_chip crystalcove_irqchip = { .name = "Crystal Cove", .irq_mask = crystalcove_irq_mask, .irq_unmask = crystalcove_irq_unmask, .irq_set_type = crystalcove_irq_type, .irq_bus_lock = crystalcove_bus_lock, .irq_bus_sync_unlock = crystalcove_bus_sync_unlock, - .flags = IRQCHIP_SKIP_SET_WAKE, + .flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data) @@ -293,8 +298,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data) return IRQ_HANDLED; } -static void crystalcove_gpio_dbg_show(struct seq_file *s, - struct gpio_chip *chip) +static void crystalcove_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) { struct crystalcove_gpio *cg = gpiochip_get_data(chip); int gpio, offset; @@ -353,7 +357,7 @@ static int crystalcove_gpio_probe(struct platform_device *pdev) cg->regmap = pmic->regmap; girq = &cg->chip.irq; - girq->chip = &crystalcove_irqchip; + gpio_irq_chip_set_chip(girq, &crystalcove_irqchip); /* This will let us handle the parent IRQ in the driver */ girq->parent_handler = NULL; girq->num_parents = 0; diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c index 08b9e2cf4f2d..71fa437b491f 100644 --- a/drivers/gpio/gpio-dln2.c +++ b/drivers/gpio/gpio-dln2.c @@ -46,7 +46,6 @@ struct dln2_gpio { struct platform_device *pdev; struct gpio_chip gpio; - struct irq_chip irqchip; /* * Cache pin direction to save us one transfer, since the hardware has @@ -306,6 +305,7 @@ static void dln2_irq_unmask(struct irq_data *irqd) struct dln2_gpio *dln2 = gpiochip_get_data(gc); int pin = irqd_to_hwirq(irqd); + gpiochip_enable_irq(gc, pin); set_bit(pin, dln2->unmasked_irqs); } @@ -316,6 +316,7 @@ static void dln2_irq_mask(struct irq_data *irqd) int pin = irqd_to_hwirq(irqd); clear_bit(pin, dln2->unmasked_irqs); + gpiochip_disable_irq(gc, pin); } static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) @@ -384,6 +385,17 @@ static void dln2_irq_bus_unlock(struct irq_data *irqd) mutex_unlock(&dln2->irq_lock); } +static const struct irq_chip dln2_irqchip = { + .name = "dln2-irq", + .irq_mask = dln2_irq_mask, + .irq_unmask = dln2_irq_unmask, + .irq_set_type = dln2_irq_set_type, + .irq_bus_lock = dln2_irq_bus_lock, + .irq_bus_sync_unlock = dln2_irq_bus_unlock, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + static void dln2_gpio_event(struct platform_device *pdev, u16 echo, const void *data, int len) { @@ -465,15 +477,8 @@ static int dln2_gpio_probe(struct platform_device *pdev) dln2->gpio.direction_output = dln2_gpio_direction_output; dln2->gpio.set_config = dln2_gpio_set_config; - dln2->irqchip.name = "dln2-irq", - dln2->irqchip.irq_mask = dln2_irq_mask, - dln2->irqchip.irq_unmask = dln2_irq_unmask, - dln2->irqchip.irq_set_type = dln2_irq_set_type, - dln2->irqchip.irq_bus_lock = dln2_irq_bus_lock, - dln2->irqchip.irq_bus_sync_unlock = dln2_irq_bus_unlock, - girq = &dln2->gpio.irq; - girq->chip = &dln2->irqchip; + gpio_irq_chip_set_chip(girq, &dln2_irqchip); /* The event comes from the outside so no parent handler */ girq->parent_handler = NULL; girq->num_parents = 0; diff --git a/drivers/gpio/gpio-dwapb.c b/drivers/gpio/gpio-dwapb.c index 04afe728e187..c22fcaa44a61 100644 --- a/drivers/gpio/gpio-dwapb.c +++ b/drivers/gpio/gpio-dwapb.c @@ -662,10 +662,9 @@ static int dwapb_get_clks(struct dwapb_gpio *gpio) gpio->clks[1].id = "db"; err = devm_clk_bulk_get_optional(gpio->dev, DWAPB_NR_CLOCKS, gpio->clks); - if (err) { - 
dev_err(gpio->dev, "Cannot get APB/Debounce clocks\n"); - return err; - } + if (err) + return dev_err_probe(gpio->dev, err, + "Cannot get APB/Debounce clocks\n"); err = clk_bulk_prepare_enable(DWAPB_NR_CLOCKS, gpio->clks); if (err) { diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c index df563616f943..bea0e32c195d 100644 --- a/drivers/gpio/gpio-grgpio.c +++ b/drivers/gpio/gpio-grgpio.c @@ -434,25 +434,13 @@ static int grgpio_probe(struct platform_device *ofdev) static int grgpio_remove(struct platform_device *ofdev) { struct grgpio_priv *priv = platform_get_drvdata(ofdev); - int i; - int ret = 0; - - if (priv->domain) { - for (i = 0; i < GRGPIO_MAX_NGPIO; i++) { - if (priv->uirqs[i].refcnt != 0) { - ret = -EBUSY; - goto out; - } - } - } gpiochip_remove(&priv->gc); if (priv->domain) irq_domain_remove(priv->domain); -out: - return ret; + return 0; } static const struct of_device_id grgpio_match[] = { diff --git a/drivers/gpio/gpio-merrifield.c b/drivers/gpio/gpio-merrifield.c index f3d1baeacbe9..72ac09a59702 100644 --- a/drivers/gpio/gpio-merrifield.c +++ b/drivers/gpio/gpio-merrifield.c @@ -220,10 +220,8 @@ static void mrfld_irq_ack(struct irq_data *d) raw_spin_unlock_irqrestore(&priv->lock, flags); } -static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask) +static void mrfld_irq_unmask_mask(struct mrfld_gpio *priv, u32 gpio, bool unmask) { - struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d); - u32 gpio = irqd_to_hwirq(d); void __iomem *gimr = gpio_reg(&priv->chip, gpio, GIMR); unsigned long flags; u32 value; @@ -241,12 +239,20 @@ static void mrfld_irq_unmask_mask(struct irq_data *d, bool unmask) static void mrfld_irq_mask(struct irq_data *d) { - mrfld_irq_unmask_mask(d, false); + struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d); + u32 gpio = irqd_to_hwirq(d); + + mrfld_irq_unmask_mask(priv, gpio, false); + gpiochip_disable_irq(&priv->chip, gpio); } static void mrfld_irq_unmask(struct irq_data *d) { - mrfld_irq_unmask_mask(d, true); + struct mrfld_gpio *priv = irq_data_get_irq_chip_data(d); + u32 gpio = irqd_to_hwirq(d); + + gpiochip_enable_irq(&priv->chip, gpio); + mrfld_irq_unmask_mask(priv, gpio, true); } static int mrfld_irq_set_type(struct irq_data *d, unsigned int type) @@ -329,13 +335,15 @@ static int mrfld_irq_set_wake(struct irq_data *d, unsigned int on) return 0; } -static struct irq_chip mrfld_irqchip = { +static const struct irq_chip mrfld_irqchip = { .name = "gpio-merrifield", .irq_ack = mrfld_irq_ack, .irq_mask = mrfld_irq_mask, .irq_unmask = mrfld_irq_unmask, .irq_set_type = mrfld_irq_set_type, .irq_set_wake = mrfld_irq_set_wake, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static void mrfld_irq_handler(struct irq_desc *desc) @@ -482,7 +490,7 @@ static int mrfld_gpio_probe(struct pci_dev *pdev, const struct pci_device_id *id return retval; girq = &priv->chip.irq; - girq->chip = &mrfld_irqchip; + gpio_irq_chip_set_chip(girq, &mrfld_irqchip); girq->init_hw = mrfld_irq_init_hw; girq->parent_handler = mrfld_irq_handler; girq->num_parents = 1; diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c index c5166cd47c9c..7f59e5d936c2 100644 --- a/drivers/gpio/gpio-mxs.c +++ b/drivers/gpio/gpio-mxs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ // -// MXC GPIO support. (c) 2008 Daniel Mack <daniel@caiaq.de> +// MXS GPIO support. 
(c) 2008 Daniel Mack <daniel@caiaq.de> // Copyright 2008 Juergen Beisert, kernel@pengutronix.de // // Based on code from Freescale, diff --git a/drivers/gpio/gpio-realtek-otto.c b/drivers/gpio/gpio-realtek-otto.c index c52b2cb1acae..63dcf42f7c20 100644 --- a/drivers/gpio/gpio-realtek-otto.c +++ b/drivers/gpio/gpio-realtek-otto.c @@ -172,6 +172,8 @@ static void realtek_gpio_irq_unmask(struct irq_data *data) unsigned long flags; u16 m; + gpiochip_enable_irq(&ctrl->gc, line); + raw_spin_lock_irqsave(&ctrl->lock, flags); m = ctrl->intr_mask[port]; m |= realtek_gpio_imr_bits(port_pin, REALTEK_GPIO_IMR_LINE_MASK); @@ -195,6 +197,8 @@ static void realtek_gpio_irq_mask(struct irq_data *data) ctrl->intr_mask[port] = m; realtek_gpio_write_imr(ctrl, port, ctrl->intr_type[port], m); raw_spin_unlock_irqrestore(&ctrl->lock, flags); + + gpiochip_disable_irq(&ctrl->gc, line); } static int realtek_gpio_irq_set_type(struct irq_data *data, unsigned int flow_type) @@ -315,13 +319,15 @@ static int realtek_gpio_irq_init(struct gpio_chip *gc) return 0; } -static struct irq_chip realtek_gpio_irq_chip = { +static const struct irq_chip realtek_gpio_irq_chip = { .name = "realtek-otto-gpio", .irq_ack = realtek_gpio_irq_ack, .irq_mask = realtek_gpio_irq_mask, .irq_unmask = realtek_gpio_irq_unmask, .irq_set_type = realtek_gpio_irq_set_type, .irq_set_affinity = realtek_gpio_irq_set_affinity, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static const struct of_device_id realtek_gpio_of_match[] = { @@ -404,7 +410,7 @@ static int realtek_gpio_probe(struct platform_device *pdev) irq = platform_get_irq_optional(pdev, 0); if (!(dev_flags & GPIO_INTERRUPTS_DISABLED) && irq > 0) { girq = &ctrl->gc.irq; - girq->chip = &realtek_gpio_irq_chip; + gpio_irq_chip_set_chip(girq, &realtek_gpio_irq_chip); girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; girq->parent_handler = realtek_gpio_irq_handler; diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c index acda4c5052d3..8a83f7bf4382 100644 --- a/drivers/gpio/gpio-sch.c +++ b/drivers/gpio/gpio-sch.c @@ -38,7 +38,6 @@ struct sch_gpio { struct gpio_chip chip; - struct irq_chip irqchip; spinlock_t lock; unsigned short iobase; unsigned short resume_base; @@ -218,11 +217,9 @@ static void sch_irq_ack(struct irq_data *d) spin_unlock_irqrestore(&sch->lock, flags); } -static void sch_irq_mask_unmask(struct irq_data *d, int val) +static void sch_irq_mask_unmask(struct gpio_chip *gc, irq_hw_number_t gpio_num, int val) { - struct gpio_chip *gc = irq_data_get_irq_chip_data(d); struct sch_gpio *sch = gpiochip_get_data(gc); - irq_hw_number_t gpio_num = irqd_to_hwirq(d); unsigned long flags; spin_lock_irqsave(&sch->lock, flags); @@ -232,14 +229,32 @@ static void sch_irq_mask_unmask(struct irq_data *d, int val) static void sch_irq_mask(struct irq_data *d) { - sch_irq_mask_unmask(d, 0); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t gpio_num = irqd_to_hwirq(d); + + sch_irq_mask_unmask(gc, gpio_num, 0); + gpiochip_disable_irq(gc, gpio_num); } static void sch_irq_unmask(struct irq_data *d) { - sch_irq_mask_unmask(d, 1); + struct gpio_chip *gc = irq_data_get_irq_chip_data(d); + irq_hw_number_t gpio_num = irqd_to_hwirq(d); + + gpiochip_enable_irq(gc, gpio_num); + sch_irq_mask_unmask(gc, gpio_num, 1); } +static const struct irq_chip sch_irqchip = { + .name = "sch_gpio", + .irq_ack = sch_irq_ack, + .irq_mask = sch_irq_mask, + .irq_unmask = sch_irq_unmask, + .irq_set_type = sch_irq_type, + .flags = IRQCHIP_IMMUTABLE, + 
GPIOCHIP_IRQ_RESOURCE_HELPERS, +}; + static u32 sch_gpio_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context) { struct sch_gpio *sch = context; @@ -367,14 +382,8 @@ static int sch_gpio_probe(struct platform_device *pdev) platform_set_drvdata(pdev, sch); - sch->irqchip.name = "sch_gpio"; - sch->irqchip.irq_ack = sch_irq_ack; - sch->irqchip.irq_mask = sch_irq_mask; - sch->irqchip.irq_unmask = sch_irq_unmask; - sch->irqchip.irq_set_type = sch_irq_type; - girq = &sch->chip.irq; - girq->chip = &sch->irqchip; + gpio_irq_chip_set_chip(girq, &sch_irqchip); girq->num_parents = 0; girq->parents = NULL; girq->parent_handler = NULL; diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c index 98cd715ccc33..8d09b619c166 100644 --- a/drivers/gpio/gpio-vr41xx.c +++ b/drivers/gpio/gpio-vr41xx.c @@ -217,8 +217,6 @@ static int giu_get_irq(unsigned int irq) printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n", maskl, pendl, maskh, pendh); - atomic_inc(&irq_err_count); - return -EINVAL; } diff --git a/drivers/gpio/gpio-wcove.c b/drivers/gpio/gpio-wcove.c index 16a0fae1e32e..c18b6b47384f 100644 --- a/drivers/gpio/gpio-wcove.c +++ b/drivers/gpio/gpio-wcove.c @@ -299,6 +299,8 @@ static void wcove_irq_unmask(struct irq_data *data) if (gpio >= WCOVE_GPIO_NUM) return; + gpiochip_enable_irq(chip, gpio); + wg->set_irq_mask = false; wg->update |= UPDATE_IRQ_MASK; } @@ -314,15 +316,19 @@ static void wcove_irq_mask(struct irq_data *data) wg->set_irq_mask = true; wg->update |= UPDATE_IRQ_MASK; + + gpiochip_disable_irq(chip, gpio); } -static struct irq_chip wcove_irqchip = { +static const struct irq_chip wcove_irqchip = { .name = "Whiskey Cove", .irq_mask = wcove_irq_mask, .irq_unmask = wcove_irq_unmask, .irq_set_type = wcove_irq_type, .irq_bus_lock = wcove_bus_lock, .irq_bus_sync_unlock = wcove_bus_sync_unlock, + .flags = IRQCHIP_IMMUTABLE, + GPIOCHIP_IRQ_RESOURCE_HELPERS, }; static irqreturn_t wcove_gpio_irq_handler(int irq, void *data) @@ -452,7 +458,7 @@ static int wcove_gpio_probe(struct platform_device *pdev) } girq = &wg->chip.irq; - girq->chip = &wcove_irqchip; + gpio_irq_chip_set_chip(girq, &wcove_irqchip); /* This will let us handle the parent IRQ in the driver */ girq->parent_handler = NULL; girq->num_parents = 0; diff --git a/drivers/gpio/gpio-winbond.c b/drivers/gpio/gpio-winbond.c index 7f8f5b02e31d..4b61d975cc0e 100644 --- a/drivers/gpio/gpio-winbond.c +++ b/drivers/gpio/gpio-winbond.c @@ -385,12 +385,13 @@ static int winbond_gpio_get(struct gpio_chip *gc, unsigned int offset) unsigned long *base = gpiochip_get_data(gc); const struct winbond_gpio_info *info; bool val; + int ret; winbond_gpio_get_info(&offset, &info); - val = winbond_sio_enter(*base); - if (val) - return val; + ret = winbond_sio_enter(*base); + if (ret) + return ret; winbond_sio_select_logical(*base, info->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 67abf8dcd30a..6b6d46e29e6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1918,9 +1918,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev, return -EINVAL; } - /* delete kgd_mem from kfd_bo_list to avoid re-validating - * this BO in BO's restoring after eviction. 
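/*
 * Editor's note on the gpio-winbond hunk above: the bug was holding an
 * int error code in a bool. Any nonzero errno collapses to true (1),
 * so the caller received 1 instead of the negative errno. The hazard
 * in isolation:
 */
static int foo_returns_one(void)
{
	bool val = -ENODEV; /* bool conversion: any nonzero becomes 1 */

	return val;         /* caller receives 1, not -ENODEV */
}

static int foo_returns_errno(void)
{
	int ret = -ENODEV;  /* int preserves the code */

	return ret;         /* caller receives -ENODEV */
}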
- */ mutex_lock(&mem->process_info->lock); ret = amdgpu_bo_reserve(bo, true); @@ -1943,7 +1940,6 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct amdgpu_device *adev, amdgpu_amdkfd_remove_eviction_fence( bo, mem->process_info->eviction_fence); - list_del_init(&mem->validate_list.head); if (size) *size = amdgpu_bo_size(bo); @@ -2512,12 +2508,15 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) process_info->eviction_fence = new_fence; *ef = dma_fence_get(&new_fence->base); - /* Attach new eviction fence to all BOs */ + /* Attach new eviction fence to all BOs except pinned ones */ list_for_each_entry(mem, &process_info->kfd_bo_list, - validate_list.head) + validate_list.head) { + if (mem->bo->tbo.pin_count) + continue; + amdgpu_bo_fence(mem->bo, &process_info->eviction_fence->base, true); - + } /* Attach eviction fence to PD / PT BOs */ list_for_each_entry(peer_vm, &process_info->vm_list_head, vm_list_node) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index ede2fa56f6c9..16699158e00d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -594,17 +594,20 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value) int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block) { int r; - r = amdgpu_ras_block_late_init(adev, ras_block); - if (r) - return r; if (amdgpu_ras_is_supported(adev, ras_block->block)) { if (!amdgpu_persistent_edc_harvesting_supported(adev)) amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX); + r = amdgpu_ras_block_late_init(adev, ras_block); + if (r) + return r; + r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0); if (r) goto late_fini; + } else { + amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0); } return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 798c56214a23..aebc384531ac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -518,6 +518,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(9, 1, 0): /* RENOIR looks like RAVEN */ case IP_VERSION(9, 3, 0): + /* GC 10.3.7 */ + case IP_VERSION(10, 3, 7): if (amdgpu_tmz == 0) { adev->gmc.tmz_enabled = false; dev_info(adev->dev, @@ -540,8 +542,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev) case IP_VERSION(10, 3, 1): /* YELLOW_CARP*/ case IP_VERSION(10, 3, 3): - /* GC 10.3.7 */ - case IP_VERSION(10, 3, 7): /* Don't enable it by default yet. */ if (amdgpu_tmz < 1) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 801f6fa692e9..6de63ea6687e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -642,7 +642,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) atomic64_read(&adev->visible_pin_size), vram_gtt.vram_size); vram_gtt.gtt_size = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT)->size; - vram_gtt.gtt_size *= PAGE_SIZE; vram_gtt.gtt_size -= atomic64_read(&adev->gart_pin_size); return copy_to_user(out, &vram_gtt, min((size_t)size, sizeof(vram_gtt))) ? 
-EFAULT : 0; @@ -675,7 +674,6 @@ int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) mem.cpu_accessible_vram.usable_heap_size * 3 / 4; mem.gtt.total_heap_size = gtt_man->size; - mem.gtt.total_heap_size *= PAGE_SIZE; mem.gtt.usable_heap_size = mem.gtt.total_heap_size - atomic64_read(&adev->gart_pin_size); mem.gtt.heap_usage = ttm_resource_manager_usage(gtt_man); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 2de9309a4193..dac202ae864d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -197,6 +197,13 @@ static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf, if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; + /* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */ + if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && + obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { + if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) + dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); + } + s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n", "ue", info.ue_count, "ce", info.ce_count); @@ -550,9 +557,10 @@ static ssize_t amdgpu_ras_sysfs_read(struct device *dev, if (amdgpu_ras_query_error_status(obj->adev, &info)) return -EINVAL; - if (obj->adev->asic_type == CHIP_ALDEBARAN) { + if (obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && + obj->adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { if (amdgpu_ras_reset_error_status(obj->adev, info.head.block)) - DRM_WARN("Failed to reset error counter and error status"); + dev_warn(obj->adev->dev, "Failed to reset error counter and error status"); } return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count, @@ -1027,9 +1035,6 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, } } - if (!amdgpu_persistent_edc_harvesting_supported(adev)) - amdgpu_ras_reset_error_status(adev, info->head.block); - return 0; } @@ -1149,6 +1154,12 @@ int amdgpu_ras_query_error_count(struct amdgpu_device *adev, if (res) return res; + if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && + adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { + if (amdgpu_ras_reset_error_status(adev, info.head.block)) + dev_warn(adev->dev, "Failed to reset error counter and error status"); + } + ce += info.ce_count; ue += info.ue_count; } @@ -1792,6 +1803,12 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) continue; amdgpu_ras_query_error_status(adev, &info); + + if (adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 2) && + adev->ip_versions[MP0_HWIP][0] != IP_VERSION(11, 0, 4)) { + if (amdgpu_ras_reset_error_status(adev, info.head.block)) + dev_warn(adev->dev, "Failed to reset error counter and error status"); + } } } @@ -2278,8 +2295,9 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) !amdgpu_ras_asic_supported(adev)) return; - if (!(amdgpu_sriov_vf(adev) && - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)))) + /* If driver run on sriov guest side, only enable ras for aldebaran */ + if (amdgpu_sriov_vf(adev) && + adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2)) return; if (!adev->gmc.xgmi.connected_to_cpu) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index be6f76a30ac6..3b4c19412625 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ 
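/*
 * Editor's note on the amdgpu_ras hunks above: the same IP-version
 * check is repeated at every query site because Vega20 (MP0 11.0.2)
 * and Arcturus (MP0 11.0.4) clear their error counters automatically
 * when read. A hypothetical helper capturing the repeated pattern:
 */
static void foo_ras_reset_status_if_needed(struct amdgpu_device *adev,
					   enum amdgpu_ras_block block)
{
	/* counters auto-reset after the query on these two parts */
	if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 2) ||
	    adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 4))
		return;

	if (amdgpu_ras_reset_error_status(adev, block))
		dev_warn(adev->dev,
			 "Failed to reset error counter and error status");
}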
b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1798,18 +1798,26 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); - /* Compute GTT size, either bsaed on 3/4th the size of RAM size + /* Compute GTT size, either based on 1/2 the size of RAM size * or whatever the user passed on module init */ if (amdgpu_gtt_size == -1) { struct sysinfo si; si_meminfo(&si); - gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->gmc.mc_vram_size), - ((uint64_t)si.totalram * si.mem_unit * 3/4)); - } - else + /* Certain GL unit tests for large textures can cause problems + * with the OOM killer since there is no way to link this memory + * to a process. This was originally mitigated (but not necessarily + * eliminated) by limiting the GTT size. The problem is this limit + * is often too low for many modern games so just make the limit 1/2 + * of system memory which aligns with TTM. The OOM accounting needs + * to be addressed, but we shouldn't prevent common 3D applications + * from being usable just to potentially mitigate that corner case. + */ + gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), + (u64)si.totalram * si.mem_unit / 2); + } else { gtt_size = (uint64_t)amdgpu_gtt_size << 20; + } /* Initialize GTT memory pool */ r = amdgpu_gtt_mgr_init(adev, gtt_size); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 2ceeaa4c793a..dc76d2b3ce52 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -679,6 +679,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, { struct amdgpu_vm_update_params params; struct amdgpu_vm_bo_base *entry; + bool flush_tlb_needed = false; int r, idx; if (list_empty(&vm->relocated)) @@ -697,6 +698,9 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, goto error; list_for_each_entry(entry, &vm->relocated, vm_status) { + /* vm_flush_needed after updating moved PDEs */ + flush_tlb_needed |= entry->moved; + r = amdgpu_vm_pde_update(¶ms, entry); if (r) goto error; @@ -706,8 +710,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, if (r) goto error; - /* vm_flush_needed after updating PDEs */ - atomic64_inc(&vm->tlb_seq); + if (flush_tlb_needed) + atomic64_inc(&vm->tlb_seq); while (!list_empty(&vm->relocated)) { entry = list_first_entry(&vm->relocated, @@ -789,6 +793,11 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, flush_tlb |= adev->gmc.xgmi.num_physical_nodes && adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 0); + /* + * On GFX8 and older any 8 PTE block with a valid bit set enters the TLB + */ + flush_tlb |= adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 0); + memset(¶ms, 0, sizeof(params)); params.adev = adev; params.vm = vm; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 8c0a3fc7aaa6..a4a6751b1e44 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1096,6 +1096,7 @@ static void gfx_v11_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_STS2); dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_IB_DBG1); dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_M0); + dst[(*no_fields)++] = wave_read_ind(adev, wave, ixSQ_WAVE_MODE); } static void gfx_v11_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd, @@ -1316,7 +1317,7 @@ static void 
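/*
 * Editor's note on the amdgpu_ttm GTT sizing above, in numbers: with
 * 32 GiB of system RAM, si.totalram * si.mem_unit / 2 is 16 GiB, which
 * wins over the AMDGPU_DEFAULT_GTT_SIZE_MB floor (assumed 3072 MiB
 * here), so gtt_size becomes 16 GiB. Standalone sketch of the same
 * computation:
 */
static u64 foo_default_gtt_size(u64 totalram_bytes)
{
	const u64 gtt_floor = (u64)3072 << 20; /* assumed default floor */

	return max(gtt_floor, totalram_bytes / 2);
}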
gfx_v11_0_rlc_backdoor_autoload_copy_ucode(struct amdgpu_device *ade memset(ptr + toc_offset + fw_size, 0, toc_fw_size - fw_size); if ((id != SOC21_FIRMWARE_ID_RS64_PFP) && (id != SOC21_FIRMWARE_ID_RS64_ME)) - *(uint64_t *)fw_autoload_mask |= 1 << id; + *(uint64_t *)fw_autoload_mask |= 1ULL << id; } static void gfx_v11_0_rlc_backdoor_autoload_copy_toc_ucode(struct amdgpu_device *adev, @@ -1983,7 +1984,7 @@ static int gfx_v11_0_init_csb(struct amdgpu_device *adev) return 0; } -void gfx_v11_0_rlc_stop(struct amdgpu_device *adev) +static void gfx_v11_0_rlc_stop(struct amdgpu_device *adev) { u32 tmp = RREG32_SOC15(GC, 0, regRLC_CNTL); @@ -6028,6 +6029,7 @@ static void gfx_v11_0_handle_priv_fault(struct amdgpu_device *adev, break; default: BUG(); + break; } } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index a0c0b7d9f444..7f4b480ae66e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -638,6 +638,12 @@ static int gmc_v11_0_mc_init(struct amdgpu_device *adev) adev->gmc.aper_base = pci_resource_start(adev->pdev, 0); adev->gmc.aper_size = pci_resource_len(adev->pdev, 0); +#ifdef CONFIG_X86_64 + if ((adev->flags & AMD_IS_APU) && !amdgpu_passthrough(adev)) { + adev->gmc.aper_base = adev->mmhub.funcs->get_mc_fb_offset(adev); + adev->gmc.aper_size = adev->gmc.real_vram_size; + } +#endif /* In case the PCI BAR is larger than the actual amount of vram */ adev->gmc.visible_vram_size = adev->gmc.aper_size; if (adev->gmc.visible_vram_size > adev->gmc.real_vram_size) diff --git a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c index 5d2dfeff8fe5..d63d3f2b8a16 100644 --- a/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/imu_v11_0.c @@ -299,7 +299,7 @@ static const struct imu_rlc_ram_golden imu_rlc_ram_golden_11_0_2[] = IMU_RLC_RAM_GOLDEN_VALUE(GC, 0, regCPG_PSP_DEBUG, CPG_PSP_DEBUG__GPA_OVERRIDE_MASK, 0) }; -void program_imu_rlc_ram(struct amdgpu_device *adev, +static void program_imu_rlc_ram(struct amdgpu_device *adev, const struct imu_rlc_ram_golden *regs, const u32 array_size) { diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c index d2722adabd1b..f3c1af5130ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c @@ -535,6 +535,10 @@ void jpeg_v2_0_dec_ring_emit_ib(struct amdgpu_ring *ring, { unsigned vmid = AMDGPU_JOB_GET_VMID(job); + amdgpu_ring_write(ring, PACKETJ(mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET, + 0, 0, PACKETJ_TYPE0)); + amdgpu_ring_write(ring, (vmid << JPEG_IH_CTRL__IH_VMID__SHIFT)); + amdgpu_ring_write(ring, PACKETJ(mmUVD_LMI_JRBC_IB_VMID_INTERNAL_OFFSET, 0, 0, PACKETJ_TYPE0)); amdgpu_ring_write(ring, (vmid | (vmid << 4))); @@ -768,7 +772,7 @@ static const struct amdgpu_ring_funcs jpeg_v2_0_dec_ring_vm_funcs = { 8 + /* jpeg_v2_0_dec_ring_emit_vm_flush */ 18 + 18 + /* jpeg_v2_0_dec_ring_emit_fence x2 vm fence */ 8 + 16, - .emit_ib_size = 22, /* jpeg_v2_0_dec_ring_emit_ib */ + .emit_ib_size = 24, /* jpeg_v2_0_dec_ring_emit_ib */ .emit_ib = jpeg_v2_0_dec_ring_emit_ib, .emit_fence = jpeg_v2_0_dec_ring_emit_fence, .emit_vm_flush = jpeg_v2_0_dec_ring_emit_vm_flush, diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h index 1a03baa59755..654e43e83e2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.h @@ -41,6 +41,7 @@ #define mmUVD_JRBC_RB_REF_DATA_INTERNAL_OFFSET 0x4084 #define 
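/*
 * Editor's note on the gfx_v11_0 autoload-mask fix above: the mask is
 * 64 bits wide, but "1 << id" shifts a 32-bit int, which is undefined
 * behaviour for id >= 31 and silently drops high firmware IDs. The two
 * forms side by side:
 */
static void foo_set_fw_bit(uint64_t *mask, unsigned int id)
{
	/* *mask |= 1 << id;     broken: int shift, UB for id >= 31 */
	*mask |= 1ULL << id;  /* correct: the shift is done in 64 bits */
}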
mmUVD_JRBC_STATUS_INTERNAL_OFFSET 0x4089 #define mmUVD_JPEG_PITCH_INTERNAL_OFFSET 0x401f +#define mmUVD_JPEG_IH_CTRL_INTERNAL_OFFSET 0x4149 #define JRBC_DEC_EXTERNAL_REG_WRITE_ADDR 0x18000 diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index fcf51947bb18..7eee004cf3ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -541,7 +541,7 @@ static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable) /* This function is for backdoor MES firmware */ static int mes_v11_0_load_microcode(struct amdgpu_device *adev, - enum admgpu_mes_pipe pipe) + enum admgpu_mes_pipe pipe, bool prime_icache) { int r; uint32_t data; @@ -593,16 +593,18 @@ static int mes_v11_0_load_microcode(struct amdgpu_device *adev, /* Set 0x3FFFF (256K-1) to CP_MES_MDBOUND_LO */ WREG32_SOC15(GC, 0, regCP_MES_MDBOUND_LO, 0x3FFFF); - /* invalidate ICACHE */ - data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); - data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0); - data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1); - WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); - - /* prime the ICACHE. */ - data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); - data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1); - WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); + if (prime_icache) { + /* invalidate ICACHE */ + data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); + data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 0); + data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, INVALIDATE_CACHE, 1); + WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); + + /* prime the ICACHE. */ + data = RREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL); + data = REG_SET_FIELD(data, CP_MES_IC_OP_CNTL, PRIME_ICACHE, 1); + WREG32_SOC15(GC, 0, regCP_MES_IC_OP_CNTL, data); + } soc21_grbm_select(adev, 0, 0, 0, 0); mutex_unlock(&adev->srbm_mutex); @@ -1044,17 +1046,19 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev) int r = 0; if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { - r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE); + + r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE, false); if (r) { - DRM_ERROR("failed to load MES kiq fw, r=%d\n", r); + DRM_ERROR("failed to load MES fw, r=%d\n", r); return r; } - r = mes_v11_0_load_microcode(adev, AMDGPU_MES_SCHED_PIPE); + r = mes_v11_0_load_microcode(adev, AMDGPU_MES_KIQ_PIPE, true); if (r) { - DRM_ERROR("failed to load MES fw, r=%d\n", r); + DRM_ERROR("failed to load MES kiq fw, r=%d\n", r); return r; } + } mes_v11_0_enable(adev, true); @@ -1086,7 +1090,7 @@ static int mes_v11_0_hw_init(void *handle) if (!adev->enable_mes_kiq) { if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT) { r = mes_v11_0_load_microcode(adev, - AMDGPU_MES_SCHED_PIPE); + AMDGPU_MES_SCHED_PIPE, true); if (r) { DRM_ERROR("failed to MES fw, r=%d\n", r); return r; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index d016e3c3e221..b3fba8dea63c 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -170,6 +170,7 @@ static const struct amdgpu_video_codec_info yc_video_codecs_decode_array[] = { {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, }; static const struct amdgpu_video_codecs 
yc_video_codecs_decode = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c index 06b2635b142a..83c6ccaaa9e4 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_2.c @@ -469,6 +469,7 @@ static void sdma_v5_2_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 se } } + /** * sdma_v5_2_gfx_stop - stop the gfx async dma engines * @@ -514,21 +515,17 @@ static void sdma_v5_2_rlc_stop(struct amdgpu_device *adev) } /** - * sdma_v5_2_ctx_switch_enable_for_instance - start the async dma engines - * context switch for an instance + * sdma_v5_2_ctx_switch_enable - stop the async dma engines context switch * * @adev: amdgpu_device pointer - * @instance_idx: the index of the SDMA instance + * @enable: enable/disable the DMA MEs context switch. * - * Unhalt the async dma engines context switch. + * Halt or unhalt the async dma engines context switch. */ -static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, int instance_idx) +static void sdma_v5_2_ctx_switch_enable(struct amdgpu_device *adev, bool enable) { u32 f32_cntl, phase_quantum = 0; - - if (WARN_ON(instance_idx >= adev->sdma.num_instances)) { - return; - } + int i; if (amdgpu_sdma_phase_quantum) { unsigned value = amdgpu_sdma_phase_quantum; @@ -552,68 +549,50 @@ static void sdma_v5_2_ctx_switch_enable_for_instance(struct amdgpu_device *adev, phase_quantum = value << SDMA0_PHASE0_QUANTUM__VALUE__SHIFT | unit << SDMA0_PHASE0_QUANTUM__UNIT__SHIFT; - - WREG32_SOC15_IP(GC, - sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE0_QUANTUM), - phase_quantum); - WREG32_SOC15_IP(GC, - sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE1_QUANTUM), - phase_quantum); - WREG32_SOC15_IP(GC, - sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_PHASE2_QUANTUM), - phase_quantum); } - if (!amdgpu_sriov_vf(adev)) { - f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL)); - f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, - AUTO_CTXSW_ENABLE, 1); - WREG32(sdma_v5_2_get_reg_offset(adev, instance_idx, mmSDMA0_CNTL), f32_cntl); + for (i = 0; i < adev->sdma.num_instances; i++) { + if (enable && amdgpu_sdma_phase_quantum) { + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE0_QUANTUM), + phase_quantum); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE1_QUANTUM), + phase_quantum); + WREG32_SOC15_IP(GC, sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_PHASE2_QUANTUM), + phase_quantum); + } + + if (!amdgpu_sriov_vf(adev)) { + f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, + AUTO_CTXSW_ENABLE, enable ? 1 : 0); + WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); + } } + } /** - * sdma_v5_2_ctx_switch_disable_all - stop the async dma engines context switch + * sdma_v5_2_enable - stop the async dma engines * * @adev: amdgpu_device pointer + * @enable: enable/disable the DMA MEs. * - * Halt the async dma engines context switch. + * Halt or unhalt the async dma engines. 
*/ -static void sdma_v5_2_ctx_switch_disable_all(struct amdgpu_device *adev) +static void sdma_v5_2_enable(struct amdgpu_device *adev, bool enable) { u32 f32_cntl; int i; - if (amdgpu_sriov_vf(adev)) - return; - - for (i = 0; i < adev->sdma.num_instances; i++) { - f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL)); - f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_CNTL, - AUTO_CTXSW_ENABLE, 0); - WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_CNTL), f32_cntl); + if (!enable) { + sdma_v5_2_gfx_stop(adev); + sdma_v5_2_rlc_stop(adev); } -} - -/** - * sdma_v5_2_halt - stop the async dma engines - * - * @adev: amdgpu_device pointer - * - * Halt the async dma engines. - */ -static void sdma_v5_2_halt(struct amdgpu_device *adev) -{ - int i; - u32 f32_cntl; - - sdma_v5_2_gfx_stop(adev); - sdma_v5_2_rlc_stop(adev); if (!amdgpu_sriov_vf(adev)) { for (i = 0; i < adev->sdma.num_instances; i++) { f32_cntl = RREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL)); - f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, 1); + f32_cntl = REG_SET_FIELD(f32_cntl, SDMA0_F32_CNTL, HALT, enable ? 0 : 1); WREG32(sdma_v5_2_get_reg_offset(adev, i, mmSDMA0_F32_CNTL), f32_cntl); } } @@ -625,9 +604,6 @@ static void sdma_v5_2_halt(struct amdgpu_device *adev) * @adev: amdgpu_device pointer * * Set up the gfx DMA ring buffers and enable them. - * It assumes that the dma engine is stopped for each instance. - * The function enables the engine and preemptions sequentially for each instance. - * * Returns 0 for success, error for failure. */ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) @@ -769,7 +745,10 @@ static int sdma_v5_2_gfx_resume(struct amdgpu_device *adev) ring->sched.ready = true; - sdma_v5_2_ctx_switch_enable_for_instance(adev, i); + if (amdgpu_sriov_vf(adev)) { /* bare-metal sequence doesn't need below to lines */ + sdma_v5_2_ctx_switch_enable(adev, true); + sdma_v5_2_enable(adev, true); + } r = amdgpu_ring_test_ring(ring); if (r) { @@ -813,7 +792,7 @@ static int sdma_v5_2_load_microcode(struct amdgpu_device *adev) int i, j; /* halt the MEs */ - sdma_v5_2_halt(adev); + sdma_v5_2_enable(adev, false); for (i = 0; i < adev->sdma.num_instances; i++) { if (!adev->sdma.instance[i].fw) @@ -885,8 +864,8 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) int r = 0; if (amdgpu_sriov_vf(adev)) { - sdma_v5_2_ctx_switch_disable_all(adev); - sdma_v5_2_halt(adev); + sdma_v5_2_ctx_switch_enable(adev, false); + sdma_v5_2_enable(adev, false); /* set RB registers */ r = sdma_v5_2_gfx_resume(adev); @@ -910,10 +889,12 @@ static int sdma_v5_2_start(struct amdgpu_device *adev) amdgpu_gfx_off_ctrl(adev, false); sdma_v5_2_soft_reset(adev); + /* unhalt the MEs */ + sdma_v5_2_enable(adev, true); + /* enable sdma ring preemption */ + sdma_v5_2_ctx_switch_enable(adev, true); - /* Soft reset supposes to disable the dma engine and preemption. - * Now start the gfx rings and rlc compute queues. 
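/*
 * Editor's note on the sdma_v5_2 rework above: two single-purpose halt
 * helpers (halt and ctx_switch_disable_all) become enable(bool)-style
 * functions that loop over every instance. Reduced hypothetical shape
 * of the pattern:
 */
static void foo_sdma_enable(struct foo_device *fdev, bool enable)
{
	int i;

	if (!enable)
		foo_sdma_stop_queues(fdev); /* hypothetical: quiesce first */

	for (i = 0; i < fdev->num_instances; i++)
		foo_sdma_set_halt(fdev, i, enable ? 0 : 1); /* HALT bit */
}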
- */ + /* start the gfx rings and rlc compute queues */ r = sdma_v5_2_gfx_resume(adev); if (adev->in_s0ix) amdgpu_gfx_off_ctrl(adev, true); @@ -1447,8 +1428,8 @@ static int sdma_v5_2_hw_fini(void *handle) if (amdgpu_sriov_vf(adev)) return 0; - sdma_v5_2_ctx_switch_disable_all(adev); - sdma_v5_2_halt(adev); + sdma_v5_2_ctx_switch_enable(adev, false); + sdma_v5_2_enable(adev, false); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 3cabceee5f57..39405f0db824 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -1761,23 +1761,21 @@ static const struct amdgpu_ring_funcs vcn_v3_0_dec_sw_ring_vm_funcs = { .emit_reg_write_reg_wait = amdgpu_ring_emit_reg_write_reg_wait_helper, }; -static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p, - struct amdgpu_job *job) +static int vcn_v3_0_limit_sched(struct amdgpu_cs_parser *p) { struct drm_gpu_scheduler **scheds; /* The create msg must be in the first IB submitted */ - if (atomic_read(&job->base.entity->fence_seq)) + if (atomic_read(&p->entity->fence_seq)) return -EINVAL; scheds = p->adev->gpu_sched[AMDGPU_HW_IP_VCN_DEC] [AMDGPU_RING_PRIO_DEFAULT].sched; - drm_sched_entity_modify_sched(job->base.entity, scheds, 1); + drm_sched_entity_modify_sched(p->entity, scheds, 1); return 0; } -static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, - uint64_t addr) +static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, uint64_t addr) { struct ttm_operation_ctx ctx = { false, false }; struct amdgpu_bo_va_mapping *map; @@ -1848,7 +1846,7 @@ static int vcn_v3_0_dec_msg(struct amdgpu_cs_parser *p, struct amdgpu_job *job, if (create[0] == 0x7 || create[0] == 0x10 || create[0] == 0x11) continue; - r = vcn_v3_0_limit_sched(p, job); + r = vcn_v3_0_limit_sched(p); if (r) goto out; } @@ -1862,7 +1860,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, struct amdgpu_job *job, struct amdgpu_ib *ib) { - struct amdgpu_ring *ring = to_amdgpu_ring(job->base.sched); + struct amdgpu_ring *ring = to_amdgpu_ring(p->entity->rq->sched); uint32_t msg_lo = 0, msg_hi = 0; unsigned i; int r; @@ -1881,8 +1879,7 @@ static int vcn_v3_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p, msg_hi = val; } else if (reg == PACKET0(p->adev->vcn.internal.cmd, 0) && val == 0) { - r = vcn_v3_0_dec_msg(p, job, - ((u64)msg_hi) << 32 | msg_lo); + r = vcn_v3_0_dec_msg(p, ((u64)msg_hi) << 32 | msg_lo); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 5e9adbc71bbd..cbfb32b3d235 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -1516,6 +1516,8 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info); break; case IP_VERSION(10, 3, 3): + case IP_VERSION(10, 3, 6): /* TODO: Double check these on production silicon */ + case IP_VERSION(10, 3, 7): /* TODO: Double check these on production silicon */ pcache_info = yellow_carp_cache_info; num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 8667e3df2d0b..bf4200457772 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -73,6 +73,8 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) case IP_VERSION(4, 1, 2):/* RENOIR */ case IP_VERSION(5, 2, 1):/* VANGOGH */ case 
IP_VERSION(5, 2, 3):/* YELLOW_CARP */ + case IP_VERSION(5, 2, 6):/* GC 10.3.6 */ + case IP_VERSION(5, 2, 7):/* GC 10.3.7 */ case IP_VERSION(6, 0, 1): kfd->device_info.num_sdma_queues_per_engine = 2; break; @@ -127,6 +129,8 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) case IP_VERSION(9, 4, 2): /* ALDEBARAN */ case IP_VERSION(10, 3, 1): /* VANGOGH */ case IP_VERSION(10, 3, 3): /* YELLOW_CARP */ + case IP_VERSION(10, 3, 6): /* GC 10.3.6 */ + case IP_VERSION(10, 3, 7): /* GC 10.3.7 */ case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */ case IP_VERSION(10, 1, 4): case IP_VERSION(10, 1, 10): /* NAVI10 */ @@ -178,7 +182,9 @@ static void kfd_device_info_init(struct kfd_dev *kfd, if (gc_version < IP_VERSION(11, 0, 0)) { /* Navi2x+, Navi1x+ */ - if (gc_version >= IP_VERSION(10, 3, 0)) + if (gc_version == IP_VERSION(10, 3, 6)) + kfd->device_info.no_atomic_fw_version = 14; + else if (gc_version >= IP_VERSION(10, 3, 0)) kfd->device_info.no_atomic_fw_version = 92; else if (gc_version >= IP_VERSION(10, 1, 1)) kfd->device_info.no_atomic_fw_version = 145; @@ -368,6 +374,16 @@ struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf) if (!vf) f2g = &gfx_v10_3_kfd2kgd; break; + case IP_VERSION(10, 3, 6): + gfx_target_version = 100306; + if (!vf) + f2g = &gfx_v10_3_kfd2kgd; + break; + case IP_VERSION(10, 3, 7): + gfx_target_version = 100307; + if (!vf) + f2g = &gfx_v10_3_kfd2kgd; + break; case IP_VERSION(11, 0, 0): gfx_target_version = 110000; f2g = &gfx_v11_kfd2kgd; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 997650d597ec..e44376c2ecdc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -296,7 +296,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, dma_addr_t *scratch) { - uint64_t npages = migrate->cpages; + uint64_t npages = migrate->npages; struct device *dev = adev->dev; struct amdgpu_res_cursor cursor; dma_addr_t *src; @@ -344,7 +344,7 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, mfence); if (r) goto out_free_vram_pages; - amdgpu_res_next(&cursor, j << PAGE_SHIFT); + amdgpu_res_next(&cursor, (j + 1) << PAGE_SHIFT); j = 0; } else { amdgpu_res_next(&cursor, PAGE_SIZE); @@ -590,7 +590,7 @@ svm_migrate_copy_to_ram(struct amdgpu_device *adev, struct svm_range *prange, continue; } src[i] = svm_migrate_addr(adev, spage); - if (i > 0 && src[i] != src[i - 1] + PAGE_SIZE) { + if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) { r = svm_migrate_copy_memory_gart(adev, dst + i - j, src + i - j, j, FROM_VRAM_TO_RAM, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 2ebf0132c25b..7b332246eda3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1295,7 +1295,7 @@ svm_range_map_to_gpu(struct kfd_process_device *pdd, struct svm_range *prange, r = amdgpu_vm_update_range(adev, vm, false, false, flush_tlb, NULL, last_start, prange->start + i, pte_flags, - last_start - prange->start, + (last_start - prange->start) << PAGE_SHIFT, bo_adev ? 
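/*
 * Editor's note on the kfd_migrate hunks above: j counts pages batched
 * into one contiguous GART copy and resets to 0 after each flush, so
 * the contiguity test src[i] != src[i - 1] + PAGE_SIZE is only
 * meaningful while j > 0; guarding it with i > 0 alone could compare
 * against a page outside the run being built. Hedged sketch of the
 * batching shape (foo_flush_copy() is hypothetical):
 */
static void foo_copy_in_runs(dma_addr_t *dst, dma_addr_t *src,
			     unsigned long npages)
{
	unsigned long i, j;

	for (i = 0, j = 0; i < npages; i++) {
		/* only compare inside the current run (j > 0) */
		if (j > 0 && src[i] != src[i - 1] + PAGE_SIZE) {
			foo_flush_copy(dst + i - j, src + i - j, j);
			j = 0; /* start a fresh contiguous run */
		}
		j++;
	}
	if (j)
		foo_flush_copy(dst + i - j, src + i - j, j);
}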
bo_adev->vm_manager.vram_base_offset : 0, NULL, dma_addr, &vm->last_update); @@ -2307,6 +2307,8 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, if (range->event == MMU_NOTIFY_RELEASE) return true; + if (!mmget_not_zero(mni->mm)) + return true; start = mni->interval_tree.start; last = mni->interval_tree.last; @@ -2333,6 +2335,7 @@ svm_range_cpu_invalidate_pagetables(struct mmu_interval_notifier *mni, } svm_range_unlock(prange); + mmput(mni->mm); return true; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 70be67a56673..39b425d83bb1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2812,7 +2812,7 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) { - u32 max_cll, min_cll, max, min, q, r; + u32 max_avg, min_cll, max, min, q, r; struct amdgpu_dm_backlight_caps *caps; struct amdgpu_display_manager *dm; struct drm_connector *conn_base; @@ -2842,7 +2842,7 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) caps = &dm->backlight_caps[i]; caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; caps->aux_support = false; - max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll; + max_avg = conn_base->hdr_sink_metadata.hdmi_type1.max_fall; min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll; if (caps->ext_caps->bits.oled == 1 /*|| @@ -2870,8 +2870,8 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) * The results of the above expressions can be verified at * pre_computed_values. */ - q = max_cll >> 5; - r = max_cll % 32; + q = max_avg >> 5; + r = max_avg % 32; max = (1 << q) * pre_computed_values[r]; // min luminance: maxLum * (CV/255)^2 / 100 diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index ceb34376decb..bca5f01da763 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -287,8 +287,11 @@ static void dcn31_enable_pme_wa(struct clk_mgr *clk_mgr_base) void dcn31_init_clocks(struct clk_mgr *clk_mgr) { + uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz; + memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks)); // Assumption is that boot state always supports pstate + clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk; // restore ref_dtbclk clk_mgr->clks.p_state_change_support = true; clk_mgr->clks.prev_p_state_change_support = true; clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN; @@ -638,8 +641,14 @@ static void dcn31_set_low_power_state(struct clk_mgr *clk_mgr_base) } } +int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base) +{ + return clk_mgr_base->clks.ref_dtbclk_khz; +} + static struct clk_mgr_funcs dcn31_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn31_update_clocks, .init_clocks = dcn31_init_clocks, .enable_pme_wa = dcn31_enable_pme_wa, @@ -719,7 +728,7 @@ void dcn31_clk_mgr_construct( } clk_mgr->base.base.dprefclk_khz = 600000; - clk_mgr->base.dccg->ref_dtbclk_khz = 600000; + clk_mgr->base.base.clks.ref_dtbclk_khz = 600000; dce_clock_read_ss_info(&clk_mgr->base); /*if bios enabled SS, driver needs to adjust dtb clock, only enable with correct bios*/ 
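/*
 * Editor's note on the kfd_svm notifier fix above: it follows the
 * standard pattern for touching mni->mm from an mmu_interval_notifier
 * callback: pin the mm_struct with mmget_not_zero() and bail out if
 * the process is already exiting, then mmput() when done. Minimal
 * sketch:
 */
static bool foo_invalidate(struct mmu_interval_notifier *mni,
			   const struct mmu_notifier_range *range,
			   unsigned long cur_seq)
{
	if (!mmget_not_zero(mni->mm))
		return true; /* mm is going away; nothing to do */

	/* ... walk and invalidate ranges under mni->mm ... */

	mmput(mni->mm);
	return true;
}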
//clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(clk_mgr_internal, clk_mgr->base.base.dprefclk_khz); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h index 961b10a49486..be06fdbd0c22 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.h @@ -51,6 +51,8 @@ void dcn31_clk_mgr_construct(struct dc_context *ctx, struct pp_smu_funcs *pp_smu, struct dccg *dccg); +int dcn31_get_dtb_ref_freq_khz(struct clk_mgr *clk_mgr_base); + void dcn31_clk_mgr_destroy(struct clk_mgr_internal *clk_mgr_int); #endif //__DCN31_CLK_MGR_H__ diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c index a2ade6e93f5e..f4381725b210 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn315/dcn315_clk_mgr.c @@ -41,9 +41,7 @@ #include "dc_dmub_srv.h" -#if defined (CONFIG_DRM_AMD_DC_DP2_0) #include "dc_link_dp.h" -#endif #define TO_CLK_MGR_DCN315(clk_mgr)\ container_of(clk_mgr, struct clk_mgr_dcn315, base) @@ -552,7 +550,7 @@ static void dcn315_clk_mgr_helper_populate_bw_params( if (!bw_params->clk_table.entries[i].dtbclk_mhz) bw_params->clk_table.entries[i].dtbclk_mhz = def_max.dtbclk_mhz; } - ASSERT(bw_params->clk_table.entries[i].dcfclk_mhz); + ASSERT(bw_params->clk_table.entries[i-1].dcfclk_mhz); bw_params->vram_type = bios_info->memory_type; bw_params->num_channels = bios_info->ma_channel_number; if (!bw_params->num_channels) @@ -580,6 +578,7 @@ static void dcn315_enable_pme_wa(struct clk_mgr *clk_mgr_base) static struct clk_mgr_funcs dcn315_funcs = { .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn315_update_clocks, .init_clocks = dcn31_init_clocks, .enable_pme_wa = dcn315_enable_pme_wa, @@ -656,9 +655,9 @@ void dcn315_clk_mgr_construct( clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = dcn315_smu_get_dpref_clk(&clk_mgr->base); - clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; + clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; dce_clock_read_ss_info(&clk_mgr->base); - clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); + clk_mgr->base.base.clks.ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz); clk_mgr->base.base.bw_params = &dcn315_bw_params; diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c index fc3af81ed6c6..e4bb9c6193b5 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c @@ -571,6 +571,7 @@ static void dcn316_clk_mgr_helper_populate_bw_params( static struct clk_mgr_funcs dcn316_funcs = { .enable_pme_wa = dcn316_enable_pme_wa, .get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz, + .get_dtb_ref_clk_frequency = dcn31_get_dtb_ref_freq_khz, .update_clocks = dcn316_update_clocks, .init_clocks = dcn31_init_clocks, .are_clock_states_equal = dcn31_are_clock_states_equal, @@ -685,7 +686,7 @@ void dcn316_clk_mgr_construct( clk_mgr->base.base.dprefclk_khz = 600000; clk_mgr->base.base.dprefclk_khz = 
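/*
 * Editor's note on the dcn31_init_clocks hunk above: the struct-wide
 * memset() would wipe the boot-time DTB reference clock along with
 * everything else, so the one field that must survive is saved first
 * and written back. The general shape:
 */
static void foo_reset_clocks(struct dc_clocks *clks)
{
	int saved_ref_dtbclk = clks->ref_dtbclk_khz; /* set once at boot */

	memset(clks, 0, sizeof(*clks));
	clks->ref_dtbclk_khz = saved_ref_dtbclk;     /* restore after wipe */
}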
dcn316_smu_get_dpref_clk(&clk_mgr->base); - clk_mgr->base.dccg->ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; + clk_mgr->base.base.clks.ref_dtbclk_khz = clk_mgr->base.base.dprefclk_khz; dce_clock_read_ss_info(&clk_mgr->base); /*clk_mgr->base.dccg->ref_dtbclk_khz = dce_adjust_dp_ref_freq_for_ss(&clk_mgr->base, clk_mgr->base.base.dprefclk_khz);*/ diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index dc30ac366a50..d8eee89e63ce 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -114,8 +114,8 @@ static const struct dc_link_settings fail_safe_link_settings = { static bool decide_fallback_link_setting( struct dc_link *link, - struct dc_link_settings initial_link_settings, - struct dc_link_settings *current_link_setting, + struct dc_link_settings *max, + struct dc_link_settings *cur, enum link_training_result training_result); static void maximize_lane_settings(const struct link_training_settings *lt_settings, struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); @@ -944,7 +944,7 @@ static void override_lane_settings(const struct link_training_settings *lt_setti return; - for (lane = 1; lane < LANE_COUNT_DP_MAX; lane++) { + for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { if (lt_settings->voltage_swing) lane_settings[lane].VOLTAGE_SWING = *lt_settings->voltage_swing; if (lt_settings->pre_emphasis) @@ -2784,6 +2784,7 @@ bool perform_link_training_with_retries( enum dp_panel_mode panel_mode = dp_get_panel_mode(link); enum link_training_result status = LINK_TRAINING_CR_FAIL_LANE0; struct dc_link_settings cur_link_settings = *link_setting; + struct dc_link_settings max_link_settings = *link_setting; const struct link_hwss *link_hwss = get_link_hwss(link, &pipe_ctx->link_res); int fail_count = 0; bool is_link_bw_low = false; /* link bandwidth < stream bandwidth */ @@ -2793,7 +2794,6 @@ bool perform_link_training_with_retries( dp_trace_commit_lt_init(link); - if (dp_get_link_encoding_format(&cur_link_settings) == DP_8b_10b_ENCODING) /* We need to do this before the link training to ensure the idle * pattern in SST mode will be sent right after the link training @@ -2909,19 +2909,15 @@ bool perform_link_training_with_retries( uint32_t req_bw; uint32_t link_bw; - decide_fallback_link_setting(link, *link_setting, &cur_link_settings, status); - /* Flag if reduced link bandwidth no longer meets stream requirements or fallen back to - * minimum link bandwidth. + decide_fallback_link_setting(link, &max_link_settings, + &cur_link_settings, status); + /* Fail link training if reduced link bandwidth no longer meets + * stream requirements. 
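/*
 * Editor's note on the dcn315 ASSERT fix above: after a loop of the
 * form "for (i = 0; i < n; i++)", i equals n, so entries[i] indexes
 * one past the last populated slot and entries[i - 1] is the final
 * valid entry. In isolation (foo_compute() is hypothetical):
 */
static void foo_check_last(unsigned int *entries, int n)
{
	int i;

	for (i = 0; i < n; i++)
		entries[i] = foo_compute(i);

	/* here i == n: entries[i] is out of range, entries[i - 1] is not */
	ASSERT(entries[i - 1] != 0);
}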
*/ req_bw = dc_bandwidth_in_kbps_from_timing(&stream->timing); link_bw = dc_link_bandwidth_kbps(link, &cur_link_settings); - is_link_bw_low = (req_bw > link_bw); - is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) && - (cur_link_settings.lane_count <= LANE_COUNT_ONE)); - - if (is_link_bw_low) - DC_LOG_WARNING("%s: Link bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n", - __func__, req_bw, link_bw); + if (req_bw > link_bw) + break; } msleep(delay_between_attempts); @@ -3309,7 +3305,7 @@ static bool dp_verify_link_cap( int *fail_count) { struct dc_link_settings cur_link_settings = {0}; - struct dc_link_settings initial_link_settings = *known_limit_link_setting; + struct dc_link_settings max_link_settings = *known_limit_link_setting; bool success = false; bool skip_video_pattern; enum clock_source_id dp_cs_id = get_clock_source_id(link); @@ -3318,7 +3314,7 @@ static bool dp_verify_link_cap( struct link_resource link_res; memset(&irq_data, 0, sizeof(irq_data)); - cur_link_settings = initial_link_settings; + cur_link_settings = max_link_settings; /* Grant extended timeout request */ if ((link->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) && (link->dpcd_caps.lttpr_caps.max_ext_timeout > 0)) { @@ -3361,7 +3357,7 @@ static bool dp_verify_link_cap( dp_trace_lt_result_update(link, status, true); dp_disable_link_phy(link, &link_res, link->connector_signal); } while (!success && decide_fallback_link_setting(link, - initial_link_settings, &cur_link_settings, status)); + &max_link_settings, &cur_link_settings, status)); link->verified_link_cap = success ? cur_link_settings : fail_safe_link_settings; @@ -3596,16 +3592,19 @@ static bool decide_fallback_link_setting_max_bw_policy( */ static bool decide_fallback_link_setting( struct dc_link *link, - struct dc_link_settings initial_link_settings, - struct dc_link_settings *current_link_setting, + struct dc_link_settings *max, + struct dc_link_settings *cur, enum link_training_result training_result) { - if (!current_link_setting) + if (!cur) + return false; + if (!max) return false; - if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING || + + if (dp_get_link_encoding_format(max) == DP_128b_132b_ENCODING || link->dc->debug.force_dp2_lt_fallback_method) - return decide_fallback_link_setting_max_bw_policy(link, &initial_link_settings, - current_link_setting, training_result); + return decide_fallback_link_setting_max_bw_policy(link, max, cur, + training_result); switch (training_result) { case LINK_TRAINING_CR_FAIL_LANE0: @@ -3613,28 +3612,18 @@ static bool decide_fallback_link_setting( case LINK_TRAINING_CR_FAIL_LANE23: case LINK_TRAINING_LQA_FAIL: { - if (!reached_minimum_link_rate - (current_link_setting->link_rate)) { - current_link_setting->link_rate = - reduce_link_rate( - current_link_setting->link_rate); - } else if (!reached_minimum_lane_count - (current_link_setting->lane_count)) { - current_link_setting->link_rate = - initial_link_settings.link_rate; + if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + } else if (!reached_minimum_lane_count(cur->lane_count)) { + cur->link_rate = max->link_rate; if (training_result == LINK_TRAINING_CR_FAIL_LANE0) return false; else if (training_result == LINK_TRAINING_CR_FAIL_LANE1) - current_link_setting->lane_count = - LANE_COUNT_ONE; - else if (training_result == - LINK_TRAINING_CR_FAIL_LANE23) - current_link_setting->lane_count = - LANE_COUNT_TWO; + cur->lane_count = LANE_COUNT_ONE; + else if (training_result 
== LINK_TRAINING_CR_FAIL_LANE23) + cur->lane_count = LANE_COUNT_TWO; else - current_link_setting->lane_count = - reduce_lane_count( - current_link_setting->lane_count); + cur->lane_count = reduce_lane_count(cur->lane_count); } else { return false; } @@ -3642,17 +3631,17 @@ static bool decide_fallback_link_setting( } case LINK_TRAINING_EQ_FAIL_EQ: { - if (!reached_minimum_lane_count - (current_link_setting->lane_count)) { - current_link_setting->lane_count = - reduce_lane_count( - current_link_setting->lane_count); - } else if (!reached_minimum_link_rate - (current_link_setting->link_rate)) { - current_link_setting->link_rate = - reduce_link_rate( - current_link_setting->link_rate); - current_link_setting->lane_count = initial_link_settings.lane_count; + if (!reached_minimum_lane_count(cur->lane_count)) { + cur->lane_count = reduce_lane_count(cur->lane_count); + } else if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. + */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; } else { return false; } @@ -3660,12 +3649,15 @@ static bool decide_fallback_link_setting( } case LINK_TRAINING_EQ_FAIL_CR: { - if (!reached_minimum_link_rate - (current_link_setting->link_rate)) { - current_link_setting->link_rate = - reduce_link_rate( - current_link_setting->link_rate); - current_link_setting->lane_count = initial_link_settings.lane_count; + if (!reached_minimum_link_rate(cur->link_rate)) { + cur->link_rate = reduce_link_rate(cur->link_rate); + /* Reduce max link rate to avoid potential infinite loop. + * Needed so that any subsequent CR_FAIL fallback can't + * re-set the link rate higher than the link rate from + * the latest EQ_FAIL fallback. + */ + max->link_rate = cur->link_rate; + cur->lane_count = max->lane_count; } else { return false; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 3960c74482be..817028d3c4a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.186" +#define DC_VER "3.2.187" #define MAX_SURFACES 3 #define MAX_PLANES 6 @@ -416,6 +416,7 @@ struct dc_clocks { bool p_state_change_support; enum dcn_zstate_support_state zstate_support; bool dtbclk_en; + int ref_dtbclk_khz; enum dcn_pwr_state pwr_state; /* * Elements below are not compared for the purposes of @@ -719,6 +720,8 @@ struct dc_debug_options { bool apply_vendor_specific_lttpr_wa; bool extended_blank_optimization; union aux_wake_wa_options aux_wake_wa; + /* uses value at boot and disables switch */ + bool disable_dtb_ref_clk_switch; uint8_t psr_power_use_phy_fsm; enum dml_hostvm_override_opts dml_hostvm_override; }; diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index 7eff7811769d..5f2afa5b4814 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1766,29 +1766,9 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context) break; } } - - /* - * TO-DO: So far the code logic below only addresses single eDP case. 
- * For dual eDP case, there are a few things that need to be - * implemented first: - * - * 1. Change the fastboot logic above, so eDP link[0 or 1]'s - * stream[0 or 1] will all be checked. - * - * 2. Change keep_edp_vdd_on to an array, and maintain keep_edp_vdd_on - * for each eDP. - * - * Once above 2 things are completed, we can then change the logic below - * correspondingly, so dual eDP case will be fully covered. - */ - - // We are trying to enable eDP, don't power down VDD if eDP stream is existing - if ((edp_stream_num == 1 && edp_streams[0] != NULL) || can_apply_edp_fast_boot) { + // We are trying to enable eDP, don't power down VDD + if (can_apply_edp_fast_boot) keep_edp_vdd_on = true; - DC_LOG_EVENT_LINK_TRAINING("Keep eDP Vdd on\n"); - } else { - DC_LOG_EVENT_LINK_TRAINING("No eDP stream enabled, turn eDP Vdd off\n"); - } } // Check seamless boot support diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c index 970b65efeac1..eaa7032f0f1a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c @@ -212,6 +212,9 @@ static void dpp2_cnv_setup ( break; } + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? input_color_space : color_space; + if (is_2bit == 1 && alpha_2bit_lut != NULL) { REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c index 8b6505b7dca8..f50ab961bc17 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_dpp.c @@ -153,6 +153,9 @@ static void dpp201_cnv_setup( break; } + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? input_color_space : color_space; + if (is_2bit == 1 && alpha_2bit_lut != NULL) { REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index ab3918c0a15b..0dcc07531643 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -294,6 +294,9 @@ static void dpp3_cnv_setup ( break; } + /* Set default color space based on format if none is given. */ + color_space = input_color_space ? 
input_color_space : color_space; + if (is_2bit == 1 && alpha_2bit_lut != NULL) { REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT0, alpha_2bit_lut->lut0); REG_UPDATE(ALPHA_2BIT_LUT, ALPHA_2BIT_LUT1, alpha_2bit_lut->lut1); diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 287a1066b547..bbc58d167c63 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -513,12 +513,10 @@ void dccg31_set_physymclk( /* Controls the generation of pixel valid for OTG in (OTG -> HPO case) */ static void dccg31_set_dtbclk_dto( struct dccg *dccg, - int dtbclk_inst, - int req_dtbclk_khz, - int num_odm_segments, - const struct dc_crtc_timing *timing) + struct dtbclk_dto_params *params) { struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); + int req_dtbclk_khz = params->pixclk_khz; uint32_t dtbdto_div; /* Mode DTBDTO Rate DTBCLK_DTO<x>_DIV Register @@ -529,57 +527,53 @@ static void dccg31_set_dtbclk_dto( * DSC native 4:2:2 pixel rate/2 4 * Other modes pixel rate 8 */ - if (num_odm_segments == 4) { + if (params->num_odm_segments == 4) { dtbdto_div = 2; - req_dtbclk_khz = req_dtbclk_khz / 4; - } else if ((num_odm_segments == 2) || - (timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) || - (timing->flags.DSC && timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 - && !timing->dsc_cfg.ycbcr422_simple)) { + req_dtbclk_khz = params->pixclk_khz / 4; + } else if ((params->num_odm_segments == 2) || + (params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR420) || + (params->timing->flags.DSC && params->timing->pixel_encoding == PIXEL_ENCODING_YCBCR422 + && !params->timing->dsc_cfg.ycbcr422_simple)) { dtbdto_div = 4; - req_dtbclk_khz = req_dtbclk_khz / 2; + req_dtbclk_khz = params->pixclk_khz / 2; } else dtbdto_div = 8; - if (dccg->ref_dtbclk_khz && req_dtbclk_khz) { + if (params->ref_dtbclk_khz && req_dtbclk_khz) { uint32_t modulo, phase; // phase / modulo = dtbclk / dtbclk ref - modulo = dccg->ref_dtbclk_khz * 1000; - phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + dccg->ref_dtbclk_khz - 1), - dccg->ref_dtbclk_khz); + modulo = params->ref_dtbclk_khz * 1000; + phase = div_u64((((unsigned long long)modulo * req_dtbclk_khz) + params->ref_dtbclk_khz - 1), + params->ref_dtbclk_khz); - REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div); + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div); - REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], modulo); - REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], phase); + REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], modulo); + REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], phase); - REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLK_DTO_ENABLE[dtbclk_inst], 1); + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_ENABLE[params->otg_inst], 1); - REG_WAIT(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLKDTO_ENABLE_STATUS[dtbclk_inst], 1, + REG_WAIT(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLKDTO_ENABLE_STATUS[params->otg_inst], 1, 1, 100); /* The recommended programming sequence to enable DTBCLK DTO to generate * valid pixel HPO DPSTREAM ENCODER, specifies that DTO source select should * be set only after DTO is enabled */ - REG_UPDATE(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - PIPE_DTO_SRC_SEL[dtbclk_inst], 1); - - dccg->dtbclk_khz[dtbclk_inst] = req_dtbclk_khz; + REG_UPDATE(OTG_PIXEL_RATE_CNTL[params->otg_inst], + PIPE_DTO_SRC_SEL[params->otg_inst], 1); } else { - 
REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[dtbclk_inst], - DTBCLK_DTO_ENABLE[dtbclk_inst], 0, - PIPE_DTO_SRC_SEL[dtbclk_inst], 0, - DTBCLK_DTO_DIV[dtbclk_inst], dtbdto_div); + REG_UPDATE_3(OTG_PIXEL_RATE_CNTL[params->otg_inst], + DTBCLK_DTO_ENABLE[params->otg_inst], 0, + PIPE_DTO_SRC_SEL[params->otg_inst], 0, + DTBCLK_DTO_DIV[params->otg_inst], dtbdto_div); - REG_WRITE(DTBCLK_DTO_MODULO[dtbclk_inst], 0); - REG_WRITE(DTBCLK_DTO_PHASE[dtbclk_inst], 0); - - dccg->dtbclk_khz[dtbclk_inst] = 0; + REG_WRITE(DTBCLK_DTO_MODULO[params->otg_inst], 0); + REG_WRITE(DTBCLK_DTO_PHASE[params->otg_inst], 0); } } @@ -606,16 +600,12 @@ void dccg31_set_audio_dtbclk_dto( REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, 4); // 04 - DCCG_AUDIO_DTO_SEL_AUDIO_DTO_DTBCLK - - dccg->audio_dtbclk_khz = req_audio_dtbclk_khz; } else { REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_PHASE, 0); REG_WRITE(DCCG_AUDIO_DTBCLK_DTO_MODULO, 0); REG_UPDATE(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO_SEL, 3); // 03 - DCCG_AUDIO_DTO_SEL_NO_AUDIO_DTO - - dccg->audio_dtbclk_khz = 0; } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c index d94fd1010deb..8b12b4111c88 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dio_link_encoder.c @@ -230,9 +230,7 @@ static void enc31_hw_init(struct link_encoder *enc) AUX_RX_PHASE_DETECT_LEN, [21,20] = 0x3 default is 3 AUX_RX_DETECTION_THRESHOLD [30:28] = 1 */ - AUX_REG_WRITE(AUX_DPHY_RX_CONTROL0, 0x103d1110); - - AUX_REG_WRITE(AUX_DPHY_TX_CONTROL, 0x21c7a); + // dmub will read AUX_DPHY_RX_CONTROL0/AUX_DPHY_TX_CONTROL from vbios table in dp_aux_init //AUX_DPHY_TX_REF_CONTROL'AUX_TX_REF_DIV HW default is 0x32; // Set AUX_TX_REF_DIV Divider to generate 2 MHz reference from refclk diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c index 789f7562cdc7..d2273674e872 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml_wrapper.c @@ -1284,10 +1284,8 @@ static bool is_dtbclk_required(struct dc *dc, struct dc_state *context) for (i = 0; i < dc->res_pool->pipe_count; i++) { if (!context->res_ctx.pipe_ctx[i].stream) continue; -#if defined (CONFIG_DRM_AMD_DC_DP2_0) if (is_dp_128b_132b_signal(&context->res_ctx.pipe_ctx[i])) return true; -#endif } return false; } diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h index 46ce5a0ee4ec..b5570aa8e39d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h @@ -237,6 +237,7 @@ struct clk_mgr_funcs { bool safe_to_lower); int (*get_dp_ref_clk_frequency)(struct clk_mgr *clk_mgr); + int (*get_dtb_ref_clk_frequency)(struct clk_mgr *clk_mgr); void (*set_low_power_state)(struct clk_mgr *clk_mgr); diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index b2fa4de47734..c7021915bac8 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -60,8 +60,17 @@ struct dccg { const struct dccg_funcs *funcs; int pipe_dppclk_khz[MAX_PIPES]; int ref_dppclk; - int dtbclk_khz[MAX_PIPES]; - int audio_dtbclk_khz; + //int dtbclk_khz[MAX_PIPES];/* TODO needs to be removed */ + //int audio_dtbclk_khz;/* TODO needs to be removed */ + int ref_dtbclk_khz;/* TODO needs to be removed */ +}; + +struct dtbclk_dto_params { + const 
struct dc_crtc_timing *timing; + int otg_inst; + int pixclk_khz; + int req_audio_dtbclk_khz; + int num_odm_segments; int ref_dtbclk_khz; }; @@ -111,10 +120,7 @@ struct dccg_funcs { void (*set_dtbclk_dto)( struct dccg *dccg, - int dtbclk_inst, - int req_dtbclk_khz, - int num_odm_segments, - const struct dc_crtc_timing *timing); + struct dtbclk_dto_params *dto_params); void (*set_audio_dtbclk_dto)( struct dccg *dccg, diff --git a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c index 87972dc8443d..ea6cf8bfce30 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_hwss_hpo_dp.c @@ -27,6 +27,7 @@ #include "core_types.h" #include "dccg.h" #include "dc_link_dp.h" +#include "clk_mgr.h" static enum phyd32clk_clock_source get_phyd32clk_src(struct dc_link *link) { @@ -106,14 +107,18 @@ static void setup_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) struct hpo_dp_link_encoder *link_enc = pipe_ctx->link_res.hpo_dp_link_enc; struct dccg *dccg = dc->res_pool->dccg; struct timing_generator *tg = pipe_ctx->stream_res.tg; - int odm_segment_count = get_odm_segment_count(pipe_ctx); + struct dtbclk_dto_params dto_params = {0}; enum phyd32clk_clock_source phyd32clk = get_phyd32clk_src(pipe_ctx->stream->link); + dto_params.otg_inst = tg->inst; + dto_params.pixclk_khz = pipe_ctx->stream->phy_pix_clk; + dto_params.num_odm_segments = get_odm_segment_count(pipe_ctx); + dto_params.timing = &pipe_ctx->stream->timing; + dto_params.ref_dtbclk_khz = dc->clk_mgr->funcs->get_dtb_ref_clk_frequency(dc->clk_mgr); + dccg->funcs->set_dpstreamclk(dccg, DTBCLK0, tg->inst); dccg->funcs->enable_symclk32_se(dccg, stream_enc->inst, phyd32clk); - dccg->funcs->set_dtbclk_dto(dccg, tg->inst, pipe_ctx->stream->phy_pix_clk, - odm_segment_count, - &pipe_ctx->stream->timing); + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); stream_enc->funcs->enable_stream(stream_enc); stream_enc->funcs->map_stream_to_link(stream_enc, stream_enc->inst, link_enc->inst); } @@ -124,9 +129,13 @@ static void reset_hpo_dp_stream_encoder(struct pipe_ctx *pipe_ctx) struct hpo_dp_stream_encoder *stream_enc = pipe_ctx->stream_res.hpo_dp_stream_enc; struct dccg *dccg = dc->res_pool->dccg; struct timing_generator *tg = pipe_ctx->stream_res.tg; + struct dtbclk_dto_params dto_params = {0}; + + dto_params.otg_inst = tg->inst; + dto_params.timing = &pipe_ctx->stream->timing; stream_enc->funcs->disable(stream_enc); - dccg->funcs->set_dtbclk_dto(dccg, tg->inst, 0, 0, &pipe_ctx->stream->timing); + dccg->funcs->set_dtbclk_dto(dccg, &dto_params); dccg->funcs->disable_symclk32_se(dccg, stream_enc->inst); dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst); } diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index 7c9330a61ac1..c7bd7e216710 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -84,7 +84,7 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) { union dmub_gpint_data_register cmd; const uint32_t timeout = 100; - uint32_t in_reset, scratch, i; + uint32_t in_reset, scratch, i, pwait_mode; REG_GET(DMCUB_CNTL2, DMCUB_SOFT_RESET, &in_reset); @@ -115,6 +115,13 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) udelay(1); } + for (i = 0; i < timeout; ++i) { + REG_GET(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS, &pwait_mode); + if (pwait_mode & (1 << 0)) + break; + + udelay(1); + } /* Force reset in case we timed out, DMCUB is 
likely hung. */ } @@ -125,6 +132,8 @@ void dmub_dcn31_reset(struct dmub_srv *dmub) REG_WRITE(DMCUB_INBOX1_WPTR, 0); REG_WRITE(DMCUB_OUTBOX1_RPTR, 0); REG_WRITE(DMCUB_OUTBOX1_WPTR, 0); + REG_WRITE(DMCUB_OUTBOX0_RPTR, 0); + REG_WRITE(DMCUB_OUTBOX0_WPTR, 0); REG_WRITE(DMCUB_SCRATCH0, 0); /* Clear the GPINT command manually so we don't send anything during boot. */ diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h index 59ddc81b5a0e..f6db6f89d45d 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.h @@ -151,7 +151,8 @@ struct dmub_srv; DMUB_SF(DCN_VM_FB_OFFSET, FB_OFFSET) \ DMUB_SF(DMCUB_INBOX0_WPTR, DMCUB_INBOX0_WPTR) \ DMUB_SF(DMCUB_INTERRUPT_ENABLE, DMCUB_GPINT_IH_INT_EN) \ - DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) + DMUB_SF(DMCUB_INTERRUPT_ACK, DMCUB_GPINT_IH_INT_ACK) \ + DMUB_SF(DMCUB_CNTL, DMCUB_PWAIT_MODE_STATUS) struct dmub_srv_dcn31_reg_offset { #define DMUB_SR(reg) uint32_t reg; diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index 73b9e0a87e54..20a3d4e23f66 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -127,6 +127,8 @@ struct av_sync_data { static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0}; static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0}; +static const u8 DP_SINK_BRANCH_DEV_NAME_7580[] = "7580\x80u"; + /*MST Dock*/ static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA"; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h index 247c6e9632ba..1cb399dbc7cc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h @@ -22,6 +22,7 @@ #ifndef SMU_11_0_7_PPTABLE_H #define SMU_11_0_7_PPTABLE_H +#pragma pack(push, 1) #define SMU_11_0_7_TABLE_FORMAT_REVISION 15 @@ -139,7 +140,7 @@ struct smu_11_0_7_overdrive_table uint32_t max[SMU_11_0_7_MAX_ODSETTING]; //default maximum settings uint32_t min[SMU_11_0_7_MAX_ODSETTING]; //default minimum settings int16_t pm_setting[SMU_11_0_7_MAX_PMSETTING]; //Optimized power mode feature settings -} __attribute__((packed)); +}; enum SMU_11_0_7_PPCLOCK_ID { SMU_11_0_7_PPCLOCK_GFXCLK = 0, @@ -166,7 +167,7 @@ struct smu_11_0_7_power_saving_clock_table uint32_t count; //power_saving_clock_count = SMU_11_0_7_PPCLOCK_COUNT uint32_t max[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz uint32_t min[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz -} __attribute__((packed)); +}; struct smu_11_0_7_powerplay_table { @@ -191,6 +192,8 @@ struct smu_11_0_7_powerplay_table struct smu_11_0_7_overdrive_table overdrive_table; PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h -} __attribute__((packed)); +}; + +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h index 7a63cf8e85ed..0116e3d04fad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h @@ -22,6 +22,7 @@ #ifndef SMU_11_0_PPTABLE_H #define SMU_11_0_PPTABLE_H +#pragma pack(push, 1) #define SMU_11_0_TABLE_FORMAT_REVISION 12 @@ -109,7 +110,7 @@ struct smu_11_0_overdrive_table uint8_t cap[SMU_11_0_MAX_ODFEATURE]; //OD 
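
The pptable headers in this series all make the same change: per-struct __attribute__((packed)) annotations are dropped in favor of one #pragma pack(push, 1) / #pragma pack(pop) pair bracketing the whole header, so every structure shared with firmware is byte-packed, including any nested or later-added ones the per-struct attributes could miss. The general shape (layout hypothetical):

    #pragma pack(push, 1)           /* everything below is byte-packed */

    struct fw_shared_example {
            uint8_t  revision;
            uint32_t feature_mask;  /* no padding inserted before this */
    };

    #pragma pack(pop)               /* restore the default alignment */
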
feature support flags uint32_t max[SMU_11_0_MAX_ODSETTING]; //default maximum settings uint32_t min[SMU_11_0_MAX_ODSETTING]; //default minimum settings -} __attribute__((packed)); +}; enum SMU_11_0_PPCLOCK_ID { SMU_11_0_PPCLOCK_GFXCLK = 0, @@ -133,7 +134,7 @@ struct smu_11_0_power_saving_clock_table uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT uint32_t max[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz uint32_t min[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz -} __attribute__((packed)); +}; struct smu_11_0_powerplay_table { @@ -162,6 +163,8 @@ struct smu_11_0_powerplay_table #ifndef SMU_11_0_PARTIAL_PPTABLE PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h #endif -} __attribute__((packed)); +}; + +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h index 3f29f4327378..478862ded0bd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_7_pptable.h @@ -22,6 +22,8 @@ #ifndef SMU_13_0_7_PPTABLE_H #define SMU_13_0_7_PPTABLE_H +#pragma pack(push, 1) + #define SMU_13_0_7_TABLE_FORMAT_REVISION 15 //// POWERPLAYTABLE::ulPlatformCaps @@ -194,7 +196,8 @@ struct smu_13_0_7_powerplay_table struct smu_13_0_7_overdrive_table overdrive_table; uint8_t padding1; PPTable_t smc_pptable; //PPTable_t in driver_if.h -} __attribute__((packed)); +}; +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h index 1f311396b706..043307485528 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h @@ -22,6 +22,8 @@ #ifndef SMU_13_0_PPTABLE_H #define SMU_13_0_PPTABLE_H +#pragma pack(push, 1) + #define SMU_13_0_TABLE_FORMAT_REVISION 1 //// POWERPLAYTABLE::ulPlatformCaps @@ -109,7 +111,7 @@ struct smu_13_0_overdrive_table { uint8_t cap[SMU_13_0_MAX_ODFEATURE]; //OD feature support flags uint32_t max[SMU_13_0_MAX_ODSETTING]; //default maximum settings uint32_t min[SMU_13_0_MAX_ODSETTING]; //default minimum settings -} __attribute__((packed)); +}; enum SMU_13_0_PPCLOCK_ID { SMU_13_0_PPCLOCK_GFXCLK = 0, @@ -132,7 +134,7 @@ struct smu_13_0_power_saving_clock_table { uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT uint32_t max[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz uint32_t min[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz -} __attribute__((packed)); +}; struct smu_13_0_powerplay_table { struct atom_common_table_header header; @@ -160,6 +162,8 @@ struct smu_13_0_powerplay_table { #ifndef SMU_13_0_PARTIAL_PPTABLE PPTable_t smc_pptable; //PPTable_t in driver_if.h #endif -} __attribute__((packed)); +}; + +#pragma pack(pop) #endif diff --git a/drivers/gpu/drm/ast/ast_dp.c b/drivers/gpu/drm/ast/ast_dp.c index 4551bc8a3ecf..f573d582407e 100644 --- a/drivers/gpu/drm/ast/ast_dp.c +++ b/drivers/gpu/drm/ast/ast_dp.c @@ -160,13 +160,12 @@ void ast_dp_launch(struct drm_device *dev, u8 bPower) } if (bDPExecute) - ast->tx_chip_type = AST_TX_ASTDP; + ast->tx_chip_types |= BIT(AST_TX_ASTDP); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xE5, (u8) ~ASTDP_HOST_EDID_READ_DONE_MASK, ASTDP_HOST_EDID_READ_DONE); - } else - ast->tx_chip_type = AST_TX_NONE; + } } diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c index 
204c926a18ea..4f75a9efb610 100644 --- a/drivers/gpu/drm/ast/ast_dp501.c +++ b/drivers/gpu/drm/ast/ast_dp501.c @@ -450,7 +450,7 @@ void ast_init_3rdtx(struct drm_device *dev) ast_init_dvo(dev); break; default: - if (ast->tx_chip_type == AST_TX_SIL164) + if (ast->tx_chip_types & BIT(AST_TX_SIL164)) ast_init_dvo(dev); else ast_init_analog(dev); diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index afebe35f205e..a34db4380f68 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -73,6 +73,11 @@ enum ast_tx_chip { AST_TX_ASTDP, }; +#define AST_TX_NONE_BIT BIT(AST_TX_NONE) +#define AST_TX_SIL164_BIT BIT(AST_TX_SIL164) +#define AST_TX_DP501_BIT BIT(AST_TX_DP501) +#define AST_TX_ASTDP_BIT BIT(AST_TX_ASTDP) + #define AST_DRAM_512Mx16 0 #define AST_DRAM_1Gx16 1 #define AST_DRAM_512Mx32 2 @@ -173,7 +178,7 @@ struct ast_private { struct drm_plane primary_plane; struct ast_cursor_plane cursor_plane; struct drm_crtc crtc; - union { + struct { struct { struct drm_encoder encoder; struct ast_vga_connector vga_connector; @@ -199,7 +204,7 @@ struct ast_private { ast_use_defaults } config_mode; - enum ast_tx_chip tx_chip_type; + unsigned long tx_chip_types; /* bitfield of enum ast_chip_type */ u8 *dp501_fw_addr; const struct firmware *dp501_fw; /* dp501 fw */ }; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index d770d5a23c1a..067453266897 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -216,7 +216,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } /* Check 3rd Tx option (digital output afaik) */ - ast->tx_chip_type = AST_TX_NONE; + ast->tx_chip_types |= AST_TX_NONE_BIT; /* * VGACRA3 Enhanced Color Mode Register, check if DVO is already @@ -229,7 +229,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) if (!*need_post) { jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xff); if (jreg & 0x80) - ast->tx_chip_type = AST_TX_SIL164; + ast->tx_chip_types = AST_TX_SIL164_BIT; } if ((ast->chip == AST2300) || (ast->chip == AST2400) || (ast->chip == AST2500)) { @@ -241,7 +241,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) jreg = ast_get_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xd1, 0xff); switch (jreg) { case 0x04: - ast->tx_chip_type = AST_TX_SIL164; + ast->tx_chip_types = AST_TX_SIL164_BIT; break; case 0x08: ast->dp501_fw_addr = drmm_kzalloc(dev, 32*1024, GFP_KERNEL); @@ -254,22 +254,19 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post) } fallthrough; case 0x0c: - ast->tx_chip_type = AST_TX_DP501; + ast->tx_chip_types = AST_TX_DP501_BIT; } } else if (ast->chip == AST2600) ast_dp_launch(&ast->base, 0); /* Print stuff for diagnostic purposes */ - switch(ast->tx_chip_type) { - case AST_TX_SIL164: + if (ast->tx_chip_types & AST_TX_NONE_BIT) + drm_info(dev, "Using analog VGA\n"); + if (ast->tx_chip_types & AST_TX_SIL164_BIT) drm_info(dev, "Using Sil164 TMDS transmitter\n"); - break; - case AST_TX_DP501: + if (ast->tx_chip_types & AST_TX_DP501_BIT) drm_info(dev, "Using DP501 DisplayPort transmitter\n"); - break; - default: - drm_info(dev, "Analog VGA only\n"); - } + return 0; } diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c index 323af2746aa9..db2010a55674 100644 --- a/drivers/gpu/drm/ast/ast_mode.c +++ b/drivers/gpu/drm/ast/ast_mode.c @@ -997,10 +997,10 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_ON: ast_set_index_reg_mask(ast, 
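
The ast change running through these files turns tx_chip_type from an exclusive enum into the tx_chip_types bitmask, so a board exposing more than one transmitter (e.g. analog VGA plus ASTDP) can record all of them at once. The idiom, using BIT() from linux/bits.h (init_dvo() is a hypothetical consumer):

    enum ast_tx_chip { AST_TX_NONE, AST_TX_SIL164, AST_TX_DP501, AST_TX_ASTDP };

    void init_dvo(void);

    static void record_tx_chips(void)
    {
            unsigned long tx_chip_types = BIT(AST_TX_NONE);

            tx_chip_types |= BIT(AST_TX_ASTDP);     /* detection accumulates */

            if (tx_chip_types & BIT(AST_TX_SIL164)) /* membership, not equality */
                    init_dvo();
    }
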
AST_IO_SEQ_PORT, 0x01, 0xdf, 0); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, 0); - if (ast->tx_chip_type == AST_TX_DP501) + if (ast->tx_chip_types & AST_TX_DP501_BIT) ast_set_dp501_video_output(crtc->dev, 1); - if (ast->tx_chip_type == AST_TX_ASTDP) { + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { ast_dp_power_on_off(crtc->dev, AST_DP_POWER_ON); ast_wait_for_vretrace(ast); ast_dp_set_on_off(crtc->dev, 1); @@ -1012,17 +1012,17 @@ static void ast_crtc_dpms(struct drm_crtc *crtc, int mode) case DRM_MODE_DPMS_SUSPEND: case DRM_MODE_DPMS_OFF: ch = mode; - if (ast->tx_chip_type == AST_TX_DP501) + if (ast->tx_chip_types & AST_TX_DP501_BIT) ast_set_dp501_video_output(crtc->dev, 0); - break; - if (ast->tx_chip_type == AST_TX_ASTDP) { + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { ast_dp_set_on_off(crtc->dev, 0); ast_dp_power_on_off(crtc->dev, AST_DP_POWER_OFF); } ast_set_index_reg_mask(ast, AST_IO_SEQ_PORT, 0x01, 0xdf, 0x20); ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xb6, 0xfc, ch); + break; } } @@ -1155,7 +1155,7 @@ ast_crtc_helper_atomic_flush(struct drm_crtc *crtc, ast_crtc_load_lut(ast, crtc); //Set Aspeed Display-Port - if (ast->tx_chip_type == AST_TX_ASTDP) + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) ast_dp_set_mode(crtc, vbios_mode_info); mutex_unlock(&ast->ioregs_lock); @@ -1739,22 +1739,26 @@ int ast_mode_config_init(struct ast_private *ast) ast_crtc_init(dev); - switch (ast->tx_chip_type) { - case AST_TX_NONE: + if (ast->tx_chip_types & AST_TX_NONE_BIT) { ret = ast_vga_output_init(ast); - break; - case AST_TX_SIL164: + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_SIL164_BIT) { ret = ast_sil164_output_init(ast); - break; - case AST_TX_DP501: + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_DP501_BIT) { ret = ast_dp501_output_init(ast); - break; - case AST_TX_ASTDP: + if (ret) + return ret; + } + if (ast->tx_chip_types & AST_TX_ASTDP_BIT) { ret = ast_astdp_output_init(ast); - break; + if (ret) + return ret; } - if (ret) - return ret; drm_mode_config_reset(dev); diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c index 0aa9cf0fb5c3..82fd3c8adee1 100644 --- a/drivers/gpu/drm/ast/ast_post.c +++ b/drivers/gpu/drm/ast/ast_post.c @@ -391,7 +391,7 @@ void ast_post_gpu(struct drm_device *dev) ast_init_3rdtx(dev); } else { - if (ast->tx_chip_type != AST_TX_NONE) + if (ast->tx_chip_types & AST_TX_SIL164_BIT) ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */ } } diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c index b97f6e8f0f6b..01c8b80e34ec 100644 --- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c +++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c @@ -1267,6 +1267,25 @@ static int analogix_dp_bridge_attach(struct drm_bridge *bridge, } static +struct drm_crtc *analogix_dp_get_old_crtc(struct analogix_dp_device *dp, + struct drm_atomic_state *state) +{ + struct drm_encoder *encoder = dp->encoder; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + + connector = drm_atomic_get_old_connector_for_encoder(state, encoder); + if (!connector) + return NULL; + + conn_state = drm_atomic_get_old_connector_state(state, connector); + if (!conn_state) + return NULL; + + return conn_state->crtc; +} + +static struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp, struct drm_atomic_state *state) { @@ -1446,14 +1465,16 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, { struct 
drm_atomic_state *old_state = old_bridge_state->base.state; struct analogix_dp_device *dp = bridge->driver_private; - struct drm_crtc *crtc; + struct drm_crtc *old_crtc, *new_crtc; + struct drm_crtc_state *old_crtc_state = NULL; struct drm_crtc_state *new_crtc_state = NULL; + int ret; - crtc = analogix_dp_get_new_crtc(dp, old_state); - if (!crtc) + new_crtc = analogix_dp_get_new_crtc(dp, old_state); + if (!new_crtc) goto out; - new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc); + new_crtc_state = drm_atomic_get_new_crtc_state(old_state, new_crtc); if (!new_crtc_state) goto out; @@ -1462,6 +1483,19 @@ analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge, return; out: + old_crtc = analogix_dp_get_old_crtc(dp, old_state); + if (old_crtc) { + old_crtc_state = drm_atomic_get_old_crtc_state(old_state, + old_crtc); + + /* When moving from PSR to fully disabled, exit PSR first. */ + if (old_crtc_state && old_crtc_state->self_refresh_active) { + ret = analogix_dp_disable_psr(dp); + if (ret) + DRM_ERROR("Failed to disable psr (%d)\n", ret); + } + } + analogix_dp_bridge_disable(bridge); } diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c index 2831f0813c3a..ac66f408b40c 100644 --- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c +++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c @@ -577,7 +577,7 @@ static int sn65dsi83_parse_dt(struct sn65dsi83 *ctx, enum sn65dsi83_model model) ctx->host_node = of_graph_get_remote_port_parent(endpoint); of_node_put(endpoint); - if (ctx->dsi_lanes < 0 || ctx->dsi_lanes > 4) { + if (ctx->dsi_lanes <= 0 || ctx->dsi_lanes > 4) { ret = -EINVAL; goto err_put_node; } diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c index 9603193d2fa1..987e4b212e9f 100644 --- a/drivers/gpu/drm/drm_atomic_helper.c +++ b/drivers/gpu/drm/drm_atomic_helper.c @@ -1011,9 +1011,19 @@ crtc_needs_disable(struct drm_crtc_state *old_state, return drm_atomic_crtc_effectively_active(old_state); /* - * We need to run through the crtc_funcs->disable() function if the CRTC - * is currently on, if it's transitioning to self refresh mode, or if - * it's in self refresh mode and needs to be fully disabled. + * We need to disable bridge(s) and CRTC if we're transitioning out of + * self-refresh and changing CRTCs at the same time, because the + * bridge tracks self-refresh status via CRTC state. + */ + if (old_state->self_refresh_active && + old_state->crtc != new_state->crtc) + return true; + + /* + * We also need to run through the crtc_funcs->disable() function if + * the CRTC is currently on, if it's transitioning to self refresh + * mode, or if it's in self refresh mode and needs to be fully + * disabled. 
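
Condensed, the predicate now covers four cases; a hypothetical restatement (not the kernel function) with the atomic states reduced to booleans:

    static bool needs_full_disable(bool was_active, bool old_sr,
                                   bool new_active, bool new_sr,
                                   bool crtc_changed)
    {
            /* Leaving self-refresh while switching CRTCs: the bridge tracks
             * self-refresh through CRTC state, so fully disable first. */
            if (old_sr && crtc_changed)
                    return true;

            return was_active ||               /* CRTC currently on */
                   (old_sr && !new_active) ||  /* self-refresh -> fully off */
                   new_sr;                     /* entering self-refresh */
    }
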
*/ return old_state->active || (old_state->self_refresh_active && !new_state->active) || diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 4e853acfd1e8..df87ba99a87c 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -152,6 +152,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "AYA NEO 2021"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* AYA NEO NEXT */ + .matches = { + DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AYANEO"), + DMI_MATCH(DMI_BOARD_NAME, "NEXT"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* Chuwi HiBook (CWI514) */ .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"), diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 424ea23eec32..16c539657f73 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -177,15 +177,15 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = { DRV_PTR(mixer_driver, CONFIG_DRM_EXYNOS_MIXER), DRM_COMPONENT_DRIVER }, { - DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC), - DRM_COMPONENT_DRIVER - }, { DRV_PTR(dp_driver, CONFIG_DRM_EXYNOS_DP), DRM_COMPONENT_DRIVER }, { DRV_PTR(dsi_driver, CONFIG_DRM_EXYNOS_DSI), DRM_COMPONENT_DRIVER }, { + DRV_PTR(mic_driver, CONFIG_DRM_EXYNOS_MIC), + DRM_COMPONENT_DRIVER + }, { DRV_PTR(hdmi_driver, CONFIG_DRM_EXYNOS_HDMI), DRM_COMPONENT_DRIVER }, { diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c index 9e06f8e2a863..09ce28ee08d9 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_mic.c +++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c @@ -26,6 +26,7 @@ #include <drm/drm_print.h> #include "exynos_drm_drv.h" +#include "exynos_drm_crtc.h" /* Sysreg registers for MIC */ #define DSD_CFG_MUX 0x1004 @@ -100,9 +101,7 @@ struct exynos_mic { bool i80_mode; struct videomode vm; - struct drm_encoder *encoder; struct drm_bridge bridge; - struct drm_bridge *next_bridge; bool enabled; }; @@ -229,8 +228,6 @@ static void mic_set_reg_on(struct exynos_mic *mic, bool enable) writel(reg, mic->reg + MIC_OP); } -static void mic_disable(struct drm_bridge *bridge) { } - static void mic_post_disable(struct drm_bridge *bridge) { struct exynos_mic *mic = bridge->driver_private; @@ -297,34 +294,30 @@ unlock: mutex_unlock(&mic_mutex); } -static void mic_enable(struct drm_bridge *bridge) { } - -static int mic_attach(struct drm_bridge *bridge, - enum drm_bridge_attach_flags flags) -{ - struct exynos_mic *mic = bridge->driver_private; - - return drm_bridge_attach(bridge->encoder, mic->next_bridge, - &mic->bridge, flags); -} - static const struct drm_bridge_funcs mic_bridge_funcs = { - .disable = mic_disable, .post_disable = mic_post_disable, .mode_set = mic_mode_set, .pre_enable = mic_pre_enable, - .enable = mic_enable, - .attach = mic_attach, }; static int exynos_mic_bind(struct device *dev, struct device *master, void *data) { struct exynos_mic *mic = dev_get_drvdata(dev); + struct drm_device *drm_dev = data; + struct exynos_drm_crtc *crtc = exynos_drm_crtc_get_by_type(drm_dev, + EXYNOS_DISPLAY_TYPE_LCD); + struct drm_encoder *e, *encoder = NULL; + + drm_for_each_encoder(e, drm_dev) + if (e->possible_crtcs == drm_crtc_mask(&crtc->base)) + encoder = e; + if (!encoder) + return -ENODEV; mic->bridge.driver_private = mic; - return 0; + return drm_bridge_attach(encoder, &mic->bridge, NULL, 0); } static void 
exynos_mic_unbind(struct device *dev, struct device *master, @@ -388,7 +381,6 @@ static int exynos_mic_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct exynos_mic *mic; - struct device_node *remote; struct resource res; int ret, i; @@ -432,16 +424,6 @@ static int exynos_mic_probe(struct platform_device *pdev) } } - remote = of_graph_get_remote_node(dev->of_node, 1, 0); - mic->next_bridge = of_drm_find_bridge(remote); - if (IS_ERR(mic->next_bridge)) { - DRM_DEV_ERROR(dev, "mic: Failed to find next bridge\n"); - ret = PTR_ERR(mic->next_bridge); - goto err; - } - - of_node_put(remote); - platform_set_drvdata(pdev, mic); mic->bridge.funcs = &mic_bridge_funcs; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index e4a79c11fd25..ff67899522cf 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -388,13 +388,23 @@ static int dg2_max_source_rate(struct intel_dp *intel_dp) return intel_dp_is_edp(intel_dp) ? 810000 : 1350000; } +static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy) +{ + u32 voltage; + + voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) & VOLTAGE_INFO_MASK; + + return voltage == VOLTAGE_INFO_0_85V; +} + static int icl_max_source_rate(struct intel_dp *intel_dp) { struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); - if (intel_phy_is_combo(dev_priv, phy) && !intel_dp_is_edp(intel_dp)) + if (intel_phy_is_combo(dev_priv, phy) && + (is_low_voltage_sku(dev_priv, phy) || !intel_dp_is_edp(intel_dp))) return 540000; return 810000; @@ -402,7 +412,23 @@ static int icl_max_source_rate(struct intel_dp *intel_dp) static int ehl_max_source_rate(struct intel_dp *intel_dp) { - if (intel_dp_is_edp(intel_dp)) + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port); + + if (intel_dp_is_edp(intel_dp) || is_low_voltage_sku(dev_priv, phy)) + return 540000; + + return 810000; +} + +static int dg1_max_source_rate(struct intel_dp *intel_dp) +{ + struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); + struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum phy phy = intel_port_to_phy(i915, dig_port->base.port); + + if (intel_phy_is_combo(i915, phy) && is_low_voltage_sku(i915, phy)) return 540000; return 810000; @@ -445,7 +471,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp) max_rate = dg2_max_source_rate(intel_dp); else if (IS_ALDERLAKE_P(dev_priv) || IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) - max_rate = 810000; + max_rate = dg1_max_source_rate(intel_dp); else if (IS_JSL_EHL(dev_priv)) max_rate = ehl_max_source_rate(intel_dp); else diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 22f55574a35c..88c2f38aa870 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2396,7 +2396,7 @@ static void icl_wrpll_params_populate(struct skl_wrpll_params *params, } /* - * Display WA #22010492432: ehl, tgl, adl-p + * Display WA #22010492432: ehl, tgl, adl-s, adl-p * Program half of the nominal DCO divider fraction value. 
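
The intel_dp.c hunks above cap the maximum DP source rate on the combo PHY's reported voltage level: parts reporting 0.85 V are held to 540000 (5.4 Gbps, HBR2) instead of 810000 (8.1 Gbps, HBR3). The shape of the check, with register and field names as in the hunk:

    static bool is_low_voltage_sku(struct drm_i915_private *i915, enum phy phy)
    {
            u32 voltage = intel_de_read(i915, ICL_PORT_COMP_DW3(phy)) &
                          VOLTAGE_INFO_MASK;

            return voltage == VOLTAGE_INFO_0_85V;
    }
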
*/ static bool @@ -2404,7 +2404,7 @@ ehl_combo_pll_div_frac_wa_needed(struct drm_i915_private *i915) { return ((IS_PLATFORM(i915, INTEL_ELKHARTLAKE) && IS_JSL_EHL_DISPLAY_STEP(i915, STEP_B0, STEP_FOREVER)) || - IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) && + IS_TIGERLAKE(i915) || IS_ALDERLAKE_S(i915) || IS_ALDERLAKE_P(i915)) && i915->dpll.ref_clks.nssc == 38400; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index c326bd2b444f..30fe847c6664 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -999,7 +999,8 @@ static int eb_validate_vmas(struct i915_execbuffer *eb) } } - err = dma_resv_reserve_fences(vma->obj->base.resv, 1); + /* Reserve enough slots to accommodate composite fences */ + err = dma_resv_reserve_fences(vma->obj->base.resv, eb->num_batches); if (err) return err; diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index 53307ca0eed0..51a0fe60c050 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -785,6 +785,7 @@ void intel_gt_driver_unregister(struct intel_gt *gt) { intel_wakeref_t wakeref; + intel_gt_sysfs_unregister(gt); intel_rps_driver_unregister(&gt->rps); intel_gsc_fini(&gt->gsc); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c index 8ec8bc660c8c..9e4ebf53379b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.c @@ -24,7 +24,7 @@ bool is_object_gt(struct kobject *kobj) static struct intel_gt *kobj_to_gt(struct kobject *kobj) { - return container_of(kobj, struct kobj_gt, base)->gt; + return container_of(kobj, struct intel_gt, sysfs_gt); } struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev, @@ -72,9 +72,9 @@ static struct attribute *id_attrs[] = { }; ATTRIBUTE_GROUPS(id); +/* A kobject needs a release() method even if it does nothing */ static void kobj_gt_release(struct kobject *kobj) { - kfree(kobj); } static struct kobj_type kobj_gt_type = { @@ -85,8 +85,6 @@ static struct kobj_type kobj_gt_type = { void intel_gt_sysfs_register(struct intel_gt *gt) { - struct kobj_gt *kg; - /* * We need to make things right with the * ABI compatibility. 
The files were originally @@ -98,25 +96,22 @@ void intel_gt_sysfs_register(struct intel_gt *gt) if (gt_is_root(gt)) intel_gt_sysfs_pm_init(gt, gt_get_parent_obj(gt)); - kg = kzalloc(sizeof(*kg), GFP_KERNEL); - if (!kg) + /* init and xfer ownership to sysfs tree */ + if (kobject_init_and_add(&gt->sysfs_gt, &kobj_gt_type, + gt->i915->sysfs_gt, "gt%d", gt->info.id)) goto exit_fail; - kobject_init(&kg->base, &kobj_gt_type); - kg->gt = gt; - - /* xfer ownership to sysfs tree */ - if (kobject_add(&kg->base, gt->i915->sysfs_gt, "gt%d", gt->info.id)) - goto exit_kobj_put; - - intel_gt_sysfs_pm_init(gt, &kg->base); + intel_gt_sysfs_pm_init(gt, &gt->sysfs_gt); return; -exit_kobj_put: - kobject_put(&kg->base); - exit_fail: + kobject_put(&gt->sysfs_gt); drm_warn(&gt->i915->drm, "failed to initialize gt%d sysfs root\n", gt->info.id); } + +void intel_gt_sysfs_unregister(struct intel_gt *gt) +{ + kobject_put(&gt->sysfs_gt); +} diff --git a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h index 9471b26752cf..a99aa7e8b01a 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_sysfs.h @@ -13,11 +13,6 @@ struct intel_gt; -struct kobj_gt { - struct kobject base; - struct intel_gt *gt; -}; - bool is_object_gt(struct kobject *kobj); struct drm_i915_private *kobj_to_i915(struct kobject *kobj); @@ -28,6 +23,7 @@ intel_gt_create_kobj(struct intel_gt *gt, const char *name); void intel_gt_sysfs_register(struct intel_gt *gt); +void intel_gt_sysfs_unregister(struct intel_gt *gt); struct intel_gt *intel_gt_sysfs_get_drvdata(struct device *dev, const char *name); diff --git a/drivers/gpu/drm/i915/gt/intel_gt_types.h b/drivers/gpu/drm/i915/gt/intel_gt_types.h index b06611c1d4ad..edd7a3cf5f5f 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_types.h +++ b/drivers/gpu/drm/i915/gt/intel_gt_types.h @@ -224,6 +224,9 @@ struct intel_gt { } mocs; struct intel_pxp pxp; + + /* gt/gtN sysfs */ + struct kobject sysfs_gt; }; enum intel_gt_scratch_field { diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c index d078f884b5e3..f0d7b57b741e 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c @@ -156,7 +156,7 @@ __uc_fw_auto_select(struct drm_i915_private *i915, struct intel_uc_fw *uc_fw) [INTEL_UC_FW_TYPE_GUC] = { blobs_guc, ARRAY_SIZE(blobs_guc) }, [INTEL_UC_FW_TYPE_HUC] = { blobs_huc, ARRAY_SIZE(blobs_huc) }, }; - static const struct uc_fw_platform_requirement *fw_blobs; + const struct uc_fw_platform_requirement *fw_blobs; enum intel_platform p = INTEL_INFO(i915)->platform; u32 fw_count; u8 rev = INTEL_REVID(i915); diff --git a/drivers/gpu/drm/i915/i915_drm_client.c b/drivers/gpu/drm/i915/i915_drm_client.c index 18d38cb59923..b09d1d386574 100644 --- a/drivers/gpu/drm/i915/i915_drm_client.c +++ b/drivers/gpu/drm/i915/i915_drm_client.c @@ -116,8 +116,9 @@ show_client_class(struct seq_file *m, total += busy_add(ctx, class); rcu_read_unlock(); - seq_printf(m, "drm-engine-%s:\t%llu ns\n", - uabi_class_names[class], total); + if (capacity) + seq_printf(m, "drm-engine-%s:\t%llu ns\n", + uabi_class_names[class], total); if (capacity > 1) seq_printf(m, "drm-engine-capacity-%s:\t%u\n", diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 8521daba212a..1e2750210831 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -166,7 +166,14 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, struct device 
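
The sysfs rework above replaces a separately allocated wrapper (struct kobj_gt) with a kobject embedded directly in struct intel_gt: one allocation disappears and the back-pointer becomes a plain container_of(). The empty release() must still exist because the kobject core complains about kobj_types without one. The pattern, with a stand-in type name:

    struct gt_like {
            /* ... driver state ... */
            struct kobject sysfs_gt;        /* embedded, not pointed-to */
    };

    static struct gt_like *to_gt(struct kobject *kobj)
    {
            return container_of(kobj, struct gt_like, sysfs_gt);
    }

    static void gt_release(struct kobject *kobj)
    {
            /* storage belongs to the enclosing object; nothing to free */
    }
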
*kdev = kobj_to_dev(kobj); struct drm_i915_private *i915 = kdev_minor_to_i915(kdev); struct i915_gpu_coredump *gpu; - ssize_t ret; + ssize_t ret = 0; + + /* + * FIXME: Concurrent clients triggering resets and reading + clearing + * dumps can cause inconsistent sysfs reads when a user calls in with a + * non-zero offset to complete a prior partial read but the + * gpu_coredump has been cleared or replaced. + */ gpu = i915_first_error_state(i915); if (IS_ERR(gpu)) { @@ -178,8 +185,10 @@ static ssize_t error_state_read(struct file *filp, struct kobject *kobj, const char *str = "No error state collected\n"; size_t len = strlen(str); - ret = min_t(size_t, count, len - off); - memcpy(buf, str + off, ret); + if (off < len) { + ret = min_t(size_t, count, len - off); + memcpy(buf, str + off, ret); + } } return ret; @@ -259,4 +268,6 @@ void i915_teardown_sysfs(struct drm_i915_private *dev_priv) device_remove_bin_file(kdev, &dpf_attrs_1); device_remove_bin_file(kdev, &dpf_attrs); + + kobject_put(dev_priv->sysfs_gt); } diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 4f6db539571a..0bffb70b3c5f 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -23,6 +23,7 @@ */ #include <linux/sched/mm.h> +#include <linux/dma-fence-array.h> #include <drm/drm_gem.h> #include "display/intel_frontbuffer.h" @@ -1823,6 +1824,21 @@ int _i915_vma_move_to_active(struct i915_vma *vma, if (unlikely(err)) return err; + /* + * Reserve fences slot early to prevent an allocation after preparing + * the workload and associating fences with dma_resv. + */ + if (fence && !(flags & __EXEC_OBJECT_NO_RESERVE)) { + struct dma_fence *curr; + int idx; + + dma_fence_array_for_each(curr, idx, fence) + ; + err = dma_resv_reserve_fences(vma->obj->base.resv, idx); + if (unlikely(err)) + return err; + } + if (flags & EXEC_OBJECT_WRITE) { struct intel_frontbuffer *front; @@ -1832,31 +1848,23 @@ int _i915_vma_move_to_active(struct i915_vma *vma, i915_active_add_request(&front->write, rq); intel_frontbuffer_put(front); } + } - if (!(flags & __EXEC_OBJECT_NO_RESERVE)) { - err = dma_resv_reserve_fences(vma->obj->base.resv, 1); - if (unlikely(err)) - return err; - } + if (fence) { + struct dma_fence *curr; + enum dma_resv_usage usage; + int idx; - if (fence) { - dma_resv_add_fence(vma->obj->base.resv, fence, - DMA_RESV_USAGE_WRITE); + obj->read_domains = 0; + if (flags & EXEC_OBJECT_WRITE) { + usage = DMA_RESV_USAGE_WRITE; obj->write_domain = I915_GEM_DOMAIN_RENDER; - obj->read_domains = 0; - } - } else { - if (!(flags & __EXEC_OBJECT_NO_RESERVE)) { - err = dma_resv_reserve_fences(vma->obj->base.resv, 1); - if (unlikely(err)) - return err; + } else { + usage = DMA_RESV_USAGE_READ; } - if (fence) { - dma_resv_add_fence(vma->obj->base.resv, fence, - DMA_RESV_USAGE_READ); - obj->write_domain = 0; - } + dma_fence_array_for_each(curr, idx, fence) + dma_resv_add_fence(vma->obj->base.resv, curr, usage); } if (flags & EXEC_OBJECT_NEEDS_FENCE && vma->fence) diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index 9c8829f945b2..f7863d6dea80 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -69,7 +69,7 @@ static void ipu_crtc_disable_planes(struct ipu_crtc *ipu_crtc, drm_atomic_crtc_state_for_each_plane(plane, old_crtc_state) { if (plane == &ipu_crtc->plane[0]->base) disable_full = true; - if (&ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base) + if (ipu_crtc->plane[1] && plane == &ipu_crtc->plane[1]->base) 
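
The i915_vma.c hunk above sizes the dma-resv reservation by walking the possibly-composite fence first: dma_fence_array_for_each() with an empty body leaves the index one past the last component (a plain fence iterates exactly once), which is the number of slots to reserve before any fence is added:

    struct dma_fence *curr;
    int idx;

    /* Count components; idx ends up as 1 for a non-array fence. */
    dma_fence_array_for_each(curr, idx, fence)
            ;

    err = dma_resv_reserve_fences(vma->obj->base.resv, idx);
    if (unlikely(err))
            return err;
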
disable_partial = true; } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 4e665c806a14..efe9840e28fa 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -498,10 +498,15 @@ int adreno_hw_init(struct msm_gpu *gpu) ring->cur = ring->start; ring->next = ring->start; - - /* reset completed fence seqno: */ - ring->memptrs->fence = ring->fctx->completed_fence; ring->memptrs->rptr = 0; + + /* Detect and clean up an impossible fence, ie. if GPU managed + * to scribble something invalid, we don't want that to confuse + * us into mistakingly believing that submits have completed. + */ + if (fence_before(ring->fctx->last_fence, ring->memptrs->fence)) { + ring->memptrs->fence = ring->fctx->last_fence; + } } return 0; @@ -1057,7 +1062,8 @@ void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu) for (i = 0; i < ARRAY_SIZE(adreno_gpu->info->fw); i++) release_firmware(adreno_gpu->fw[i]); - pm_runtime_disable(&priv->gpu_pdev->dev); + if (pm_runtime_enabled(&priv->gpu_pdev->dev)) + pm_runtime_disable(&priv->gpu_pdev->dev); msm_gpu_cleanup(&adreno_gpu->base); } diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c index 399115e4e217..2fd787079f9b 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_writeback.c @@ -11,7 +11,14 @@ static int dpu_wb_conn_get_modes(struct drm_connector *connector) struct msm_drm_private *priv = dev->dev_private; struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms); - return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_linewidth, + /* + * We should ideally be limiting the modes only to the maxlinewidth but + * on some chipsets this will allow even 4k modes to be added which will + * fail the per SSPP bandwidth checks. So, till we have dual-SSPP support + * and source split support added lets limit the modes based on max_mixer_width + * as 4K modes can then be supported. 
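
The adreno_gpu.c hunk above guards against a scribbled completed-fence value using wraparound-safe sequence-number comparison. msm's fence_before()/fence_after() helpers (in msm_drv.h) use the same unsigned-subtraction idiom as the jiffies time_after() macros:

    static inline bool fence_after(uint32_t a, uint32_t b)
    {
            /* valid while a and b are less than 2^31 apart */
            return (int32_t)(a - b) > 0;
    }

    static inline bool fence_before(uint32_t a, uint32_t b)
    {
            return fence_after(b, a);
    }
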
+ */ + return drm_add_modes_noedid(connector, dpu_kms->catalog->caps->max_mixer_width, dev->mode_config.max_height); } diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index fb48c8c19ec3..17cb1fc78379 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -216,6 +216,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, encoder = mdp4_lcdc_encoder_init(dev, panel_node); if (IS_ERR(encoder)) { DRM_DEV_ERROR(dev->dev, "failed to construct LCDC encoder\n"); + of_node_put(panel_node); return PTR_ERR(encoder); } @@ -225,6 +226,7 @@ static int mdp4_modeset_init_intf(struct mdp4_kms *mdp4_kms, connector = mdp4_lvds_connector_init(dev, panel_node, encoder); if (IS_ERR(connector)) { DRM_DEV_ERROR(dev->dev, "failed to initialize LVDS connector\n"); + of_node_put(panel_node); return PTR_ERR(connector); } diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index b7f5b8d3bbd6..703249384e7c 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1534,6 +1534,8 @@ end: return ret; } +static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl); + static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) { int ret = 0; @@ -1557,7 +1559,7 @@ static int dp_ctrl_process_phy_test_request(struct dp_ctrl_private *ctrl) ret = dp_ctrl_on_link(&ctrl->dp_ctrl); if (!ret) - ret = dp_ctrl_on_stream(&ctrl->dp_ctrl); + ret = dp_ctrl_on_stream_phy_test_report(&ctrl->dp_ctrl); else DRM_ERROR("failed to enable DP link controller\n"); @@ -1813,7 +1815,27 @@ static int dp_ctrl_link_retrain(struct dp_ctrl_private *ctrl) return dp_ctrl_setup_main_link(ctrl, &training_step); } -int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl) +static int dp_ctrl_on_stream_phy_test_report(struct dp_ctrl *dp_ctrl) +{ + int ret; + struct dp_ctrl_private *ctrl; + + ctrl = container_of(dp_ctrl, struct dp_ctrl_private, dp_ctrl); + + ctrl->dp_ctrl.pixel_rate = ctrl->panel->dp_mode.drm_mode.clock; + + ret = dp_ctrl_enable_stream_clocks(ctrl); + if (ret) { + DRM_ERROR("Failed to start pixel clocks. 
ret=%d\n", ret); + return ret; + } + + dp_ctrl_send_phy_test_pattern(ctrl); + + return 0; +} + +int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train) { int ret = 0; bool mainlink_ready = false; @@ -1849,12 +1871,7 @@ int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl) goto end; } - if (ctrl->link->sink_request & DP_TEST_LINK_PHY_TEST_PATTERN) { - dp_ctrl_send_phy_test_pattern(ctrl); - return 0; - } - - if (!dp_ctrl_channel_eq_ok(ctrl)) + if (force_link_train || !dp_ctrl_channel_eq_ok(ctrl)) dp_ctrl_link_retrain(ctrl); /* stop txing train pattern to end link training */ diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.h b/drivers/gpu/drm/msm/dp/dp_ctrl.h index 0745fde01b45..b563e2e3bfe5 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.h +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.h @@ -21,7 +21,7 @@ struct dp_ctrl { }; int dp_ctrl_on_link(struct dp_ctrl *dp_ctrl); -int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl); +int dp_ctrl_on_stream(struct dp_ctrl *dp_ctrl, bool force_link_train); int dp_ctrl_off_link_stream(struct dp_ctrl *dp_ctrl); int dp_ctrl_off_link(struct dp_ctrl *dp_ctrl); int dp_ctrl_off(struct dp_ctrl *dp_ctrl); diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index bce77935394f..ec26855079c1 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -309,7 +309,8 @@ static void dp_display_unbind(struct device *dev, struct device *master, struct msm_drm_private *priv = dev_get_drvdata(master); /* disable all HPD interrupts */ - dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); + if (dp->core_initialized) + dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_INT_MASK, false); kthread_stop(dp->ev_tsk); @@ -872,7 +873,7 @@ static int dp_display_enable(struct dp_display_private *dp, u32 data) return 0; } - rc = dp_ctrl_on_stream(dp->ctrl); + rc = dp_ctrl_on_stream(dp->ctrl, data); if (!rc) dp_display->power_on = true; @@ -1659,6 +1660,7 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge) int rc = 0; struct dp_display_private *dp_display; u32 state; + bool force_link_train = false; dp_display = container_of(dp, struct dp_display_private, dp_display); if (!dp_display->dp_mode.drm_mode.clock) { @@ -1693,10 +1695,12 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge) state = dp_display->hpd_state; - if (state == ST_DISPLAY_OFF) + if (state == ST_DISPLAY_OFF) { dp_display_host_phy_init(dp_display); + force_link_train = true; + } - dp_display_enable(dp_display, 0); + dp_display_enable(dp_display, force_link_train); rc = dp_display_post_enable(dp); if (rc) { @@ -1705,10 +1709,6 @@ void dp_bridge_enable(struct drm_bridge *drm_bridge) dp_display_unprepare(dp); } - /* manual kick off plug event to train link */ - if (state == ST_DISPLAY_OFF) - dp_add_event(dp_display, EV_IRQ_HPD_INT, 0, 0); - /* completed connection */ dp_display->hpd_state = ST_CONNECTED; diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 44485363f37a..14ab9a627d8b 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -964,7 +964,7 @@ static const struct drm_driver msm_driver = { .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, .gem_prime_import_sg_table = msm_gem_prime_import_sg_table, - .gem_prime_mmap = drm_gem_prime_mmap, + .gem_prime_mmap = msm_gem_prime_mmap, #ifdef CONFIG_DEBUG_FS .debugfs_init = msm_debugfs_init, #endif diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 
08388d742d65..099a67d10c3a 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -246,6 +246,7 @@ unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_t void msm_gem_shrinker_init(struct drm_device *dev); void msm_gem_shrinker_cleanup(struct drm_device *dev); +int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma); struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map); diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c index 3df255402a33..38e3323bc232 100644 --- a/drivers/gpu/drm/msm/msm_fence.c +++ b/drivers/gpu/drm/msm/msm_fence.c @@ -46,12 +46,14 @@ bool msm_fence_completed(struct msm_fence_context *fctx, uint32_t fence) (int32_t)(*fctx->fenceptr - fence) >= 0; } -/* called from workqueue */ +/* called from irq handler and workqueue (in recover path) */ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence) { - spin_lock(&fctx->spinlock); + unsigned long flags; + + spin_lock_irqsave(&fctx->spinlock, flags); fctx->completed_fence = max(fence, fctx->completed_fence); - spin_unlock(&fctx->spinlock); + spin_unlock_irqrestore(&fctx->spinlock, flags); } struct msm_fence { diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 97d5b4d8b9b0..7f92231785a0 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -439,14 +439,12 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma) return ret; } -void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma) +void msm_gem_unpin_locked(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); GEM_WARN_ON(!msm_gem_is_locked(obj)); - msm_gem_unpin_vma(vma); - msm_obj->pin_count--; GEM_WARN_ON(msm_obj->pin_count < 0); @@ -586,7 +584,8 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj, msm_gem_lock(obj); vma = lookup_vma(obj, aspace); if (!GEM_WARN_ON(!vma)) { - msm_gem_unpin_vma_locked(obj, vma); + msm_gem_unpin_vma(vma); + msm_gem_unpin_locked(obj); } msm_gem_unlock(obj); } diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index c75d3b879a53..6b7d5bb3b575 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -145,7 +145,7 @@ struct msm_gem_object { uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma); -void msm_gem_unpin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma); +void msm_gem_unpin_locked(struct drm_gem_object *obj); struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj, struct msm_gem_address_space *aspace); int msm_gem_get_iova(struct drm_gem_object *obj, @@ -377,10 +377,11 @@ struct msm_gem_submit { } *cmd; /* array of size nr_cmds */ struct { /* make sure these don't conflict w/ MSM_SUBMIT_BO_x */ -#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? */ -#define BO_LOCKED 0x4000 /* obj lock is held */ -#define BO_ACTIVE 0x2000 /* active refcnt is held */ -#define BO_PINNED 0x1000 /* obj is pinned and on active list */ +#define BO_VALID 0x8000 /* is current addr in cmdstream correct/valid? 
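
The msm_fence.c hunk above applies the standard rule for a spinlock that is now taken from hard-IRQ context as well as process context: every acquisition must disable local interrupts, otherwise the IRQ handler can deadlock spinning on a lock its own CPU already holds. The safe-in-any-context form:

    unsigned long flags;

    spin_lock_irqsave(&fctx->spinlock, flags);
    fctx->completed_fence = max(fence, fctx->completed_fence);
    spin_unlock_irqrestore(&fctx->spinlock, flags);
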
*/ +#define BO_LOCKED 0x4000 /* obj lock is held */ +#define BO_ACTIVE 0x2000 /* active refcnt is held */ +#define BO_OBJ_PINNED 0x1000 /* obj (pages) is pinned and on active list */ +#define BO_VMA_PINNED 0x0800 /* vma (virtual address) is pinned */ uint32_t flags; union { struct msm_gem_object *obj; diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 94ab705e9b8a..dcc8a573bc76 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -11,6 +11,21 @@ #include "msm_drv.h" #include "msm_gem.h" +int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) +{ + int ret; + + /* Ensure the mmap offset is initialized. We lazily initialize it, + * so if it has not been first mmap'd directly as a GEM object, the + * mmap offset will not be already initialized. + */ + ret = drm_gem_create_mmap_offset(obj); + if (ret) + return ret; + + return drm_gem_prime_mmap(obj, vma); +} + struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 80975229b4de..3c3a0cfade36 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -232,8 +232,11 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i, */ submit->bos[i].flags &= ~cleanup_flags; - if (flags & BO_PINNED) - msm_gem_unpin_vma_locked(obj, submit->bos[i].vma); + if (flags & BO_VMA_PINNED) + msm_gem_unpin_vma(submit->bos[i].vma); + + if (flags & BO_OBJ_PINNED) + msm_gem_unpin_locked(obj); if (flags & BO_ACTIVE) msm_gem_active_put(obj); @@ -244,7 +247,9 @@ static void submit_cleanup_bo(struct msm_gem_submit *submit, int i, static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i) { - submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE | BO_LOCKED); + unsigned cleanup_flags = BO_VMA_PINNED | BO_OBJ_PINNED | + BO_ACTIVE | BO_LOCKED; + submit_cleanup_bo(submit, i, cleanup_flags); if (!(submit->bos[i].flags & BO_VALID)) submit->bos[i].iova = 0; @@ -375,7 +380,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit) if (ret) break; - submit->bos[i].flags |= BO_PINNED; + submit->bos[i].flags |= BO_OBJ_PINNED | BO_VMA_PINNED; submit->bos[i].vma = vma; if (vma->iova == submit->bos[i].iova) { @@ -511,7 +516,7 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool error) unsigned i; if (error) - cleanup_flags |= BO_PINNED | BO_ACTIVE; + cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED | BO_ACTIVE; for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; @@ -529,7 +534,8 @@ void msm_submit_retire(struct msm_gem_submit *submit) struct drm_gem_object *obj = &submit->bos[i].obj->base; msm_gem_lock(obj); - submit_cleanup_bo(submit, i, BO_PINNED | BO_ACTIVE); + /* Note, VMA already fence-unpinned before submit: */ + submit_cleanup_bo(submit, i, BO_OBJ_PINNED | BO_ACTIVE); msm_gem_unlock(obj); drm_gem_object_put(obj); } diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 3c1dc9241831..c471aebcdbab 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -62,8 +62,7 @@ void msm_gem_purge_vma(struct msm_gem_address_space *aspace, unsigned size = vma->node.size; /* Print a message if we try to purge a vma in use */ - if (GEM_WARN_ON(msm_gem_vma_inuse(vma))) - return; + GEM_WARN_ON(msm_gem_vma_inuse(vma)); /* Don't do anything if the memory 
isn't mapped */ if (!vma->mapped) @@ -128,8 +127,7 @@ msm_gem_map_vma(struct msm_gem_address_space *aspace, void msm_gem_close_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma) { - if (GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped)) - return; + GEM_WARN_ON(msm_gem_vma_inuse(vma) || vma->mapped); spin_lock(&aspace->lock); if (vma->iova) diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index eb8a6663f309..c8cd9bfa3eeb 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -164,24 +164,6 @@ int msm_gpu_hw_init(struct msm_gpu *gpu) return ret; } -static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring, - uint32_t fence) -{ - struct msm_gem_submit *submit; - unsigned long flags; - - spin_lock_irqsave(&ring->submit_lock, flags); - list_for_each_entry(submit, &ring->submits, node) { - if (fence_after(submit->seqno, fence)) - break; - - msm_update_fence(submit->ring->fctx, - submit->hw_fence->seqno); - dma_fence_signal(submit->hw_fence); - } - spin_unlock_irqrestore(&ring->submit_lock, flags); -} - #ifdef CONFIG_DEV_COREDUMP static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset, size_t count, void *data, size_t datalen) @@ -436,9 +418,9 @@ static void recover_worker(struct kthread_work *work) * one more to clear the faulting submit */ if (ring == cur_ring) - fence++; + ring->memptrs->fence = ++fence; - update_fences(gpu, ring, fence); + msm_update_fence(ring->fctx, fence); } if (msm_gpu_active(gpu)) { @@ -672,7 +654,6 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, msm_submit_retire(submit); pm_runtime_mark_last_busy(&gpu->pdev->dev); - pm_runtime_put_autosuspend(&gpu->pdev->dev); spin_lock_irqsave(&ring->submit_lock, flags); list_del(&submit->node); @@ -686,6 +667,8 @@ static void retire_submit(struct msm_gpu *gpu, struct msm_ringbuffer *ring, msm_devfreq_idle(gpu); mutex_unlock(&gpu->active_lock); + pm_runtime_put_autosuspend(&gpu->pdev->dev); + msm_gem_submit_put(submit); } @@ -735,7 +718,7 @@ void msm_gpu_retire(struct msm_gpu *gpu) int i; for (i = 0; i < gpu->nr_rings; i++) - update_fences(gpu, gpu->rb[i], gpu->rb[i]->memptrs->fence); + msm_update_fence(gpu->rb[i]->fctx, gpu->rb[i]->memptrs->fence); kthread_queue_work(gpu->worker, &gpu->retire_work); update_sw_cntrs(gpu); diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index bcaddbba564d..a54ed354578b 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -58,7 +58,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, u64 addr = iova; unsigned int i; - for_each_sg(sgt->sgl, sg, sgt->nents, i) { + for_each_sgtable_sg(sgt, sg, i) { size_t size = sg->length; phys_addr_t phys = sg_phys(sg); diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index 43066320ff8c..56eecb4a72dc 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -25,7 +25,7 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job) msm_gem_lock(obj); msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx); - submit->bos[i].flags &= ~BO_PINNED; + submit->bos[i].flags &= ~BO_VMA_PINNED; msm_gem_unlock(obj); } diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 7fcbc2a5b6cd..087e69b98d06 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -233,6 +233,7 @@ static int panfrost_ioctl_submit(struct 
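
The msm_iommu.c one-liner above fixes a classic sg_table pitfall: after DMA mapping, sgt->nents counts the (possibly concatenated) DMA segments while the CPU pages remain described by sgt->orig_nents, so walking physical pages with nents can silently skip entries. The sgtable helpers encode the right count for each view (handlers hypothetical):

    struct scatterlist *sg;
    unsigned int i;

    /* CPU/physical view - iterates sgt->orig_nents entries */
    for_each_sgtable_sg(sgt, sg, i)
            handle_phys(sg_phys(sg), sg->length);

    /* DMA view - iterates sgt->nents mapped segments */
    for_each_sgtable_dma_sg(sgt, sg, i)
            handle_dma(sg_dma_address(sg), sg_dma_len(sg));
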
drm_device *dev, void *data, struct drm_file *file) { struct panfrost_device *pfdev = dev->dev_private; + struct panfrost_file_priv *file_priv = file->driver_priv; struct drm_panfrost_submit *args = data; struct drm_syncobj *sync_out = NULL; struct panfrost_job *job; @@ -262,12 +263,12 @@ static int panfrost_ioctl_submit(struct drm_device *dev, void *data, job->jc = args->jc; job->requirements = args->requirements; job->flush_id = panfrost_gpu_get_latest_flush_id(pfdev); - job->file_priv = file->driver_priv; + job->mmu = file_priv->mmu; slot = panfrost_job_get_slot(job); ret = drm_sched_job_init(&job->base, - &job->file_priv->sched_entity[slot], + &file_priv->sched_entity[slot], NULL); if (ret) goto out_put_job; diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index fda5871aebe3..7c4208476fbd 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -201,7 +201,7 @@ static void panfrost_job_hw_submit(struct panfrost_job *job, int js) return; } - cfg = panfrost_mmu_as_get(pfdev, job->file_priv->mmu); + cfg = panfrost_mmu_as_get(pfdev, job->mmu); job_write(pfdev, JS_HEAD_NEXT_LO(js), lower_32_bits(jc_head)); job_write(pfdev, JS_HEAD_NEXT_HI(js), upper_32_bits(jc_head)); @@ -435,7 +435,7 @@ static void panfrost_job_handle_err(struct panfrost_device *pfdev, job->jc = 0; } - panfrost_mmu_as_put(pfdev, job->file_priv->mmu); + panfrost_mmu_as_put(pfdev, job->mmu); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); if (signal_fence) @@ -456,7 +456,7 @@ static void panfrost_job_handle_done(struct panfrost_device *pfdev, * happen when we receive the DONE interrupt while doing a GPU reset). */ job->jc = 0; - panfrost_mmu_as_put(pfdev, job->file_priv->mmu); + panfrost_mmu_as_put(pfdev, job->mmu); panfrost_devfreq_record_idle(&pfdev->pfdevfreq); dma_fence_signal_locked(job->done_fence); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 77e6d0e6f612..8becc1ba0eb9 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -17,7 +17,7 @@ struct panfrost_job { struct kref refcount; struct panfrost_device *pfdev; - struct panfrost_file_priv *file_priv; + struct panfrost_mmu *mmu; /* Fence to be signaled by IRQ handler when the job is complete. */ struct dma_fence *done_fence; diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index 275f7e4a03ae..6eb1aabdb161 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -7,6 +7,7 @@ */ #include <linux/component.h> +#include <linux/dma-mapping.h> #include <linux/kfifo.h> #include <linux/module.h> #include <linux/of_graph.h> @@ -73,7 +74,6 @@ static int sun4i_drv_bind(struct device *dev) goto free_drm; } - dev_set_drvdata(dev, drm); drm->dev_private = drv; INIT_LIST_HEAD(&drv->frontend_list); INIT_LIST_HEAD(&drv->engine_list); @@ -114,6 +114,8 @@ static int sun4i_drv_bind(struct device *dev) drm_fbdev_generic_setup(drm, 32); + dev_set_drvdata(dev, drm); + return 0; finish_poll: @@ -130,6 +132,7 @@ static void sun4i_drv_unbind(struct device *dev) { struct drm_device *drm = dev_get_drvdata(dev); + dev_set_drvdata(dev, NULL); drm_dev_unregister(drm); drm_kms_helper_poll_fini(drm); drm_atomic_helper_shutdown(drm); @@ -367,6 +370,13 @@ static int sun4i_drv_probe(struct platform_device *pdev) INIT_KFIFO(list.fifo); + /* + * DE2 and DE3 cores actually support 40-bit addresses, but + * the driver does not.
+ */ + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + dma_set_max_seg_size(&pdev->dev, UINT_MAX); + for (i = 0;; i++) { struct device_node *pipeline = of_parse_phandle(np, "allwinner,pipelines", diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c index 6d43080791a0..85fb9e800ddf 100644 --- a/drivers/gpu/drm/sun4i/sun4i_layer.c +++ b/drivers/gpu/drm/sun4i/sun4i_layer.c @@ -117,7 +117,7 @@ static bool sun4i_layer_format_mod_supported(struct drm_plane *plane, struct sun4i_layer *layer = plane_to_sun4i_layer(plane); if (IS_ERR_OR_NULL(layer->backend->frontend)) - sun4i_backend_format_is_supported(format, modifier); + return sun4i_backend_format_is_supported(format, modifier); return sun4i_backend_format_is_supported(format, modifier) || sun4i_frontend_format_is_supported(format, modifier); diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c index a8d75fd7e9f4..477cb6985b4d 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c @@ -93,34 +93,10 @@ crtcs_exit: return crtcs; } -static int sun8i_dw_hdmi_find_connector_pdev(struct device *dev, - struct platform_device **pdev_out) -{ - struct platform_device *pdev; - struct device_node *remote; - - remote = of_graph_get_remote_node(dev->of_node, 1, -1); - if (!remote) - return -ENODEV; - - if (!of_device_is_compatible(remote, "hdmi-connector")) { - of_node_put(remote); - return -ENODEV; - } - - pdev = of_find_device_by_node(remote); - of_node_put(remote); - if (!pdev) - return -ENODEV; - - *pdev_out = pdev; - return 0; -} - static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, void *data) { - struct platform_device *pdev = to_platform_device(dev), *connector_pdev; + struct platform_device *pdev = to_platform_device(dev); struct dw_hdmi_plat_data *plat_data; struct drm_device *drm = data; struct device_node *phy_node; @@ -167,30 +143,16 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master, return dev_err_probe(dev, PTR_ERR(hdmi->regulator), "Couldn't get regulator\n"); - ret = sun8i_dw_hdmi_find_connector_pdev(dev, &connector_pdev); - if (!ret) { - hdmi->ddc_en = gpiod_get_optional(&connector_pdev->dev, - "ddc-en", GPIOD_OUT_HIGH); - platform_device_put(connector_pdev); - - if (IS_ERR(hdmi->ddc_en)) { - dev_err(dev, "Couldn't get ddc-en gpio\n"); - return PTR_ERR(hdmi->ddc_en); - } - } - ret = regulator_enable(hdmi->regulator); if (ret) { dev_err(dev, "Failed to enable regulator\n"); - goto err_unref_ddc_en; + return ret; } - gpiod_set_value(hdmi->ddc_en, 1); - ret = reset_control_deassert(hdmi->rst_ctrl); if (ret) { dev_err(dev, "Could not deassert ctrl reset control\n"); - goto err_disable_ddc_en; + goto err_disable_regulator; } ret = clk_prepare_enable(hdmi->clk_tmds); @@ -245,12 +207,8 @@ err_disable_clk_tmds: clk_disable_unprepare(hdmi->clk_tmds); err_assert_ctrl_reset: reset_control_assert(hdmi->rst_ctrl); -err_disable_ddc_en: - gpiod_set_value(hdmi->ddc_en, 0); +err_disable_regulator: regulator_disable(hdmi->regulator); -err_unref_ddc_en: - if (hdmi->ddc_en) - gpiod_put(hdmi->ddc_en); return ret; } @@ -264,11 +222,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master, sun8i_hdmi_phy_deinit(hdmi->phy); clk_disable_unprepare(hdmi->clk_tmds); reset_control_assert(hdmi->rst_ctrl); - gpiod_set_value(hdmi->ddc_en, 0); regulator_disable(hdmi->regulator); - - if (hdmi->ddc_en) - gpiod_put(hdmi->ddc_en); } static const struct component_ops sun8i_dw_hdmi_ops = { 
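The sun8i_dw_hdmi hunks above delete the hand-rolled ddc-en GPIO lookup, which also reduces the bind error path to a plain acquire-in-order, release-in-reverse goto ladder. A condensed sketch of the resulting flow, assembled from the hunks; the function name and the elided setup between the steps are illustrative, not the literal post-patch function:

static int sun8i_dw_hdmi_bind_sketch(struct sun8i_dw_hdmi *hdmi)
{
	int ret;

	ret = regulator_enable(hdmi->regulator);
	if (ret)
		return ret;			/* nothing acquired yet */

	ret = reset_control_deassert(hdmi->rst_ctrl);
	if (ret)
		goto err_disable_regulator;

	ret = clk_prepare_enable(hdmi->clk_tmds);
	if (ret)
		goto err_assert_ctrl_reset;

	/* ... PHY init and dw_hdmi_bind() follow in the real function ... */

	return 0;

err_assert_ctrl_reset:
	reset_control_assert(hdmi->rst_ctrl);
err_disable_regulator:
	regulator_disable(hdmi->regulator);
	return ret;
}

With the GPIO gone, the err_unref_ddc_en and err_disable_ddc_en labels disappear entirely, so each remaining label undoes exactly one acquisition.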
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h index bffe1b9cd3dc..9ad09522947a 100644 --- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h +++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h @@ -9,7 +9,6 @@ #include <drm/bridge/dw_hdmi.h> #include <drm/drm_encoder.h> #include <linux/clk.h> -#include <linux/gpio/consumer.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> @@ -193,7 +192,6 @@ struct sun8i_dw_hdmi { struct regulator *regulator; const struct sun8i_dw_hdmi_quirks *quirks; struct reset_control *rst_ctrl; - struct gpio_desc *ddc_en; }; extern struct platform_driver sun8i_hdmi_phy_driver; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 75d308ec173d..406e9c324e76 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -109,11 +109,11 @@ void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo, return; spin_lock(&bo->bdev->lru_lock); - if (bo->bulk_move && bo->resource) - ttm_lru_bulk_move_del(bo->bulk_move, bo->resource); + if (bo->resource) + ttm_resource_del_bulk_move(bo->resource, bo); bo->bulk_move = bulk; - if (bo->bulk_move && bo->resource) - ttm_lru_bulk_move_add(bo->bulk_move, bo->resource); + if (bo->resource) + ttm_resource_add_bulk_move(bo->resource, bo); spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_bo_set_bulk_move); @@ -689,8 +689,11 @@ void ttm_bo_pin(struct ttm_buffer_object *bo) { dma_resv_assert_held(bo->base.resv); WARN_ON_ONCE(!kref_read(&bo->kref)); - if (!(bo->pin_count++) && bo->bulk_move && bo->resource) - ttm_lru_bulk_move_del(bo->bulk_move, bo->resource); + spin_lock(&bo->bdev->lru_lock); + if (bo->resource) + ttm_resource_del_bulk_move(bo->resource, bo); + ++bo->pin_count; + spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_bo_pin); @@ -707,8 +710,11 @@ void ttm_bo_unpin(struct ttm_buffer_object *bo) if (WARN_ON_ONCE(!bo->pin_count)) return; - if (!(--bo->pin_count) && bo->bulk_move && bo->resource) - ttm_lru_bulk_move_add(bo->bulk_move, bo->resource); + spin_lock(&bo->bdev->lru_lock); + --bo->pin_count; + if (bo->resource) + ttm_resource_add_bulk_move(bo->resource, bo); + spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_bo_unpin); diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index a0562ab386f5..e7147e304637 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -156,8 +156,12 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, ttm_resource_manager_for_each_res(man, &cursor, res) { struct ttm_buffer_object *bo = res->bo; - uint32_t num_pages = PFN_UP(bo->base.size); + uint32_t num_pages; + if (!bo) + continue; + + num_pages = PFN_UP(bo->base.size); ret = ttm_bo_swapout(bo, ctx, gfp_flags); /* ttm_bo_swapout has dropped the lru_lock */ if (!ret) diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index 65889b3caf50..20f9adcc3235 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -91,8 +91,8 @@ static void ttm_lru_bulk_move_pos_tail(struct ttm_lru_bulk_move_pos *pos, } /* Add the resource to a bulk_move cursor */ -void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk, - struct ttm_resource *res) +static void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk, + struct ttm_resource *res) { struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res); @@ -105,8 +105,8 @@ void ttm_lru_bulk_move_add(struct ttm_lru_bulk_move *bulk, } /* Remove 
the resource from a bulk_move range */ -void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, - struct ttm_resource *res) +static void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, + struct ttm_resource *res) { struct ttm_lru_bulk_move_pos *pos = ttm_lru_bulk_move_pos(bulk, res); @@ -122,6 +122,22 @@ void ttm_lru_bulk_move_del(struct ttm_lru_bulk_move *bulk, } } +/* Add the resource to a bulk move if the BO is configured for it */ +void ttm_resource_add_bulk_move(struct ttm_resource *res, + struct ttm_buffer_object *bo) +{ + if (bo->bulk_move && !bo->pin_count) + ttm_lru_bulk_move_add(bo->bulk_move, res); +} + +/* Remove the resource from a bulk move if the BO is configured for it */ +void ttm_resource_del_bulk_move(struct ttm_resource *res, + struct ttm_buffer_object *bo) +{ + if (bo->bulk_move && !bo->pin_count) + ttm_lru_bulk_move_del(bo->bulk_move, res); +} + /* Move a resource to the LRU or bulk tail */ void ttm_resource_move_to_lru_tail(struct ttm_resource *res) { @@ -169,15 +185,14 @@ void ttm_resource_init(struct ttm_buffer_object *bo, res->bus.is_iomem = false; res->bus.caching = ttm_cached; res->bo = bo; - INIT_LIST_HEAD(&res->lru); man = ttm_manager_type(bo->bdev, place->mem_type); spin_lock(&bo->bdev->lru_lock); - man->usage += res->num_pages << PAGE_SHIFT; - if (bo->bulk_move) - ttm_lru_bulk_move_add(bo->bulk_move, res); + if (bo->pin_count) + list_add_tail(&res->lru, &bo->bdev->pinned); else - ttm_resource_move_to_lru_tail(res); + list_add_tail(&res->lru, &man->lru[bo->priority]); + man->usage += res->num_pages << PAGE_SHIFT; spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_resource_init); @@ -210,8 +225,16 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo, { struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, place->mem_type); + int ret; + + ret = man->func->alloc(man, bo, place, res_ptr); + if (ret) + return ret; - return man->func->alloc(man, bo, place, res_ptr); + spin_lock(&bo->bdev->lru_lock); + ttm_resource_add_bulk_move(*res_ptr, bo); + spin_unlock(&bo->bdev->lru_lock); + return 0; } void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res) @@ -221,12 +244,9 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource **res) if (!*res) return; - if (bo->bulk_move) { - spin_lock(&bo->bdev->lru_lock); - ttm_lru_bulk_move_del(bo->bulk_move, *res); - spin_unlock(&bo->bdev->lru_lock); - } - + spin_lock(&bo->bdev->lru_lock); + ttm_resource_del_bulk_move(*res, bo); + spin_unlock(&bo->bdev->lru_lock); man = ttm_manager_type(bo->bdev, (*res)->mem_type); man->func->free(man, *res); *res = NULL; diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 49c0f2ac868b..b8d856312846 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -248,6 +248,9 @@ void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + mutex_lock(&vc4->purgeable.lock); list_add_tail(&bo->size_head, &vc4->purgeable.list); vc4->purgeable.num++; @@ -259,6 +262,9 @@ static void vc4_bo_remove_from_purgeable_pool_locked(struct vc4_bo *bo) { struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + /* list_del_init() is used here because the caller might release * the purgeable lock in order to acquire the madv one and update the * madv status. 
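Taken together, the ttm_bo.c and ttm_resource.c hunks above pin down one invariant: a resource is tracked in its BO's bulk move only while the BO is unpinned, and every add/del decision happens under bdev->lru_lock. A condensed restatement of the resulting logic (ttm_bo_pin_locked_sketch is an illustrative name, not a function from the patch):

/* Both helpers expect bo->bdev->lru_lock to be held by the caller. */
void ttm_resource_add_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !bo->pin_count)
		ttm_lru_bulk_move_add(bo->bulk_move, res);
}

void ttm_resource_del_bulk_move(struct ttm_resource *res,
				struct ttm_buffer_object *bo)
{
	if (bo->bulk_move && !bo->pin_count)
		ttm_lru_bulk_move_del(bo->bulk_move, res);
}

/* Pinning drops the resource out of the bulk move before the count is
 * raised; once pin_count is non-zero, both helpers become no-ops for
 * this BO until the last unpin. */
static void ttm_bo_pin_locked_sketch(struct ttm_buffer_object *bo)
{
	spin_lock(&bo->bdev->lru_lock);
	if (bo->resource)
		ttm_resource_del_bulk_move(bo->resource, bo);
	++bo->pin_count;
	spin_unlock(&bo->bdev->lru_lock);
}

This is also why ttm_lru_bulk_move_add() and ttm_lru_bulk_move_del() can become static: no caller outside ttm_resource.c has to reason about the pin count anymore.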
@@ -387,6 +393,9 @@ struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_bo *bo; + if (WARN_ON_ONCE(vc4->is_vc5)) + return ERR_PTR(-ENODEV); + bo = kzalloc(sizeof(*bo), GFP_KERNEL); if (!bo) return ERR_PTR(-ENOMEM); @@ -413,6 +422,9 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size, struct drm_gem_cma_object *cma_obj; struct vc4_bo *bo; + if (WARN_ON_ONCE(vc4->is_vc5)) + return ERR_PTR(-ENODEV); + if (size == 0) return ERR_PTR(-EINVAL); @@ -471,19 +483,20 @@ struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t unaligned_size, return bo; } -int vc4_dumb_create(struct drm_file *file_priv, - struct drm_device *dev, - struct drm_mode_create_dumb *args) +int vc4_bo_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args) { - int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_bo *bo = NULL; int ret; - if (args->pitch < min_pitch) - args->pitch = min_pitch; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; - if (args->size < args->pitch * args->height) - args->size = args->pitch * args->height; + ret = vc4_dumb_fixup_args(args); + if (ret) + return ret; bo = vc4_bo_create(dev, args->size, false, VC4_BO_TYPE_DUMB); if (IS_ERR(bo)) @@ -601,8 +614,12 @@ static void vc4_bo_cache_time_work(struct work_struct *work) int vc4_bo_inc_usecnt(struct vc4_bo *bo) { + struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + /* Fast path: if the BO is already retained by someone, no need to * check the madv status. */ @@ -637,6 +654,11 @@ int vc4_bo_inc_usecnt(struct vc4_bo *bo) void vc4_bo_dec_usecnt(struct vc4_bo *bo) { + struct vc4_dev *vc4 = to_vc4_dev(bo->base.base.dev); + + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + /* Fast path: if the BO is still retained by someone, no need to test * the madv value. 
*/ @@ -756,6 +778,9 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo = NULL; int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + ret = vc4_grab_bin_bo(vc4, vc4file); if (ret) return ret; @@ -779,9 +804,13 @@ int vc4_create_bo_ioctl(struct drm_device *dev, void *data, int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_mmap_bo *args = data; struct drm_gem_object *gem_obj; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { DRM_DEBUG("Failed to look up GEM BO %d\n", args->handle); @@ -805,6 +834,9 @@ vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, struct vc4_bo *bo = NULL; int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (args->size == 0) return -EINVAL; @@ -875,11 +907,15 @@ fail: int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_set_tiling *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo; bool t_format; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (args->flags != 0) return -EINVAL; @@ -918,10 +954,14 @@ int vc4_set_tiling_ioctl(struct drm_device *dev, void *data, int vc4_get_tiling_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_get_tiling *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (args->flags != 0 || args->modifier != 0) return -EINVAL; @@ -948,6 +988,9 @@ int vc4_bo_cache_init(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); int i; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + /* Create the initial set of BO labels that the kernel will * use. This lets us avoid a bunch of string reallocation in * the kernel's draw and BO allocation paths. @@ -1007,6 +1050,9 @@ int vc4_label_bo_ioctl(struct drm_device *dev, void *data, struct drm_gem_object *gem_obj; int ret = 0, label; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (!args->len) return -EINVAL; diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c index 59b20c8f132b..9355213dc883 100644 --- a/drivers/gpu/drm/vc4/vc4_crtc.c +++ b/drivers/gpu/drm/vc4/vc4_crtc.c @@ -256,7 +256,7 @@ static u32 vc4_get_fifo_full_level(struct vc4_crtc *vc4_crtc, u32 format) * Removing 1 from the FIFO full level however * seems to completely remove that issue. */ - if (!vc4->hvs->hvs5) + if (!vc4->is_vc5) return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX - 1; return fifo_len_bytes - 3 * HVS_FIFO_LATENCY_PIX; @@ -389,7 +389,7 @@ static void vc4_crtc_config_pv(struct drm_crtc *crtc, struct drm_encoder *encode if (is_dsi) CRTC_WRITE(PV_HACT_ACT, mode->hdisplay * pixel_rep); - if (vc4->hvs->hvs5) + if (vc4->is_vc5) CRTC_WRITE(PV_MUX_CFG, VC4_SET_FIELD(PV_MUX_CFG_RGB_PIXEL_MUX_MODE_NO_SWAP, PV_MUX_CFG_RGB_PIXEL_MUX_MODE)); @@ -775,17 +775,18 @@ struct vc4_async_flip_state { struct drm_framebuffer *old_fb; struct drm_pending_vblank_event *event; - struct vc4_seqno_cb cb; + union { + struct dma_fence_cb fence; + struct vc4_seqno_cb seqno; + } cb; }; /* Called when the V3D execution for the BO being flipped to is done, so that * we can actually update the plane's address to point to it. 
*/ static void -vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) +vc4_async_page_flip_complete(struct vc4_async_flip_state *flip_state) { - struct vc4_async_flip_state *flip_state = - container_of(cb, struct vc4_async_flip_state, cb); struct drm_crtc *crtc = flip_state->crtc; struct drm_device *dev = crtc->dev; struct drm_plane *plane = crtc->primary; @@ -802,59 +803,96 @@ vc4_async_page_flip_complete(struct vc4_seqno_cb *cb) drm_crtc_vblank_put(crtc); drm_framebuffer_put(flip_state->fb); - /* Decrement the BO usecnt in order to keep the inc/dec calls balanced - * when the planes are updated through the async update path. - * FIXME: we should move to generic async-page-flip when it's - * available, so that we can get rid of this hand-made cleanup_fb() - * logic. - */ - if (flip_state->old_fb) { - struct drm_gem_cma_object *cma_bo; - struct vc4_bo *bo; + if (flip_state->old_fb) + drm_framebuffer_put(flip_state->old_fb); + + kfree(flip_state); +} + +static void vc4_async_page_flip_seqno_complete(struct vc4_seqno_cb *cb) +{ + struct vc4_async_flip_state *flip_state = + container_of(cb, struct vc4_async_flip_state, cb.seqno); + struct vc4_bo *bo = NULL; - cma_bo = drm_fb_cma_get_gem_obj(flip_state->old_fb, 0); + if (flip_state->old_fb) { + struct drm_gem_cma_object *cma_bo = + drm_fb_cma_get_gem_obj(flip_state->old_fb, 0); bo = to_vc4_bo(&cma_bo->base); - vc4_bo_dec_usecnt(bo); - drm_framebuffer_put(flip_state->old_fb); } - kfree(flip_state); + vc4_async_page_flip_complete(flip_state); + + /* + * Decrement the BO usecnt in order to keep the inc/dec + * calls balanced when the planes are updated through + * the async update path. + * + * FIXME: we should move to generic async-page-flip when + * it's available, so that we can get rid of this + * hand-made cleanup_fb() logic. + */ + if (bo) + vc4_bo_dec_usecnt(bo); } -/* Implements async (non-vblank-synced) page flips. - * - * The page flip ioctl needs to return immediately, so we grab the - * modeset semaphore on the pipe, and queue the address update for - * when V3D is done with the BO being flipped to. - */ -static int vc4_async_page_flip(struct drm_crtc *crtc, - struct drm_framebuffer *fb, - struct drm_pending_vblank_event *event, - uint32_t flags) +static void vc4_async_page_flip_fence_complete(struct dma_fence *fence, + struct dma_fence_cb *cb) { - struct drm_device *dev = crtc->dev; - struct drm_plane *plane = crtc->primary; - int ret = 0; - struct vc4_async_flip_state *flip_state; + struct vc4_async_flip_state *flip_state = + container_of(cb, struct vc4_async_flip_state, cb.fence); + + vc4_async_page_flip_complete(flip_state); + dma_fence_put(fence); +} + +static int vc4_async_set_fence_cb(struct drm_device *dev, + struct vc4_async_flip_state *flip_state) +{ + struct drm_framebuffer *fb = flip_state->fb; struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0); - struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct dma_fence *fence; + int ret; - /* Increment the BO usecnt here, so that we never end up with an - * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the - * plane is later updated through the non-async path. - * FIXME: we should move to generic async-page-flip when it's - * available, so that we can get rid of this hand-made prepare_fb() - * logic. 
- */ - ret = vc4_bo_inc_usecnt(bo); + if (!vc4->is_vc5) { + struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); + + return vc4_queue_seqno_cb(dev, &flip_state->cb.seqno, bo->seqno, + vc4_async_page_flip_seqno_complete); + } + + ret = dma_resv_get_singleton(cma_bo->base.resv, DMA_RESV_USAGE_READ, &fence); if (ret) return ret; + /* If there's no fence, complete the page flip immediately */ + if (!fence) { + vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence); + return 0; + } + + /* If the fence has already been completed, complete the page flip */ + if (dma_fence_add_callback(fence, &flip_state->cb.fence, + vc4_async_page_flip_fence_complete)) + vc4_async_page_flip_fence_complete(fence, &flip_state->cb.fence); + + return 0; +} + +static int +vc4_async_page_flip_common(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags) +{ + struct drm_device *dev = crtc->dev; + struct drm_plane *plane = crtc->primary; + struct vc4_async_flip_state *flip_state; + flip_state = kzalloc(sizeof(*flip_state), GFP_KERNEL); - if (!flip_state) { - vc4_bo_dec_usecnt(bo); + if (!flip_state) return -ENOMEM; - } drm_framebuffer_get(fb); flip_state->fb = fb; @@ -881,23 +919,79 @@ static int vc4_async_page_flip(struct drm_crtc *crtc, */ drm_atomic_set_fb_for_plane(plane->state, fb); - vc4_queue_seqno_cb(dev, &flip_state->cb, bo->seqno, - vc4_async_page_flip_complete); + vc4_async_set_fence_cb(dev, flip_state); /* Driver takes ownership of state on successful async commit. */ return 0; } +/* Implements async (non-vblank-synced) page flips. + * + * The page flip ioctl needs to return immediately, so we grab the + * modeset semaphore on the pipe, and queue the address update for + * when V3D is done with the BO being flipped to. + */ +static int vc4_async_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags) +{ + struct drm_device *dev = crtc->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); + struct drm_gem_cma_object *cma_bo = drm_fb_cma_get_gem_obj(fb, 0); + struct vc4_bo *bo = to_vc4_bo(&cma_bo->base); + int ret; + + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + + /* + * Increment the BO usecnt here, so that we never end up with an + * unbalanced number of vc4_bo_{dec,inc}_usecnt() calls when the + * plane is later updated through the non-async path. + * + * FIXME: we should move to generic async-page-flip when + * it's available, so that we can get rid of this + * hand-made prepare_fb() logic. 
+ */ + ret = vc4_bo_inc_usecnt(bo); + if (ret) + return ret; + + ret = vc4_async_page_flip_common(crtc, fb, event, flags); + if (ret) { + vc4_bo_dec_usecnt(bo); + return ret; + } + + return 0; +} + +static int vc5_async_page_flip(struct drm_crtc *crtc, + struct drm_framebuffer *fb, + struct drm_pending_vblank_event *event, + uint32_t flags) +{ + return vc4_async_page_flip_common(crtc, fb, event, flags); +} + int vc4_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t flags, struct drm_modeset_acquire_ctx *ctx) { - if (flags & DRM_MODE_PAGE_FLIP_ASYNC) - return vc4_async_page_flip(crtc, fb, event, flags); - else + if (flags & DRM_MODE_PAGE_FLIP_ASYNC) { + struct drm_device *dev = crtc->dev; + struct vc4_dev *vc4 = to_vc4_dev(dev); + + if (vc4->is_vc5) + return vc5_async_page_flip(crtc, fb, event, flags); + else + return vc4_async_page_flip(crtc, fb, event, flags); + } else { return drm_atomic_helper_page_flip(crtc, fb, event, flags, ctx); + } } struct drm_crtc_state *vc4_crtc_duplicate_state(struct drm_crtc *crtc) @@ -1149,7 +1243,7 @@ int vc4_crtc_init(struct drm_device *drm, struct vc4_crtc *vc4_crtc, crtc_funcs, NULL); drm_crtc_helper_add(crtc, crtc_helper_funcs); - if (!vc4->hvs->hvs5) { + if (!vc4->is_vc5) { drm_mode_crtc_set_gamma_size(crtc, ARRAY_SIZE(vc4_crtc->lut_r)); drm_crtc_enable_color_mgmt(crtc, 0, false, crtc->gamma_size); diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index 162bc18e7497..0f0f0263e744 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -63,6 +63,32 @@ void __iomem *vc4_ioremap_regs(struct platform_device *pdev, int index) return map; } +int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args) +{ + int min_pitch = DIV_ROUND_UP(args->width * args->bpp, 8); + + if (args->pitch < min_pitch) + args->pitch = min_pitch; + + if (args->size < args->pitch * args->height) + args->size = args->pitch * args->height; + + return 0; +} + +static int vc5_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args) +{ + int ret; + + ret = vc4_dumb_fixup_args(args); + if (ret) + return ret; + + return drm_gem_cma_dumb_create_internal(file_priv, dev, args); +} + static int vc4_get_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { @@ -73,6 +99,9 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, if (args->pad != 0) return -EINVAL; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (!vc4->v3d) return -ENODEV; @@ -116,11 +145,16 @@ static int vc4_get_param_ioctl(struct drm_device *dev, void *data, static int vc4_open(struct drm_device *dev, struct drm_file *file) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_file *vc4file; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + vc4file = kzalloc(sizeof(*vc4file), GFP_KERNEL); if (!vc4file) return -ENOMEM; + vc4file->dev = vc4; vc4_perfmon_open_file(vc4file); file->driver_priv = vc4file; @@ -132,6 +166,9 @@ static void vc4_close(struct drm_device *dev, struct drm_file *file) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_file *vc4file = file->driver_priv; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + if (vc4file->bin_bo_used) vc4_v3d_bin_bo_put(vc4); @@ -160,7 +197,7 @@ static const struct drm_ioctl_desc vc4_drm_ioctls[] = { DRM_IOCTL_DEF_DRV(VC4_PERFMON_GET_VALUES, vc4_perfmon_get_values_ioctl, DRM_RENDER_ALLOW), }; -static struct drm_driver vc4_drm_driver = { +static const struct drm_driver vc4_drm_driver = { 
.driver_features = (DRIVER_MODESET | DRIVER_ATOMIC | DRIVER_GEM | @@ -175,7 +212,7 @@ static struct drm_driver vc4_drm_driver = { .gem_create_object = vc4_create_object, - DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_dumb_create), + DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc4_bo_dumb_create), .ioctls = vc4_drm_ioctls, .num_ioctls = ARRAY_SIZE(vc4_drm_ioctls), @@ -189,6 +226,27 @@ static struct drm_driver vc4_drm_driver = { .patchlevel = DRIVER_PATCHLEVEL, }; +static const struct drm_driver vc5_drm_driver = { + .driver_features = (DRIVER_MODESET | + DRIVER_ATOMIC | + DRIVER_GEM), + +#if defined(CONFIG_DEBUG_FS) + .debugfs_init = vc4_debugfs_init, +#endif + + DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(vc5_dumb_create), + + .fops = &vc4_drm_fops, + + .name = DRIVER_NAME, + .desc = DRIVER_DESC, + .date = DRIVER_DATE, + .major = DRIVER_MAJOR, + .minor = DRIVER_MINOR, + .patchlevel = DRIVER_PATCHLEVEL, +}; + static void vc4_match_add_drivers(struct device *dev, struct component_match **match, struct platform_driver *const *drivers, @@ -212,42 +270,49 @@ static void vc4_match_add_drivers(struct device *dev, static int vc4_drm_bind(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); + const struct drm_driver *driver; struct rpi_firmware *firmware = NULL; struct drm_device *drm; struct vc4_dev *vc4; struct device_node *node; struct drm_crtc *crtc; + bool is_vc5; int ret = 0; dev->coherent_dma_mask = DMA_BIT_MASK(32); - /* If VC4 V3D is missing, don't advertise render nodes. */ - node = of_find_matching_node_and_match(NULL, vc4_v3d_dt_match, NULL); - if (!node || !of_device_is_available(node)) - vc4_drm_driver.driver_features &= ~DRIVER_RENDER; - of_node_put(node); + is_vc5 = of_device_is_compatible(dev->of_node, "brcm,bcm2711-vc5"); + if (is_vc5) + driver = &vc5_drm_driver; + else + driver = &vc4_drm_driver; - vc4 = devm_drm_dev_alloc(dev, &vc4_drm_driver, struct vc4_dev, base); + vc4 = devm_drm_dev_alloc(dev, driver, struct vc4_dev, base); if (IS_ERR(vc4)) return PTR_ERR(vc4); + vc4->is_vc5 = is_vc5; drm = &vc4->base; platform_set_drvdata(pdev, drm); INIT_LIST_HEAD(&vc4->debugfs_list); - mutex_init(&vc4->bin_bo_lock); + if (!is_vc5) { + mutex_init(&vc4->bin_bo_lock); - ret = vc4_bo_cache_init(drm); - if (ret) - return ret; + ret = vc4_bo_cache_init(drm); + if (ret) + return ret; + } ret = drmm_mode_config_init(drm); if (ret) return ret; - ret = vc4_gem_init(drm); - if (ret) - return ret; + if (!is_vc5) { + ret = vc4_gem_init(drm); + if (ret) + return ret; + } node = of_find_compatible_node(NULL, NULL, "raspberrypi,bcm2835-firmware"); if (node) { @@ -258,7 +323,7 @@ static int vc4_drm_bind(struct device *dev) return -EPROBE_DEFER; } - ret = drm_aperture_remove_framebuffers(false, &vc4_drm_driver); + ret = drm_aperture_remove_framebuffers(false, driver); if (ret) return ret; diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h index 15e0c2ac3940..93fd55b9e99e 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.h +++ b/drivers/gpu/drm/vc4/vc4_drv.h @@ -48,6 +48,8 @@ enum vc4_kernel_bo_type { * done. This way, only events related to a specific job will be counted. */ struct vc4_perfmon { + struct vc4_dev *dev; + /* Tracks the number of users of the perfmon, when this counter reaches * zero the perfmon is destroyed. 
*/ @@ -74,6 +76,8 @@ struct vc4_perfmon { struct vc4_dev { struct drm_device base; + bool is_vc5; + unsigned int irq; struct vc4_hvs *hvs; @@ -316,6 +320,7 @@ struct vc4_v3d { }; struct vc4_hvs { + struct vc4_dev *vc4; struct platform_device *pdev; void __iomem *regs; u32 __iomem *dlist; @@ -333,9 +338,6 @@ struct vc4_hvs { struct drm_mm_node mitchell_netravali_filter; struct debugfs_regset32 regset; - - /* HVS version 5 flag, therefore requires updated dlist structures */ - bool hvs5; }; struct vc4_plane { @@ -580,6 +582,8 @@ to_vc4_crtc_state(struct drm_crtc_state *crtc_state) #define VC4_REG32(reg) { .name = #reg, .offset = reg } struct vc4_exec_info { + struct vc4_dev *dev; + /* Sequence number for this bin/render job. */ uint64_t seqno; @@ -701,6 +705,8 @@ struct vc4_exec_info { * released when the DRM file is closed should be placed here. */ struct vc4_file { + struct vc4_dev *dev; + struct { struct idr idr; struct mutex lock; @@ -814,9 +820,9 @@ struct vc4_validated_shader_info { struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size); struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size, bool from_cache, enum vc4_kernel_bo_type type); -int vc4_dumb_create(struct drm_file *file_priv, - struct drm_device *dev, - struct drm_mode_create_dumb *args); +int vc4_bo_dumb_create(struct drm_file *file_priv, + struct drm_device *dev, + struct drm_mode_create_dumb *args); int vc4_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data, @@ -885,6 +891,7 @@ static inline void vc4_debugfs_add_regset32(struct drm_device *drm, /* vc4_drv.c */ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index); +int vc4_dumb_fixup_args(struct drm_mode_create_dumb *args); /* vc4_dpi.c */ extern struct platform_driver vc4_dpi_driver; diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c index 9eaf304fc20d..fe10d9c3fff8 100644 --- a/drivers/gpu/drm/vc4/vc4_gem.c +++ b/drivers/gpu/drm/vc4/vc4_gem.c @@ -76,6 +76,9 @@ vc4_get_hang_state_ioctl(struct drm_device *dev, void *data, u32 i; int ret = 0; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (!vc4->v3d) { DRM_DEBUG("VC4_GET_HANG_STATE with no VC4 V3D probed\n"); return -ENODEV; @@ -386,6 +389,9 @@ vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno, uint64_t timeout_ns, unsigned long timeout_expire; DEFINE_WAIT(wait); + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (vc4->finished_seqno >= seqno) return 0; @@ -468,6 +474,9 @@ vc4_submit_next_bin_job(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_exec_info *exec; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + again: exec = vc4_first_bin_job(vc4); if (!exec) @@ -513,6 +522,9 @@ vc4_submit_next_render_job(struct drm_device *dev) if (!exec) return; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + /* A previous RCL may have written to one of our textures, and * our full cache flush at bin time may have occurred before * that RCL completed. 
Flush the texture cache now, but not @@ -531,6 +543,9 @@ vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec) struct vc4_dev *vc4 = to_vc4_dev(dev); bool was_empty = list_empty(&vc4->render_job_list); + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + list_move_tail(&exec->head, &vc4->render_job_list); if (was_empty) vc4_submit_next_render_job(dev); @@ -997,6 +1012,9 @@ vc4_job_handle_completed(struct vc4_dev *vc4) unsigned long irqflags; struct vc4_seqno_cb *cb, *cb_temp; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + spin_lock_irqsave(&vc4->job_lock, irqflags); while (!list_empty(&vc4->job_done_list)) { struct vc4_exec_info *exec = @@ -1033,6 +1051,9 @@ int vc4_queue_seqno_cb(struct drm_device *dev, struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long irqflags; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + cb->func = func; INIT_WORK(&cb->work, vc4_seqno_cb_work); @@ -1083,8 +1104,12 @@ int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_wait_seqno *args = data; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + return vc4_wait_for_seqno_ioctl_helper(dev, args->seqno, &args->timeout_ns); } @@ -1093,11 +1118,15 @@ int vc4_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; struct drm_vc4_wait_bo *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (args->pad != 0) return -EINVAL; @@ -1144,6 +1173,9 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, args->shader_rec_size, args->bo_handle_count); + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (!vc4->v3d) { DRM_DEBUG("VC4_SUBMIT_CL with no VC4 V3D probed\n"); return -ENODEV; @@ -1167,6 +1199,7 @@ vc4_submit_cl_ioctl(struct drm_device *dev, void *data, DRM_ERROR("malloc failure on exec struct\n"); return -ENOMEM; } + exec->dev = vc4; ret = vc4_v3d_pm_get(vc4); if (ret) { @@ -1276,6 +1309,9 @@ int vc4_gem_init(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + vc4->dma_fence_context = dma_fence_context_alloc(1); INIT_LIST_HEAD(&vc4->bin_job_list); @@ -1321,11 +1357,15 @@ static void vc4_gem_destroy(struct drm_device *dev, void *unused) int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_vc4_gem_madvise *args = data; struct drm_gem_object *gem_obj; struct vc4_bo *bo; int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + switch (args->madv) { case VC4_MADV_DONTNEED: case VC4_MADV_WILLNEED: diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 823d812f4982..ce9d16666d91 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1481,7 +1481,7 @@ vc4_hdmi_encoder_compute_mode_clock(const struct drm_display_mode *mode, unsigned int bpc, enum vc4_hdmi_output_format fmt) { - unsigned long long clock = mode->clock * 1000; + unsigned long long clock = mode->clock * 1000ULL; if (mode->flags & DRM_MODE_FLAG_DBLCLK) clock = clock * 2; diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c index 2a58fc421cf6..ba2c8e5a9b64 100644 --- a/drivers/gpu/drm/vc4/vc4_hvs.c +++ b/drivers/gpu/drm/vc4/vc4_hvs.c @@ -220,10 +220,11 @@ u8 vc4_hvs_get_fifo_frame_count(struct vc4_hvs *hvs, unsigned int fifo) int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, 
unsigned int output) { + struct vc4_dev *vc4 = hvs->vc4; u32 reg; int ret; - if (!hvs->hvs5) + if (!vc4->is_vc5) return output; switch (output) { @@ -273,6 +274,7 @@ int vc4_hvs_get_fifo_from_output(struct vc4_hvs *hvs, unsigned int output) static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc, struct drm_display_mode *mode, bool oneshot) { + struct vc4_dev *vc4 = hvs->vc4; struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc); struct vc4_crtc_state *vc4_crtc_state = to_vc4_crtc_state(crtc->state); unsigned int chan = vc4_crtc_state->assigned_channel; @@ -291,7 +293,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc, */ dispctrl = SCALER_DISPCTRLX_ENABLE; - if (!hvs->hvs5) + if (!vc4->is_vc5) dispctrl |= VC4_SET_FIELD(mode->hdisplay, SCALER_DISPCTRLX_WIDTH) | VC4_SET_FIELD(mode->vdisplay, @@ -312,7 +314,7 @@ static int vc4_hvs_init_channel(struct vc4_hvs *hvs, struct drm_crtc *crtc, HVS_WRITE(SCALER_DISPBKGNDX(chan), dispbkgndx | SCALER_DISPBKGND_AUTOHS | - ((!hvs->hvs5) ? SCALER_DISPBKGND_GAMMA : 0) | + ((!vc4->is_vc5) ? SCALER_DISPBKGND_GAMMA : 0) | (interlace ? SCALER_DISPBKGND_INTERLACE : 0)); /* Reload the LUT, since the SRAMs would have been disabled if @@ -617,11 +619,9 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) if (!hvs) return -ENOMEM; + hvs->vc4 = vc4; hvs->pdev = pdev; - if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm2711-hvs")) - hvs->hvs5 = true; - hvs->regs = vc4_ioremap_regs(pdev, 0); if (IS_ERR(hvs->regs)) return PTR_ERR(hvs->regs); @@ -630,7 +630,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) hvs->regset.regs = hvs_regs; hvs->regset.nregs = ARRAY_SIZE(hvs_regs); - if (hvs->hvs5) { + if (vc4->is_vc5) { hvs->core_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(hvs->core_clk)) { dev_err(&pdev->dev, "Couldn't get core clock\n"); @@ -644,7 +644,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) } } - if (!hvs->hvs5) + if (!vc4->is_vc5) hvs->dlist = hvs->regs + SCALER_DLIST_START; else hvs->dlist = hvs->regs + SCALER5_DLIST_START; @@ -665,7 +665,7 @@ static int vc4_hvs_bind(struct device *dev, struct device *master, void *data) * between planes when they don't overlap on the screen, but * for now we just allocate globally. 
*/ - if (!hvs->hvs5) + if (!vc4->is_vc5) /* 48k words of 2x12-bit pixels */ drm_mm_init(&hvs->lbm_mm, 0, 48 * 1024); else diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c index 4342fb43e8c1..2eacfb6773d2 100644 --- a/drivers/gpu/drm/vc4/vc4_irq.c +++ b/drivers/gpu/drm/vc4/vc4_irq.c @@ -265,6 +265,9 @@ vc4_irq_enable(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + if (!vc4->v3d) return; @@ -279,6 +282,9 @@ vc4_irq_disable(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + if (!vc4->v3d) return; @@ -296,8 +302,12 @@ vc4_irq_disable(struct drm_device *dev) int vc4_irq_install(struct drm_device *dev, int irq) { + struct vc4_dev *vc4 = to_vc4_dev(dev); int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (irq == IRQ_NOTCONNECTED) return -ENOTCONN; @@ -316,6 +326,9 @@ void vc4_irq_uninstall(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + vc4_irq_disable(dev); free_irq(vc4->irq, dev); } @@ -326,6 +339,9 @@ void vc4_irq_reset(struct drm_device *dev) struct vc4_dev *vc4 = to_vc4_dev(dev); unsigned long irqflags; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + /* Acknowledge any stale IRQs. */ V3D_WRITE(V3D_INTCTL, V3D_DRIVER_IRQS); diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index c169bd72e53b..893d831b24aa 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -393,7 +393,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) old_hvs_state->fifo_state[channel].pending_commit = NULL; } - if (vc4->hvs->hvs5) { + if (vc4->is_vc5) { unsigned long state_rate = max(old_hvs_state->core_clock_rate, new_hvs_state->core_clock_rate); unsigned long core_rate = max_t(unsigned long, @@ -412,7 +412,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) vc4_ctm_commit(vc4, state); - if (vc4->hvs->hvs5) + if (vc4->is_vc5) vc5_hvs_pv_muxing_commit(vc4, state); else vc4_hvs_pv_muxing_commit(vc4, state); @@ -430,7 +430,7 @@ static void vc4_atomic_commit_tail(struct drm_atomic_state *state) drm_atomic_helper_cleanup_planes(dev, state); - if (vc4->hvs->hvs5) { + if (vc4->is_vc5) { drm_dbg(dev, "Running the core clock at %lu Hz\n", new_hvs_state->core_clock_rate); @@ -479,8 +479,12 @@ static struct drm_framebuffer *vc4_fb_create(struct drm_device *dev, struct drm_file *file_priv, const struct drm_mode_fb_cmd2 *mode_cmd) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_mode_fb_cmd2 mode_cmd_local; + if (WARN_ON_ONCE(vc4->is_vc5)) + return ERR_PTR(-ENODEV); + /* If the user didn't specify a modifier, use the * vc4_set_tiling_ioctl() state for the BO. */ @@ -997,11 +1001,15 @@ static const struct drm_mode_config_funcs vc4_mode_funcs = { .fb_create = vc4_fb_create, }; +static const struct drm_mode_config_funcs vc5_mode_funcs = { + .atomic_check = vc4_atomic_check, + .atomic_commit = drm_atomic_helper_commit, + .fb_create = drm_gem_fb_create, +}; + int vc4_kms_load(struct drm_device *dev) { struct vc4_dev *vc4 = to_vc4_dev(dev); - bool is_vc5 = of_device_is_compatible(dev->dev->of_node, - "brcm,bcm2711-vc5"); int ret; /* @@ -1009,7 +1017,7 @@ int vc4_kms_load(struct drm_device *dev) * the BCM2711, but the load tracker computations are used for * the core clock rate calculation. */ - if (!is_vc5) { + if (!vc4->is_vc5) { /* Start with the load tracker enabled. Can be * disabled through the debugfs load_tracker file. 
*/ @@ -1025,7 +1033,7 @@ int vc4_kms_load(struct drm_device *dev) return ret; } - if (is_vc5) { + if (vc4->is_vc5) { dev->mode_config.max_width = 7680; dev->mode_config.max_height = 7680; } else { @@ -1033,7 +1041,7 @@ int vc4_kms_load(struct drm_device *dev) dev->mode_config.max_height = 2048; } - dev->mode_config.funcs = &vc4_mode_funcs; + dev->mode_config.funcs = vc4->is_vc5 ? &vc5_mode_funcs : &vc4_mode_funcs; dev->mode_config.helper_private = &vc4_mode_config_helpers; dev->mode_config.preferred_depth = 24; dev->mode_config.async_page_flip = true; diff --git a/drivers/gpu/drm/vc4/vc4_perfmon.c b/drivers/gpu/drm/vc4/vc4_perfmon.c index 18abc06335c1..c7f5adb6bcf8 100644 --- a/drivers/gpu/drm/vc4/vc4_perfmon.c +++ b/drivers/gpu/drm/vc4/vc4_perfmon.c @@ -17,13 +17,27 @@ void vc4_perfmon_get(struct vc4_perfmon *perfmon) { + struct vc4_dev *vc4 = perfmon->dev; + + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + if (perfmon) refcount_inc(&perfmon->refcnt); } void vc4_perfmon_put(struct vc4_perfmon *perfmon) { - if (perfmon && refcount_dec_and_test(&perfmon->refcnt)) + struct vc4_dev *vc4; + + if (!perfmon) + return; + + vc4 = perfmon->dev; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + + if (refcount_dec_and_test(&perfmon->refcnt)) kfree(perfmon); } @@ -32,6 +46,9 @@ void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon) unsigned int i; u32 mask; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + if (WARN_ON_ONCE(!perfmon || vc4->active_perfmon)) return; @@ -49,6 +66,9 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon, { unsigned int i; + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + if (WARN_ON_ONCE(!vc4->active_perfmon || perfmon != vc4->active_perfmon)) return; @@ -64,8 +84,12 @@ void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon, struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id) { + struct vc4_dev *vc4 = vc4file->dev; struct vc4_perfmon *perfmon; + if (WARN_ON_ONCE(vc4->is_vc5)) + return NULL; + mutex_lock(&vc4file->perfmon.lock); perfmon = idr_find(&vc4file->perfmon.idr, id); vc4_perfmon_get(perfmon); @@ -76,8 +100,14 @@ struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id) void vc4_perfmon_open_file(struct vc4_file *vc4file) { + struct vc4_dev *vc4 = vc4file->dev; + + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + mutex_init(&vc4file->perfmon.lock); idr_init_base(&vc4file->perfmon.idr, VC4_PERFMONID_MIN); + vc4file->dev = vc4; } static int vc4_perfmon_idr_del(int id, void *elem, void *data) @@ -91,6 +121,11 @@ static int vc4_perfmon_idr_del(int id, void *elem, void *data) void vc4_perfmon_close_file(struct vc4_file *vc4file) { + struct vc4_dev *vc4 = vc4file->dev; + + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + mutex_lock(&vc4file->perfmon.lock); idr_for_each(&vc4file->perfmon.idr, vc4_perfmon_idr_del, NULL); idr_destroy(&vc4file->perfmon.idr); @@ -107,6 +142,9 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data, unsigned int i; int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (!vc4->v3d) { DRM_DEBUG("Creating perfmon no VC4 V3D probed\n"); return -ENODEV; @@ -127,6 +165,7 @@ int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data, GFP_KERNEL); if (!perfmon) return -ENOMEM; + perfmon->dev = vc4; for (i = 0; i < req->ncounters; i++) perfmon->events[i] = req->events[i]; @@ -157,6 +196,9 @@ int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data, struct drm_vc4_perfmon_destroy *req = data; struct vc4_perfmon *perfmon; + if (WARN_ON_ONCE(vc4->is_vc5)) + return 
-ENODEV; + if (!vc4->v3d) { DRM_DEBUG("Destroying perfmon no VC4 V3D probed\n"); return -ENODEV; @@ -182,6 +224,9 @@ int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data, struct vc4_perfmon *perfmon; int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (!vc4->v3d) { DRM_DEBUG("Getting perfmon no VC4 V3D probed\n"); return -ENODEV; diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c index b3438f4a81ce..1e866dc00ac3 100644 --- a/drivers/gpu/drm/vc4/vc4_plane.c +++ b/drivers/gpu/drm/vc4/vc4_plane.c @@ -489,10 +489,10 @@ static u32 vc4_lbm_size(struct drm_plane_state *state) } /* Align it to 64 or 128 (hvs5) bytes */ - lbm = roundup(lbm, vc4->hvs->hvs5 ? 128 : 64); + lbm = roundup(lbm, vc4->is_vc5 ? 128 : 64); /* Each "word" of the LBM memory contains 2 or 4 (hvs5) pixels */ - lbm /= vc4->hvs->hvs5 ? 4 : 2; + lbm /= vc4->is_vc5 ? 4 : 2; return lbm; } @@ -608,7 +608,7 @@ static int vc4_plane_allocate_lbm(struct drm_plane_state *state) ret = drm_mm_insert_node_generic(&vc4->hvs->lbm_mm, &vc4_state->lbm, lbm_size, - vc4->hvs->hvs5 ? 64 : 32, + vc4->is_vc5 ? 64 : 32, 0, 0); spin_unlock_irqrestore(&vc4->hvs->mm_lock, irqflags); @@ -917,7 +917,7 @@ static int vc4_plane_mode_set(struct drm_plane *plane, mix_plane_alpha = state->alpha != DRM_BLEND_ALPHA_OPAQUE && fb->format->has_alpha; - if (!vc4->hvs->hvs5) { + if (!vc4->is_vc5) { /* Control word */ vc4_dlist_write(vc4_state, SCALER_CTL0_VALID | @@ -1321,6 +1321,10 @@ static int vc4_plane_atomic_async_check(struct drm_plane *plane, old_vc4_state = to_vc4_plane_state(plane->state); new_vc4_state = to_vc4_plane_state(new_plane_state); + + if (!new_vc4_state->hw_dlist) + return -EINVAL; + if (old_vc4_state->dlist_count != new_vc4_state->dlist_count || old_vc4_state->pos0_offset != new_vc4_state->pos0_offset || old_vc4_state->pos2_offset != new_vc4_state->pos2_offset || @@ -1385,6 +1389,13 @@ static const struct drm_plane_helper_funcs vc4_plane_helper_funcs = { .atomic_async_update = vc4_plane_atomic_async_update, }; +static const struct drm_plane_helper_funcs vc5_plane_helper_funcs = { + .atomic_check = vc4_plane_atomic_check, + .atomic_update = vc4_plane_atomic_update, + .atomic_async_check = vc4_plane_atomic_async_check, + .atomic_async_update = vc4_plane_atomic_async_update, +}; + static bool vc4_format_mod_supported(struct drm_plane *plane, uint32_t format, uint64_t modifier) @@ -1453,14 +1464,13 @@ static const struct drm_plane_funcs vc4_plane_funcs = { struct drm_plane *vc4_plane_init(struct drm_device *dev, enum drm_plane_type type) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct drm_plane *plane = NULL; struct vc4_plane *vc4_plane; u32 formats[ARRAY_SIZE(hvs_formats)]; int num_formats = 0; int ret = 0; unsigned i; - bool hvs5 = of_device_is_compatible(dev->dev->of_node, - "brcm,bcm2711-vc5"); static const uint64_t modifiers[] = { DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED, DRM_FORMAT_MOD_BROADCOM_SAND128, @@ -1476,7 +1486,7 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, return ERR_PTR(-ENOMEM); for (i = 0; i < ARRAY_SIZE(hvs_formats); i++) { - if (!hvs_formats[i].hvs5_only || hvs5) { + if (!hvs_formats[i].hvs5_only || vc4->is_vc5) { formats[num_formats] = hvs_formats[i].drm; num_formats++; } @@ -1490,7 +1500,10 @@ struct drm_plane *vc4_plane_init(struct drm_device *dev, if (ret) return ERR_PTR(ret); - drm_plane_helper_add(plane, &vc4_plane_helper_funcs); + if (vc4->is_vc5) + drm_plane_helper_add(plane, &vc5_plane_helper_funcs); + else + drm_plane_helper_add(plane, 
&vc4_plane_helper_funcs); drm_plane_create_alpha_property(plane); drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c index 3c918eeaf56e..f6b7dc3df08c 100644 --- a/drivers/gpu/drm/vc4/vc4_render_cl.c +++ b/drivers/gpu/drm/vc4/vc4_render_cl.c @@ -593,11 +593,15 @@ vc4_rcl_render_config_surface_setup(struct vc4_exec_info *exec, int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec) { + struct vc4_dev *vc4 = to_vc4_dev(dev); struct vc4_rcl_setup setup = {0}; struct drm_vc4_submit_cl *args = exec->args; bool has_bin = args->bin_cl_size != 0; int ret; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + if (args->min_x_tile > args->max_x_tile || args->min_y_tile > args->max_y_tile) { DRM_DEBUG("Bad render tile set (%d,%d)-(%d,%d)\n", diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c index 7bb3067f8425..cc714dcfe1f2 100644 --- a/drivers/gpu/drm/vc4/vc4_v3d.c +++ b/drivers/gpu/drm/vc4/vc4_v3d.c @@ -127,6 +127,9 @@ static int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused) int vc4_v3d_pm_get(struct vc4_dev *vc4) { + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + mutex_lock(&vc4->power_lock); if (vc4->power_refcount++ == 0) { int ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev); @@ -145,6 +148,9 @@ vc4_v3d_pm_get(struct vc4_dev *vc4) void vc4_v3d_pm_put(struct vc4_dev *vc4) { + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + mutex_lock(&vc4->power_lock); if (--vc4->power_refcount == 0) { pm_runtime_mark_last_busy(&vc4->v3d->pdev->dev); @@ -172,6 +178,9 @@ int vc4_v3d_get_bin_slot(struct vc4_dev *vc4) uint64_t seqno = 0; struct vc4_exec_info *exec; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + try_again: spin_lock_irqsave(&vc4->job_lock, irqflags); slot = ffs(~vc4->bin_alloc_used); @@ -316,6 +325,9 @@ int vc4_v3d_bin_bo_get(struct vc4_dev *vc4, bool *used) { int ret = 0; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + mutex_lock(&vc4->bin_bo_lock); if (used && *used) @@ -348,6 +360,9 @@ static void bin_bo_release(struct kref *ref) void vc4_v3d_bin_bo_put(struct vc4_dev *vc4) { + if (WARN_ON_ONCE(vc4->is_vc5)) + return; + mutex_lock(&vc4->bin_bo_lock); kref_put(&vc4->bin_bo_kref, bin_bo_release); mutex_unlock(&vc4->bin_bo_lock); diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c index eec76af49f04..2feba55bcef7 100644 --- a/drivers/gpu/drm/vc4/vc4_validate.c +++ b/drivers/gpu/drm/vc4/vc4_validate.c @@ -105,9 +105,13 @@ size_is_lt(uint32_t width, uint32_t height, int cpp) struct drm_gem_cma_object * vc4_use_bo(struct vc4_exec_info *exec, uint32_t hindex) { + struct vc4_dev *vc4 = exec->dev; struct drm_gem_cma_object *obj; struct vc4_bo *bo; + if (WARN_ON_ONCE(vc4->is_vc5)) + return NULL; + if (hindex >= exec->bo_count) { DRM_DEBUG("BO index %d greater than BO count %d\n", hindex, exec->bo_count); @@ -160,10 +164,14 @@ vc4_check_tex_size(struct vc4_exec_info *exec, struct drm_gem_cma_object *fbo, uint32_t offset, uint8_t tiling_format, uint32_t width, uint32_t height, uint8_t cpp) { + struct vc4_dev *vc4 = exec->dev; uint32_t aligned_width, aligned_height, stride, size; uint32_t utile_w = utile_width(cpp); uint32_t utile_h = utile_height(cpp); + if (WARN_ON_ONCE(vc4->is_vc5)) + return false; + /* The shaded vertex format stores signed 12.4 fixed point * (-2048,2047) offsets from the viewport center, so we should * never have a render target larger than 4096. 
The texture @@ -482,10 +490,14 @@ vc4_validate_bin_cl(struct drm_device *dev, void *unvalidated, struct vc4_exec_info *exec) { + struct vc4_dev *vc4 = to_vc4_dev(dev); uint32_t len = exec->args->bin_cl_size; uint32_t dst_offset = 0; uint32_t src_offset = 0; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + while (src_offset < len) { void *dst_pkt = validated + dst_offset; void *src_pkt = unvalidated + src_offset; @@ -926,9 +938,13 @@ int vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec) { + struct vc4_dev *vc4 = to_vc4_dev(dev); uint32_t i; int ret = 0; + if (WARN_ON_ONCE(vc4->is_vc5)) + return -ENODEV; + for (i = 0; i < exec->shader_state_count; i++) { ret = validate_gl_shader_rec(dev, exec, &exec->shader_state[i]); if (ret) diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index 7cf82b071de2..e315aeb5fef5 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -778,6 +778,7 @@ vc4_handle_branch_target(struct vc4_shader_validation_state *validation_state) struct vc4_validated_shader_info * vc4_validate_shader(struct drm_gem_cma_object *shader_obj) { + struct vc4_dev *vc4 = to_vc4_dev(shader_obj->base.dev); bool found_shader_end = false; int shader_end_ip = 0; uint32_t last_thread_switch_ip = -3; @@ -785,6 +786,9 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) struct vc4_validated_shader_info *validated_shader = NULL; struct vc4_shader_validation_state validation_state; + if (WARN_ON_ONCE(vc4->is_vc5)) + return NULL; + memset(&validation_state, 0, sizeof(validation_state)); validation_state.shader = shader_obj->vaddr; validation_state.max_ip = shader_obj->base.size / sizeof(uint64_t); diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c index 5a5bf4e5b717..e31554d7139f 100644 --- a/drivers/gpu/drm/xen/xen_drm_front_gem.c +++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c @@ -71,7 +71,7 @@ static int xen_drm_front_gem_object_mmap(struct drm_gem_object *gem_obj, * the whole buffer. 
*/ vma->vm_flags &= ~VM_PFNMAP; - vma->vm_flags |= VM_MIXEDMAP; + vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND; vma->vm_pgoff = 0; /* diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index 978ee2aab2d4..e0bc73124196 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c @@ -199,7 +199,8 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device, if (!input_device->hid_desc) goto cleanup; - input_device->report_desc_size = desc->desc[0].wDescriptorLength; + input_device->report_desc_size = le16_to_cpu( + desc->desc[0].wDescriptorLength); if (input_device->report_desc_size == 0) { input_device->dev_info_status = -EINVAL; goto cleanup; @@ -217,7 +218,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device, memcpy(input_device->report_desc, ((unsigned char *)desc) + desc->bLength, - desc->desc[0].wDescriptorLength); + le16_to_cpu(desc->desc[0].wDescriptorLength)); /* Send the ack */ memset(&ack, 0, sizeof(struct mousevsc_prt_msg)); diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index b60f13481bdc..5b120402d405 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c @@ -21,6 +21,7 @@ #include <linux/cpu.h> #include <linux/hyperv.h> #include <asm/mshyperv.h> +#include <linux/sched/isolation.h> #include "hyperv_vmbus.h" @@ -638,6 +639,7 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) */ if (newchannel->offermsg.offer.sub_channel_index == 0) { mutex_unlock(&vmbus_connection.channel_mutex); + cpus_read_unlock(); /* * Don't call free_channel(), because newchannel->kobj * is not initialized yet. @@ -728,16 +730,20 @@ static void init_vp_index(struct vmbus_channel *channel) u32 i, ncpu = num_online_cpus(); cpumask_var_t available_mask; struct cpumask *allocated_mask; + const struct cpumask *hk_mask = housekeeping_cpumask(HK_TYPE_MANAGED_IRQ); u32 target_cpu; int numa_node; if (!perf_chn || - !alloc_cpumask_var(&available_mask, GFP_KERNEL)) { + !alloc_cpumask_var(&available_mask, GFP_KERNEL) || + cpumask_empty(hk_mask)) { /* * If the channel is not a performance critical * channel, bind it to VMBUS_CONNECT_CPU. * In case alloc_cpumask_var() fails, bind it to * VMBUS_CONNECT_CPU. + * If all the cpus are isolated, bind it to + * VMBUS_CONNECT_CPU. */ channel->target_cpu = VMBUS_CONNECT_CPU; if (perf_chn) @@ -758,17 +764,19 @@ static void init_vp_index(struct vmbus_channel *channel) } allocated_mask = &hv_context.hv_numa_map[numa_node]; - if (cpumask_equal(allocated_mask, cpumask_of_node(numa_node))) { +retry: + cpumask_xor(available_mask, allocated_mask, cpumask_of_node(numa_node)); + cpumask_and(available_mask, available_mask, hk_mask); + + if (cpumask_empty(available_mask)) { /* * We have cycled through all the CPUs in the node; * reset the allocated map. */ cpumask_clear(allocated_mask); + goto retry; } - cpumask_xor(available_mask, allocated_mask, - cpumask_of_node(numa_node)); - target_cpu = cpumask_first(available_mask); cpumask_set_cpu(target_cpu, allocated_mask); diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c index c698592b83e4..d35b60c06114 100644 --- a/drivers/hv/hv_kvp.c +++ b/drivers/hv/hv_kvp.c @@ -394,7 +394,7 @@ kvp_send_key(struct work_struct *dummy) in_msg = kvp_transaction.kvp_msg; /* - * The key/value strings sent from the host are encoded in + * The key/value strings sent from the host are encoded * in utf16; convert it to utf8 strings. * The host assures us that the utf16 strings will not exceed * the max lengths specified. 
We will however, reserve room diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 714d549b7b46..547ae334e5cd 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -21,6 +21,7 @@ #include <linux/kernel_stat.h> #include <linux/clockchips.h> #include <linux/cpu.h> +#include <linux/sched/isolation.h> #include <linux/sched/task_stack.h> #include <linux/delay.h> @@ -1770,6 +1771,9 @@ static ssize_t target_cpu_store(struct vmbus_channel *channel, if (target_cpu >= nr_cpumask_bits) return -EINVAL; + if (!cpumask_test_cpu(target_cpu, housekeeping_cpumask(HK_TYPE_MANAGED_IRQ))) + return -EINVAL; + /* No CPUs should come up or down during this. */ cpus_read_lock(); diff --git a/drivers/hwmon/asus-ec-sensors.c b/drivers/hwmon/asus-ec-sensors.c index 57e11b2bab74..3633ab691662 100644 --- a/drivers/hwmon/asus-ec-sensors.c +++ b/drivers/hwmon/asus-ec-sensors.c @@ -259,7 +259,7 @@ static const struct ec_board_info board_info[] = { }, { .board_names = { - "ROG CROSSHAIR VIII FORMULA" + "ROG CROSSHAIR VIII FORMULA", "ROG CROSSHAIR VIII HERO", "ROG CROSSHAIR VIII HERO (WI-FI)", }, diff --git a/drivers/hwmon/occ/common.c b/drivers/hwmon/occ/common.c index d78f4bebc718..ea070b91e5b9 100644 --- a/drivers/hwmon/occ/common.c +++ b/drivers/hwmon/occ/common.c @@ -1228,10 +1228,15 @@ EXPORT_SYMBOL_GPL(occ_setup); void occ_shutdown(struct occ *occ) { + mutex_lock(&occ->lock); + occ_shutdown_sysfs(occ); if (occ->hwmon) hwmon_device_unregister(occ->hwmon); + occ->hwmon = NULL; + + mutex_unlock(&occ->lock); } EXPORT_SYMBOL_GPL(occ_shutdown); diff --git a/drivers/i2c/busses/i2c-designware-common.c b/drivers/i2c/busses/i2c-designware-common.c index e7d316b1401a..c023b691441e 100644 --- a/drivers/i2c/busses/i2c-designware-common.c +++ b/drivers/i2c/busses/i2c-designware-common.c @@ -477,9 +477,6 @@ int i2c_dw_prepare_clk(struct dw_i2c_dev *dev, bool prepare) { int ret; - if (IS_ERR(dev->clk)) - return PTR_ERR(dev->clk); - if (prepare) { /* Optional interface clock */ ret = clk_prepare_enable(dev->pclk); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 70ade5306e45..ba043b547393 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -320,8 +320,17 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) goto exit_reset; } - dev->clk = devm_clk_get(&pdev->dev, NULL); - if (!i2c_dw_prepare_clk(dev, true)) { + dev->clk = devm_clk_get_optional(&pdev->dev, NULL); + if (IS_ERR(dev->clk)) { + ret = PTR_ERR(dev->clk); + goto exit_reset; + } + + ret = i2c_dw_prepare_clk(dev, true); + if (ret) + goto exit_reset; + + if (dev->clk) { u64 clk_khz; dev->get_clk_rate_khz = i2c_dw_get_clk_rate_khz; diff --git a/drivers/i2c/busses/i2c-mt65xx.c b/drivers/i2c/busses/i2c-mt65xx.c index bdecb78bfc26..8e6985354fd5 100644 --- a/drivers/i2c/busses/i2c-mt65xx.c +++ b/drivers/i2c/busses/i2c-mt65xx.c @@ -1420,17 +1420,22 @@ static int mtk_i2c_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "Request I2C IRQ %d fail\n", irq); - return ret; + goto err_bulk_unprepare; } i2c_set_adapdata(&i2c->adap, i2c); ret = i2c_add_adapter(&i2c->adap); if (ret) - return ret; + goto err_bulk_unprepare; platform_set_drvdata(pdev, i2c); return 0; + +err_bulk_unprepare: + clk_bulk_unprepare(I2C_MT65XX_CLK_MAX, i2c->clocks); + + return ret; } static int mtk_i2c_remove(struct platform_device *pdev) diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index 
5960ccde6574..aede9d551130 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -2372,8 +2372,7 @@ static struct platform_driver npcm_i2c_bus_driver = { static int __init npcm_i2c_init(void) { npcm_i2c_debugfs_dir = debugfs_create_dir("npcm_i2c", NULL); - platform_driver_register(&npcm_i2c_bus_driver); - return 0; + return platform_driver_register(&npcm_i2c_bus_driver); } module_init(npcm_i2c_init); diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index b9bb94bd0f67..424ef470223d 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c @@ -115,6 +115,18 @@ static unsigned int mwait_substates __initdata; #define flg2MWAIT(flags) (((flags) >> 24) & 0xFF) #define MWAIT2flg(eax) ((eax & 0xFF) << 24) +static __always_inline int __intel_idle(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + struct cpuidle_state *state = &drv->states[index]; + unsigned long eax = flg2MWAIT(state->flags); + unsigned long ecx = 1; /* break on interrupt flag */ + + mwait_idle_with_hints(eax, ecx); + + return index; +} + /** * intel_idle - Ask the processor to enter the given idle state. * @dev: cpuidle device of the target CPU. @@ -132,16 +144,19 @@ static unsigned int mwait_substates __initdata; static __cpuidle int intel_idle(struct cpuidle_device *dev, struct cpuidle_driver *drv, int index) { - struct cpuidle_state *state = &drv->states[index]; - unsigned long eax = flg2MWAIT(state->flags); - unsigned long ecx = 1; /* break on interrupt flag */ + return __intel_idle(dev, drv, index); +} - if (state->flags & CPUIDLE_FLAG_IRQ_ENABLE) - local_irq_enable(); +static __cpuidle int intel_idle_irq(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) +{ + int ret; - mwait_idle_with_hints(eax, ecx); + raw_local_irq_enable(); + ret = __intel_idle(dev, drv, index); + raw_local_irq_disable(); - return index; + return ret; } /** @@ -1801,6 +1816,9 @@ static void __init intel_idle_init_cstates_icpu(struct cpuidle_driver *drv) /* Structure copy. 
*/ drv->states[drv->state_count] = cpuidle_state_table[cstate]; + if (cpuidle_state_table[cstate].flags & CPUIDLE_FLAG_IRQ_ENABLE) + drv->states[drv->state_count].enter = intel_idle_irq; + if ((disabled_states_mask & BIT(drv->state_count)) || ((icpu->use_acpi || force_use_acpi) && intel_idle_off_by_default(mwait_hint) && diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c index 4f73bc827eec..9c9e98578667 100644 --- a/drivers/iio/accel/bma180.c +++ b/drivers/iio/accel/bma180.c @@ -1006,11 +1006,12 @@ static int bma180_probe(struct i2c_client *client, data->trig->ops = &bma180_trigger_ops; iio_trigger_set_drvdata(data->trig, indio_dev); - indio_dev->trig = iio_trigger_get(data->trig); ret = iio_trigger_register(data->trig); if (ret) goto err_trigger_free; + + indio_dev->trig = iio_trigger_get(data->trig); } ret = iio_triggered_buffer_setup(indio_dev, NULL, diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c index ac74cdcd2bc8..748b35c2f0c3 100644 --- a/drivers/iio/accel/kxcjk-1013.c +++ b/drivers/iio/accel/kxcjk-1013.c @@ -1554,12 +1554,12 @@ static int kxcjk1013_probe(struct i2c_client *client, data->dready_trig->ops = &kxcjk1013_trigger_ops; iio_trigger_set_drvdata(data->dready_trig, indio_dev); - indio_dev->trig = data->dready_trig; - iio_trigger_get(indio_dev->trig); ret = iio_trigger_register(data->dready_trig); if (ret) goto err_poweroff; + indio_dev->trig = iio_trigger_get(data->dready_trig); + data->motion_trig->ops = &kxcjk1013_trigger_ops; iio_trigger_set_drvdata(data->motion_trig, indio_dev); ret = iio_trigger_register(data->motion_trig); diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c index 912a447e6310..c7d9ca96dbaa 100644 --- a/drivers/iio/accel/mma8452.c +++ b/drivers/iio/accel/mma8452.c @@ -1511,10 +1511,14 @@ static int mma8452_reset(struct i2c_client *client) int i; int ret; - ret = i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2, + /* + * On fxls8471, the chip resets immediately once the reset bit is + * set and does not ACK the transfer, so do not check the return + * value here. The code below reads the reset register back to + * verify that the reset actually took effect. 
+ */ + i2c_smbus_write_byte_data(client, MMA8452_CTRL_REG2, MMA8452_CTRL_REG2_RST); - if (ret < 0) - return ret; for (i = 0; i < 10; i++) { usleep_range(100, 200); @@ -1557,11 +1561,13 @@ static int mma8452_probe(struct i2c_client *client, mutex_init(&data->lock); data->chip_info = device_get_match_data(&client->dev); - if (!data->chip_info && id) { - data->chip_info = &mma_chip_info_table[id->driver_data]; - } else { - dev_err(&client->dev, "unknown device model\n"); - return -ENODEV; + if (!data->chip_info) { + if (id) { + data->chip_info = &mma_chip_info_table[id->driver_data]; + } else { + dev_err(&client->dev, "unknown device model\n"); + return -ENODEV; + } } ret = iio_read_mount_matrix(&client->dev, &data->orientation); diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c index b3afbf064915..df600d2917c0 100644 --- a/drivers/iio/accel/mxc4005.c +++ b/drivers/iio/accel/mxc4005.c @@ -456,8 +456,6 @@ static int mxc4005_probe(struct i2c_client *client, data->dready_trig->ops = &mxc4005_trigger_ops; iio_trigger_set_drvdata(data->dready_trig, indio_dev); - indio_dev->trig = data->dready_trig; - iio_trigger_get(indio_dev->trig); ret = devm_iio_trigger_register(&client->dev, data->dready_trig); if (ret) { @@ -465,6 +463,8 @@ static int mxc4005_probe(struct i2c_client *client, "failed to register trigger\n"); return ret; } + + indio_dev->trig = iio_trigger_get(data->dready_trig); } return devm_iio_device_register(&client->dev, indio_dev); diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c index a73e3c2d212f..a9e655e69eaa 100644 --- a/drivers/iio/adc/adi-axi-adc.c +++ b/drivers/iio/adc/adi-axi-adc.c @@ -322,16 +322,19 @@ static struct adi_axi_adc_client *adi_axi_adc_attach_client(struct device *dev) if (!try_module_get(cl->dev->driver->owner)) { mutex_unlock(&registered_clients_lock); + of_node_put(cln); return ERR_PTR(-ENODEV); } get_device(cl->dev); cl->info = info; mutex_unlock(&registered_clients_lock); + of_node_put(cln); return cl; } mutex_unlock(&registered_clients_lock); + of_node_put(cln); return ERR_PTR(-EPROBE_DEFER); } diff --git a/drivers/iio/adc/aspeed_adc.c b/drivers/iio/adc/aspeed_adc.c index 0793d2474cdc..9341e0e0eb55 100644 --- a/drivers/iio/adc/aspeed_adc.c +++ b/drivers/iio/adc/aspeed_adc.c @@ -186,6 +186,7 @@ static int aspeed_adc_set_trim_data(struct iio_dev *indio_dev) return -EOPNOTSUPP; } scu = syscon_node_to_regmap(syscon); + of_node_put(syscon); if (IS_ERR(scu)) { dev_warn(data->dev, "Failed to get syscon regmap\n"); return -EOPNOTSUPP; } diff --git a/drivers/iio/adc/axp288_adc.c b/drivers/iio/adc/axp288_adc.c index a4b8be5b8f88..580361bd9849 100644 --- a/drivers/iio/adc/axp288_adc.c +++ b/drivers/iio/adc/axp288_adc.c @@ -196,6 +196,14 @@ static const struct dmi_system_id axp288_adc_ts_bias_override[] = { }, .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA, }, + { + /* Nuvision Solo 10 Draw */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "TMAX"), + DMI_MATCH(DMI_PRODUCT_NAME, "TM101W610L"), + }, + .driver_data = (void *)(uintptr_t)AXP288_ADC_TS_BIAS_80UA, + }, {} }; diff --git a/drivers/iio/adc/rzg2l_adc.c b/drivers/iio/adc/rzg2l_adc.c index 7585144b9715..5b09a93fdf34 100644 --- a/drivers/iio/adc/rzg2l_adc.c +++ b/drivers/iio/adc/rzg2l_adc.c @@ -334,11 +334,15 @@ static int rzg2l_adc_parse_properties(struct platform_device *pdev, struct rzg2l i = 0; device_for_each_child_node(&pdev->dev, fwnode) { ret = fwnode_property_read_u32(fwnode, "reg", &channel); - if (ret) + if (ret) { + fwnode_handle_put(fwnode); return ret; + } - if 
(channel >= RZG2L_ADC_MAX_CHANNELS) + if (channel >= RZG2L_ADC_MAX_CHANNELS) { + fwnode_handle_put(fwnode); return -EINVAL; + } chan_array[i].type = IIO_VOLTAGE; chan_array[i].indexed = 1; diff --git a/drivers/iio/adc/stm32-adc-core.c b/drivers/iio/adc/stm32-adc-core.c index 142656232157..3efb8c404ccc 100644 --- a/drivers/iio/adc/stm32-adc-core.c +++ b/drivers/iio/adc/stm32-adc-core.c @@ -64,6 +64,7 @@ struct stm32_adc_priv; * @max_clk_rate_hz: maximum analog clock rate (Hz, from datasheet) * @has_syscfg: SYSCFG capability flags * @num_irqs: number of interrupt lines + * @num_adcs: maximum number of ADC instances in the common registers */ struct stm32_adc_priv_cfg { const struct stm32_adc_common_regs *regs; @@ -71,6 +72,7 @@ struct stm32_adc_priv_cfg { u32 max_clk_rate_hz; unsigned int has_syscfg; unsigned int num_irqs; + unsigned int num_adcs; }; /** @@ -352,7 +354,7 @@ static void stm32_adc_irq_handler(struct irq_desc *desc) * before invoking the interrupt handler (e.g. call ISR only for * IRQ-enabled ADCs). */ - for (i = 0; i < priv->cfg->num_irqs; i++) { + for (i = 0; i < priv->cfg->num_adcs; i++) { if ((status & priv->cfg->regs->eoc_msk[i] && stm32_adc_eoc_enabled(priv, i)) || (status & priv->cfg->regs->ovr_msk[i])) @@ -792,6 +794,7 @@ static const struct stm32_adc_priv_cfg stm32f4_adc_priv_cfg = { .clk_sel = stm32f4_adc_clk_sel, .max_clk_rate_hz = 36000000, .num_irqs = 1, + .num_adcs = 3, }; static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = { @@ -800,14 +803,16 @@ static const struct stm32_adc_priv_cfg stm32h7_adc_priv_cfg = { .max_clk_rate_hz = 36000000, .has_syscfg = HAS_VBOOSTER, .num_irqs = 1, + .num_adcs = 2, }; static const struct stm32_adc_priv_cfg stm32mp1_adc_priv_cfg = { .regs = &stm32h7_adc_common_regs, .clk_sel = stm32h7_adc_clk_sel, - .max_clk_rate_hz = 40000000, + .max_clk_rate_hz = 36000000, .has_syscfg = HAS_VBOOSTER | HAS_ANASWVDD, .num_irqs = 2, + .num_adcs = 2, }; static const struct of_device_id stm32_adc_of_match[] = { diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c index a68ecbda6480..11ef873d6453 100644 --- a/drivers/iio/adc/stm32-adc.c +++ b/drivers/iio/adc/stm32-adc.c @@ -1365,7 +1365,7 @@ static int stm32_adc_read_raw(struct iio_dev *indio_dev, else ret = -EINVAL; - if (mask == IIO_CHAN_INFO_PROCESSED && adc->vrefint.vrefint_cal) + if (mask == IIO_CHAN_INFO_PROCESSED) *val = STM32_ADC_VREFINT_VOLTAGE * adc->vrefint.vrefint_cal / *val; iio_device_release_direct_mode(indio_dev); @@ -1407,7 +1407,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data) struct stm32_adc *adc = iio_priv(indio_dev); const struct stm32_adc_regspec *regs = adc->cfg->regs; u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg); - u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg); /* Check ovr status right now, as ovr mask should be already disabled */ if (status & regs->isr_ovr.mask) { @@ -1422,11 +1421,6 @@ static irqreturn_t stm32_adc_threaded_isr(int irq, void *data) return IRQ_HANDLED; } - if (!(status & mask)) - dev_err_ratelimited(&indio_dev->dev, - "Unexpected IRQ: IER=0x%08x, ISR=0x%08x\n", - mask, status); - return IRQ_NONE; } @@ -1436,10 +1430,6 @@ static irqreturn_t stm32_adc_isr(int irq, void *data) struct stm32_adc *adc = iio_priv(indio_dev); const struct stm32_adc_regspec *regs = adc->cfg->regs; u32 status = stm32_adc_readl(adc, regs->isr_eoc.reg); - u32 mask = stm32_adc_readl(adc, regs->ier_eoc.reg); - - if (!(status & mask)) - return IRQ_WAKE_THREAD; if (status & regs->isr_ovr.mask) { /* @@ -1979,10 +1969,10 @@ static int 
stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n for (i = 0; i < STM32_ADC_INT_CH_NB; i++) { if (!strncmp(stm32_adc_ic[i].name, ch_name, STM32_ADC_CH_SZ)) { - adc->int_ch[i] = chan; - - if (stm32_adc_ic[i].idx != STM32_ADC_INT_CH_VREFINT) - continue; + if (stm32_adc_ic[i].idx != STM32_ADC_INT_CH_VREFINT) { + adc->int_ch[i] = chan; + break; + } /* Get calibration data for vrefint channel */ ret = nvmem_cell_read_u16(&indio_dev->dev, "vrefint", &vrefint); @@ -1990,10 +1980,15 @@ static int stm32_adc_populate_int_ch(struct iio_dev *indio_dev, const char *ch_n return dev_err_probe(indio_dev->dev.parent, ret, "nvmem access error\n"); } - if (ret == -ENOENT) - dev_dbg(&indio_dev->dev, "vrefint calibration not found\n"); - else - adc->vrefint.vrefint_cal = vrefint; + if (ret == -ENOENT) { + dev_dbg(&indio_dev->dev, "vrefint calibration not found. Skip vrefint channel\n"); + return ret; + } else if (!vrefint) { + dev_dbg(&indio_dev->dev, "Null vrefint calibration value. Skip vrefint channel\n"); + return -ENOENT; + } + adc->int_ch[i] = chan; + adc->vrefint.vrefint_cal = vrefint; } } @@ -2030,7 +2025,9 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev, } strncpy(adc->chan_name[val], name, STM32_ADC_CH_SZ); ret = stm32_adc_populate_int_ch(indio_dev, name, val); - if (ret) + if (ret == -ENOENT) + continue; + else if (ret) goto err; } else if (ret != -EINVAL) { dev_err(&indio_dev->dev, "Invalid label %d\n", ret); diff --git a/drivers/iio/adc/ti-ads131e08.c b/drivers/iio/adc/ti-ads131e08.c index 0c2025a22575..80a09817c119 100644 --- a/drivers/iio/adc/ti-ads131e08.c +++ b/drivers/iio/adc/ti-ads131e08.c @@ -739,7 +739,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev) device_for_each_child_node(dev, node) { ret = fwnode_property_read_u32(node, "reg", &channel); if (ret) - return ret; + goto err_child_out; ret = fwnode_property_read_u32(node, "ti,gain", &tmp); if (ret) { @@ -747,7 +747,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev) } else { ret = ads131e08_pga_gain_to_field_value(st, tmp); if (ret < 0) - return ret; + goto err_child_out; channel_config[i].pga_gain = tmp; } @@ -758,7 +758,7 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev) } else { ret = ads131e08_validate_channel_mux(st, tmp); if (ret) - return ret; + goto err_child_out; channel_config[i].mux = tmp; } @@ -784,6 +784,10 @@ static int ads131e08_alloc_channels(struct iio_dev *indio_dev) st->channel_config = channel_config; return 0; + +err_child_out: + fwnode_handle_put(node); + return ret; } static void ads131e08_regulator_disable(void *data) diff --git a/drivers/iio/adc/xilinx-ams.c b/drivers/iio/adc/xilinx-ams.c index a55396c1f8b2..a7687706012d 100644 --- a/drivers/iio/adc/xilinx-ams.c +++ b/drivers/iio/adc/xilinx-ams.c @@ -1409,7 +1409,7 @@ static int ams_probe(struct platform_device *pdev) irq = platform_get_irq(pdev, 0); if (irq < 0) - return ret; + return irq; ret = devm_request_irq(&pdev->dev, irq, &ams_irq, 0, "ams-irq", indio_dev); diff --git a/drivers/iio/afe/iio-rescale.c b/drivers/iio/afe/iio-rescale.c index c6cf709f0f05..6949d2151025 100644 --- a/drivers/iio/afe/iio-rescale.c +++ b/drivers/iio/afe/iio-rescale.c @@ -277,7 +277,7 @@ static int rescale_configure_channel(struct device *dev, chan->ext_info = rescale->ext_info; chan->type = rescale->cfg->type; - if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) || + if (iio_channel_has_info(schan, IIO_CHAN_INFO_RAW) && iio_channel_has_info(schan, IIO_CHAN_INFO_SCALE)) { dev_info(dev, "using 
raw+scale source channel\n"); } else if (iio_channel_has_info(schan, IIO_CHAN_INFO_PROCESSED)) { diff --git a/drivers/iio/chemical/ccs811.c b/drivers/iio/chemical/ccs811.c index 847194fa1e46..80ef1aa9aae3 100644 --- a/drivers/iio/chemical/ccs811.c +++ b/drivers/iio/chemical/ccs811.c @@ -499,11 +499,11 @@ static int ccs811_probe(struct i2c_client *client, data->drdy_trig->ops = &ccs811_trigger_ops; iio_trigger_set_drvdata(data->drdy_trig, indio_dev); - indio_dev->trig = data->drdy_trig; - iio_trigger_get(indio_dev->trig); ret = iio_trigger_register(data->drdy_trig); if (ret) goto err_poweroff; + + indio_dev->trig = iio_trigger_get(data->drdy_trig); } ret = iio_triggered_buffer_setup(indio_dev, NULL, diff --git a/drivers/iio/frequency/admv1014.c b/drivers/iio/frequency/admv1014.c index a7994f8e6b9b..1aac5665b5de 100644 --- a/drivers/iio/frequency/admv1014.c +++ b/drivers/iio/frequency/admv1014.c @@ -700,8 +700,10 @@ static int admv1014_init(struct admv1014_state *st) ADMV1014_DET_EN_MSK; enable_reg = FIELD_PREP(ADMV1014_P1DB_COMPENSATION_MSK, st->p1db_comp ? 3 : 0) | - FIELD_PREP(ADMV1014_IF_AMP_PD_MSK, !(st->input_mode)) | - FIELD_PREP(ADMV1014_BB_AMP_PD_MSK, st->input_mode) | + FIELD_PREP(ADMV1014_IF_AMP_PD_MSK, + (st->input_mode == ADMV1014_IF_MODE) ? 0 : 1) | + FIELD_PREP(ADMV1014_BB_AMP_PD_MSK, + (st->input_mode == ADMV1014_IF_MODE) ? 1 : 0) | FIELD_PREP(ADMV1014_DET_EN_MSK, st->det_en); return __admv1014_spi_update_bits(st, ADMV1014_REG_ENABLE, enable_reg_msk, enable_reg); diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c index 4f19dc7ffe57..5908a96ca8af 100644 --- a/drivers/iio/gyro/mpu3050-core.c +++ b/drivers/iio/gyro/mpu3050-core.c @@ -875,6 +875,7 @@ static int mpu3050_power_up(struct mpu3050 *mpu3050) ret = regmap_update_bits(mpu3050->map, MPU3050_PWR_MGM, MPU3050_PWR_MGM_SLEEP, 0); if (ret) { + regulator_bulk_disable(ARRAY_SIZE(mpu3050->regs), mpu3050->regs); dev_err(mpu3050->dev, "error setting power mode\n"); return ret; } diff --git a/drivers/iio/humidity/hts221_buffer.c b/drivers/iio/humidity/hts221_buffer.c index f29692b9d2db..66b32413cf5e 100644 --- a/drivers/iio/humidity/hts221_buffer.c +++ b/drivers/iio/humidity/hts221_buffer.c @@ -135,9 +135,12 @@ int hts221_allocate_trigger(struct iio_dev *iio_dev) iio_trigger_set_drvdata(hw->trig, iio_dev); hw->trig->ops = &hts221_trigger_ops; + + err = devm_iio_trigger_register(hw->dev, hw->trig); + iio_dev->trig = iio_trigger_get(hw->trig); - return devm_iio_trigger_register(hw->dev, hw->trig); + return err; } static int hts221_buffer_preenable(struct iio_dev *iio_dev) diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600.h b/drivers/iio/imu/inv_icm42600/inv_icm42600.h index c0f5059b13b3..995a9dc06521 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600.h +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600.h @@ -17,6 +17,7 @@ #include "inv_icm42600_buffer.h" enum inv_icm42600_chip { + INV_CHIP_INVALID, INV_CHIP_ICM42600, INV_CHIP_ICM42602, INV_CHIP_ICM42605, diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c index 86858da9cc38..ca85fccc9839 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_core.c @@ -565,7 +565,7 @@ int inv_icm42600_core_probe(struct regmap *regmap, int chip, int irq, bool open_drain; int ret; - if (chip < 0 || chip >= INV_CHIP_NB) { + if (chip <= INV_CHIP_INVALID || chip >= INV_CHIP_NB) { dev_err(dev, "invalid chip = %d\n", chip); return -ENODEV; } diff --git 
a/drivers/iio/magnetometer/yamaha-yas530.c b/drivers/iio/magnetometer/yamaha-yas530.c index 9ff7b0e56cf6..b2bc637150bf 100644 --- a/drivers/iio/magnetometer/yamaha-yas530.c +++ b/drivers/iio/magnetometer/yamaha-yas530.c @@ -639,7 +639,7 @@ static int yas532_get_calibration_data(struct yas5xx *yas5xx) dev_dbg(yas5xx->dev, "calibration data: %*ph\n", 14, data); /* Sanity check, is this all zeroes? */ - if (memchr_inv(data, 0x00, 13)) { + if (memchr_inv(data, 0x00, 13) == NULL) { if (!(data[13] & BIT(7))) dev_warn(yas5xx->dev, "calibration is blank!\n"); } diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c index 70c37f664f6d..63fbcaa4cac8 100644 --- a/drivers/iio/proximity/sx9324.c +++ b/drivers/iio/proximity/sx9324.c @@ -885,6 +885,9 @@ sx9324_get_default_reg(struct device *dev, int idx, break; ret = device_property_read_u32_array(dev, prop, pin_defs, ARRAY_SIZE(pin_defs)); + if (ret) + break; + for (pin = 0; pin < SX9324_NUM_PINS; pin++) raw |= (pin_defs[pin] << (2 * pin)) & SX9324_REG_AFE_PH0_PIN_MASK(pin); diff --git a/drivers/iio/test/Kconfig b/drivers/iio/test/Kconfig index 56ca0ad7e77a..4c66c3f18c34 100644 --- a/drivers/iio/test/Kconfig +++ b/drivers/iio/test/Kconfig @@ -6,7 +6,7 @@ # Keep in alphabetical order config IIO_RESCALE_KUNIT_TEST bool "Test IIO rescale conversion functions" - depends on KUNIT=y && !IIO_RESCALE + depends on KUNIT=y && IIO_RESCALE=y default KUNIT_ALL_TESTS help If you want to run tests on the iio-rescale code say Y here. diff --git a/drivers/iio/test/Makefile b/drivers/iio/test/Makefile index f15ae0a6394f..880360f8d02c 100644 --- a/drivers/iio/test/Makefile +++ b/drivers/iio/test/Makefile @@ -4,6 +4,6 @@ # # Keep in alphabetical order -obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o ../afe/iio-rescale.o +obj-$(CONFIG_IIO_RESCALE_KUNIT_TEST) += iio-test-rescale.o obj-$(CONFIG_IIO_TEST_FORMAT) += iio-test-format.o CFLAGS_iio-test-format.o += $(DISABLE_STRUCTLEAK_PLUGIN) diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c index f1a8704e6cc1..d6c5e9644738 100644 --- a/drivers/iio/trigger/iio-trig-sysfs.c +++ b/drivers/iio/trigger/iio-trig-sysfs.c @@ -190,6 +190,7 @@ static int iio_sysfs_trigger_remove(int id) } iio_trigger_unregister(t->trig); + irq_work_sync(&t->work); iio_trigger_free(t->trig); list_del(&t->l); diff --git a/drivers/input/joystick/Kconfig b/drivers/input/joystick/Kconfig index 505a032e2786..9dcf3f51f2dd 100644 --- a/drivers/input/joystick/Kconfig +++ b/drivers/input/joystick/Kconfig @@ -402,6 +402,7 @@ config JOYSTICK_N64 config JOYSTICK_SENSEHAT tristate "Raspberry Pi Sense HAT joystick" depends on INPUT && I2C + depends on HAS_IOMEM select MFD_SIMPLE_MFD_I2C help Say Y here if you want to enable the driver for the diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index cbb1599a520e..480476121c01 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -85,13 +85,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = { }, { /* - * Lenovo Yoga Tab2 1051L, something messes with the home-button + * Lenovo Yoga Tab2 1051F/1051L, something messes with the home-button * IRQ settings, leading to a non-working home-button. 
*/ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "60073"), - DMI_MATCH(DMI_PRODUCT_VERSION, "1051L"), + DMI_MATCH(DMI_PRODUCT_VERSION, "1051"), }, }, {} /* Terminating entry */ diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c index 59a14505b9cd..ca150618d32f 100644 --- a/drivers/input/mouse/bcm5974.c +++ b/drivers/input/mouse/bcm5974.c @@ -942,17 +942,22 @@ static int bcm5974_probe(struct usb_interface *iface, if (!dev->tp_data) goto err_free_bt_buffer; - if (dev->bt_urb) + if (dev->bt_urb) { usb_fill_int_urb(dev->bt_urb, udev, usb_rcvintpipe(udev, cfg->bt_ep), dev->bt_data, dev->cfg.bt_datalen, bcm5974_irq_button, dev, 1); + dev->bt_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + } + usb_fill_int_urb(dev->tp_urb, udev, usb_rcvintpipe(udev, cfg->tp_ep), dev->tp_data, dev->cfg.tp_datalen, bcm5974_irq_trackpad, dev, 1); + dev->tp_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; + /* create bcm5974 device */ usb_make_path(udev, dev->phys, sizeof(dev->phys)); strlcat(dev->phys, "/input0", sizeof(dev->phys)); diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 8fdb84b3642b..1d42084d0276 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -987,7 +987,7 @@ static const struct of_device_id ipmmu_of_ids[] = { .compatible = "renesas,ipmmu-r8a779a0", .data = &ipmmu_features_rcar_gen4, }, { - .compatible = "renesas,rcar-gen4-ipmmu", + .compatible = "renesas,rcar-gen4-ipmmu-vmsa", .data = &ipmmu_features_rcar_gen4, }, { /* Terminator */ diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 4ab1038b5482..1f23a6be7d88 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -298,7 +298,7 @@ config XTENSA_MX config XILINX_INTC bool "Xilinx Interrupt Controller IP" - depends on MICROBLAZE || ARCH_ZYNQ || ARCH_ZYNQMP + depends on OF select IRQ_DOMAIN help Support for the Xilinx Interrupt Controller IP core. 
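The XILINX_INTC relaxation above is safe because OF-declared interrupt controllers bind from the devicetree alone, with no architecture-specific board code involved. A minimal sketch of that pattern, using a hypothetical "acme,intc" compatible string and register layout (not the real Xilinx driver):

	#include <linux/init.h>
	#include <linux/irqchip.h>
	#include <linux/of.h>
	#include <linux/of_address.h>

	/* Hypothetical init callback: everything it needs comes from the DT node. */
	static int __init acme_intc_init(struct device_node *node,
					 struct device_node *parent)
	{
		void __iomem *base = of_iomap(node, 0);	/* map the "reg" window */

		if (!base)
			return -ENOMEM;

		/*
		 * A real driver would allocate an irq_domain for this node and
		 * install its handlers here; none of that depends on the host
		 * architecture, which is why "depends on OF" is the only hard
		 * requirement left.
		 */
		return 0;
	}
	IRQCHIP_DECLARE(acme_intc, "acme,intc", acme_intc_init);

A controller declared this way probes on any OF platform whose devicetree carries a matching node, which is exactly what replacing the per-architecture list with "depends on OF" enables.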
diff --git a/drivers/irqchip/irq-apple-aic.c b/drivers/irqchip/irq-apple-aic.c index 12dd48727a15..5ac83185ff47 100644 --- a/drivers/irqchip/irq-apple-aic.c +++ b/drivers/irqchip/irq-apple-aic.c @@ -1035,6 +1035,7 @@ static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff) continue; cpu = of_cpu_node_to_id(cpu_node); + of_node_put(cpu_node); if (WARN_ON(cpu < 0)) continue; @@ -1143,6 +1144,7 @@ static int __init aic_of_ic_init(struct device_node *node, struct device_node *p for_each_child_of_node(affs, chld) build_fiq_affinity(irqc, chld); } + of_node_put(affs); set_handle_irq(aic_handle_irq); set_handle_fiq(aic_handle_fiq); diff --git a/drivers/irqchip/irq-gic-realview.c b/drivers/irqchip/irq-gic-realview.c index b4c1924f0255..38fab02ffe9d 100644 --- a/drivers/irqchip/irq-gic-realview.c +++ b/drivers/irqchip/irq-gic-realview.c @@ -57,6 +57,7 @@ realview_gic_of_init(struct device_node *node, struct device_node *parent) /* The PB11MPCore GIC needs to be configured in the syscon */ map = syscon_node_to_regmap(np); + of_node_put(np); if (!IS_ERR(map)) { /* new irq mode with no DCC */ regmap_write(map, REALVIEW_SYS_LOCK_OFFSET, diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 2be8dea6b6b0..5c1cf907ee68 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -1932,7 +1932,7 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); if (!gic_data.ppi_descs) - return; + goto out_put_node; nr_parts = of_get_child_count(parts_node); @@ -1973,12 +1973,15 @@ static void __init gic_populate_ppi_partitions(struct device_node *gic_node) continue; cpu = of_cpu_node_to_id(cpu_node); - if (WARN_ON(cpu < 0)) + if (WARN_ON(cpu < 0)) { + of_node_put(cpu_node); continue; + } pr_cont("%pOF[%d] ", cpu_node, cpu); cpumask_set_cpu(cpu, &part->mask); + of_node_put(cpu_node); } pr_cont("}\n"); diff --git a/drivers/irqchip/irq-loongson-liointc.c b/drivers/irqchip/irq-loongson-liointc.c index aed88857d90f..8d05d8bcf56f 100644 --- a/drivers/irqchip/irq-loongson-liointc.c +++ b/drivers/irqchip/irq-loongson-liointc.c @@ -39,6 +39,12 @@ #define LIOINTC_ERRATA_IRQ 10 +#if defined(CONFIG_MIPS) +#define liointc_core_id get_ebase_cpunum() +#else +#define liointc_core_id get_csr_cpuid() +#endif + struct liointc_handler_data { struct liointc_priv *priv; u32 parent_int_map; @@ -57,7 +63,7 @@ static void liointc_chained_handle_irq(struct irq_desc *desc) struct liointc_handler_data *handler = irq_desc_get_handler_data(desc); struct irq_chip *chip = irq_desc_get_chip(desc); struct irq_chip_generic *gc = handler->priv->gc; - int core = cpu_logical_map(smp_processor_id()) % LIOINTC_NUM_CORES; + int core = liointc_core_id % LIOINTC_NUM_CORES; u32 pending; chained_irq_enter(chip, desc); diff --git a/drivers/irqchip/irq-realtek-rtl.c b/drivers/irqchip/irq-realtek-rtl.c index 50a56820c99b..56bf502d9c67 100644 --- a/drivers/irqchip/irq-realtek-rtl.c +++ b/drivers/irqchip/irq-realtek-rtl.c @@ -134,9 +134,9 @@ static int __init map_interrupts(struct device_node *node, struct irq_domain *do if (!cpu_ictl) return -EINVAL; ret = of_property_read_u32(cpu_ictl, "#interrupt-cells", &tmp); + of_node_put(cpu_ictl); if (ret || tmp != 1) return -EINVAL; - of_node_put(cpu_ictl); cpu_int = be32_to_cpup(imap + 2); if (cpu_int > 7 || cpu_int < 2) diff --git a/drivers/irqchip/irq-uniphier-aidet.c b/drivers/irqchip/irq-uniphier-aidet.c index 89121b39be26..716b1bb88bf2 
100644 --- a/drivers/irqchip/irq-uniphier-aidet.c +++ b/drivers/irqchip/irq-uniphier-aidet.c @@ -237,6 +237,7 @@ static const struct of_device_id uniphier_aidet_match[] = { { .compatible = "socionext,uniphier-ld11-aidet" }, { .compatible = "socionext,uniphier-ld20-aidet" }, { .compatible = "socionext,uniphier-pxs3-aidet" }, + { .compatible = "socionext,uniphier-nx1-aidet" }, { /* sentinel */ } }; diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index d21648a923ea..c954ff91870e 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -33,6 +33,14 @@ struct dm_kobject_holder { * access their members! */ +/* + * For mempools pre-allocation at the table loading time. + */ +struct dm_md_mempools { + struct bio_set bs; + struct bio_set io_bs; +}; + struct mapped_device { struct mutex suspend_lock; @@ -110,8 +118,7 @@ struct mapped_device { /* * io objects are allocated from here. */ - struct bio_set io_bs; - struct bio_set bs; + struct dm_md_mempools *mempools; /* kobject and completion */ struct dm_kobject_holder kobj_holder; @@ -265,6 +272,7 @@ struct dm_io { atomic_t io_count; struct mapped_device *md; + struct bio *split_bio; /* The three fields represent mapped part of original bio */ struct bio *orig_bio; unsigned int sector_offset; /* offset to end of orig_bio */ diff --git a/drivers/md/dm-era-target.c b/drivers/md/dm-era-target.c index 1f6bf152b3c7..e92c1afc3677 100644 --- a/drivers/md/dm-era-target.c +++ b/drivers/md/dm-era-target.c @@ -1400,7 +1400,7 @@ static void start_worker(struct era *era) static void stop_worker(struct era *era) { atomic_set(&era->suspended, 1); - flush_workqueue(era->wq); + drain_workqueue(era->wq); } /*---------------------------------------------------------------- @@ -1570,6 +1570,12 @@ static void era_postsuspend(struct dm_target *ti) } stop_worker(era); + + r = metadata_commit(era->md); + if (r) { + DMERR("%s: metadata_commit failed", __func__); + /* FIXME: fail mode */ + } } static int era_preresume(struct dm_target *ti) diff --git a/drivers/md/dm-log.c b/drivers/md/dm-log.c index 06f328928a7f..0c6620e7b7bf 100644 --- a/drivers/md/dm-log.c +++ b/drivers/md/dm-log.c @@ -415,8 +415,7 @@ static int create_log_context(struct dm_dirty_log *log, struct dm_target *ti, /* * Work out how many "unsigned long"s we need to hold the bitset. 
*/ - bitset_size = dm_round_up(region_count, - sizeof(*lc->clean_bits) << BYTE_SHIFT); + bitset_size = dm_round_up(region_count, BITS_PER_LONG); bitset_size >>= BYTE_SHIFT; lc->bitset_uint32_count = bitset_size / sizeof(*lc->clean_bits); @@ -616,7 +615,7 @@ static int disk_resume(struct dm_dirty_log *log) log_clear_bit(lc, lc->clean_bits, i); /* clear any old bits -- device has shrunk */ - for (i = lc->region_count; i % (sizeof(*lc->clean_bits) << BYTE_SHIFT); i++) + for (i = lc->region_count; i % BITS_PER_LONG; i++) log_clear_bit(lc, lc->clean_bits, i); /* copy clean across to sync */ diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 5e41fbae3f6b..9526ccbedafb 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3725,7 +3725,7 @@ static int raid_message(struct dm_target *ti, unsigned int argc, char **argv, if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) { if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev, false); + md_reap_sync_thread(mddev); } } else if (decipher_sync_action(mddev, mddev->recovery) != st_idle) return -EBUSY; diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 6087cdcaad46..a83b98a8d2a9 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -319,7 +319,7 @@ static int setup_clone(struct request *clone, struct request *rq, { int r; - r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask, + r = blk_rq_prep_clone(clone, rq, &tio->md->mempools->bs, gfp_mask, dm_rq_bio_constructor, tio); if (r) return r; diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 0e833a154b31..bd539afbfe88 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -1038,17 +1038,6 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * return 0; } -void dm_table_free_md_mempools(struct dm_table *t) -{ - dm_free_md_mempools(t->mempools); - t->mempools = NULL; -} - -struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t) -{ - return t->mempools; -} - static int setup_indexes(struct dm_table *t) { int i; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index dfb0a551bd88..2b75f1ef7386 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -136,14 +136,6 @@ static int get_swap_bios(void) return latch; } -/* - * For mempools pre-allocation at the table loading time. 
- */ -struct dm_md_mempools { - struct bio_set bs; - struct bio_set io_bs; -}; - struct table_device { struct list_head list; refcount_t count; @@ -563,6 +555,10 @@ static void dm_start_io_acct(struct dm_io *io, struct bio *clone) unsigned long flags; /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */ spin_lock_irqsave(&io->lock, flags); + if (dm_io_flagged(io, DM_IO_ACCOUNTED)) { + spin_unlock_irqrestore(&io->lock, flags); + return; + } dm_io_set_flag(io, DM_IO_ACCOUNTED); spin_unlock_irqrestore(&io->lock, flags); } @@ -581,7 +577,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) struct dm_target_io *tio; struct bio *clone; - clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->io_bs); + clone = bio_alloc_clone(NULL, bio, GFP_NOIO, &md->mempools->io_bs); /* Set default bdev, but target must bio_set_dev() before issuing IO */ clone->bi_bdev = md->disk->part0; @@ -598,6 +594,7 @@ static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio) atomic_set(&io->io_count, 2); this_cpu_inc(*md->pending_io); io->orig_bio = bio; + io->split_bio = NULL; io->md = md; spin_lock_init(&io->lock); io->start_time = jiffies; @@ -628,7 +625,8 @@ static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti, } else { struct mapped_device *md = ci->io->md; - clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, &md->bs); + clone = bio_alloc_clone(NULL, ci->bio, gfp_mask, + &md->mempools->bs); if (!clone) return NULL; /* Set default bdev, but target must bio_set_dev() before issuing IO */ @@ -718,18 +716,18 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) } static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md, - int *srcu_idx, struct bio *bio) + int *srcu_idx, unsigned bio_opf) { - if (bio->bi_opf & REQ_NOWAIT) + if (bio_opf & REQ_NOWAIT) return dm_get_live_table_fast(md); else return dm_get_live_table(md, srcu_idx); } static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx, - struct bio *bio) + unsigned bio_opf) { - if (bio->bi_opf & REQ_NOWAIT) + if (bio_opf & REQ_NOWAIT) dm_put_live_table_fast(md); else dm_put_live_table(md, srcu_idx); @@ -890,7 +888,7 @@ static void dm_io_complete(struct dm_io *io) { blk_status_t io_error; struct mapped_device *md = io->md; - struct bio *bio = io->orig_bio; + struct bio *bio = io->split_bio ? 
io->split_bio : io->orig_bio; if (io->status == BLK_STS_DM_REQUEUE) { unsigned long flags; @@ -942,9 +940,11 @@ static void dm_io_complete(struct dm_io *io) if (io_error == BLK_STS_AGAIN) { /* io_uring doesn't handle BLK_STS_AGAIN (yet) */ queue_io(md, bio); + return; } } - return; + if (io_error == BLK_STS_DM_REQUEUE) + return; } if (bio_is_flush_with_data(bio)) { @@ -1023,23 +1023,19 @@ static void clone_endio(struct bio *bio) struct dm_io *io = tio->io; struct mapped_device *md = io->md; - if (likely(bio->bi_bdev != md->disk->part0)) { - struct request_queue *q = bdev_get_queue(bio->bi_bdev); - - if (unlikely(error == BLK_STS_TARGET)) { - if (bio_op(bio) == REQ_OP_DISCARD && - !bdev_max_discard_sectors(bio->bi_bdev)) - disable_discard(md); - else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && - !q->limits.max_write_zeroes_sectors) - disable_write_zeroes(md); - } - - if (static_branch_unlikely(&zoned_enabled) && - unlikely(blk_queue_is_zoned(q))) - dm_zone_endio(io, bio); + if (unlikely(error == BLK_STS_TARGET)) { + if (bio_op(bio) == REQ_OP_DISCARD && + !bdev_max_discard_sectors(bio->bi_bdev)) + disable_discard(md); + else if (bio_op(bio) == REQ_OP_WRITE_ZEROES && + !bdev_write_zeroes_sectors(bio->bi_bdev)) + disable_write_zeroes(md); } + if (static_branch_unlikely(&zoned_enabled) && + unlikely(blk_queue_is_zoned(bdev_get_queue(bio->bi_bdev)))) + dm_zone_endio(io, bio); + if (endio) { int r = endio(ti, bio, &error); switch (r) { @@ -1620,7 +1616,12 @@ static blk_status_t __split_and_process_bio(struct clone_info *ci) ti = dm_table_find_target(ci->map, ci->sector); if (unlikely(!ti)) return BLK_STS_IOERR; - else if (unlikely(ci->is_abnormal_io)) + + if (unlikely((ci->bio->bi_opf & REQ_NOWAIT) != 0) && + unlikely(!dm_target_supports_nowait(ti->type))) + return BLK_STS_NOTSUPP; + + if (unlikely(ci->is_abnormal_io)) return __process_abnormal_io(ci, ti); /* @@ -1693,9 +1694,11 @@ static void dm_split_and_process_bio(struct mapped_device *md, * Remainder must be passed to submit_bio_noacct() so it gets handled * *after* bios already submitted have been completely processed. 
*/ - bio_trim(bio, io->sectors, ci.sector_count); - trace_block_split(bio, bio->bi_iter.bi_sector); - bio_inc_remaining(bio); + WARN_ON_ONCE(!dm_io_flagged(io, DM_IO_WAS_SPLIT)); + io->split_bio = bio_split(bio, io->sectors, GFP_NOIO, + &md->queue->bio_split); + bio_chain(io->split_bio, bio); + trace_block_split(io->split_bio, bio->bi_iter.bi_sector); submit_bio_noacct(bio); out: /* @@ -1722,8 +1725,9 @@ static void dm_submit_bio(struct bio *bio) struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; int srcu_idx; struct dm_table *map; + unsigned bio_opf = bio->bi_opf; - map = dm_get_live_table_bio(md, &srcu_idx, bio); + map = dm_get_live_table_bio(md, &srcu_idx, bio_opf); /* If suspended, or map not yet available, queue this IO for later */ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || @@ -1739,7 +1743,7 @@ static void dm_submit_bio(struct bio *bio) dm_split_and_process_bio(md, map, bio); out: - dm_put_live_table_bio(md, srcu_idx, bio); + dm_put_live_table_bio(md, srcu_idx, bio_opf); } static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob, @@ -1876,8 +1880,7 @@ static void cleanup_mapped_device(struct mapped_device *md) { if (md->wq) destroy_workqueue(md->wq); - bioset_exit(&md->bs); - bioset_exit(&md->io_bs); + dm_free_md_mempools(md->mempools); if (md->dax_dev) { dax_remove_host(md->disk); @@ -2049,48 +2052,6 @@ static void free_dev(struct mapped_device *md) kvfree(md); } -static int __bind_mempools(struct mapped_device *md, struct dm_table *t) -{ - struct dm_md_mempools *p = dm_table_get_md_mempools(t); - int ret = 0; - - if (dm_table_bio_based(t)) { - /* - * The md may already have mempools that need changing. - * If so, reload bioset because front_pad may have changed - * because a different table was loaded. - */ - bioset_exit(&md->bs); - bioset_exit(&md->io_bs); - - } else if (bioset_initialized(&md->bs)) { - /* - * There's no need to reload with request-based dm - * because the size of front_pad doesn't change. - * Note for future: If you are to reload bioset, - * prep-ed requests in the queue may refer - * to bio from the old bioset, so you must walk - * through the queue to unprep. - */ - goto out; - } - - BUG_ON(!p || - bioset_initialized(&md->bs) || - bioset_initialized(&md->io_bs)); - - ret = bioset_init_from_src(&md->bs, &p->bs); - if (ret) - goto out; - ret = bioset_init_from_src(&md->io_bs, &p->io_bs); - if (ret) - bioset_exit(&md->bs); -out: - /* mempool bind completed, no longer need any mempools in the table */ - dm_table_free_md_mempools(t); - return ret; -} - /* * Bind a table to the device. */ @@ -2144,12 +2105,28 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t, * immutable singletons - used to optimize dm_mq_queue_rq. */ md->immutable_target = dm_table_get_immutable_target(t); - } - ret = __bind_mempools(md, t); - if (ret) { - old_map = ERR_PTR(ret); - goto out; + /* + * There is no need to reload with request-based dm because the + * size of front_pad doesn't change. + * + * Note for future: If you are to reload bioset, prep-ed + * requests in the queue may refer to bio from the old bioset, + * so you must walk through the queue to unprep. + */ + if (!md->mempools) { + md->mempools = t->mempools; + t->mempools = NULL; + } + } else { + /* + * The md may already have mempools that need changing. + * If so, reload bioset because front_pad may have changed + * because a different table was loaded. 
+ */ + dm_free_md_mempools(md->mempools); + md->mempools = t->mempools; + t->mempools = NULL; } ret = dm_table_set_restrictions(t, md->queue, limits); diff --git a/drivers/md/dm.h b/drivers/md/dm.h index 3f89664fea01..a8405ce305a9 100644 --- a/drivers/md/dm.h +++ b/drivers/md/dm.h @@ -71,8 +71,6 @@ struct dm_target *dm_table_get_immutable_target(struct dm_table *t); struct dm_target *dm_table_get_wildcard_target(struct dm_table *t); bool dm_table_bio_based(struct dm_table *t); bool dm_table_request_based(struct dm_table *t); -void dm_table_free_md_mempools(struct dm_table *t); -struct dm_md_mempools *dm_table_get_md_mempools(struct dm_table *t); void dm_lock_md_type(struct mapped_device *md); void dm_unlock_md_type(struct mapped_device *md); diff --git a/drivers/md/md.c b/drivers/md/md.c index 8273ac5eef06..c7ecb0bffda0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -4831,7 +4831,7 @@ action_store(struct mddev *mddev, const char *page, size_t len) flush_workqueue(md_misc_wq); if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev, true); + md_reap_sync_thread(mddev); } mddev_unlock(mddev); } @@ -6197,7 +6197,7 @@ static void __md_stop_writes(struct mddev *mddev) flush_workqueue(md_misc_wq); if (mddev->sync_thread) { set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev, true); + md_reap_sync_thread(mddev); } del_timer_sync(&mddev->safemode_timer); @@ -9303,7 +9303,7 @@ void md_check_recovery(struct mddev *mddev) * ->spare_active and clear saved_raid_disk */ set_bit(MD_RECOVERY_INTR, &mddev->recovery); - md_reap_sync_thread(mddev, true); + md_reap_sync_thread(mddev); clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); @@ -9338,7 +9338,7 @@ void md_check_recovery(struct mddev *mddev) goto unlock; } if (mddev->sync_thread) { - md_reap_sync_thread(mddev, true); + md_reap_sync_thread(mddev); goto unlock; } /* Set RUNNING before clearing NEEDED to avoid @@ -9411,18 +9411,14 @@ void md_check_recovery(struct mddev *mddev) } EXPORT_SYMBOL(md_check_recovery); -void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held) +void md_reap_sync_thread(struct mddev *mddev) { struct md_rdev *rdev; sector_t old_dev_sectors = mddev->dev_sectors; bool is_reshaped = false; - if (reconfig_mutex_held) - mddev_unlock(mddev); /* resync has finished, collect result */ md_unregister_thread(&mddev->sync_thread); - if (reconfig_mutex_held) - mddev_lock_nointr(mddev); if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && mddev->degraded != mddev->raid_disks) { diff --git a/drivers/md/md.h b/drivers/md/md.h index 5f62c46ac2d3..cf2cbb17acbd 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -719,7 +719,7 @@ extern struct md_thread *md_register_thread( extern void md_unregister_thread(struct md_thread **threadp); extern void md_wakeup_thread(struct md_thread *thread); extern void md_check_recovery(struct mddev *mddev); -extern void md_reap_sync_thread(struct mddev *mddev, bool reconfig_mutex_held); +extern void md_reap_sync_thread(struct mddev *mddev); extern int mddev_init_writes_pending(struct mddev *mddev); extern bool md_write_start(struct mddev *mddev, struct bio *bi); extern void md_write_inc(struct mddev *mddev, struct bio *bi); diff --git a/drivers/md/raid5-ppl.c b/drivers/md/raid5-ppl.c index 973e2e06f19c..0a2e4806b1ec 100644 --- a/drivers/md/raid5-ppl.c +++ b/drivers/md/raid5-ppl.c 
@@ -629,9 +629,9 @@ static void ppl_do_flush(struct ppl_io_unit *io) if (bdev) { struct bio *bio; - bio = bio_alloc_bioset(bdev, 0, GFP_NOIO, + bio = bio_alloc_bioset(bdev, 0, REQ_OP_WRITE | REQ_PREFLUSH, - &ppl_conf->flush_bs); + GFP_NOIO, &ppl_conf->flush_bs); bio->bi_private = io; bio->bi_end_io = ppl_flush_endio; diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig index b7800b37af78..ac1a411648d8 100644 --- a/drivers/memory/Kconfig +++ b/drivers/memory/Kconfig @@ -105,6 +105,7 @@ config TI_EMIF config OMAP_GPMC tristate "Texas Instruments OMAP SoC GPMC driver" depends on OF_ADDRESS + depends on ARCH_OMAP2PLUS || ARCH_KEYSTONE || ARCH_K3 || COMPILE_TEST select GPIOLIB help This driver is for the General Purpose Memory Controller (GPMC) diff --git a/drivers/memory/mtk-smi.c b/drivers/memory/mtk-smi.c index 86a3d34f418e..d7cb7ead2ac7 100644 --- a/drivers/memory/mtk-smi.c +++ b/drivers/memory/mtk-smi.c @@ -21,11 +21,13 @@ /* SMI COMMON */ #define SMI_L1LEN 0x100 +#define SMI_L1_ARB 0x200 #define SMI_BUS_SEL 0x220 #define SMI_BUS_LARB_SHIFT(larbid) ((larbid) << 1) /* All are MMU0 defaultly. Only specialize mmu1 here. */ #define F_MMU1_LARB(larbid) (0x1 << SMI_BUS_LARB_SHIFT(larbid)) +#define SMI_READ_FIFO_TH 0x230 #define SMI_M4U_TH 0x234 #define SMI_FIFO_TH1 0x238 #define SMI_FIFO_TH2 0x23c @@ -360,6 +362,7 @@ static const struct of_device_id mtk_smi_larb_of_ids[] = { {.compatible = "mediatek,mt2701-smi-larb", .data = &mtk_smi_larb_mt2701}, {.compatible = "mediatek,mt2712-smi-larb", .data = &mtk_smi_larb_mt2712}, {.compatible = "mediatek,mt6779-smi-larb", .data = &mtk_smi_larb_mt6779}, + {.compatible = "mediatek,mt6795-smi-larb", .data = &mtk_smi_larb_mt8173}, {.compatible = "mediatek,mt8167-smi-larb", .data = &mtk_smi_larb_mt8167}, {.compatible = "mediatek,mt8173-smi-larb", .data = &mtk_smi_larb_mt8173}, {.compatible = "mediatek,mt8183-smi-larb", .data = &mtk_smi_larb_mt8183}, @@ -404,13 +407,16 @@ static int mtk_smi_device_link_common(struct device *dev, struct device **com_de of_node_put(smi_com_node); if (smi_com_pdev) { /* smi common is the supplier, Make sure it is ready before */ - if (!platform_get_drvdata(smi_com_pdev)) + if (!platform_get_drvdata(smi_com_pdev)) { + put_device(&smi_com_pdev->dev); return -EPROBE_DEFER; + } smi_com_dev = &smi_com_pdev->dev; link = device_link_add(dev, smi_com_dev, DL_FLAG_PM_RUNTIME | DL_FLAG_STATELESS); if (!link) { dev_err(dev, "Unable to link smi-common dev\n"); + put_device(&smi_com_pdev->dev); return -ENODEV; } *com_dev = smi_com_dev; @@ -541,6 +547,13 @@ static struct platform_driver mtk_smi_larb_driver = { } }; +static const struct mtk_smi_reg_pair mtk_smi_common_mt6795_init[SMI_COMMON_INIT_REGS_NR] = { + {SMI_L1_ARB, 0x1b}, + {SMI_M4U_TH, 0xce810c85}, + {SMI_FIFO_TH1, 0x43214c8}, + {SMI_READ_FIFO_TH, 0x191f}, +}; + static const struct mtk_smi_reg_pair mtk_smi_common_mt8195_init[SMI_COMMON_INIT_REGS_NR] = { {SMI_L1LEN, 0xb}, {SMI_M4U_TH, 0xe100e10}, @@ -565,6 +578,12 @@ static const struct mtk_smi_common_plat mtk_smi_common_mt6779 = { F_MMU1_LARB(5) | F_MMU1_LARB(6) | F_MMU1_LARB(7), }; +static const struct mtk_smi_common_plat mtk_smi_common_mt6795 = { + .type = MTK_SMI_GEN2, + .bus_sel = F_MMU1_LARB(0), + .init = mtk_smi_common_mt6795_init, +}; + static const struct mtk_smi_common_plat mtk_smi_common_mt8183 = { .type = MTK_SMI_GEN2, .has_gals = true, @@ -609,6 +628,7 @@ static const struct of_device_id mtk_smi_common_of_ids[] = { {.compatible = "mediatek,mt2701-smi-common", .data = &mtk_smi_common_gen1}, {.compatible = 
"mediatek,mt2712-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt6779-smi-common", .data = &mtk_smi_common_mt6779}, + {.compatible = "mediatek,mt6795-smi-common", .data = &mtk_smi_common_mt6795}, {.compatible = "mediatek,mt8167-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt8173-smi-common", .data = &mtk_smi_common_gen2}, {.compatible = "mediatek,mt8183-smi-common", .data = &mtk_smi_common_mt8183}, diff --git a/drivers/memory/samsung/exynos5422-dmc.c b/drivers/memory/samsung/exynos5422-dmc.c index 4733e7898ffe..c491cd549644 100644 --- a/drivers/memory/samsung/exynos5422-dmc.c +++ b/drivers/memory/samsung/exynos5422-dmc.c @@ -1187,33 +1187,39 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc) dmc->timing_row = devm_kmalloc_array(dmc->dev, TIMING_COUNT, sizeof(u32), GFP_KERNEL); - if (!dmc->timing_row) - return -ENOMEM; + if (!dmc->timing_row) { + ret = -ENOMEM; + goto put_node; + } dmc->timing_data = devm_kmalloc_array(dmc->dev, TIMING_COUNT, sizeof(u32), GFP_KERNEL); - if (!dmc->timing_data) - return -ENOMEM; + if (!dmc->timing_data) { + ret = -ENOMEM; + goto put_node; + } dmc->timing_power = devm_kmalloc_array(dmc->dev, TIMING_COUNT, sizeof(u32), GFP_KERNEL); - if (!dmc->timing_power) - return -ENOMEM; + if (!dmc->timing_power) { + ret = -ENOMEM; + goto put_node; + } dmc->timings = of_lpddr3_get_ddr_timings(np_ddr, dmc->dev, DDR_TYPE_LPDDR3, &dmc->timings_arr_size); if (!dmc->timings) { - of_node_put(np_ddr); dev_warn(dmc->dev, "could not get timings from DT\n"); - return -EINVAL; + ret = -EINVAL; + goto put_node; } dmc->min_tck = of_lpddr3_get_min_tck(np_ddr, dmc->dev); if (!dmc->min_tck) { - of_node_put(np_ddr); dev_warn(dmc->dev, "could not get tck from DT\n"); - return -EINVAL; + ret = -EINVAL; + goto put_node; } /* Sorted array of OPPs with frequency ascending */ @@ -1227,13 +1233,14 @@ static int of_get_dram_timings(struct exynos5_dmc *dmc) clk_period_ps); } - of_node_put(np_ddr); /* Take the highest frequency's timings as 'bypass' */ dmc->bypass_timing_row = dmc->timing_row[idx - 1]; dmc->bypass_timing_data = dmc->timing_data[idx - 1]; dmc->bypass_timing_power = dmc->timing_power[idx - 1]; +put_node: + of_node_put(np_ddr); return ret; } diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c index d6cd5537126c..69f9b0336410 100644 --- a/drivers/misc/atmel-ssc.c +++ b/drivers/misc/atmel-ssc.c @@ -232,9 +232,9 @@ static int ssc_probe(struct platform_device *pdev) clk_disable_unprepare(ssc->clk); ssc->irq = platform_get_irq(pdev, 0); - if (!ssc->irq) { + if (ssc->irq < 0) { dev_dbg(&pdev->dev, "could not get irq\n"); - return -ENXIO; + return ssc->irq; } mutex_lock(&user_lock); diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index 749cc5a46d13..b1e76030cafd 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -407,6 +407,8 @@ static void rts5261_init_from_hw(struct rtsx_pcr *pcr) // default setting_reg1 = PCR_SETTING_REG1; setting_reg2 = PCR_SETTING_REG2; + } else { + return; } pci_read_config_dword(pdev, setting_reg2, &lval2); diff --git a/drivers/misc/eeprom/at25.c b/drivers/misc/eeprom/at25.c index 8d169a35cf13..c9c56fd194c1 100644 --- a/drivers/misc/eeprom/at25.c +++ b/drivers/misc/eeprom/at25.c @@ -79,6 +79,11 @@ static int at25_ee_read(void *priv, unsigned int offset, { struct at25_data *at25 = priv; char *buf = val; + size_t max_chunk = spi_max_transfer_size(at25->spi); + size_t num_msgs = DIV_ROUND_UP(count, max_chunk); + size_t nr_bytes = 0; 
+ unsigned int msg_offset; + size_t msg_count; u8 *cp; ssize_t status; struct spi_transfer t[2]; @@ -92,54 +97,59 @@ static int at25_ee_read(void *priv, unsigned int offset, if (unlikely(!count)) return -EINVAL; - cp = at25->command; + msg_offset = (unsigned int)offset; + msg_count = min(count, max_chunk); + while (num_msgs) { + cp = at25->command; - instr = AT25_READ; - if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) - if (offset >= BIT(at25->addrlen * 8)) - instr |= AT25_INSTR_BIT3; + instr = AT25_READ; + if (at25->chip.flags & EE_INSTR_BIT3_IS_ADDR) + if (msg_offset >= BIT(at25->addrlen * 8)) + instr |= AT25_INSTR_BIT3; - mutex_lock(&at25->lock); + mutex_lock(&at25->lock); - *cp++ = instr; - - /* 8/16/24-bit address is written MSB first */ - switch (at25->addrlen) { - default: /* case 3 */ - *cp++ = offset >> 16; - fallthrough; - case 2: - *cp++ = offset >> 8; - fallthrough; - case 1: - case 0: /* can't happen: for better code generation */ - *cp++ = offset >> 0; - } + *cp++ = instr; - spi_message_init(&m); - memset(t, 0, sizeof(t)); + /* 8/16/24-bit address is written MSB first */ + switch (at25->addrlen) { + default: /* case 3 */ + *cp++ = msg_offset >> 16; + fallthrough; + case 2: + *cp++ = msg_offset >> 8; + fallthrough; + case 1: + case 0: /* can't happen: for better code generation */ + *cp++ = msg_offset >> 0; + } - t[0].tx_buf = at25->command; - t[0].len = at25->addrlen + 1; - spi_message_add_tail(&t[0], &m); + spi_message_init(&m); + memset(t, 0, sizeof(t)); - t[1].rx_buf = buf; - t[1].len = count; - spi_message_add_tail(&t[1], &m); + t[0].tx_buf = at25->command; + t[0].len = at25->addrlen + 1; + spi_message_add_tail(&t[0], &m); - /* - * Read it all at once. - * - * REVISIT that's potentially a problem with large chips, if - * other devices on the bus need to be accessed regularly or - * this chip is clocked very slowly. 
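The reworked at25 read path here splits one large EEPROM read into a series of SPI messages no larger than spi_max_transfer_size(). A standalone sketch of the chunk-walk arithmetic it relies on (example sizes assumed; no SPI involved):

#include <stdio.h>
#include <stddef.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        /* Assumed example: a 10000-byte read against a 4096-byte
         * controller limit reported by spi_max_transfer_size(). */
        size_t count = 10000, max_chunk = 4096, nr_bytes = 0;
        size_t num_msgs = DIV_ROUND_UP(count, max_chunk);
        unsigned int msg_offset = 0x100;

        while (num_msgs--) {
                size_t msg_count = count - nr_bytes < max_chunk ?
                                   count - nr_bytes : max_chunk;

                printf("spi message: offset=%#x len=%zu\n", msg_offset, msg_count);
                msg_offset += msg_count;
                nr_bytes += msg_count;
        }
        return 0;       /* prints chunks of 4096 + 4096 + 1808 bytes */
}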
- */ - status = spi_sync(at25->spi, &m); - dev_dbg(&at25->spi->dev, "read %zu bytes at %d --> %zd\n", - count, offset, status); + t[1].rx_buf = buf + nr_bytes; + t[1].len = msg_count; + spi_message_add_tail(&t[1], &m); - mutex_unlock(&at25->lock); - return status; + status = spi_sync(at25->spi, &m); + + mutex_unlock(&at25->lock); + + if (status) + return status; + + --num_msgs; + msg_offset += msg_count; + nr_bytes += msg_count; + } + + dev_dbg(&at25->spi->dev, "read %zu bytes at %d\n", + count, offset); + return 0; } /* Read extra registers as ID or serial number */ @@ -190,6 +200,7 @@ ATTRIBUTE_GROUPS(sernum); static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) { struct at25_data *at25 = priv; + size_t maxsz = spi_max_transfer_size(at25->spi); const char *buf = val; int status = 0; unsigned buf_size; @@ -253,6 +264,8 @@ static int at25_ee_write(void *priv, unsigned int off, void *val, size_t count) segment = buf_size - (offset % buf_size); if (segment > count) segment = count; + if (segment > maxsz) + segment = maxsz; memcpy(cp, buf, segment); status = spi_write(at25->spi, bounce, segment + at25->addrlen + 1); diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c index cebcca6d6d3e..cf2b8261da14 100644 --- a/drivers/misc/mei/hbm.c +++ b/drivers/misc/mei/hbm.c @@ -1351,7 +1351,8 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr) if (dev->dev_state != MEI_DEV_INIT_CLIENTS || dev->hbm_state != MEI_HBM_CAP_SETUP) { - if (dev->dev_state == MEI_DEV_POWER_DOWN) { + if (dev->dev_state == MEI_DEV_POWER_DOWN || + dev->dev_state == MEI_DEV_POWERING_DOWN) { dev_dbg(dev->dev, "hbm: capabilities response: on shutdown, ignoring\n"); return 0; } diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h index 64ce3f830262..15e8e2b322b1 100644 --- a/drivers/misc/mei/hw-me-regs.h +++ b/drivers/misc/mei/hw-me-regs.h @@ -109,6 +109,8 @@ #define MEI_DEV_ID_ADP_P 0x51E0 /* Alder Lake Point P */ #define MEI_DEV_ID_ADP_N 0x54E0 /* Alder Lake Point N */ +#define MEI_DEV_ID_RPL_S 0x7A68 /* Raptor Lake Point S */ + /* * MEI HW Section */ diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index 9870bf717979..befa491e3344 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -1154,6 +1154,8 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) ret = mei_me_d0i3_exit_sync(dev); if (ret) return ret; + } else { + hw->pg_state = MEI_PG_OFF; } } diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 33e58821e478..5435604327a7 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -116,6 +116,8 @@ static const struct pci_device_id mei_me_pci_tbl[] = { {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_P, MEI_ME_PCH15_CFG)}, {MEI_PCI_DEVICE(MEI_DEV_ID_ADP_N, MEI_ME_PCH15_CFG)}, + {MEI_PCI_DEVICE(MEI_DEV_ID_RPL_S, MEI_ME_PCH15_CFG)}, + /* required last entry */ {0, } }; diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 1259ca22d625..f4a1281658db 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1499,8 +1499,7 @@ void mmc_blk_cqe_recovery(struct mmc_queue *mq) err = mmc_cqe_recovery(host); if (err) mmc_blk_reset(mq->blkdata, host, MMC_BLK_CQE_RECOVERY); - else - mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); + mmc_blk_reset_success(mq->blkdata, MMC_BLK_CQE_RECOVERY); pr_debug("%s: CQE recovery done\n", mmc_hostname(host)); } diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 195dc897188b..9da4489dc345 100644 --- 
a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -1356,7 +1356,7 @@ static void msdc_data_xfer_next(struct msdc_host *host, struct mmc_request *mrq) msdc_request_done(host, mrq); } -static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, +static void msdc_data_xfer_done(struct msdc_host *host, u32 events, struct mmc_request *mrq, struct mmc_data *data) { struct mmc_command *stop; @@ -1376,7 +1376,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, spin_unlock_irqrestore(&host->lock, flags); if (done) - return true; + return; stop = data->stop; if (check_data || (stop && stop->error)) { @@ -1385,12 +1385,15 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1); + ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CTRL, val, + !(val & MSDC_DMA_CTRL_STOP), 1, 20000); + if (ret) + dev_dbg(host->dev, "DMA stop timed out\n"); + ret = readl_poll_timeout_atomic(host->base + MSDC_DMA_CFG, val, !(val & MSDC_DMA_CFG_STS), 1, 20000); - if (ret) { - dev_dbg(host->dev, "DMA stop timed out\n"); - return false; - } + if (ret) + dev_dbg(host->dev, "DMA inactive timed out\n"); sdr_clr_bits(host->base + MSDC_INTEN, data_ints_mask); dev_dbg(host->dev, "DMA stop\n"); @@ -1415,9 +1418,7 @@ static bool msdc_data_xfer_done(struct msdc_host *host, u32 events, } msdc_data_xfer_next(host, mrq); - done = true; } - return done; } static void msdc_set_buswidth(struct msdc_host *host, u32 width) @@ -2416,6 +2417,9 @@ static void msdc_cqe_disable(struct mmc_host *mmc, bool recovery) if (recovery) { sdr_set_field(host->base + MSDC_DMA_CTRL, MSDC_DMA_CTRL_STOP, 1); + if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CTRL, val, + !(val & MSDC_DMA_CTRL_STOP), 1, 3000))) + return; if (WARN_ON(readl_poll_timeout(host->base + MSDC_DMA_CFG, val, !(val & MSDC_DMA_CFG_STS), 1, 3000))) return; diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c index 1499a64ec3aa..f13c08db3da5 100644 --- a/drivers/mmc/host/sdhci-pci-gli.c +++ b/drivers/mmc/host/sdhci-pci-gli.c @@ -982,6 +982,9 @@ static int gl9763e_runtime_resume(struct sdhci_pci_chip *chip) struct sdhci_host *host = slot->host; u16 clock; + if (host->mmc->ios.power_mode != MMC_POWER_ON) + return 0; + clock = sdhci_readw(host, SDHCI_CLOCK_CONTROL); clock |= SDHCI_CLOCK_PLL_EN; diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index 92c20cb8074a..0d4d343dbb77 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -152,6 +152,8 @@ static int sdhci_o2_get_cd(struct mmc_host *mmc) if (!(sdhci_readw(host, O2_PLL_DLL_WDT_CONTROL1) & O2_PLL_LOCK_STATUS)) sdhci_o2_enable_internal_clock(host); + else + sdhci_o2_wait_card_detect_stable(host); return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); } diff --git a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c index 0b68d05846e1..889e40329956 100644 --- a/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c +++ b/drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c @@ -890,7 +890,7 @@ static int gpmi_nfc_compute_timings(struct gpmi_nand_data *this, hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) | BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) | BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles); - hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096); + hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(DIV_ROUND_UP(busy_timeout_cycles, 4096)); /* * 
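The one-line TIMING1 change in gpmi-nand above replaces a multiply with DIV_ROUND_UP(): the BUSY_TIMEOUT field is denominated in ticks of 4096 GPMI clock cycles, so a raw cycle count must be divided, rounding up, not multiplied. The arithmetic, with an assumed cycle count:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int busy_timeout_cycles = 10000;       /* assumed value */

        /* The old code wrote cycles * 4096 into a field that counts
         * 4096-cycle ticks; the fix writes the tick count instead. */
        printf("wrong: %u\n", busy_timeout_cycles * 4096);              /* 40960000 */
        printf("right: %u\n", DIV_ROUND_UP(busy_timeout_cycles, 4096)); /* 3 */
        return 0;
}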
Derive NFC ideal delay from {3}: diff --git a/drivers/mtd/nand/raw/nand_ids.c b/drivers/mtd/nand/raw/nand_ids.c index 88c2440b47d8..dacc5529b3df 100644 --- a/drivers/mtd/nand/raw/nand_ids.c +++ b/drivers/mtd/nand/raw/nand_ids.c @@ -29,9 +29,6 @@ struct nand_flash_dev nand_flash_ids[] = { {"TC58NVG0S3E 1G 3.3V 8-bit", { .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} }, SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512), }, - {"TC58NVG0S3HTA00 1G 3.3V 8-bit", - { .id = {0x98, 0xf1, 0x80, 0x15} }, - SZ_2K, SZ_128, SZ_128K, 0, 4, 128, NAND_ECC_INFO(8, SZ_512), }, {"TC58NVG2S0F 4G 3.3V 8-bit", { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} }, SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) }, diff --git a/drivers/net/amt.c b/drivers/net/amt.c index ebee5f07a208..be2719a3ba70 100644 --- a/drivers/net/amt.c +++ b/drivers/net/amt.c @@ -51,6 +51,7 @@ static char *status_str[] = { }; static char *type_str[] = { + "", /* Type 0 is not defined */ "AMT_MSG_DISCOVERY", "AMT_MSG_ADVERTISEMENT", "AMT_MSG_REQUEST", @@ -2220,8 +2221,7 @@ static bool amt_advertisement_handler(struct amt_dev *amt, struct sk_buff *skb) struct amt_header_advertisement *amta; int hdr_size; - hdr_size = sizeof(*amta) - sizeof(struct amt_header); - + hdr_size = sizeof(*amta) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; @@ -2251,19 +2251,27 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb) struct ethhdr *eth; struct iphdr *iph; + hdr_size = sizeof(*amtmd) + sizeof(struct udphdr); + if (!pskb_may_pull(skb, hdr_size)) + return true; + amtmd = (struct amt_header_mcast_data *)(udp_hdr(skb) + 1); if (amtmd->reserved || amtmd->version) return true; - hdr_size = sizeof(*amtmd) + sizeof(struct udphdr); if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_IP), false)) return true; + skb_reset_network_header(skb); skb_push(skb, sizeof(*eth)); skb_reset_mac_header(skb); skb_pull(skb, sizeof(*eth)); eth = eth_hdr(skb); + + if (!pskb_may_pull(skb, sizeof(*iph))) + return true; iph = ip_hdr(skb); + if (iph->version == 4) { if (!ipv4_is_multicast(iph->daddr)) return true; @@ -2274,6 +2282,9 @@ static bool amt_multicast_data_handler(struct amt_dev *amt, struct sk_buff *skb) } else if (iph->version == 6) { struct ipv6hdr *ip6h; + if (!pskb_may_pull(skb, sizeof(*ip6h))) + return true; + ip6h = ipv6_hdr(skb); if (!ipv6_addr_is_multicast(&ip6h->daddr)) return true; @@ -2306,8 +2317,7 @@ static bool amt_membership_query_handler(struct amt_dev *amt, struct iphdr *iph; int hdr_size, len; - hdr_size = sizeof(*amtmq) - sizeof(struct amt_header); - + hdr_size = sizeof(*amtmq) + sizeof(struct udphdr); if (!pskb_may_pull(skb, hdr_size)) return true; @@ -2315,22 +2325,27 @@ static bool amt_membership_query_handler(struct amt_dev *amt, if (amtmq->reserved || amtmq->version) return true; - hdr_size = sizeof(*amtmq) + sizeof(struct udphdr) - sizeof(*eth); + hdr_size -= sizeof(*eth); if (iptunnel_pull_header(skb, hdr_size, htons(ETH_P_TEB), false)) return true; + oeth = eth_hdr(skb); skb_reset_mac_header(skb); skb_pull(skb, sizeof(*eth)); skb_reset_network_header(skb); eth = eth_hdr(skb); + if (!pskb_may_pull(skb, sizeof(*iph))) + return true; + iph = ip_hdr(skb); if (iph->version == 4) { - if (!ipv4_is_multicast(iph->daddr)) - return true; if (!pskb_may_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS + sizeof(*ihv3))) return true; + if (!ipv4_is_multicast(iph->daddr)) + return true; + ihv3 = skb_pull(skb, sizeof(*iph) + AMT_IPHDR_OPTS); skb_reset_transport_header(skb); 
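The amt changes in these hunks consistently move each header dereference behind a pskb_may_pull() of the exact header size, and test multicast addresses only after the pull succeeds. The same validate-then-parse rule, sketched over a plain byte buffer (hypothetical helper, not the skb API):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct buf { const uint8_t *data; size_t len; };

/* Analogue of the pskb_may_pull() rule: prove the buffer covers the
 * whole header before reading any field; returning true means "drop". */
static bool parse_ipv4_daddr(const struct buf *b, uint32_t *daddr)
{
        if (b->len < 20)        /* minimal IPv4 header */
                return true;
        *daddr = (uint32_t)b->data[16] << 24 | (uint32_t)b->data[17] << 16 |
                 (uint32_t)b->data[18] << 8 | b->data[19];
        return false;
}

int main(void)
{
        uint8_t hdr[20] = { [16] = 224, 0, 0, 1 };      /* dst 224.0.0.1 */
        struct buf b = { hdr, sizeof(hdr) };
        uint32_t daddr;

        if (!parse_ipv4_daddr(&b, &daddr))
                printf("daddr=%#010x\n", daddr);
        return 0;
}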
skb_push(skb, sizeof(*iph) + AMT_IPHDR_OPTS); @@ -2345,15 +2360,17 @@ static bool amt_membership_query_handler(struct amt_dev *amt, ip_eth_mc_map(iph->daddr, eth->h_dest); #if IS_ENABLED(CONFIG_IPV6) } else if (iph->version == 6) { - struct ipv6hdr *ip6h = ipv6_hdr(skb); struct mld2_query *mld2q; + struct ipv6hdr *ip6h; - if (!ipv6_addr_is_multicast(&ip6h->daddr)) - return true; if (!pskb_may_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS + sizeof(*mld2q))) return true; + ip6h = ipv6_hdr(skb); + if (!ipv6_addr_is_multicast(&ip6h->daddr)) + return true; + mld2q = skb_pull(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); skb_reset_transport_header(skb); skb_push(skb, sizeof(*ip6h) + AMT_IP6HDR_OPTS); @@ -2389,23 +2406,23 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb) { struct amt_header_membership_update *amtmu; struct amt_tunnel_list *tunnel; - struct udphdr *udph; struct ethhdr *eth; struct iphdr *iph; - int len; + int len, hdr_size; iph = ip_hdr(skb); - udph = udp_hdr(skb); - if (__iptunnel_pull_header(skb, sizeof(*udph), skb->protocol, - false, false)) + hdr_size = sizeof(*amtmu) + sizeof(struct udphdr); + if (!pskb_may_pull(skb, hdr_size)) return true; - amtmu = (struct amt_header_membership_update *)skb->data; + amtmu = (struct amt_header_membership_update *)(udp_hdr(skb) + 1); if (amtmu->reserved || amtmu->version) return true; - skb_pull(skb, sizeof(*amtmu)); + if (iptunnel_pull_header(skb, hdr_size, skb->protocol, false)) + return true; + skb_reset_network_header(skb); list_for_each_entry_rcu(tunnel, &amt->tunnel_list, list) { @@ -2426,6 +2443,9 @@ static bool amt_update_handler(struct amt_dev *amt, struct sk_buff *skb) return true; report: + if (!pskb_may_pull(skb, sizeof(*iph))) + return true; + iph = ip_hdr(skb); if (iph->version == 4) { if (ip_mc_check_igmp(skb)) { @@ -2679,7 +2699,8 @@ static int amt_rcv(struct sock *sk, struct sk_buff *skb) amt = rcu_dereference_sk_user_data(sk); if (!amt) { err = true; - goto drop; + kfree_skb(skb); + goto out; } skb->dev = amt->dev; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index f85372adf042..6ba4c83fe5fc 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3684,9 +3684,11 @@ re_arm: if (!rtnl_trylock()) return; - if (should_notify_peers) + if (should_notify_peers) { + bond->send_peer_notif--; call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev); + } if (should_notify_rtnl) { bond_slave_state_notify(bond); bond_slave_link_notify(bond); diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index 8af4def38a98..e531b93f3cb2 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -2070,8 +2070,10 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv, for_each_available_child_of_node(gphy_fw_list_np, gphy_fw_np) { err = gswip_gphy_fw_probe(priv, &priv->gphy_fw[i], gphy_fw_np, i); - if (err) + if (err) { + of_node_put(gphy_fw_np); goto remove_gphy; + } i++; } diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index 7b37d45bc9fb..d94150d8f3f4 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -50,22 +50,25 @@ static int mv88e6390_serdes_write(struct mv88e6xxx_chip *chip, } static int mv88e6xxx_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, - u16 ctrl, u16 status, u16 lpa, + u16 bmsr, u16 lpa, u16 status, struct phylink_link_state *state) { + state->link = false; + + /* If the BMSR reports that the link had failed, report this to + 
* phylink. + */ + if (!(bmsr & BMSR_LSTATUS)) + return 0; + state->link = !!(status & MV88E6390_SGMII_PHY_STATUS_LINK); + state->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE); if (status & MV88E6390_SGMII_PHY_STATUS_SPD_DPL_VALID) { /* The Spped and Duplex Resolved register is 1 if AN is enabled * and complete, or if AN is disabled. So with disabled AN we - * still get here on link up. But we want to set an_complete - * only if AN was enabled, thus we look at BMCR_ANENABLE. - * (According to 802.3-2008 section 22.2.4.2.10, we should be - * able to get this same value from BMSR_ANEGCAPABLE, but tests - * show that these Marvell PHYs don't conform to this part of - * the specificaion - BMSR_ANEGCAPABLE is simply always 1.) + * still get here on link up. */ - state->an_complete = !!(ctrl & BMCR_ANENABLE); state->duplex = status & MV88E6390_SGMII_PHY_STATUS_DUPLEX_FULL ? DUPLEX_FULL : DUPLEX_HALF; @@ -191,12 +194,12 @@ int mv88e6352_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status, ctrl; + u16 bmsr, lpa, status; int err; - err = mv88e6352_serdes_read(chip, MII_BMCR, &ctrl); + err = mv88e6352_serdes_read(chip, MII_BMSR, &bmsr); if (err) { - dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err); return err; } @@ -212,7 +215,7 @@ int mv88e6352_serdes_pcs_get_state(struct mv88e6xxx_chip *chip, int port, return err; } - return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); + return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state); } int mv88e6352_serdes_pcs_an_restart(struct mv88e6xxx_chip *chip, int port, @@ -918,13 +921,13 @@ int mv88e6390_serdes_pcs_config(struct mv88e6xxx_chip *chip, int port, static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, int port, int lane, struct phylink_link_state *state) { - u16 lpa, status, ctrl; + u16 bmsr, lpa, status; int err; err = mv88e6390_serdes_read(chip, lane, MDIO_MMD_PHYXS, - MV88E6390_SGMII_BMCR, &ctrl); + MV88E6390_SGMII_BMSR, &bmsr); if (err) { - dev_err(chip->dev, "can't read Serdes PHY control: %d\n", err); + dev_err(chip->dev, "can't read Serdes PHY BMSR: %d\n", err); return err; } @@ -942,7 +945,7 @@ static int mv88e6390_serdes_pcs_get_state_sgmii(struct mv88e6xxx_chip *chip, return err; } - return mv88e6xxx_serdes_pcs_get_state(chip, ctrl, status, lpa, state); + return mv88e6xxx_serdes_pcs_get_state(chip, bmsr, lpa, status, state); } static int mv88e6390_serdes_pcs_get_state_10g(struct mv88e6xxx_chip *chip, diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c index 2727d3169c25..1cbb05b0323f 100644 --- a/drivers/net/dsa/qca8k.c +++ b/drivers/net/dsa/qca8k.c @@ -2334,6 +2334,7 @@ static int qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) { struct qca8k_priv *priv = ds->priv; + int ret; /* We have only have a general MTU setting. * DSA always set the CPU port's MTU to the largest MTU of the slave @@ -2344,8 +2345,27 @@ qca8k_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) if (!dsa_is_cpu_port(ds, port)) return 0; + /* To change the MAX_FRAME_SIZE the cpu ports must be off or + * the switch panics. + * Turn off both cpu ports before applying the new value to prevent + * this. 
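The serdes helper now reads BMSR instead of BMCR and returns early when the latched link-status bit is clear, so stale speed/duplex bits are never reported. A condensed sketch of that ordering (the Marvell PHY-status bit value here is assumed):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BMSR_LSTATUS            0x0004
#define BMSR_ANEGCOMPLETE       0x0020
#define PHY_STATUS_LINK         0x0400  /* assumed stand-in bit */

struct link_state { bool link; bool an_complete; };

static void pcs_get_state(uint16_t bmsr, uint16_t phy_status,
                          struct link_state *st)
{
        st->link = false;
        st->an_complete = false;

        /* BMSR latches link-down events: if the link failed since the
         * last read, report down and skip the resolved status bits. */
        if (!(bmsr & BMSR_LSTATUS))
                return;

        st->link = !!(phy_status & PHY_STATUS_LINK);
        st->an_complete = !!(bmsr & BMSR_ANEGCOMPLETE);
}

int main(void)
{
        struct link_state st;

        pcs_get_state(BMSR_LSTATUS | BMSR_ANEGCOMPLETE, PHY_STATUS_LINK, &st);
        printf("link=%d an_complete=%d\n", st.link, st.an_complete);
        return 0;
}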
+ */ + if (priv->port_enabled_map & BIT(0)) + qca8k_port_set_status(priv, 0, 0); + + if (priv->port_enabled_map & BIT(6)) + qca8k_port_set_status(priv, 6, 0); + /* Include L2 header / FCS length */ - return qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); + ret = qca8k_write(priv, QCA8K_MAX_FRAME_SIZE, new_mtu + ETH_HLEN + ETH_FCS_LEN); + + if (priv->port_enabled_map & BIT(0)) + qca8k_port_set_status(priv, 0, 1); + + if (priv->port_enabled_map & BIT(6)) + qca8k_port_set_status(priv, 6, 1); + + return ret; } static int diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h index 04408e11402a..ec58d0e80a70 100644 --- a/drivers/net/dsa/qca8k.h +++ b/drivers/net/dsa/qca8k.h @@ -15,7 +15,7 @@ #define QCA8K_ETHERNET_MDIO_PRIORITY 7 #define QCA8K_ETHERNET_PHY_PRIORITY 6 -#define QCA8K_ETHERNET_TIMEOUT 100 +#define QCA8K_ETHERNET_TIMEOUT 5 #define QCA8K_NUM_PORTS 7 #define QCA8K_NUM_CPU_PORTS 2 diff --git a/drivers/net/dsa/realtek/rtl8365mb.c b/drivers/net/dsa/realtek/rtl8365mb.c index 3bb42a9f236d..769f672e9128 100644 --- a/drivers/net/dsa/realtek/rtl8365mb.c +++ b/drivers/net/dsa/realtek/rtl8365mb.c @@ -955,35 +955,21 @@ static int rtl8365mb_ext_config_forcemode(struct realtek_priv *priv, int port, return 0; } -static bool rtl8365mb_phy_mode_supported(struct dsa_switch *ds, int port, - phy_interface_t interface) -{ - int ext_int; - - ext_int = rtl8365mb_extint_port_map[port]; - - if (ext_int < 0 && - (interface == PHY_INTERFACE_MODE_NA || - interface == PHY_INTERFACE_MODE_INTERNAL || - interface == PHY_INTERFACE_MODE_GMII)) - /* Internal PHY */ - return true; - else if ((ext_int >= 1) && - phy_interface_mode_is_rgmii(interface)) - /* Extension MAC */ - return true; - - return false; -} - static void rtl8365mb_phylink_get_caps(struct dsa_switch *ds, int port, struct phylink_config *config) { - if (dsa_is_user_port(ds, port)) + if (dsa_is_user_port(ds, port)) { __set_bit(PHY_INTERFACE_MODE_INTERNAL, config->supported_interfaces); - else if (dsa_is_cpu_port(ds, port)) + + /* GMII is the default interface mode for phylib, so + * we have to support it for ports with integrated PHY. 
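The qca8k MTU path above now brackets the MAX_FRAME_SIZE write with CPU-port down/up transitions so the switch never sees the register change while a CPU port is running. A runnable sketch of the bracket pattern, with printf() stand-ins for qca8k_port_set_status() and qca8k_write():

#include <stdio.h>

#define BIT(n)          (1u << (n))
#define ETH_HLEN        14
#define ETH_FCS_LEN     4

static unsigned int port_enabled_map = BIT(0) | BIT(6);

static void port_set_status(int port, int up)   /* stand-in */
{
        printf("cpu port %d -> %s\n", port, up ? "up" : "down");
}

static int write_max_frame(int new_mtu)         /* stand-in */
{
        printf("MAX_FRAME_SIZE <- %d\n", new_mtu + ETH_HLEN + ETH_FCS_LEN);
        return 0;
}

int main(void)
{
        int ret;

        if (port_enabled_map & BIT(0))
                port_set_status(0, 0);
        if (port_enabled_map & BIT(6))
                port_set_status(6, 0);

        ret = write_max_frame(1500);

        if (port_enabled_map & BIT(0))
                port_set_status(0, 1);
        if (port_enabled_map & BIT(6))
                port_set_status(6, 1);

        return ret;
}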
+ */ + __set_bit(PHY_INTERFACE_MODE_GMII, + config->supported_interfaces); + } else if (dsa_is_cpu_port(ds, port)) { phy_interface_set_rgmii(config->supported_interfaces); + } config->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD; @@ -996,12 +982,6 @@ static void rtl8365mb_phylink_mac_config(struct dsa_switch *ds, int port, struct realtek_priv *priv = ds->priv; int ret; - if (!rtl8365mb_phy_mode_supported(ds, port, state->interface)) { - dev_err(priv->dev, "phy mode %s is unsupported on port %d\n", - phy_modes(state->interface), port); - return; - } - if (mode != MLO_AN_PHY && mode != MLO_AN_FIXED) { dev_err(priv->dev, "port %d supports only conventional PHY or fixed-link\n", diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index a3816264c35c..8c5828582c21 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -163,7 +163,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) mdio = mdiobus_alloc(); if (mdio == NULL) { netdev_err(dev, "Error allocating MDIO bus\n"); - return -ENOMEM; + ret = -ENOMEM; + goto put_node; } mdio->name = ALTERA_TSE_RESOURCE_NAME; @@ -180,6 +181,7 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) mdio->id); goto out_free_mdio; } + of_node_put(mdio_node); if (netif_msg_drv(priv)) netdev_info(dev, "MDIO bus %s: created\n", mdio->id); @@ -189,6 +191,8 @@ static int altera_tse_mdio_create(struct net_device *dev, unsigned int id) out_free_mdio: mdiobus_free(mdio); mdio = NULL; +put_node: + of_node_put(mdio_node); return ret; } diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index c6f003975621..d5f2c6989221 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -820,7 +820,7 @@ static int au1000_rx(struct net_device *dev) pr_cont("\n"); } } - prxd->buff_stat = (u32)(pDB->dma_addr | RX_DMA_ENABLE); + prxd->buff_stat = lower_32_bits(pDB->dma_addr) | RX_DMA_ENABLE; aup->rx_head = (aup->rx_head + 1) & (NUM_RX_DMA - 1); wmb(); /* drain writebuffer */ @@ -996,7 +996,7 @@ static netdev_tx_t au1000_tx(struct sk_buff *skb, struct net_device *dev) ps->tx_packets++; ps->tx_bytes += ptxd->len; - ptxd->buff_stat = pDB->dma_addr | TX_DMA_ENABLE; + ptxd->buff_stat = lower_32_bits(pDB->dma_addr) | TX_DMA_ENABLE; wmb(); /* drain writebuffer */ dev_kfree_skb(skb); aup->tx_head = (aup->tx_head + 1) & (NUM_TX_DMA - 1); @@ -1131,9 +1131,9 @@ static int au1000_probe(struct platform_device *pdev) /* Allocate the data buffers * Snooping works fine with eth on all au1xxx */ - aup->vaddr = (u32)dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE * - (NUM_TX_BUFFS + NUM_RX_BUFFS), - &aup->dma_addr, 0); + aup->vaddr = dma_alloc_coherent(&pdev->dev, MAX_BUF_SIZE * + (NUM_TX_BUFFS + NUM_RX_BUFFS), + &aup->dma_addr, 0); if (!aup->vaddr) { dev_err(&pdev->dev, "failed to allocate data buffers\n"); err = -ENOMEM; @@ -1234,8 +1234,8 @@ static int au1000_probe(struct platform_device *pdev) for (i = 0; i < (NUM_TX_BUFFS+NUM_RX_BUFFS); i++) { pDB->pnext = pDBfree; pDBfree = pDB; - pDB->vaddr = (u32 *)((unsigned)aup->vaddr + MAX_BUF_SIZE*i); - pDB->dma_addr = (dma_addr_t)virt_to_bus(pDB->vaddr); + pDB->vaddr = aup->vaddr + MAX_BUF_SIZE * i; + pDB->dma_addr = aup->dma_addr + MAX_BUF_SIZE * i; pDB++; } aup->pDBfree = pDBfree; @@ -1246,7 +1246,7 @@ static int au1000_probe(struct platform_device *pdev) if (!pDB) goto err_out; - 
aup->rx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->rx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr); aup->rx_db_inuse[i] = pDB; } @@ -1255,7 +1255,7 @@ static int au1000_probe(struct platform_device *pdev) if (!pDB) goto err_out; - aup->tx_dma_ring[i]->buff_stat = (unsigned)pDB->dma_addr; + aup->tx_dma_ring[i]->buff_stat = lower_32_bits(pDB->dma_addr); aup->tx_dma_ring[i]->len = 0; aup->tx_db_inuse[i] = pDB; } @@ -1310,7 +1310,7 @@ err_remap2: iounmap(aup->mac); err_remap1: dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); + aup->vaddr, aup->dma_addr); err_vaddr: free_netdev(dev); err_alloc: @@ -1343,7 +1343,7 @@ static int au1000_remove(struct platform_device *pdev) au1000_ReleaseDB(aup, aup->tx_db_inuse[i]); dma_free_coherent(&pdev->dev, MAX_BUF_SIZE * (NUM_TX_BUFFS + NUM_RX_BUFFS), - (void *)aup->vaddr, aup->dma_addr); + aup->vaddr, aup->dma_addr); iounmap(aup->macdma); iounmap(aup->mac); diff --git a/drivers/net/ethernet/amd/au1000_eth.h b/drivers/net/ethernet/amd/au1000_eth.h index e3a3ed29db61..2489c2f4fd8a 100644 --- a/drivers/net/ethernet/amd/au1000_eth.h +++ b/drivers/net/ethernet/amd/au1000_eth.h @@ -106,8 +106,8 @@ struct au1000_private { struct mac_reg *mac; /* mac registers */ u32 *enable; /* address of MAC Enable Register */ void __iomem *macdma; /* base of MAC DMA port */ - u32 vaddr; /* virtual address of rx/tx buffers */ - dma_addr_t dma_addr; /* dma address of rx/tx buffers */ + void *vaddr; /* virtual address of rx/tx buffers */ + dma_addr_t dma_addr; /* dma address of rx/tx buffers */ spinlock_t lock; /* Serialise access to device */ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index a3593290886f..4d46780fad13 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2784,7 +2784,7 @@ void xgbe_print_pkt(struct net_device *netdev, struct sk_buff *skb, bool tx_rx) netdev_dbg(netdev, "Dst MAC addr: %pM\n", eth->h_dest); netdev_dbg(netdev, "Src MAC addr: %pM\n", eth->h_source); - netdev_dbg(netdev, "Protocol: %#06hx\n", ntohs(eth->h_proto)); + netdev_dbg(netdev, "Protocol: %#06x\n", ntohs(eth->h_proto)); for (i = 0; i < skb->len; i += 32) { unsigned int len = min(skb->len - i, 32U); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c index 4ebd2410185a..4d790a89fe77 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-platform.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-platform.c @@ -338,7 +338,7 @@ static int xgbe_platform_probe(struct platform_device *pdev) * the PHY resources listed last */ phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3; - phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1; + phy_irqnum = platform_irq_count(pdev) - 1; dma_irqnum = 1; dma_irqend = phy_irqnum; } else { @@ -348,7 +348,7 @@ static int xgbe_platform_probe(struct platform_device *pdev) phy_memnum = 0; phy_irqnum = 0; dma_irqnum = 1; - dma_irqend = xgbe_resource_count(pdev, IORESOURCE_IRQ); + dma_irqend = platform_irq_count(pdev); } /* Obtain the mmio areas for the device */ diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c index 086739e4f40a..9b83d5361699 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c @@ -234,6 +234,7 @@ struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac) np = 
of_get_child_by_name(core->dev.of_node, "mdio"); err = of_mdiobus_register(mii_bus, np); + of_node_put(np); if (err) { dev_err(&core->dev, "Registration of mii bus failed\n"); goto err_free_bus; diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c index e6f48786949c..02bd3cf9a260 100644 --- a/drivers/net/ethernet/broadcom/bgmac-bcma.c +++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c @@ -332,7 +332,6 @@ static void bgmac_remove(struct bcma_device *core) bcma_mdio_mii_unregister(bgmac->mii_bus); bgmac_enet_remove(bgmac); bcma_set_drvdata(core, NULL); - kfree(bgmac); } static struct bcma_driver bgmac_bcma_driver = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 8a3a446219f7..94f80e1c4020 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -769,6 +769,7 @@ struct hnae3_tc_info { u8 prio_tc[HNAE3_MAX_USER_PRIO]; /* TC indexed by prio */ u16 tqp_count[HNAE3_MAX_TC]; u16 tqp_offset[HNAE3_MAX_TC]; + u8 max_tc; /* Total number of TCs */ u8 num_tc; /* Total number of enabled TCs */ bool mqprio_active; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 6d20974519fe..4c7988e308a2 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -1129,7 +1129,7 @@ hns3_is_ringparam_changed(struct net_device *ndev, if (old_ringparam->tx_desc_num == new_ringparam->tx_desc_num && old_ringparam->rx_desc_num == new_ringparam->rx_desc_num && old_ringparam->rx_buf_len == new_ringparam->rx_buf_len) { - netdev_info(ndev, "ringparam not changed\n"); + netdev_info(ndev, "descriptor number and rx buffer length not changed\n"); return false; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 1ebad0e50e6a..fae79764dc44 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3268,7 +3268,7 @@ static int hclge_tp_port_init(struct hclge_dev *hdev) static int hclge_update_port_info(struct hclge_dev *hdev) { struct hclge_mac *mac = &hdev->hw.mac; - int speed = HCLGE_MAC_SPEED_UNKNOWN; + int speed; int ret; /* get the port info from SFP cmd if not copper port */ @@ -3279,10 +3279,13 @@ static int hclge_update_port_info(struct hclge_dev *hdev) if (!hdev->support_sfp_query) return 0; - if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) + if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { + speed = mac->speed; ret = hclge_get_sfp_info(hdev, mac); - else + } else { + speed = HCLGE_MAC_SPEED_UNKNOWN; ret = hclge_get_sfp_speed(hdev, &speed); + } if (ret == -EOPNOTSUPP) { hdev->support_sfp_query = false; @@ -3294,6 +3297,8 @@ static int hclge_update_port_info(struct hclge_dev *hdev) if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { if (mac->speed_type == QUERY_ACTIVE_SPEED) { hclge_update_port_capability(hdev, mac); + if (mac->speed != speed) + (void)hclge_tm_port_shaper_cfg(hdev); return 0; } return hclge_cfg_mac_speed_dup(hdev, mac->speed, @@ -3376,6 +3381,12 @@ static int hclge_set_vf_link_state(struct hnae3_handle *handle, int vf, link_state_old = vport->vf_info.link_state; vport->vf_info.link_state = link_state; + /* return success directly if the VF is unalive, VF will + * query link state itself when it starts work. 
+ */ + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + return 0; + ret = hclge_push_vf_link_status(vport); if (ret) { vport->vf_info.link_state = link_state_old; @@ -10117,6 +10128,7 @@ static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport, if (ret) return ret; + vport->port_base_vlan_cfg.tbl_sta = false; /* remove old VLAN tag */ if (old_info->vlan_tag == 0) ret = hclge_set_vf_vlan_common(hdev, vport->vport_id, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 1f87a8a3fe32..2f33b036a47a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -282,8 +282,8 @@ static int hclge_tm_pg_to_pri_map_cfg(struct hclge_dev *hdev, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, - u16 qs_id, u8 pri) +static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, u16 qs_id, u8 pri, + bool link_vld) { struct hclge_qs_to_pri_link_cmd *map; struct hclge_desc desc; @@ -294,7 +294,7 @@ static int hclge_tm_qs_to_pri_map_cfg(struct hclge_dev *hdev, map->qs_id = cpu_to_le16(qs_id); map->priority = pri; - map->link_vld = HCLGE_TM_QS_PRI_LINK_VLD_MSK; + map->link_vld = link_vld ? HCLGE_TM_QS_PRI_LINK_VLD_MSK : 0; return hclge_cmd_send(&hdev->hw, &desc, 1); } @@ -420,7 +420,7 @@ static int hclge_tm_pg_shapping_cfg(struct hclge_dev *hdev, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev) { struct hclge_port_shapping_cmd *shap_cfg_cmd; struct hclge_shaper_ir_para ir_para; @@ -642,11 +642,13 @@ static void hclge_tm_update_kinfo_rss_size(struct hclge_vport *vport) * one tc for VF for simplicity. VF's vport_id is non zero. */ if (vport->vport_id) { + kinfo->tc_info.max_tc = 1; kinfo->tc_info.num_tc = 1; vport->qs_offset = HNAE3_MAX_TC + vport->vport_id - HCLGE_VF_VPORT_START_NUM; vport_max_rss_size = hdev->vf_rss_size_max; } else { + kinfo->tc_info.max_tc = hdev->tc_max; kinfo->tc_info.num_tc = min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc); vport->qs_offset = 0; @@ -679,7 +681,9 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport) kinfo->num_tqps = hclge_vport_get_tqp_num(vport); vport->dwrr = 100; /* 100 percent as init */ vport->bw_limit = hdev->tm_info.pg_info[0].bw_limit; - hdev->rss_cfg.rss_size = kinfo->rss_size; + + if (vport->vport_id == PF_VPORT_ID) + hdev->rss_cfg.rss_size = kinfo->rss_size; /* when enable mqprio, the tc_info has been updated. 
*/ if (kinfo->tc_info.mqprio_active) @@ -714,14 +718,22 @@ static void hclge_tm_vport_info_update(struct hclge_dev *hdev) static void hclge_tm_tc_info_init(struct hclge_dev *hdev) { - u8 i; + u8 i, tc_sch_mode; + u32 bw_limit; + + for (i = 0; i < hdev->tc_max; i++) { + if (i < hdev->tm_info.num_tc) { + tc_sch_mode = HCLGE_SCH_MODE_DWRR; + bw_limit = hdev->tm_info.pg_info[0].bw_limit; + } else { + tc_sch_mode = HCLGE_SCH_MODE_SP; + bw_limit = 0; + } - for (i = 0; i < hdev->tm_info.num_tc; i++) { hdev->tm_info.tc_info[i].tc_id = i; - hdev->tm_info.tc_info[i].tc_sch_mode = HCLGE_SCH_MODE_DWRR; + hdev->tm_info.tc_info[i].tc_sch_mode = tc_sch_mode; hdev->tm_info.tc_info[i].pgid = 0; - hdev->tm_info.tc_info[i].bw_limit = - hdev->tm_info.pg_info[0].bw_limit; + hdev->tm_info.tc_info[i].bw_limit = bw_limit; } for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) @@ -926,10 +938,13 @@ static int hclge_tm_pri_q_qs_cfg_tc_base(struct hclge_dev *hdev) for (k = 0; k < hdev->num_alloc_vport; k++) { struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; - for (i = 0; i < kinfo->tc_info.num_tc; i++) { + for (i = 0; i < kinfo->tc_info.max_tc; i++) { + u8 pri = i < kinfo->tc_info.num_tc ? i : 0; + bool link_vld = i < kinfo->tc_info.num_tc; + ret = hclge_tm_qs_to_pri_map_cfg(hdev, vport[k].qs_offset + i, - i); + pri, link_vld); if (ret) return ret; } @@ -949,7 +964,7 @@ static int hclge_tm_pri_q_qs_cfg_vnet_base(struct hclge_dev *hdev) for (i = 0; i < HNAE3_MAX_TC; i++) { ret = hclge_tm_qs_to_pri_map_cfg(hdev, vport[k].qs_offset + i, - k); + k, true); if (ret) return ret; } @@ -989,33 +1004,39 @@ static int hclge_tm_pri_tc_base_shaper_cfg(struct hclge_dev *hdev) { u32 max_tm_rate = hdev->ae_dev->dev_specs.max_tm_rate; struct hclge_shaper_ir_para ir_para; - u32 shaper_para; + u32 shaper_para_c, shaper_para_p; int ret; u32 i; - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { u32 rate = hdev->tm_info.tc_info[i].bw_limit; - ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI, - &ir_para, max_tm_rate); - if (ret) - return ret; + if (rate) { + ret = hclge_shaper_para_calc(rate, HCLGE_SHAPER_LVL_PRI, + &ir_para, max_tm_rate); + if (ret) + return ret; + + shaper_para_c = hclge_tm_get_shapping_para(0, 0, 0, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + shaper_para_p = hclge_tm_get_shapping_para(ir_para.ir_b, + ir_para.ir_u, + ir_para.ir_s, + HCLGE_SHAPER_BS_U_DEF, + HCLGE_SHAPER_BS_S_DEF); + } else { + shaper_para_c = 0; + shaper_para_p = 0; + } - shaper_para = hclge_tm_get_shapping_para(0, 0, 0, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_C_BUCKET, i, - shaper_para, rate); + shaper_para_c, rate); if (ret) return ret; - shaper_para = hclge_tm_get_shapping_para(ir_para.ir_b, - ir_para.ir_u, - ir_para.ir_s, - HCLGE_SHAPER_BS_U_DEF, - HCLGE_SHAPER_BS_S_DEF); ret = hclge_tm_pri_shapping_cfg(hdev, HCLGE_TM_SHAP_P_BUCKET, i, - shaper_para, rate); + shaper_para_p, rate); if (ret) return ret; } @@ -1125,7 +1146,7 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) int ret; u32 i, k; - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { pg_info = &hdev->tm_info.pg_info[hdev->tm_info.tc_info[i].pgid]; dwrr = pg_info->tc_dwrr[i]; @@ -1135,9 +1156,15 @@ static int hclge_tm_pri_tc_base_dwrr_cfg(struct hclge_dev *hdev) return ret; for (k = 0; k < hdev->num_alloc_vport; k++) { + struct hnae3_knic_private_info *kinfo = &vport[k].nic.kinfo; + + if (i >= kinfo->tc_info.max_tc) + continue; + 
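Several hclge TM init loops in this series switch from num_tc to the new max_tc/tc_max bound: every hardware TC gets configured, with the disabled ones parked in strict-priority mode at zero bandwidth. The selection logic, sketched with assumed counts:

#include <stdio.h>

int main(void)
{
        /* Assumed example: 8 hardware TCs, 4 currently enabled. */
        unsigned int tc_max = 8, num_tc = 4, bw_limit = 100000;
        unsigned int i;

        for (i = 0; i < tc_max; i++) {
                const char *mode = i < num_tc ? "dwrr" : "sp";
                unsigned int bw = i < num_tc ? bw_limit : 0;

                printf("tc %u: sch_mode=%s bw_limit=%u\n", i, mode, bw);
        }
        return 0;
}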
+ dwrr = i < kinfo->tc_info.num_tc ? vport[k].dwrr : 0; ret = hclge_tm_qs_weight_cfg( hdev, vport[k].qs_offset + i, - vport[k].dwrr); + dwrr); if (ret) return ret; } @@ -1303,6 +1330,7 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) { struct hclge_vport *vport = hdev->vport; int ret; + u8 mode; u16 i; ret = hclge_tm_pri_schd_mode_cfg(hdev, pri_id); @@ -1310,9 +1338,16 @@ static int hclge_tm_schd_mode_tc_base_cfg(struct hclge_dev *hdev, u8 pri_id) return ret; for (i = 0; i < hdev->num_alloc_vport; i++) { + struct hnae3_knic_private_info *kinfo = &vport[i].nic.kinfo; + + if (pri_id >= kinfo->tc_info.max_tc) + continue; + + mode = pri_id < kinfo->tc_info.num_tc ? HCLGE_SCH_MODE_DWRR : + HCLGE_SCH_MODE_SP; ret = hclge_tm_qs_schd_mode_cfg(hdev, vport[i].qs_offset + pri_id, - HCLGE_SCH_MODE_DWRR); + mode); if (ret) return ret; } @@ -1353,7 +1388,7 @@ static int hclge_tm_lvl34_schd_mode_cfg(struct hclge_dev *hdev) u8 i; if (hdev->tx_sch_mode == HCLGE_FLAG_TC_BASE_SCH_MODE) { - for (i = 0; i < hdev->tm_info.num_tc; i++) { + for (i = 0; i < hdev->tc_max; i++) { ret = hclge_tm_schd_mode_tc_base_cfg(hdev, i); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 619cc30a2dfc..d943943912f7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -237,6 +237,7 @@ int hclge_pause_addr_cfg(struct hclge_dev *hdev, const u8 *mac_addr); void hclge_pfc_rx_stats_get(struct hclge_dev *hdev, u64 *stats); void hclge_pfc_tx_stats_get(struct hclge_dev *hdev, u64 *stats); int hclge_tm_qs_shaper_cfg(struct hclge_vport *vport, int max_tx_rate); +int hclge_tm_port_shaper_cfg(struct hclge_dev *hdev); int hclge_tm_get_qset_num(struct hclge_dev *hdev, u16 *qset_num); int hclge_tm_get_pri_num(struct hclge_dev *hdev, u8 *pri_num); int hclge_tm_get_qset_map_pri(struct hclge_dev *hdev, u16 qset_id, u8 *priority, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c index 60ae8bfc5f69..1749d26f4bef 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -43,9 +43,7 @@ static bool check_image_valid(struct hinic_devlink_priv *priv, const u8 *buf, for (i = 0; i < fw_image->fw_info.fw_section_cnt; i++) { len += fw_image->fw_section_info[i].fw_section_len; - memcpy(&host_image->image_section_info[i], - &fw_image->fw_section_info[i], - sizeof(struct fw_section_info_st)); + host_image->image_section_info[i] = fw_image->fw_section_info[i]; } if (len != fw_image->fw_len || diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 610f00cbaff9..19704f5c8291 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2586,15 +2586,16 @@ static void i40e_diag_test(struct net_device *netdev, set_bit(__I40E_TESTING, pf->state); + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || + test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) { + dev_warn(&pf->pdev->dev, + "Cannot start offline testing when PF is in reset state.\n"); + goto skip_ol_tests; + } + if (i40e_active_vfs(pf) || i40e_active_vmdqs(pf)) { dev_warn(&pf->pdev->dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n"); - data[I40E_ETH_TEST_REG] = 1; - data[I40E_ETH_TEST_EEPROM] = 1; - 
data[I40E_ETH_TEST_INTR] = 1; - data[I40E_ETH_TEST_LINK] = 1; - eth_test->flags |= ETH_TEST_FL_FAILED; - clear_bit(__I40E_TESTING, pf->state); goto skip_ol_tests; } @@ -2641,9 +2642,17 @@ static void i40e_diag_test(struct net_device *netdev, data[I40E_ETH_TEST_INTR] = 0; } -skip_ol_tests: - netif_info(pf, drv, netdev, "testing finished\n"); + return; + +skip_ol_tests: + data[I40E_ETH_TEST_REG] = 1; + data[I40E_ETH_TEST_EEPROM] = 1; + data[I40E_ETH_TEST_INTR] = 1; + data[I40E_ETH_TEST_LINK] = 1; + eth_test->flags |= ETH_TEST_FL_FAILED; + clear_bit(__I40E_TESTING, pf->state); + netif_info(pf, drv, netdev, "testing failed\n"); } static void i40e_get_wol(struct net_device *netdev, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 332a608dbaa6..72576bb3e94d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -8542,6 +8542,11 @@ static int i40e_configure_clsflower(struct i40e_vsi *vsi, return -EOPNOTSUPP; } + if (!tc) { + dev_err(&pf->pdev->dev, "Unable to add filter because of invalid destination"); + return -EINVAL; + } + if (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state) || test_bit(__I40E_RESET_INTR_RECEIVED, pf->state)) return -EBUSY; diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 2606e8f0f19b..033ea71763e3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -2282,7 +2282,7 @@ static int i40e_vc_config_queues_msg(struct i40e_vf *vf, u8 *msg) } if (vf->adq_enabled) { - for (i = 0; i < I40E_MAX_VF_VSI; i++) + for (i = 0; i < vf->num_tc; i++) num_qps_all += vf->ch[i].num_qps; if (num_qps_all != qci->num_queue_pairs) { aq_ret = I40E_ERR_PARAM; diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 7dfcf78b57fb..f3ecb3bca33d 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -984,7 +984,7 @@ struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter, list_add_tail(&f->list, &adapter->mac_filter_list); f->add = true; f->is_new_mac = true; - f->is_primary = false; + f->is_primary = ether_addr_equal(macaddr, adapter->hw.mac.addr); adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER; } else { f->remove = false; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 1e71b70f0e52..70335f6e8524 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -2190,6 +2190,42 @@ ice_setup_autoneg(struct ice_port_info *p, struct ethtool_link_ksettings *ks, } /** + * ice_set_phy_type_from_speed - set phy_types based on speeds + * and advertised modes + * @ks: ethtool link ksettings struct + * @phy_type_low: pointer to the lower part of phy_type + * @phy_type_high: pointer to the higher part of phy_type + * @adv_link_speed: targeted link speeds bitmap + */ +static void +ice_set_phy_type_from_speed(const struct ethtool_link_ksettings *ks, + u64 *phy_type_low, u64 *phy_type_high, + u16 adv_link_speed) +{ + /* Handle 1000M speed in a special way because ice_update_phy_type + * enables all link modes, but having mixed copper and optical + * standards is not supported. 
+ */ + adv_link_speed &= ~ICE_AQ_LINK_SPEED_1000MB; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseT_Full)) + *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_T | + ICE_PHY_TYPE_LOW_1G_SGMII; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseKX_Full)) + *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_KX; + + if (ethtool_link_ksettings_test_link_mode(ks, advertising, + 1000baseX_Full)) + *phy_type_low |= ICE_PHY_TYPE_LOW_1000BASE_SX | + ICE_PHY_TYPE_LOW_1000BASE_LX; + + ice_update_phy_type(phy_type_low, phy_type_high, adv_link_speed); +} + +/** * ice_set_link_ksettings - Set Speed and Duplex * @netdev: network interface device structure * @ks: ethtool ksettings @@ -2320,7 +2356,8 @@ ice_set_link_ksettings(struct net_device *netdev, adv_link_speed = curr_link_speed; /* Convert the advertise link speeds to their corresponded PHY_TYPE */ - ice_update_phy_type(&phy_type_low, &phy_type_high, adv_link_speed); + ice_set_phy_type_from_speed(ks, &phy_type_low, &phy_type_high, + adv_link_speed); if (!autoneg_changed && adv_link_speed == curr_link_speed) { netdev_info(netdev, "Nothing changed, exiting without setting anything.\n"); @@ -3470,6 +3507,16 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch) new_rx = ch->combined_count + ch->rx_count; new_tx = ch->combined_count + ch->tx_count; + if (new_rx < vsi->tc_cfg.numtc) { + netdev_err(dev, "Cannot set less Rx channels, than Traffic Classes you have (%u)\n", + vsi->tc_cfg.numtc); + return -EINVAL; + } + if (new_tx < vsi->tc_cfg.numtc) { + netdev_err(dev, "Cannot set less Tx channels, than Traffic Classes you have (%u)\n", + vsi->tc_cfg.numtc); + return -EINVAL; + } if (new_rx > ice_get_max_rxq(pf)) { netdev_err(dev, "Maximum allowed Rx channels is %d\n", ice_get_max_rxq(pf)); diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 454e01ae09b9..f7f9c973ec54 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -909,7 +909,7 @@ static void ice_set_dflt_vsi_ctx(struct ice_hw *hw, struct ice_vsi_ctx *ctxt) * @vsi: the VSI being configured * @ctxt: VSI context structure */ -static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) +static int ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) { u16 offset = 0, qmap = 0, tx_count = 0, pow = 0; u16 num_txq_per_tc, num_rxq_per_tc; @@ -982,7 +982,18 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) else vsi->num_rxq = num_rxq_per_tc; + if (vsi->num_rxq > vsi->alloc_rxq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", + vsi->num_rxq, vsi->alloc_rxq); + return -EINVAL; + } + vsi->num_txq = tx_count; + if (vsi->num_txq > vsi->alloc_txq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", + vsi->num_txq, vsi->alloc_txq); + return -EINVAL; + } if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. 
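ice_vsi_setup_q_map() now returns int so that an over-subscribed queue map is rejected instead of silently committed. The new check reduces to a bounds test against the allocation; a sketch with made-up counts:

#include <stdio.h>

#define EINVAL 22

static int check_qmap(unsigned int want, unsigned int alloc, const char *dir)
{
        if (want > alloc) {
                fprintf(stderr,
                        "Trying to use more %s queues (%u) than allocated (%u)\n",
                        dir, want, alloc);
                return -EINVAL;
        }
        return 0;
}

int main(void)
{
        if (check_qmap(16, 8, "Rx"))    /* made-up values: must fail */
                return 1;
        return 0;
}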
Hence making them equal\n"); @@ -1000,6 +1011,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) */ ctxt->info.q_mapping[0] = cpu_to_le16(vsi->rxq_map[0]); ctxt->info.q_mapping[1] = cpu_to_le16(vsi->num_rxq); + + return 0; } /** @@ -1187,7 +1200,10 @@ static int ice_vsi_init(struct ice_vsi *vsi, bool init_vsi) if (vsi->type == ICE_VSI_CHNL) { ice_chnl_vsi_setup_q_map(vsi, ctxt); } else { - ice_vsi_setup_q_map(vsi, ctxt); + ret = ice_vsi_setup_q_map(vsi, ctxt); + if (ret) + goto out; + if (!init_vsi) /* means VSI being updated */ /* must to indicate which section of VSI context are * being modified @@ -3464,7 +3480,7 @@ void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc) * * Prepares VSI tc_config to have queue configurations based on MQPRIO options. */ -static void +static int ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, u8 ena_tc) { @@ -3513,7 +3529,18 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, /* Set actual Tx/Rx queue pairs */ vsi->num_txq = offset + qcount_tx; + if (vsi->num_txq > vsi->alloc_txq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Tx queues (%u), than were allocated (%u)!\n", + vsi->num_txq, vsi->alloc_txq); + return -EINVAL; + } + vsi->num_rxq = offset + qcount_rx; + if (vsi->num_rxq > vsi->alloc_rxq) { + dev_err(ice_pf_to_dev(vsi->back), "Trying to use more Rx queues (%u), than were allocated (%u)!\n", + vsi->num_rxq, vsi->alloc_rxq); + return -EINVAL; + } /* Setup queue TC[0].qmap for given VSI context */ ctxt->info.tc_mapping[0] = cpu_to_le16(qmap); @@ -3531,6 +3558,8 @@ ice_vsi_setup_q_map_mqprio(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt, dev_dbg(ice_pf_to_dev(vsi->back), "vsi->num_rxq = %d\n", vsi->num_rxq); dev_dbg(ice_pf_to_dev(vsi->back), "all_numtc %u, all_enatc: 0x%04x, tc_cfg.numtc %u\n", vsi->all_numtc, vsi->all_enatc, vsi->tc_cfg.numtc); + + return 0; } /** @@ -3580,9 +3609,12 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc) if (vsi->type == ICE_VSI_PF && test_bit(ICE_FLAG_TC_MQPRIO, pf->flags)) - ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); + ret = ice_vsi_setup_q_map_mqprio(vsi, ctx, ena_tc); else - ice_vsi_setup_q_map(vsi, ctx); + ret = ice_vsi_setup_q_map(vsi, ctx); + + if (ret) + goto out; /* must to indicate which section of VSI context are being modified */ ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index e1cae253412c..c1ac2f746714 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5763,25 +5763,38 @@ static netdev_features_t ice_fix_features(struct net_device *netdev, netdev_features_t features) { struct ice_netdev_priv *np = netdev_priv(netdev); - netdev_features_t supported_vlan_filtering; - netdev_features_t requested_vlan_filtering; - struct ice_vsi *vsi = np->vsi; - - requested_vlan_filtering = features & NETIF_VLAN_FILTERING_FEATURES; - - /* make sure supported_vlan_filtering works for both SVM and DVM */ - supported_vlan_filtering = NETIF_F_HW_VLAN_CTAG_FILTER; - if (ice_is_dvm_ena(&vsi->back->hw)) - supported_vlan_filtering |= NETIF_F_HW_VLAN_STAG_FILTER; - - if (requested_vlan_filtering && - requested_vlan_filtering != supported_vlan_filtering) { - if (requested_vlan_filtering & NETIF_F_HW_VLAN_CTAG_FILTER) { - netdev_warn(netdev, "cannot support requested VLAN filtering settings, enabling all supported VLAN filtering settings\n"); - features 
|= supported_vlan_filtering; + netdev_features_t req_vlan_fltr, cur_vlan_fltr; + bool cur_ctag, cur_stag, req_ctag, req_stag; + + cur_vlan_fltr = netdev->features & NETIF_VLAN_FILTERING_FEATURES; + cur_ctag = cur_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; + cur_stag = cur_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; + + req_vlan_fltr = features & NETIF_VLAN_FILTERING_FEATURES; + req_ctag = req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER; + req_stag = req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER; + + if (req_vlan_fltr != cur_vlan_fltr) { + if (ice_is_dvm_ena(&np->vsi->back->hw)) { + if (req_ctag && req_stag) { + features |= NETIF_VLAN_FILTERING_FEATURES; + } else if (!req_ctag && !req_stag) { + features &= ~NETIF_VLAN_FILTERING_FEATURES; + } else if ((!cur_ctag && req_ctag && !cur_stag) || + (!cur_stag && req_stag && !cur_ctag)) { + features |= NETIF_VLAN_FILTERING_FEATURES; + netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been enabled for both types.\n"); + } else if ((cur_ctag && !req_ctag && cur_stag) || + (cur_stag && !req_stag && cur_ctag)) { + features &= ~NETIF_VLAN_FILTERING_FEATURES; + netdev_warn(netdev, "802.1Q and 802.1ad VLAN filtering must be either both on or both off. VLAN filtering has been disabled for both types.\n"); + } } else { - netdev_warn(netdev, "cannot support requested VLAN filtering settings, clearing all supported VLAN filtering settings\n"); - features &= ~supported_vlan_filtering; + if (req_vlan_fltr & NETIF_F_HW_VLAN_STAG_FILTER) + netdev_warn(netdev, "cannot support requested 802.1ad filtering setting in SVM mode\n"); + + if (req_vlan_fltr & NETIF_F_HW_VLAN_CTAG_FILTER) + features |= NETIF_F_HW_VLAN_CTAG_FILTER; } } diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 662947c882e8..ef9344ef0d8e 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -2271,7 +2271,7 @@ static int ice_ptp_init_tx_e822(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) { tx->quad = port / ICE_PORTS_PER_QUAD; - tx->quad_offset = tx->quad * INDEX_PER_PORT; + tx->quad_offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT; tx->len = INDEX_PER_PORT; return ice_ptp_alloc_tx_tracker(tx); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index afd048d69959..10e396abf130 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -49,6 +49,37 @@ struct ice_perout_channel { * To allow multiple ports to access the shared register block independently, * the blocks are split up so that indexes are assigned to each port based on * hardware logical port number. + * + * The timestamp blocks are handled differently for E810- and E822-based + * devices. In E810 devices, each port has its own block of timestamps, while in + * E822 there is a need to logically break the block of registers into smaller + * chunks based on the port number to avoid collisions. 
+ * + * Example for port 5 in E810: + * +--------+--------+--------+--------+--------+--------+--------+--------+ + * |register|register|register|register|register|register|register|register| + * | block | block | block | block | block | block | block | block | + * | for | for | for | for | for | for | for | for | + * | port 0 | port 1 | port 2 | port 3 | port 4 | port 5 | port 6 | port 7 | + * +--------+--------+--------+--------+--------+--------+--------+--------+ + * ^^ + * || + * |--- quad offset is always 0 + * ---- quad number + * + * Example for port 5 in E822: + * +-----------------------------+-----------------------------+ + * | register block for quad 0 | register block for quad 1 | + * |+------+------+------+------+|+------+------+------+------+| + * ||port 0|port 1|port 2|port 3|||port 0|port 1|port 2|port 3|| + * |+------+------+------+------+|+------+------+------+------+| + * +-----------------------------+-------^---------------------+ + * ^ | + * | --- quad offset* + * ---- quad number + * + * * PHY port 5 is port 1 in quad 1 + * */ /** diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index 0a0c55fb8699..b803f2ab3cc7 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -524,6 +524,7 @@ ice_eswitch_add_tc_fltr(struct ice_vsi *vsi, struct ice_tc_flower_fltr *fltr) */ fltr->rid = rule_added.rid; fltr->rule_id = rule_added.rule_id; + fltr->dest_id = rule_added.vsi_handle; exit: kfree(list); @@ -993,7 +994,9 @@ ice_parse_cls_flower(struct net_device *filter_dev, struct ice_vsi *vsi, n_proto_key = ntohs(match.key->n_proto); n_proto_mask = ntohs(match.mask->n_proto); - if (n_proto_key == ETH_P_ALL || n_proto_key == 0) { + if (n_proto_key == ETH_P_ALL || n_proto_key == 0 || + fltr->tunnel_type == TNL_GTPU || + fltr->tunnel_type == TNL_GTPC) { n_proto_key = 0; n_proto_mask = 0; } else { diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index cd8e6b50968c..7adf9ddf129e 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -504,6 +504,11 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) } if (ice_is_vf_disabled(vf)) { + vsi = ice_get_vf_vsi(vf); + if (WARN_ON(!vsi)) + return -EINVAL; + ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); + ice_vsi_stop_all_rx_rings(vsi); dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", vf->vf_id); return 0; diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl.c b/drivers/net/ethernet/intel/ice/ice_virtchnl.c index 1d9b84c3937a..4547bc1f7cee 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl.c @@ -1569,35 +1569,27 @@ error_param: */ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) { - enum virtchnl_status_code v_ret = VIRTCHNL_STATUS_SUCCESS; struct virtchnl_vsi_queue_config_info *qci = (struct virtchnl_vsi_queue_config_info *)msg; struct virtchnl_queue_pair_info *qpi; struct ice_pf *pf = vf->pf; struct ice_vsi *vsi; - int i, q_idx; + int i = -1, q_idx; - if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) goto error_param; - } - if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (!ice_vc_isvalid_vsi_id(vf, qci->vsi_id)) goto error_param; - } vsi = ice_get_vf_vsi(vf); - if (!vsi) 
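The ice_ptp.c fix earlier in this series derives quad_offset from the port's position inside its quad, exactly as the E822 diagram above shows. The index arithmetic, sketched (the INDEX_PER_PORT value is assumed for illustration):

#include <stdio.h>

#define ICE_PORTS_PER_QUAD      4
#define INDEX_PER_PORT          16      /* assumed tracker size */

int main(void)
{
        unsigned int port;

        /* E822 ports share a quad's timestamp block, so the offset must
         * come from port % ICE_PORTS_PER_QUAD, not from the quad number. */
        for (port = 0; port < 8; port++)
                printf("port %u: quad=%u offset=%u (buggy old offset=%u)\n",
                       port,
                       port / ICE_PORTS_PER_QUAD,
                       (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT,
                       (port / ICE_PORTS_PER_QUAD) * INDEX_PER_PORT);
        return 0;
}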
{ - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (!vsi) goto error_param; - } if (qci->num_queue_pairs > ICE_MAX_RSS_QS_PER_VF || qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) { dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n", vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)); - v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1610,7 +1602,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) !ice_vc_isvalid_ring_len(qpi->txq.ring_len) || !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) || !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1620,7 +1611,6 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) * for selected "vsi" */ if (q_idx >= vsi->alloc_txq || q_idx >= vsi->alloc_rxq) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; goto error_param; } @@ -1630,14 +1620,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->tx_rings[i]->count = qpi->txq.ring_len; /* Disable any existing queue first */ - if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + if (ice_vf_vsi_dis_single_txq(vf, vsi, q_idx)) goto error_param; - } /* Configure a queue with the requested settings */ if (ice_vsi_cfg_single_txq(vsi, vsi->tx_rings, q_idx)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure TX queue %d\n", + vf->vf_id, i); goto error_param; } } @@ -1651,17 +1640,13 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) if (qpi->rxq.databuffer_size != 0 && (qpi->rxq.databuffer_size > ((16 * 1024) - 128) || - qpi->rxq.databuffer_size < 1024)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + qpi->rxq.databuffer_size < 1024)) goto error_param; - } vsi->rx_buf_len = qpi->rxq.databuffer_size; vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len; if (qpi->rxq.max_pkt_size > max_frame_size || - qpi->rxq.max_pkt_size < 64) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + qpi->rxq.max_pkt_size < 64) goto error_param; - } vsi->max_frame = qpi->rxq.max_pkt_size; /* add space for the port VLAN since the VF driver is @@ -1672,16 +1657,30 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) vsi->max_frame += VLAN_HLEN; if (ice_vsi_cfg_single_rxq(vsi, q_idx)) { - v_ret = VIRTCHNL_STATUS_ERR_PARAM; + dev_warn(ice_pf_to_dev(pf), "VF-%d failed to configure RX queue %d\n", + vf->vf_id, i); goto error_param; } } } + /* send the response to the VF */ + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + VIRTCHNL_STATUS_SUCCESS, NULL, 0); error_param: + /* disable whatever we can */ + for (; i >= 0; i--) { + if (ice_vsi_ctrl_one_rx_ring(vsi, false, i, true)) + dev_err(ice_pf_to_dev(pf), "VF-%d could not disable RX queue %d\n", + vf->vf_id, i); + if (ice_vf_vsi_dis_single_txq(vf, vsi, i)) + dev_err(ice_pf_to_dev(pf), "VF-%d could not disable TX queue %d\n", + vf->vf_id, i); + } + /* send the response to the VF */ - return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, v_ret, - NULL, 0); + return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES, + VIRTCHNL_STATUS_ERR_PARAM, NULL, 0); } /** diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 68be2976f539..c5f04c40284b 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -4819,8 +4819,11 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring) while (i != tx_ring->next_to_use) { union e1000_adv_tx_desc *eop_desc, 
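Editor's note: in the ice_vc_cfg_qs_msg() rework above, i is deliberately initialized to -1: if validation fails before any queue pair has been touched, the rollback loop under error_param degenerates to a no-op, and otherwise it unwinds queue pairs 0..i, including the one that failed partway through. A compressed sketch of the pattern; configure(), unwind(), send_success() and send_failure() are placeholders, not driver functions:

	int i = -1;			/* nothing configured yet */

	for (i = 0; i < num_queue_pairs; i++)
		if (configure(i))
			goto error_param;
	return send_success();

error_param:
	for (; i >= 0; i--)		/* no-op when i == -1 */
		unwind(i);
	return send_failure();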
*tx_desc; - /* Free all the Tx ring sk_buffs */ - dev_kfree_skb_any(tx_buffer->skb); + /* Free all the Tx ring sk_buffs or xdp frames */ + if (tx_buffer->type == IGB_TYPE_SKB) + dev_kfree_skb_any(tx_buffer->skb); + else + xdp_return_frame(tx_buffer->xdpf); /* unmap skb header data */ dma_unmap_single(tx_ring->dev, @@ -9898,11 +9901,10 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) struct e1000_hw *hw = &adapter->hw; u32 dmac_thr; u16 hwm; + u32 reg; if (hw->mac.type > e1000_82580) { if (adapter->flags & IGB_FLAG_DMAC) { - u32 reg; - /* force threshold to 0. */ wr32(E1000_DMCTXTH, 0); @@ -9935,7 +9937,6 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) /* Disable BMC-to-OS Watchdog Enable */ if (hw->mac.type != e1000_i354) reg &= ~E1000_DMACR_DC_BMC2OSW_EN; - wr32(E1000_DMACR, reg); /* no lower threshold to disable @@ -9952,12 +9953,12 @@ static void igb_init_dmac(struct igb_adapter *adapter, u32 pba) */ wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE - (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6); + } - /* make low power state decision controlled - * by DMA coal - */ + if (hw->mac.type >= e1000_i210 || + (adapter->flags & IGB_FLAG_DMAC)) { reg = rd32(E1000_PCIEMISC); - reg &= ~E1000_PCIEMISC_LX_DECISION; + reg |= E1000_PCIEMISC_LX_DECISION; wr32(E1000_PCIEMISC, reg); } /* endif adapter->dmac is not disabled */ } else if (hw->mac.type == e1000_82580) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 7f11c0a8e7a9..d4e63f0644c3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -1184,9 +1184,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, switch (xcast_mode) { case IXGBEVF_XCAST_MODE_NONE: - disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | + disable = IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; - enable = 0; + enable = IXGBE_VMOLR_BAM; break; case IXGBEVF_XCAST_MODE_MULTI: disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; @@ -1208,9 +1208,9 @@ static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, return -EPERM; } - disable = 0; + disable = IXGBE_VMOLR_VPE; enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | - IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; + IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE; break; default: return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index bc614a4def9e..3f60a80e34c8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -1390,7 +1390,8 @@ static int otx2vf_get_link_ksettings(struct net_device *netdev, static const struct ethtool_ops otx2vf_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES, + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE, .supported_ring_params = ETHTOOL_RING_USE_RX_BUF_LEN | ETHTOOL_RING_USE_CQE_SIZE, .get_link = otx2_get_link, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index b3b3c079a0fa..59c9a10f83ba 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -899,6 +899,17 @@ static bool mtk_rx_get_desc(struct mtk_eth *eth, struct mtk_rx_dma_v2 *rxd, return true; } +static void *mtk_max_lro_buf_alloc(gfp_t gfp_mask) +{ + unsigned int size = 
mtk_max_frag_size(MTK_MAX_LRO_RX_LENGTH); + unsigned long data; + + data = __get_free_pages(gfp_mask | __GFP_COMP | __GFP_NOWARN, + get_order(size)); + + return (void *)data; +} + /* the qdma core needs scratch memory to be setup */ static int mtk_init_fq_dma(struct mtk_eth *eth) { @@ -1467,7 +1478,10 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto release_desc; /* alloc new buffer */ - new_data = napi_alloc_frag(ring->frag_size); + if (ring->frag_size <= PAGE_SIZE) + new_data = napi_alloc_frag(ring->frag_size); + else + new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC); if (unlikely(!new_data)) { netdev->stats.rx_dropped++; goto release_desc; @@ -1914,7 +1928,10 @@ static int mtk_rx_alloc(struct mtk_eth *eth, int ring_no, int rx_flag) return -ENOMEM; for (i = 0; i < rx_dma_size; i++) { - ring->data[i] = netdev_alloc_frag(ring->frag_size); + if (ring->frag_size <= PAGE_SIZE) + ring->data[i] = netdev_alloc_frag(ring->frag_size); + else + ring->data[i] = mtk_max_lro_buf_alloc(GFP_KERNEL); if (!ring->data[i]) return -ENOMEM; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index ed5038d98ef6..6400a827173c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -2110,7 +2110,7 @@ static int mlx4_en_get_module_eeprom(struct net_device *dev, en_err(priv, "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", i, offset, ee->len - i, ret); - return 0; + return ret; } i += ret; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 0eb9d74547f8..50422b56a64d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -579,17 +579,6 @@ static void *pci_get_other_drvdata(struct device *this, struct device *other) return pci_get_drvdata(to_pci_dev(other)); } -static int next_phys_dev(struct device *dev, const void *data) -{ - struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data; - - mdev = pci_get_other_drvdata(this->device, dev); - if (!mdev) - return 0; - - return _next_phys_dev(mdev, data); -} - static int next_phys_dev_lag(struct device *dev, const void *data) { struct mlx5_core_dev *mdev, *this = (struct mlx5_core_dev *)data; @@ -624,13 +613,6 @@ static struct mlx5_core_dev *mlx5_get_next_dev(struct mlx5_core_dev *dev, } /* Must be called with intf_mutex held */ -struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) -{ - lockdep_assert_held(&mlx5_intf_mutex); - return mlx5_get_next_dev(dev, &next_phys_dev); -} - -/* Must be called with intf_mutex held */ struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev) { lockdep_assert_held(&mlx5_intf_mutex); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index eae9aa9c0811..978a2bb8e122 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -675,6 +675,9 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) if (!tracer->owner) return; + if (unlikely(!tracer->str_db.loaded)) + goto arm; + block_count = tracer->buff.size / TRACER_BLOCK_SIZE_BYTE; start_offset = tracer->buff.consumer_index * TRACER_BLOCK_SIZE_BYTE; @@ -732,6 +735,7 @@ static void mlx5_fw_tracer_handle_traces(struct work_struct *work) &tmp_trace_block[TRACES_PER_BLOCK - 1]); } +arm: 
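Editor's note: the mtk_eth_soc hunks above share one dispatch rule, restated here for clarity; the ring-setup path uses netdev_alloc_frag()/GFP_KERNEL and the NAPI refill path uses napi_alloc_frag()/GFP_ATOMIC, but the size test is the same in both:

	/* Frags up to one page come from the page-frag caches; larger
	 * LRO buffers come from the page allocator via the new helper.
	 */
	if (ring->frag_size <= PAGE_SIZE)
		new_data = napi_alloc_frag(ring->frag_size);
	else
		new_data = mtk_max_lro_buf_alloc(GFP_ATOMIC);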
mlx5_fw_tracer_arm(dev); } @@ -1136,8 +1140,7 @@ static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void queue_work(tracer->work_queue, &tracer->ownership_change_work); break; case MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE: - if (likely(tracer->str_db.loaded)) - queue_work(tracer->work_queue, &tracer->handle_traces_work); + queue_work(tracer->work_queue, &tracer->handle_traces_work); break; default: mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 68364484a435..3c1edfa33aa7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -565,7 +565,8 @@ static void mlx5e_build_rx_cq_param(struct mlx5_core_dev *mdev, static u8 rq_end_pad_mode(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { bool lro_en = params->packet_merge.type == MLX5E_PACKET_MERGE_LRO; - bool ro = MLX5_CAP_GEN(mdev, relaxed_ordering_write); + bool ro = pcie_relaxed_ordering_enabled(mdev->pdev) && + MLX5_CAP_GEN(mdev, relaxed_ordering_write); return ro && lro_en ? MLX5_WQ_END_PAD_MODE_NONE : MLX5_WQ_END_PAD_MODE_ALIGN; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 43a536cb81db..c0f409c195bf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -38,11 +38,12 @@ void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc) { + bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev); bool ro_write = MLX5_CAP_GEN(mdev, relaxed_ordering_write); bool ro_read = MLX5_CAP_GEN(mdev, relaxed_ordering_read); - MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_read); - MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_write); + MLX5_SET(mkc, mkc, relaxed_ordering_read, ro_pci_enable && ro_read); + MLX5_SET(mkc, mkc, relaxed_ordering_write, ro_pci_enable && ro_write); } static int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index eb90e79388f1..f797fd97d305 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -950,6 +950,13 @@ err_event_reg: return err; } +static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) +{ + mlx5e_rep_tc_netdevice_event_unregister(rpriv); + mlx5e_rep_bond_cleanup(rpriv); + mlx5e_rep_tc_cleanup(rpriv); +} + static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) { struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -961,42 +968,36 @@ static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) return err; } - err = mlx5e_tc_ht_init(&rpriv->tc_ht); - if (err) - goto err_ht_init; - if (rpriv->rep->vport == MLX5_VPORT_UPLINK) { err = mlx5e_init_uplink_rep_tx(rpriv); if (err) goto err_init_tx; } + err = mlx5e_tc_ht_init(&rpriv->tc_ht); + if (err) + goto err_ht_init; + return 0; -err_init_tx: - mlx5e_tc_ht_cleanup(&rpriv->tc_ht); err_ht_init: + if (rpriv->rep->vport == MLX5_VPORT_UPLINK) + mlx5e_cleanup_uplink_rep_tx(rpriv); +err_init_tx: mlx5e_destroy_tises(priv); return err; } -static void mlx5e_cleanup_uplink_rep_tx(struct mlx5e_rep_priv *rpriv) -{ - mlx5e_rep_tc_netdevice_event_unregister(rpriv); - mlx5e_rep_bond_cleanup(rpriv); - mlx5e_rep_tc_cleanup(rpriv); -} - static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) { 
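Editor's note: the params.c and en_common.c hunks above apply the same gate. A minimal sketch of the combined predicate, on the reading that both the PCIe side (pcie_relaxed_ordering_enabled()) and the device capability must agree before relaxed ordering is used; the helper name is hypothetical:

#include <linux/pci.h>

static bool mlx5_ro_write_usable(struct mlx5_core_dev *mdev)
{
	return pcie_relaxed_ordering_enabled(mdev->pdev) &&
	       MLX5_CAP_GEN(mdev, relaxed_ordering_write);
}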
struct mlx5e_rep_priv *rpriv = priv->ppriv; - mlx5e_destroy_tises(priv); + mlx5e_tc_ht_cleanup(&rpriv->tc_ht); if (rpriv->rep->vport == MLX5_VPORT_UPLINK) mlx5e_cleanup_uplink_rep_tx(rpriv); - mlx5e_tc_ht_cleanup(&rpriv->tc_ht); + mlx5e_destroy_tises(priv); } static void mlx5e_rep_enable(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 217cac29057f..2ce3728576d1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2690,9 +2690,6 @@ static int mlx5_esw_offloads_devcom_event(int event, switch (event) { case ESW_OFFLOADS_DEVCOM_PAIR: - if (mlx5_get_next_phys_dev(esw->dev) != peer_esw->dev) - break; - if (mlx5_eswitch_vport_match_metadata_enabled(esw) != mlx5_eswitch_vport_match_metadata_enabled(peer_esw)) break; @@ -2744,6 +2741,9 @@ static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) return; + if (!mlx5_is_lag_supported(esw->dev)) + return; + mlx5_devcom_register_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS, mlx5_esw_offloads_devcom_event, @@ -2761,6 +2761,9 @@ static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) return; + if (!mlx5_is_lag_supported(esw->dev)) + return; + mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, ESW_OFFLOADS_DEVCOM_UNPAIR, esw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index fdcf7f529330..21e5c709b2d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1574,9 +1574,22 @@ static struct mlx5_flow_rule *find_flow_rule(struct fs_fte *fte, return NULL; } -static bool check_conflicting_actions(u32 action1, u32 action2) +static bool check_conflicting_actions_vlan(const struct mlx5_fs_vlan *vlan0, + const struct mlx5_fs_vlan *vlan1) { - u32 xored_actions = action1 ^ action2; + return vlan0->ethtype != vlan1->ethtype || + vlan0->vid != vlan1->vid || + vlan0->prio != vlan1->prio; +} + +static bool check_conflicting_actions(const struct mlx5_flow_act *act1, + const struct mlx5_flow_act *act2) +{ + u32 action1 = act1->action; + u32 action2 = act2->action; + u32 xored_actions; + + xored_actions = action1 ^ action2; /* if one rule only wants to count, it's ok */ if (action1 == MLX5_FLOW_CONTEXT_ACTION_COUNT || @@ -1593,6 +1606,22 @@ static bool check_conflicting_actions(u32 action1, u32 action2) MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2)) return true; + if (action1 & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT && + act1->pkt_reformat != act2->pkt_reformat) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && + act1->modify_hdr != act2->modify_hdr) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH && + check_conflicting_actions_vlan(&act1->vlan[0], &act2->vlan[0])) + return true; + + if (action1 & MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 && + check_conflicting_actions_vlan(&act1->vlan[1], &act2->vlan[1])) + return true; + return false; } @@ -1600,7 +1629,7 @@ static int check_conflicting_ftes(struct fs_fte *fte, const struct mlx5_flow_context *flow_context, const struct mlx5_flow_act *flow_act) { - if (check_conflicting_actions(flow_act->action, fte->action.action)) { + if (check_conflicting_actions(flow_act, &fte->action)) { mlx5_core_warn(get_dev(&fte->node), "Found two FTEs with conflicting 
actions\n"); return -EEXIST; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c index 552b6e26e701..2a8fc547eb37 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.c @@ -783,7 +783,7 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; - struct lag_tracker tracker; + struct lag_tracker tracker = { }; bool do_bond, roce_lag; int err; int i; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h index 72f70fad4641..c81b173156d2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag/lag.h @@ -74,6 +74,16 @@ struct mlx5_lag { struct lag_mpesw lag_mpesw; }; +static inline bool mlx5_is_lag_supported(struct mlx5_core_dev *dev) +{ + if (!MLX5_CAP_GEN(dev, vport_group_manager) || + !MLX5_CAP_GEN(dev, lag_master) || + MLX5_CAP_GEN(dev, num_lag_ports) < 2 || + MLX5_CAP_GEN(dev, num_lag_ports) > MLX5_MAX_PORTS) + return false; + return true; +} + static inline struct mlx5_lag * mlx5_lag_dev(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 484cb1e4fc7f..9cc7afea2758 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -209,7 +209,6 @@ int mlx5_attach_device(struct mlx5_core_dev *dev); void mlx5_detach_device(struct mlx5_core_dev *dev); int mlx5_register_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev); -struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev); struct mlx5_core_dev *mlx5_get_next_phys_dev_lag(struct mlx5_core_dev *dev); void mlx5_dev_list_lock(void); void mlx5_dev_list_unlock(void); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h index a68d931090dd..15c8d4de8350 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_cnt.h @@ -8,8 +8,8 @@ #include "spectrum.h" enum mlxsw_sp_counter_sub_pool_id { - MLXSW_SP_COUNTER_SUB_POOL_FLOW, MLXSW_SP_COUNTER_SUB_POOL_RIF, + MLXSW_SP_COUNTER_SUB_POOL_FLOW, }; int mlxsw_sp_counter_alloc(struct mlxsw_sp *mlxsw_sp, diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 443a5d6eb57b..7c31a46195b2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -507,6 +507,11 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map) key_size += sizeof(struct nfp_flower_ipv6); } + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) { + map[FLOW_PAY_QINQ] = key_size; + key_size += sizeof(struct nfp_flower_vlan); + } + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) { map[FLOW_PAY_GRE] = key_size; if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) @@ -515,11 +520,6 @@ nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map) key_size += sizeof(struct nfp_flower_ipv4_gre_tun); } - if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) { - map[FLOW_PAY_QINQ] = key_size; - key_size += sizeof(struct nfp_flower_vlan); - } - if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) 
|| (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) { map[FLOW_PAY_UDP_TUN] = key_size; @@ -758,6 +758,17 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) } } + if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) { + offset = key_map[FLOW_PAY_QINQ]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_vlan((struct nfp_flower_vlan *)key, + (struct nfp_flower_vlan *)msk, + rules[i]); + } + } + if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) { offset = key_map[FLOW_PAY_GRE]; key = kdata + offset; @@ -798,17 +809,6 @@ static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) } } - if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) { - offset = key_map[FLOW_PAY_QINQ]; - key = kdata + offset; - msk = mdata + offset; - for (i = 0; i < _CT_TYPE_MAX; i++) { - nfp_flower_compile_vlan((struct nfp_flower_vlan *)key, - (struct nfp_flower_vlan *)msk, - rules[i]); - } - } - if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN || key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { offset = key_map[FLOW_PAY_UDP_TUN]; diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 193a167a6762..e01430139b6d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -625,6 +625,14 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, msk += sizeof(struct nfp_flower_ipv6); } + if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) { + nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext, + (struct nfp_flower_vlan *)msk, + rule); + ext += sizeof(struct nfp_flower_vlan); + msk += sizeof(struct nfp_flower_vlan); + } + if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GRE) { if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { struct nfp_flower_ipv6_gre_tun *gre_match; @@ -660,14 +668,6 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, } } - if (NFP_FLOWER_LAYER2_QINQ & key_ls->key_layer_two) { - nfp_flower_compile_vlan((struct nfp_flower_vlan *)ext, - (struct nfp_flower_vlan *)msk, - rule); - ext += sizeof(struct nfp_flower_vlan); - msk += sizeof(struct nfp_flower_vlan); - } - if (key_ls->key_layer & NFP_FLOWER_LAYER_VXLAN || key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c index 54af30961351..6eeeb0fda91f 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_sriov.c @@ -15,7 +15,7 @@ #include "nfp_net_sriov.h" static int -nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg) +nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg, bool warn) { u16 cap_vf; @@ -24,12 +24,14 @@ nfp_net_sriov_check(struct nfp_app *app, int vf, u16 cap, const char *msg) cap_vf = readw(app->pf->vfcfg_tbl2 + NFP_NET_VF_CFG_MB_CAP); if ((cap_vf & cap) != cap) { - nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg); + if (warn) + nfp_warn(app->pf->cpp, "ndo_set_vf_%s not supported\n", msg); return -EOPNOTSUPP; } if (vf < 0 || vf >= app->pf->num_vfs) { - nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); + if (warn) + nfp_warn(app->pf->cpp, "invalid VF id %d\n", vf); return -EINVAL; } @@ -65,7 +67,7 @@ int nfp_app_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) unsigned int vf_offset; int err; - err = 
nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_MAC, "mac", true); if (err) return err; @@ -101,7 +103,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, u32 vlan_tag; int err; - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN, "vlan", true); if (err) return err; @@ -115,7 +117,7 @@ int nfp_app_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos, } /* Check if fw supports or not */ - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", true); if (err) is_proto_sup = false; @@ -149,7 +151,7 @@ int nfp_app_set_vf_rate(struct net_device *netdev, int vf, u32 vf_offset, ratevalue; int err; - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", true); if (err) return err; @@ -181,7 +183,7 @@ int nfp_app_set_vf_spoofchk(struct net_device *netdev, int vf, bool enable) int err; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_SPOOF, - "spoofchk"); + "spoofchk", true); if (err) return err; @@ -205,7 +207,7 @@ int nfp_app_set_vf_trust(struct net_device *netdev, int vf, bool enable) int err; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_TRUST, - "trust"); + "trust", true); if (err) return err; @@ -230,7 +232,7 @@ int nfp_app_set_vf_link_state(struct net_device *netdev, int vf, int err; err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_LINK_STATE, - "link_state"); + "link_state", true); if (err) return err; @@ -265,7 +267,7 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, u8 flags; int err; - err = nfp_net_sriov_check(app, vf, 0, ""); + err = nfp_net_sriov_check(app, vf, 0, "", true); if (err) return err; @@ -285,13 +287,13 @@ int nfp_app_get_vf_config(struct net_device *netdev, int vf, ivi->vlan = FIELD_GET(NFP_NET_VF_CFG_VLAN_VID, vlan_tag); ivi->qos = FIELD_GET(NFP_NET_VF_CFG_VLAN_QOS, vlan_tag); - if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto")) + if (!nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_VLAN_PROTO, "vlan_proto", false)) ivi->vlan_proto = htons(FIELD_GET(NFP_NET_VF_CFG_VLAN_PROT, vlan_tag)); ivi->spoofchk = FIELD_GET(NFP_NET_VF_CFG_CTRL_SPOOF, flags); ivi->trusted = FIELD_GET(NFP_NET_VF_CFG_CTRL_TRUST, flags); ivi->linkstate = FIELD_GET(NFP_NET_VF_CFG_CTRL_LINK_STATE, flags); - err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate"); + err = nfp_net_sriov_check(app, vf, NFP_NET_VF_CFG_MB_CAP_RATE, "rate", false); if (!err) { rate = readl(app->pf->vfcfg_tbl2 + vf_offset + NFP_NET_VF_CFG_RATE); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index f9f80933e0c9..38fe77d1035e 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -1072,13 +1072,11 @@ static int intel_eth_pci_probe(struct pci_dev *pdev, ret = stmmac_dvr_probe(&pdev->dev, plat, &res); if (ret) { - goto err_dvr_probe; + goto err_alloc_irq; } return 0; -err_dvr_probe: - pci_free_irq_vectors(pdev); err_alloc_irq: clk_disable_unprepare(plat->stmmac_clk); clk_unregister_fixed_rate(plat->stmmac_clk); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h 
index 4225efbeda3d..f2e2261b4b7d 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet.h +++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h @@ -547,6 +547,57 @@ static inline void axienet_iow(struct axienet_local *lp, off_t offset, iowrite32(value, lp->regs + offset); } +/** + * axienet_dma_out32 - Memory mapped Axi DMA register write. + * @lp: Pointer to axienet local structure + * @reg: Address offset from the base address of the Axi DMA core + * @value: Value to be written into the Axi DMA register + * + * This function writes the desired value into the corresponding Axi DMA + * register. + */ + +static inline void axienet_dma_out32(struct axienet_local *lp, + off_t reg, u32 value) +{ + iowrite32(value, lp->dma_regs + reg); +} + +#if defined(CONFIG_64BIT) && defined(iowrite64) +/** + * axienet_dma_out64 - Memory mapped Axi DMA register write. + * @lp: Pointer to axienet local structure + * @reg: Address offset from the base address of the Axi DMA core + * @value: Value to be written into the Axi DMA register + * + * This function writes the desired value into the corresponding Axi DMA + * register. + */ +static inline void axienet_dma_out64(struct axienet_local *lp, + off_t reg, u64 value) +{ + iowrite64(value, lp->dma_regs + reg); +} + +static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg, + dma_addr_t addr) +{ + if (lp->features & XAE_FEATURE_DMA_64BIT) + axienet_dma_out64(lp, reg, addr); + else + axienet_dma_out32(lp, reg, lower_32_bits(addr)); +} + +#else /* CONFIG_64BIT */ + +static inline void axienet_dma_out_addr(struct axienet_local *lp, off_t reg, + dma_addr_t addr) +{ + axienet_dma_out32(lp, reg, lower_32_bits(addr)); +} + +#endif /* CONFIG_64BIT */ + /* Function prototypes visible in xilinx_axienet_mdio.c for other files */ int axienet_mdio_enable(struct axienet_local *lp); void axienet_mdio_disable(struct axienet_local *lp); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 93c9f305bba4..1760930ec0c4 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -133,30 +133,6 @@ static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg) return ioread32(lp->dma_regs + reg); } -/** - * axienet_dma_out32 - Memory mapped Axi DMA register write. - * @lp: Pointer to axienet local structure - * @reg: Address offset from the base address of the Axi DMA core - * @value: Value to be written into the Axi DMA register - * - * This function writes the desired value into the corresponding Axi DMA - * register. 
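Editor's note: with the helpers added to the header above, axienet_dma_out_addr() issues a single iowrite64() where one is available instead of two 32-bit halves, and the 32-bit fallback writes only the low word (the probe hunk below rejects 64-bit DMA on 32-bit builds, so the high word is known to be zero there). A sketch of a call site, using the register offset and descriptor address the driver already has:

	/* Program the RX current-descriptor pointer in one access. */
	axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);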
- */ -static inline void axienet_dma_out32(struct axienet_local *lp, - off_t reg, u32 value) -{ - iowrite32(value, lp->dma_regs + reg); -} - -static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg, - dma_addr_t addr) -{ - axienet_dma_out32(lp, reg, lower_32_bits(addr)); - - if (lp->features & XAE_FEATURE_DMA_64BIT) - axienet_dma_out32(lp, reg + 4, upper_32_bits(addr)); -} - static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr, struct axidma_bd *desc) { @@ -2061,6 +2037,11 @@ static int axienet_probe(struct platform_device *pdev) iowrite32(0x0, desc); } } + if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { + dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n"); + ret = -EINVAL; + goto cleanup_clk; + } ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); if (ret) { diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 45c3c4a1101b..9fb567524220 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -99,6 +99,7 @@ struct sixpack { unsigned int rx_count; unsigned int rx_count_cooked; + spinlock_t rxlock; int mtu; /* Our mtu (to spot changes!) */ int buffsize; /* Max buffers sizes */ @@ -565,6 +566,7 @@ static int sixpack_open(struct tty_struct *tty) sp->dev = dev; spin_lock_init(&sp->lock); + spin_lock_init(&sp->rxlock); refcount_set(&sp->refcnt, 1); init_completion(&sp->dead); @@ -913,6 +915,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd) sp->led_state = 0x60; /* fill trailing bytes with zeroes */ sp->tty->ops->write(sp->tty, &sp->led_state, 1); + spin_lock_bh(&sp->rxlock); rest = sp->rx_count; if (rest != 0) for (i = rest; i <= 3; i++) @@ -930,6 +933,7 @@ static void decode_std_command(struct sixpack *sp, unsigned char cmd) sp_bump(sp, 0); } sp->rx_count_cooked = 0; + spin_unlock_bh(&sp->rxlock); } break; case SIXP_TX_URUN: printk(KERN_DEBUG "6pack: TX underrun\n"); @@ -959,8 +963,11 @@ sixpack_decode(struct sixpack *sp, const unsigned char *pre_rbuff, int count) decode_prio_command(sp, inbyte); else if ((inbyte & SIXP_STD_CMD_MASK) != 0) decode_std_command(sp, inbyte); - else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) + else if ((sp->status & SIXP_RX_DCD_MASK) == SIXP_RX_DCD_MASK) { + spin_lock_bh(&sp->rxlock); decode_data(sp, inbyte); + spin_unlock_bh(&sp->rxlock); + } } } diff --git a/drivers/net/phy/aquantia_main.c b/drivers/net/phy/aquantia_main.c index a8db1a19011b..c7047f5d7a9b 100644 --- a/drivers/net/phy/aquantia_main.c +++ b/drivers/net/phy/aquantia_main.c @@ -34,6 +34,8 @@ #define MDIO_AN_VEND_PROV 0xc400 #define MDIO_AN_VEND_PROV_1000BASET_FULL BIT(15) #define MDIO_AN_VEND_PROV_1000BASET_HALF BIT(14) +#define MDIO_AN_VEND_PROV_5000BASET_FULL BIT(11) +#define MDIO_AN_VEND_PROV_2500BASET_FULL BIT(10) #define MDIO_AN_VEND_PROV_DOWNSHIFT_EN BIT(4) #define MDIO_AN_VEND_PROV_DOWNSHIFT_MASK GENMASK(3, 0) #define MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT 4 @@ -231,9 +233,20 @@ static int aqr_config_aneg(struct phy_device *phydev) phydev->advertising)) reg |= MDIO_AN_VEND_PROV_1000BASET_HALF; + /* Handle the case when the 2.5G and 5G speeds are not advertised */ + if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + phydev->advertising)) + reg |= MDIO_AN_VEND_PROV_2500BASET_FULL; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + phydev->advertising)) + reg |= MDIO_AN_VEND_PROV_5000BASET_FULL; + ret = phy_modify_mmd_changed(phydev, MDIO_MMD_AN, MDIO_AN_VEND_PROV,
MDIO_AN_VEND_PROV_1000BASET_HALF | - MDIO_AN_VEND_PROV_1000BASET_FULL, reg); + MDIO_AN_VEND_PROV_1000BASET_FULL | + MDIO_AN_VEND_PROV_2500BASET_FULL | + MDIO_AN_VEND_PROV_5000BASET_FULL, reg); if (ret < 0) return ret; if (ret > 0) diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 6a467e7817a6..59fe356942b5 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -2072,6 +2072,8 @@ static struct phy_driver at803x_driver[] = { /* ATHEROS AR9331 */ PHY_ID_MATCH_EXACT(ATH9331_PHY_ID), .name = "Qualcomm Atheros AR9331 built-in PHY", + .probe = at803x_probe, + .remove = at803x_remove, .suspend = at803x_suspend, .resume = at803x_resume, .flags = PHY_POLL_CABLE_TEST, @@ -2087,6 +2089,8 @@ static struct phy_driver at803x_driver[] = { /* Qualcomm Atheros QCA9561 */ PHY_ID_MATCH_EXACT(QCA9561_PHY_ID), .name = "Qualcomm Atheros QCA9561 built-in PHY", + .probe = at803x_probe, + .remove = at803x_remove, .suspend = at803x_suspend, .resume = at803x_resume, .flags = PHY_POLL_CABLE_TEST, @@ -2151,6 +2155,8 @@ static struct phy_driver at803x_driver[] = { PHY_ID_MATCH_EXACT(QCA8081_PHY_ID), .name = "Qualcomm QCA8081", .flags = PHY_POLL_CABLE_TEST, + .probe = at803x_probe, + .remove = at803x_remove, .config_intr = at803x_config_intr, .handle_interrupt = at803x_handle_interrupt, .get_tunable = at803x_get_tunable, diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 8561f2d4443b..13dafe7a29bd 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -137,6 +137,7 @@ #define DP83867_DOWNSHIFT_2_COUNT 2 #define DP83867_DOWNSHIFT_4_COUNT 4 #define DP83867_DOWNSHIFT_8_COUNT 8 +#define DP83867_SGMII_AUTONEG_EN BIT(7) /* CFG3 bits */ #define DP83867_CFG3_INT_OE BIT(7) @@ -855,6 +856,32 @@ static int dp83867_phy_reset(struct phy_device *phydev) DP83867_PHYCR_FORCE_LINK_GOOD, 0); } +static void dp83867_link_change_notify(struct phy_device *phydev) +{ + /* There is a limitation in DP83867 PHY device where SGMII AN is + * only triggered once after the device is booted up. Even after the + * PHY TPI is down and up again, SGMII AN is not triggered and + * hence no new in-band message from PHY to MAC side SGMII. + * This could cause an issue during power up, when PHY is up prior + * to MAC. In this condition, once MAC side SGMII is up, MAC side + * SGMII wouldn't receive new in-band message from TI PHY with + * correct link status, speed and duplex info. + * Thus, a SW solution is implemented here to retrigger SGMII Auto-Neg + * whenever there is a link change.
+ */ + if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { + int val = 0; + + val = phy_clear_bits(phydev, DP83867_CFG2, + DP83867_SGMII_AUTONEG_EN); + if (val < 0) + return; + + phy_set_bits(phydev, DP83867_CFG2, + DP83867_SGMII_AUTONEG_EN); + } +} + static struct phy_driver dp83867_driver[] = { { .phy_id = DP83867_PHY_ID, @@ -879,6 +906,8 @@ static struct phy_driver dp83867_driver[] = { .suspend = genphy_suspend, .resume = genphy_resume, + + .link_change_notify = dp83867_link_change_notify, }, }; module_phy_driver(dp83867_driver); diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index 58d602985877..8a2dbe849866 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -1046,7 +1046,6 @@ int __init mdio_bus_init(void) return ret; } -EXPORT_SYMBOL_GPL(mdio_bus_init); #if IS_ENABLED(CONFIG_PHYLIB) void mdio_bus_exit(void) diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 1b54684b68a0..96d3c40932d8 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -110,7 +110,7 @@ static int smsc_phy_config_init(struct phy_device *phydev) struct smsc_phy_priv *priv = phydev->priv; int rc; - if (!priv->energy_enable) + if (!priv->energy_enable || phydev->irq != PHY_POLL) return 0; rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); @@ -210,6 +210,8 @@ static int lan95xx_config_aneg_ext(struct phy_device *phydev) * response on link pulses to detect presence of plugged Ethernet cable. * The Energy Detect Power-Down mode is enabled again in the end of procedure to * save approximately 220 mW of power if cable is unplugged. + * The workaround is only applicable to poll mode. Energy Detect Power-Down may + * not be used in interrupt mode lest link change detection becomes unreliable. */ static int lan87xx_read_status(struct phy_device *phydev) { @@ -217,7 +219,7 @@ static int lan87xx_read_status(struct phy_device *phydev) int err = genphy_read_status(phydev); - if (!phydev->link && priv->energy_enable) { + if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) { /* Disable EDPD to wake up PHY */ int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS); if (rc < 0) diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index 7a8c11a26eb5..4704ed6f00ef 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1750,7 +1750,7 @@ static const struct driver_info ax88179_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1763,7 +1763,7 @@ static const struct driver_info ax88178a_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1776,7 +1776,7 @@ static const struct driver_info cypress_GX3_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1789,7 +1789,7 @@ static const struct driver_info dlink_dub1312_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup 
= ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1802,7 +1802,7 @@ static const struct driver_info sitecom_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1815,7 +1815,7 @@ static const struct driver_info samsung_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1828,7 +1828,7 @@ static const struct driver_info lenovo_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1841,7 +1841,7 @@ static const struct driver_info belkin_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1854,7 +1854,7 @@ static const struct driver_info toshiba_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1867,7 +1867,7 @@ static const struct driver_info mct_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1880,7 +1880,7 @@ static const struct driver_info at_umc2000_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1893,7 +1893,7 @@ static const struct driver_info at_umc200_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; @@ -1906,7 +1906,7 @@ static const struct driver_info at_umc2000sp_info = { .link_reset = ax88179_link_reset, .reset = ax88179_reset, .stop = ax88179_stop, - .flags = FLAG_ETHER | FLAG_FRAMING_AX, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_SEND_ZLP, .rx_fixup = ax88179_rx_fixup, .tx_fixup = ax88179_tx_fixup, }; diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 466da01ba2e3..2cb833b3006a 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -312,6 +312,7 @@ static bool veth_skb_is_eligible_for_gro(const struct net_device *dev, static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); + struct netdev_queue *queue = NULL; struct veth_rq *rq = NULL; struct net_device *rcv; int length = skb->len; @@ -329,6 +330,7 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) rxq = skb_get_queue_mapping(skb); if (rxq < rcv->real_num_rx_queues) { rq = &rcv_priv->rq[rxq]; + queue = 
netdev_get_tx_queue(dev, rxq); /* The napi pointer is available when an XDP program is * attached or when GRO is enabled @@ -340,6 +342,8 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev) skb_tx_timestamp(skb); if (likely(veth_forward_skb(rcv, skb, rq, use_napi) == NET_RX_SUCCESS)) { + if (queue) + txq_trans_cond_update(queue); if (!use_napi) dev_lstats_add(dev, length); } else { diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index db05b5e930be..969a67970e71 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -2768,7 +2768,6 @@ static const struct ethtool_ops virtnet_ethtool_ops = { static void virtnet_freeze_down(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - int i; /* Make sure no work handler is accessing the device */ flush_work(&vi->config_work); @@ -2776,14 +2775,8 @@ static void virtnet_freeze_down(struct virtio_device *vdev) netif_tx_lock_bh(vi->dev); netif_device_detach(vi->dev); netif_tx_unlock_bh(vi->dev); - cancel_delayed_work_sync(&vi->refill); - - if (netif_running(vi->dev)) { - for (i = 0; i < vi->max_queue_pairs; i++) { - napi_disable(&vi->rq[i].napi); - virtnet_napi_tx_disable(&vi->sq[i].napi); - } - } + if (netif_running(vi->dev)) + virtnet_close(vi->dev); } static int init_vqs(struct virtnet_info *vi); @@ -2791,7 +2784,7 @@ static int init_vqs(struct virtnet_info *vi); static int virtnet_restore_up(struct virtio_device *vdev) { struct virtnet_info *vi = vdev->priv; - int err, i; + int err; err = init_vqs(vi); if (err) @@ -2800,15 +2793,9 @@ static int virtnet_restore_up(struct virtio_device *vdev) virtio_device_ready(vdev); if (netif_running(vi->dev)) { - for (i = 0; i < vi->curr_queue_pairs; i++) - if (!try_fill_recv(vi, &vi->rq[i], GFP_KERNEL)) - schedule_delayed_work(&vi->refill, 0); - - for (i = 0; i < vi->max_queue_pairs; i++) { - virtnet_napi_enable(vi->rq[i].vq, &vi->rq[i].napi); - virtnet_napi_tx_enable(vi, vi->sq[i].vq, - &vi->sq[i].napi); - } + err = virtnet_open(vi->dev); + if (err) + return err; } netif_tx_lock_bh(vi->dev); diff --git a/drivers/nfc/nfcmrvl/usb.c b/drivers/nfc/nfcmrvl/usb.c index a99aedff795d..ea7309453096 100644 --- a/drivers/nfc/nfcmrvl/usb.c +++ b/drivers/nfc/nfcmrvl/usb.c @@ -388,13 +388,25 @@ static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data) int err; while ((urb = usb_get_from_anchor(&drv_data->deferred))) { + usb_anchor_urb(urb, &drv_data->tx_anchor); + err = usb_submit_urb(urb, GFP_ATOMIC); - if (err) + if (err) { + kfree(urb->setup_packet); + usb_unanchor_urb(urb); + usb_free_urb(urb); break; + } drv_data->tx_in_flight++; + usb_free_urb(urb); + } + + /* Clean up the remaining deferred urbs.
*/ + while ((urb = usb_get_from_anchor(&drv_data->deferred))) { + kfree(urb->setup_packet); + usb_free_urb(urb); } - usb_scuttle_anchored_urbs(&drv_data->deferred); } static int nfcmrvl_resume(struct usb_interface *intf) diff --git a/drivers/nfc/st21nfca/se.c b/drivers/nfc/st21nfca/se.c index 7e213f8ddc98..df8d27cf2956 100644 --- a/drivers/nfc/st21nfca/se.c +++ b/drivers/nfc/st21nfca/se.c @@ -300,6 +300,8 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, int r = 0; struct device *dev = &hdev->ndev->dev; struct nfc_evt_transaction *transaction; + u32 aid_len; + u8 params_len; pr_debug("connectivity gate event: %x\n", event); @@ -308,43 +310,48 @@ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, r = nfc_se_connectivity(hdev->ndev, host); break; case ST21NFCA_EVT_TRANSACTION: - /* - * According to specification etsi 102 622 + /* According to specification etsi 102 622 * 11.2.2.4 EVT_TRANSACTION Table 52 * Description Tag Length * AID 81 5 to 16 * PARAMETERS 82 0 to 255 + * + * The key differences are that the aid storage length is variably sized + * in the packet but fixed in nfc_evt_transaction, that aid_len + * is a u8 in the packet but a u32 in the structure, and that the tags in + * the packet are not included in nfc_evt_transaction. + * + * size in bytes: 1 1 5-16 1 1 0-255 + * offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4 + * member name: aid_tag(M) aid_len aid params_tag(M) params_len params + * example: 0x81 5-16 X 0x82 0-255 X */ - if (skb->len < NFC_MIN_AID_LENGTH + 2 && - skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) + if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO; - transaction = devm_kzalloc(dev, skb->len - 2, GFP_KERNEL); - if (!transaction) - return -ENOMEM; - - transaction->aid_len = skb->data[1]; + aid_len = skb->data[1]; - /* Checking if the length of the AID is valid */ - if (transaction->aid_len > sizeof(transaction->aid)) - return -EINVAL; + if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid)) + return -EPROTO; - memcpy(transaction->aid, &skb->data[2], - transaction->aid_len); + params_len = skb->data[aid_len + 3]; - /* Check next byte is PARAMETERS tag (82) */ - if (skb->data[transaction->aid_len + 2] != - NFC_EVT_TRANSACTION_PARAMS_TAG) + /* Verify the PARAMETERS tag is (82), and finally check that there is enough + * space in the packet to read everything.
+ */ + if ((skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG) || + (skb->len < aid_len + 4 + params_len)) return -EPROTO; - transaction->params_len = skb->data[transaction->aid_len + 3]; + transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL); + if (!transaction) + return -ENOMEM; - /* Total size is allocated (skb->len - 2) minus fixed array members */ - if (transaction->params_len > ((skb->len - 2) - sizeof(struct nfc_evt_transaction))) - return -EINVAL; + transaction->aid_len = aid_len; + transaction->params_len = params_len; - memcpy(transaction->params, skb->data + - transaction->aid_len + 4, transaction->params_len); + memcpy(transaction->aid, &skb->data[2], aid_len); + memcpy(transaction->params, &skb->data[aid_len + 4], params_len); r = nfc_se_transaction(hdev->ndev, host, transaction); break; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 24165daee3c8..b3d9c29aba1e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2546,6 +2546,20 @@ static const struct nvme_core_quirk_entry core_quirks[] = { .vid = 0x1e0f, .mn = "KCD6XVUL6T40", .quirks = NVME_QUIRK_NO_APST, + }, + { + /* + * The external Samsung X5 SSD fails initialization without a + * delay before checking if it is ready and has a whole set of + * other problems. To make this even more interesting, it + * shares the PCI ID with internal Samsung 970 Evo Plus that + * does not need or want these quirks. + */ + .vid = 0x144d, + .mn = "Samsung Portable SSD X5", + .quirks = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | + NVME_QUIRK_NO_DEEPEST_PS | + NVME_QUIRK_IGNORE_DEV_SUBNQN, } }; @@ -3285,8 +3299,8 @@ static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, * we have no UUID set */ if (uuid_is_null(&ids->uuid)) { - printk_ratelimited(KERN_WARNING - "No UUID available providing old NGUID\n"); + dev_warn_ratelimited(dev, + "No UUID available providing old NGUID\n"); return sysfs_emit(buf, "%pU\n", ids->nguid); } return sysfs_emit(buf, "%pU\n", &ids->uuid); @@ -3863,6 +3877,7 @@ static int nvme_init_ns_head(struct nvme_ns *ns, unsigned nsid, if (ret) { dev_err(ctrl->device, "globally duplicate IDs for nsid %d\n", nsid); + nvme_print_device_info(ctrl); return ret; } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 9b72b6ecf33c..0da94b233fed 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -503,6 +503,7 @@ struct nvme_ctrl_ops { void (*submit_async_event)(struct nvme_ctrl *ctrl); void (*delete_ctrl)(struct nvme_ctrl *ctrl); int (*get_address)(struct nvme_ctrl *ctrl, char *buf, int size); + void (*print_device_info)(struct nvme_ctrl *ctrl); }; /* @@ -548,6 +549,33 @@ static inline struct request *nvme_cid_to_rq(struct blk_mq_tags *tags, return blk_mq_tag_to_rq(tags, nvme_tag_from_cid(command_id)); } +/* + * Return the length of the string without the space padding + */ +static inline int nvme_strlen(char *s, int len) +{ + while (s[len - 1] == ' ') + len--; + return len; +} + +static inline void nvme_print_device_info(struct nvme_ctrl *ctrl) +{ + struct nvme_subsystem *subsys = ctrl->subsys; + + if (ctrl->ops->print_device_info) { + ctrl->ops->print_device_info(ctrl); + return; + } + + dev_err(ctrl->device, + "VID:%04x model:%.*s firmware:%.*s\n", subsys->vendor_id, + nvme_strlen(subsys->model, sizeof(subsys->model)), + subsys->model, nvme_strlen(subsys->firmware_rev, + sizeof(subsys->firmware_rev)), + subsys->firmware_rev); +} + #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS void nvme_fault_inject_init(struct 
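Editor's note: a worked instance of the offset table above, for a hypothetical event carrying a 5-byte AID (aid_len = 5):

	/* skb->data[0] = 0x81         aid_tag (NFC_EVT_TRANSACTION_AID_TAG)
	 * skb->data[1] = 5            aid_len
	 * skb->data[2..6]             aid bytes
	 * skb->data[7] = 0x82         params_tag (aid_len + 2)
	 * skb->data[8] = params_len   (aid_len + 3)
	 * skb->data[9..]              params     (aid_len + 4)
	 *
	 * The checks above therefore require
	 * skb->len >= aid_len + 4 + params_len, i.e. at least 9 bytes
	 * before the first parameter byte for this AID.
	 */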
nvme_fault_inject *fault_inj, const char *dev_name); diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 48f4f6eb877b..d7b24ee17285 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1334,6 +1334,14 @@ static void nvme_warn_reset(struct nvme_dev *dev, u32 csts) dev_warn(dev->ctrl.device, "controller is down; will reset: CSTS=0x%x, PCI_STATUS read failed (%d)\n", csts, result); + + if (csts != ~0) + return; + + dev_warn(dev->ctrl.device, + "Does your device have a faulty power saving mode enabled?\n"); + dev_warn(dev->ctrl.device, + "Try \"nvme_core.default_ps_max_latency_us=0 pcie_aspm=off\" and report a bug\n"); } static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) @@ -2976,6 +2984,21 @@ static int nvme_pci_get_address(struct nvme_ctrl *ctrl, char *buf, int size) return snprintf(buf, size, "%s\n", dev_name(&pdev->dev)); } + +static void nvme_pci_print_device_info(struct nvme_ctrl *ctrl) +{ + struct pci_dev *pdev = to_pci_dev(to_nvme_dev(ctrl)->dev); + struct nvme_subsystem *subsys = ctrl->subsys; + + dev_err(ctrl->device, + "VID:DID %04x:%04x model:%.*s firmware:%.*s\n", + pdev->vendor, pdev->device, + nvme_strlen(subsys->model, sizeof(subsys->model)), + subsys->model, nvme_strlen(subsys->firmware_rev, + sizeof(subsys->firmware_rev)), + subsys->firmware_rev); +} + static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .name = "pcie", .module = THIS_MODULE, @@ -2987,6 +3010,7 @@ static const struct nvme_ctrl_ops nvme_pci_ctrl_ops = { .free_ctrl = nvme_pci_free_ctrl, .submit_async_event = nvme_pci_submit_async_event, .get_address = nvme_pci_get_address, + .print_device_info = nvme_pci_print_device_info, }; static int nvme_dev_map(struct nvme_dev *dev) @@ -3421,7 +3445,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(REDHAT, 0x0010), /* Qemu emulated controller */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x126f, 0x2263), /* Silicon Motion unidentified */ - .driver_data = NVME_QUIRK_NO_NS_DESC_LIST, }, + .driver_data = NVME_QUIRK_NO_NS_DESC_LIST | + NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1bb1, 0x0100), /* Seagate Nytro Flash Storage */ .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | NVME_QUIRK_NO_NS_DESC_LIST, }, @@ -3437,6 +3462,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY | NVME_QUIRK_DISABLE_WRITE_ZEROES| NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1987, 0x5012), /* Phison E12 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1987, 0x5016), /* Phison E16 */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1b4b, 0x1092), /* Lexar 256 GB SSD */ @@ -3447,12 +3474,24 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_DEVICE(0x1cc1, 0x8201), /* ADATA SX8200PNP 512GB */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_IGNORE_DEV_SUBNQN, }, + { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */ + .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN }, { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x15b7, 0x2001), /* Sandisk Skyhawk */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1d97, 0x2263), /* SPCC */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x144d, 0xa80b), /* Samsung PM9B1 256G and 512G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x144d, 
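Editor's note: to make the %.*s printing above concrete: NVMe identify strings are fixed-width, space-padded, and not NUL-terminated, so nvme_strlen() trims the padding down to a precision argument. A standalone sketch of the trimming, with a toy 8-byte field:

	char model[8] = { 'X', '5', ' ', ' ', ' ', ' ', ' ', ' ' };
	int len = nvme_strlen(model, sizeof(model));		/* -> 2 */

	dev_err(ctrl->device, "model:%.*s\n", len, model);	/* "model:X5" */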
0xa809), /* Samsung MZALQ256HBJD 256G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1cc4, 0x6303), /* UMIS RPJTJ512MGE1QDY 512G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1cc4, 0x6302), /* UMIS RPJTJ256MGE1QDY 256G */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x2646, 0x2262), /* KINGSTON SKC2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ @@ -3463,6 +3502,10 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1202), /* MAXIO MAP1202 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1cc1, 0x5350), /* ADATA XPG GAMMIX S50 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1e49, 0x0041), /* ZHITAI TiPro7000 NVMe SSD */ + .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), .driver_data = NVME_QUIRK_DMA_ADDRESS_BITS_48, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0065), @@ -3483,10 +3526,6 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_128_BYTES_SQES | NVME_QUIRK_SHARED_TAGS | NVME_QUIRK_SKIP_CID_GEN }, - { PCI_DEVICE(0x144d, 0xa808), /* Samsung X5 */ - .driver_data = NVME_QUIRK_DELAY_BEFORE_CHK_RDY| - NVME_QUIRK_NO_DEEPEST_PS | - NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) }, { 0, } }; diff --git a/drivers/platform/mellanox/Kconfig b/drivers/platform/mellanox/Kconfig index 72df4b8f4dd8..09c7829e95c4 100644 --- a/drivers/platform/mellanox/Kconfig +++ b/drivers/platform/mellanox/Kconfig @@ -85,7 +85,7 @@ config NVSW_SN2201 depends on I2C depends on REGMAP_I2C help - This driver provides support for the Nvidia SN2201 platfom. + This driver provides support for the Nvidia SN2201 platform. The SN2201 is a highly integrated for one rack unit system with L3 management switches. It has 48 x 1Gbps RJ45 + 4 x 100G QSFP28 ports in a compact 1RU form factor. The system also including a diff --git a/drivers/platform/mellanox/nvsw-sn2201.c b/drivers/platform/mellanox/nvsw-sn2201.c index 0bcdc7c75007..2923daf63b75 100644 --- a/drivers/platform/mellanox/nvsw-sn2201.c +++ b/drivers/platform/mellanox/nvsw-sn2201.c @@ -326,7 +326,7 @@ static struct resource nvsw_sn2201_lpc_res[] = { }; /* SN2201 I2C platform data. 
*/ -struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = { +static struct mlxreg_core_hotplug_platform_data nvsw_sn2201_i2c_data = { .irq = NVSW_SN2201_CPLD_SYSIRQ, }; diff --git a/drivers/platform/mips/Kconfig b/drivers/platform/mips/Kconfig index d421e1482395..6b51ad01f791 100644 --- a/drivers/platform/mips/Kconfig +++ b/drivers/platform/mips/Kconfig @@ -17,7 +17,7 @@ menuconfig MIPS_PLATFORM_DEVICES if MIPS_PLATFORM_DEVICES config CPU_HWMON - tristate "Loongson-3 CPU HWMon Driver" + bool "Loongson-3 CPU HWMon Driver" depends on MACH_LOONGSON64 select HWMON default y diff --git a/drivers/platform/x86/barco-p50-gpio.c b/drivers/platform/x86/barco-p50-gpio.c index 05534287bc26..8dd672339485 100644 --- a/drivers/platform/x86/barco-p50-gpio.c +++ b/drivers/platform/x86/barco-p50-gpio.c @@ -405,11 +405,14 @@ MODULE_DEVICE_TABLE(dmi, dmi_ids); static int __init p50_module_init(void) { struct resource res = DEFINE_RES_IO(P50_GPIO_IO_PORT_BASE, P50_PORT_CMD + 1); + int ret; if (!dmi_first_match(dmi_ids)) return -ENODEV; - platform_driver_register(&p50_gpio_driver); + ret = platform_driver_register(&p50_gpio_driver); + if (ret) + return ret; gpio_pdev = platform_device_register_simple(DRIVER_NAME, PLATFORM_DEVID_NONE, &res, 1); if (IS_ERR(gpio_pdev)) { diff --git a/drivers/platform/x86/gigabyte-wmi.c b/drivers/platform/x86/gigabyte-wmi.c index 1ef606e3ef80..497ad2f64a51 100644 --- a/drivers/platform/x86/gigabyte-wmi.c +++ b/drivers/platform/x86/gigabyte-wmi.c @@ -140,6 +140,7 @@ static u8 gigabyte_wmi_detect_sensor_usability(struct wmi_device *wdev) }} static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = { + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M DS3H-CF"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B450M S2H V2"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE AX V2"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("B550 AORUS ELITE"), @@ -156,6 +157,7 @@ static const struct dmi_system_id gigabyte_wmi_known_working_platforms[] = { DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 GAMING X"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 I AORUS PRO WIFI"), DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("X570 UD"), + DMI_EXACT_MATCH_GIGABYTE_BOARD_NAME("Z690M AORUS ELITE AX DDR4"), { } }; diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 667f94bba905..0d8cb22e30df 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -38,6 +38,7 @@ MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); #define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C" #define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4" #define HP_OMEN_EC_THERMAL_PROFILE_OFFSET 0x95 +#define zero_if_sup(tmp) (zero_insize_support?0:sizeof(tmp)) // use when zero insize is required /* DMI board names of devices that should use the omen specific path for * thermal profiles. 
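The zero_if_sup() macro introduced above drives the hp-wmi hunks that follow: once a one-time probe has shown that the BIOS expects a zero input size, every read query passes 0 instead of sizeof(buffer). A minimal stand-alone sketch of that selection logic, with the probe result mocked as a plain flag (the surrounding query plumbing is hypothetical, not the driver's):

#include <stdbool.h>
#include <stdio.h>

static bool zero_insize_support;

/* Same shape as the driver macro: 0 once support is known, else sizeof(). */
#define zero_if_sup(tmp) (zero_insize_support ? 0 : sizeof(tmp))

int main(void)
{
	int val = 0;

	zero_insize_support = false;	/* default before the probe */
	printf("insize = %zu\n", zero_if_sup(val));	/* sizeof(int) */

	/*
	 * As set when the HPWMI_HARDWARE_QUERY probe returns
	 * HPWMI_RET_INVALID_PARAMETERS for a non-zero input size.
	 */
	zero_insize_support = true;
	printf("insize = %zu\n", zero_if_sup(val));	/* 0 */
	return 0;
}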
@@ -220,6 +221,7 @@ static struct input_dev *hp_wmi_input_dev; static struct platform_device *hp_wmi_platform_dev; static struct platform_profile_handler platform_profile_handler; static bool platform_profile_support; +static bool zero_insize_support; static struct rfkill *wifi_rfkill; static struct rfkill *bluetooth_rfkill; @@ -290,14 +292,16 @@ static int hp_wmi_perform_query(int query, enum hp_wmi_command command, struct bios_return *bios_return; union acpi_object *obj = NULL; struct bios_args *args = NULL; - int mid, actual_outsize, ret; + int mid, actual_insize, actual_outsize; size_t bios_args_size; + int ret; mid = encode_outsize_for_pvsz(outsize); if (WARN_ON(mid < 0)) return mid; - bios_args_size = struct_size(args, data, insize); + actual_insize = max(insize, 128); + bios_args_size = struct_size(args, data, actual_insize); args = kmalloc(bios_args_size, GFP_KERNEL); if (!args) return -ENOMEM; @@ -374,7 +378,7 @@ static int hp_wmi_read_int(int query) int val = 0, ret; ret = hp_wmi_perform_query(query, HPWMI_READ, &val, - 0, sizeof(val)); + zero_if_sup(val), sizeof(val)); if (ret) return ret < 0 ? ret : -EINVAL; @@ -410,7 +414,8 @@ static int hp_wmi_get_tablet_mode(void) return -ENODEV; ret = hp_wmi_perform_query(HPWMI_SYSTEM_DEVICE_MODE, HPWMI_READ, - system_device_mode, 0, sizeof(system_device_mode)); + system_device_mode, zero_if_sup(system_device_mode), + sizeof(system_device_mode)); if (ret < 0) return ret; @@ -497,7 +502,7 @@ static int hp_wmi_fan_speed_max_get(void) int val = 0, ret; ret = hp_wmi_perform_query(HPWMI_FAN_SPEED_MAX_GET_QUERY, HPWMI_GM, - &val, 0, sizeof(val)); + &val, zero_if_sup(val), sizeof(val)); if (ret) return ret < 0 ? ret : -EINVAL; @@ -509,7 +514,7 @@ static int __init hp_wmi_bios_2008_later(void) { int state = 0; int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + zero_if_sup(state), sizeof(state)); if (!ret) return 1; @@ -520,7 +525,7 @@ static int __init hp_wmi_bios_2009_later(void) { u8 state[128]; int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + zero_if_sup(state), sizeof(state)); if (!ret) return 1; @@ -598,7 +603,7 @@ static int hp_wmi_rfkill2_refresh(void) int err, i; err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + zero_if_sup(state), sizeof(state)); if (err) return err; @@ -1007,7 +1012,7 @@ static int __init hp_wmi_rfkill2_setup(struct platform_device *device) int err, i; err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, - 0, sizeof(state)); + zero_if_sup(state), sizeof(state)); if (err) return err < 0 ? 
err : -EINVAL; @@ -1483,11 +1488,15 @@ static int __init hp_wmi_init(void) { int event_capable = wmi_has_guid(HPWMI_EVENT_GUID); int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID); - int err; + int err, tmp = 0; if (!bios_capable && !event_capable) return -ENODEV; + if (hp_wmi_perform_query(HPWMI_HARDWARE_QUERY, HPWMI_READ, &tmp, + sizeof(tmp), sizeof(tmp)) == HPWMI_RET_INVALID_PARAMETERS) + zero_insize_support = true; + if (event_capable) { err = hp_wmi_input_setup(); if (err) diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 216d31e3403d..79cff1fc675c 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -122,6 +122,12 @@ static const struct dmi_system_id dmi_vgbs_allow_list[] = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Spectre x360 Convertible 15-df0xxx"), }, }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Microsoft Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "Surface Go"), + }, + }, { } }; diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index edaf22e5ae98..40183bda7894 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -1912,6 +1912,7 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(ROCKETLAKE, &tgl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, &tgl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &tgl_reg_map), {} }; diff --git a/drivers/platform/x86/intel/pmt/crashlog.c b/drivers/platform/x86/intel/pmt/crashlog.c index 34daf9df168b..ace1239bc0a0 100644 --- a/drivers/platform/x86/intel/pmt/crashlog.c +++ b/drivers/platform/x86/intel/pmt/crashlog.c @@ -282,7 +282,7 @@ static int pmt_crashlog_probe(struct auxiliary_device *auxdev, auxiliary_set_drvdata(auxdev, priv); for (i = 0; i < intel_vsec_dev->num_resources; i++) { - struct intel_pmt_entry *entry = &priv->entry[i].entry; + struct intel_pmt_entry *entry = &priv->entry[priv->num_entries].entry; ret = intel_pmt_dev_create(entry, &pmt_crashlog_ns, intel_vsec_dev, i); if (ret < 0) diff --git a/drivers/regulator/qcom_smd-regulator.c b/drivers/regulator/qcom_smd-regulator.c index 7dff94a2eb7e..ef6e47d025ca 100644 --- a/drivers/regulator/qcom_smd-regulator.c +++ b/drivers/regulator/qcom_smd-regulator.c @@ -723,19 +723,19 @@ static const struct regulator_desc pms405_pldo600 = { static const struct regulator_desc mp5496_smpa2 = { .linear_ranges = (struct linear_range[]) { - REGULATOR_LINEAR_RANGE(725000, 0, 27, 12500), + REGULATOR_LINEAR_RANGE(600000, 0, 127, 12500), }, .n_linear_ranges = 1, - .n_voltages = 28, + .n_voltages = 128, .ops = &rpm_mp5496_ops, }; static const struct regulator_desc mp5496_ldoa2 = { .linear_ranges = (struct linear_range[]) { - REGULATOR_LINEAR_RANGE(1800000, 0, 60, 25000), + REGULATOR_LINEAR_RANGE(800000, 0, 127, 25000), }, .n_linear_ranges = 1, - .n_voltages = 61, + .n_voltages = 128, .ops = &rpm_mp5496_ops, }; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index d0eab5700dc5..00684e11976b 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -160,8 +160,8 @@ static void ibmvfc_npiv_logout(struct ibmvfc_host *); static void ibmvfc_tgt_implicit_logout_and_del(struct ibmvfc_target *); static void ibmvfc_tgt_move_login(struct ibmvfc_target *); -static void ibmvfc_release_sub_crqs(struct ibmvfc_host *); -static void ibmvfc_init_sub_crqs(struct ibmvfc_host *); +static void ibmvfc_dereg_sub_crqs(struct 
ibmvfc_host *); +static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *); static const char *unknown_error = "unknown error"; @@ -917,7 +917,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) struct vio_dev *vdev = to_vio_dev(vhost->dev); unsigned long flags; - ibmvfc_release_sub_crqs(vhost); + ibmvfc_dereg_sub_crqs(vhost); /* Re-enable the CRQ */ do { @@ -936,7 +936,7 @@ static int ibmvfc_reenable_crq_queue(struct ibmvfc_host *vhost) spin_unlock(vhost->crq.q_lock); spin_unlock_irqrestore(vhost->host->host_lock, flags); - ibmvfc_init_sub_crqs(vhost); + ibmvfc_reg_sub_crqs(vhost); return rc; } @@ -955,7 +955,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) struct vio_dev *vdev = to_vio_dev(vhost->dev); struct ibmvfc_queue *crq = &vhost->crq; - ibmvfc_release_sub_crqs(vhost); + ibmvfc_dereg_sub_crqs(vhost); /* Close the CRQ */ do { @@ -988,7 +988,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost) spin_unlock(vhost->crq.q_lock); spin_unlock_irqrestore(vhost->host->host_lock, flags); - ibmvfc_init_sub_crqs(vhost); + ibmvfc_reg_sub_crqs(vhost); return rc; } @@ -5682,6 +5682,8 @@ static int ibmvfc_alloc_queue(struct ibmvfc_host *vhost, queue->cur = 0; queue->fmt = fmt; queue->size = PAGE_SIZE / fmt_size; + + queue->vhost = vhost; return 0; } @@ -5757,9 +5759,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, ENTER; - if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) - return -ENOMEM; - rc = h_reg_sub_crq(vdev->unit_address, scrq->msg_token, PAGE_SIZE, &scrq->cookie, &scrq->hw_irq); @@ -5790,7 +5789,6 @@ static int ibmvfc_register_scsi_channel(struct ibmvfc_host *vhost, } scrq->hwq_id = index; - scrq->vhost = vhost; LEAVE; return 0; @@ -5800,7 +5798,6 @@ irq_failed: rc = plpar_hcall_norets(H_FREE_SUB_CRQ, vdev->unit_address, scrq->cookie); } while (rtas_busy_delay(rc)); reg_failed: - ibmvfc_free_queue(vhost, scrq); LEAVE; return rc; } @@ -5826,12 +5823,50 @@ static void ibmvfc_deregister_scsi_channel(struct ibmvfc_host *vhost, int index) if (rc) dev_err(dev, "Failed to free sub-crq[%d]: rc=%ld\n", index, rc); - ibmvfc_free_queue(vhost, scrq); + /* Clean out the queue */ + memset(scrq->msgs.crq, 0, PAGE_SIZE); + scrq->cur = 0; + + LEAVE; +} + +static void ibmvfc_reg_sub_crqs(struct ibmvfc_host *vhost) +{ + int i, j; + + ENTER; + if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs) + return; + + for (i = 0; i < nr_scsi_hw_queues; i++) { + if (ibmvfc_register_scsi_channel(vhost, i)) { + for (j = i; j > 0; j--) + ibmvfc_deregister_scsi_channel(vhost, j - 1); + vhost->do_enquiry = 0; + return; + } + } + + LEAVE; +} + +static void ibmvfc_dereg_sub_crqs(struct ibmvfc_host *vhost) +{ + int i; + + ENTER; + if (!vhost->mq_enabled || !vhost->scsi_scrqs.scrqs) + return; + + for (i = 0; i < nr_scsi_hw_queues; i++) + ibmvfc_deregister_scsi_channel(vhost, i); + LEAVE; } static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) { + struct ibmvfc_queue *scrq; int i, j; ENTER; @@ -5847,30 +5882,41 @@ static void ibmvfc_init_sub_crqs(struct ibmvfc_host *vhost) } for (i = 0; i < nr_scsi_hw_queues; i++) { - if (ibmvfc_register_scsi_channel(vhost, i)) { - for (j = i; j > 0; j--) - ibmvfc_deregister_scsi_channel(vhost, j - 1); + scrq = &vhost->scsi_scrqs.scrqs[i]; + if (ibmvfc_alloc_queue(vhost, scrq, IBMVFC_SUB_CRQ_FMT)) { + for (j = i; j > 0; j--) { + scrq = &vhost->scsi_scrqs.scrqs[j - 1]; + ibmvfc_free_queue(vhost, scrq); + } kfree(vhost->scsi_scrqs.scrqs); vhost->scsi_scrqs.scrqs = NULL; vhost->scsi_scrqs.active_queues = 0; vhost->do_enquiry = 0; - 
break; + vhost->mq_enabled = 0; + return; } } + ibmvfc_reg_sub_crqs(vhost); + LEAVE; } static void ibmvfc_release_sub_crqs(struct ibmvfc_host *vhost) { + struct ibmvfc_queue *scrq; int i; ENTER; if (!vhost->scsi_scrqs.scrqs) return; - for (i = 0; i < nr_scsi_hw_queues; i++) - ibmvfc_deregister_scsi_channel(vhost, i); + ibmvfc_dereg_sub_crqs(vhost); + + for (i = 0; i < nr_scsi_hw_queues; i++) { + scrq = &vhost->scsi_scrqs.scrqs[i]; + ibmvfc_free_queue(vhost, scrq); + } kfree(vhost->scsi_scrqs.scrqs); vhost->scsi_scrqs.scrqs = NULL; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h index 3718406e0988..c39a245f43d0 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.h +++ b/drivers/scsi/ibmvscsi/ibmvfc.h @@ -789,6 +789,7 @@ struct ibmvfc_queue { spinlock_t _lock; spinlock_t *q_lock; + struct ibmvfc_host *vhost; struct ibmvfc_event_pool evt_pool; struct list_head sent; struct list_head free; @@ -797,7 +798,6 @@ struct ibmvfc_queue { union ibmvfc_iu cancel_rsp; /* Sub-CRQ fields */ - struct ibmvfc_host *vhost; unsigned long cookie; unsigned long vios_cookie; unsigned long hw_irq; diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 256ec6d08c16..9d01a3e3c26a 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -9795,7 +9795,7 @@ static int ipr_alloc_mem(struct ipr_ioa_cfg *ioa_cfg) GFP_KERNEL); if (!ioa_cfg->hrrq[i].host_rrq) { - while (--i > 0) + while (--i >= 0) dma_free_coherent(&pdev->dev, sizeof(u32) * ioa_cfg->hrrq[i].size, ioa_cfg->hrrq[i].host_rrq, @@ -10068,7 +10068,7 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg, ioa_cfg->vectors_info[i].desc, &ioa_cfg->hrrq[i]); if (rc) { - while (--i >= 0) + while (--i > 0) free_irq(pci_irq_vector(pdev, i), &ioa_cfg->hrrq[i]); return rc; diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index b1be0dd0337a..f5d74958b664 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -420,8 +420,6 @@ int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t, uint32_t); void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *); -void lpfc_sli4_abort_fcp_cmpl(struct lpfc_hba *h, struct lpfc_iocbq *i, - struct lpfc_wcqe_complete *w); void lpfc_sli_free_hbq(struct lpfc_hba *, struct hbq_dmabuf *); @@ -630,7 +628,7 @@ void lpfc_nvmet_invalidate_host(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp); void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, - struct lpfc_wcqe_complete *abts_cmpl); + struct lpfc_iocbq *rspiocb); void lpfc_create_multixri_pools(struct lpfc_hba *phba); void lpfc_create_destroy_pools(struct lpfc_hba *phba); void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 9d36b20fb878..13dfe285493d 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -197,7 +197,7 @@ lpfc_ct_reject_event(struct lpfc_nodelist *ndlp, memset(bpl, 0, sizeof(struct ulp_bde64)); bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys)); bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys)); - bpl->tus.f.bdeFlags = BUFF_TYPE_BLP_64; + bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64; bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4); bpl->tus.w = le32_to_cpu(bpl->tus.w); diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 07f9a6e61e10..3fababb7c181 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -2998,10 +2998,7 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct 
lpfc_iocbq *cmdiocb, ndlp->nlp_DID, ulp_status, ulp_word4); - /* Call NLP_EVT_DEVICE_RM if link is down or LOGO is aborted */ if (lpfc_error_lost_link(ulp_status, ulp_word4)) { - lpfc_disc_state_machine(vport, ndlp, cmdiocb, - NLP_EVT_DEVICE_RM); skip_recovery = 1; goto out; } @@ -3021,18 +3018,10 @@ lpfc_cmpl_els_logo(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, spin_unlock_irq(&ndlp->lock); lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); - lpfc_els_free_iocb(phba, cmdiocb); - lpfc_nlp_put(ndlp); - - /* Presume the node was released. */ - return; + goto out_rsrc_free; } out: - /* Driver is done with the IO. */ - lpfc_els_free_iocb(phba, cmdiocb); - lpfc_nlp_put(ndlp); - /* At this point, the LOGO processing is complete. NOTE: For a * pt2pt topology, we are assuming the NPortID will only change * on link up processing. For a LOGO / PLOGI initiated by the @@ -3059,6 +3048,10 @@ out: ndlp->nlp_DID, ulp_status, ulp_word4, tmo, vport->num_disc_nodes); + + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); + lpfc_disc_start(vport); return; } @@ -3075,6 +3068,10 @@ out: lpfc_disc_state_machine(vport, ndlp, cmdiocb, NLP_EVT_DEVICE_RM); } +out_rsrc_free: + /* Driver is done with the I/O. */ + lpfc_els_free_iocb(phba, cmdiocb); + lpfc_nlp_put(ndlp); } /** diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h index 8511369d2cf8..f024415731ac 100644 --- a/drivers/scsi/lpfc/lpfc_hw4.h +++ b/drivers/scsi/lpfc/lpfc_hw4.h @@ -4487,6 +4487,9 @@ struct wqe_common { #define wqe_sup_SHIFT 6 #define wqe_sup_MASK 0x00000001 #define wqe_sup_WORD word11 +#define wqe_ffrq_SHIFT 6 +#define wqe_ffrq_MASK 0x00000001 +#define wqe_ffrq_WORD word11 #define wqe_wqec_SHIFT 7 #define wqe_wqec_MASK 0x00000001 #define wqe_wqec_WORD word11 diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 93b94c64518d..750dd1e9f2cc 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -12188,7 +12188,7 @@ lpfc_sli_enable_msi(struct lpfc_hba *phba) rc = pci_enable_msi(phba->pcidev); if (!rc) lpfc_printf_log(phba, KERN_INFO, LOG_INIT, - "0462 PCI enable MSI mode success.\n"); + "0012 PCI enable MSI mode success.\n"); else { lpfc_printf_log(phba, KERN_INFO, LOG_INIT, "0471 PCI enable MSI mode failed (%d)\n", rc); diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c index 639f86635127..b86ff9fcdf0c 100644 --- a/drivers/scsi/lpfc/lpfc_nportdisc.c +++ b/drivers/scsi/lpfc/lpfc_nportdisc.c @@ -834,7 +834,8 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, lpfc_nvmet_invalidate_host(phba, ndlp); if (ndlp->nlp_DID == Fabric_DID) { - if (vport->port_state <= LPFC_FDISC) + if (vport->port_state <= LPFC_FDISC || + vport->fc_flag & FC_PT2PT) goto out; lpfc_linkdown_port(vport); spin_lock_irq(shost->host_lock); diff --git a/drivers/scsi/lpfc/lpfc_nvme.c b/drivers/scsi/lpfc/lpfc_nvme.c index 335e90633933..cd10ee6482fc 100644 --- a/drivers/scsi/lpfc/lpfc_nvme.c +++ b/drivers/scsi/lpfc/lpfc_nvme.c @@ -1065,25 +1065,37 @@ lpfc_nvme_io_cmd_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeIn, nCmd->rcv_rsplen = wcqe->parameter; nCmd->status = 0; + /* Get the NVME cmd details for this unique error. 
*/ + cp = (struct nvme_fc_cmd_iu *)nCmd->cmdaddr; + ep = (struct nvme_fc_ersp_iu *)nCmd->rspaddr; + /* Check if this is really an ERSP */ if (nCmd->rcv_rsplen == LPFC_NVME_ERSP_LEN) { lpfc_ncmd->status = IOSTAT_SUCCESS; lpfc_ncmd->result = 0; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME, - "6084 NVME Completion ERSP: " - "xri %x placed x%x\n", - lpfc_ncmd->cur_iocbq.sli4_xritag, - wcqe->total_data_placed); + "6084 NVME FCP_ERR ERSP: " + "xri %x placed x%x opcode x%x cmd_id " + "x%x cqe_status x%x\n", + lpfc_ncmd->cur_iocbq.sli4_xritag, + wcqe->total_data_placed, + cp->sqe.common.opcode, + cp->sqe.common.command_id, + ep->cqe.status); break; } lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "6081 NVME Completion Protocol Error: " "xri %x status x%x result x%x " - "placed x%x\n", + "placed x%x opcode x%x cmd_id x%x, " + "cqe_status x%x\n", lpfc_ncmd->cur_iocbq.sli4_xritag, lpfc_ncmd->status, lpfc_ncmd->result, - wcqe->total_data_placed); + wcqe->total_data_placed, + cp->sqe.common.opcode, + cp->sqe.common.command_id, + ep->cqe.status); break; case IOSTAT_LOCAL_REJECT: /* Let fall through to set command final state. */ @@ -1195,7 +1207,8 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, { struct lpfc_hba *phba = vport->phba; struct nvmefc_fcp_req *nCmd = lpfc_ncmd->nvmeCmd; - struct lpfc_iocbq *pwqeq = &(lpfc_ncmd->cur_iocbq); + struct nvme_common_command *sqe; + struct lpfc_iocbq *pwqeq = &lpfc_ncmd->cur_iocbq; union lpfc_wqe128 *wqe = &pwqeq->wqe; uint32_t req_len; @@ -1252,8 +1265,14 @@ lpfc_nvme_prep_io_cmd(struct lpfc_vport *vport, cstat->control_requests++; } - if (pnode->nlp_nvme_info & NLP_NVME_NSLER) + if (pnode->nlp_nvme_info & NLP_NVME_NSLER) { bf_set(wqe_erp, &wqe->generic.wqe_com, 1); + sqe = &((struct nvme_fc_cmd_iu *) + nCmd->cmdaddr)->sqe.common; + if (sqe->opcode == nvme_admin_async_event) + bf_set(wqe_ffrq, &wqe->generic.wqe_com, 1); + } + /* * Finish initializing those WQE fields that are independent * of the nvme_cmnd request_buffer @@ -1787,7 +1806,7 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, * lpfc_nvme_abort_fcreq_cmpl - Complete an NVME FCP abort request. * @phba: Pointer to HBA context object * @cmdiocb: Pointer to command iocb object. - * @abts_cmpl: Pointer to wcqe complete object. + * @rspiocb: Pointer to response iocb object. * * This is the callback function for any NVME FCP IO that was aborted. * @@ -1796,8 +1815,10 @@ lpfc_nvme_fcp_io_submit(struct nvme_fc_local_port *pnvme_lport, **/ void lpfc_nvme_abort_fcreq_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, - struct lpfc_wcqe_complete *abts_cmpl) + struct lpfc_iocbq *rspiocb) { + struct lpfc_wcqe_complete *abts_cmpl = &rspiocb->wcqe_cmpl; + lpfc_printf_log(phba, KERN_INFO, LOG_NVME, "6145 ABORT_XRI_CN completing on rpi x%x " "original iotag x%x, abort cmd iotag x%x " @@ -1840,6 +1861,7 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, struct lpfc_nvme_fcpreq_priv *freqpriv; unsigned long flags; int ret_val; + struct nvme_fc_cmd_iu *cp; /* Validate pointers. LLDD fault handling with transport does * have timing races. @@ -1963,10 +1985,16 @@ lpfc_nvme_fcp_abort(struct nvme_fc_local_port *pnvme_lport, return; } + /* + * Get Command Id from cmd to plug into response. This + * code is not needed in the next NVME Transport drop. 
+ */ + cp = (struct nvme_fc_cmd_iu *)lpfc_nbuf->nvmeCmd->cmdaddr; lpfc_printf_vlog(vport, KERN_INFO, LOG_NVME_ABTS, "6138 Transport Abort NVME Request Issued for " - "ox_id x%x\n", - nvmereq_wqe->sli4_xritag); + "ox_id x%x nvme opcode x%x nvme cmd_id x%x\n", + nvmereq_wqe->sli4_xritag, cp->sqe.common.opcode, + cp->sqe.common.command_id); return; out_unlock: diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index d43968203248..ba5e4016262e 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -6062,6 +6062,9 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd) int status; u32 logit = LOG_FCP; + if (!rport) + return FAILED; + rdata = rport->dd_data; if (!rdata || !rdata->pnode) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, @@ -6140,6 +6143,9 @@ lpfc_target_reset_handler(struct scsi_cmnd *cmnd) unsigned long flags; DECLARE_WAIT_QUEUE_HEAD_ONSTACK(waitq); + if (!rport) + return FAILED; + rdata = rport->dd_data; if (!rdata || !rdata->pnode) { lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 6ed696c4602a..80ac3a051c19 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -1930,7 +1930,7 @@ lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total) sync_buf = __lpfc_sli_get_iocbq(phba); if (!sync_buf) { lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT, - "6213 No available WQEs for CMF_SYNC_WQE\n"); + "6244 No available WQEs for CMF_SYNC_WQE\n"); ret_val = ENOMEM; goto out_unlock; } @@ -3805,7 +3805,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, set_job_ulpword4(cmdiocbp, IOERR_ABORT_REQUESTED); /* - * For SLI4, irsiocb contains + * For SLI4, irspiocb contains * NO_XRI in sli_xritag, it * shall not affect releasing * sgl (xri) process. @@ -3823,7 +3823,7 @@ lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, } } } - (cmdiocbp->cmd_cmpl) (phba, cmdiocbp, saveq); + cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq); } else lpfc_sli_release_iocbq(phba, cmdiocbp); } else { @@ -4063,8 +4063,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba, cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED; if (cmdiocbq->cmd_cmpl) { spin_unlock_irqrestore(&phba->hbalock, iflag); - (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, - &rspiocbq); + cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq); spin_lock_irqsave(&phba->hbalock, iflag); } break; @@ -10288,7 +10287,7 @@ __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number, * @flag: Flag indicating if this command can be put into txq. * * __lpfc_sli_issue_fcp_io_s3 is wrapper function to invoke lockless func to - * send an iocb command to an HBA with SLI-4 interface spec. + * send an iocb command to an HBA with SLI-3 interface spec. * * This function takes the hbalock before invoking the lockless version. 
* The function will return success after it successfully submit the wqe to @@ -12740,7 +12739,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl; cmdiocbq->wait_cmd_cmpl = NULL; if (cmdiocbq->cmd_cmpl) - (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, NULL); + cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL); else lpfc_sli_release_iocbq(phba, cmdiocbq); return; @@ -12754,9 +12753,9 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba, /* Set the exchange busy flag for task management commands */ if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) && - !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) { + !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) { lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf, - cur_iocbq); + cur_iocbq); if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY)) lpfc_cmd->flags |= LPFC_SBUF_XBUSY; else @@ -13896,7 +13895,7 @@ void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba) * @irspiocbq: Pointer to work-queue completion queue entry. * * This routine handles an ELS work-queue completion event and construct - * a pseudo response ELS IODBQ from the SLI4 ELS WCQE for the common + * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common * discovery engine to handle. * * Return: Pointer to the receive IOCBQ, NULL otherwise. @@ -13940,7 +13939,7 @@ lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba, if (bf_get(lpfc_wcqe_c_xb, wcqe)) { spin_lock_irqsave(&phba->hbalock, iflags); - cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; + irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY; spin_unlock_irqrestore(&phba->hbalock, iflags); } @@ -14799,7 +14798,7 @@ lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq, /* Pass the cmd_iocb and the wcqe to the upper layer */ memcpy(&cmdiocbq->wcqe_cmpl, wcqe, sizeof(struct lpfc_wcqe_complete)); - (cmdiocbq->cmd_cmpl)(phba, cmdiocbq, cmdiocbq); + cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq); } else { lpfc_printf_log(phba, KERN_WARNING, LOG_SLI, "0375 FCP cmdiocb not callback function " @@ -18956,7 +18955,7 @@ lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport, /* Free iocb created in lpfc_prep_seq */ list_for_each_entry_safe(curr_iocb, next_iocb, - &iocbq->list, list) { + &iocbq->list, list) { list_del_init(&curr_iocb->list); lpfc_sli_release_iocbq(phba, curr_iocb); } diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h index 4fab79ed58ed..2ab6f7db64d8 100644 --- a/drivers/scsi/lpfc/lpfc_version.h +++ b/drivers/scsi/lpfc/lpfc_version.h @@ -20,7 +20,7 @@ * included with this package. 
* *******************************************************************/ -#define LPFC_DRIVER_VERSION "14.2.0.3" +#define LPFC_DRIVER_VERSION "14.2.0.4" #define LPFC_DRIVER_NAME "lpfc" /* Used for SLI 2/3 */ diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 37d46ae5c61d..9a1ae52bb621 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -5369,6 +5369,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) Mpi2ConfigReply_t mpi_reply; Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL; Mpi26PCIeIOUnitPage1_t pcie_iounit_pg1; + u16 depth; int sz; int rc = 0; @@ -5380,7 +5381,7 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) goto out; /* sas iounit page 1 */ sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData); - sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL); + sas_iounit_pg1 = kzalloc(sizeof(Mpi2SasIOUnitPage1_t), GFP_KERNEL); if (!sas_iounit_pg1) { pr_err("%s: failure at %s:%d/%s()!\n", ioc->name, __FILE__, __LINE__, __func__); @@ -5393,16 +5394,16 @@ static int _base_assign_fw_reported_qd(struct MPT3SAS_ADAPTER *ioc) ioc->name, __FILE__, __LINE__, __func__); goto out; } - ioc->max_wideport_qd = - (le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth)) ? - le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth) : - MPT3SAS_SAS_QUEUE_DEPTH; - ioc->max_narrowport_qd = - (le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth)) ? - le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth) : - MPT3SAS_SAS_QUEUE_DEPTH; - ioc->max_sata_qd = (sas_iounit_pg1->SATAMaxQDepth) ? - sas_iounit_pg1->SATAMaxQDepth : MPT3SAS_SATA_QUEUE_DEPTH; + + depth = le16_to_cpu(sas_iounit_pg1->SASWideMaxQueueDepth); + ioc->max_wideport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); + + depth = le16_to_cpu(sas_iounit_pg1->SASNarrowMaxQueueDepth); + ioc->max_narrowport_qd = (depth ? depth : MPT3SAS_SAS_QUEUE_DEPTH); + + depth = sas_iounit_pg1->SATAMaxQDepth; + ioc->max_sata_qd = (depth ? 
depth : MPT3SAS_SATA_QUEUE_DEPTH); + /* pcie iounit page 1 */ rc = mpt3sas_config_get_pcie_iounit_pg1(ioc, &mpi_reply, &pcie_iounit_pg1, sizeof(Mpi26PCIeIOUnitPage1_t)); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index bfce60183a6e..836ddc476764 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -4031,7 +4031,7 @@ pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance) return 0; out_unwind: - while (--i > 0) + while (--i >= 0) free_irq(pci_irq_vector(pdev, i), &pinstance->hrrq_vector[i]); pci_free_irq_vectors(pdev); return rc; diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 1f423f723d06..b8a76b89f85a 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -2826,6 +2826,24 @@ static void zbc_open_zone(struct sdebug_dev_info *devip, } } +static inline void zbc_set_zone_full(struct sdebug_dev_info *devip, + struct sdeb_zone_state *zsp) +{ + switch (zsp->z_cond) { + case ZC2_IMPLICIT_OPEN: + devip->nr_imp_open--; + break; + case ZC3_EXPLICIT_OPEN: + devip->nr_exp_open--; + break; + default: + WARN_ONCE(true, "Invalid zone %llu condition %x\n", + zsp->z_start, zsp->z_cond); + break; + } + zsp->z_cond = ZC5_FULL; +} + static void zbc_inc_wp(struct sdebug_dev_info *devip, unsigned long long lba, unsigned int num) { @@ -2838,7 +2856,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip, if (zsp->z_type == ZBC_ZTYPE_SWR) { zsp->z_wp += num; if (zsp->z_wp >= zend) - zsp->z_cond = ZC5_FULL; + zbc_set_zone_full(devip, zsp); return; } @@ -2857,7 +2875,7 @@ static void zbc_inc_wp(struct sdebug_dev_info *devip, n = num; } if (zsp->z_wp >= zend) - zsp->z_cond = ZC5_FULL; + zbc_set_zone_full(devip, zsp); num -= n; lba += n; diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 2c0dd64159b0..5d21f07456c6 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -212,7 +212,12 @@ iscsi_create_endpoint(int dd_size) return NULL; mutex_lock(&iscsi_ep_idr_mutex); - id = idr_alloc(&iscsi_ep_idr, ep, 0, -1, GFP_NOIO); + + /* + * First endpoint id should be 1 to comply with user space + * applications (iscsid). + */ + id = idr_alloc(&iscsi_ep_idr, ep, 1, -1, GFP_NOIO); if (id < 0) { mutex_unlock(&iscsi_ep_idr_mutex); printk(KERN_ERR "Could not allocate endpoint ID. 
Error %d.\n", diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 895b56c8f25e..a1a2ac09066f 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -3072,7 +3072,7 @@ static void sd_read_cpr(struct scsi_disk *sdkp) goto out; /* We must have at least a 64B header and one 32B range descriptor */ - vpd_len = get_unaligned_be16(&buffer[2]) + 3; + vpd_len = get_unaligned_be16(&buffer[2]) + 4; if (vpd_len > buf_len || vpd_len < 64 + 32 || (vpd_len & 31)) { sd_printk(KERN_ERR, sdkp, "Invalid Concurrent Positioning Ranges VPD page\n"); diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index ca3530982e52..fe000da11332 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1844,7 +1844,7 @@ static struct scsi_host_template scsi_driver = { .cmd_per_lun = 2048, .this_id = -1, /* Ensure there are no gaps in presented sgls */ - .virt_boundary_mask = PAGE_SIZE-1, + .virt_boundary_mask = HV_HYP_PAGE_SIZE - 1, .no_write_same = 1, .track_queue_depth = 1, .change_queue_depth = storvsc_change_queue_depth, @@ -1895,6 +1895,7 @@ static int storvsc_probe(struct hv_device *device, int target = 0; struct storvsc_device *stor_device; int max_sub_channels = 0; + u32 max_xfer_bytes; /* * We support sub-channels for storage on SCSI and FC controllers. @@ -1968,12 +1969,28 @@ static int storvsc_probe(struct hv_device *device, } /* max cmd length */ host->max_cmd_len = STORVSC_MAX_CMD_LEN; - /* - * set the table size based on the info we got * from the host. + * Any reasonable Hyper-V configuration should provide a + * max_transfer_bytes value aligned to HV_HYP_PAGE_SIZE; round it + * down anyway to protect against any weird value. + */ + max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE); + /* max_hw_sectors_kb */ + host->max_sectors = max_xfer_bytes >> 9; + /* + * There are two requirements for Hyper-V storvsc sgl segments, + * on which the calculation of the maximum number of segments + * below is based: + * + * 1. Except for the first and last sgl segment, all sgl segments + * should be aligned to HV_HYP_PAGE_SIZE, which also means the + * maximum number of segments in an sgl can be calculated by + * dividing the total max transfer length by HV_HYP_PAGE_SIZE. + * + * 2. Except for the first and last, each entry in the SGL must + * have an offset that is a multiple of HV_HYP_PAGE_SIZE. */ - host->sg_tablesize = (stor_device->max_transfer_bytes >> PAGE_SHIFT); + host->sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1; /* * For non-IDE disks, the host supports multiple channels. * Set the number of HW queues we are supporting.
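The new storvsc limits above reduce to simple arithmetic on the host-reported transfer size. A stand-alone sketch with an assumed max_transfer_bytes of 0x40800 (illustrative only, not a value from the patch): round down to a Hyper-V page multiple, convert to 512-byte sectors for max_sectors, and allow one segment per page plus one extra for unaligned first/last entries:

#include <stdio.h>

#define HV_HYP_PAGE_SHIFT 12
#define HV_HYP_PAGE_SIZE  (1UL << HV_HYP_PAGE_SHIFT)

int main(void)
{
	unsigned long max_transfer_bytes = 0x40800;	/* assumed host-reported value */

	/* round down to a HV_HYP_PAGE_SIZE multiple, as storvsc_probe() does */
	unsigned long max_xfer_bytes = max_transfer_bytes & ~(HV_HYP_PAGE_SIZE - 1);
	unsigned long max_sectors = max_xfer_bytes >> 9;	/* 512-byte sectors */
	/* one segment per Hyper-V page, plus one for unaligned first/last entries */
	unsigned long sg_tablesize = (max_xfer_bytes >> HV_HYP_PAGE_SHIFT) + 1;

	/* 0x40800 -> max_xfer_bytes=0x40000, max_sectors=512, sg_tablesize=65 */
	printf("max_xfer_bytes=%#lx max_sectors=%lu sg_tablesize=%lu\n",
	       max_xfer_bytes, max_sectors, sg_tablesize);
	return 0;
}

With these numbers the rounding drops the 0x800 stray bytes, and the 65-entry sgl still covers the full 256 KiB transfer.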
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h index 51a82f7803d3..9d16cf925483 100644 --- a/drivers/scsi/vmw_pvscsi.h +++ b/drivers/scsi/vmw_pvscsi.h @@ -331,8 +331,8 @@ struct PVSCSIRingReqDesc { u8 tag; u8 bus; u8 target; - u8 vcpuHint; - u8 unused[59]; + u16 vcpuHint; + u8 unused[58]; } __packed; /* diff --git a/drivers/soc/amlogic/meson-mx-socinfo.c b/drivers/soc/amlogic/meson-mx-socinfo.c index 78f0f1aeca57..92125dd65f33 100644 --- a/drivers/soc/amlogic/meson-mx-socinfo.c +++ b/drivers/soc/amlogic/meson-mx-socinfo.c @@ -126,6 +126,7 @@ static int __init meson_mx_socinfo_init(void) np = of_find_matching_node(NULL, meson_mx_socinfo_analog_top_ids); if (np) { analog_top_regmap = syscon_node_to_regmap(np); + of_node_put(np); if (IS_ERR(analog_top_regmap)) return PTR_ERR(analog_top_regmap); diff --git a/drivers/soc/amlogic/meson-secure-pwrc.c b/drivers/soc/amlogic/meson-secure-pwrc.c index a10a417a87db..e93518763526 100644 --- a/drivers/soc/amlogic/meson-secure-pwrc.c +++ b/drivers/soc/amlogic/meson-secure-pwrc.c @@ -152,8 +152,10 @@ static int meson_secure_pwrc_probe(struct platform_device *pdev) } pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL); - if (!pwrc) + if (!pwrc) { + of_node_put(sm_np); return -ENOMEM; + } pwrc->fw = meson_sm_get(sm_np); of_node_put(sm_np); diff --git a/drivers/soc/bcm/brcmstb/pm/pm-arm.c b/drivers/soc/bcm/brcmstb/pm/pm-arm.c index 3cbb165d6e30..70ad0f3dce28 100644 --- a/drivers/soc/bcm/brcmstb/pm/pm-arm.c +++ b/drivers/soc/bcm/brcmstb/pm/pm-arm.c @@ -783,6 +783,7 @@ static int brcmstb_pm_probe(struct platform_device *pdev) } ret = brcmstb_init_sram(dn); + of_node_put(dn); if (ret) { pr_err("error setting up SRAM for PM\n"); return ret; } diff --git a/drivers/soc/imx/imx8m-blk-ctrl.c b/drivers/soc/imx/imx8m-blk-ctrl.c index 7f49385ed2f8..7ebc28709e94 100644 --- a/drivers/soc/imx/imx8m-blk-ctrl.c +++ b/drivers/soc/imx/imx8m-blk-ctrl.c @@ -667,7 +667,7 @@ static const struct imx8m_blk_ctrl_domain_data imx8mp_media_blk_ctl_domain_data[ }, [IMX8MP_MEDIABLK_PD_LCDIF_2] = { .name = "mediablk-lcdif-2", - .clk_names = (const char *[]){ "disp1", "apb", "axi", }, + .clk_names = (const char *[]){ "disp2", "apb", "axi", }, .num_clks = 3, .gpc_name = "lcdif2", .rst_mask = BIT(11) | BIT(12) | BIT(24), diff --git a/drivers/soc/mediatek/Kconfig b/drivers/soc/mediatek/Kconfig index fdd8bc08569e..3c3eedea35f7 100644 --- a/drivers/soc/mediatek/Kconfig +++ b/drivers/soc/mediatek/Kconfig @@ -73,4 +73,14 @@ config MTK_MMSYS Say yes here to add support for the MediaTek Multimedia Subsystem (MMSYS). +config MTK_SVS + tristate "MediaTek Smart Voltage Scaling (SVS)" + depends on MTK_EFUSE && NVMEM + help + The Smart Voltage Scaling (SVS) engine is a piece of hardware + with several controllers (banks) that calculate suitable + voltages for different power domains (CPU/GPU/CCI) according to + the chip's process corner, temperature and other factors. The + DVFS driver can then apply the SVS bank voltage to the PMIC/buck.
+ endmenu diff --git a/drivers/soc/mediatek/Makefile b/drivers/soc/mediatek/Makefile index 90270f8114ed..0e9e703c931a 100644 --- a/drivers/soc/mediatek/Makefile +++ b/drivers/soc/mediatek/Makefile @@ -7,3 +7,4 @@ obj-$(CONFIG_MTK_SCPSYS) += mtk-scpsys.o obj-$(CONFIG_MTK_SCPSYS_PM_DOMAINS) += mtk-pm-domains.o obj-$(CONFIG_MTK_MMSYS) += mtk-mmsys.o obj-$(CONFIG_MTK_MMSYS) += mtk-mutex.o +obj-$(CONFIG_MTK_SVS) += mtk-svs.o diff --git a/drivers/soc/mediatek/mt6795-pm-domains.h b/drivers/soc/mediatek/mt6795-pm-domains.h new file mode 100644 index 000000000000..ef07c9dfdd9b --- /dev/null +++ b/drivers/soc/mediatek/mt6795-pm-domains.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __SOC_MEDIATEK_MT6795_PM_DOMAINS_H +#define __SOC_MEDIATEK_MT6795_PM_DOMAINS_H + +#include "mtk-pm-domains.h" +#include <dt-bindings/power/mt6795-power.h> + +/* + * MT6795 power domain support + */ + +static const struct scpsys_domain_data scpsys_domain_data_mt6795[] = { + [MT6795_POWER_DOMAIN_VDEC] = { + .name = "vdec", + .sta_mask = PWR_STATUS_VDEC, + .ctl_offs = SPM_VDE_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + }, + [MT6795_POWER_DOMAIN_VENC] = { + .name = "venc", + .sta_mask = PWR_STATUS_VENC, + .ctl_offs = SPM_VEN_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + }, + [MT6795_POWER_DOMAIN_ISP] = { + .name = "isp", + .sta_mask = PWR_STATUS_ISP, + .ctl_offs = SPM_ISP_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(13, 12), + }, + [MT6795_POWER_DOMAIN_MM] = { + .name = "mm", + .sta_mask = PWR_STATUS_DISP, + .ctl_offs = SPM_DIS_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(12, 12), + .bp_infracfg = { + BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MM_M0 | + MT8173_TOP_AXI_PROT_EN_MM_M1), + }, + }, + [MT6795_POWER_DOMAIN_MJC] = { + .name = "mjc", + .sta_mask = BIT(20), + .ctl_offs = 0x298, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + }, + [MT6795_POWER_DOMAIN_AUDIO] = { + .name = "audio", + .sta_mask = PWR_STATUS_AUDIO, + .ctl_offs = SPM_AUDIO_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(15, 12), + }, + [MT6795_POWER_DOMAIN_MFG_ASYNC] = { + .name = "mfg_async", + .sta_mask = PWR_STATUS_MFG_ASYNC, + .ctl_offs = SPM_MFG_ASYNC_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = 0, + }, + [MT6795_POWER_DOMAIN_MFG_2D] = { + .name = "mfg_2d", + .sta_mask = PWR_STATUS_MFG_2D, + .ctl_offs = SPM_MFG_2D_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(11, 8), + .sram_pdn_ack_bits = GENMASK(13, 12), + }, + [MT6795_POWER_DOMAIN_MFG] = { + .name = "mfg", + .sta_mask = PWR_STATUS_MFG, + .ctl_offs = SPM_MFG_PWR_CON, + .pwr_sta_offs = SPM_PWR_STATUS, + .pwr_sta2nd_offs = SPM_PWR_STATUS_2ND, + .sram_pdn_bits = GENMASK(13, 8), + .sram_pdn_ack_bits = GENMASK(21, 16), + .bp_infracfg = { + 
BUS_PROT_UPDATE_TOPAXI(MT8173_TOP_AXI_PROT_EN_MFG_S | + MT8173_TOP_AXI_PROT_EN_MFG_M0 | + MT8173_TOP_AXI_PROT_EN_MFG_M1 | + MT8173_TOP_AXI_PROT_EN_MFG_SNOOP_OUT), + }, + }, +}; + +static const struct scpsys_soc_data mt6795_scpsys_data = { + .domains_data = scpsys_domain_data_mt6795, + .num_domains = ARRAY_SIZE(scpsys_domain_data_mt6795), +}; + +#endif /* __SOC_MEDIATEK_MT6795_PM_DOMAINS_H */ diff --git a/drivers/soc/mediatek/mt8183-pm-domains.h b/drivers/soc/mediatek/mt8183-pm-domains.h index 71b8757e552d..99de67fe5de8 100644 --- a/drivers/soc/mediatek/mt8183-pm-domains.h +++ b/drivers/soc/mediatek/mt8183-pm-domains.h @@ -41,6 +41,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8183[] = { .pwr_sta2nd_offs = 0x0184, .sram_pdn_bits = 0, .sram_pdn_ack_bits = 0, + .caps = MTK_SCPD_DOMAIN_SUPPLY, }, [MT8183_POWER_DOMAIN_MFG] = { .name = "mfg", diff --git a/drivers/soc/mediatek/mt8186-pm-domains.h b/drivers/soc/mediatek/mt8186-pm-domains.h index bf2dd0cdc3a8..108af61854a3 100644 --- a/drivers/soc/mediatek/mt8186-pm-domains.h +++ b/drivers/soc/mediatek/mt8186-pm-domains.h @@ -51,7 +51,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8186[] = { MT8186_TOP_AXI_PROT_EN_1_CLR, MT8186_TOP_AXI_PROT_EN_1_STA), }, - .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, }, [MT8186_POWER_DOMAIN_MFG2] = { .name = "mfg2", diff --git a/drivers/soc/mediatek/mt8192-pm-domains.h b/drivers/soc/mediatek/mt8192-pm-domains.h index 558c4ee4784a..b97b2051920f 100644 --- a/drivers/soc/mediatek/mt8192-pm-domains.h +++ b/drivers/soc/mediatek/mt8192-pm-domains.h @@ -58,6 +58,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { .pwr_sta2nd_offs = 0x0170, .sram_pdn_bits = GENMASK(8, 8), .sram_pdn_ack_bits = GENMASK(12, 12), + .caps = MTK_SCPD_DOMAIN_SUPPLY, }, [MT8192_POWER_DOMAIN_MFG1] = { .name = "mfg1", @@ -85,6 +86,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8192[] = { MT8192_TOP_AXI_PROT_EN_2_CLR, MT8192_TOP_AXI_PROT_EN_2_STA1), }, + .caps = MTK_SCPD_DOMAIN_SUPPLY, }, [MT8192_POWER_DOMAIN_MFG2] = { .name = "mfg2", diff --git a/drivers/soc/mediatek/mt8195-pm-domains.h b/drivers/soc/mediatek/mt8195-pm-domains.h index 938f4d51f5ae..d7387ea1b9c9 100644 --- a/drivers/soc/mediatek/mt8195-pm-domains.h +++ b/drivers/soc/mediatek/mt8195-pm-domains.h @@ -67,7 +67,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = { .ctl_offs = 0x334, .pwr_sta_offs = 0x174, .pwr_sta2nd_offs = 0x178, - .caps = MTK_SCPD_ACTIVE_WAKEUP, + .caps = MTK_SCPD_ACTIVE_WAKEUP | MTK_SCPD_ALWAYS_ON, }, [MT8195_POWER_DOMAIN_CSI_RX_TOP] = { .name = "csi_rx_top", @@ -162,7 +162,7 @@ static const struct scpsys_domain_data scpsys_domain_data_mt8195[] = { MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_CLR, MT8195_TOP_AXI_PROT_EN_SUB_INFRA_VDNR_STA1), }, - .caps = MTK_SCPD_KEEP_DEFAULT_OFF, + .caps = MTK_SCPD_KEEP_DEFAULT_OFF | MTK_SCPD_DOMAIN_SUPPLY, }, [MT8195_POWER_DOMAIN_MFG2] = { .name = "mfg2", diff --git a/drivers/soc/mediatek/mt8365-mmsys.h b/drivers/soc/mediatek/mt8365-mmsys.h index 24129a6c25f8..7abaf048d91e 100644 --- a/drivers/soc/mediatek/mt8365-mmsys.h +++ b/drivers/soc/mediatek/mt8365-mmsys.h @@ -10,6 +10,9 @@ #define MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN 0xf60 #define MT8365_DISP_REG_CONFIG_DISP_COLOR0_SEL_IN 0xf64 #define MT8365_DISP_REG_CONFIG_DISP_DSI0_SEL_IN 0xf68 +#define MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL 0xfd0 +#define MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN 0xfd8 +#define 
MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00 0xfdc #define MT8365_RDMA0_SOUT_COLOR0 0x1 #define MT8365_DITHER_MOUT_EN_DSI0 0x1 @@ -18,6 +21,10 @@ #define MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 0x0 #define MT8365_DISP_COLOR_SEL_IN_COLOR0 0x0 #define MT8365_OVL0_MOUT_PATH0_SEL BIT(0) +#define MT8365_RDMA1_SOUT_DPI0 0x1 +#define MT8365_DPI0_SEL_IN_RDMA1 0x0 +#define MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK 0x1 +#define MT8365_DPI0_SEL_IN_RDMA1 0x0 static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = { { @@ -55,6 +62,21 @@ static const struct mtk_mmsys_routes mt8365_mmsys_routing_table[] = { MT8365_DISP_REG_CONFIG_DISP_RDMA0_RSZ0_SEL_IN, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0, MT8365_RDMA0_RSZ0_SEL_IN_RDMA0 }, + { + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, + MT8365_DISP_REG_CONFIG_DISP_LVDS_SYS_CFG_00, + MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK, MT8365_LVDS_SYS_CFG_00_SEL_LVDS_PXL_CLK + }, + { + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, + MT8365_DISP_REG_CONFIG_DISP_DPI0_SEL_IN, + MT8365_DPI0_SEL_IN_RDMA1, MT8365_DPI0_SEL_IN_RDMA1 + }, + { + DDP_COMPONENT_RDMA1, DDP_COMPONENT_DPI0, + MT8365_DISP_REG_CONFIG_DISP_RDMA1_SOUT_SEL, + MT8365_RDMA1_SOUT_DPI0, MT8365_RDMA1_SOUT_DPI0 + }, }; #endif /* __SOC_MEDIATEK_MT8365_MMSYS_H */ diff --git a/drivers/soc/mediatek/mtk-devapc.c b/drivers/soc/mediatek/mtk-devapc.c index 7c65ad3d1f8a..fc13334db1b1 100644 --- a/drivers/soc/mediatek/mtk-devapc.c +++ b/drivers/soc/mediatek/mtk-devapc.c @@ -31,10 +31,7 @@ struct mtk_devapc_vio_dbgs { u32 vio_dbg1; }; -struct mtk_devapc_data { - /* numbers of violation index */ - u32 vio_idx_num; - +struct mtk_devapc_regs_ofs { /* reg offset */ u32 vio_mask_offset; u32 vio_sta_offset; @@ -46,6 +43,12 @@ struct mtk_devapc_data { u32 vio_shift_con_offset; }; +struct mtk_devapc_data { + /* numbers of violation index */ + u32 vio_idx_num; + const struct mtk_devapc_regs_ofs *regs_ofs; +}; + struct mtk_devapc_context { struct device *dev; void __iomem *infra_base; @@ -58,7 +61,7 @@ static void clear_vio_status(struct mtk_devapc_context *ctx) void __iomem *reg; int i; - reg = ctx->infra_base + ctx->data->vio_sta_offset; + reg = ctx->infra_base + ctx->data->regs_ofs->vio_sta_offset; for (i = 0; i < VIO_MOD_TO_REG_IND(ctx->data->vio_idx_num) - 1; i++) writel(GENMASK(31, 0), reg + 4 * i); @@ -73,7 +76,7 @@ static void mask_module_irq(struct mtk_devapc_context *ctx, bool mask) u32 val; int i; - reg = ctx->infra_base + ctx->data->vio_mask_offset; + reg = ctx->infra_base + ctx->data->regs_ofs->vio_mask_offset; if (mask) val = GENMASK(31, 0); @@ -116,11 +119,11 @@ static int devapc_sync_vio_dbg(struct mtk_devapc_context *ctx) u32 val; pd_vio_shift_sta_reg = ctx->infra_base + - ctx->data->vio_shift_sta_offset; + ctx->data->regs_ofs->vio_shift_sta_offset; pd_vio_shift_sel_reg = ctx->infra_base + - ctx->data->vio_shift_sel_offset; + ctx->data->regs_ofs->vio_shift_sel_offset; pd_vio_shift_con_reg = ctx->infra_base + - ctx->data->vio_shift_con_offset; + ctx->data->regs_ofs->vio_shift_con_offset; /* Find the minimum shift group which has violation */ val = readl(pd_vio_shift_sta_reg); @@ -161,8 +164,8 @@ static void devapc_extract_vio_dbg(struct mtk_devapc_context *ctx) void __iomem *vio_dbg0_reg; void __iomem *vio_dbg1_reg; - vio_dbg0_reg = ctx->infra_base + ctx->data->vio_dbg0_offset; - vio_dbg1_reg = ctx->infra_base + ctx->data->vio_dbg1_offset; + vio_dbg0_reg = ctx->infra_base + ctx->data->regs_ofs->vio_dbg0_offset; + vio_dbg1_reg = ctx->infra_base + ctx->data->regs_ofs->vio_dbg1_offset; vio_dbgs.vio_dbg0 = readl(vio_dbg0_reg); 
vio_dbgs.vio_dbg1 = readl(vio_dbg1_reg); @@ -200,7 +203,7 @@ static irqreturn_t devapc_violation_irq(int irq_number, void *data) */ static void start_devapc(struct mtk_devapc_context *ctx) { - writel(BIT(31), ctx->infra_base + ctx->data->apc_con_offset); + writel(BIT(31), ctx->infra_base + ctx->data->regs_ofs->apc_con_offset); mask_module_irq(ctx, false); } @@ -212,11 +215,10 @@ static void stop_devapc(struct mtk_devapc_context *ctx) { mask_module_irq(ctx, true); - writel(BIT(2), ctx->infra_base + ctx->data->apc_con_offset); + writel(BIT(2), ctx->infra_base + ctx->data->regs_ofs->apc_con_offset); } -static const struct mtk_devapc_data devapc_mt6779 = { - .vio_idx_num = 511, +static const struct mtk_devapc_regs_ofs devapc_regs_ofs_mt6779 = { .vio_mask_offset = 0x0, .vio_sta_offset = 0x400, .vio_dbg0_offset = 0x900, @@ -227,11 +229,24 @@ static const struct mtk_devapc_data devapc_mt6779 = { .vio_shift_con_offset = 0xF20, }; +static const struct mtk_devapc_data devapc_mt6779 = { + .vio_idx_num = 511, + .regs_ofs = &devapc_regs_ofs_mt6779, +}; + +static const struct mtk_devapc_data devapc_mt8186 = { + .vio_idx_num = 519, + .regs_ofs = &devapc_regs_ofs_mt6779, +}; + static const struct of_device_id mtk_devapc_dt_match[] = { { .compatible = "mediatek,mt6779-devapc", .data = &devapc_mt6779, }, { + .compatible = "mediatek,mt8186-devapc", + .data = &devapc_mt8186, + }, { }, }; MODULE_DEVICE_TABLE(of, mtk_devapc_dt_match); diff --git a/drivers/soc/mediatek/mtk-mutex.c b/drivers/soc/mediatek/mtk-mutex.c index 981d56967e7a..5ea43de4e410 100644 --- a/drivers/soc/mediatek/mtk-mutex.c +++ b/drivers/soc/mediatek/mtk-mutex.c @@ -7,10 +7,12 @@ #include <linux/iopoll.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/soc/mediatek/mtk-mmsys.h> #include <linux/soc/mediatek/mtk-mutex.h> +#include <linux/soc/mediatek/mtk-cmdq.h> #define MT2701_MUTEX0_MOD0 0x2c #define MT2701_MUTEX0_SOF0 0x30 @@ -80,6 +82,15 @@ #define MT8183_MUTEX_MOD_DISP_GAMMA0 16 #define MT8183_MUTEX_MOD_DISP_DITHER0 17 +#define MT8183_MUTEX_MOD_MDP_RDMA0 2 +#define MT8183_MUTEX_MOD_MDP_RSZ0 4 +#define MT8183_MUTEX_MOD_MDP_RSZ1 5 +#define MT8183_MUTEX_MOD_MDP_TDSHP0 6 +#define MT8183_MUTEX_MOD_MDP_WROT0 7 +#define MT8183_MUTEX_MOD_MDP_WDMA 8 +#define MT8183_MUTEX_MOD_MDP_AAL0 23 +#define MT8183_MUTEX_MOD_MDP_CCORR0 24 + #define MT8173_MUTEX_MOD_DISP_OVL0 11 #define MT8173_MUTEX_MOD_DISP_OVL1 12 #define MT8173_MUTEX_MOD_DISP_RDMA0 13 @@ -110,6 +121,20 @@ #define MT8195_MUTEX_MOD_DISP_DP_INTF0 21 #define MT8195_MUTEX_MOD_DISP_PWM0 27 +#define MT8365_MUTEX_MOD_DISP_OVL0 7 +#define MT8365_MUTEX_MOD_DISP_OVL0_2L 8 +#define MT8365_MUTEX_MOD_DISP_RDMA0 9 +#define MT8365_MUTEX_MOD_DISP_RDMA1 10 +#define MT8365_MUTEX_MOD_DISP_WDMA0 11 +#define MT8365_MUTEX_MOD_DISP_COLOR0 12 +#define MT8365_MUTEX_MOD_DISP_CCORR 13 +#define MT8365_MUTEX_MOD_DISP_AAL 14 +#define MT8365_MUTEX_MOD_DISP_GAMMA 15 +#define MT8365_MUTEX_MOD_DISP_DITHER 16 +#define MT8365_MUTEX_MOD_DISP_DSI0 17 +#define MT8365_MUTEX_MOD_DISP_PWM0 20 +#define MT8365_MUTEX_MOD_DISP_DPI0 22 + #define MT2712_MUTEX_MOD_DISP_PWM2 10 #define MT2712_MUTEX_MOD_DISP_OVL0 11 #define MT2712_MUTEX_MOD_DISP_OVL1 12 @@ -185,6 +210,7 @@ struct mtk_mutex_data { const unsigned int *mutex_sof; const unsigned int mutex_mod_reg; const unsigned int mutex_sof_reg; + const unsigned int *mutex_table_mod; const bool no_clk; }; @@ -194,6 +220,8 @@ struct mtk_mutex_ctx { void __iomem *regs; struct mtk_mutex 
mutex[10]; const struct mtk_mutex_data *data; + phys_addr_t addr; + struct cmdq_client_reg cmdq_reg; }; static const unsigned int mt2701_mutex_mod[DDP_COMPONENT_ID_MAX] = { @@ -272,6 +300,17 @@ static const unsigned int mt8183_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_WDMA0] = MT8183_MUTEX_MOD_DISP_WDMA0, }; +static const unsigned int mt8183_mutex_table_mod[MUTEX_MOD_IDX_MAX] = { + [MUTEX_MOD_IDX_MDP_RDMA0] = MT8183_MUTEX_MOD_MDP_RDMA0, + [MUTEX_MOD_IDX_MDP_RSZ0] = MT8183_MUTEX_MOD_MDP_RSZ0, + [MUTEX_MOD_IDX_MDP_RSZ1] = MT8183_MUTEX_MOD_MDP_RSZ1, + [MUTEX_MOD_IDX_MDP_TDSHP0] = MT8183_MUTEX_MOD_MDP_TDSHP0, + [MUTEX_MOD_IDX_MDP_WROT0] = MT8183_MUTEX_MOD_MDP_WROT0, + [MUTEX_MOD_IDX_MDP_WDMA] = MT8183_MUTEX_MOD_MDP_WDMA, + [MUTEX_MOD_IDX_MDP_AAL0] = MT8183_MUTEX_MOD_MDP_AAL0, + [MUTEX_MOD_IDX_MDP_CCORR0] = MT8183_MUTEX_MOD_MDP_CCORR0, +}; + static const unsigned int mt8186_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_AAL0] = MT8186_MUTEX_MOD_DISP_AAL0, [DDP_COMPONENT_CCORR] = MT8186_MUTEX_MOD_DISP_CCORR0, @@ -315,6 +354,22 @@ static const unsigned int mt8195_mutex_mod[DDP_COMPONENT_ID_MAX] = { [DDP_COMPONENT_DP_INTF0] = MT8195_MUTEX_MOD_DISP_DP_INTF0, }; +static const unsigned int mt8365_mutex_mod[DDP_COMPONENT_ID_MAX] = { + [DDP_COMPONENT_AAL0] = MT8365_MUTEX_MOD_DISP_AAL, + [DDP_COMPONENT_CCORR] = MT8365_MUTEX_MOD_DISP_CCORR, + [DDP_COMPONENT_COLOR0] = MT8365_MUTEX_MOD_DISP_COLOR0, + [DDP_COMPONENT_DITHER0] = MT8365_MUTEX_MOD_DISP_DITHER, + [DDP_COMPONENT_DPI0] = MT8365_MUTEX_MOD_DISP_DPI0, + [DDP_COMPONENT_DSI0] = MT8365_MUTEX_MOD_DISP_DSI0, + [DDP_COMPONENT_GAMMA] = MT8365_MUTEX_MOD_DISP_GAMMA, + [DDP_COMPONENT_OVL0] = MT8365_MUTEX_MOD_DISP_OVL0, + [DDP_COMPONENT_OVL_2L0] = MT8365_MUTEX_MOD_DISP_OVL0_2L, + [DDP_COMPONENT_PWM0] = MT8365_MUTEX_MOD_DISP_PWM0, + [DDP_COMPONENT_RDMA0] = MT8365_MUTEX_MOD_DISP_RDMA0, + [DDP_COMPONENT_RDMA1] = MT8365_MUTEX_MOD_DISP_RDMA1, + [DDP_COMPONENT_WDMA0] = MT8365_MUTEX_MOD_DISP_WDMA0, +}; + static const unsigned int mt2712_mutex_sof[DDP_MUTEX_SOF_MAX] = { [MUTEX_SOF_SINGLE_MODE] = MUTEX_SOF_SINGLE_MODE, [MUTEX_SOF_DSI0] = MUTEX_SOF_DSI0, @@ -399,6 +454,7 @@ static const struct mtk_mutex_data mt8183_mutex_driver_data = { .mutex_sof = mt8183_mutex_sof, .mutex_mod_reg = MT8183_MUTEX0_MOD0, .mutex_sof_reg = MT8183_MUTEX0_SOF0, + .mutex_table_mod = mt8183_mutex_table_mod, .no_clk = true, }; @@ -423,6 +479,14 @@ static const struct mtk_mutex_data mt8195_mutex_driver_data = { .mutex_sof_reg = MT8183_MUTEX0_SOF0, }; +static const struct mtk_mutex_data mt8365_mutex_driver_data = { + .mutex_mod = mt8365_mutex_mod, + .mutex_sof = mt8183_mutex_sof, + .mutex_mod_reg = MT8183_MUTEX0_MOD0, + .mutex_sof_reg = MT8183_MUTEX0_SOF0, + .no_clk = true, +}; + struct mtk_mutex *mtk_mutex_get(struct device *dev) { struct mtk_mutex_ctx *mtx = dev_get_drvdata(dev); @@ -572,6 +636,30 @@ void mtk_mutex_enable(struct mtk_mutex *mutex) } EXPORT_SYMBOL_GPL(mtk_mutex_enable); +int mtk_mutex_enable_by_cmdq(struct mtk_mutex *mutex, void *pkt) +{ + struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx, + mutex[mutex->id]); +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + struct cmdq_pkt *cmdq_pkt = (struct cmdq_pkt *)pkt; + + WARN_ON(&mtx->mutex[mutex->id] != mutex); + + if (!mtx->cmdq_reg.size) { + dev_err(mtx->dev, "mediatek,gce-client-reg hasn't been set"); + return -EINVAL; + } + + cmdq_pkt_write(cmdq_pkt, mtx->cmdq_reg.subsys, + mtx->addr + DISP_REG_MUTEX_EN(mutex->id), 1); + return 0; +#else + dev_err(mtx->dev, "Not support for enable MUTEX by CMDQ"); + return 
-ENODEV; +#endif +} +EXPORT_SYMBOL_GPL(mtk_mutex_enable_by_cmdq); + void mtk_mutex_disable(struct mtk_mutex *mutex) { struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx, @@ -606,12 +694,67 @@ void mtk_mutex_release(struct mtk_mutex *mutex) } EXPORT_SYMBOL_GPL(mtk_mutex_release); +int mtk_mutex_write_mod(struct mtk_mutex *mutex, + enum mtk_mutex_mod_index idx, bool clear) +{ + struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx, + mutex[mutex->id]); + unsigned int reg; + unsigned int offset; + + WARN_ON(&mtx->mutex[mutex->id] != mutex); + + if (idx < MUTEX_MOD_IDX_MDP_RDMA0 || + idx >= MUTEX_MOD_IDX_MAX) { + dev_err(mtx->dev, "Not supported MOD table index : %d", idx); + return -EINVAL; + } + + offset = DISP_REG_MUTEX_MOD(mtx->data->mutex_mod_reg, + mutex->id); + reg = readl_relaxed(mtx->regs + offset); + + if (clear) + reg &= ~BIT(mtx->data->mutex_table_mod[idx]); + else + reg |= BIT(mtx->data->mutex_table_mod[idx]); + + writel_relaxed(reg, mtx->regs + offset); + + return 0; +} +EXPORT_SYMBOL_GPL(mtk_mutex_write_mod); + +int mtk_mutex_write_sof(struct mtk_mutex *mutex, + enum mtk_mutex_sof_index idx) +{ + struct mtk_mutex_ctx *mtx = container_of(mutex, struct mtk_mutex_ctx, + mutex[mutex->id]); + + WARN_ON(&mtx->mutex[mutex->id] != mutex); + + if (idx < MUTEX_SOF_IDX_SINGLE_MODE || + idx >= MUTEX_SOF_IDX_MAX) { + dev_err(mtx->dev, "Not supported SOF index : %d", idx); + return -EINVAL; + } + + writel_relaxed(idx, mtx->regs + + DISP_REG_MUTEX_SOF(mtx->data->mutex_sof_reg, mutex->id)); + + return 0; +} +EXPORT_SYMBOL_GPL(mtk_mutex_write_sof); + static int mtk_mutex_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct mtk_mutex_ctx *mtx; struct resource *regs; int i; +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + int ret; +#endif mtx = devm_kzalloc(dev, sizeof(*mtx), GFP_KERNEL); if (!mtx) @@ -631,12 +774,18 @@ static int mtk_mutex_probe(struct platform_device *pdev) } } - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - mtx->regs = devm_ioremap_resource(dev, regs); + mtx->regs = devm_platform_get_and_ioremap_resource(pdev, 0, ®s); if (IS_ERR(mtx->regs)) { dev_err(dev, "Failed to map mutex registers\n"); return PTR_ERR(mtx->regs); } + mtx->addr = regs->start; + +#if IS_REACHABLE(CONFIG_MTK_CMDQ) + ret = cmdq_dev_get_client_reg(dev, &mtx->cmdq_reg, 0); + if (ret) + dev_dbg(dev, "No mediatek,gce-client-reg!\n"); +#endif platform_set_drvdata(pdev, mtx); @@ -665,6 +814,8 @@ static const struct of_device_id mutex_driver_dt_match[] = { .data = &mt8192_mutex_driver_data}, { .compatible = "mediatek,mt8195-disp-mutex", .data = &mt8195_mutex_driver_data}, + { .compatible = "mediatek,mt8365-disp-mutex", + .data = &mt8365_mutex_driver_data}, {}, }; MODULE_DEVICE_TABLE(of, mutex_driver_dt_match); diff --git a/drivers/soc/mediatek/mtk-pm-domains.c b/drivers/soc/mediatek/mtk-pm-domains.c index 5ced254b082b..9734f1091c69 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.c +++ b/drivers/soc/mediatek/mtk-pm-domains.c @@ -16,6 +16,7 @@ #include <linux/regulator/consumer.h> #include <linux/soc/mediatek/infracfg.h> +#include "mt6795-pm-domains.h" #include "mt8167-pm-domains.h" #include "mt8173-pm-domains.h" #include "mt8183-pm-domains.h" @@ -428,6 +429,9 @@ generic_pm_domain *scpsys_add_one_domain(struct scpsys *scpsys, struct device_no dev_err(scpsys->dev, "%pOF: failed to power on domain: %d\n", node, ret); goto err_put_subsys_clocks; } + + if (MTK_SCPD_CAPS(pd, MTK_SCPD_ALWAYS_ON)) + pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON; } if (scpsys->domains[id]) { 
@@ -556,6 +560,10 @@ static void scpsys_domain_cleanup(struct scpsys *scpsys) static const struct of_device_id scpsys_of_match[] = { { + .compatible = "mediatek,mt6795-power-controller", + .data = &mt6795_scpsys_data, + }, + { .compatible = "mediatek,mt8167-power-controller", .data = &mt8167_scpsys_data, }, diff --git a/drivers/soc/mediatek/mtk-pm-domains.h b/drivers/soc/mediatek/mtk-pm-domains.h index daa24e890dd4..7d3c0c36316c 100644 --- a/drivers/soc/mediatek/mtk-pm-domains.h +++ b/drivers/soc/mediatek/mtk-pm-domains.h @@ -8,6 +8,8 @@ #define MTK_SCPD_SRAM_ISO BIT(2) #define MTK_SCPD_KEEP_DEFAULT_OFF BIT(3) #define MTK_SCPD_DOMAIN_SUPPLY BIT(4) +/* can't set MTK_SCPD_KEEP_DEFAULT_OFF at the same time */ +#define MTK_SCPD_ALWAYS_ON BIT(5) #define MTK_SCPD_CAPS(_scpd, _x) ((_scpd)->data->caps & (_x)) #define SPM_VDE_PWR_CON 0x0210 diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index bf39a64f3ecc..d8cb0f833645 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -13,6 +13,9 @@ #include <linux/regmap.h> #include <linux/reset.h> +#define PWRAP_POLL_DELAY_US 10 +#define PWRAP_POLL_TIMEOUT_US 10000 + #define PWRAP_MT8135_BRIDGE_IORD_ARB_EN 0x4 #define PWRAP_MT8135_BRIDGE_WACS3_EN 0x10 #define PWRAP_MT8135_BRIDGE_INIT_DONE3 0x14 @@ -1140,12 +1143,9 @@ enum pwrap_type { }; struct pmic_wrapper; -struct pwrap_slv_type { - const u32 *dew_regs; - enum pmic_type type; + +struct pwrap_slv_regops { const struct regmap_config *regmap; - /* Flags indicating the capability for the target slave */ - u32 caps; /* * pwrap operations are highly associated with the PMIC types, * so the pointers added increases flexibility allowing determination @@ -1155,6 +1155,14 @@ struct pwrap_slv_type { int (*pwrap_write)(struct pmic_wrapper *wrp, u32 adr, u32 wdata); }; +struct pwrap_slv_type { + const u32 *dew_regs; + enum pmic_type type; + const struct pwrap_slv_regops *regops; + /* Flags indicating the capability for the target slave */ + u32 caps; +}; + struct pmic_wrapper { struct device *dev; void __iomem *base; @@ -1241,27 +1249,14 @@ static bool pwrap_is_fsm_idle_and_sync_idle(struct pmic_wrapper *wrp) (val & PWRAP_STATE_SYNC_IDLE0); } -static int pwrap_wait_for_state(struct pmic_wrapper *wrp, - bool (*fp)(struct pmic_wrapper *)) -{ - unsigned long timeout; - - timeout = jiffies + usecs_to_jiffies(10000); - - do { - if (time_after(jiffies, timeout)) - return fp(wrp) ? 
0 : -ETIMEDOUT; - if (fp(wrp)) - return 0; - } while (1); -} - static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) { + bool tmp; int ret; u32 val; - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { pwrap_leave_fsm_vldclr(wrp); return ret; @@ -1273,7 +1268,8 @@ static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) val = (adr >> 1) << 16; pwrap_writel(wrp, val, PWRAP_WACS2_CMD); - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr); + ret = readx_poll_timeout(pwrap_is_fsm_vldclr, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) return ret; @@ -1290,11 +1286,14 @@ static int pwrap_read16(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) { + bool tmp; int ret, msb; *rdata = 0; for (msb = 0; msb < 2; msb++) { - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); + if (ret) { pwrap_leave_fsm_vldclr(wrp); return ret; @@ -1303,7 +1302,8 @@ static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) pwrap_writel(wrp, ((msb << 30) | (adr << 16)), PWRAP_WACS2_CMD); - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_vldclr); + ret = readx_poll_timeout(pwrap_is_fsm_vldclr, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) return ret; @@ -1318,14 +1318,16 @@ static int pwrap_read32(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata) { - return wrp->slave->pwrap_read(wrp, adr, rdata); + return wrp->slave->regops->pwrap_read(wrp, adr, rdata); } static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata) { + bool tmp; int ret; - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { pwrap_leave_fsm_vldclr(wrp); return ret; @@ -1344,10 +1346,12 @@ static int pwrap_write16(struct pmic_wrapper *wrp, u32 adr, u32 wdata) static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata) { + bool tmp; int ret, msb, rdata; for (msb = 0; msb < 2; msb++) { - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { pwrap_leave_fsm_vldclr(wrp); return ret; @@ -1373,7 +1377,7 @@ static int pwrap_write32(struct pmic_wrapper *wrp, u32 adr, u32 wdata) static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata) { - return wrp->slave->pwrap_write(wrp, adr, wdata); + return wrp->slave->regops->pwrap_write(wrp, adr, wdata); } static int pwrap_regmap_read(void *context, u32 adr, u32 *rdata) @@ -1388,6 +1392,7 @@ static int pwrap_regmap_write(void *context, u32 adr, u32 wdata) static int pwrap_reset_spislave(struct pmic_wrapper *wrp) { + bool tmp; int ret, i; pwrap_writel(wrp, 0, PWRAP_HIPRIO_ARB_EN); @@ -1407,7 +1412,8 @@ static int pwrap_reset_spislave(struct pmic_wrapper *wrp) pwrap_writel(wrp, wrp->master->spi_w | PWRAP_MAN_CMD_OP_OUTS, PWRAP_MAN_CMD); - ret = pwrap_wait_for_state(wrp, pwrap_is_sync_idle); + ret = readx_poll_timeout(pwrap_is_sync_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret); return ret; @@ -1458,14 +1464,15 @@ static int 
pwrap_init_sidly(struct pmic_wrapper *wrp) static int pwrap_init_dual_io(struct pmic_wrapper *wrp) { int ret; + bool tmp; u32 rdata; /* Enable dual IO mode */ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_DIO_EN], 1); /* Check IDLE & INIT_DONE in advance */ - ret = pwrap_wait_for_state(wrp, - pwrap_is_fsm_idle_and_sync_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle_and_sync_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "%s fail, ret=%d\n", __func__, ret); return ret; @@ -1570,6 +1577,7 @@ static bool pwrap_is_pmic_cipher_ready(struct pmic_wrapper *wrp) static int pwrap_init_cipher(struct pmic_wrapper *wrp) { int ret; + bool tmp; u32 rdata = 0; pwrap_writel(wrp, 0x1, PWRAP_CIPHER_SWRST); @@ -1624,14 +1632,16 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp) } /* wait for cipher data ready@AP */ - ret = pwrap_wait_for_state(wrp, pwrap_is_cipher_ready); + ret = readx_poll_timeout(pwrap_is_cipher_ready, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "cipher data ready@AP fail, ret=%d\n", ret); return ret; } /* wait for cipher data ready@PMIC */ - ret = pwrap_wait_for_state(wrp, pwrap_is_pmic_cipher_ready); + ret = readx_poll_timeout(pwrap_is_pmic_cipher_ready, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "timeout waiting for cipher data ready@PMIC\n"); @@ -1640,7 +1650,8 @@ static int pwrap_init_cipher(struct pmic_wrapper *wrp) /* wait for cipher mode idle */ pwrap_write(wrp, wrp->slave->dew_regs[PWRAP_DEW_CIPHER_MODE], 0x1); - ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle_and_sync_idle); + ret = readx_poll_timeout(pwrap_is_fsm_idle_and_sync_idle, wrp, tmp, tmp, + PWRAP_POLL_DELAY_US, PWRAP_POLL_TIMEOUT_US); if (ret) { dev_err(wrp->dev, "cipher mode idle fail, ret=%d\n", ret); return ret; @@ -1885,99 +1896,82 @@ static const struct regmap_config pwrap_regmap_config32 = { .max_register = 0xffff, }; +static const struct pwrap_slv_regops pwrap_regops16 = { + .pwrap_read = pwrap_read16, + .pwrap_write = pwrap_write16, + .regmap = &pwrap_regmap_config16, +}; + +static const struct pwrap_slv_regops pwrap_regops32 = { + .pwrap_read = pwrap_read32, + .pwrap_write = pwrap_write32, + .regmap = &pwrap_regmap_config32, +}; + static const struct pwrap_slv_type pmic_mt6323 = { .dew_regs = mt6323_regs, .type = PMIC_MT6323, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO | PWRAP_SLV_CAP_SECURITY, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6351 = { .dew_regs = mt6351_regs, .type = PMIC_MT6351, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = 0, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6357 = { .dew_regs = mt6357_regs, .type = PMIC_MT6357, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = 0, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6358 = { .dew_regs = mt6358_regs, .type = PMIC_MT6358, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6359 = { .dew_regs = mt6359_regs, .type = PMIC_MT6359, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = PWRAP_SLV_CAP_DUALIO, 
- .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct pwrap_slv_type pmic_mt6380 = { .dew_regs = NULL, .type = PMIC_MT6380, - .regmap = &pwrap_regmap_config32, + .regops = &pwrap_regops32, .caps = 0, - .pwrap_read = pwrap_read32, - .pwrap_write = pwrap_write32, }; static const struct pwrap_slv_type pmic_mt6397 = { .dew_regs = mt6397_regs, .type = PMIC_MT6397, - .regmap = &pwrap_regmap_config16, + .regops = &pwrap_regops16, .caps = PWRAP_SLV_CAP_SPI | PWRAP_SLV_CAP_DUALIO | PWRAP_SLV_CAP_SECURITY, - .pwrap_read = pwrap_read16, - .pwrap_write = pwrap_write16, }; static const struct of_device_id of_slave_match_tbl[] = { - { - .compatible = "mediatek,mt6323", - .data = &pmic_mt6323, - }, { - .compatible = "mediatek,mt6351", - .data = &pmic_mt6351, - }, { - .compatible = "mediatek,mt6357", - .data = &pmic_mt6357, - }, { - .compatible = "mediatek,mt6358", - .data = &pmic_mt6358, - }, { - .compatible = "mediatek,mt6359", - .data = &pmic_mt6359, - }, { - /* The MT6380 PMIC only implements a regulator, so we bind it - * directly instead of using a MFD. - */ - .compatible = "mediatek,mt6380-regulator", - .data = &pmic_mt6380, - }, { - .compatible = "mediatek,mt6397", - .data = &pmic_mt6397, - }, { - /* sentinel */ - } + { .compatible = "mediatek,mt6323", .data = &pmic_mt6323 }, + { .compatible = "mediatek,mt6351", .data = &pmic_mt6351 }, + { .compatible = "mediatek,mt6357", .data = &pmic_mt6357 }, + { .compatible = "mediatek,mt6358", .data = &pmic_mt6358 }, + { .compatible = "mediatek,mt6359", .data = &pmic_mt6359 }, + + /* The MT6380 PMIC only implements a regulator, so we bind it + * directly instead of using a MFD. + */ + { .compatible = "mediatek,mt6380-regulator", .data = &pmic_mt6380 }, + { .compatible = "mediatek,mt6397", .data = &pmic_mt6397 }, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_slave_match_tbl); @@ -2136,45 +2130,19 @@ static struct pmic_wrapper_type pwrap_mt8186 = { }; static const struct of_device_id of_pwrap_match_tbl[] = { - { - .compatible = "mediatek,mt2701-pwrap", - .data = &pwrap_mt2701, - }, { - .compatible = "mediatek,mt6765-pwrap", - .data = &pwrap_mt6765, - }, { - .compatible = "mediatek,mt6779-pwrap", - .data = &pwrap_mt6779, - }, { - .compatible = "mediatek,mt6797-pwrap", - .data = &pwrap_mt6797, - }, { - .compatible = "mediatek,mt6873-pwrap", - .data = &pwrap_mt6873, - }, { - .compatible = "mediatek,mt7622-pwrap", - .data = &pwrap_mt7622, - }, { - .compatible = "mediatek,mt8135-pwrap", - .data = &pwrap_mt8135, - }, { - .compatible = "mediatek,mt8173-pwrap", - .data = &pwrap_mt8173, - }, { - .compatible = "mediatek,mt8183-pwrap", - .data = &pwrap_mt8183, - }, { - .compatible = "mediatek,mt8186-pwrap", - .data = &pwrap_mt8186, - }, { - .compatible = "mediatek,mt8195-pwrap", - .data = &pwrap_mt8195, - }, { - .compatible = "mediatek,mt8516-pwrap", - .data = &pwrap_mt8516, - }, { - /* sentinel */ - } + { .compatible = "mediatek,mt2701-pwrap", .data = &pwrap_mt2701 }, + { .compatible = "mediatek,mt6765-pwrap", .data = &pwrap_mt6765 }, + { .compatible = "mediatek,mt6779-pwrap", .data = &pwrap_mt6779 }, + { .compatible = "mediatek,mt6797-pwrap", .data = &pwrap_mt6797 }, + { .compatible = "mediatek,mt6873-pwrap", .data = &pwrap_mt6873 }, + { .compatible = "mediatek,mt7622-pwrap", .data = &pwrap_mt7622 }, + { .compatible = "mediatek,mt8135-pwrap", .data = &pwrap_mt8135 }, + { .compatible = "mediatek,mt8173-pwrap", .data = &pwrap_mt8173 }, + { .compatible = "mediatek,mt8183-pwrap", .data = &pwrap_mt8183 }, + { .compatible = 
"mediatek,mt8186-pwrap", .data = &pwrap_mt8186 }, + { .compatible = "mediatek,mt8195-pwrap", .data = &pwrap_mt8195 }, + { .compatible = "mediatek,mt8516-pwrap", .data = &pwrap_mt8516 }, + { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, of_pwrap_match_tbl); @@ -2185,7 +2153,6 @@ static int pwrap_probe(struct platform_device *pdev) struct pmic_wrapper *wrp; struct device_node *np = pdev->dev.of_node; const struct of_device_id *of_slave_id = NULL; - struct resource *res; if (np->child) of_slave_id = of_match_node(of_slave_match_tbl, np->child); @@ -2205,8 +2172,7 @@ static int pwrap_probe(struct platform_device *pdev) wrp->slave = of_slave_id->data; wrp->dev = &pdev->dev; - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pwrap"); - wrp->base = devm_ioremap_resource(wrp->dev, res); + wrp->base = devm_platform_ioremap_resource_byname(pdev, "pwrap"); if (IS_ERR(wrp->base)) return PTR_ERR(wrp->base); @@ -2220,9 +2186,7 @@ static int pwrap_probe(struct platform_device *pdev) } if (HAS_CAP(wrp->master->caps, PWRAP_CAP_BRIDGE)) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "pwrap-bridge"); - wrp->bridge_base = devm_ioremap_resource(wrp->dev, res); + wrp->bridge_base = devm_platform_ioremap_resource_byname(pdev, "pwrap-bridge"); if (IS_ERR(wrp->bridge_base)) return PTR_ERR(wrp->bridge_base); @@ -2315,13 +2279,18 @@ static int pwrap_probe(struct platform_device *pdev) pwrap_writel(wrp, wrp->master->int1_en_all, PWRAP_INT1_EN); irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto err_out2; + } + ret = devm_request_irq(wrp->dev, irq, pwrap_interrupt, IRQF_TRIGGER_HIGH, "mt-pmic-pwrap", wrp); if (ret) goto err_out2; - wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regmap); + wrp->regmap = devm_regmap_init(wrp->dev, NULL, wrp, wrp->slave->regops->regmap); if (IS_ERR(wrp->regmap)) { ret = PTR_ERR(wrp->regmap); goto err_out2; diff --git a/drivers/soc/mediatek/mtk-svs.c b/drivers/soc/mediatek/mtk-svs.c new file mode 100644 index 000000000000..dee8664a12fd --- /dev/null +++ b/drivers/soc/mediatek/mtk-svs.c @@ -0,0 +1,2403 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2022 MediaTek Inc. 
+ */ + +#include <linux/bits.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/cpuidle.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/nvmem-consumer.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> +#include <linux/pm_domain.h> +#include <linux/pm_opp.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/thermal.h> + +/* svs bank 1-line software id */ +#define SVSB_CPU_LITTLE BIT(0) +#define SVSB_CPU_BIG BIT(1) +#define SVSB_CCI BIT(2) +#define SVSB_GPU BIT(3) + +/* svs bank 2-line type */ +#define SVSB_LOW BIT(8) +#define SVSB_HIGH BIT(9) + +/* svs bank mode support */ +#define SVSB_MODE_ALL_DISABLE 0 +#define SVSB_MODE_INIT01 BIT(1) +#define SVSB_MODE_INIT02 BIT(2) +#define SVSB_MODE_MON BIT(3) + +/* svs bank volt flags */ +#define SVSB_INIT01_PD_REQ BIT(0) +#define SVSB_INIT01_VOLT_IGNORE BIT(1) +#define SVSB_INIT01_VOLT_INC_ONLY BIT(2) +#define SVSB_MON_VOLT_IGNORE BIT(16) +#define SVSB_REMOVE_DVTFIXED_VOLT BIT(24) + +/* svs bank register common configuration */ +#define SVSB_DET_MAX 0xffff +#define SVSB_DET_WINDOW 0xa28 +#define SVSB_DTHI 0x1 +#define SVSB_DTLO 0xfe +#define SVSB_EN_INIT01 0x1 +#define SVSB_EN_INIT02 0x5 +#define SVSB_EN_MON 0x2 +#define SVSB_EN_OFF 0x0 +#define SVSB_INTEN_INIT0x 0x00005f01 +#define SVSB_INTEN_MONVOPEN 0x00ff0000 +#define SVSB_INTSTS_CLEAN 0x00ffffff +#define SVSB_INTSTS_COMPLETE 0x1 +#define SVSB_INTSTS_MONVOP 0x00ff0000 +#define SVSB_RUNCONFIG_DEFAULT 0x80000000 + +/* svs bank related setting */ +#define BITS8 8 +#define MAX_OPP_ENTRIES 16 +#define REG_BYTES 4 +#define SVSB_DC_SIGNED_BIT BIT(15) +#define SVSB_DET_CLK_EN BIT(31) +#define SVSB_TEMP_LOWER_BOUND 0xb2 +#define SVSB_TEMP_UPPER_BOUND 0x64 + +static DEFINE_SPINLOCK(svs_lock); + +#define debug_fops_ro(name) \ + static int svs_##name##_debug_open(struct inode *inode, \ + struct file *filp) \ + { \ + return single_open(filp, svs_##name##_debug_show, \ + inode->i_private); \ + } \ + static const struct file_operations svs_##name##_debug_fops = { \ + .owner = THIS_MODULE, \ + .open = svs_##name##_debug_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +#define debug_fops_rw(name) \ + static int svs_##name##_debug_open(struct inode *inode, \ + struct file *filp) \ + { \ + return single_open(filp, svs_##name##_debug_show, \ + inode->i_private); \ + } \ + static const struct file_operations svs_##name##_debug_fops = { \ + .owner = THIS_MODULE, \ + .open = svs_##name##_debug_open, \ + .read = seq_read, \ + .write = svs_##name##_debug_write, \ + .llseek = seq_lseek, \ + .release = single_release, \ + } + +#define svs_dentry_data(name) {__stringify(name), &svs_##name##_debug_fops} + +/** + * enum svsb_phase - svs bank phase enumeration + * @SVSB_PHASE_ERROR: svs bank encounters unexpected condition + * @SVSB_PHASE_INIT01: svs bank basic init for data calibration + * @SVSB_PHASE_INIT02: svs bank can provide voltages to opp table + * @SVSB_PHASE_MON: svs bank can provide voltages with thermal effect + * @SVSB_PHASE_MAX: total number of svs bank phase (debug purpose) + * + * Each svs bank has its own independent phase and we 
enable each svs bank by + * running their phase orderly. However, when svs bank encounters unexpected + * condition, it will fire an irq (PHASE_ERROR) to inform svs software. + * + * svs bank general phase-enabled order: + * SVSB_PHASE_INIT01 -> SVSB_PHASE_INIT02 -> SVSB_PHASE_MON + */ +enum svsb_phase { + SVSB_PHASE_ERROR = 0, + SVSB_PHASE_INIT01, + SVSB_PHASE_INIT02, + SVSB_PHASE_MON, + SVSB_PHASE_MAX, +}; + +enum svs_reg_index { + DESCHAR = 0, + TEMPCHAR, + DETCHAR, + AGECHAR, + DCCONFIG, + AGECONFIG, + FREQPCT30, + FREQPCT74, + LIMITVALS, + VBOOT, + DETWINDOW, + CONFIG, + TSCALCS, + RUNCONFIG, + SVSEN, + INIT2VALS, + DCVALUES, + AGEVALUES, + VOP30, + VOP74, + TEMP, + INTSTS, + INTSTSRAW, + INTEN, + CHKINT, + CHKSHIFT, + STATUS, + VDESIGN30, + VDESIGN74, + DVT30, + DVT74, + AGECOUNT, + SMSTATE0, + SMSTATE1, + CTL0, + DESDETSEC, + TEMPAGESEC, + CTRLSPARE0, + CTRLSPARE1, + CTRLSPARE2, + CTRLSPARE3, + CORESEL, + THERMINTST, + INTST, + THSTAGE0ST, + THSTAGE1ST, + THSTAGE2ST, + THAHBST0, + THAHBST1, + SPARE0, + SPARE1, + SPARE2, + SPARE3, + THSLPEVEB, + SVS_REG_MAX, +}; + +static const u32 svs_regs_v2[] = { + [DESCHAR] = 0xc00, + [TEMPCHAR] = 0xc04, + [DETCHAR] = 0xc08, + [AGECHAR] = 0xc0c, + [DCCONFIG] = 0xc10, + [AGECONFIG] = 0xc14, + [FREQPCT30] = 0xc18, + [FREQPCT74] = 0xc1c, + [LIMITVALS] = 0xc20, + [VBOOT] = 0xc24, + [DETWINDOW] = 0xc28, + [CONFIG] = 0xc2c, + [TSCALCS] = 0xc30, + [RUNCONFIG] = 0xc34, + [SVSEN] = 0xc38, + [INIT2VALS] = 0xc3c, + [DCVALUES] = 0xc40, + [AGEVALUES] = 0xc44, + [VOP30] = 0xc48, + [VOP74] = 0xc4c, + [TEMP] = 0xc50, + [INTSTS] = 0xc54, + [INTSTSRAW] = 0xc58, + [INTEN] = 0xc5c, + [CHKINT] = 0xc60, + [CHKSHIFT] = 0xc64, + [STATUS] = 0xc68, + [VDESIGN30] = 0xc6c, + [VDESIGN74] = 0xc70, + [DVT30] = 0xc74, + [DVT74] = 0xc78, + [AGECOUNT] = 0xc7c, + [SMSTATE0] = 0xc80, + [SMSTATE1] = 0xc84, + [CTL0] = 0xc88, + [DESDETSEC] = 0xce0, + [TEMPAGESEC] = 0xce4, + [CTRLSPARE0] = 0xcf0, + [CTRLSPARE1] = 0xcf4, + [CTRLSPARE2] = 0xcf8, + [CTRLSPARE3] = 0xcfc, + [CORESEL] = 0xf00, + [THERMINTST] = 0xf04, + [INTST] = 0xf08, + [THSTAGE0ST] = 0xf0c, + [THSTAGE1ST] = 0xf10, + [THSTAGE2ST] = 0xf14, + [THAHBST0] = 0xf18, + [THAHBST1] = 0xf1c, + [SPARE0] = 0xf20, + [SPARE1] = 0xf24, + [SPARE2] = 0xf28, + [SPARE3] = 0xf2c, + [THSLPEVEB] = 0xf30, +}; + +/** + * struct svs_platform - svs platform control + * @name: svs platform name + * @base: svs platform register base + * @dev: svs platform device + * @main_clk: main clock for svs bank + * @pbank: svs bank pointer needing to be protected by spin_lock section + * @banks: svs banks that svs platform supports + * @rst: svs platform reset control + * @efuse_parsing: svs platform efuse parsing function pointer + * @probe: svs platform probe function pointer + * @irqflags: svs platform irq settings flags + * @efuse_max: total number of svs efuse + * @tefuse_max: total number of thermal efuse + * @regs: svs platform registers map + * @bank_max: total number of svs banks + * @efuse: svs efuse data received from NVMEM framework + * @tefuse: thermal efuse data received from NVMEM framework + */ +struct svs_platform { + char *name; + void __iomem *base; + struct device *dev; + struct clk *main_clk; + struct svs_bank *pbank; + struct svs_bank *banks; + struct reset_control *rst; + bool (*efuse_parsing)(struct svs_platform *svsp); + int (*probe)(struct svs_platform *svsp); + unsigned long irqflags; + size_t efuse_max; + size_t tefuse_max; + const u32 *regs; + u32 bank_max; + u32 *efuse; + u32 *tefuse; +}; + +struct svs_platform_data { + char *name; 
+ struct svs_bank *banks; + bool (*efuse_parsing)(struct svs_platform *svsp); + int (*probe)(struct svs_platform *svsp); + unsigned long irqflags; + const u32 *regs; + u32 bank_max; +}; + +/** + * struct svs_bank - svs bank representation + * @dev: bank device + * @opp_dev: device for opp table/buck control + * @init_completion: the timeout completion for bank init + * @buck: regulator used by opp_dev + * @tzd: thermal zone device for getting temperature + * @lock: mutex lock to protect voltage update process + * @set_freq_pct: function pointer to set bank frequency percent table + * @get_volts: function pointer to get bank voltages + * @name: bank name + * @buck_name: regulator name + * @tzone_name: thermal zone name + * @phase: bank current phase + * @volt_od: bank voltage overdrive + * @reg_data: bank register data in different phases for debug purposes + * @pm_runtime_enabled_count: bank pm runtime enabled count + * @mode_support: bank mode support + * @freq_base: reference frequency for bank init + * @turn_freq_base: reference frequency for 2-line turn point + * @vboot: voltage request for bank init01 only + * @opp_dfreq: default opp frequency table + * @opp_dvolt: default opp voltage table + * @freq_pct: frequency percent table for bank init + * @volt: bank voltage table + * @volt_step: bank voltage step + * @volt_base: bank voltage base + * @volt_flags: bank voltage flags + * @vmax: bank voltage maximum + * @vmin: bank voltage minimum + * @age_config: bank age configuration + * @age_voffset_in: bank age voltage offset + * @dc_config: bank dc configuration + * @dc_voffset_in: bank dc voltage offset + * @dvt_fixed: bank dvt fixed value + * @vco: bank VCO value + * @chk_shift: bank chicken shift + * @core_sel: bank selection + * @opp_count: bank opp count + * @int_st: bank interrupt identification + * @sw_id: bank software identification + * @cpu_id: cpu core id for SVS CPU bank use only + * @ctl0: TS-x selection + * @temp: bank temperature + * @tzone_htemp: thermal zone high temperature threshold + * @tzone_htemp_voffset: thermal zone high temperature voltage offset + * @tzone_ltemp: thermal zone low temperature threshold + * @tzone_ltemp_voffset: thermal zone low temperature voltage offset + * @bts: svs efuse data + * @mts: svs efuse data + * @bdes: svs efuse data + * @mdes: svs efuse data + * @mtdes: svs efuse data + * @dcbdet: svs efuse data + * @dcmdet: svs efuse data + * @turn_pt: 2-line turn point telling which opp_volt entries are calculated by the high/low bank + * @type: bank type indicating a 2-line (high/low) bank or a 1-line bank + * + * An svs bank generates suitable voltages with the general math equation + * below and provides them to the opp voltage table.
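+ * + * For example, assuming illustrative platform values of volt_step = 6250 + * (uV per step) and volt_base = 500000 uV, a bank voltage code of 0x30 + * maps to 48 * 6250 + 500000 = 800000 uV via the equation below.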
+ * + * opp_volt[i] = (volt[i] * volt_step) + volt_base; + */ +struct svs_bank { + struct device *dev; + struct device *opp_dev; + struct completion init_completion; + struct regulator *buck; + struct thermal_zone_device *tzd; + struct mutex lock; /* lock to protect voltage update process */ + void (*set_freq_pct)(struct svs_platform *svsp); + void (*get_volts)(struct svs_platform *svsp); + char *name; + char *buck_name; + char *tzone_name; + enum svsb_phase phase; + s32 volt_od; + u32 reg_data[SVSB_PHASE_MAX][SVS_REG_MAX]; + u32 pm_runtime_enabled_count; + u32 mode_support; + u32 freq_base; + u32 turn_freq_base; + u32 vboot; + u32 opp_dfreq[MAX_OPP_ENTRIES]; + u32 opp_dvolt[MAX_OPP_ENTRIES]; + u32 freq_pct[MAX_OPP_ENTRIES]; + u32 volt[MAX_OPP_ENTRIES]; + u32 volt_step; + u32 volt_base; + u32 volt_flags; + u32 vmax; + u32 vmin; + u32 age_config; + u32 age_voffset_in; + u32 dc_config; + u32 dc_voffset_in; + u32 dvt_fixed; + u32 vco; + u32 chk_shift; + u32 core_sel; + u32 opp_count; + u32 int_st; + u32 sw_id; + u32 cpu_id; + u32 ctl0; + u32 temp; + u32 tzone_htemp; + u32 tzone_htemp_voffset; + u32 tzone_ltemp; + u32 tzone_ltemp_voffset; + u32 bts; + u32 mts; + u32 bdes; + u32 mdes; + u32 mtdes; + u32 dcbdet; + u32 dcmdet; + u32 turn_pt; + u32 type; +}; + +static u32 percent(u32 numerator, u32 denominator) +{ + /* If not divide 1000, "numerator * 100" will have data overflow. */ + numerator /= 1000; + denominator /= 1000; + + return DIV_ROUND_UP(numerator * 100, denominator); +} + +static u32 svs_readl_relaxed(struct svs_platform *svsp, enum svs_reg_index rg_i) +{ + return readl_relaxed(svsp->base + svsp->regs[rg_i]); +} + +static void svs_writel_relaxed(struct svs_platform *svsp, u32 val, + enum svs_reg_index rg_i) +{ + writel_relaxed(val, svsp->base + svsp->regs[rg_i]); +} + +static void svs_switch_bank(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + svs_writel_relaxed(svsp, svsb->core_sel, CORESEL); +} + +static u32 svs_bank_volt_to_opp_volt(u32 svsb_volt, u32 svsb_volt_step, + u32 svsb_volt_base) +{ + return (svsb_volt * svsb_volt_step) + svsb_volt_base; +} + +static u32 svs_opp_volt_to_bank_volt(u32 opp_u_volt, u32 svsb_volt_step, + u32 svsb_volt_base) +{ + return (opp_u_volt - svsb_volt_base) / svsb_volt_step; +} + +static int svs_sync_bank_volts_from_opp(struct svs_bank *svsb) +{ + struct dev_pm_opp *opp; + u32 i, opp_u_volt; + + for (i = 0; i < svsb->opp_count; i++) { + opp = dev_pm_opp_find_freq_exact(svsb->opp_dev, + svsb->opp_dfreq[i], + true); + if (IS_ERR(opp)) { + dev_err(svsb->dev, "cannot find freq = %u (%ld)\n", + svsb->opp_dfreq[i], PTR_ERR(opp)); + return PTR_ERR(opp); + } + + opp_u_volt = dev_pm_opp_get_voltage(opp); + svsb->volt[i] = svs_opp_volt_to_bank_volt(opp_u_volt, + svsb->volt_step, + svsb->volt_base); + dev_pm_opp_put(opp); + } + + return 0; +} + +static int svs_adjust_pm_opp_volts(struct svs_bank *svsb) +{ + int ret = -EPERM, tzone_temp = 0; + u32 i, svsb_volt, opp_volt, temp_voffset = 0, opp_start, opp_stop; + + mutex_lock(&svsb->lock); + + /* + * 2-line bank updates its corresponding opp volts. + * 1-line bank updates all opp volts. 
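+ * + * For example, with opp_count = 16 and turn_pt = 8 (illustrative + * numbers), the high bank updates entries 0..7 and the low bank entries + * 8..15; in mon phase both update the full range, as handled below.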
+ */ + if (svsb->type == SVSB_HIGH) { + opp_start = 0; + opp_stop = svsb->turn_pt; + } else if (svsb->type == SVSB_LOW) { + opp_start = svsb->turn_pt; + opp_stop = svsb->opp_count; + } else { + opp_start = 0; + opp_stop = svsb->opp_count; + } + + /* Get thermal effect */ + if (svsb->phase == SVSB_PHASE_MON) { + ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp); + if (ret || (svsb->temp > SVSB_TEMP_UPPER_BOUND && + svsb->temp < SVSB_TEMP_LOWER_BOUND)) { + dev_err(svsb->dev, "%s: %d (0x%x), run default volts\n", + svsb->tzone_name, ret, svsb->temp); + svsb->phase = SVSB_PHASE_ERROR; + } + + if (tzone_temp >= svsb->tzone_htemp) + temp_voffset += svsb->tzone_htemp_voffset; + else if (tzone_temp <= svsb->tzone_ltemp) + temp_voffset += svsb->tzone_ltemp_voffset; + + /* 2-line bank update all opp volts when running mon mode */ + if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) { + opp_start = 0; + opp_stop = svsb->opp_count; + } + } + + /* vmin <= svsb_volt (opp_volt) <= default opp voltage */ + for (i = opp_start; i < opp_stop; i++) { + switch (svsb->phase) { + case SVSB_PHASE_ERROR: + opp_volt = svsb->opp_dvolt[i]; + break; + case SVSB_PHASE_INIT01: + /* do nothing */ + goto unlock_mutex; + case SVSB_PHASE_INIT02: + svsb_volt = max(svsb->volt[i], svsb->vmin); + opp_volt = svs_bank_volt_to_opp_volt(svsb_volt, + svsb->volt_step, + svsb->volt_base); + break; + case SVSB_PHASE_MON: + svsb_volt = max(svsb->volt[i] + temp_voffset, svsb->vmin); + opp_volt = svs_bank_volt_to_opp_volt(svsb_volt, + svsb->volt_step, + svsb->volt_base); + break; + default: + dev_err(svsb->dev, "unknown phase: %u\n", svsb->phase); + ret = -EINVAL; + goto unlock_mutex; + } + + opp_volt = min(opp_volt, svsb->opp_dvolt[i]); + ret = dev_pm_opp_adjust_voltage(svsb->opp_dev, + svsb->opp_dfreq[i], + opp_volt, opp_volt, + svsb->opp_dvolt[i]); + if (ret) { + dev_err(svsb->dev, "set %uuV fail: %d\n", + opp_volt, ret); + goto unlock_mutex; + } + } + +unlock_mutex: + mutex_unlock(&svsb->lock); + + return ret; +} + +static int svs_dump_debug_show(struct seq_file *m, void *p) +{ + struct svs_platform *svsp = (struct svs_platform *)m->private; + struct svs_bank *svsb; + unsigned long svs_reg_addr; + u32 idx, i, j, bank_id; + + for (i = 0; i < svsp->efuse_max; i++) + if (svsp->efuse && svsp->efuse[i]) + seq_printf(m, "M_HW_RES%d = 0x%08x\n", + i, svsp->efuse[i]); + + for (i = 0; i < svsp->tefuse_max; i++) + if (svsp->tefuse) + seq_printf(m, "THERMAL_EFUSE%d = 0x%08x\n", + i, svsp->tefuse[i]); + + for (bank_id = 0, idx = 0; idx < svsp->bank_max; idx++, bank_id++) { + svsb = &svsp->banks[idx]; + + for (i = SVSB_PHASE_INIT01; i <= SVSB_PHASE_MON; i++) { + seq_printf(m, "Bank_number = %u\n", bank_id); + + if (i == SVSB_PHASE_INIT01 || i == SVSB_PHASE_INIT02) + seq_printf(m, "mode = init%d\n", i); + else if (i == SVSB_PHASE_MON) + seq_puts(m, "mode = mon\n"); + else + seq_puts(m, "mode = error\n"); + + for (j = DESCHAR; j < SVS_REG_MAX; j++) { + svs_reg_addr = (unsigned long)(svsp->base + + svsp->regs[j]); + seq_printf(m, "0x%08lx = 0x%08x\n", + svs_reg_addr, svsb->reg_data[i][j]); + } + } + } + + return 0; +} + +debug_fops_ro(dump); + +static int svs_enable_debug_show(struct seq_file *m, void *v) +{ + struct svs_bank *svsb = (struct svs_bank *)m->private; + + switch (svsb->phase) { + case SVSB_PHASE_ERROR: + seq_puts(m, "disabled\n"); + break; + case SVSB_PHASE_INIT01: + seq_puts(m, "init1\n"); + break; + case SVSB_PHASE_INIT02: + seq_puts(m, "init2\n"); + break; + case SVSB_PHASE_MON: + seq_puts(m, "mon mode\n"); + break; + default: + 
seq_puts(m, "unknown\n"); + break; + } + + return 0; +} + +static ssize_t svs_enable_debug_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *pos) +{ + struct svs_bank *svsb = file_inode(filp)->i_private; + struct svs_platform *svsp = dev_get_drvdata(svsb->dev); + unsigned long flags; + int enabled, ret; + char *buf = NULL; + + if (count >= PAGE_SIZE) + return -EINVAL; + + buf = (char *)memdup_user_nul(buffer, count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + ret = kstrtoint(buf, 10, &enabled); + if (ret) + return ret; + + if (!enabled) { + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svsb->mode_support = SVSB_MODE_ALL_DISABLE; + svs_switch_bank(svsp); + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); + spin_unlock_irqrestore(&svs_lock, flags); + + svsb->phase = SVSB_PHASE_ERROR; + svs_adjust_pm_opp_volts(svsb); + } + + kfree(buf); + + return count; +} + +debug_fops_rw(enable); + +static int svs_status_debug_show(struct seq_file *m, void *v) +{ + struct svs_bank *svsb = (struct svs_bank *)m->private; + struct dev_pm_opp *opp; + int tzone_temp = 0, ret; + u32 i; + + ret = thermal_zone_get_temp(svsb->tzd, &tzone_temp); + if (ret) + seq_printf(m, "%s: temperature ignore, turn_pt = %u\n", + svsb->name, svsb->turn_pt); + else + seq_printf(m, "%s: temperature = %d, turn_pt = %u\n", + svsb->name, tzone_temp, svsb->turn_pt); + + for (i = 0; i < svsb->opp_count; i++) { + opp = dev_pm_opp_find_freq_exact(svsb->opp_dev, + svsb->opp_dfreq[i], true); + if (IS_ERR(opp)) { + seq_printf(m, "%s: cannot find freq = %u (%ld)\n", + svsb->name, svsb->opp_dfreq[i], + PTR_ERR(opp)); + return PTR_ERR(opp); + } + + seq_printf(m, "opp_freq[%02u]: %u, opp_volt[%02u]: %lu, ", + i, svsb->opp_dfreq[i], i, + dev_pm_opp_get_voltage(opp)); + seq_printf(m, "svsb_volt[%02u]: 0x%x, freq_pct[%02u]: %u\n", + i, svsb->volt[i], i, svsb->freq_pct[i]); + dev_pm_opp_put(opp); + } + + return 0; +} + +debug_fops_ro(status); + +static int svs_create_debug_cmds(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct dentry *svs_dir, *svsb_dir, *file_entry; + const char *d = "/sys/kernel/debug/svs"; + u32 i, idx; + + struct svs_dentry { + const char *name; + const struct file_operations *fops; + }; + + struct svs_dentry svs_entries[] = { + svs_dentry_data(dump), + }; + + struct svs_dentry svsb_entries[] = { + svs_dentry_data(enable), + svs_dentry_data(status), + }; + + svs_dir = debugfs_create_dir("svs", NULL); + if (IS_ERR(svs_dir)) { + dev_err(svsp->dev, "cannot create %s: %ld\n", + d, PTR_ERR(svs_dir)); + return PTR_ERR(svs_dir); + } + + for (i = 0; i < ARRAY_SIZE(svs_entries); i++) { + file_entry = debugfs_create_file(svs_entries[i].name, 0664, + svs_dir, svsp, + svs_entries[i].fops); + if (IS_ERR(file_entry)) { + dev_err(svsp->dev, "cannot create %s/%s: %ld\n", + d, svs_entries[i].name, PTR_ERR(file_entry)); + return PTR_ERR(file_entry); + } + } + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (svsb->mode_support == SVSB_MODE_ALL_DISABLE) + continue; + + svsb_dir = debugfs_create_dir(svsb->name, svs_dir); + if (IS_ERR(svsb_dir)) { + dev_err(svsp->dev, "cannot create %s/%s: %ld\n", + d, svsb->name, PTR_ERR(svsb_dir)); + return PTR_ERR(svsb_dir); + } + + for (i = 0; i < ARRAY_SIZE(svsb_entries); i++) { + file_entry = debugfs_create_file(svsb_entries[i].name, + 0664, svsb_dir, svsb, + svsb_entries[i].fops); + if (IS_ERR(file_entry)) { + dev_err(svsp->dev, "no %s/%s/%s?: %ld\n", + d, svsb->name, 
svsb_entries[i].name, + PTR_ERR(file_entry)); + return PTR_ERR(file_entry); + } + } + } + + return 0; +} + +static u32 interpolate(u32 f0, u32 f1, u32 v0, u32 v1, u32 fx) +{ + u32 vx; + + if (v0 == v1 || f0 == f1) + return v0; + + /* *100 to have decimal fraction factor */ + vx = (v0 * 100) - ((((v0 - v1) * 100) / (f0 - f1)) * (f0 - fx)); + + return DIV_ROUND_UP(vx, 100); +} + +static void svs_get_bank_volts_v3(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + u32 i, j, *vop, vop74, vop30, turn_pt = svsb->turn_pt; + u32 b_sft, shift_byte = 0, opp_start = 0, opp_stop = 0; + u32 middle_index = (svsb->opp_count / 2); + + if (svsb->phase == SVSB_PHASE_MON && + svsb->volt_flags & SVSB_MON_VOLT_IGNORE) + return; + + vop74 = svs_readl_relaxed(svsp, VOP74); + vop30 = svs_readl_relaxed(svsp, VOP30); + + /* Target is to set svsb->volt[] by algorithm */ + if (turn_pt < middle_index) { + if (svsb->type == SVSB_HIGH) { + /* volt[0] ~ volt[turn_pt - 1] */ + for (i = 0; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? &vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + } else if (svsb->type == SVSB_LOW) { + /* volt[turn_pt] + volt[j] ~ volt[opp_count - 1] */ + j = svsb->opp_count - 7; + svsb->volt[turn_pt] = vop30 & GENMASK(7, 0); + shift_byte++; + for (i = j; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? &vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + + /* volt[turn_pt + 1] ~ volt[j - 1] by interpolate */ + for (i = turn_pt + 1; i < j; i++) + svsb->volt[i] = interpolate(svsb->freq_pct[turn_pt], + svsb->freq_pct[j], + svsb->volt[turn_pt], + svsb->volt[j], + svsb->freq_pct[i]); + } + } else { + if (svsb->type == SVSB_HIGH) { + /* volt[0] + volt[j] ~ volt[turn_pt - 1] */ + j = turn_pt - 7; + svsb->volt[0] = vop30 & GENMASK(7, 0); + shift_byte++; + for (i = j; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? &vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + + /* volt[1] ~ volt[j - 1] by interpolate */ + for (i = 1; i < j; i++) + svsb->volt[i] = interpolate(svsb->freq_pct[0], + svsb->freq_pct[j], + svsb->volt[0], + svsb->volt[j], + svsb->freq_pct[i]); + } else if (svsb->type == SVSB_LOW) { + /* volt[turn_pt] ~ volt[opp_count - 1] */ + for (i = turn_pt; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + vop = (shift_byte < REG_BYTES) ? 
&vop30 : + &vop74; + svsb->volt[i] = (*vop >> b_sft) & GENMASK(7, 0); + shift_byte++; + } + } + } + + if (svsb->type == SVSB_HIGH) { + opp_start = 0; + opp_stop = svsb->turn_pt; + } else if (svsb->type == SVSB_LOW) { + opp_start = svsb->turn_pt; + opp_stop = svsb->opp_count; + } + + for (i = opp_start; i < opp_stop; i++) + if (svsb->volt_flags & SVSB_REMOVE_DVTFIXED_VOLT) + svsb->volt[i] -= svsb->dvt_fixed; +} + +static void svs_set_bank_freq_pct_v3(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + u32 i, j, *freq_pct, freq_pct74 = 0, freq_pct30 = 0; + u32 b_sft, shift_byte = 0, turn_pt; + u32 middle_index = (svsb->opp_count / 2); + + for (i = 0; i < svsb->opp_count; i++) { + if (svsb->opp_dfreq[i] <= svsb->turn_freq_base) { + svsb->turn_pt = i; + break; + } + } + + turn_pt = svsb->turn_pt; + + /* Target is to fill out freq_pct74 / freq_pct30 by algorithm */ + if (turn_pt < middle_index) { + if (svsb->type == SVSB_HIGH) { + /* + * If we don't handle this situation, + * SVSB_HIGH's FREQPCT74 / FREQPCT30 would keep "0" + * and this leads SVSB_LOW to work abnormally. + */ + if (turn_pt == 0) + freq_pct30 = svsb->freq_pct[0]; + + /* freq_pct[0] ~ freq_pct[turn_pt - 1] */ + for (i = 0; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? + &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } else if (svsb->type == SVSB_LOW) { + /* + * freq_pct[turn_pt] + + * freq_pct[opp_count - 7] ~ freq_pct[opp_count -1] + */ + freq_pct30 = svsb->freq_pct[turn_pt]; + shift_byte++; + j = svsb->opp_count - 7; + for (i = j; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? + &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } + } else { + if (svsb->type == SVSB_HIGH) { + /* + * freq_pct[0] + + * freq_pct[turn_pt - 7] ~ freq_pct[turn_pt - 1] + */ + freq_pct30 = svsb->freq_pct[0]; + shift_byte++; + j = turn_pt - 7; + for (i = j; i < turn_pt; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? + &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } else if (svsb->type == SVSB_LOW) { + /* freq_pct[turn_pt] ~ freq_pct[opp_count - 1] */ + for (i = turn_pt; i < svsb->opp_count; i++) { + b_sft = BITS8 * (shift_byte % REG_BYTES); + freq_pct = (shift_byte < REG_BYTES) ? 
+ &freq_pct30 : &freq_pct74; + *freq_pct |= (svsb->freq_pct[i] << b_sft); + shift_byte++; + } + } + } + + svs_writel_relaxed(svsp, freq_pct74, FREQPCT74); + svs_writel_relaxed(svsp, freq_pct30, FREQPCT30); +} + +static void svs_get_bank_volts_v2(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + u32 temp, i; + + temp = svs_readl_relaxed(svsp, VOP74); + svsb->volt[14] = (temp >> 24) & GENMASK(7, 0); + svsb->volt[12] = (temp >> 16) & GENMASK(7, 0); + svsb->volt[10] = (temp >> 8) & GENMASK(7, 0); + svsb->volt[8] = (temp & GENMASK(7, 0)); + + temp = svs_readl_relaxed(svsp, VOP30); + svsb->volt[6] = (temp >> 24) & GENMASK(7, 0); + svsb->volt[4] = (temp >> 16) & GENMASK(7, 0); + svsb->volt[2] = (temp >> 8) & GENMASK(7, 0); + svsb->volt[0] = (temp & GENMASK(7, 0)); + + for (i = 0; i <= 12; i += 2) + svsb->volt[i + 1] = interpolate(svsb->freq_pct[i], + svsb->freq_pct[i + 2], + svsb->volt[i], + svsb->volt[i + 2], + svsb->freq_pct[i + 1]); + + svsb->volt[15] = interpolate(svsb->freq_pct[12], + svsb->freq_pct[14], + svsb->volt[12], + svsb->volt[14], + svsb->freq_pct[15]); + + for (i = 0; i < svsb->opp_count; i++) + svsb->volt[i] += svsb->volt_od; +} + +static void svs_set_bank_freq_pct_v2(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + svs_writel_relaxed(svsp, + (svsb->freq_pct[14] << 24) | + (svsb->freq_pct[12] << 16) | + (svsb->freq_pct[10] << 8) | + svsb->freq_pct[8], + FREQPCT74); + + svs_writel_relaxed(svsp, + (svsb->freq_pct[6] << 24) | + (svsb->freq_pct[4] << 16) | + (svsb->freq_pct[2] << 8) | + svsb->freq_pct[0], + FREQPCT30); +} + +static void svs_set_bank_phase(struct svs_platform *svsp, + enum svsb_phase target_phase) +{ + struct svs_bank *svsb = svsp->pbank; + u32 des_char, temp_char, det_char, limit_vals, init2vals, ts_calcs; + + svs_switch_bank(svsp); + + des_char = (svsb->bdes << 8) | svsb->mdes; + svs_writel_relaxed(svsp, des_char, DESCHAR); + + temp_char = (svsb->vco << 16) | (svsb->mtdes << 8) | svsb->dvt_fixed; + svs_writel_relaxed(svsp, temp_char, TEMPCHAR); + + det_char = (svsb->dcbdet << 8) | svsb->dcmdet; + svs_writel_relaxed(svsp, det_char, DETCHAR); + + svs_writel_relaxed(svsp, svsb->dc_config, DCCONFIG); + svs_writel_relaxed(svsp, svsb->age_config, AGECONFIG); + svs_writel_relaxed(svsp, SVSB_RUNCONFIG_DEFAULT, RUNCONFIG); + + svsb->set_freq_pct(svsp); + + limit_vals = (svsb->vmax << 24) | (svsb->vmin << 16) | + (SVSB_DTHI << 8) | SVSB_DTLO; + svs_writel_relaxed(svsp, limit_vals, LIMITVALS); + + svs_writel_relaxed(svsp, SVSB_DET_WINDOW, DETWINDOW); + svs_writel_relaxed(svsp, SVSB_DET_MAX, CONFIG); + svs_writel_relaxed(svsp, svsb->chk_shift, CHKSHIFT); + svs_writel_relaxed(svsp, svsb->ctl0, CTL0); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); + + switch (target_phase) { + case SVSB_PHASE_INIT01: + svs_writel_relaxed(svsp, svsb->vboot, VBOOT); + svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN); + svs_writel_relaxed(svsp, SVSB_EN_INIT01, SVSEN); + break; + case SVSB_PHASE_INIT02: + svs_writel_relaxed(svsp, SVSB_INTEN_INIT0x, INTEN); + init2vals = (svsb->age_voffset_in << 16) | svsb->dc_voffset_in; + svs_writel_relaxed(svsp, init2vals, INIT2VALS); + svs_writel_relaxed(svsp, SVSB_EN_INIT02, SVSEN); + break; + case SVSB_PHASE_MON: + ts_calcs = (svsb->bts << 12) | svsb->mts; + svs_writel_relaxed(svsp, ts_calcs, TSCALCS); + svs_writel_relaxed(svsp, SVSB_INTEN_MONVOPEN, INTEN); + svs_writel_relaxed(svsp, SVSB_EN_MON, SVSEN); + break; + default: + dev_err(svsb->dev, "requested unknown target phase: %u\n", + target_phase); + break; 
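+ + /* + * The SVSEN write in each case above is what actually starts the + * requested phase; every register write before the switch only loads + * the bank's characterization data and must land first. + */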
+ } +} + +static inline void svs_save_bank_register_data(struct svs_platform *svsp, + enum svsb_phase phase) +{ + struct svs_bank *svsb = svsp->pbank; + enum svs_reg_index rg_i; + + for (rg_i = DESCHAR; rg_i < SVS_REG_MAX; rg_i++) + svsb->reg_data[phase][rg_i] = svs_readl_relaxed(svsp, rg_i); +} + +static inline void svs_error_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + dev_err(svsb->dev, "%s: CORESEL = 0x%08x\n", + __func__, svs_readl_relaxed(svsp, CORESEL)); + dev_err(svsb->dev, "SVSEN = 0x%08x, INTSTS = 0x%08x\n", + svs_readl_relaxed(svsp, SVSEN), + svs_readl_relaxed(svsp, INTSTS)); + dev_err(svsb->dev, "SMSTATE0 = 0x%08x, SMSTATE1 = 0x%08x\n", + svs_readl_relaxed(svsp, SMSTATE0), + svs_readl_relaxed(svsp, SMSTATE1)); + dev_err(svsb->dev, "TEMP = 0x%08x\n", svs_readl_relaxed(svsp, TEMP)); + + svs_save_bank_register_data(svsp, SVSB_PHASE_ERROR); + + svsb->phase = SVSB_PHASE_ERROR; + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); +} + +static inline void svs_init01_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + dev_info(svsb->dev, "%s: VDN74~30:0x%08x~0x%08x, DC:0x%08x\n", + __func__, svs_readl_relaxed(svsp, VDESIGN74), + svs_readl_relaxed(svsp, VDESIGN30), + svs_readl_relaxed(svsp, DCVALUES)); + + svs_save_bank_register_data(svsp, SVSB_PHASE_INIT01); + + svsb->phase = SVSB_PHASE_INIT01; + svsb->dc_voffset_in = ~(svs_readl_relaxed(svsp, DCVALUES) & + GENMASK(15, 0)) + 1; + if (svsb->volt_flags & SVSB_INIT01_VOLT_IGNORE || + (svsb->dc_voffset_in & SVSB_DC_SIGNED_BIT && + svsb->volt_flags & SVSB_INIT01_VOLT_INC_ONLY)) + svsb->dc_voffset_in = 0; + + svsb->age_voffset_in = svs_readl_relaxed(svsp, AGEVALUES) & + GENMASK(15, 0); + + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS); + svsb->core_sel &= ~SVSB_DET_CLK_EN; +} + +static inline void svs_init02_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + dev_info(svsb->dev, "%s: VOP74~30:0x%08x~0x%08x, DC:0x%08x\n", + __func__, svs_readl_relaxed(svsp, VOP74), + svs_readl_relaxed(svsp, VOP30), + svs_readl_relaxed(svsp, DCVALUES)); + + svs_save_bank_register_data(svsp, SVSB_PHASE_INIT02); + + svsb->phase = SVSB_PHASE_INIT02; + svsb->get_volts(svsp); + + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_COMPLETE, INTSTS); +} + +static inline void svs_mon_mode_isr_handler(struct svs_platform *svsp) +{ + struct svs_bank *svsb = svsp->pbank; + + svs_save_bank_register_data(svsp, SVSB_PHASE_MON); + + svsb->phase = SVSB_PHASE_MON; + svsb->get_volts(svsp); + + svsb->temp = svs_readl_relaxed(svsp, TEMP) & GENMASK(7, 0); + svs_writel_relaxed(svsp, SVSB_INTSTS_MONVOP, INTSTS); +} + +static irqreturn_t svs_isr(int irq, void *data) +{ + struct svs_platform *svsp = data; + struct svs_bank *svsb = NULL; + unsigned long flags; + u32 idx, int_sts, svs_en; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + WARN(!svsb, "%s: svsb(%s) is null", __func__, svsb->name); + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + + /* Find out which svs bank fires interrupt */ + if (svsb->int_st & svs_readl_relaxed(svsp, INTST)) { + spin_unlock_irqrestore(&svs_lock, flags); + continue; + } + + svs_switch_bank(svsp); + int_sts = svs_readl_relaxed(svsp, INTSTS); + svs_en = svs_readl_relaxed(svsp, SVSEN); + + if (int_sts == SVSB_INTSTS_COMPLETE && + svs_en == SVSB_EN_INIT01) + 
svs_init01_isr_handler(svsp); + else if (int_sts == SVSB_INTSTS_COMPLETE && + svs_en == SVSB_EN_INIT02) + svs_init02_isr_handler(svsp); + else if (int_sts & SVSB_INTSTS_MONVOP) + svs_mon_mode_isr_handler(svsp); + else + svs_error_isr_handler(svsp); + + spin_unlock_irqrestore(&svs_lock, flags); + break; + } + + svs_adjust_pm_opp_volts(svsb); + + if (svsb->phase == SVSB_PHASE_INIT01 || + svsb->phase == SVSB_PHASE_INIT02) + complete(&svsb->init_completion); + + return IRQ_HANDLED; +} + +static int svs_init01(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + unsigned long flags, time_left; + bool search_done; + int ret = 0, r; + u32 opp_freq, opp_vboot, buck_volt, idx, i; + + /* Keep CPUs' core power on for svs_init01 initialization */ + cpuidle_pause_and_lock(); + + /* Svs bank init01 preparation - power enable */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + ret = regulator_enable(svsb->buck); + if (ret) { + dev_err(svsb->dev, "%s enable fail: %d\n", + svsb->buck_name, ret); + goto svs_init01_resume_cpuidle; + } + + /* Some buck doesn't support mode change. Show fail msg only */ + ret = regulator_set_mode(svsb->buck, REGULATOR_MODE_FAST); + if (ret) + dev_notice(svsb->dev, "set fast mode fail: %d\n", ret); + + if (svsb->volt_flags & SVSB_INIT01_PD_REQ) { + if (!pm_runtime_enabled(svsb->opp_dev)) { + pm_runtime_enable(svsb->opp_dev); + svsb->pm_runtime_enabled_count++; + } + + ret = pm_runtime_get_sync(svsb->opp_dev); + if (ret < 0) { + dev_err(svsb->dev, "mtcmos on fail: %d\n", ret); + goto svs_init01_resume_cpuidle; + } + } + } + + /* + * Svs bank init01 preparation - vboot voltage adjustment + * Sometimes two svs banks use the same buck. Therefore, + * we have to set each svs bank to target voltage(vboot) first. + */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + /* + * Find the fastest freq that can be run at vboot and + * fix to that freq until svs_init01 is done. 
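+ * + * For example, if vboot maps to 800000 uV and the OPP voltages descend + * from 1000000 uV, every entry except the first one at or below + * 800000 uV is temporarily disabled, and that single entry is pinned + * to exactly 800000 uV (numbers illustrative).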
+ */ + search_done = false; + opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot, + svsb->volt_step, + svsb->volt_base); + + for (i = 0; i < svsb->opp_count; i++) { + opp_freq = svsb->opp_dfreq[i]; + if (!search_done && svsb->opp_dvolt[i] <= opp_vboot) { + ret = dev_pm_opp_adjust_voltage(svsb->opp_dev, + opp_freq, + opp_vboot, + opp_vboot, + opp_vboot); + if (ret) { + dev_err(svsb->dev, + "set opp %uuV vboot fail: %d\n", + opp_vboot, ret); + goto svs_init01_finish; + } + + search_done = true; + } else { + ret = dev_pm_opp_disable(svsb->opp_dev, + svsb->opp_dfreq[i]); + if (ret) { + dev_err(svsb->dev, + "opp %uHz disable fail: %d\n", + svsb->opp_dfreq[i], ret); + goto svs_init01_finish; + } + } + } + } + + /* Svs bank init01 begins */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + opp_vboot = svs_bank_volt_to_opp_volt(svsb->vboot, + svsb->volt_step, + svsb->volt_base); + + buck_volt = regulator_get_voltage(svsb->buck); + if (buck_volt != opp_vboot) { + dev_err(svsb->dev, + "buck voltage: %uuV, expected vboot: %uuV\n", + buck_volt, opp_vboot); + ret = -EPERM; + goto svs_init01_finish; + } + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_set_bank_phase(svsp, SVSB_PHASE_INIT01); + spin_unlock_irqrestore(&svs_lock, flags); + + time_left = wait_for_completion_timeout(&svsb->init_completion, + msecs_to_jiffies(5000)); + if (!time_left) { + dev_err(svsb->dev, "init01 completion timeout\n"); + ret = -EBUSY; + goto svs_init01_finish; + } + } + +svs_init01_finish: + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT01)) + continue; + + for (i = 0; i < svsb->opp_count; i++) { + r = dev_pm_opp_enable(svsb->opp_dev, + svsb->opp_dfreq[i]); + if (r) + dev_err(svsb->dev, "opp %uHz enable fail: %d\n", + svsb->opp_dfreq[i], r); + } + + if (svsb->volt_flags & SVSB_INIT01_PD_REQ) { + r = pm_runtime_put_sync(svsb->opp_dev); + if (r) + dev_err(svsb->dev, "mtcmos off fail: %d\n", r); + + if (svsb->pm_runtime_enabled_count > 0) { + pm_runtime_disable(svsb->opp_dev); + svsb->pm_runtime_enabled_count--; + } + } + + r = regulator_set_mode(svsb->buck, REGULATOR_MODE_NORMAL); + if (r) + dev_notice(svsb->dev, "set normal mode fail: %d\n", r); + + r = regulator_disable(svsb->buck); + if (r) + dev_err(svsb->dev, "%s disable fail: %d\n", + svsb->buck_name, r); + } + +svs_init01_resume_cpuidle: + cpuidle_resume_and_unlock(); + + return ret; +} + +static int svs_init02(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + unsigned long flags, time_left; + u32 idx; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT02)) + continue; + + reinit_completion(&svsb->init_completion); + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_set_bank_phase(svsp, SVSB_PHASE_INIT02); + spin_unlock_irqrestore(&svs_lock, flags); + + time_left = wait_for_completion_timeout(&svsb->init_completion, + msecs_to_jiffies(5000)); + if (!time_left) { + dev_err(svsb->dev, "init02 completion timeout\n"); + return -EBUSY; + } + } + + /* + * 2-line high/low bank update its corresponding opp voltages only. + * Therefore, we sync voltages from opp for high/low bank voltages + * consistency. 
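+ * + * That is, after init02 the high bank has refreshed only the entries + * below turn_pt and the low bank only those from turn_pt onward; + * reading all voltages back from the OPP table leaves each bank with a + * complete volt[] array.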
+ */ + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_INIT02)) + continue; + + if (svsb->type == SVSB_HIGH || svsb->type == SVSB_LOW) { + if (svs_sync_bank_volts_from_opp(svsb)) { + dev_err(svsb->dev, "sync volt fail\n"); + return -EPERM; + } + } + } + + return 0; +} + +static void svs_mon_mode(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + unsigned long flags; + u32 idx; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (!(svsb->mode_support & SVSB_MODE_MON)) + continue; + + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_set_bank_phase(svsp, SVSB_PHASE_MON); + spin_unlock_irqrestore(&svs_lock, flags); + } +} + +static int svs_start(struct svs_platform *svsp) +{ + int ret; + + ret = svs_init01(svsp); + if (ret) + return ret; + + ret = svs_init02(svsp); + if (ret) + return ret; + + svs_mon_mode(svsp); + + return 0; +} + +static int svs_suspend(struct device *dev) +{ + struct svs_platform *svsp = dev_get_drvdata(dev); + struct svs_bank *svsb; + unsigned long flags; + int ret; + u32 idx; + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + /* This might wait for svs_isr() process */ + spin_lock_irqsave(&svs_lock, flags); + svsp->pbank = svsb; + svs_switch_bank(svsp); + svs_writel_relaxed(svsp, SVSB_EN_OFF, SVSEN); + svs_writel_relaxed(svsp, SVSB_INTSTS_CLEAN, INTSTS); + spin_unlock_irqrestore(&svs_lock, flags); + + svsb->phase = SVSB_PHASE_ERROR; + svs_adjust_pm_opp_volts(svsb); + } + + ret = reset_control_assert(svsp->rst); + if (ret) { + dev_err(svsp->dev, "cannot assert reset %d\n", ret); + return ret; + } + + clk_disable_unprepare(svsp->main_clk); + + return 0; +} + +static int svs_resume(struct device *dev) +{ + struct svs_platform *svsp = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(svsp->main_clk); + if (ret) { + dev_err(svsp->dev, "cannot enable main_clk, disable svs\n"); + return ret; + } + + ret = reset_control_deassert(svsp->rst); + if (ret) { + dev_err(svsp->dev, "cannot deassert reset %d\n", ret); + goto out_of_resume; + } + + ret = svs_init02(svsp); + if (ret) + goto out_of_resume; + + svs_mon_mode(svsp); + + return 0; + +out_of_resume: + clk_disable_unprepare(svsp->main_clk); + return ret; +} + +static int svs_bank_resource_setup(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct dev_pm_opp *opp; + unsigned long freq; + int count, ret; + u32 idx, i; + + dev_set_drvdata(svsp->dev, svsp); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + svsb->name = "SVSB_CPU_LITTLE"; + break; + case SVSB_CPU_BIG: + svsb->name = "SVSB_CPU_BIG"; + break; + case SVSB_CCI: + svsb->name = "SVSB_CCI"; + break; + case SVSB_GPU: + if (svsb->type == SVSB_HIGH) + svsb->name = "SVSB_GPU_HIGH"; + else if (svsb->type == SVSB_LOW) + svsb->name = "SVSB_GPU_LOW"; + else + svsb->name = "SVSB_GPU"; + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + return -EINVAL; + } + + svsb->dev = devm_kzalloc(svsp->dev, sizeof(*svsb->dev), + GFP_KERNEL); + if (!svsb->dev) + return -ENOMEM; + + ret = dev_set_name(svsb->dev, "%s", svsb->name); + if (ret) + return ret; + + dev_set_drvdata(svsb->dev, svsp); + + ret = dev_pm_opp_of_add_table(svsb->opp_dev); + if (ret) { + dev_err(svsb->dev, "add opp table fail: %d\n", ret); + return ret; + } + + mutex_init(&svsb->lock); + init_completion(&svsb->init_completion); + + if (svsb->mode_support 
& SVSB_MODE_INIT01) { + svsb->buck = devm_regulator_get_optional(svsb->opp_dev, + svsb->buck_name); + if (IS_ERR(svsb->buck)) { + dev_err(svsb->dev, "cannot get \"%s-supply\"\n", + svsb->buck_name); + return PTR_ERR(svsb->buck); + } + } + + if (svsb->mode_support & SVSB_MODE_MON) { + svsb->tzd = thermal_zone_get_zone_by_name(svsb->tzone_name); + if (IS_ERR(svsb->tzd)) { + dev_err(svsb->dev, "cannot get \"%s\" thermal zone\n", + svsb->tzone_name); + return PTR_ERR(svsb->tzd); + } + } + + count = dev_pm_opp_get_opp_count(svsb->opp_dev); + if (svsb->opp_count != count) { + dev_err(svsb->dev, + "opp_count not \"%u\" but get \"%d\"?\n", + svsb->opp_count, count); + return count; + } + + for (i = 0, freq = U32_MAX; i < svsb->opp_count; i++, freq--) { + opp = dev_pm_opp_find_freq_floor(svsb->opp_dev, &freq); + if (IS_ERR(opp)) { + dev_err(svsb->dev, "cannot find freq = %ld\n", + PTR_ERR(opp)); + return PTR_ERR(opp); + } + + svsb->opp_dfreq[i] = freq; + svsb->opp_dvolt[i] = dev_pm_opp_get_voltage(opp); + svsb->freq_pct[i] = percent(svsb->opp_dfreq[i], + svsb->freq_base); + dev_pm_opp_put(opp); + } + } + + return 0; +} + +static bool svs_mt8192_efuse_parsing(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct nvmem_cell *cell; + u32 idx, i, vmin, golden_temp; + + for (i = 0; i < svsp->efuse_max; i++) + if (svsp->efuse[i]) + dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n", + i, svsp->efuse[i]); + + if (!svsp->efuse[9]) { + dev_notice(svsp->dev, "svs_efuse[9] = 0x0?\n"); + return false; + } + + /* Svs efuse parsing */ + vmin = (svsp->efuse[19] >> 4) & GENMASK(1, 0); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (vmin == 0x1) + svsb->vmin = 0x1e; + + if (svsb->type == SVSB_LOW) { + svsb->mtdes = svsp->efuse[10] & GENMASK(7, 0); + svsb->bdes = (svsp->efuse[10] >> 16) & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[10] >> 24) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[17]) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[17] >> 8) & GENMASK(7, 0); + } else if (svsb->type == SVSB_HIGH) { + svsb->mtdes = svsp->efuse[9] & GENMASK(7, 0); + svsb->bdes = (svsp->efuse[9] >> 16) & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[9] >> 24) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[17] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[17] >> 24) & GENMASK(7, 0); + } + + svsb->vmax += svsb->dvt_fixed; + } + + /* Thermal efuse parsing */ + cell = nvmem_cell_get(svsp->dev, "t-calibration-data"); + if (IS_ERR_OR_NULL(cell)) { + dev_err(svsp->dev, "no \"t-calibration-data\"? 
%ld\n", + PTR_ERR(cell)); + return false; + } + + svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max); + if (IS_ERR(svsp->tefuse)) { + dev_err(svsp->dev, "cannot read thermal efuse: %ld\n", + PTR_ERR(svsp->tefuse)); + nvmem_cell_put(cell); + return false; + } + + svsp->tefuse_max /= sizeof(u32); + nvmem_cell_put(cell); + + for (i = 0; i < svsp->tefuse_max; i++) + if (svsp->tefuse[i] != 0) + break; + + if (i == svsp->tefuse_max) + golden_temp = 50; /* All thermal efuse data are 0 */ + else + golden_temp = (svsp->tefuse[0] >> 24) & GENMASK(7, 0); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + svsb->mts = 500; + svsb->bts = (((500 * golden_temp + 250460) / 1000) - 25) * 4; + } + + return true; +} + +static bool svs_mt8183_efuse_parsing(struct svs_platform *svsp) +{ + struct svs_bank *svsb; + struct nvmem_cell *cell; + int format[6], x_roomt[6], o_vtsmcu[5], o_vtsabb, tb_roomt = 0; + int adc_ge_t, adc_oe_t, ge, oe, gain, degc_cali, adc_cali_en_t; + int o_slope, o_slope_sign, ts_id; + u32 idx, i, ft_pgm, mts, temp0, temp1, temp2; + + for (i = 0; i < svsp->efuse_max; i++) + if (svsp->efuse[i]) + dev_info(svsp->dev, "M_HW_RES%d: 0x%08x\n", + i, svsp->efuse[i]); + + if (!svsp->efuse[2]) { + dev_notice(svsp->dev, "svs_efuse[2] = 0x0?\n"); + return false; + } + + /* Svs efuse parsing */ + ft_pgm = (svsp->efuse[0] >> 4) & GENMASK(3, 0); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (ft_pgm <= 1) + svsb->volt_flags |= SVSB_INIT01_VOLT_IGNORE; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + svsb->bdes = svsp->efuse[16] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[16] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[16] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[16] >> 24) & GENMASK(7, 0); + svsb->mtdes = (svsp->efuse[17] >> 16) & GENMASK(7, 0); + + if (ft_pgm <= 3) + svsb->volt_od += 10; + else + svsb->volt_od += 2; + break; + case SVSB_CPU_BIG: + svsb->bdes = svsp->efuse[18] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[18] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[18] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[18] >> 24) & GENMASK(7, 0); + svsb->mtdes = svsp->efuse[17] & GENMASK(7, 0); + + if (ft_pgm <= 3) + svsb->volt_od += 15; + else + svsb->volt_od += 12; + break; + case SVSB_CCI: + svsb->bdes = svsp->efuse[4] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[4] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[4] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[4] >> 24) & GENMASK(7, 0); + svsb->mtdes = (svsp->efuse[5] >> 16) & GENMASK(7, 0); + + if (ft_pgm <= 3) + svsb->volt_od += 10; + else + svsb->volt_od += 2; + break; + case SVSB_GPU: + svsb->bdes = svsp->efuse[6] & GENMASK(7, 0); + svsb->mdes = (svsp->efuse[6] >> 8) & GENMASK(7, 0); + svsb->dcbdet = (svsp->efuse[6] >> 16) & GENMASK(7, 0); + svsb->dcmdet = (svsp->efuse[6] >> 24) & GENMASK(7, 0); + svsb->mtdes = svsp->efuse[5] & GENMASK(7, 0); + + if (ft_pgm >= 2) { + svsb->freq_base = 800000000; /* 800MHz */ + svsb->dvt_fixed = 2; + } + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + return false; + } + } + + /* Get thermal efuse by nvmem */ + cell = nvmem_cell_get(svsp->dev, "t-calibration-data"); + if (IS_ERR(cell)) { + dev_err(svsp->dev, "no \"t-calibration-data\"? 
%ld\n", + PTR_ERR(cell)); + goto remove_mt8183_svsb_mon_mode; + } + + svsp->tefuse = nvmem_cell_read(cell, &svsp->tefuse_max); + if (IS_ERR(svsp->tefuse)) { + dev_err(svsp->dev, "cannot read thermal efuse: %ld\n", + PTR_ERR(svsp->tefuse)); + nvmem_cell_put(cell); + goto remove_mt8183_svsb_mon_mode; + } + + svsp->tefuse_max /= sizeof(u32); + nvmem_cell_put(cell); + + /* Thermal efuse parsing */ + adc_ge_t = (svsp->tefuse[1] >> 22) & GENMASK(9, 0); + adc_oe_t = (svsp->tefuse[1] >> 12) & GENMASK(9, 0); + + o_vtsmcu[0] = (svsp->tefuse[0] >> 17) & GENMASK(8, 0); + o_vtsmcu[1] = (svsp->tefuse[0] >> 8) & GENMASK(8, 0); + o_vtsmcu[2] = svsp->tefuse[1] & GENMASK(8, 0); + o_vtsmcu[3] = (svsp->tefuse[2] >> 23) & GENMASK(8, 0); + o_vtsmcu[4] = (svsp->tefuse[2] >> 5) & GENMASK(8, 0); + o_vtsabb = (svsp->tefuse[2] >> 14) & GENMASK(8, 0); + + degc_cali = (svsp->tefuse[0] >> 1) & GENMASK(5, 0); + adc_cali_en_t = svsp->tefuse[0] & BIT(0); + o_slope_sign = (svsp->tefuse[0] >> 7) & BIT(0); + + ts_id = (svsp->tefuse[1] >> 9) & BIT(0); + o_slope = (svsp->tefuse[0] >> 26) & GENMASK(5, 0); + + if (adc_cali_en_t == 1) { + if (!ts_id) + o_slope = 0; + + if (adc_ge_t < 265 || adc_ge_t > 758 || + adc_oe_t < 265 || adc_oe_t > 758 || + o_vtsmcu[0] < -8 || o_vtsmcu[0] > 484 || + o_vtsmcu[1] < -8 || o_vtsmcu[1] > 484 || + o_vtsmcu[2] < -8 || o_vtsmcu[2] > 484 || + o_vtsmcu[3] < -8 || o_vtsmcu[3] > 484 || + o_vtsmcu[4] < -8 || o_vtsmcu[4] > 484 || + o_vtsabb < -8 || o_vtsabb > 484 || + degc_cali < 1 || degc_cali > 63) { + dev_err(svsp->dev, "bad thermal efuse, no mon mode\n"); + goto remove_mt8183_svsb_mon_mode; + } + } else { + dev_err(svsp->dev, "no thermal efuse, no mon mode\n"); + goto remove_mt8183_svsb_mon_mode; + } + + ge = ((adc_ge_t - 512) * 10000) / 4096; + oe = (adc_oe_t - 512); + gain = (10000 + ge); + + format[0] = (o_vtsmcu[0] + 3350 - oe); + format[1] = (o_vtsmcu[1] + 3350 - oe); + format[2] = (o_vtsmcu[2] + 3350 - oe); + format[3] = (o_vtsmcu[3] + 3350 - oe); + format[4] = (o_vtsmcu[4] + 3350 - oe); + format[5] = (o_vtsabb + 3350 - oe); + + for (i = 0; i < 6; i++) + x_roomt[i] = (((format[i] * 10000) / 4096) * 10000) / gain; + + temp0 = (10000 * 100000 / gain) * 15 / 18; + + if (!o_slope_sign) + mts = (temp0 * 10) / (1534 + o_slope * 10); + else + mts = (temp0 * 10) / (1534 - o_slope * 10); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + svsb->mts = mts; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + tb_roomt = x_roomt[3]; + break; + case SVSB_CPU_BIG: + tb_roomt = x_roomt[4]; + break; + case SVSB_CCI: + tb_roomt = x_roomt[3]; + break; + case SVSB_GPU: + tb_roomt = x_roomt[1]; + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + goto remove_mt8183_svsb_mon_mode; + } + + temp0 = (degc_cali * 10 / 2); + temp1 = ((10000 * 100000 / 4096 / gain) * + oe + tb_roomt * 10) * 15 / 18; + + if (!o_slope_sign) + temp2 = temp1 * 100 / (1534 + o_slope * 10); + else + temp2 = temp1 * 100 / (1534 - o_slope * 10); + + svsb->bts = (temp0 + temp2 - 250) * 4 / 10; + } + + return true; + +remove_mt8183_svsb_mon_mode: + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + svsb->mode_support &= ~SVSB_MODE_MON; + } + + return true; +} + +static bool svs_is_efuse_data_correct(struct svs_platform *svsp) +{ + struct nvmem_cell *cell; + + /* Get svs efuse by nvmem */ + cell = nvmem_cell_get(svsp->dev, "svs-calibration-data"); + if (IS_ERR(cell)) { + dev_err(svsp->dev, "no \"svs-calibration-data\"? 
%ld\n", + PTR_ERR(cell)); + return false; + } + + svsp->efuse = nvmem_cell_read(cell, &svsp->efuse_max); + if (IS_ERR(svsp->efuse)) { + dev_err(svsp->dev, "cannot read svs efuse: %ld\n", + PTR_ERR(svsp->efuse)); + nvmem_cell_put(cell); + return false; + } + + svsp->efuse_max /= sizeof(u32); + nvmem_cell_put(cell); + + return svsp->efuse_parsing(svsp); +} + +static struct device *svs_get_subsys_device(struct svs_platform *svsp, + const char *node_name) +{ + struct platform_device *pdev; + struct device_node *np; + + np = of_find_node_by_name(NULL, node_name); + if (!np) { + dev_err(svsp->dev, "cannot find %s node\n", node_name); + return ERR_PTR(-ENODEV); + } + + pdev = of_find_device_by_node(np); + if (!pdev) { + of_node_put(np); + dev_err(svsp->dev, "cannot find pdev by %s\n", node_name); + return ERR_PTR(-ENXIO); + } + + of_node_put(np); + + return &pdev->dev; +} + +static struct device *svs_add_device_link(struct svs_platform *svsp, + const char *node_name) +{ + struct device *dev; + struct device_link *sup_link; + + if (!node_name) { + dev_err(svsp->dev, "node name cannot be null\n"); + return ERR_PTR(-EINVAL); + } + + dev = svs_get_subsys_device(svsp, node_name); + if (IS_ERR(dev)) + return dev; + + sup_link = device_link_add(svsp->dev, dev, + DL_FLAG_AUTOREMOVE_CONSUMER); + if (!sup_link) { + dev_err(svsp->dev, "sup_link is NULL\n"); + return ERR_PTR(-EINVAL); + } + + if (sup_link->supplier->links.status != DL_DEV_DRIVER_BOUND) + return ERR_PTR(-EPROBE_DEFER); + + return dev; +} + +static int svs_mt8192_platform_probe(struct svs_platform *svsp) +{ + struct device *dev; + struct svs_bank *svsb; + u32 idx; + + svsp->rst = devm_reset_control_get_optional(svsp->dev, "svs_rst"); + if (IS_ERR(svsp->rst)) + return dev_err_probe(svsp->dev, PTR_ERR(svsp->rst), + "cannot get svs reset control\n"); + + dev = svs_add_device_link(svsp, "lvts"); + if (IS_ERR(dev)) + return dev_err_probe(svsp->dev, PTR_ERR(dev), + "failed to get lvts device\n"); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + if (svsb->type == SVSB_HIGH) + svsb->opp_dev = svs_add_device_link(svsp, "mali"); + else if (svsb->type == SVSB_LOW) + svsb->opp_dev = svs_get_subsys_device(svsp, "mali"); + + if (IS_ERR(svsb->opp_dev)) + return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), + "failed to get OPP device for bank %d\n", + idx); + } + + return 0; +} + +static int svs_mt8183_platform_probe(struct svs_platform *svsp) +{ + struct device *dev; + struct svs_bank *svsb; + u32 idx; + + dev = svs_add_device_link(svsp, "thermal"); + if (IS_ERR(dev)) + return dev_err_probe(svsp->dev, PTR_ERR(dev), + "failed to get thermal device\n"); + + for (idx = 0; idx < svsp->bank_max; idx++) { + svsb = &svsp->banks[idx]; + + switch (svsb->sw_id) { + case SVSB_CPU_LITTLE: + case SVSB_CPU_BIG: + svsb->opp_dev = get_cpu_device(svsb->cpu_id); + break; + case SVSB_CCI: + svsb->opp_dev = svs_add_device_link(svsp, "cci"); + break; + case SVSB_GPU: + svsb->opp_dev = svs_add_device_link(svsp, "gpu"); + break; + default: + dev_err(svsb->dev, "unknown sw_id: %u\n", svsb->sw_id); + return -EINVAL; + } + + if (IS_ERR(svsb->opp_dev)) + return dev_err_probe(svsp->dev, PTR_ERR(svsb->opp_dev), + "failed to get OPP device for bank %d\n", + idx); + } + + return 0; +} + +static struct svs_bank svs_mt8192_banks[] = { + { + .sw_id = SVSB_GPU, + .type = SVSB_LOW, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT, + .mode_support = SVSB_MODE_INIT02, + .opp_count = 
MAX_OPP_ENTRIES, + .freq_base = 688000000, + .turn_freq_base = 688000000, + .volt_step = 6250, + .volt_base = 400000, + .vmax = 0x60, + .vmin = 0x1a, + .age_config = 0x555555, + .dc_config = 0x1, + .dvt_fixed = 0x1, + .vco = 0x18, + .chk_shift = 0x87, + .core_sel = 0x0fff0100, + .int_st = BIT(0), + .ctl0 = 0x00540003, + }, + { + .sw_id = SVSB_GPU, + .type = SVSB_HIGH, + .set_freq_pct = svs_set_bank_freq_pct_v3, + .get_volts = svs_get_bank_volts_v3, + .tzone_name = "gpu1", + .volt_flags = SVSB_REMOVE_DVTFIXED_VOLT | + SVSB_MON_VOLT_IGNORE, + .mode_support = SVSB_MODE_INIT02 | SVSB_MODE_MON, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 902000000, + .turn_freq_base = 688000000, + .volt_step = 6250, + .volt_base = 400000, + .vmax = 0x60, + .vmin = 0x1a, + .age_config = 0x555555, + .dc_config = 0x1, + .dvt_fixed = 0x6, + .vco = 0x18, + .chk_shift = 0x87, + .core_sel = 0x0fff0101, + .int_st = BIT(1), + .ctl0 = 0x00540003, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 0, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 7, + }, +}; + +static struct svs_bank svs_mt8183_banks[] = { + { + .sw_id = SVSB_CPU_LITTLE, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .cpu_id = 0, + .buck_name = "proc", + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 1989000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x64, + .vmin = 0x18, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x7, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0000, + .int_st = BIT(0), + .ctl0 = 0x00010001, + }, + { + .sw_id = SVSB_CPU_BIG, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .cpu_id = 4, + .buck_name = "proc", + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 1989000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x58, + .vmin = 0x10, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x7, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0001, + .int_st = BIT(1), + .ctl0 = 0x00000001, + }, + { + .sw_id = SVSB_CCI, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .buck_name = "proc", + .volt_flags = SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 1196000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x64, + .vmin = 0x18, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x7, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0002, + .int_st = BIT(2), + .ctl0 = 0x00100003, + }, + { + .sw_id = SVSB_GPU, + .set_freq_pct = svs_set_bank_freq_pct_v2, + .get_volts = svs_get_bank_volts_v2, + .buck_name = "mali", + .tzone_name = "tzts2", + .volt_flags = SVSB_INIT01_PD_REQ | + SVSB_INIT01_VOLT_INC_ONLY, + .mode_support = SVSB_MODE_INIT01 | SVSB_MODE_INIT02 | + SVSB_MODE_MON, + .opp_count = MAX_OPP_ENTRIES, + .freq_base = 900000000, + .vboot = 0x30, + .volt_step = 6250, + .volt_base = 500000, + .vmax = 0x40, + .vmin = 0x14, + .age_config = 0x555555, + .dc_config = 0x555555, + .dvt_fixed = 0x3, + .vco = 0x10, + .chk_shift = 0x77, + .core_sel = 0x8fff0003, + .int_st = BIT(3), + .ctl0 = 0x00050001, + .tzone_htemp = 85000, + .tzone_htemp_voffset = 0, + .tzone_ltemp = 25000, + .tzone_ltemp_voffset = 3, + }, +}; + 
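+/*
+ * Worked example of the voltage encoding used in these tables (inferred
+ * from the fields, not spelled out in the patch): vmax/vmin/vboot are
+ * codes scaled by volt_step on top of volt_base, i.e.
+ * uV = volt_base + code * volt_step. For the SVSB_CPU_LITTLE bank above:
+ *   .vmax = 0x64 -> 500000 + 100 * 6250 = 1125000 uV
+ *   .vmin = 0x18 -> 500000 +  24 * 6250 =  650000 uV
+ */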
+static const struct svs_platform_data svs_mt8192_platform_data = { + .name = "mt8192-svs", + .banks = svs_mt8192_banks, + .efuse_parsing = svs_mt8192_efuse_parsing, + .probe = svs_mt8192_platform_probe, + .irqflags = IRQF_TRIGGER_HIGH, + .regs = svs_regs_v2, + .bank_max = ARRAY_SIZE(svs_mt8192_banks), +}; + +static const struct svs_platform_data svs_mt8183_platform_data = { + .name = "mt8183-svs", + .banks = svs_mt8183_banks, + .efuse_parsing = svs_mt8183_efuse_parsing, + .probe = svs_mt8183_platform_probe, + .irqflags = IRQF_TRIGGER_LOW, + .regs = svs_regs_v2, + .bank_max = ARRAY_SIZE(svs_mt8183_banks), +}; + +static const struct of_device_id svs_of_match[] = { + { + .compatible = "mediatek,mt8192-svs", + .data = &svs_mt8192_platform_data, + }, { + .compatible = "mediatek,mt8183-svs", + .data = &svs_mt8183_platform_data, + }, { + /* Sentinel */ + }, +}; + +static struct svs_platform *svs_platform_probe(struct platform_device *pdev) +{ + struct svs_platform *svsp; + const struct svs_platform_data *svsp_data; + int ret; + + svsp_data = of_device_get_match_data(&pdev->dev); + if (!svsp_data) { + dev_err(&pdev->dev, "no svs platform data?\n"); + return ERR_PTR(-EPERM); + } + + svsp = devm_kzalloc(&pdev->dev, sizeof(*svsp), GFP_KERNEL); + if (!svsp) + return ERR_PTR(-ENOMEM); + + svsp->dev = &pdev->dev; + svsp->name = svsp_data->name; + svsp->banks = svsp_data->banks; + svsp->efuse_parsing = svsp_data->efuse_parsing; + svsp->probe = svsp_data->probe; + svsp->irqflags = svsp_data->irqflags; + svsp->regs = svsp_data->regs; + svsp->bank_max = svsp_data->bank_max; + + ret = svsp->probe(svsp); + if (ret) + return ERR_PTR(ret); + + return svsp; +} + +static int svs_probe(struct platform_device *pdev) +{ + struct svs_platform *svsp; + unsigned int svsp_irq; + int ret; + + svsp = svs_platform_probe(pdev); + if (IS_ERR(svsp)) + return PTR_ERR(svsp); + + if (!svs_is_efuse_data_correct(svsp)) { + dev_notice(svsp->dev, "efuse data isn't correct\n"); + ret = -EPERM; + goto svs_probe_free_resource; + } + + ret = svs_bank_resource_setup(svsp); + if (ret) { + dev_err(svsp->dev, "svs bank resource setup fail: %d\n", ret); + goto svs_probe_free_resource; + } + + svsp_irq = irq_of_parse_and_map(svsp->dev->of_node, 0); + ret = devm_request_threaded_irq(svsp->dev, svsp_irq, NULL, svs_isr, + svsp->irqflags | IRQF_ONESHOT, + svsp->name, svsp); + if (ret) { + dev_err(svsp->dev, "register irq(%d) failed: %d\n", + svsp_irq, ret); + goto svs_probe_free_resource; + } + + svsp->main_clk = devm_clk_get(svsp->dev, "main"); + if (IS_ERR(svsp->main_clk)) { + dev_err(svsp->dev, "failed to get clock: %ld\n", + PTR_ERR(svsp->main_clk)); + ret = PTR_ERR(svsp->main_clk); + goto svs_probe_free_resource; + } + + ret = clk_prepare_enable(svsp->main_clk); + if (ret) { + dev_err(svsp->dev, "cannot enable main clk: %d\n", ret); + goto svs_probe_free_resource; + } + + svsp->base = of_iomap(svsp->dev->of_node, 0); + if (IS_ERR_OR_NULL(svsp->base)) { + dev_err(svsp->dev, "cannot find svs register base\n"); + ret = -EINVAL; + goto svs_probe_clk_disable; + } + + ret = svs_start(svsp); + if (ret) { + dev_err(svsp->dev, "svs start fail: %d\n", ret); + goto svs_probe_iounmap; + } + + ret = svs_create_debug_cmds(svsp); + if (ret) { + dev_err(svsp->dev, "svs create debug cmds fail: %d\n", ret); + goto svs_probe_iounmap; + } + + return 0; + +svs_probe_iounmap: + iounmap(svsp->base); + +svs_probe_clk_disable: + clk_disable_unprepare(svsp->main_clk); + +svs_probe_free_resource: + if (!IS_ERR_OR_NULL(svsp->efuse)) + kfree(svsp->efuse); + if 
(!IS_ERR_OR_NULL(svsp->tefuse)) + kfree(svsp->tefuse); + + return ret; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(svs_pm_ops, svs_suspend, svs_resume); + +static struct platform_driver svs_driver = { + .probe = svs_probe, + .driver = { + .name = "mtk-svs", + .pm = &svs_pm_ops, + .of_match_table = of_match_ptr(svs_of_match), + }, +}; + +module_platform_driver(svs_driver); + +MODULE_AUTHOR("Roger Lu <roger.lu@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek SVS driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/renesas/r8a779a0-sysc.c b/drivers/soc/renesas/r8a779a0-sysc.c index fdfc857df334..04f1bc322ae7 100644 --- a/drivers/soc/renesas/r8a779a0-sysc.c +++ b/drivers/soc/renesas/r8a779a0-sysc.c @@ -57,11 +57,11 @@ static struct rcar_gen4_sysc_area r8a779a0_areas[] __initdata = { { "a2cv6", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR }, { "a2cn2", R8A779A0_PD_A2CN2, R8A779A0_PD_A3IR }, { "a2imp23", R8A779A0_PD_A2IMP23, R8A779A0_PD_A3IR }, - { "a2dp1", R8A779A0_PD_A2DP0, R8A779A0_PD_A3IR }, - { "a2cv2", R8A779A0_PD_A2CV0, R8A779A0_PD_A3IR }, - { "a2cv3", R8A779A0_PD_A2CV1, R8A779A0_PD_A3IR }, - { "a2cv5", R8A779A0_PD_A2CV4, R8A779A0_PD_A3IR }, - { "a2cv7", R8A779A0_PD_A2CV6, R8A779A0_PD_A3IR }, + { "a2dp1", R8A779A0_PD_A2DP1, R8A779A0_PD_A3IR }, + { "a2cv2", R8A779A0_PD_A2CV2, R8A779A0_PD_A3IR }, + { "a2cv3", R8A779A0_PD_A2CV3, R8A779A0_PD_A3IR }, + { "a2cv5", R8A779A0_PD_A2CV5, R8A779A0_PD_A3IR }, + { "a2cv7", R8A779A0_PD_A2CV7, R8A779A0_PD_A3IR }, { "a2cn1", R8A779A0_PD_A2CN1, R8A779A0_PD_A3IR }, { "a1cnn0", R8A779A0_PD_A1CNN0, R8A779A0_PD_A2CN0 }, { "a1cnn2", R8A779A0_PD_A1CNN2, R8A779A0_PD_A2CN2 }, diff --git a/drivers/soc/renesas/rcar-gen4-sysc.h b/drivers/soc/renesas/rcar-gen4-sysc.h index fe2d98254754..388cfa8f8f9f 100644 --- a/drivers/soc/renesas/rcar-gen4-sysc.h +++ b/drivers/soc/renesas/rcar-gen4-sysc.h @@ -25,8 +25,8 @@ struct rcar_gen4_sysc_area { const char *name; u8 pdr; /* PDRn */ - int parent; /* -1 if none */ - unsigned int flags; /* See PD_* */ + s8 parent; /* -1 if none */ + u8 flags; /* See PD_* */ }; /* diff --git a/drivers/soc/renesas/rcar-sysc.h b/drivers/soc/renesas/rcar-sysc.h index 8d861c1cfdf7..266c599a0a9b 100644 --- a/drivers/soc/renesas/rcar-sysc.h +++ b/drivers/soc/renesas/rcar-sysc.h @@ -31,8 +31,8 @@ struct rcar_sysc_area { u16 chan_offs; /* Offset of PWRSR register for this area */ u8 chan_bit; /* Bit in PWR* (except for PWRUP in PWRSR) */ u8 isr_bit; /* Bit in SYSCI*R */ - int parent; /* -1 if none */ - unsigned int flags; /* See PD_* */ + s8 parent; /* -1 if none */ + u8 flags; /* See PD_* */ }; diff --git a/drivers/soc/sunxi/Kconfig b/drivers/soc/sunxi/Kconfig index 1fef0e711056..8aecbc9b1976 100644 --- a/drivers/soc/sunxi/Kconfig +++ b/drivers/soc/sunxi/Kconfig @@ -6,6 +6,7 @@ config SUNXI_MBUS bool default ARCH_SUNXI + depends on ARM || ARM64 help Say y to enable the fixups needed to support the Allwinner MBUS DMA quirks. 
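The rcar-sysc field narrowing above is easy to sanity-check in user space; a minimal sketch, assuming an LP64 build with natural alignment (the struct names here are illustrative, mirroring the fields shown in the hunks):

#include <stdint.h>
#include <stdio.h>

struct area_before {	/* old field types */
	const char *name;
	uint16_t chan_offs;
	uint8_t chan_bit;
	uint8_t isr_bit;
	int parent;
	unsigned int flags;
};

struct area_after {	/* new field types */
	const char *name;
	uint16_t chan_offs;
	uint8_t chan_bit;
	uint8_t isr_bit;
	int8_t parent;
	uint8_t flags;
};

int main(void)
{
	/* Typically prints "24 16": each area descriptor shrinks by a third. */
	printf("%zu %zu\n", sizeof(struct area_before), sizeof(struct area_after));
	return 0;
}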
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c index a23d4f6329f5..31d778e9d255 100644 --- a/drivers/spi/spi-cadence.c +++ b/drivers/spi/spi-cadence.c @@ -69,6 +69,7 @@ #define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */ #define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */ #define CDNS_SPI_SS0 0x1 /* Slave Select zero */ +#define CDNS_SPI_NOSS 0x3C /* No Slave select */ /* * SPI Interrupt Registers bit Masks @@ -92,9 +93,6 @@ #define CDNS_SPI_ER_ENABLE 0x00000001 /* SPI Enable Bit Mask */ #define CDNS_SPI_ER_DISABLE 0x0 /* SPI Disable Bit Mask */ -/* SPI FIFO depth in bytes */ -#define CDNS_SPI_FIFO_DEPTH 128 - /* Default number of chip select lines */ #define CDNS_SPI_DEFAULT_NUM_CS 4 @@ -110,6 +108,7 @@ * @rx_bytes: Number of bytes requested * @dev_busy: Device busy flag * @is_decoded_cs: Flag for decoder property set or not + * @tx_fifo_depth: Depth of the TX FIFO */ struct cdns_spi { void __iomem *regs; @@ -123,6 +122,7 @@ struct cdns_spi { int rx_bytes; u8 dev_busy; u32 is_decoded_cs; + unsigned int tx_fifo_depth; }; /* Macros for the SPI controller read/write */ @@ -304,7 +304,7 @@ static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi) { unsigned long trans_cnt = 0; - while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) && + while ((trans_cnt < xspi->tx_fifo_depth) && (xspi->tx_bytes > 0)) { /* When xspi in busy condition, bytes may send failed, @@ -450,20 +450,43 @@ static int cdns_prepare_transfer_hardware(struct spi_master *master) * @master: Pointer to the spi_master structure which provides * information about the controller. * - * This function disables the SPI master controller. + * This function disables the SPI master controller when no slave selected. * * Return: 0 always */ static int cdns_unprepare_transfer_hardware(struct spi_master *master) { struct cdns_spi *xspi = spi_master_get_devdata(master); + u32 ctrl_reg; - cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); + /* Disable the SPI if slave is deselected */ + ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR); + ctrl_reg = (ctrl_reg & CDNS_SPI_CR_SSCTRL) >> CDNS_SPI_SS_SHIFT; + if (ctrl_reg == CDNS_SPI_NOSS) + cdns_spi_write(xspi, CDNS_SPI_ER, CDNS_SPI_ER_DISABLE); return 0; } /** + * cdns_spi_detect_fifo_depth - Detect the FIFO depth of the hardware + * @xspi: Pointer to the cdns_spi structure + * + * The depth of the TX FIFO is a synthesis configuration parameter of the SPI + * IP. The FIFO threshold register is sized so that its maximum value can be the + * FIFO size - 1. This is used to detect the size of the FIFO. 
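+ * For example, with a 128-byte FIFO the threshold register can hold at
+ * most 127: writing 0xffff reads back 0x7f, and 0x7f + 1 = 128.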
+ */ +static void cdns_spi_detect_fifo_depth(struct cdns_spi *xspi) +{ + /* The MSBs will get truncated giving us the size of the FIFO */ + cdns_spi_write(xspi, CDNS_SPI_THLD, 0xffff); + xspi->tx_fifo_depth = cdns_spi_read(xspi, CDNS_SPI_THLD) + 1; + + /* Reset to default */ + cdns_spi_write(xspi, CDNS_SPI_THLD, 0x1); +} + +/** * cdns_spi_probe - Probe method for the SPI driver * @pdev: Pointer to the platform_device structure * @@ -535,6 +558,8 @@ static int cdns_spi_probe(struct platform_device *pdev) if (ret < 0) xspi->is_decoded_cs = 0; + cdns_spi_detect_fifo_depth(xspi); + /* SPI controller initializations */ cdns_spi_init_hw(xspi); diff --git a/drivers/spi/spi-mem.c b/drivers/spi/spi-mem.c index e8de4f5017cd..0c79193d9697 100644 --- a/drivers/spi/spi-mem.c +++ b/drivers/spi/spi-mem.c @@ -808,7 +808,7 @@ int spi_mem_poll_status(struct spi_mem *mem, op->data.dir != SPI_MEM_DATA_IN) return -EINVAL; - if (ctlr->mem_ops && ctlr->mem_ops->poll_status) { + if (ctlr->mem_ops && ctlr->mem_ops->poll_status && !mem->spi->cs_gpiod) { ret = spi_mem_access_start(mem); if (ret) return ret; diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c index a08215eb9e14..79242dc5272d 100644 --- a/drivers/spi/spi-rockchip.c +++ b/drivers/spi/spi-rockchip.c @@ -381,15 +381,18 @@ static int rockchip_spi_prepare_irq(struct rockchip_spi *rs, rs->tx_left = rs->tx ? xfer->len / rs->n_bytes : 0; rs->rx_left = xfer->len / rs->n_bytes; - if (rs->cs_inactive) - writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR); - else - writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR); + writel_relaxed(0xffffffff, rs->regs + ROCKCHIP_SPI_ICR); + spi_enable_chip(rs, true); if (rs->tx_left) rockchip_spi_pio_writer(rs); + if (rs->cs_inactive) + writel_relaxed(INT_RF_FULL | INT_CS_INACTIVE, rs->regs + ROCKCHIP_SPI_IMR); + else + writel_relaxed(INT_RF_FULL, rs->regs + ROCKCHIP_SPI_IMR); + /* 1 means the transfer is in progress */ return 1; } diff --git a/drivers/staging/olpc_dcon/Kconfig b/drivers/staging/olpc_dcon/Kconfig index d1a0dea09ef0..d0ba34cc32f7 100644 --- a/drivers/staging/olpc_dcon/Kconfig +++ b/drivers/staging/olpc_dcon/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 config FB_OLPC_DCON tristate "One Laptop Per Child Display CONtroller support" - depends on OLPC && FB + depends on OLPC && FB && BROKEN depends on I2C depends on GPIO_CS5535 && ACPI select BACKLIGHT_CLASS_DEVICE diff --git a/drivers/staging/r8188eu/core/rtw_xmit.c b/drivers/staging/r8188eu/core/rtw_xmit.c index 3d8e9dea7651..7135d89caac1 100644 --- a/drivers/staging/r8188eu/core/rtw_xmit.c +++ b/drivers/staging/r8188eu/core/rtw_xmit.c @@ -178,8 +178,7 @@ s32 _rtw_init_xmit_priv(struct xmit_priv *pxmitpriv, struct adapter *padapter) pxmitpriv->free_xmit_extbuf_cnt = num_xmit_extbuf; - res = rtw_alloc_hwxmits(padapter); - if (res) { + if (rtw_alloc_hwxmits(padapter)) { res = _FAIL; goto exit; } @@ -1483,19 +1482,10 @@ int rtw_alloc_hwxmits(struct adapter *padapter) hwxmits = pxmitpriv->hwxmits; - if (pxmitpriv->hwxmit_entry == 5) { - hwxmits[0] .sta_queue = &pxmitpriv->bm_pending; - hwxmits[1] .sta_queue = &pxmitpriv->vo_pending; - hwxmits[2] .sta_queue = &pxmitpriv->vi_pending; - hwxmits[3] .sta_queue = &pxmitpriv->bk_pending; - hwxmits[4] .sta_queue = &pxmitpriv->be_pending; - } else if (pxmitpriv->hwxmit_entry == 4) { - hwxmits[0] .sta_queue = &pxmitpriv->vo_pending; - hwxmits[1] .sta_queue = &pxmitpriv->vi_pending; - hwxmits[2] .sta_queue = &pxmitpriv->be_pending; - hwxmits[3] .sta_queue = 
&pxmitpriv->bk_pending; - } else { - } + hwxmits[0].sta_queue = &pxmitpriv->vo_pending; + hwxmits[1].sta_queue = &pxmitpriv->vi_pending; + hwxmits[2].sta_queue = &pxmitpriv->be_pending; + hwxmits[3].sta_queue = &pxmitpriv->bk_pending; return 0; } diff --git a/drivers/staging/r8188eu/os_dep/ioctl_linux.c b/drivers/staging/r8188eu/os_dep/ioctl_linux.c index 1b09462ca908..8dd280e2739a 100644 --- a/drivers/staging/r8188eu/os_dep/ioctl_linux.c +++ b/drivers/staging/r8188eu/os_dep/ioctl_linux.c @@ -403,7 +403,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 5 : 13; - wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, KeyMaterial); + wep_total_len = wep_key_len + sizeof(*pwep); pwep = kzalloc(wep_total_len, GFP_KERNEL); if (!pwep) goto exit; diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c index ece97e37ac91..30374a820496 100644 --- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c +++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c @@ -90,7 +90,8 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param, if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 5 : 13; wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material); - pwep = kzalloc(wep_total_len, GFP_KERNEL); + /* Allocate a full structure to avoid potentially running off the end. */ + pwep = kzalloc(sizeof(*pwep), GFP_KERNEL); if (!pwep) { ret = -ENOMEM; goto exit; @@ -582,7 +583,8 @@ static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, if (wep_key_len > 0) { wep_key_len = wep_key_len <= 5 ? 5 : 13; wep_total_len = wep_key_len + FIELD_OFFSET(struct ndis_802_11_wep, key_material); - pwep = kzalloc(wep_total_len, GFP_KERNEL); + /* Allocate a full structure to avoid potentially running off the end. 
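+ * Assuming the usual fixed 16-byte key_material tail in these staging
+ * drivers, sizeof(*pwep) is always at least wep_total_len, since
+ * wep_key_len was clamped to 5 or 13 above.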
*/ + pwep = kzalloc(sizeof(*pwep), GFP_KERNEL); if (!pwep) goto exit; diff --git a/drivers/tty/goldfish.c b/drivers/tty/goldfish.c index c7968aecd870..d02de3f0326f 100644 --- a/drivers/tty/goldfish.c +++ b/drivers/tty/goldfish.c @@ -426,7 +426,7 @@ static int goldfish_tty_remove(struct platform_device *pdev) tty_unregister_device(goldfish_tty_driver, qtty->console.index); iounmap(qtty->base); qtty->base = NULL; - free_irq(qtty->irq, pdev); + free_irq(qtty->irq, qtty); tty_port_destroy(&qtty->port); goldfish_tty_current_line_count--; if (goldfish_tty_current_line_count == 0) diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 137eebdcfda9..fd4d24f61c46 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -455,7 +455,7 @@ static void gsm_hex_dump_bytes(const char *fname, const u8 *data, return; } - prefix = kasprintf(GFP_KERNEL, "%s: ", fname); + prefix = kasprintf(GFP_ATOMIC, "%s: ", fname); if (!prefix) return; print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 16, 1, data, len, diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index 78b6dedc43e6..8f32fe9e149e 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1517,6 +1517,8 @@ static inline void __stop_tx(struct uart_8250_port *p) unsigned char lsr = serial_in(p, UART_LSR); u64 stop_delay = 0; + p->lsr_saved_flags |= lsr & LSR_SAVE_FLAGS; + if (!(lsr & UART_LSR_THRE)) return; /* diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 4733a233bd0c..f8f950641ad9 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1306,6 +1306,7 @@ static const struct uart_ops qcom_geni_console_pops = { .stop_tx = qcom_geni_serial_stop_tx, .start_tx = qcom_geni_serial_start_tx, .stop_rx = qcom_geni_serial_stop_rx, + .start_rx = qcom_geni_serial_start_rx, .set_termios = qcom_geni_serial_set_termios, .startup = qcom_geni_serial_startup, .request_port = qcom_geni_serial_request_port, diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 9a85b41caa0a..338ebadfd44b 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2214,11 +2214,12 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) /* * Nothing to do if the console is not suspending * except stop_rx to prevent any asynchronous data - * over RX line. Re-start_rx, when required, is - * done by set_termios in resume sequence + * over RX line. However ensure that we will be + * able to Re-start_rx later. */ if (!console_suspend_enabled && uart_console(uport)) { - uport->ops->stop_rx(uport); + if (uport->ops->start_rx) + uport->ops->stop_rx(uport); goto unlock; } @@ -2310,6 +2311,8 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) if (console_suspend_enabled) uart_change_pm(state, UART_PM_STATE_ON); uport->ops->set_termios(uport, &termios, NULL); + if (!console_suspend_enabled && uport->ops->start_rx) + uport->ops->start_rx(uport); if (console_suspend_enabled) console_start(uport->cons); } diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c index 18e623325887..d2b2720db6ca 100644 --- a/drivers/tty/sysrq.c +++ b/drivers/tty/sysrq.c @@ -581,7 +581,6 @@ void __handle_sysrq(int key, bool check_mask) rcu_sysrq_start(); rcu_read_lock(); - printk_prefer_direct_enter(); /* * Raise the apparent loglevel to maximum so that the sysrq header * is shown to provide the user with positive feedback. 
We do not @@ -623,7 +622,6 @@ void __handle_sysrq(int key, bool check_mask) pr_cont("\n"); console_loglevel = orig_log_level; } - printk_prefer_direct_exit(); rcu_read_unlock(); rcu_sysrq_end(); diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 01fb4bad86be..ce86d1b790c0 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -748,17 +748,28 @@ static enum utp_ocs ufshcd_get_tr_ocs(struct ufshcd_lrb *lrbp) } /** - * ufshcd_utrl_clear - Clear a bit in UTRLCLR register + * ufshcd_utrl_clear() - Clear requests from the controller request list. * @hba: per adapter instance - * @pos: position of the bit to be cleared + * @mask: mask with one bit set for each request to be cleared */ -static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 pos) +static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask) { if (hba->quirks & UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR) - ufshcd_writel(hba, (1 << pos), REG_UTP_TRANSFER_REQ_LIST_CLEAR); - else - ufshcd_writel(hba, ~(1 << pos), - REG_UTP_TRANSFER_REQ_LIST_CLEAR); + mask = ~mask; + /* + * From the UFSHCI specification: "UTP Transfer Request List CLear + * Register (UTRLCLR): This field is bit significant. Each bit + * corresponds to a slot in the UTP Transfer Request List, where bit 0 + * corresponds to request slot 0. A bit in this field is set to ‘0’ + * by host software to indicate to the host controller that a transfer + * request slot is cleared. The host controller + * shall free up any resources associated to the request slot + * immediately, and shall set the associated bit in UTRLDBR to ‘0’. The + * host software indicates no change to request slots by setting the + * associated bits in this field to ‘1’. Bits in this field shall only + * be set ‘1’ or ‘0’ by host software when UTRLRSR is set to ‘1’." + */ + ufshcd_writel(hba, ~mask, REG_UTP_TRANSFER_REQ_LIST_CLEAR); } /** @@ -2863,27 +2874,26 @@ static int ufshcd_compose_dev_cmd(struct ufs_hba *hba, return ufshcd_compose_devman_upiu(hba, lrbp); } -static int -ufshcd_clear_cmd(struct ufs_hba *hba, int tag) +/* + * Clear all the requests from the controller for which a bit has been set in + * @mask and wait until the controller confirms that these requests have been + * cleared. + */ +static int ufshcd_clear_cmds(struct ufs_hba *hba, u32 mask) { - int err = 0; unsigned long flags; - u32 mask = 1 << tag; /* clear outstanding transaction before retry */ spin_lock_irqsave(hba->host->host_lock, flags); - ufshcd_utrl_clear(hba, tag); + ufshcd_utrl_clear(hba, mask); spin_unlock_irqrestore(hba->host->host_lock, flags); /* * wait for h/w to clear corresponding bit in door-bell. * max. wait is 1 sec. */ - err = ufshcd_wait_for_register(hba, - REG_UTP_TRANSFER_REQ_DOOR_BELL, - mask, ~mask, 1000, 1000); - - return err; + return ufshcd_wait_for_register(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL, + mask, ~mask, 1000, 1000); } static int @@ -2963,7 +2973,7 @@ static int ufshcd_wait_for_dev_cmd(struct ufs_hba *hba, err = -ETIMEDOUT; dev_dbg(hba->dev, "%s: dev_cmd request timedout, tag %d\n", __func__, lrbp->task_tag); - if (!ufshcd_clear_cmd(hba, lrbp->task_tag)) + if (!ufshcd_clear_cmds(hba, 1U << lrbp->task_tag)) /* successfully cleared the command, retry if needed */ err = -EAGAIN; /* @@ -6958,14 +6968,14 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, } /** - * ufshcd_eh_device_reset_handler - device reset handler registered to - * scsi layer. + * ufshcd_eh_device_reset_handler() - Reset a single logical unit. 
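+ * Pending commands for the LUN are gathered into a tag bitmask and
+ * cleared in a single ufshcd_clear_cmds() call; e.g. pending tags 3
+ * and 7 yield the mask (1UL << 3) | (1UL << 7) == 0x88.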
* @cmd: SCSI command pointer * * Returns SUCCESS/FAILED */ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) { + unsigned long flags, pending_reqs = 0, not_cleared = 0; struct Scsi_Host *host; struct ufs_hba *hba; u32 pos; @@ -6984,14 +6994,24 @@ static int ufshcd_eh_device_reset_handler(struct scsi_cmnd *cmd) } /* clear the commands that were pending for corresponding LUN */ - for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) { - if (hba->lrb[pos].lun == lun) { - err = ufshcd_clear_cmd(hba, pos); - if (err) - break; - __ufshcd_transfer_req_compl(hba, 1U << pos); - } + spin_lock_irqsave(&hba->outstanding_lock, flags); + for_each_set_bit(pos, &hba->outstanding_reqs, hba->nutrs) + if (hba->lrb[pos].lun == lun) + __set_bit(pos, &pending_reqs); + hba->outstanding_reqs &= ~pending_reqs; + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + if (ufshcd_clear_cmds(hba, pending_reqs) < 0) { + spin_lock_irqsave(&hba->outstanding_lock, flags); + not_cleared = pending_reqs & + ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); + hba->outstanding_reqs |= not_cleared; + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + + dev_err(hba->dev, "%s: failed to clear requests %#lx\n", + __func__, not_cleared); } + __ufshcd_transfer_req_compl(hba, pending_reqs & ~not_cleared); out: hba->req_abort_count = 0; @@ -7088,7 +7108,7 @@ static int ufshcd_try_to_abort_task(struct ufs_hba *hba, int tag) goto out; } - err = ufshcd_clear_cmd(hba, tag); + err = ufshcd_clear_cmds(hba, 1U << tag); if (err) dev_err(hba->dev, "%s: Failed clearing cmd at tag %d, err %d\n", __func__, tag, err); diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c index e45c3d6e1536..794e413800ae 100644 --- a/drivers/usb/cdns3/cdnsp-ring.c +++ b/drivers/usb/cdns3/cdnsp-ring.c @@ -1941,13 +1941,16 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) } if (enqd_len + trb_buff_len >= full_len) { - if (need_zero_pkt) - zero_len_trb = !zero_len_trb; - - field &= ~TRB_CHAIN; - field |= TRB_IOC; - more_trbs_coming = false; - preq->td.last_trb = ring->enqueue; + if (need_zero_pkt && !zero_len_trb) { + zero_len_trb = true; + } else { + zero_len_trb = false; + field &= ~TRB_CHAIN; + field |= TRB_IOC; + more_trbs_coming = false; + need_zero_pkt = false; + preq->td.last_trb = ring->enqueue; + } } /* Only set interrupt on short packet for OUT endpoints. 
*/ @@ -1962,7 +1965,7 @@ int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq) length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0); - cdnsp_queue_trb(pdev, ring, more_trbs_coming | zero_len_trb, + cdnsp_queue_trb(pdev, ring, more_trbs_coming, lower_32_bits(send_addr), upper_32_bits(send_addr), length_field, diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c index dc6c96e04bcf..3b8bf6daf7d0 100644 --- a/drivers/usb/chipidea/udc.c +++ b/drivers/usb/chipidea/udc.c @@ -1048,6 +1048,9 @@ isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req) struct ci_hdrc *ci = req->context; unsigned long flags; + if (req->status < 0) + return; + if (ci->setaddr) { hw_usb_set_address(ci, ci->address); ci->setaddr = false; diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c index f63a27d11fac..3f107a06817d 100644 --- a/drivers/usb/dwc2/hcd.c +++ b/drivers/usb/dwc2/hcd.c @@ -5190,7 +5190,7 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg) res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { retval = -EINVAL; - goto error1; + goto error2; } hcd->rsrc_start = res->start; hcd->rsrc_len = resource_size(res); diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index e027c0420dc3..573421984948 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1644,13 +1644,8 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc) * This device property is for kernel internal use only and * is expected to be set by the glue code. */ - if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) { - edev = extcon_get_extcon_dev(name); - if (!edev) - return ERR_PTR(-EPROBE_DEFER); - - return edev; - } + if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) + return extcon_get_extcon_dev(name); /* * Try to get an extcon device from the USB PHY controller's "port" diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index ba51de7dd760..6b018048fe2e 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -127,6 +127,7 @@ static const struct property_entry dwc3_pci_intel_phy_charger_detect_properties[ PROPERTY_ENTRY_STRING("dr_mode", "peripheral"), PROPERTY_ENTRY_BOOL("snps,dis_u2_susphy_quirk"), PROPERTY_ENTRY_BOOL("linux,phy_charger_detect"), + PROPERTY_ENTRY_BOOL("linux,sysdev_is_parent"), {} }; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 00427d108ab9..8716bece1072 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2976,6 +2976,7 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) struct dwc3 *dwc = dep->dwc; u32 mdwidth; int size; + int maxpacket; mdwidth = dwc3_mdwidth(dwc); @@ -2988,21 +2989,24 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) else size = DWC31_GTXFIFOSIZ_TXFDEP(size); - /* FIFO Depth is in MDWDITH bytes. Multiply */ - size *= mdwidth; - /* - * To meet performance requirement, a minimum TxFIFO size of 3x - * MaxPacketSize is recommended for endpoints that support burst and a - * minimum TxFIFO size of 2x MaxPacketSize for endpoints that don't - * support burst. Use those numbers and we can calculate the max packet - * limit as below. 
+ * maxpacket size is determined as part of the following, after assuming + * a mult value of one maxpacket: + * DWC3 revision 280A and prior: + * fifo_size = mult * (max_packet / mdwidth) + 1; + * maxpacket = mdwidth * (fifo_size - 1); + * + * DWC3 revision 290A and onwards: + * fifo_size = mult * ((max_packet + mdwidth)/mdwidth + 1) + 1 + * maxpacket = mdwidth * ((fifo_size - 1) - 1) - mdwidth; */ - if (dwc->maximum_speed >= USB_SPEED_SUPER) - size /= 3; + if (DWC3_VER_IS_PRIOR(DWC3, 290A)) + maxpacket = mdwidth * (size - 1); else - size /= 2; + maxpacket = mdwidth * ((size - 1) - 1) - mdwidth; + /* Functionally, space for one max packet is sufficient */ + size = min_t(int, maxpacket, 1024); usb_ep_set_maxpacket_limit(&dep->endpoint, size); dep->endpoint.max_streams = 16; diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 4585ee3a444a..e0fa4b186ec6 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -122,8 +122,6 @@ struct ffs_ep { struct usb_endpoint_descriptor *descs[3]; u8 num; - - int status; /* P: epfile->mutex */ }; struct ffs_epfile { @@ -227,6 +225,9 @@ struct ffs_io_data { bool use_sg; struct ffs_data *ffs; + + int status; + struct completion done; }; struct ffs_desc_helper { @@ -707,12 +708,15 @@ static const struct file_operations ffs_ep0_operations = { static void ffs_epfile_io_complete(struct usb_ep *_ep, struct usb_request *req) { + struct ffs_io_data *io_data = req->context; + ENTER(); - if (req->context) { - struct ffs_ep *ep = _ep->driver_data; - ep->status = req->status ? req->status : req->actual; - complete(req->context); - } + if (req->status) + io_data->status = req->status; + else + io_data->status = req->actual; + + complete(&io_data->done); } static ssize_t ffs_copy_to_iter(void *data, int data_len, struct iov_iter *iter) @@ -1050,7 +1054,6 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) WARN(1, "%s: data_len == -EINVAL\n", __func__); ret = -EINVAL; } else if (!io_data->aio) { - DECLARE_COMPLETION_ONSTACK(done); bool interrupted = false; req = ep->req; @@ -1066,7 +1069,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) io_data->buf = data; - req->context = &done; + init_completion(&io_data->done); + req->context = io_data; req->complete = ffs_epfile_io_complete; ret = usb_ep_queue(ep->ep, req, GFP_ATOMIC); @@ -1075,7 +1079,12 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) spin_unlock_irq(&epfile->ffs->eps_lock); - if (wait_for_completion_interruptible(&done)) { + if (wait_for_completion_interruptible(&io_data->done)) { + spin_lock_irq(&epfile->ffs->eps_lock); + if (epfile->ep != ep) { + ret = -ESHUTDOWN; + goto error_lock; + } /* * To avoid race condition with ffs_epfile_io_complete, * dequeue the request first then check @@ -1083,17 +1092,18 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) * condition with req->complete callback. 
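+ * The completion and status were moved into ffs_io_data for the same
+ * reason: they have to outlive the endpoint, so that a late complete()
+ * cannot touch ffs_ep state that may already be gone.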
*/ usb_ep_dequeue(ep->ep, req); - wait_for_completion(&done); - interrupted = ep->status < 0; + spin_unlock_irq(&epfile->ffs->eps_lock); + wait_for_completion(&io_data->done); + interrupted = io_data->status < 0; } if (interrupted) ret = -EINTR; - else if (io_data->read && ep->status > 0) - ret = __ffs_epfile_read_data(epfile, data, ep->status, + else if (io_data->read && io_data->status > 0) + ret = __ffs_epfile_read_data(epfile, data, io_data->status, &io_data->data); else - ret = ep->status; + ret = io_data->status; goto error_mutex; } else if (!(req = usb_ep_alloc_request(ep->ep, GFP_ATOMIC))) { ret = -ENOMEM; diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index 6f5d45ef2e39..f51694f29de9 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -775,9 +775,13 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g, dev->qmult = qmult; snprintf(net->name, sizeof(net->name), "%s%%d", netname); - if (get_ether_addr(dev_addr, addr)) + if (get_ether_addr(dev_addr, addr)) { + net->addr_assign_type = NET_ADDR_RANDOM; dev_warn(&g->dev, "using random %s ethernet address\n", "self"); + } else { + net->addr_assign_type = NET_ADDR_SET; + } eth_hw_addr_set(net, addr); if (get_ether_addr(host_addr, dev->host_mac)) dev_warn(&g->dev, @@ -844,6 +848,10 @@ struct net_device *gether_setup_name_default(const char *netname) eth_random_addr(dev->dev_mac); pr_warn("using random %s ethernet address\n", "self"); + + /* by default we always have a random MAC address */ + net->addr_assign_type = NET_ADDR_RANDOM; + eth_random_addr(dev->host_mac); pr_warn("using random %s ethernet address\n", "host"); @@ -871,7 +879,6 @@ int gether_register_netdev(struct net_device *net) dev = netdev_priv(net); g = dev->gadget; - net->addr_assign_type = NET_ADDR_RANDOM; eth_hw_addr_set(net, dev->dev_mac); status = register_netdev(net); @@ -912,6 +919,7 @@ int gether_set_dev_addr(struct net_device *net, const char *dev_addr) if (get_ether_addr(dev_addr, new_addr)) return -EINVAL; memcpy(dev->dev_mac, new_addr, ETH_ALEN); + net->addr_assign_type = NET_ADDR_SET; return 0; } EXPORT_SYMBOL_GPL(gether_set_dev_addr); diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index a9bb4553db84..d42bb3346745 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ b/drivers/usb/gadget/function/uvc_video.c @@ -424,6 +424,9 @@ static void uvcg_video_pump(struct work_struct *work) uvcg_queue_cancel(queue, 0); break; } + + /* Endpoint now owns the request */ + req = NULL; video->req_int_count++; } diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index 241740024c50..2acece16b890 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -11,6 +11,7 @@ #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/delay.h> +#include <linux/idr.h> #include <linux/kref.h> #include <linux/miscdevice.h> #include <linux/module.h> @@ -36,6 +37,9 @@ MODULE_LICENSE("GPL"); /*----------------------------------------------------------------------*/ +static DEFINE_IDA(driver_id_numbers); +#define DRIVER_DRIVER_NAME_LENGTH_MAX 32 + #define RAW_EVENT_QUEUE_SIZE 16 struct raw_event_queue { @@ -161,6 +165,9 @@ struct raw_dev { /* Reference to misc device: */ struct device *dev; + /* Make driver names unique */ + int driver_id_number; + /* Protected by lock: */ enum dev_state state; bool gadget_registered; @@ -189,6 +196,7 @@ static struct raw_dev 
*dev_new(void) spin_lock_init(&dev->lock); init_completion(&dev->ep0_done); raw_event_queue_init(&dev->queue); + dev->driver_id_number = -1; return dev; } @@ -199,6 +207,9 @@ static void dev_free(struct kref *kref) kfree(dev->udc_name); kfree(dev->driver.udc_name); + kfree(dev->driver.driver.name); + if (dev->driver_id_number >= 0) + ida_free(&driver_id_numbers, dev->driver_id_number); if (dev->req) { if (dev->ep0_urb_queued) usb_ep_dequeue(dev->gadget->ep0, dev->req); @@ -419,9 +430,11 @@ out_put: static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) { int ret = 0; + int driver_id_number; struct usb_raw_init arg; char *udc_driver_name; char *udc_device_name; + char *driver_driver_name; unsigned long flags; if (copy_from_user(&arg, (void __user *)value, sizeof(arg))) @@ -440,36 +453,43 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) return -EINVAL; } + driver_id_number = ida_alloc(&driver_id_numbers, GFP_KERNEL); + if (driver_id_number < 0) + return driver_id_number; + + driver_driver_name = kmalloc(DRIVER_DRIVER_NAME_LENGTH_MAX, GFP_KERNEL); + if (!driver_driver_name) { + ret = -ENOMEM; + goto out_free_driver_id_number; + } + snprintf(driver_driver_name, DRIVER_DRIVER_NAME_LENGTH_MAX, + DRIVER_NAME ".%d", driver_id_number); + udc_driver_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL); - if (!udc_driver_name) - return -ENOMEM; + if (!udc_driver_name) { + ret = -ENOMEM; + goto out_free_driver_driver_name; + } ret = strscpy(udc_driver_name, &arg.driver_name[0], UDC_NAME_LENGTH_MAX); - if (ret < 0) { - kfree(udc_driver_name); - return ret; - } + if (ret < 0) + goto out_free_udc_driver_name; ret = 0; udc_device_name = kmalloc(UDC_NAME_LENGTH_MAX, GFP_KERNEL); if (!udc_device_name) { - kfree(udc_driver_name); - return -ENOMEM; + ret = -ENOMEM; + goto out_free_udc_driver_name; } ret = strscpy(udc_device_name, &arg.device_name[0], UDC_NAME_LENGTH_MAX); - if (ret < 0) { - kfree(udc_driver_name); - kfree(udc_device_name); - return ret; - } + if (ret < 0) + goto out_free_udc_device_name; ret = 0; spin_lock_irqsave(&dev->lock, flags); if (dev->state != STATE_DEV_OPENED) { dev_dbg(dev->dev, "fail, device is not opened\n"); - kfree(udc_driver_name); - kfree(udc_device_name); ret = -EINVAL; goto out_unlock; } @@ -484,14 +504,25 @@ static int raw_ioctl_init(struct raw_dev *dev, unsigned long value) dev->driver.suspend = gadget_suspend; dev->driver.resume = gadget_resume; dev->driver.reset = gadget_reset; - dev->driver.driver.name = DRIVER_NAME; + dev->driver.driver.name = driver_driver_name; dev->driver.udc_name = udc_device_name; dev->driver.match_existing_only = 1; + dev->driver_id_number = driver_id_number; dev->state = STATE_DEV_INITIALIZED; + spin_unlock_irqrestore(&dev->lock, flags); + return ret; out_unlock: spin_unlock_irqrestore(&dev->lock, flags); +out_free_udc_device_name: + kfree(udc_device_name); +out_free_udc_driver_name: + kfree(udc_driver_name); +out_free_driver_driver_name: + kfree(driver_driver_name); +out_free_driver_id_number: + ida_free(&driver_id_numbers, driver_id_number); return ret; } diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index 6117ae8e7242..cea10cdb83ae 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -3016,6 +3016,7 @@ static int lpc32xx_udc_probe(struct platform_device *pdev) } udc->isp1301_i2c_client = isp1301_get_client(isp1301_node); + of_node_put(isp1301_node); if (!udc->isp1301_i2c_client) { return -EPROBE_DEFER; } diff --git 
a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c index c54f2bc23d3f..0fdc014c9401 100644 --- a/drivers/usb/host/xhci-hub.c +++ b/drivers/usb/host/xhci-hub.c @@ -652,7 +652,7 @@ struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd) * It will release and re-aquire the lock while calling ACPI * method. */ -static void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, +void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index, bool on, unsigned long *flags) __must_hold(&xhci->lock) { diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index fac9492a8bda..dce6c0ec8d34 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -61,6 +61,8 @@ #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI 0x464e #define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed +#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI 0xa71e +#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI 0x7ec0 #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 @@ -269,7 +271,9 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI)) + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI || + pdev->device == PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI)) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if (pdev->vendor == PCI_VENDOR_ID_ETRON && diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index f0ab63138016..65858f607437 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -611,15 +611,37 @@ static int xhci_init(struct usb_hcd *hcd) static int xhci_run_finished(struct xhci_hcd *xhci) { + unsigned long flags; + u32 temp; + + /* + * Enable interrupts before starting the host (xhci 4.2 and 5.5.2). 
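+ * In register terms, CMD_EIE sets the INTE bit in USBCMD and
+ * ER_IRQ_ENABLE() sets IE in the primary interrupter's IMAN
+ * (irq_pending below), matching the two writes that follow.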
+ * Protect the short window before host is running with a lock + */ + spin_lock_irqsave(&xhci->lock, flags); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable interrupts"); + temp = readl(&xhci->op_regs->command); + temp |= (CMD_EIE); + writel(temp, &xhci->op_regs->command); + + xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Enable primary interrupter"); + temp = readl(&xhci->ir_set->irq_pending); + writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); + if (xhci_start(xhci)) { xhci_halt(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); return -ENODEV; } + xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; if (xhci->quirks & XHCI_NEC_HOST) xhci_ring_cmd_db(xhci); + spin_unlock_irqrestore(&xhci->lock, flags); + return 0; } @@ -668,19 +690,6 @@ int xhci_run(struct usb_hcd *hcd) temp |= (xhci->imod_interval / 250) & ER_IRQ_INTERVAL_MASK; writel(temp, &xhci->ir_set->irq_control); - /* Set the HCD state before we enable the irqs */ - temp = readl(&xhci->op_regs->command); - temp |= (CMD_EIE); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Enable interrupts, cmd = 0x%x.", temp); - writel(temp, &xhci->op_regs->command); - - temp = readl(&xhci->ir_set->irq_pending); - xhci_dbg_trace(xhci, trace_xhci_dbg_init, - "// Enabling event ring interrupter %p by writing 0x%x to irq_pending", - xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp)); - writel(ER_IRQ_ENABLE(temp), &xhci->ir_set->irq_pending); - if (xhci->quirks & XHCI_NEC_HOST) { struct xhci_command *command; @@ -782,6 +791,8 @@ static void xhci_stop(struct usb_hcd *hcd) void xhci_shutdown(struct usb_hcd *hcd) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); + unsigned long flags; + int i; if (xhci->quirks & XHCI_SPURIOUS_REBOOT) usb_disable_xhci_ports(to_pci_dev(hcd->self.sysdev)); @@ -797,12 +808,21 @@ void xhci_shutdown(struct usb_hcd *hcd) del_timer_sync(&xhci->shared_hcd->rh_timer); } - spin_lock_irq(&xhci->lock); + spin_lock_irqsave(&xhci->lock, flags); xhci_halt(xhci); + + /* Power off USB2 ports*/ + for (i = 0; i < xhci->usb2_rhub.num_ports; i++) + xhci_set_port_power(xhci, xhci->main_hcd, i, false, &flags); + + /* Power off USB3 ports*/ + for (i = 0; i < xhci->usb3_rhub.num_ports; i++) + xhci_set_port_power(xhci, xhci->shared_hcd, i, false, &flags); + /* Workaround for spurious wakeups at shutdown with HSW */ if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) xhci_reset(xhci, XHCI_RESET_SHORT_USEC); - spin_unlock_irq(&xhci->lock); + spin_unlock_irqrestore(&xhci->lock, flags); xhci_cleanup_msix(xhci); @@ -1107,7 +1127,6 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) { u32 command, temp = 0; struct usb_hcd *hcd = xhci_to_hcd(xhci); - struct usb_hcd *secondary_hcd; int retval = 0; bool comp_timer_running = false; bool pending_portevent = false; @@ -1214,23 +1233,19 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) * first with the primary HCD, and then with the secondary HCD. * If we don't do the same, the host will never be started. 
*/ - if (!usb_hcd_is_primary_hcd(hcd)) - secondary_hcd = hcd; - else - secondary_hcd = xhci->shared_hcd; - xhci_dbg(xhci, "Initialize the xhci_hcd\n"); - retval = xhci_init(hcd->primary_hcd); + retval = xhci_init(hcd); if (retval) return retval; comp_timer_running = true; xhci_dbg(xhci, "Start the primary HCD\n"); - retval = xhci_run(hcd->primary_hcd); - if (!retval && secondary_hcd) { + retval = xhci_run(hcd); + if (!retval && xhci->shared_hcd) { xhci_dbg(xhci, "Start the secondary HCD\n"); - retval = xhci_run(secondary_hcd); + retval = xhci_run(xhci->shared_hcd); } + hcd->state = HC_STATE_SUSPENDED; if (xhci->shared_hcd) xhci->shared_hcd->state = HC_STATE_SUSPENDED; diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 0bd76c94a4b1..28aaf031f9a8 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -2196,6 +2196,8 @@ int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, int xhci_hub_status_data(struct usb_hcd *hcd, char *buf); int xhci_find_raw_port_number(struct usb_hcd *hcd, int port1); struct xhci_hub *xhci_get_rhub(struct usb_hcd *hcd); +void xhci_set_port_power(struct xhci_hcd *xhci, struct usb_hcd *hcd, u16 index, + bool on, unsigned long *flags); void xhci_hc_died(struct xhci_hcd *xhci); diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index a7b3c15957ba..feba2a8d1233 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -166,6 +166,7 @@ static const struct usb_device_id edgeport_2port_id_table[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) }, + { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) }, { } }; @@ -204,6 +205,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_8S) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416) }, { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_TI_EDGEPORT_416B) }, + { USB_DEVICE(USB_VENDOR_ID_ION, ION_DEVICE_ID_E5805A) }, { } }; diff --git a/drivers/usb/serial/io_usbvend.h b/drivers/usb/serial/io_usbvend.h index 52cbc353051f..9a6f742ad3ab 100644 --- a/drivers/usb/serial/io_usbvend.h +++ b/drivers/usb/serial/io_usbvend.h @@ -212,6 +212,7 @@ // // Definitions for other product IDs #define ION_DEVICE_ID_MT4X56USB 0x1403 // OEM device +#define ION_DEVICE_ID_E5805A 0x1A01 // OEM device (rebranded Edgeport/4) #define GENERATION_ID_FROM_USB_PRODUCT_ID(ProductId) \ diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index e60425bbf537..de59fa919540 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -252,10 +252,12 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EG95 0x0195 #define QUECTEL_PRODUCT_BG96 0x0296 #define QUECTEL_PRODUCT_EP06 0x0306 +#define QUECTEL_PRODUCT_EM05G 0x030a #define QUECTEL_PRODUCT_EM12 0x0512 #define QUECTEL_PRODUCT_RM500Q 0x0800 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 #define QUECTEL_PRODUCT_EC200T 0x6026 +#define QUECTEL_PRODUCT_RM500K 0x7001 #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 @@ -432,6 +434,8 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_CLS8 0x00b0 #define CINTERION_PRODUCT_MV31_MBIM 0x00b3 #define CINTERION_PRODUCT_MV31_RMNET 0x00b7 +#define CINTERION_PRODUCT_MV31_2_MBIM 0x00b8 +#define CINTERION_PRODUCT_MV31_2_RMNET 0x00b9 #define 
CINTERION_PRODUCT_MV32_WA 0x00f1 #define CINTERION_PRODUCT_MV32_WB 0x00f2 @@ -1132,6 +1136,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EP06, 0xff, 0, 0) }, + { USB_DEVICE_INTERFACE_CLASS(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM05G, 0xff), + .driver_info = RSVD(6) | ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff), .driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) }, @@ -1145,6 +1151,7 @@ static const struct usb_device_id option_ids[] = { .driver_info = ZLP }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500K, 0xff, 0x00, 0x00) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, @@ -1277,6 +1284,7 @@ static const struct usb_device_id option_ids[] = { .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1231, 0xff), /* Telit LE910Cx (RNDIS) */ .driver_info = NCTRL(2) | RSVD(3) }, + { USB_DEVICE_AND_INTERFACE_INFO(TELIT_VENDOR_ID, 0x1250, 0xff, 0x00, 0x00) }, /* Telit LE910Cx (rmnet) */ { USB_DEVICE(TELIT_VENDOR_ID, 0x1260), .driver_info = NCTRL(0) | RSVD(1) | RSVD(2) }, { USB_DEVICE(TELIT_VENDOR_ID, 0x1261), @@ -1979,6 +1987,10 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(3)}, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff), .driver_info = RSVD(0)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_MBIM, 0xff), + .driver_info = RSVD(3)}, + { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_2_RMNET, 0xff), + .driver_info = RSVD(0)}, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WA, 0xff), .driver_info = RSVD(3)}, { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV32_WB, 0xff), diff --git a/drivers/usb/serial/pl2303.c b/drivers/usb/serial/pl2303.c index 3506c47e1eef..40b1ab3d284d 100644 --- a/drivers/usb/serial/pl2303.c +++ b/drivers/usb/serial/pl2303.c @@ -436,22 +436,27 @@ static int pl2303_detect_type(struct usb_serial *serial) break; case 0x200: switch (bcdDevice) { - case 0x100: + case 0x100: /* GC */ case 0x105: + return TYPE_HXN; + case 0x300: /* GT / TA */ + if (pl2303_supports_hx_status(serial)) + return TYPE_TA; + fallthrough; case 0x305: + case 0x400: /* GL */ case 0x405: + return TYPE_HXN; + case 0x500: /* GE / TB */ + if (pl2303_supports_hx_status(serial)) + return TYPE_TB; + fallthrough; + case 0x505: + case 0x600: /* GS */ case 0x605: - /* - * Assume it's an HXN-type if the device doesn't - * support the old read request value. 
- */ - if (!pl2303_supports_hx_status(serial)) - return TYPE_HXN; - break; - case 0x300: - return TYPE_TA; - case 0x500: - return TYPE_TB; + case 0x700: /* GR */ + case 0x705: + return TYPE_HXN; } break; } diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig index 557f392fe24d..073fd2ea5e0b 100644 --- a/drivers/usb/typec/tcpm/Kconfig +++ b/drivers/usb/typec/tcpm/Kconfig @@ -56,7 +56,6 @@ config TYPEC_WCOVE tristate "Intel WhiskeyCove PMIC USB Type-C PHY driver" depends on ACPI depends on MFD_INTEL_PMC_BXT - depends on INTEL_SOC_PMIC depends on BXT_WC_PMIC_OPREGION help This driver adds support for USB Type-C on Intel Broxton platforms diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index b7a955479156..1b6d46b86f81 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -107,7 +107,7 @@ struct mlx5_vdpa_virtqueue { /* Resources for implementing the notification channel from the device * to the driver. fwqp is the firmware end of an RC connection; the - * other end is vqqp used by the driver. cq is is where completions are + * other end is vqqp used by the driver. cq is where completions are * reported. */ struct mlx5_vdpa_cq cq; @@ -1814,12 +1814,13 @@ static virtio_net_ctrl_ack handle_ctrl_vlan(struct mlx5_vdpa_dev *mvdev, u8 cmd) id = mlx5vdpa16_to_cpu(mvdev, vlan); mac_vlan_del(ndev, ndev->config.mac, id, true); + status = VIRTIO_NET_OK; break; default: - break; -} + break; + } -return status; + return status; } static void mlx5_cvq_kick_handler(struct work_struct *work) diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index d503848b3b6e..776ad7496f53 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -1345,9 +1345,9 @@ static int vduse_create_dev(struct vduse_dev_config *config, dev->minor = ret; dev->msg_timeout = VDUSE_MSG_DEFAULT_TIMEOUT; - dev->dev = device_create(vduse_class, NULL, - MKDEV(MAJOR(vduse_major), dev->minor), - dev, "%s", config->name); + dev->dev = device_create_with_groups(vduse_class, NULL, + MKDEV(MAJOR(vduse_major), dev->minor), + dev, vduse_dev_groups, "%s", config->name); if (IS_ERR(dev->dev)) { ret = PTR_ERR(dev->dev); goto err_dev; @@ -1596,7 +1596,6 @@ static int vduse_init(void) return PTR_ERR(vduse_class); vduse_class->devnode = vduse_devnode; - vduse_class->dev_groups = vduse_dev_groups; ret = alloc_chrdev_region(&vduse_major, 0, VDUSE_DEV_MAX, "vduse"); if (ret) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 935a1d0ddb97..5ad2596c6e8a 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -499,6 +499,8 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, ops->set_vq_ready(vdpa, idx, s.num); return 0; case VHOST_VDPA_GET_VRING_GROUP: + if (!ops->get_vq_group) + return -EOPNOTSUPP; s.index = idx; s.num = ops->get_vq_group(vdpa, idx); if (s.num >= vdpa->ngroups) diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c index 14e2043d7685..eab55accf381 100644 --- a/drivers/vhost/vringh.c +++ b/drivers/vhost/vringh.c @@ -292,7 +292,7 @@ __vringh_iov(struct vringh *vrh, u16 i, int (*copy)(const struct vringh *vrh, void *dst, const void *src, size_t len)) { - int err, count = 0, up_next, desc_max; + int err, count = 0, indirect_count = 0, up_next, desc_max; struct vring_desc desc, *descs; struct vringh_range range = { -1ULL, 0 }, slowrange; bool slow = false; @@ -349,7 +349,12 @@ __vringh_iov(struct vringh *vrh, u16 i, continue; } - if 
(count++ == vrh->vring.num) { + if (up_next == -1) + count++; + else + indirect_count++; + + if (count > vrh->vring.num || indirect_count > desc_max) { vringh_bad("Descriptor loop in %p", descs); err = -ELOOP; goto fail; @@ -411,6 +416,7 @@ __vringh_iov(struct vringh *vrh, u16 i, i = return_from_indirect(vrh, &up_next, &descs, &desc_max); slow = false; + indirect_count = 0; } else break; } diff --git a/drivers/video/console/sticore.c b/drivers/video/console/sticore.c index fa23bf0247b0..bd4dc97d4d34 100644 --- a/drivers/video/console/sticore.c +++ b/drivers/video/console/sticore.c @@ -1148,6 +1148,7 @@ int sti_call(const struct sti_struct *sti, unsigned long func, return ret; } +#if defined(CONFIG_FB_STI) /* check if given fb_info is the primary device */ int fb_is_primary_device(struct fb_info *info) { @@ -1163,6 +1164,7 @@ int fb_is_primary_device(struct fb_info *info) return (sti->info == info); } EXPORT_SYMBOL(fb_is_primary_device); +#endif MODULE_AUTHOR("Philipp Rumpf, Helge Deller, Thomas Bogendoerfer"); MODULE_DESCRIPTION("Core STI driver for HP's NGLE series graphics cards in HP PARISC machines"); diff --git a/drivers/video/fbdev/au1100fb.c b/drivers/video/fbdev/au1100fb.c index 52f731a61482..519313b8bb00 100644 --- a/drivers/video/fbdev/au1100fb.c +++ b/drivers/video/fbdev/au1100fb.c @@ -560,8 +560,7 @@ int au1100fb_drv_suspend(struct platform_device *dev, pm_message_t state) /* Blank the LCD */ au1100fb_fb_blank(VESA_POWERDOWN, &fbdev->info); - if (fbdev->lcdclk) - clk_disable(fbdev->lcdclk); + clk_disable(fbdev->lcdclk); memcpy(&fbregs, fbdev->regs, sizeof(struct au1100fb_regs)); @@ -577,8 +576,7 @@ int au1100fb_drv_resume(struct platform_device *dev) memcpy(fbdev->regs, &fbregs, sizeof(struct au1100fb_regs)); - if (fbdev->lcdclk) - clk_enable(fbdev->lcdclk); + clk_enable(fbdev->lcdclk); /* Unblank the LCD */ au1100fb_fb_blank(VESA_NO_BLANKING, &fbdev->info); diff --git a/drivers/video/fbdev/cirrusfb.c b/drivers/video/fbdev/cirrusfb.c index 3d47c347b897..51e072c03e1c 100644 --- a/drivers/video/fbdev/cirrusfb.c +++ b/drivers/video/fbdev/cirrusfb.c @@ -2184,12 +2184,6 @@ static struct pci_driver cirrusfb_pci_driver = { .id_table = cirrusfb_pci_table, .probe = cirrusfb_pci_register, .remove = cirrusfb_pci_unregister, -#ifdef CONFIG_PM -#if 0 - .suspend = cirrusfb_pci_suspend, - .resume = cirrusfb_pci_resume, -#endif -#endif }; #endif /* CONFIG_PCI */ diff --git a/drivers/video/fbdev/intelfb/intelfbdrv.c b/drivers/video/fbdev/intelfb/intelfbdrv.c index a9579964eaba..5647fca8c49a 100644 --- a/drivers/video/fbdev/intelfb/intelfbdrv.c +++ b/drivers/video/fbdev/intelfb/intelfbdrv.c @@ -472,7 +472,7 @@ static int intelfb_pci_register(struct pci_dev *pdev, struct fb_info *info; struct intelfb_info *dinfo; int i, err, dvo; - int aperture_size, stolen_size; + int aperture_size, stolen_size = 0; struct agp_kern_info gtt_info; int agp_memtype; const char *s; @@ -571,7 +571,7 @@ static int intelfb_pci_register(struct pci_dev *pdev, return -ENODEV; } - if (intelfbhw_get_memory(pdev, &aperture_size,&stolen_size)) { + if (intelfbhw_get_memory(pdev, &aperture_size, &stolen_size)) { cleanup(dinfo); return -ENODEV; } diff --git a/drivers/video/fbdev/intelfb/intelfbhw.c b/drivers/video/fbdev/intelfb/intelfbhw.c index 57aff7450bce..2086e06532ee 100644 --- a/drivers/video/fbdev/intelfb/intelfbhw.c +++ b/drivers/video/fbdev/intelfb/intelfbhw.c @@ -201,13 +201,11 @@ int intelfbhw_get_memory(struct pci_dev *pdev, int *aperture_size, case PCI_DEVICE_ID_INTEL_945GME: case PCI_DEVICE_ID_INTEL_965G: case 
PCI_DEVICE_ID_INTEL_965GM: - /* 915, 945 and 965 chipsets support a 256MB aperture. - Aperture size is determined by inspected the - base address of the aperture. */ - if (pci_resource_start(pdev, 2) & 0x08000000) - *aperture_size = MB(128); - else - *aperture_size = MB(256); + /* + * 915, 945 and 965 chipsets support 64MB, 128MB or 256MB + * aperture. Determine size from PCI resource length. + */ + *aperture_size = pci_resource_len(pdev, 2); break; default: if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M) diff --git a/drivers/video/fbdev/omap/sossi.c b/drivers/video/fbdev/omap/sossi.c index c90eb8ca58af..66aff6cd1df0 100644 --- a/drivers/video/fbdev/omap/sossi.c +++ b/drivers/video/fbdev/omap/sossi.c @@ -359,7 +359,7 @@ static void sossi_set_bits_per_cycle(int bpc) int bus_pick_count, bus_pick_width; /* - * We set explicitly the the bus_pick_count as well, although + * We set explicitly the bus_pick_count as well, although * with remapping/reordering disabled it will be calculated by HW * as (32 / bus_pick_width). */ diff --git a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c index 6fbfeb01b315..170463a7e1f4 100644 --- a/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c +++ b/drivers/video/fbdev/omap2/omapfb/dss/hdmi_phy.c @@ -143,7 +143,7 @@ int hdmi_phy_configure(struct hdmi_phy_data *phy, unsigned long hfbitclk, /* * In OMAP5+, the HFBITCLK must be divided by 2 before issuing the * HDMI_PHYPWRCMD_LDOON command. - */ + */ if (phy_feat->bist_ctrl) REG_FLD_MOD(phy->base, HDMI_TXPHY_BIST_CONTROL, 1, 11, 11); diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c index 043cc8f9ef1c..c3cd1e1cc01b 100644 --- a/drivers/video/fbdev/pxa3xx-gcu.c +++ b/drivers/video/fbdev/pxa3xx-gcu.c @@ -381,7 +381,7 @@ pxa3xx_gcu_write(struct file *file, const char *buff, struct pxa3xx_gcu_batch *buffer; struct pxa3xx_gcu_priv *priv = to_pxa3xx_gcu_priv(file); - int words = count / 4; + size_t words = count / 4; /* Does not need to be atomic. There's a lock in user space, * but anyhow, this is just for statistics. */ diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c index 2c198561c338..f96ce8801be4 100644 --- a/drivers/video/fbdev/simplefb.c +++ b/drivers/video/fbdev/simplefb.c @@ -237,8 +237,7 @@ static int simplefb_clocks_get(struct simplefb_par *par, if (IS_ERR(clock)) { if (PTR_ERR(clock) == -EPROBE_DEFER) { while (--i >= 0) { - if (par->clks[i]) - clk_put(par->clks[i]); + clk_put(par->clks[i]); } kfree(par->clks); return -EPROBE_DEFER; diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c index bcacfb6934fa..d119b1d08007 100644 --- a/drivers/video/fbdev/skeletonfb.c +++ b/drivers/video/fbdev/skeletonfb.c @@ -96,7 +96,7 @@ static const struct fb_fix_screeninfo xxxfb_fix = { /* * Modern graphical hardware not only supports pipelines but some - * also support multiple monitors where each display can have its + * also support multiple monitors where each display can have * its own unique data. In this case each display could be * represented by a separate framebuffer device thus a separate * struct fb_info. 
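The intelfbhw_get_memory() hunk above replaces guessing the aperture size from the BAR's base address with reading the BAR's length directly. A hedged sketch of that idiom; example_aperture_size() is a hypothetical helper, not the driver's actual probe code:

#include <linux/pci.h>

/* Size an aperture from the length of BAR 2 rather than inferring it
 * from the BAR's base address. */
static int example_aperture_size(struct pci_dev *pdev, resource_size_t *size)
{
	resource_size_t len = pci_resource_len(pdev, 2);

	if (!len)
		return -ENODEV;	/* BAR not implemented */

	*size = len;		/* covers 64MB, 128MB and 256MB parts alike */
	return 0;
}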
Now the struct xxx_par represents the graphics @@ -838,9 +838,9 @@ static void xxxfb_remove(struct pci_dev *dev) * * See Documentation/driver-api/pm/devices.rst for more information */ -static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg) +static int xxxfb_suspend(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct xxxfb_par *par = info->par; /* suspend here */ @@ -853,9 +853,9 @@ static int xxxfb_suspend(struct pci_dev *dev, pm_message_t msg) * * See Documentation/driver-api/pm/devices.rst for more information */ -static int xxxfb_resume(struct pci_dev *dev) +static int xxxfb_resume(struct device *dev) { - struct fb_info *info = pci_get_drvdata(dev); + struct fb_info *info = dev_get_drvdata(dev); struct xxxfb_par *par = info->par; /* resume here */ @@ -873,14 +873,15 @@ static const struct pci_device_id xxxfb_id_table[] = { { 0, } }; +static SIMPLE_DEV_PM_OPS(xxxfb_pm_ops, xxxfb_suspend, xxxfb_resume); + /* For PCI drivers */ static struct pci_driver xxxfb_driver = { .name = "xxxfb", .id_table = xxxfb_id_table, .probe = xxxfb_probe, .remove = xxxfb_remove, - .suspend = xxxfb_suspend, /* optional but recommended */ - .resume = xxxfb_resume, /* optional but recommended */ + .driver.pm = &xxxfb_pm_ops, /* optional but recommended */ }; MODULE_DEVICE_TABLE(pci, xxxfb_id_table); diff --git a/drivers/virtio/Kconfig b/drivers/virtio/Kconfig index b5adf6abd241..a6dc8b5846fe 100644 --- a/drivers/virtio/Kconfig +++ b/drivers/virtio/Kconfig @@ -6,12 +6,6 @@ config VIRTIO bus, such as CONFIG_VIRTIO_PCI, CONFIG_VIRTIO_MMIO, CONFIG_RPMSG or CONFIG_S390_GUEST. -config ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS - bool - help - This option is selected if the architecture may need to enforce - VIRTIO_F_ACCESS_PLATFORM - config VIRTIO_PCI_LIB tristate help diff --git a/drivers/virtio/virtio.c b/drivers/virtio/virtio.c index ef04a96942bf..6bace84ae37e 100644 --- a/drivers/virtio/virtio.c +++ b/drivers/virtio/virtio.c @@ -5,6 +5,7 @@ #include <linux/module.h> #include <linux/idr.h> #include <linux/of.h> +#include <linux/platform-feature.h> #include <uapi/linux/virtio_ids.h> /* Unique numbering for virtio devices. */ @@ -170,12 +171,10 @@ EXPORT_SYMBOL_GPL(virtio_add_status); static int virtio_features_ok(struct virtio_device *dev) { unsigned int status; - int ret; might_sleep(); - ret = arch_has_restricted_virtio_memory_access(); - if (ret) { + if (platform_has(PLATFORM_VIRTIO_RESTRICTED_MEM_ACCESS)) { if (!virtio_has_feature(dev, VIRTIO_F_VERSION_1)) { dev_warn(&dev->dev, "device must provide VIRTIO_F_VERSION_1\n"); diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index f9a36bc7ac27..c9bec3813e94 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -255,7 +255,7 @@ static void vm_set_status(struct virtio_device *vdev, u8 status) /* * Per memory-barriers.txt, wmb() is not needed to guarantee - * that the the cache coherent memory writes have completed + * that the cache coherent memory writes have completed * before writing to the MMIO region.
*/ writel(status, vm_dev->base + VIRTIO_MMIO_STATUS); @@ -701,6 +701,7 @@ static int vm_cmdline_set(const char *device, if (!vm_cmdline_parent_registered) { err = device_register(&vm_cmdline_parent); if (err) { + put_device(&vm_cmdline_parent); pr_err("Failed to register parent device!\n"); return err; } diff --git a/drivers/virtio/virtio_pci_modern_dev.c b/drivers/virtio/virtio_pci_modern_dev.c index a0fa14f28a7f..b790f30b2b56 100644 --- a/drivers/virtio/virtio_pci_modern_dev.c +++ b/drivers/virtio/virtio_pci_modern_dev.c @@ -469,7 +469,7 @@ void vp_modern_set_status(struct virtio_pci_modern_device *mdev, /* * Per memory-barriers.txt, wmb() is not needed to guarantee - * that the the cache coherent memory writes have completed + * that the cache coherent memory writes have completed * before writing to the MMIO region. */ vp_iowrite8(status, &cfg->device_status); diff --git a/drivers/watchdog/gxp-wdt.c b/drivers/watchdog/gxp-wdt.c index b0b2d7a6fdde..2fd85be88278 100644 --- a/drivers/watchdog/gxp-wdt.c +++ b/drivers/watchdog/gxp-wdt.c @@ -172,3 +172,4 @@ module_platform_driver(gxp_wdt_driver); MODULE_AUTHOR("Nick Hawkins <nick.hawkins@hpe.com>"); MODULE_AUTHOR("Jean-Marie Verdun <verdun@hpe.com>"); MODULE_DESCRIPTION("Driver for GXP watchdog timer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/xen/Kconfig b/drivers/xen/Kconfig index 120d32f164ac..bfd5f4f706bc 100644 --- a/drivers/xen/Kconfig +++ b/drivers/xen/Kconfig @@ -335,4 +335,24 @@ config XEN_UNPOPULATED_ALLOC having to balloon out RAM regions in order to obtain physical memory space to create such mappings. +config XEN_GRANT_DMA_IOMMU + bool + select IOMMU_API + +config XEN_GRANT_DMA_OPS + bool + select DMA_OPS + +config XEN_VIRTIO + bool "Xen virtio support" + depends on VIRTIO + select XEN_GRANT_DMA_OPS + select XEN_GRANT_DMA_IOMMU if OF + help + Enable virtio support for running as Xen guest. Depending on the + guest type this will require special support on the backend side + (qemu or kernel, depending on the virtio device types used). + + If in doubt, say n. 
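The virtio_mmio hunk above adds the missing put_device() on the device_register() failure path. Once device_register() has been called, the embedded kobject holds a reference that only put_device() may release; calling kfree() or simply returning would leak it. A hedged sketch of the rule, with a hypothetical static parent device mirroring vm_cmdline_parent:

#include <linux/device.h>

static struct device example_parent = {
	.init_name = "example-parent",	/* hypothetical device */
};

static int example_register(void)
{
	int err = device_register(&example_parent);

	if (err) {
		/*
		 * device_register() failed, but it already initialized the
		 * embedded kobject; the reference must still be dropped.
		 */
		put_device(&example_parent);
		return err;
	}

	return 0;
}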
+ endmenu diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 5aae66e638a7..c0503f1c7d5b 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile @@ -39,3 +39,5 @@ xen-gntalloc-y := gntalloc.o xen-privcmd-y := privcmd.o privcmd-buf.o obj-$(CONFIG_XEN_FRONT_PGDIR_SHBUF) += xen-front-pgdir-shbuf.o obj-$(CONFIG_XEN_UNPOPULATED_ALLOC) += unpopulated-alloc.o +obj-$(CONFIG_XEN_GRANT_DMA_OPS) += grant-dma-ops.o +obj-$(CONFIG_XEN_GRANT_DMA_IOMMU) += grant-dma-iommu.o diff --git a/drivers/xen/features.c b/drivers/xen/features.c index 7b591443833c..87f1828d40d5 100644 --- a/drivers/xen/features.c +++ b/drivers/xen/features.c @@ -42,7 +42,7 @@ void xen_setup_features(void) if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0) break; for (j = 0; j < 32; j++) - xen_features[i * 32 + j] = !!(fi.submap & 1<<j); + xen_features[i * 32 + j] = !!(fi.submap & 1U << j); } if (xen_pv_domain()) { diff --git a/drivers/xen/gntdev-common.h b/drivers/xen/gntdev-common.h index 20d7d059dadb..40ef379c28ab 100644 --- a/drivers/xen/gntdev-common.h +++ b/drivers/xen/gntdev-common.h @@ -16,6 +16,7 @@ #include <linux/mmu_notifier.h> #include <linux/types.h> #include <xen/interface/event_channel.h> +#include <xen/grant_table.h> struct gntdev_dmabuf_priv; @@ -56,6 +57,7 @@ struct gntdev_grant_map { struct gnttab_unmap_grant_ref *unmap_ops; struct gnttab_map_grant_ref *kmap_ops; struct gnttab_unmap_grant_ref *kunmap_ops; + bool *being_removed; struct page **pages; unsigned long pages_vm_start; @@ -73,6 +75,11 @@ struct gntdev_grant_map { /* Needed to avoid allocation in gnttab_dma_free_pages(). */ xen_pfn_t *frames; #endif + + /* Number of live grants */ + atomic_t live_grants; + /* Needed to avoid allocation in __unmap_grant_pages */ + struct gntab_unmap_queue_data unmap_data; }; struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c index 59ffea800079..4b56c39f766d 100644 --- a/drivers/xen/gntdev.c +++ b/drivers/xen/gntdev.c @@ -35,6 +35,7 @@ #include <linux/slab.h> #include <linux/highmem.h> #include <linux/refcount.h> +#include <linux/workqueue.h> #include <xen/xen.h> #include <xen/grant_table.h> @@ -60,10 +61,11 @@ module_param(limit, uint, 0644); MODULE_PARM_DESC(limit, "Maximum number of grants that may be mapped by one mapping request"); +/* True in PV mode, false otherwise */ static int use_ptemod; -static int unmap_grant_pages(struct gntdev_grant_map *map, - int offset, int pages); +static void unmap_grant_pages(struct gntdev_grant_map *map, + int offset, int pages); static struct miscdevice gntdev_miscdev; @@ -120,6 +122,7 @@ static void gntdev_free_map(struct gntdev_grant_map *map) kvfree(map->unmap_ops); kvfree(map->kmap_ops); kvfree(map->kunmap_ops); + kvfree(map->being_removed); kfree(map); } @@ -140,10 +143,13 @@ struct gntdev_grant_map *gntdev_alloc_map(struct gntdev_priv *priv, int count, add->unmap_ops = kvmalloc_array(count, sizeof(add->unmap_ops[0]), GFP_KERNEL); add->pages = kvcalloc(count, sizeof(add->pages[0]), GFP_KERNEL); + add->being_removed = + kvcalloc(count, sizeof(add->being_removed[0]), GFP_KERNEL); if (NULL == add->grants || NULL == add->map_ops || NULL == add->unmap_ops || - NULL == add->pages) + NULL == add->pages || + NULL == add->being_removed) goto err; if (use_ptemod) { add->kmap_ops = kvmalloc_array(count, sizeof(add->kmap_ops[0]), @@ -250,9 +256,36 @@ void gntdev_put_map(struct gntdev_priv *priv, struct gntdev_grant_map *map) if (!refcount_dec_and_test(&map->users)) return; - if 
(map->pages && !use_ptemod) + if (map->pages && !use_ptemod) { + /* + * Increment the reference count. This ensures that the + * subsequent call to unmap_grant_pages() will not wind up + * re-entering itself. It *can* wind up calling + * gntdev_put_map() recursively, but such calls will be with a + * reference count greater than 1, so they will return before + * this code is reached. The recursion depth is thus limited to + * 1. Do NOT use refcount_inc() here, as it will detect that + * the reference count is zero and WARN(). + */ + refcount_set(&map->users, 1); + + /* + * Unmap the grants. This may or may not be asynchronous, so it + * is possible that the reference count is 1 on return, but it + * could also be greater than 1. + */ unmap_grant_pages(map, 0, map->count); + /* Check if the memory now needs to be freed */ + if (!refcount_dec_and_test(&map->users)) + return; + + /* + * All pages have been returned to the hypervisor, so free the + * map. + */ + } + if (map->notify.flags & UNMAP_NOTIFY_SEND_EVENT) { notify_remote_via_evtchn(map->notify.event); evtchn_put(map->notify.event); @@ -283,6 +316,7 @@ static int find_grant_ptes(pte_t *pte, unsigned long addr, void *data) int gntdev_map_grant_pages(struct gntdev_grant_map *map) { + size_t alloced = 0; int i, err = 0; if (!use_ptemod) { @@ -331,97 +365,116 @@ int gntdev_map_grant_pages(struct gntdev_grant_map *map) map->count); for (i = 0; i < map->count; i++) { - if (map->map_ops[i].status == GNTST_okay) + if (map->map_ops[i].status == GNTST_okay) { map->unmap_ops[i].handle = map->map_ops[i].handle; - else if (!err) + if (!use_ptemod) + alloced++; + } else if (!err) err = -EINVAL; if (map->flags & GNTMAP_device_map) map->unmap_ops[i].dev_bus_addr = map->map_ops[i].dev_bus_addr; if (use_ptemod) { - if (map->kmap_ops[i].status == GNTST_okay) + if (map->kmap_ops[i].status == GNTST_okay) { + if (map->map_ops[i].status == GNTST_okay) + alloced++; map->kunmap_ops[i].handle = map->kmap_ops[i].handle; - else if (!err) + } else if (!err) err = -EINVAL; } } + atomic_add(alloced, &map->live_grants); return err; } -static int __unmap_grant_pages(struct gntdev_grant_map *map, int offset, - int pages) +static void __unmap_grant_pages_done(int result, + struct gntab_unmap_queue_data *data) { - int i, err = 0; - struct gntab_unmap_queue_data unmap_data; - - if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { - int pgno = (map->notify.addr >> PAGE_SHIFT); - if (pgno >= offset && pgno < offset + pages) { - /* No need for kmap, pages are in lowmem */ - uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); - tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; - map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; - } - } - - unmap_data.unmap_ops = map->unmap_ops + offset; - unmap_data.kunmap_ops = use_ptemod ? 
map->kunmap_ops + offset : NULL; - unmap_data.pages = map->pages + offset; - unmap_data.count = pages; - - err = gnttab_unmap_refs_sync(&unmap_data); - if (err) - return err; + unsigned int i; + struct gntdev_grant_map *map = data->data; + unsigned int offset = data->unmap_ops - map->unmap_ops; - for (i = 0; i < pages; i++) { - if (map->unmap_ops[offset+i].status) - err = -EINVAL; + for (i = 0; i < data->count; i++) { + WARN_ON(map->unmap_ops[offset+i].status); pr_debug("unmap handle=%d st=%d\n", map->unmap_ops[offset+i].handle, map->unmap_ops[offset+i].status); map->unmap_ops[offset+i].handle = INVALID_GRANT_HANDLE; if (use_ptemod) { - if (map->kunmap_ops[offset+i].status) - err = -EINVAL; + WARN_ON(map->kunmap_ops[offset+i].status); pr_debug("kunmap handle=%u st=%d\n", map->kunmap_ops[offset+i].handle, map->kunmap_ops[offset+i].status); map->kunmap_ops[offset+i].handle = INVALID_GRANT_HANDLE; } } - return err; + /* + * Decrease the live-grant counter. This must happen after the loop to + * prevent premature reuse of the grants by gnttab_mmap(). + */ + atomic_sub(data->count, &map->live_grants); + + /* Release reference taken by __unmap_grant_pages */ + gntdev_put_map(NULL, map); +} + +static void __unmap_grant_pages(struct gntdev_grant_map *map, int offset, + int pages) +{ + if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) { + int pgno = (map->notify.addr >> PAGE_SHIFT); + + if (pgno >= offset && pgno < offset + pages) { + /* No need for kmap, pages are in lowmem */ + uint8_t *tmp = pfn_to_kaddr(page_to_pfn(map->pages[pgno])); + + tmp[map->notify.addr & (PAGE_SIZE-1)] = 0; + map->notify.flags &= ~UNMAP_NOTIFY_CLEAR_BYTE; + } + } + + map->unmap_data.unmap_ops = map->unmap_ops + offset; + map->unmap_data.kunmap_ops = use_ptemod ? map->kunmap_ops + offset : NULL; + map->unmap_data.pages = map->pages + offset; + map->unmap_data.count = pages; + map->unmap_data.done = __unmap_grant_pages_done; + map->unmap_data.data = map; + refcount_inc(&map->users); /* to keep map alive during async call below */ + + gnttab_unmap_refs_async(&map->unmap_data); } -static int unmap_grant_pages(struct gntdev_grant_map *map, int offset, - int pages) +static void unmap_grant_pages(struct gntdev_grant_map *map, int offset, + int pages) { - int range, err = 0; + int range; + + if (atomic_read(&map->live_grants) == 0) + return; /* Nothing to do */ pr_debug("unmap %d+%d [%d+%d]\n", map->index, map->count, offset, pages); /* It is possible the requested range will have a "hole" where we * already unmapped some of the grants. Only unmap valid ranges. 
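The conversion above from gnttab_unmap_refs_sync() to gnttab_unmap_refs_async() relies on a reference taken before the asynchronous call and dropped in the completion handler, so the map cannot be freed while an unmap is in flight. A generic sketch of that keep-alive pattern; the job object is hypothetical and a plain workqueue stands in for the grant-table async machinery:

#include <linux/kernel.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct job {
	refcount_t users;
	struct work_struct work;
};

static void job_done(struct work_struct *work)
{
	struct job *j = container_of(work, struct job, work);

	/* ... consume the results of the asynchronous operation ... */

	/* Drop the reference taken in job_start(); free on the last put. */
	if (refcount_dec_and_test(&j->users))
		kfree(j);
}

static struct job *job_alloc(void)
{
	struct job *j = kzalloc(sizeof(*j), GFP_KERNEL);

	if (j) {
		refcount_set(&j->users, 1);	/* caller's reference */
		INIT_WORK(&j->work, job_done);
	}
	return j;
}

static void job_start(struct job *j)
{
	/* Keep j alive until job_done() has run. */
	refcount_inc(&j->users);
	schedule_work(&j->work);
}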
*/ - while (pages && !err) { - while (pages && - map->unmap_ops[offset].handle == INVALID_GRANT_HANDLE) { + while (pages) { + while (pages && map->being_removed[offset]) { offset++; pages--; } range = 0; while (range < pages) { - if (map->unmap_ops[offset + range].handle == - INVALID_GRANT_HANDLE) + if (map->being_removed[offset + range]) break; + map->being_removed[offset + range] = true; range++; } - err = __unmap_grant_pages(map, offset, range); + if (range) + __unmap_grant_pages(map, offset, range); offset += range; pages -= range; } - - return err; } /* ------------------------------------------------------------------ */ @@ -473,7 +526,6 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn, struct gntdev_grant_map *map = container_of(mn, struct gntdev_grant_map, notifier); unsigned long mstart, mend; - int err; if (!mmu_notifier_range_blockable(range)) return false; @@ -494,10 +546,9 @@ static bool gntdev_invalidate(struct mmu_interval_notifier *mn, map->index, map->count, map->vma->vm_start, map->vma->vm_end, range->start, range->end, mstart, mend); - err = unmap_grant_pages(map, + unmap_grant_pages(map, (mstart - map->vma->vm_start) >> PAGE_SHIFT, (mend - mstart) >> PAGE_SHIFT); - WARN_ON(err); return true; } @@ -985,6 +1036,10 @@ static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma) goto unlock_out; if (use_ptemod && map->vma) goto unlock_out; + if (atomic_read(&map->live_grants)) { + err = -EAGAIN; + goto unlock_out; + } refcount_inc(&map->users); vma->vm_ops = &gntdev_vmops; diff --git a/drivers/xen/grant-dma-iommu.c b/drivers/xen/grant-dma-iommu.c new file mode 100644 index 000000000000..16b8bc0c0b33 --- /dev/null +++ b/drivers/xen/grant-dma-iommu.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Stub IOMMU driver which does nothing. + * The main purpose of it being present is to reuse generic IOMMU device tree + * bindings by Xen grant DMA-mapping layer. + * + * Copyright (C) 2022 EPAM Systems Inc. 
+ */ + +#include <linux/iommu.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +struct grant_dma_iommu_device { + struct device *dev; + struct iommu_device iommu; +}; + +/* Nothing is really needed here */ +static const struct iommu_ops grant_dma_iommu_ops; + +static const struct of_device_id grant_dma_iommu_of_match[] = { + { .compatible = "xen,grant-dma" }, + { }, +}; + +static int grant_dma_iommu_probe(struct platform_device *pdev) +{ + struct grant_dma_iommu_device *mmu; + int ret; + + mmu = devm_kzalloc(&pdev->dev, sizeof(*mmu), GFP_KERNEL); + if (!mmu) + return -ENOMEM; + + mmu->dev = &pdev->dev; + + ret = iommu_device_register(&mmu->iommu, &grant_dma_iommu_ops, &pdev->dev); + if (ret) + return ret; + + platform_set_drvdata(pdev, mmu); + + return 0; +} + +static int grant_dma_iommu_remove(struct platform_device *pdev) +{ + struct grant_dma_iommu_device *mmu = platform_get_drvdata(pdev); + + platform_set_drvdata(pdev, NULL); + iommu_device_unregister(&mmu->iommu); + + return 0; +} + +static struct platform_driver grant_dma_iommu_driver = { + .driver = { + .name = "grant-dma-iommu", + .of_match_table = grant_dma_iommu_of_match, + }, + .probe = grant_dma_iommu_probe, + .remove = grant_dma_iommu_remove, +}; + +static int __init grant_dma_iommu_init(void) +{ + struct device_node *iommu_np; + + iommu_np = of_find_matching_node(NULL, grant_dma_iommu_of_match); + if (!iommu_np) + return 0; + + of_node_put(iommu_np); + + return platform_driver_register(&grant_dma_iommu_driver); +} +subsys_initcall(grant_dma_iommu_init); diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c new file mode 100644 index 000000000000..fc0142484001 --- /dev/null +++ b/drivers/xen/grant-dma-ops.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Xen grant DMA-mapping layer - contains special DMA-mapping routines + * for providing grant references as DMA addresses to be used by frontends + * (e.g. virtio) in Xen guests + * + * Copyright (c) 2021, Juergen Gross <jgross@suse.com> + */ + +#include <linux/module.h> +#include <linux/dma-map-ops.h> +#include <linux/of.h> +#include <linux/pfn.h> +#include <linux/xarray.h> +#include <xen/xen.h> +#include <xen/xen-ops.h> +#include <xen/grant_table.h> + +struct xen_grant_dma_data { + /* The ID of backend domain */ + domid_t backend_domid; + /* Is device behaving sane? */ + bool broken; +}; + +static DEFINE_XARRAY(xen_grant_dma_devices); + +#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63) + +static inline dma_addr_t grant_to_dma(grant_ref_t grant) +{ + return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT); +} + +static inline grant_ref_t dma_to_grant(dma_addr_t dma) +{ + return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT); +} + +static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev) +{ + struct xen_grant_dma_data *data; + + xa_lock(&xen_grant_dma_devices); + data = xa_load(&xen_grant_dma_devices, (unsigned long)dev); + xa_unlock(&xen_grant_dma_devices); + + return data; +} + +/* + * DMA ops for Xen frontends (e.g. virtio). + * + * Used to act as a kind of software IOMMU for Xen guests by using grants as + * DMA addresses. + * Such a DMA address is formed by using the grant reference as a frame + * number and setting the highest address bit (this bit is for the backend + * to be able to distinguish it from e.g. a mmio address). 
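Concretely, with 4 KiB pages (PAGE_SHIFT = 12) grant reference 0x1234 encodes to DMA address 0x8000000001234000: bit 63 is the grant flag and bits 12..62 carry the reference. A standalone round-trip check mirroring grant_to_dma()/dma_to_grant() above (userspace sketch):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define XEN_GRANT_DMA_ADDR_OFF (1ULL << 63)

static uint64_t grant_to_dma(uint32_t grant)
{
	return XEN_GRANT_DMA_ADDR_OFF | ((uint64_t)grant << PAGE_SHIFT);
}

static uint32_t dma_to_grant(uint64_t dma)
{
	return (uint32_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT);
}

int main(void)
{
	uint32_t grant = 0x1234;
	uint64_t dma = grant_to_dma(grant);

	printf("dma = %#llx\n", (unsigned long long)dma); /* 0x8000000001234000 */
	assert(dma_to_grant(dma) == grant);
	return 0;
}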
+ */ +static void *xen_grant_dma_alloc(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + unsigned long pfn; + grant_ref_t grant; + void *ret; + + data = find_xen_grant_dma_data(dev); + if (!data) + return NULL; + + if (unlikely(data->broken)) + return NULL; + + ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp); + if (!ret) + return NULL; + + pfn = virt_to_pfn(ret); + + if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) { + free_pages_exact(ret, n_pages * PAGE_SIZE); + return NULL; + } + + for (i = 0; i < n_pages; i++) { + gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, + pfn_to_gfn(pfn + i), 0); + } + + *dma_handle = grant_to_dma(grant); + + return ret; +} + +static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr, + dma_addr_t dma_handle, unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + + data = find_xen_grant_dma_data(dev); + if (!data) + return; + + if (unlikely(data->broken)) + return; + + grant = dma_to_grant(dma_handle); + + for (i = 0; i < n_pages; i++) { + if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) { + dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n"); + data->broken = true; + return; + } + } + + gnttab_free_grant_reference_seq(grant, n_pages); + + free_pages_exact(vaddr, n_pages * PAGE_SIZE); +} + +static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size, + dma_addr_t *dma_handle, + enum dma_data_direction dir, + gfp_t gfp) +{ + void *vaddr; + + vaddr = xen_grant_dma_alloc(dev, size, dma_handle, gfp, 0); + if (!vaddr) + return NULL; + + return virt_to_page(vaddr); +} + +static void xen_grant_dma_free_pages(struct device *dev, size_t size, + struct page *vaddr, dma_addr_t dma_handle, + enum dma_data_direction dir) +{ + xen_grant_dma_free(dev, size, page_to_virt(vaddr), dma_handle, 0); +} + +static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + dma_addr_t dma_handle; + + if (WARN_ON(dir == DMA_NONE)) + return DMA_MAPPING_ERROR; + + data = find_xen_grant_dma_data(dev); + if (!data) + return DMA_MAPPING_ERROR; + + if (unlikely(data->broken)) + return DMA_MAPPING_ERROR; + + if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) + return DMA_MAPPING_ERROR; + + for (i = 0; i < n_pages; i++) { + gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, + xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE); + } + + dma_handle = grant_to_dma(grant) + offset; + + return dma_handle; +} + +static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + unsigned long attrs) +{ + struct xen_grant_dma_data *data; + unsigned int i, n_pages = PFN_UP(size); + grant_ref_t grant; + + if (WARN_ON(dir == DMA_NONE)) + return; + + data = find_xen_grant_dma_data(dev); + if (!data) + return; + + if (unlikely(data->broken)) + return; + + grant = dma_to_grant(dma_handle); + + for (i = 0; i < n_pages; i++) { + if (unlikely(!gnttab_end_foreign_access_ref(grant + i))) { + dev_alert(dev, "Grant still in use by backend domain, disabled for further use\n"); + data->broken = true; + return; + } + } + + 
gnttab_free_grant_reference_seq(grant, n_pages); +} + +static void xen_grant_dma_unmap_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct scatterlist *s; + unsigned int i; + + if (WARN_ON(dir == DMA_NONE)) + return; + + for_each_sg(sg, s, nents, i) + xen_grant_dma_unmap_page(dev, s->dma_address, sg_dma_len(s), dir, + attrs); +} + +static int xen_grant_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + unsigned long attrs) +{ + struct scatterlist *s; + unsigned int i; + + if (WARN_ON(dir == DMA_NONE)) + return -EINVAL; + + for_each_sg(sg, s, nents, i) { + s->dma_address = xen_grant_dma_map_page(dev, sg_page(s), s->offset, + s->length, dir, attrs); + if (s->dma_address == DMA_MAPPING_ERROR) + goto out; + + sg_dma_len(s) = s->length; + } + + return nents; + +out: + xen_grant_dma_unmap_sg(dev, sg, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC); + sg_dma_len(sg) = 0; + + return -EIO; +} + +static int xen_grant_dma_supported(struct device *dev, u64 mask) +{ + return mask == DMA_BIT_MASK(64); +} + +static const struct dma_map_ops xen_grant_dma_ops = { + .alloc = xen_grant_dma_alloc, + .free = xen_grant_dma_free, + .alloc_pages = xen_grant_dma_alloc_pages, + .free_pages = xen_grant_dma_free_pages, + .mmap = dma_common_mmap, + .get_sgtable = dma_common_get_sgtable, + .map_page = xen_grant_dma_map_page, + .unmap_page = xen_grant_dma_unmap_page, + .map_sg = xen_grant_dma_map_sg, + .unmap_sg = xen_grant_dma_unmap_sg, + .dma_supported = xen_grant_dma_supported, +}; + +bool xen_is_grant_dma_device(struct device *dev) +{ + struct device_node *iommu_np; + bool has_iommu; + + /* XXX Handle only DT devices for now */ + if (!dev->of_node) + return false; + + iommu_np = of_parse_phandle(dev->of_node, "iommus", 0); + has_iommu = iommu_np && of_device_is_compatible(iommu_np, "xen,grant-dma"); + of_node_put(iommu_np); + + return has_iommu; +} + +void xen_grant_setup_dma_ops(struct device *dev) +{ + struct xen_grant_dma_data *data; + struct of_phandle_args iommu_spec; + + data = find_xen_grant_dma_data(dev); + if (data) { + dev_err(dev, "Xen grant DMA data is already created\n"); + return; + } + + /* XXX ACPI device unsupported for now */ + if (!dev->of_node) + goto err; + + if (of_parse_phandle_with_args(dev->of_node, "iommus", "#iommu-cells", + 0, &iommu_spec)) { + dev_err(dev, "Cannot parse iommus property\n"); + goto err; + } + + if (!of_device_is_compatible(iommu_spec.np, "xen,grant-dma") || + iommu_spec.args_count != 1) { + dev_err(dev, "Incompatible IOMMU node\n"); + of_node_put(iommu_spec.np); + goto err; + } + + of_node_put(iommu_spec.np); + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + goto err; + + /* + * The endpoint ID here means the ID of the domain where the corresponding + * backend is running + */ + data->backend_domid = iommu_spec.args[0]; + + if (xa_err(xa_store(&xen_grant_dma_devices, (unsigned long)dev, data, + GFP_KERNEL))) { + dev_err(dev, "Cannot store Xen grant DMA data\n"); + goto err; + } + + dev->dma_ops = &xen_grant_dma_ops; + + return; + +err: + dev_err(dev, "Cannot set up Xen grant DMA ops, retain platform DMA ops\n"); +} + +MODULE_DESCRIPTION("Xen grant DMA-mapping layer"); +MODULE_AUTHOR("Juergen Gross <jgross@suse.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 7a18292540bc..738029de3c67 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -33,6 +33,7 @@ #define 
pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt +#include <linux/bitmap.h> #include <linux/memblock.h> #include <linux/sched.h> #include <linux/mm.h> @@ -70,9 +71,32 @@ static grant_ref_t **gnttab_list; static unsigned int nr_grant_frames; + +/* + * Handling of free grants: + * + * Free grants are in a simple list anchored in gnttab_free_head. They are + * linked by grant ref, the last element contains GNTTAB_LIST_END. The number + * of free entries is stored in gnttab_free_count. + * Additionally there is a bitmap of free entries anchored in + * gnttab_free_bitmap. This is being used for simplifying allocation of + * multiple consecutive grants, which is needed e.g. for support of virtio. + * gnttab_last_free is used to add free entries of new frames at the end of + * the free list. + * gnttab_free_tail_ptr specifies the variable which references the start + * of consecutive free grants ending with gnttab_last_free. This pointer is + * updated in a rather defensive way, in order to avoid performance hits in + * hot paths. + * All those variables are protected by gnttab_list_lock. + */ static int gnttab_free_count; -static grant_ref_t gnttab_free_head; +static unsigned int gnttab_size; +static grant_ref_t gnttab_free_head = GNTTAB_LIST_END; +static grant_ref_t gnttab_last_free = GNTTAB_LIST_END; +static grant_ref_t *gnttab_free_tail_ptr; +static unsigned long *gnttab_free_bitmap; static DEFINE_SPINLOCK(gnttab_list_lock); + struct grant_frames xen_auto_xlat_grant_frames; static unsigned int xen_gnttab_version; module_param_named(version, xen_gnttab_version, uint, 0); @@ -168,16 +192,116 @@ static int get_free_entries(unsigned count) ref = head = gnttab_free_head; gnttab_free_count -= count; - while (count-- > 1) - head = gnttab_entry(head); + while (count--) { + bitmap_clear(gnttab_free_bitmap, head, 1); + if (gnttab_free_tail_ptr == __gnttab_entry(head)) + gnttab_free_tail_ptr = &gnttab_free_head; + if (count) + head = gnttab_entry(head); + } gnttab_free_head = gnttab_entry(head); gnttab_entry(head) = GNTTAB_LIST_END; + if (!gnttab_free_count) { + gnttab_last_free = GNTTAB_LIST_END; + gnttab_free_tail_ptr = NULL; + } + spin_unlock_irqrestore(&gnttab_list_lock, flags); return ref; } +static int get_seq_entry_count(void) +{ + if (gnttab_last_free == GNTTAB_LIST_END || !gnttab_free_tail_ptr || + *gnttab_free_tail_ptr == GNTTAB_LIST_END) + return 0; + + return gnttab_last_free - *gnttab_free_tail_ptr + 1; +} + +/* Rebuilds the free grant list and tries to find count consecutive entries. */ +static int get_free_seq(unsigned int count) +{ + int ret = -ENOSPC; + unsigned int from, to; + grant_ref_t *last; + + gnttab_free_tail_ptr = &gnttab_free_head; + last = &gnttab_free_head; + + for (from = find_first_bit(gnttab_free_bitmap, gnttab_size); + from < gnttab_size; + from = find_next_bit(gnttab_free_bitmap, gnttab_size, to + 1)) { + to = find_next_zero_bit(gnttab_free_bitmap, gnttab_size, + from + 1); + if (ret < 0 && to - from >= count) { + ret = from; + bitmap_clear(gnttab_free_bitmap, ret, count); + from += count; + gnttab_free_count -= count; + if (from == to) + continue; + } + + /* + * Recreate the free list in order to have it properly sorted. + * This is needed to make sure that the free tail has the maximum + * possible size. 
+ */ + while (from < to) { + *last = from; + last = __gnttab_entry(from); + gnttab_last_free = from; + from++; + } + if (to < gnttab_size) + gnttab_free_tail_ptr = __gnttab_entry(to - 1); + } + + *last = GNTTAB_LIST_END; + if (gnttab_last_free != gnttab_size - 1) + gnttab_free_tail_ptr = NULL; + + return ret; +} + +static int get_free_entries_seq(unsigned int count) +{ + unsigned long flags; + int ret = 0; + + spin_lock_irqsave(&gnttab_list_lock, flags); + + if (gnttab_free_count < count) { + ret = gnttab_expand(count - gnttab_free_count); + if (ret < 0) + goto out; + } + + if (get_seq_entry_count() < count) { + ret = get_free_seq(count); + if (ret >= 0) + goto out; + ret = gnttab_expand(count - get_seq_entry_count()); + if (ret < 0) + goto out; + } + + ret = *gnttab_free_tail_ptr; + *gnttab_free_tail_ptr = gnttab_entry(ret + count - 1); + gnttab_free_count -= count; + if (!gnttab_free_count) + gnttab_free_tail_ptr = NULL; + bitmap_clear(gnttab_free_bitmap, ret, count); + + out: + spin_unlock_irqrestore(&gnttab_list_lock, flags); + + return ret; +} + static void do_free_callbacks(void) { struct gnttab_free_callback *callback, *next; @@ -204,21 +328,51 @@ static inline void check_free_callbacks(void) do_free_callbacks(); } -static void put_free_entry(grant_ref_t ref) +static void put_free_entry_locked(grant_ref_t ref) { - unsigned long flags; - if (unlikely(ref < GNTTAB_NR_RESERVED_ENTRIES)) return; - spin_lock_irqsave(&gnttab_list_lock, flags); gnttab_entry(ref) = gnttab_free_head; gnttab_free_head = ref; + if (!gnttab_free_count) + gnttab_last_free = ref; + if (gnttab_free_tail_ptr == &gnttab_free_head) + gnttab_free_tail_ptr = __gnttab_entry(ref); gnttab_free_count++; + bitmap_set(gnttab_free_bitmap, ref, 1); +} + +static void put_free_entry(grant_ref_t ref) +{ + unsigned long flags; + + spin_lock_irqsave(&gnttab_list_lock, flags); + put_free_entry_locked(ref); check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } +static void gnttab_set_free(unsigned int start, unsigned int n) +{ + unsigned int i; + + for (i = start; i < start + n - 1; i++) + gnttab_entry(i) = i + 1; + + gnttab_entry(i) = GNTTAB_LIST_END; + if (!gnttab_free_count) { + gnttab_free_head = start; + gnttab_free_tail_ptr = &gnttab_free_head; + } else { + gnttab_entry(gnttab_last_free) = start; + } + gnttab_free_count += n; + gnttab_last_free = i; + + bitmap_set(gnttab_free_bitmap, start, n); +} + /* * Following applies to gnttab_update_entry_v1 and gnttab_update_entry_v2. 
* Introducing a valid entry into the grant table: @@ -450,23 +604,31 @@ void gnttab_free_grant_references(grant_ref_t head) { grant_ref_t ref; unsigned long flags; - int count = 1; - if (head == GNTTAB_LIST_END) - return; + spin_lock_irqsave(&gnttab_list_lock, flags); - ref = head; - while (gnttab_entry(ref) != GNTTAB_LIST_END) { - ref = gnttab_entry(ref); - count++; + while (head != GNTTAB_LIST_END) { + ref = gnttab_entry(head); + put_free_entry_locked(head); + head = ref; } - gnttab_entry(ref) = gnttab_free_head; - gnttab_free_head = head; - gnttab_free_count += count; check_free_callbacks(); spin_unlock_irqrestore(&gnttab_list_lock, flags); } EXPORT_SYMBOL_GPL(gnttab_free_grant_references); +void gnttab_free_grant_reference_seq(grant_ref_t head, unsigned int count) +{ + unsigned long flags; + unsigned int i; + + spin_lock_irqsave(&gnttab_list_lock, flags); + for (i = count; i > 0; i--) + put_free_entry_locked(head + i - 1); + check_free_callbacks(); + spin_unlock_irqrestore(&gnttab_list_lock, flags); +} +EXPORT_SYMBOL_GPL(gnttab_free_grant_reference_seq); + int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) { int h = get_free_entries(count); @@ -480,6 +642,24 @@ int gnttab_alloc_grant_references(u16 count, grant_ref_t *head) } EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references); +int gnttab_alloc_grant_reference_seq(unsigned int count, grant_ref_t *first) +{ + int h; + + if (count == 1) + h = get_free_entries(1); + else + h = get_free_entries_seq(count); + + if (h < 0) + return -ENOSPC; + + *first = h; + + return 0; +} +EXPORT_SYMBOL_GPL(gnttab_alloc_grant_reference_seq); + int gnttab_empty_grant_references(const grant_ref_t *private_head) { return (*private_head == GNTTAB_LIST_END); @@ -572,16 +752,13 @@ static int grow_gnttab_list(unsigned int more_frames) goto grow_nomem; } + gnttab_set_free(gnttab_size, extra_entries); - for (i = grefs_per_frame * nr_grant_frames; - i < grefs_per_frame * new_nr_grant_frames - 1; i++) - gnttab_entry(i) = i + 1; - - gnttab_entry(i) = gnttab_free_head; - gnttab_free_head = grefs_per_frame * nr_grant_frames; - gnttab_free_count += extra_entries; + if (!gnttab_free_tail_ptr) + gnttab_free_tail_ptr = __gnttab_entry(gnttab_size); nr_grant_frames = new_nr_grant_frames; + gnttab_size += extra_entries; check_free_callbacks(); @@ -1424,20 +1601,20 @@ static int gnttab_expand(unsigned int req_entries) int gnttab_init(void) { int i; - unsigned long max_nr_grant_frames; + unsigned long max_nr_grant_frames, max_nr_grefs; unsigned int max_nr_glist_frames, nr_glist_frames; - unsigned int nr_init_grefs; int ret; gnttab_request_version(); max_nr_grant_frames = gnttab_max_grant_frames(); + max_nr_grefs = max_nr_grant_frames * + gnttab_interface->grefs_per_grant_frame; nr_grant_frames = 1; /* Determine the maximum number of frames required for the * grant reference free list on the current hypervisor. 
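As the free-list comment earlier describes, free grants form a singly linked list threaded through the table itself: the entry for a free ref stores the next free ref, terminated by GNTTAB_LIST_END, which is what gnttab_set_free() builds and put_free_entry_locked() pushes onto. A minimal userspace model of such an array-threaded free list (hypothetical, fixed size, no locking):

#include <stdint.h>

#define LIST_END 0xffffffffU	/* stands in for GNTTAB_LIST_END */
#define NENTRIES 64U

/* next[i] of a free entry holds the index of the next free entry. */
struct freelist {
	uint32_t next[NENTRIES];
	uint32_t head;
};

static void freelist_init(struct freelist *fl, uint32_t start, uint32_t n)
{
	uint32_t i;

	for (i = start; i < start + n - 1; i++)
		fl->next[i] = i + 1;	/* chain consecutive free entries */
	fl->next[start + n - 1] = LIST_END;
	fl->head = start;
}

static uint32_t freelist_get(struct freelist *fl)
{
	uint32_t ref = fl->head;

	if (ref != LIST_END)
		fl->head = fl->next[ref];	/* pop from the head */
	return ref;
}

static void freelist_put(struct freelist *fl, uint32_t ref)
{
	fl->next[ref] = fl->head;	/* push back at the head */
	fl->head = ref;
}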
*/ - max_nr_glist_frames = (max_nr_grant_frames * - gnttab_interface->grefs_per_grant_frame / RPP); + max_nr_glist_frames = max_nr_grefs / RPP; gnttab_list = kmalloc_array(max_nr_glist_frames, sizeof(grant_ref_t *), @@ -1454,6 +1631,12 @@ int gnttab_init(void) } } + gnttab_free_bitmap = bitmap_zalloc(max_nr_grefs, GFP_KERNEL); + if (!gnttab_free_bitmap) { + ret = -ENOMEM; + goto ini_nomem; + } + ret = arch_gnttab_init(max_nr_grant_frames, nr_status_frames(max_nr_grant_frames)); if (ret < 0) @@ -1464,15 +1647,10 @@ int gnttab_init(void) goto ini_nomem; } - nr_init_grefs = nr_grant_frames * - gnttab_interface->grefs_per_grant_frame; - - for (i = GNTTAB_NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++) - gnttab_entry(i) = i + 1; + gnttab_size = nr_grant_frames * gnttab_interface->grefs_per_grant_frame; - gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END; - gnttab_free_count = nr_init_grefs - GNTTAB_NR_RESERVED_ENTRIES; - gnttab_free_head = GNTTAB_NR_RESERVED_ENTRIES; + gnttab_set_free(GNTTAB_NR_RESERVED_ENTRIES, + gnttab_size - GNTTAB_NR_RESERVED_ENTRIES); printk("Grant table initialized\n"); return 0; @@ -1481,6 +1659,7 @@ int gnttab_init(void) for (i--; i >= 0; i--) free_page((unsigned long)gnttab_list[i]); kfree(gnttab_list); + bitmap_free(gnttab_free_bitmap); return ret; } EXPORT_SYMBOL_GPL(gnttab_init); diff --git a/drivers/xen/xlate_mmu.c b/drivers/xen/xlate_mmu.c index 34742c6e189e..f17c4c03db30 100644 --- a/drivers/xen/xlate_mmu.c +++ b/drivers/xen/xlate_mmu.c @@ -261,7 +261,6 @@ int __init xen_xlate_map_ballooned_pages(xen_pfn_t **gfns, void **virt, return 0; } -EXPORT_SYMBOL_GPL(xen_xlate_map_ballooned_pages); struct remap_pfn { struct mm_struct *mm;
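Putting the grant-table pieces together, a frontend that needs n consecutive grants (as the grant DMA layer above does) would pair the new sequence calls roughly as follows. A hedged usage sketch; example_use_seq_grants() and its first_frame parameter are hypothetical:

#include <linux/errno.h>
#include <xen/grant_table.h>

static int example_use_seq_grants(unsigned int n, domid_t backend,
				  unsigned long first_frame)
{
	grant_ref_t first;
	unsigned int i;

	if (gnttab_alloc_grant_reference_seq(n, &first))
		return -ENOSPC;

	/* Grant the backend access to n consecutive frames. */
	for (i = 0; i < n; i++)
		gnttab_grant_foreign_access_ref(first + i, backend,
						first_frame + i, 0);

	/* ... hand first..first+n-1 to the backend as one range ... */

	/* Revoke access; a ref still mapped by the backend must not be freed. */
	for (i = 0; i < n; i++)
		if (!gnttab_end_foreign_access_ref(first + i))
			return -EBUSY;

	gnttab_free_grant_reference_seq(first, n);

	return 0;
}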