Diffstat (limited to 'drivers')
309 files changed, 23587 insertions, 4449 deletions
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 33f71983e001..6078064684c6 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -298,6 +298,59 @@ out: return status; } +struct iort_workaround_oem_info { + char oem_id[ACPI_OEM_ID_SIZE + 1]; + char oem_table_id[ACPI_OEM_TABLE_ID_SIZE + 1]; + u32 oem_revision; +}; + +static bool apply_id_count_workaround; + +static struct iort_workaround_oem_info wa_info[] __initdata = { + { + .oem_id = "HISI ", + .oem_table_id = "HIP07 ", + .oem_revision = 0, + }, { + .oem_id = "HISI ", + .oem_table_id = "HIP08 ", + .oem_revision = 0, + } +}; + +static void __init +iort_check_id_count_workaround(struct acpi_table_header *tbl) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(wa_info); i++) { + if (!memcmp(wa_info[i].oem_id, tbl->oem_id, ACPI_OEM_ID_SIZE) && + !memcmp(wa_info[i].oem_table_id, tbl->oem_table_id, ACPI_OEM_TABLE_ID_SIZE) && + wa_info[i].oem_revision == tbl->oem_revision) { + apply_id_count_workaround = true; + pr_warn(FW_BUG "ID count for ID mapping entry is wrong, applying workaround\n"); + break; + } + } +} + +static inline u32 iort_get_map_max(struct acpi_iort_id_mapping *map) +{ + u32 map_max = map->input_base + map->id_count; + + /* + * The IORT specification revision D (Section 3, table 4, page 9) says + * Number of IDs = The number of IDs in the range minus one, but the + * IORT code ignored the "minus one", and some firmware did that too, + * so apply a workaround here to keep compatible with both the spec + * compliant and non-spec compliant firmwares. + */ + if (apply_id_count_workaround) + map_max--; + + return map_max; +} + static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, u32 *rid_out) { @@ -314,8 +367,7 @@ static int iort_id_map(struct acpi_iort_id_mapping *map, u8 type, u32 rid_in, return -ENXIO; } - if (rid_in < map->input_base || - (rid_in >= map->input_base + map->id_count)) + if (rid_in < map->input_base || rid_in > iort_get_map_max(map)) return -ENXIO; *rid_out = map->output_base + (rid_in - map->input_base); @@ -1631,5 +1683,6 @@ void __init acpi_iort_init(void) return; } + iort_check_id_count_workaround(iort_table); iort_init_platform_devices(); } diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c index aad00d2b28f5..cc87004d5e2d 100644 --- a/drivers/atm/firestream.c +++ b/drivers/atm/firestream.c @@ -912,6 +912,7 @@ static int fs_open(struct atm_vcc *atm_vcc) } if (!to) { printk ("No more free channels for FS50..\n"); + kfree(vcc); return -EBUSY; } vcc->channo = dev->channo; @@ -922,6 +923,7 @@ static int fs_open(struct atm_vcc *atm_vcc) if (((DO_DIRECTION(rxtp) && dev->atm_vccs[vcc->channo])) || ( DO_DIRECTION(txtp) && test_bit (vcc->channo, dev->tx_inuse))) { printk ("Channel is in use for FS155.\n"); + kfree(vcc); return -EBUSY; } } @@ -935,6 +937,7 @@ static int fs_open(struct atm_vcc *atm_vcc) tc, sizeof (struct fs_transmit_config)); if (!tc) { fs_dprintk (FS_DEBUG_OPEN, "fs: can't alloc transmit_config.\n"); + kfree(vcc); return -ENOMEM; } diff --git a/drivers/base/regmap/regmap-i2c.c b/drivers/base/regmap/regmap-i2c.c index ac9b31c57967..008f8da69d97 100644 --- a/drivers/base/regmap/regmap-i2c.c +++ b/drivers/base/regmap/regmap-i2c.c @@ -43,7 +43,7 @@ static int regmap_smbus_byte_reg_write(void *context, unsigned int reg, return i2c_smbus_write_byte_data(i2c, reg, val); } -static struct regmap_bus regmap_smbus_byte = { +static const struct regmap_bus regmap_smbus_byte = { .reg_write = regmap_smbus_byte_reg_write, .reg_read = 
regmap_smbus_byte_reg_read, }; @@ -79,7 +79,7 @@ static int regmap_smbus_word_reg_write(void *context, unsigned int reg, return i2c_smbus_write_word_data(i2c, reg, val); } -static struct regmap_bus regmap_smbus_word = { +static const struct regmap_bus regmap_smbus_word = { .reg_write = regmap_smbus_word_reg_write, .reg_read = regmap_smbus_word_reg_read, }; @@ -115,7 +115,7 @@ static int regmap_smbus_word_write_swapped(void *context, unsigned int reg, return i2c_smbus_write_word_swapped(i2c, reg, val); } -static struct regmap_bus regmap_smbus_word_swapped = { +static const struct regmap_bus regmap_smbus_word_swapped = { .reg_write = regmap_smbus_word_write_swapped, .reg_read = regmap_smbus_word_read_swapped, }; @@ -197,7 +197,7 @@ static int regmap_i2c_read(void *context, return -EIO; } -static struct regmap_bus regmap_i2c = { +static const struct regmap_bus regmap_i2c = { .write = regmap_i2c_write, .gather_write = regmap_i2c_gather_write, .read = regmap_i2c_read, @@ -239,7 +239,7 @@ static int regmap_i2c_smbus_i2c_read(void *context, const void *reg, return -EIO; } -static struct regmap_bus regmap_i2c_smbus_i2c_block = { +static const struct regmap_bus regmap_i2c_smbus_i2c_block = { .write = regmap_i2c_smbus_i2c_write, .read = regmap_i2c_smbus_i2c_read, .max_raw_read = I2C_SMBUS_BLOCK_MAX, diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index 19f57ccfbe1d..59f911e57719 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -1488,11 +1488,18 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, WARN_ON(!map->bus); - /* Check for unwritable registers before we start */ - for (i = 0; i < val_len / map->format.val_bytes; i++) - if (!regmap_writeable(map, - reg + regmap_get_offset(map, i))) - return -EINVAL; + /* Check for unwritable or noinc registers in range + * before we start + */ + if (!regmap_writeable_noinc(map, reg)) { + for (i = 0; i < val_len / map->format.val_bytes; i++) { + unsigned int element = + reg + regmap_get_offset(map, i); + if (!regmap_writeable(map, element) || + regmap_writeable_noinc(map, element)) + return -EINVAL; + } + } if (!map->cache_bypass && map->format.parse_val) { unsigned int ival; diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index 3b53b3e5ec3e..d52bf4df0bca 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -310,7 +310,17 @@ static ssize_t timeouts_show(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RO(timeouts); -static struct attribute *tpm_dev_attrs[] = { +static ssize_t tpm_version_major_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tpm_chip *chip = to_tpm_chip(dev); + + return sprintf(buf, "%s\n", chip->flags & TPM_CHIP_FLAG_TPM2 + ? 
"2" : "1"); +} +static DEVICE_ATTR_RO(tpm_version_major); + +static struct attribute *tpm1_dev_attrs[] = { &dev_attr_pubek.attr, &dev_attr_pcrs.attr, &dev_attr_enabled.attr, @@ -321,18 +331,28 @@ static struct attribute *tpm_dev_attrs[] = { &dev_attr_cancel.attr, &dev_attr_durations.attr, &dev_attr_timeouts.attr, + &dev_attr_tpm_version_major.attr, NULL, }; -static const struct attribute_group tpm_dev_group = { - .attrs = tpm_dev_attrs, +static struct attribute *tpm2_dev_attrs[] = { + &dev_attr_tpm_version_major.attr, + NULL +}; + +static const struct attribute_group tpm1_dev_group = { + .attrs = tpm1_dev_attrs, +}; + +static const struct attribute_group tpm2_dev_group = { + .attrs = tpm2_dev_attrs, }; void tpm_sysfs_add_device(struct tpm_chip *chip) { - if (chip->flags & TPM_CHIP_FLAG_TPM2) - return; - WARN_ON(chip->groups_cnt != 0); - chip->groups[chip->groups_cnt++] = &tpm_dev_group; + if (chip->flags & TPM_CHIP_FLAG_TPM2) + chip->groups[chip->groups_cnt++] = &tpm2_dev_group; + else + chip->groups[chip->groups_cnt++] = &tpm1_dev_group; } diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index 6fa1eba9d477..5142da401db3 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -239,6 +239,14 @@ config FSL_RAID the capability to offload memcpy, xor and pq computation for raid5/6. +config HISI_DMA + tristate "HiSilicon DMA Engine support" + depends on ARM64 || (COMPILE_TEST && PCI_MSI) + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + help + Support HiSilicon Kunpeng DMA engine. + config IMG_MDC_DMA tristate "IMG MDC support" depends on MIPS || COMPILE_TEST @@ -273,6 +281,19 @@ config INTEL_IDMA64 Enable DMA support for Intel Low Power Subsystem such as found on Intel Skylake PCH. +config INTEL_IDXD + tristate "Intel Data Accelerators support" + depends on PCI && X86_64 + select DMA_ENGINE + select SBITMAP + help + Enable support for the Intel(R) data accelerators present + in Intel Xeon CPU. + + Say Y if you have such a platform. + + If unsure, say N. + config INTEL_IOATDMA tristate "Intel I/OAT DMA support" depends on PCI && X86_64 @@ -497,6 +518,15 @@ config PXA_DMA 16 to 32 channels for peripheral to memory or memory to memory transfers. +config PLX_DMA + tristate "PLX ExpressLane PEX Switch DMA Engine Support" + depends on PCI + select DMA_ENGINE + help + Some PLX ExpressLane PCI Switches support additional DMA engines. + These are exposed via extra functions on the switch's + upstream port. Each function exposes one DMA channel. 
+ config SIRF_DMA tristate "CSR SiRFprimaII/SiRFmarco DMA support" depends on ARCH_SIRF diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 42d7e2fc64fa..1d908394fbea 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -35,12 +35,14 @@ obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o obj-$(CONFIG_MCF_EDMA) += mcf-edma.o fsl-edma-common.o obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o obj-$(CONFIG_FSL_RAID) += fsl_raid.o +obj-$(CONFIG_HISI_DMA) += hisi_dma.o obj-$(CONFIG_HSU_DMA) += hsu/ obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o obj-$(CONFIG_IMX_DMA) += imx-dma.o obj-$(CONFIG_IMX_SDMA) += imx-sdma.o obj-$(CONFIG_INTEL_IDMA64) += idma64.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ +obj-$(CONFIG_INTEL_IDXD) += idxd/ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o obj-$(CONFIG_K3_DMA) += k3dma.o @@ -59,6 +61,7 @@ obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o obj-$(CONFIG_OWL_DMA) += owl-dma.o obj-$(CONFIG_PCH_DMA) += pch_dma.o obj-$(CONFIG_PL330_DMA) += pl330.o +obj-$(CONFIG_PLX_DMA) += plx_dma.o obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ obj-$(CONFIG_PXA_DMA) += pxa_dma.o obj-$(CONFIG_RENESAS_DMA) += sh/ diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index e4c593f48575..4768ef26013b 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -797,10 +797,7 @@ static int bcm2835_dma_terminate_all(struct dma_chan *chan) /* stop DMA activity */ if (c->desc) { - if (c->desc->vd.tx.flags & DMA_PREP_INTERRUPT) - vchan_terminate_vdesc(&c->desc->vd); - else - vchan_vdesc_fini(&c->desc->vd); + vchan_terminate_vdesc(&c->desc->vd); c->desc = NULL; bcm2835_dma_abort(c); } diff --git a/drivers/dma/dma-axi-dmac.c b/drivers/dma/dma-axi-dmac.c index a0ee404b736e..f1d149e32839 100644 --- a/drivers/dma/dma-axi-dmac.c +++ b/drivers/dma/dma-axi-dmac.c @@ -830,6 +830,7 @@ static int axi_dmac_probe(struct platform_device *pdev) struct dma_device *dma_dev; struct axi_dmac *dmac; struct resource *res; + struct regmap *regmap; int ret; dmac = devm_kzalloc(&pdev->dev, sizeof(*dmac), GFP_KERNEL); @@ -921,10 +922,17 @@ static int axi_dmac_probe(struct platform_device *pdev) platform_set_drvdata(pdev, dmac); - devm_regmap_init_mmio(&pdev->dev, dmac->base, &axi_dmac_regmap_config); + regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base, + &axi_dmac_regmap_config); + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + goto err_free_irq; + } return 0; +err_free_irq: + free_irq(dmac->irq, dmac); err_unregister_of: of_dma_controller_free(pdev->dev.of_node); err_unregister_device: diff --git a/drivers/dma/dma-jz4780.c b/drivers/dma/dma-jz4780.c index 44af435628f8..448f663da89c 100644 --- a/drivers/dma/dma-jz4780.c +++ b/drivers/dma/dma-jz4780.c @@ -1021,12 +1021,19 @@ static const struct jz4780_dma_soc_data x1000_dma_soc_data = { .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA, }; +static const struct jz4780_dma_soc_data x1830_dma_soc_data = { + .nb_channels = 32, + .transfer_ord_max = 7, + .flags = JZ_SOC_DATA_PROGRAMMABLE_DMA, +}; + static const struct of_device_id jz4780_dma_dt_match[] = { { .compatible = "ingenic,jz4740-dma", .data = &jz4740_dma_soc_data }, { .compatible = "ingenic,jz4725b-dma", .data = &jz4725b_dma_soc_data }, { .compatible = "ingenic,jz4770-dma", .data = &jz4770_dma_soc_data }, { .compatible = "ingenic,jz4780-dma", .data = &jz4780_dma_soc_data }, { .compatible = "ingenic,x1000-dma", .data = &x1000_dma_soc_data }, + { .compatible = "ingenic,x1830-dma", .data = &x1830_dma_soc_data }, {}, }; MODULE_DEVICE_TABLE(of, jz4780_dma_dt_match); 
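For context on the dma-axi-dmac hunk above: devm_regmap_init_mmio() reports failure through an ERR_PTR()-encoded return value, never NULL, and the old probe path silently discarded it. Below is a minimal sketch of the checked pattern the fix adopts; the driver name, probe shape, and regmap_config fields here are illustrative stand-ins, not taken from the patch (the real driver uses axi_dmac_regmap_config and its own probe/teardown path).

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

/* Illustrative register layout; a real driver supplies its own config. */
static const struct regmap_config foo_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static int foo_probe(struct platform_device *pdev)
{
	void __iomem *base;
	struct regmap *regmap;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/*
	 * devm_regmap_init_mmio() never returns NULL: on failure it returns
	 * an ERR_PTR(), so the result must be tested with IS_ERR() and the
	 * encoded errno propagated, as the axi-dmac fix now does.
	 */
	regmap = devm_regmap_init_mmio(&pdev->dev, base, &foo_regmap_config);
	if (IS_ERR(regmap))
		return PTR_ERR(regmap);

	return 0;
}

The same rule applies to every regmap_init_*() variant; assigning the result without an IS_ERR() check leaves later regmap accesses operating on an error pointer.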
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 03ac4b96117c..f3ef4edd4de1 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -60,6 +60,8 @@ static long dmaengine_ref_count; /* --- sysfs implementation --- */ +#define DMA_SLAVE_NAME "slave" + /** * dev_to_dma_chan - convert a device pointer to its sysfs container object * @dev - device node @@ -164,11 +166,152 @@ static struct class dma_devclass = { /* --- client and device registration --- */ -#define dma_device_satisfies_mask(device, mask) \ - __dma_device_satisfies_mask((device), &(mask)) -static int -__dma_device_satisfies_mask(struct dma_device *device, - const dma_cap_mask_t *want) +/** + * dma_cap_mask_all - enable iteration over all operation types + */ +static dma_cap_mask_t dma_cap_mask_all; + +/** + * dma_chan_tbl_ent - tracks channel allocations per core/operation + * @chan - associated channel for this entry + */ +struct dma_chan_tbl_ent { + struct dma_chan *chan; +}; + +/** + * channel_table - percpu lookup table for memory-to-memory offload providers + */ +static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; + +static int __init dma_channel_table_init(void) +{ + enum dma_transaction_type cap; + int err = 0; + + bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); + + /* 'interrupt', 'private', and 'slave' are channel capabilities, + * but are not associated with an operation so they do not need + * an entry in the channel_table + */ + clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); + clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); + clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); + + for_each_dma_cap_mask(cap, dma_cap_mask_all) { + channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); + if (!channel_table[cap]) { + err = -ENOMEM; + break; + } + } + + if (err) { + pr_err("dmaengine dma_channel_table_init failure: %d\n", err); + for_each_dma_cap_mask(cap, dma_cap_mask_all) + free_percpu(channel_table[cap]); + } + + return err; +} +arch_initcall(dma_channel_table_init); + +/** + * dma_chan_is_local - returns true if the channel is in the same numa-node as + * the cpu + */ +static bool dma_chan_is_local(struct dma_chan *chan, int cpu) +{ + int node = dev_to_node(chan->device->dev); + return node == NUMA_NO_NODE || + cpumask_test_cpu(cpu, cpumask_of_node(node)); +} + +/** + * min_chan - returns the channel with min count and in the same numa-node as + * the cpu + * @cap: capability to match + * @cpu: cpu index which the channel should be close to + * + * If some channels are close to the given cpu, the one with the lowest + * reference count is returned. Otherwise, cpu is ignored and only the + * reference count is taken into account. + * Must be called under dma_list_mutex. + */ +static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) +{ + struct dma_device *device; + struct dma_chan *chan; + struct dma_chan *min = NULL; + struct dma_chan *localmin = NULL; + + list_for_each_entry(device, &dma_device_list, global_node) { + if (!dma_has_cap(cap, device->cap_mask) || + dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) { + if (!chan->client_count) + continue; + if (!min || chan->table_count < min->table_count) + min = chan; + + if (dma_chan_is_local(chan, cpu)) + if (!localmin || + chan->table_count < localmin->table_count) + localmin = chan; + } + } + + chan = localmin ? 
localmin : min; + + if (chan) + chan->table_count++; + + return chan; +} + +/** + * dma_channel_rebalance - redistribute the available channels + * + * Optimize for cpu isolation (each cpu gets a dedicated channel for an + * operation type) in the SMP case, and operation isolation (avoid + * multi-tasking channels) in the non-SMP case. Must be called under + * dma_list_mutex. + */ +static void dma_channel_rebalance(void) +{ + struct dma_chan *chan; + struct dma_device *device; + int cpu; + int cap; + + /* undo the last distribution */ + for_each_dma_cap_mask(cap, dma_cap_mask_all) + for_each_possible_cpu(cpu) + per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; + + list_for_each_entry(device, &dma_device_list, global_node) { + if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) + continue; + list_for_each_entry(chan, &device->channels, device_node) + chan->table_count = 0; + } + + /* don't populate the channel_table if no clients are available */ + if (!dmaengine_ref_count) + return; + + /* redistribute available channels */ + for_each_dma_cap_mask(cap, dma_cap_mask_all) + for_each_online_cpu(cpu) { + chan = min_chan(cap, cpu); + per_cpu_ptr(channel_table[cap], cpu)->chan = chan; + } +} + +static int dma_device_satisfies_mask(struct dma_device *device, + const dma_cap_mask_t *want) { dma_cap_mask_t has; @@ -179,7 +322,7 @@ __dma_device_satisfies_mask(struct dma_device *device, static struct module *dma_chan_to_owner(struct dma_chan *chan) { - return chan->device->dev->driver->owner; + return chan->device->owner; } /** @@ -198,6 +341,23 @@ static void balance_ref_count(struct dma_chan *chan) } } +static void dma_device_release(struct kref *ref) +{ + struct dma_device *device = container_of(ref, struct dma_device, ref); + + list_del_rcu(&device->global_node); + dma_channel_rebalance(); + + if (device->device_release) + device->device_release(device); +} + +static void dma_device_put(struct dma_device *device) +{ + lockdep_assert_held(&dma_list_mutex); + kref_put(&device->ref, dma_device_release); +} + /** * dma_chan_get - try to grab a dma channel's parent driver module * @chan - channel to grab @@ -218,6 +378,12 @@ static int dma_chan_get(struct dma_chan *chan) if (!try_module_get(owner)) return -ENODEV; + ret = kref_get_unless_zero(&chan->device->ref); + if (!ret) { + ret = -ENODEV; + goto module_put_out; + } + /* allocate upon first client reference */ if (chan->device->device_alloc_chan_resources) { ret = chan->device->device_alloc_chan_resources(chan); @@ -233,6 +399,8 @@ out: return 0; err_out: + dma_device_put(chan->device); +module_put_out: module_put(owner); return ret; } @@ -250,7 +418,6 @@ static void dma_chan_put(struct dma_chan *chan) return; chan->client_count--; - module_put(dma_chan_to_owner(chan)); /* This channel is not in use anymore, free it */ if (!chan->client_count && chan->device->device_free_chan_resources) { @@ -265,6 +432,9 @@ static void dma_chan_put(struct dma_chan *chan) chan->router = NULL; chan->route_data = NULL; } + + dma_device_put(chan->device); + module_put(dma_chan_to_owner(chan)); } enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) @@ -289,57 +459,6 @@ enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) EXPORT_SYMBOL(dma_sync_wait); /** - * dma_cap_mask_all - enable iteration over all operation types - */ -static dma_cap_mask_t dma_cap_mask_all; - -/** - * dma_chan_tbl_ent - tracks channel allocations per core/operation - * @chan - associated channel for this entry - */ -struct dma_chan_tbl_ent { - struct 
dma_chan *chan; -}; - -/** - * channel_table - percpu lookup table for memory-to-memory offload providers - */ -static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END]; - -static int __init dma_channel_table_init(void) -{ - enum dma_transaction_type cap; - int err = 0; - - bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END); - - /* 'interrupt', 'private', and 'slave' are channel capabilities, - * but are not associated with an operation so they do not need - * an entry in the channel_table - */ - clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits); - clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits); - clear_bit(DMA_SLAVE, dma_cap_mask_all.bits); - - for_each_dma_cap_mask(cap, dma_cap_mask_all) { - channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent); - if (!channel_table[cap]) { - err = -ENOMEM; - break; - } - } - - if (err) { - pr_err("initialization failure\n"); - for_each_dma_cap_mask(cap, dma_cap_mask_all) - free_percpu(channel_table[cap]); - } - - return err; -} -arch_initcall(dma_channel_table_init); - -/** * dma_find_channel - find a channel to carry out the operation * @tx_type: transaction type */ @@ -369,97 +488,6 @@ void dma_issue_pending_all(void) } EXPORT_SYMBOL(dma_issue_pending_all); -/** - * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu - */ -static bool dma_chan_is_local(struct dma_chan *chan, int cpu) -{ - int node = dev_to_node(chan->device->dev); - return node == NUMA_NO_NODE || - cpumask_test_cpu(cpu, cpumask_of_node(node)); -} - -/** - * min_chan - returns the channel with min count and in the same numa-node as the cpu - * @cap: capability to match - * @cpu: cpu index which the channel should be close to - * - * If some channels are close to the given cpu, the one with the lowest - * reference count is returned. Otherwise, cpu is ignored and only the - * reference count is taken into account. - * Must be called under dma_list_mutex. - */ -static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu) -{ - struct dma_device *device; - struct dma_chan *chan; - struct dma_chan *min = NULL; - struct dma_chan *localmin = NULL; - - list_for_each_entry(device, &dma_device_list, global_node) { - if (!dma_has_cap(cap, device->cap_mask) || - dma_has_cap(DMA_PRIVATE, device->cap_mask)) - continue; - list_for_each_entry(chan, &device->channels, device_node) { - if (!chan->client_count) - continue; - if (!min || chan->table_count < min->table_count) - min = chan; - - if (dma_chan_is_local(chan, cpu)) - if (!localmin || - chan->table_count < localmin->table_count) - localmin = chan; - } - } - - chan = localmin ? localmin : min; - - if (chan) - chan->table_count++; - - return chan; -} - -/** - * dma_channel_rebalance - redistribute the available channels - * - * Optimize for cpu isolation (each cpu gets a dedicated channel for an - * operation type) in the SMP case, and operation isolation (avoid - * multi-tasking channels) in the non-SMP case. Must be called under - * dma_list_mutex. 
- */ -static void dma_channel_rebalance(void) -{ - struct dma_chan *chan; - struct dma_device *device; - int cpu; - int cap; - - /* undo the last distribution */ - for_each_dma_cap_mask(cap, dma_cap_mask_all) - for_each_possible_cpu(cpu) - per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; - - list_for_each_entry(device, &dma_device_list, global_node) { - if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) - continue; - list_for_each_entry(chan, &device->channels, device_node) - chan->table_count = 0; - } - - /* don't populate the channel_table if no clients are available */ - if (!dmaengine_ref_count) - return; - - /* redistribute available channels */ - for_each_dma_cap_mask(cap, dma_cap_mask_all) - for_each_online_cpu(cpu) { - chan = min_chan(cap, cpu); - per_cpu_ptr(channel_table[cap], cpu)->chan = chan; - } -} - int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) { struct dma_device *device; @@ -502,7 +530,7 @@ static struct dma_chan *private_candidate(const dma_cap_mask_t *mask, { struct dma_chan *chan; - if (mask && !__dma_device_satisfies_mask(dev, mask)) { + if (mask && !dma_device_satisfies_mask(dev, mask)) { dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); return NULL; } @@ -704,11 +732,11 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) if (has_acpi_companion(dev) && !chan) chan = acpi_dma_request_slave_chan_by_name(dev, name); - if (chan) { - /* Valid channel found or requester needs to be deferred */ - if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER) - return chan; - } + if (PTR_ERR(chan) == -EPROBE_DEFER) + return chan; + + if (!IS_ERR_OR_NULL(chan)) + goto found; /* Try to find the channel via the DMA filter map(s) */ mutex_lock(&dma_list_mutex); @@ -728,7 +756,23 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name) } mutex_unlock(&dma_list_mutex); - return chan ? 
chan : ERR_PTR(-EPROBE_DEFER); + if (!IS_ERR_OR_NULL(chan)) + goto found; + + return ERR_PTR(-EPROBE_DEFER); + +found: + chan->slave = dev; + chan->name = kasprintf(GFP_KERNEL, "dma:%s", name); + if (!chan->name) + return ERR_PTR(-ENOMEM); + + if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj, + DMA_SLAVE_NAME)) + dev_err(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME); + if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name)) + dev_err(dev, "Cannot create DMA %s symlink\n", chan->name); + return chan; } EXPORT_SYMBOL_GPL(dma_request_chan); @@ -786,6 +830,13 @@ void dma_release_channel(struct dma_chan *chan) /* drop PRIVATE cap enabled by __dma_request_channel() */ if (--chan->device->privatecnt == 0) dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); + if (chan->slave) { + sysfs_remove_link(&chan->slave->kobj, chan->name); + kfree(chan->name); + chan->name = NULL; + chan->slave = NULL; + } + sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME); mutex_unlock(&dma_list_mutex); } EXPORT_SYMBOL_GPL(dma_release_channel); @@ -834,14 +885,14 @@ EXPORT_SYMBOL(dmaengine_get); */ void dmaengine_put(void) { - struct dma_device *device; + struct dma_device *device, *_d; struct dma_chan *chan; mutex_lock(&dma_list_mutex); dmaengine_ref_count--; BUG_ON(dmaengine_ref_count < 0); /* drop channel references */ - list_for_each_entry(device, &dma_device_list, global_node) { + list_for_each_entry_safe(device, _d, &dma_device_list, global_node) { if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) continue; list_for_each_entry(chan, &device->channels, device_node) @@ -900,15 +951,115 @@ static int get_dma_id(struct dma_device *device) return 0; } +static int __dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan, + int chan_id) +{ + int rc = 0; + int chancnt = device->chancnt; + atomic_t *idr_ref; + struct dma_chan *tchan; + + tchan = list_first_entry_or_null(&device->channels, + struct dma_chan, device_node); + if (tchan->dev) { + idr_ref = tchan->dev->idr_ref; + } else { + idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); + if (!idr_ref) + return -ENOMEM; + atomic_set(idr_ref, 0); + } + + chan->local = alloc_percpu(typeof(*chan->local)); + if (!chan->local) + goto err_out; + chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); + if (!chan->dev) { + free_percpu(chan->local); + chan->local = NULL; + goto err_out; + } + + /* + * When the chan_id is a negative value, we are dynamically adding + * the channel. Otherwise we are static enumerating. + */ + chan->chan_id = chan_id < 0 ? 
chancnt : chan_id; + chan->dev->device.class = &dma_devclass; + chan->dev->device.parent = device->dev; + chan->dev->chan = chan; + chan->dev->idr_ref = idr_ref; + chan->dev->dev_id = device->dev_id; + atomic_inc(idr_ref); + dev_set_name(&chan->dev->device, "dma%dchan%d", + device->dev_id, chan->chan_id); + + rc = device_register(&chan->dev->device); + if (rc) + goto err_out; + chan->client_count = 0; + device->chancnt = chan->chan_id + 1; + + return 0; + + err_out: + free_percpu(chan->local); + kfree(chan->dev); + if (atomic_dec_return(idr_ref) == 0) + kfree(idr_ref); + return rc; +} + +int dma_async_device_channel_register(struct dma_device *device, + struct dma_chan *chan) +{ + int rc; + + rc = __dma_async_device_channel_register(device, chan, -1); + if (rc < 0) + return rc; + + dma_channel_rebalance(); + return 0; +} +EXPORT_SYMBOL_GPL(dma_async_device_channel_register); + +static void __dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan) +{ + WARN_ONCE(!device->device_release && chan->client_count, + "%s called while %d clients hold a reference\n", + __func__, chan->client_count); + mutex_lock(&dma_list_mutex); + list_del(&chan->device_node); + device->chancnt--; + chan->dev->chan = NULL; + mutex_unlock(&dma_list_mutex); + device_unregister(&chan->dev->device); + free_percpu(chan->local); +} + +void dma_async_device_channel_unregister(struct dma_device *device, + struct dma_chan *chan) +{ + __dma_async_device_channel_unregister(device, chan); + dma_channel_rebalance(); +} +EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister); + /** * dma_async_device_register - registers DMA devices found * @device: &dma_device + * + * After calling this routine the structure should not be freed except in the + * device_release() callback which will be called after + * dma_async_device_unregister() is called and no further references are taken. */ int dma_async_device_register(struct dma_device *device) { - int chancnt = 0, rc; + int rc, i = 0; struct dma_chan* chan; - atomic_t *idr_ref; if (!device) return -ENODEV; @@ -919,6 +1070,8 @@ int dma_async_device_register(struct dma_device *device) return -EIO; } + device->owner = device->dev->driver->owner; + if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) { dev_err(device->dev, "Device claims capability %s, but op is not defined\n", @@ -994,65 +1147,29 @@ int dma_async_device_register(struct dma_device *device) return -EIO; } + if (!device->device_release) + dev_warn(device->dev, + "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n"); + + kref_init(&device->ref); + /* note: this only matters in the * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case */ if (device_has_all_tx_types(device)) dma_cap_set(DMA_ASYNC_TX, device->cap_mask); - idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL); - if (!idr_ref) - return -ENOMEM; rc = get_dma_id(device); - if (rc != 0) { - kfree(idr_ref); + if (rc != 0) return rc; - } - - atomic_set(idr_ref, 0); /* represent channels in sysfs. 
Probably want devs too */ list_for_each_entry(chan, &device->channels, device_node) { - rc = -ENOMEM; - chan->local = alloc_percpu(typeof(*chan->local)); - if (chan->local == NULL) - goto err_out; - chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); - if (chan->dev == NULL) { - free_percpu(chan->local); - chan->local = NULL; + rc = __dma_async_device_channel_register(device, chan, i++); + if (rc < 0) goto err_out; - } - - chan->chan_id = chancnt++; - chan->dev->device.class = &dma_devclass; - chan->dev->device.parent = device->dev; - chan->dev->chan = chan; - chan->dev->idr_ref = idr_ref; - chan->dev->dev_id = device->dev_id; - atomic_inc(idr_ref); - dev_set_name(&chan->dev->device, "dma%dchan%d", - device->dev_id, chan->chan_id); - - rc = device_register(&chan->dev->device); - if (rc) { - free_percpu(chan->local); - chan->local = NULL; - kfree(chan->dev); - atomic_dec(idr_ref); - goto err_out; - } - chan->client_count = 0; - } - - if (!chancnt) { - dev_err(device->dev, "%s: device has no channels!\n", __func__); - rc = -ENODEV; - goto err_out; } - device->chancnt = chancnt; - mutex_lock(&dma_list_mutex); /* take references on public channels */ if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) @@ -1080,9 +1197,8 @@ int dma_async_device_register(struct dma_device *device) err_out: /* if we never registered a channel just release the idr */ - if (atomic_read(idr_ref) == 0) { + if (!device->chancnt) { ida_free(&dma_ida, device->dev_id); - kfree(idr_ref); return rc; } @@ -1108,23 +1224,20 @@ EXPORT_SYMBOL(dma_async_device_register); */ void dma_async_device_unregister(struct dma_device *device) { - struct dma_chan *chan; + struct dma_chan *chan, *n; + + list_for_each_entry_safe(chan, n, &device->channels, device_node) + __dma_async_device_channel_unregister(device, chan); mutex_lock(&dma_list_mutex); - list_del_rcu(&device->global_node); + /* + * setting DMA_PRIVATE ensures the device being torn down will not + * be used in the channel_table + */ + dma_cap_set(DMA_PRIVATE, device->cap_mask); dma_channel_rebalance(); + dma_device_put(device); mutex_unlock(&dma_list_mutex); - - list_for_each_entry(chan, &device->channels, device_node) { - WARN_ONCE(chan->client_count, - "%s called while %d clients hold a reference\n", - __func__, chan->client_count); - mutex_lock(&dma_list_mutex); - chan->dev->chan = NULL; - mutex_unlock(&dma_list_mutex); - device_unregister(&chan->dev->device); - free_percpu(chan->local); - } } EXPORT_SYMBOL(dma_async_device_unregister); @@ -1302,6 +1415,79 @@ void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, } EXPORT_SYMBOL(dma_async_tx_descriptor_init); +static inline int desc_check_and_set_metadata_mode( + struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode) +{ + /* Make sure that the metadata mode is not mixed */ + if (!desc->desc_metadata_mode) { + if (dmaengine_is_metadata_mode_supported(desc->chan, mode)) + desc->desc_metadata_mode = mode; + else + return -ENOTSUPP; + } else if (desc->desc_metadata_mode != mode) { + return -EINVAL; + } + + return 0; +} + +int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc, + void *data, size_t len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT); + if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->attach) + return -ENOTSUPP; + + return desc->metadata_ops->attach(desc, data, len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata); + +void 
*dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len) +{ + int ret; + + if (!desc) + return ERR_PTR(-EINVAL); + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + if (ret) + return ERR_PTR(ret); + + if (!desc->metadata_ops || !desc->metadata_ops->get_ptr) + return ERR_PTR(-ENOTSUPP); + + return desc->metadata_ops->get_ptr(desc, payload_len, max_len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr); + +int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len) +{ + int ret; + + if (!desc) + return -EINVAL; + + ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE); + if (ret) + return ret; + + if (!desc->metadata_ops || !desc->metadata_ops->set_len) + return -ENOTSUPP; + + return desc->metadata_ops->set_len(desc, payload_len); +} +EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len); + /* dma_wait_for_async_tx - spin wait for a transaction to complete * @tx: in-flight transaction to wait on */ @@ -1373,5 +1559,3 @@ static int __init dma_bus_init(void) return class_register(&dma_devclass); } arch_initcall(dma_bus_init); - - diff --git a/drivers/dma/dmaengine.h b/drivers/dma/dmaengine.h index 501c0b063f85..e8a320c9e57c 100644 --- a/drivers/dma/dmaengine.h +++ b/drivers/dma/dmaengine.h @@ -77,6 +77,7 @@ static inline enum dma_status dma_cookie_status(struct dma_chan *chan, state->last = complete; state->used = used; state->residue = 0; + state->in_flight_bytes = 0; } return dma_async_is_complete(cookie, complete, used); } @@ -87,6 +88,13 @@ static inline void dma_set_residue(struct dma_tx_state *state, u32 residue) state->residue = residue; } +static inline void dma_set_in_flight_bytes(struct dma_tx_state *state, + u32 in_flight_bytes) +{ + if (state) + state->in_flight_bytes = in_flight_bytes; +} + struct dmaengine_desc_callback { dma_async_tx_callback callback; dma_async_tx_callback_result callback_result; @@ -171,4 +179,7 @@ dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb) return (cb->callback) ? true : false; } +struct dma_chan *dma_get_slave_channel(struct dma_chan *chan); +struct dma_chan *dma_get_any_slave_channel(struct dma_device *device); + #endif diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c index a1ce307c502f..14c1ac26f866 100644 --- a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c +++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c @@ -636,14 +636,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan) vchan_get_all_descriptors(&chan->vc, &head); - /* - * As vchan_dma_desc_free_list can access to desc_allocated list - * we need to call it in vc.lock context. 
- */ - vchan_dma_desc_free_list(&chan->vc, &head); - spin_unlock_irqrestore(&chan->vc.lock, flags); + vchan_dma_desc_free_list(&chan->vc, &head); + dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan)); return 0; diff --git a/drivers/dma/fsl-edma-common.c b/drivers/dma/fsl-edma-common.c index b1a7ca91701a..5697c3622699 100644 --- a/drivers/dma/fsl-edma-common.c +++ b/drivers/dma/fsl-edma-common.c @@ -109,10 +109,15 @@ void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan, u32 ch = fsl_chan->vchan.chan.chan_id; void __iomem *muxaddr; unsigned int chans_per_mux, ch_off; + int endian_diff[4] = {3, 1, -1, -3}; u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs; chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr; ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux; + + if (fsl_chan->edma->drvdata->mux_swap) + ch_off += endian_diff[ch_off % 4]; + muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux]; slot = EDMAMUX_CHCFG_SOURCE(slot); diff --git a/drivers/dma/fsl-edma-common.h b/drivers/dma/fsl-edma-common.h index 5eaa2902ed39..67e422590c9a 100644 --- a/drivers/dma/fsl-edma-common.h +++ b/drivers/dma/fsl-edma-common.h @@ -147,6 +147,7 @@ struct fsl_edma_drvdata { enum edma_version version; u32 dmamuxs; bool has_dmaclk; + bool mux_swap; int (*setup_irq)(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma); }; diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c index b626c06ac2e0..eff7ebd8cf35 100644 --- a/drivers/dma/fsl-edma.c +++ b/drivers/dma/fsl-edma.c @@ -233,6 +233,13 @@ static struct fsl_edma_drvdata vf610_data = { .setup_irq = fsl_edma_irq_init, }; +static struct fsl_edma_drvdata ls1028a_data = { + .version = v1, + .dmamuxs = DMAMUX_NR, + .mux_swap = true, + .setup_irq = fsl_edma_irq_init, +}; + static struct fsl_edma_drvdata imx7ulp_data = { .version = v3, .dmamuxs = 1, @@ -242,6 +249,7 @@ static struct fsl_edma_drvdata imx7ulp_data = { static const struct of_device_id fsl_edma_dt_ids[] = { { .compatible = "fsl,vf610-edma", .data = &vf610_data}, + { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data}, { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data}, { /* sentinel */ } }; diff --git a/drivers/dma/fsl-qdma.c b/drivers/dma/fsl-qdma.c index 89792083d62c..95cc0256b387 100644 --- a/drivers/dma/fsl-qdma.c +++ b/drivers/dma/fsl-qdma.c @@ -304,7 +304,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan) vchan_dma_desc_free_list(&fsl_chan->vchan, &head); - if (!fsl_queue->comp_pool && !fsl_queue->comp_pool) + if (!fsl_queue->comp_pool && !fsl_queue->desc_pool) return; list_for_each_entry_safe(comp_temp, _comp_temp, diff --git a/drivers/dma/hisi_dma.c b/drivers/dma/hisi_dma.c new file mode 100644 index 000000000000..ed3619266a48 --- /dev/null +++ b/drivers/dma/hisi_dma.c @@ -0,0 +1,611 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright(c) 2019 HiSilicon Limited. 
*/ +#include <linux/bitfield.h> +#include <linux/dmaengine.h> +#include <linux/init.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/spinlock.h> +#include "virt-dma.h" + +#define HISI_DMA_SQ_BASE_L 0x0 +#define HISI_DMA_SQ_BASE_H 0x4 +#define HISI_DMA_SQ_DEPTH 0x8 +#define HISI_DMA_SQ_TAIL_PTR 0xc +#define HISI_DMA_CQ_BASE_L 0x10 +#define HISI_DMA_CQ_BASE_H 0x14 +#define HISI_DMA_CQ_DEPTH 0x18 +#define HISI_DMA_CQ_HEAD_PTR 0x1c +#define HISI_DMA_CTRL0 0x20 +#define HISI_DMA_CTRL0_QUEUE_EN_S 0 +#define HISI_DMA_CTRL0_QUEUE_PAUSE_S 4 +#define HISI_DMA_CTRL1 0x24 +#define HISI_DMA_CTRL1_QUEUE_RESET_S 0 +#define HISI_DMA_Q_FSM_STS 0x30 +#define HISI_DMA_FSM_STS_MASK GENMASK(3, 0) +#define HISI_DMA_INT_STS 0x40 +#define HISI_DMA_INT_STS_MASK GENMASK(12, 0) +#define HISI_DMA_INT_MSK 0x44 +#define HISI_DMA_MODE 0x217c +#define HISI_DMA_OFFSET 0x100 + +#define HISI_DMA_MSI_NUM 30 +#define HISI_DMA_CHAN_NUM 30 +#define HISI_DMA_Q_DEPTH_VAL 1024 + +#define PCI_BAR_2 2 + +enum hisi_dma_mode { + EP = 0, + RC, +}; + +enum hisi_dma_chan_status { + DISABLE = -1, + IDLE = 0, + RUN, + CPL, + PAUSE, + HALT, + ABORT, + WAIT, + BUFFCLR, +}; + +struct hisi_dma_sqe { + __le32 dw0; +#define OPCODE_MASK GENMASK(3, 0) +#define OPCODE_SMALL_PACKAGE 0x1 +#define OPCODE_M2M 0x4 +#define LOCAL_IRQ_EN BIT(8) +#define ATTR_SRC_MASK GENMASK(14, 12) + __le32 dw1; + __le32 dw2; +#define ATTR_DST_MASK GENMASK(26, 24) + __le32 length; + __le64 src_addr; + __le64 dst_addr; +}; + +struct hisi_dma_cqe { + __le32 rsv0; + __le32 rsv1; + __le16 sq_head; + __le16 rsv2; + __le16 rsv3; + __le16 w0; +#define STATUS_MASK GENMASK(15, 1) +#define STATUS_SUCC 0x0 +#define VALID_BIT BIT(0) +}; + +struct hisi_dma_desc { + struct virt_dma_desc vd; + struct hisi_dma_sqe sqe; +}; + +struct hisi_dma_chan { + struct virt_dma_chan vc; + struct hisi_dma_dev *hdma_dev; + struct hisi_dma_sqe *sq; + struct hisi_dma_cqe *cq; + dma_addr_t sq_dma; + dma_addr_t cq_dma; + u32 sq_tail; + u32 cq_head; + u32 qp_num; + enum hisi_dma_chan_status status; + struct hisi_dma_desc *desc; +}; + +struct hisi_dma_dev { + struct pci_dev *pdev; + void __iomem *base; + struct dma_device dma_dev; + u32 chan_num; + u32 chan_depth; + struct hisi_dma_chan chan[]; +}; + +static inline struct hisi_dma_chan *to_hisi_dma_chan(struct dma_chan *c) +{ + return container_of(c, struct hisi_dma_chan, vc.chan); +} + +static inline struct hisi_dma_desc *to_hisi_dma_desc(struct virt_dma_desc *vd) +{ + return container_of(vd, struct hisi_dma_desc, vd); +} + +static inline void hisi_dma_chan_write(void __iomem *base, u32 reg, u32 index, + u32 val) +{ + writel_relaxed(val, base + reg + index * HISI_DMA_OFFSET); +} + +static inline void hisi_dma_update_bit(void __iomem *addr, u32 pos, bool val) +{ + u32 tmp; + + tmp = readl_relaxed(addr); + tmp = val ? 
tmp | BIT(pos) : tmp & ~BIT(pos); + writel_relaxed(tmp, addr); +} + +static void hisi_dma_free_irq_vectors(void *data) +{ + pci_free_irq_vectors(data); +} + +static void hisi_dma_pause_dma(struct hisi_dma_dev *hdma_dev, u32 index, + bool pause) +{ + void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index * + HISI_DMA_OFFSET; + + hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_PAUSE_S, pause); +} + +static void hisi_dma_enable_dma(struct hisi_dma_dev *hdma_dev, u32 index, + bool enable) +{ + void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL0 + index * + HISI_DMA_OFFSET; + + hisi_dma_update_bit(addr, HISI_DMA_CTRL0_QUEUE_EN_S, enable); +} + +static void hisi_dma_mask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_INT_MSK, qp_index, + HISI_DMA_INT_STS_MASK); +} + +static void hisi_dma_unmask_irq(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + void __iomem *base = hdma_dev->base; + + hisi_dma_chan_write(base, HISI_DMA_INT_STS, qp_index, + HISI_DMA_INT_STS_MASK); + hisi_dma_chan_write(base, HISI_DMA_INT_MSK, qp_index, 0); +} + +static void hisi_dma_do_reset(struct hisi_dma_dev *hdma_dev, u32 index) +{ + void __iomem *addr = hdma_dev->base + HISI_DMA_CTRL1 + index * + HISI_DMA_OFFSET; + + hisi_dma_update_bit(addr, HISI_DMA_CTRL1_QUEUE_RESET_S, 1); +} + +static void hisi_dma_reset_qp_point(struct hisi_dma_dev *hdma_dev, u32 index) +{ + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, index, 0); + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_CQ_HEAD_PTR, index, 0); +} + +static void hisi_dma_reset_hw_chan(struct hisi_dma_chan *chan) +{ + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + u32 index = chan->qp_num, tmp; + int ret; + + hisi_dma_pause_dma(hdma_dev, index, true); + hisi_dma_enable_dma(hdma_dev, index, false); + hisi_dma_mask_irq(hdma_dev, index); + + ret = readl_relaxed_poll_timeout(hdma_dev->base + + HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp, + FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) != RUN, 10, 1000); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "disable channel timeout!\n"); + WARN_ON(1); + } + + hisi_dma_do_reset(hdma_dev, index); + hisi_dma_reset_qp_point(hdma_dev, index); + hisi_dma_pause_dma(hdma_dev, index, false); + hisi_dma_enable_dma(hdma_dev, index, true); + hisi_dma_unmask_irq(hdma_dev, index); + + ret = readl_relaxed_poll_timeout(hdma_dev->base + + HISI_DMA_Q_FSM_STS + index * HISI_DMA_OFFSET, tmp, + FIELD_GET(HISI_DMA_FSM_STS_MASK, tmp) == IDLE, 10, 1000); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "reset channel timeout!\n"); + WARN_ON(1); + } +} + +static void hisi_dma_free_chan_resources(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + + hisi_dma_reset_hw_chan(chan); + vchan_free_chan_resources(&chan->vc); + + memset(chan->sq, 0, sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth); + memset(chan->cq, 0, sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth); + chan->sq_tail = 0; + chan->cq_head = 0; + chan->status = DISABLE; +} + +static void hisi_dma_desc_free(struct virt_dma_desc *vd) +{ + kfree(to_hisi_dma_desc(vd)); +} + +static struct dma_async_tx_descriptor * +hisi_dma_prep_dma_memcpy(struct dma_chan *c, dma_addr_t dst, dma_addr_t src, + size_t len, unsigned long flags) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + struct hisi_dma_desc *desc; + + desc = kzalloc(sizeof(*desc), GFP_NOWAIT); + if (!desc) + return NULL; + + desc->sqe.length = cpu_to_le32(len); + desc->sqe.src_addr = cpu_to_le64(src); + 
desc->sqe.dst_addr = cpu_to_le64(dst); + + return vchan_tx_prep(&chan->vc, &desc->vd, flags); +} + +static enum dma_status +hisi_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + return dma_cookie_status(c, cookie, txstate); +} + +static void hisi_dma_start_transfer(struct hisi_dma_chan *chan) +{ + struct hisi_dma_sqe *sqe = chan->sq + chan->sq_tail; + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + struct hisi_dma_desc *desc; + struct virt_dma_desc *vd; + + vd = vchan_next_desc(&chan->vc); + if (!vd) { + dev_err(&hdma_dev->pdev->dev, "no issued task!\n"); + chan->desc = NULL; + return; + } + list_del(&vd->node); + desc = to_hisi_dma_desc(vd); + chan->desc = desc; + + memcpy(sqe, &desc->sqe, sizeof(struct hisi_dma_sqe)); + + /* update other field in sqe */ + sqe->dw0 = cpu_to_le32(FIELD_PREP(OPCODE_MASK, OPCODE_M2M)); + sqe->dw0 |= cpu_to_le32(LOCAL_IRQ_EN); + + /* make sure data has been updated in sqe */ + wmb(); + + /* update sq tail, point to new sqe position */ + chan->sq_tail = (chan->sq_tail + 1) % hdma_dev->chan_depth; + + /* update sq_tail to trigger a new task */ + hisi_dma_chan_write(hdma_dev->base, HISI_DMA_SQ_TAIL_PTR, chan->qp_num, + chan->sq_tail); +} + +static void hisi_dma_issue_pending(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + unsigned long flags; + + spin_lock_irqsave(&chan->vc.lock, flags); + + if (vchan_issue_pending(&chan->vc)) + hisi_dma_start_transfer(chan); + + spin_unlock_irqrestore(&chan->vc.lock, flags); +} + +static int hisi_dma_terminate_all(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&chan->vc.lock, flags); + + hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, true); + if (chan->desc) { + vchan_terminate_vdesc(&chan->desc->vd); + chan->desc = NULL; + } + + vchan_get_all_descriptors(&chan->vc, &head); + + spin_unlock_irqrestore(&chan->vc.lock, flags); + + vchan_dma_desc_free_list(&chan->vc, &head); + hisi_dma_pause_dma(chan->hdma_dev, chan->qp_num, false); + + return 0; +} + +static void hisi_dma_synchronize(struct dma_chan *c) +{ + struct hisi_dma_chan *chan = to_hisi_dma_chan(c); + + vchan_synchronize(&chan->vc); +} + +static int hisi_dma_alloc_qps_mem(struct hisi_dma_dev *hdma_dev) +{ + size_t sq_size = sizeof(struct hisi_dma_sqe) * hdma_dev->chan_depth; + size_t cq_size = sizeof(struct hisi_dma_cqe) * hdma_dev->chan_depth; + struct device *dev = &hdma_dev->pdev->dev; + struct hisi_dma_chan *chan; + int i; + + for (i = 0; i < hdma_dev->chan_num; i++) { + chan = &hdma_dev->chan[i]; + chan->sq = dmam_alloc_coherent(dev, sq_size, &chan->sq_dma, + GFP_KERNEL); + if (!chan->sq) + return -ENOMEM; + + chan->cq = dmam_alloc_coherent(dev, cq_size, &chan->cq_dma, + GFP_KERNEL); + if (!chan->cq) + return -ENOMEM; + } + + return 0; +} + +static void hisi_dma_init_hw_qp(struct hisi_dma_dev *hdma_dev, u32 index) +{ + struct hisi_dma_chan *chan = &hdma_dev->chan[index]; + u32 hw_depth = hdma_dev->chan_depth - 1; + void __iomem *base = hdma_dev->base; + + /* set sq, cq base */ + hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_L, index, + lower_32_bits(chan->sq_dma)); + hisi_dma_chan_write(base, HISI_DMA_SQ_BASE_H, index, + upper_32_bits(chan->sq_dma)); + hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_L, index, + lower_32_bits(chan->cq_dma)); + hisi_dma_chan_write(base, HISI_DMA_CQ_BASE_H, index, + upper_32_bits(chan->cq_dma)); + + /* set sq, cq depth */ + hisi_dma_chan_write(base, HISI_DMA_SQ_DEPTH, index, 
hw_depth); + hisi_dma_chan_write(base, HISI_DMA_CQ_DEPTH, index, hw_depth); + + /* init sq tail and cq head */ + hisi_dma_chan_write(base, HISI_DMA_SQ_TAIL_PTR, index, 0); + hisi_dma_chan_write(base, HISI_DMA_CQ_HEAD_PTR, index, 0); +} + +static void hisi_dma_enable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + hisi_dma_init_hw_qp(hdma_dev, qp_index); + hisi_dma_unmask_irq(hdma_dev, qp_index); + hisi_dma_enable_dma(hdma_dev, qp_index, true); +} + +static void hisi_dma_disable_qp(struct hisi_dma_dev *hdma_dev, u32 qp_index) +{ + hisi_dma_reset_hw_chan(&hdma_dev->chan[qp_index]); +} + +static void hisi_dma_enable_qps(struct hisi_dma_dev *hdma_dev) +{ + int i; + + for (i = 0; i < hdma_dev->chan_num; i++) { + hdma_dev->chan[i].qp_num = i; + hdma_dev->chan[i].hdma_dev = hdma_dev; + hdma_dev->chan[i].vc.desc_free = hisi_dma_desc_free; + vchan_init(&hdma_dev->chan[i].vc, &hdma_dev->dma_dev); + hisi_dma_enable_qp(hdma_dev, i); + } +} + +static void hisi_dma_disable_qps(struct hisi_dma_dev *hdma_dev) +{ + int i; + + for (i = 0; i < hdma_dev->chan_num; i++) { + hisi_dma_disable_qp(hdma_dev, i); + tasklet_kill(&hdma_dev->chan[i].vc.task); + } +} + +static irqreturn_t hisi_dma_irq(int irq, void *data) +{ + struct hisi_dma_chan *chan = data; + struct hisi_dma_dev *hdma_dev = chan->hdma_dev; + struct hisi_dma_desc *desc; + struct hisi_dma_cqe *cqe; + unsigned long flags; + + spin_lock_irqsave(&chan->vc.lock, flags); + + desc = chan->desc; + cqe = chan->cq + chan->cq_head; + if (desc) { + if (FIELD_GET(STATUS_MASK, cqe->w0) == STATUS_SUCC) { + chan->cq_head = (chan->cq_head + 1) % + hdma_dev->chan_depth; + hisi_dma_chan_write(hdma_dev->base, + HISI_DMA_CQ_HEAD_PTR, chan->qp_num, + chan->cq_head); + vchan_cookie_complete(&desc->vd); + } else { + dev_err(&hdma_dev->pdev->dev, "task error!\n"); + } + + chan->desc = NULL; + } + + spin_unlock_irqrestore(&chan->vc.lock, flags); + + return IRQ_HANDLED; +} + +static int hisi_dma_request_qps_irq(struct hisi_dma_dev *hdma_dev) +{ + struct pci_dev *pdev = hdma_dev->pdev; + int i, ret; + + for (i = 0; i < hdma_dev->chan_num; i++) { + ret = devm_request_irq(&pdev->dev, pci_irq_vector(pdev, i), + hisi_dma_irq, IRQF_SHARED, "hisi_dma", + &hdma_dev->chan[i]); + if (ret) + return ret; + } + + return 0; +} + +/* This function enables all hw channels in a device */ +static int hisi_dma_enable_hw_channels(struct hisi_dma_dev *hdma_dev) +{ + int ret; + + ret = hisi_dma_alloc_qps_mem(hdma_dev); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "fail to allocate qp memory!\n"); + return ret; + } + + ret = hisi_dma_request_qps_irq(hdma_dev); + if (ret) { + dev_err(&hdma_dev->pdev->dev, "fail to request qp irq!\n"); + return ret; + } + + hisi_dma_enable_qps(hdma_dev); + + return 0; +} + +static void hisi_dma_disable_hw_channels(void *data) +{ + hisi_dma_disable_qps(data); +} + +static void hisi_dma_set_mode(struct hisi_dma_dev *hdma_dev, + enum hisi_dma_mode mode) +{ + writel_relaxed(mode == RC ? 
1 : 0, hdma_dev->base + HISI_DMA_MODE); +} + +static int hisi_dma_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct device *dev = &pdev->dev; + struct hisi_dma_dev *hdma_dev; + struct dma_device *dma_dev; + size_t dev_size; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) { + dev_err(dev, "failed to enable device mem!\n"); + return ret; + } + + ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_2, pci_name(pdev)); + if (ret) { + dev_err(dev, "failed to remap I/O region!\n"); + return ret; + } + + ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (ret) + return ret; + + dev_size = sizeof(struct hisi_dma_chan) * HISI_DMA_CHAN_NUM + + sizeof(*hdma_dev); + hdma_dev = devm_kzalloc(dev, dev_size, GFP_KERNEL); + if (!hdma_dev) + return -EINVAL; + + hdma_dev->base = pcim_iomap_table(pdev)[PCI_BAR_2]; + hdma_dev->pdev = pdev; + hdma_dev->chan_num = HISI_DMA_CHAN_NUM; + hdma_dev->chan_depth = HISI_DMA_Q_DEPTH_VAL; + + pci_set_drvdata(pdev, hdma_dev); + pci_set_master(pdev); + + ret = pci_alloc_irq_vectors(pdev, HISI_DMA_MSI_NUM, HISI_DMA_MSI_NUM, + PCI_IRQ_MSI); + if (ret < 0) { + dev_err(dev, "Failed to allocate MSI vectors!\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, hisi_dma_free_irq_vectors, pdev); + if (ret) + return ret; + + dma_dev = &hdma_dev->dma_dev; + dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); + dma_dev->device_free_chan_resources = hisi_dma_free_chan_resources; + dma_dev->device_prep_dma_memcpy = hisi_dma_prep_dma_memcpy; + dma_dev->device_tx_status = hisi_dma_tx_status; + dma_dev->device_issue_pending = hisi_dma_issue_pending; + dma_dev->device_terminate_all = hisi_dma_terminate_all; + dma_dev->device_synchronize = hisi_dma_synchronize; + dma_dev->directions = BIT(DMA_MEM_TO_MEM); + dma_dev->dev = dev; + INIT_LIST_HEAD(&dma_dev->channels); + + hisi_dma_set_mode(hdma_dev, RC); + + ret = hisi_dma_enable_hw_channels(hdma_dev); + if (ret < 0) { + dev_err(dev, "failed to enable hw channel!\n"); + return ret; + } + + ret = devm_add_action_or_reset(dev, hisi_dma_disable_hw_channels, + hdma_dev); + if (ret) + return ret; + + ret = dmaenginem_async_device_register(dma_dev); + if (ret < 0) + dev_err(dev, "failed to register device!\n"); + + return ret; +} + +static const struct pci_device_id hisi_dma_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_HUAWEI, 0xa122) }, + { 0, } +}; + +static struct pci_driver hisi_dma_pci_driver = { + .name = "hisi_dma", + .id_table = hisi_dma_pci_tbl, + .probe = hisi_dma_probe, +}; + +module_pci_driver(hisi_dma_pci_driver); + +MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>"); +MODULE_AUTHOR("Zhenfa Qiu <qiuzhenfa@hisilicon.com>"); +MODULE_DESCRIPTION("HiSilicon Kunpeng DMA controller driver"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(pci, hisi_dma_pci_tbl); diff --git a/drivers/dma/idxd/Makefile b/drivers/dma/idxd/Makefile new file mode 100644 index 000000000000..8978b898d777 --- /dev/null +++ b/drivers/dma/idxd/Makefile @@ -0,0 +1,2 @@ +obj-$(CONFIG_INTEL_IDXD) += idxd.o +idxd-y := init.o irq.o device.o sysfs.o submit.o dma.o cdev.o diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c new file mode 100644 index 000000000000..1d7347825b95 --- /dev/null +++ b/drivers/dma/idxd/cdev.c @@ -0,0 +1,302 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
*/ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/sched/task.h> +#include <linux/intel-svm.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/cdev.h> +#include <linux/fs.h> +#include <linux/poll.h> +#include <uapi/linux/idxd.h> +#include "registers.h" +#include "idxd.h" + +struct idxd_cdev_context { + const char *name; + dev_t devt; + struct ida minor_ida; +}; + +/* + * ictx is an array based off of accelerator types. enum idxd_type + * is used as index + */ +static struct idxd_cdev_context ictx[IDXD_TYPE_MAX] = { + { .name = "dsa" }, +}; + +struct idxd_user_context { + struct idxd_wq *wq; + struct task_struct *task; + unsigned int flags; +}; + +enum idxd_cdev_cleanup { + CDEV_NORMAL = 0, + CDEV_FAILED, +}; + +static void idxd_cdev_dev_release(struct device *dev) +{ + dev_dbg(dev, "releasing cdev device\n"); + kfree(dev); +} + +static struct device_type idxd_cdev_device_type = { + .name = "idxd_cdev", + .release = idxd_cdev_dev_release, +}; + +static inline struct idxd_cdev *inode_idxd_cdev(struct inode *inode) +{ + struct cdev *cdev = inode->i_cdev; + + return container_of(cdev, struct idxd_cdev, cdev); +} + +static inline struct idxd_wq *idxd_cdev_wq(struct idxd_cdev *idxd_cdev) +{ + return container_of(idxd_cdev, struct idxd_wq, idxd_cdev); +} + +static inline struct idxd_wq *inode_wq(struct inode *inode) +{ + return idxd_cdev_wq(inode_idxd_cdev(inode)); +} + +static int idxd_cdev_open(struct inode *inode, struct file *filp) +{ + struct idxd_user_context *ctx; + struct idxd_device *idxd; + struct idxd_wq *wq; + struct device *dev; + struct idxd_cdev *idxd_cdev; + + wq = inode_wq(inode); + idxd = wq->idxd; + dev = &idxd->pdev->dev; + idxd_cdev = &wq->idxd_cdev; + + dev_dbg(dev, "%s called\n", __func__); + + if (idxd_wq_refcount(wq) > 1 && wq_dedicated(wq)) + return -EBUSY; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->wq = wq; + filp->private_data = ctx; + idxd_wq_get(wq); + return 0; +} + +static int idxd_cdev_release(struct inode *node, struct file *filep) +{ + struct idxd_user_context *ctx = filep->private_data; + struct idxd_wq *wq = ctx->wq; + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + + dev_dbg(dev, "%s called\n", __func__); + filep->private_data = NULL; + + kfree(ctx); + idxd_wq_put(wq); + return 0; +} + +static int check_vma(struct idxd_wq *wq, struct vm_area_struct *vma, + const char *func) +{ + struct device *dev = &wq->idxd->pdev->dev; + + if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) { + dev_info_ratelimited(dev, + "%s: %s: mapping too large: %lu\n", + current->comm, func, + vma->vm_end - vma->vm_start); + return -EINVAL; + } + + return 0; +} + +static int idxd_cdev_mmap(struct file *filp, struct vm_area_struct *vma) +{ + struct idxd_user_context *ctx = filp->private_data; + struct idxd_wq *wq = ctx->wq; + struct idxd_device *idxd = wq->idxd; + struct pci_dev *pdev = idxd->pdev; + phys_addr_t base = pci_resource_start(pdev, IDXD_WQ_BAR); + unsigned long pfn; + int rc; + + dev_dbg(&pdev->dev, "%s called\n", __func__); + rc = check_vma(wq, vma, __func__); + + vma->vm_flags |= VM_DONTCOPY; + pfn = (base + idxd_get_wq_portal_full_offset(wq->id, + IDXD_PORTAL_LIMITED)) >> PAGE_SHIFT; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_private_data = ctx; + + return io_remap_pfn_range(vma, vma->vm_start, pfn, PAGE_SIZE, + vma->vm_page_prot); +} + +static __poll_t 
idxd_cdev_poll(struct file *filp, + struct poll_table_struct *wait) +{ + struct idxd_user_context *ctx = filp->private_data; + struct idxd_wq *wq = ctx->wq; + struct idxd_device *idxd = wq->idxd; + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + unsigned long flags; + __poll_t out = 0; + + poll_wait(filp, &idxd_cdev->err_queue, wait); + spin_lock_irqsave(&idxd->dev_lock, flags); + if (idxd->sw_err.valid) + out = EPOLLIN | EPOLLRDNORM; + spin_unlock_irqrestore(&idxd->dev_lock, flags); + + return out; +} + +static const struct file_operations idxd_cdev_fops = { + .owner = THIS_MODULE, + .open = idxd_cdev_open, + .release = idxd_cdev_release, + .mmap = idxd_cdev_mmap, + .poll = idxd_cdev_poll, +}; + +int idxd_cdev_get_major(struct idxd_device *idxd) +{ + return MAJOR(ictx[idxd->type].devt); +} + +static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + struct idxd_cdev_context *cdev_ctx; + struct device *dev; + int minor, rc; + + idxd_cdev->dev = kzalloc(sizeof(*idxd_cdev->dev), GFP_KERNEL); + if (!idxd_cdev->dev) + return -ENOMEM; + + dev = idxd_cdev->dev; + dev->parent = &idxd->pdev->dev; + dev_set_name(dev, "%s/wq%u.%u", idxd_get_dev_name(idxd), + idxd->id, wq->id); + dev->bus = idxd_get_bus_type(idxd); + + cdev_ctx = &ictx[wq->idxd->type]; + minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL); + if (minor < 0) { + rc = minor; + goto ida_err; + } + + dev->devt = MKDEV(MAJOR(cdev_ctx->devt), minor); + dev->type = &idxd_cdev_device_type; + rc = device_register(dev); + if (rc < 0) { + dev_err(&idxd->pdev->dev, "device register failed\n"); + put_device(dev); + goto dev_reg_err; + } + idxd_cdev->minor = minor; + + return 0; + + dev_reg_err: + /* put_device() dropped the last reference and already freed dev */ + ida_simple_remove(&cdev_ctx->minor_ida, minor); + idxd_cdev->dev = NULL; + return rc; + + ida_err: + kfree(dev); + idxd_cdev->dev = NULL; + return rc; +} + +static void idxd_wq_cdev_cleanup(struct idxd_wq *wq, + enum idxd_cdev_cleanup cdev_state) +{ + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + struct idxd_cdev_context *cdev_ctx; + + cdev_ctx = &ictx[wq->idxd->type]; + if (cdev_state == CDEV_NORMAL) + cdev_del(&idxd_cdev->cdev); + device_unregister(idxd_cdev->dev); + /* + * The device_type->release() will be called on the device and free + * the allocated struct device. We can just forget it.
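+ * (idxd_cdev_dev_release() does the kfree(), so no explicit free here.)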
+ */ + ida_simple_remove(&cdev_ctx->minor_ida, idxd_cdev->minor); + idxd_cdev->dev = NULL; + idxd_cdev->minor = -1; +} + +int idxd_wq_add_cdev(struct idxd_wq *wq) +{ + struct idxd_cdev *idxd_cdev = &wq->idxd_cdev; + struct cdev *cdev = &idxd_cdev->cdev; + struct device *dev; + int rc; + + rc = idxd_wq_cdev_dev_setup(wq); + if (rc < 0) + return rc; + + dev = idxd_cdev->dev; + cdev_init(cdev, &idxd_cdev_fops); + cdev_set_parent(cdev, &dev->kobj); + rc = cdev_add(cdev, dev->devt, 1); + if (rc) { + dev_dbg(&wq->idxd->pdev->dev, "cdev_add failed: %d\n", rc); + idxd_wq_cdev_cleanup(wq, CDEV_FAILED); + return rc; + } + + init_waitqueue_head(&idxd_cdev->err_queue); + return 0; +} + +void idxd_wq_del_cdev(struct idxd_wq *wq) +{ + idxd_wq_cdev_cleanup(wq, CDEV_NORMAL); +} + +int idxd_cdev_register(void) +{ + int rc, i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + ida_init(&ictx[i].minor_ida); + rc = alloc_chrdev_region(&ictx[i].devt, 0, MINORMASK, + ictx[i].name); + if (rc) + return rc; + } + + return 0; +} + +void idxd_cdev_remove(void) +{ + int i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + unregister_chrdev_region(ictx[i].devt, MINORMASK); + ida_destroy(&ictx[i].minor_ida); + } +} diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c new file mode 100644 index 000000000000..ada69e722f84 --- /dev/null +++ b/drivers/dma/idxd/device.c @@ -0,0 +1,693 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/dmaengine.h> +#include <uapi/linux/idxd.h> +#include "../dmaengine.h" +#include "idxd.h" +#include "registers.h" + +static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout); +static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand); + +/* Interrupt control bits */ +int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id) +{ + struct pci_dev *pdev = idxd->pdev; + int msixcnt = pci_msix_vec_count(pdev); + union msix_perm perm; + u32 offset; + + if (vec_id < 0 || vec_id >= msixcnt) + return -EINVAL; + + offset = idxd->msix_perm_offset + vec_id * 8; + perm.bits = ioread32(idxd->reg_base + offset); + perm.ignore = 1; + iowrite32(perm.bits, idxd->reg_base + offset); + + return 0; +} + +void idxd_mask_msix_vectors(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + int msixcnt = pci_msix_vec_count(pdev); + int i, rc; + + for (i = 0; i < msixcnt; i++) { + rc = idxd_mask_msix_vector(idxd, i); + if (rc < 0) + dev_warn(&pdev->dev, + "Failed disabling msix vec %d\n", i); + } +} + +int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id) +{ + struct pci_dev *pdev = idxd->pdev; + int msixcnt = pci_msix_vec_count(pdev); + union msix_perm perm; + u32 offset; + + if (vec_id < 0 || vec_id >= msixcnt) + return -EINVAL; + + offset = idxd->msix_perm_offset + vec_id * 8; + perm.bits = ioread32(idxd->reg_base + offset); + perm.ignore = 0; + iowrite32(perm.bits, idxd->reg_base + offset); + + return 0; +} + +void idxd_unmask_error_interrupts(struct idxd_device *idxd) +{ + union genctrl_reg genctrl; + + genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); + genctrl.softerr_int_en = 1; + iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); +} + +void idxd_mask_error_interrupts(struct idxd_device *idxd) +{ + union genctrl_reg genctrl; + + genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET); + 
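/* read-modify-write GENCTRL so only the soft-error enable bit changes */ +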
genctrl.softerr_int_en = 0; + iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET); +} + +static void free_hw_descs(struct idxd_wq *wq) +{ + int i; + + for (i = 0; i < wq->num_descs; i++) + kfree(wq->hw_descs[i]); + + kfree(wq->hw_descs); +} + +static int alloc_hw_descs(struct idxd_wq *wq, int num) +{ + struct device *dev = &wq->idxd->pdev->dev; + int i; + int node = dev_to_node(dev); + + wq->hw_descs = kcalloc_node(num, sizeof(struct dsa_hw_desc *), + GFP_KERNEL, node); + if (!wq->hw_descs) + return -ENOMEM; + + for (i = 0; i < num; i++) { + wq->hw_descs[i] = kzalloc_node(sizeof(*wq->hw_descs[i]), + GFP_KERNEL, node); + if (!wq->hw_descs[i]) { + free_hw_descs(wq); + return -ENOMEM; + } + } + + return 0; +} + +static void free_descs(struct idxd_wq *wq) +{ + int i; + + for (i = 0; i < wq->num_descs; i++) + kfree(wq->descs[i]); + + kfree(wq->descs); +} + +static int alloc_descs(struct idxd_wq *wq, int num) +{ + struct device *dev = &wq->idxd->pdev->dev; + int i; + int node = dev_to_node(dev); + + wq->descs = kcalloc_node(num, sizeof(struct idxd_desc *), + GFP_KERNEL, node); + if (!wq->descs) + return -ENOMEM; + + for (i = 0; i < num; i++) { + wq->descs[i] = kzalloc_node(sizeof(*wq->descs[i]), + GFP_KERNEL, node); + if (!wq->descs[i]) { + free_descs(wq); + return -ENOMEM; + } + } + + return 0; +} + +/* WQ control bits */ +int idxd_wq_alloc_resources(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct idxd_group *group = wq->group; + struct device *dev = &idxd->pdev->dev; + int rc, num_descs, i; + + if (wq->type != IDXD_WQT_KERNEL) + return 0; + + num_descs = wq->size + + idxd->hw.gen_cap.max_descs_per_engine * group->num_engines; + wq->num_descs = num_descs; + + rc = alloc_hw_descs(wq, num_descs); + if (rc < 0) + return rc; + + wq->compls_size = num_descs * sizeof(struct dsa_completion_record); + wq->compls = dma_alloc_coherent(dev, wq->compls_size, + &wq->compls_addr, GFP_KERNEL); + if (!wq->compls) { + rc = -ENOMEM; + goto fail_alloc_compls; + } + + rc = alloc_descs(wq, num_descs); + if (rc < 0) + goto fail_alloc_descs; + + rc = sbitmap_init_node(&wq->sbmap, num_descs, -1, GFP_KERNEL, + dev_to_node(dev)); + if (rc < 0) + goto fail_sbitmap_init; + + for (i = 0; i < num_descs; i++) { + struct idxd_desc *desc = wq->descs[i]; + + desc->hw = wq->hw_descs[i]; + desc->completion = &wq->compls[i]; + desc->compl_dma = wq->compls_addr + + sizeof(struct dsa_completion_record) * i; + desc->id = i; + desc->wq = wq; + + dma_async_tx_descriptor_init(&desc->txd, &wq->dma_chan); + desc->txd.tx_submit = idxd_dma_tx_submit; + } + + return 0; + + fail_sbitmap_init: + free_descs(wq); + fail_alloc_descs: + dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); + fail_alloc_compls: + free_hw_descs(wq); + return rc; +} + +void idxd_wq_free_resources(struct idxd_wq *wq) +{ + struct device *dev = &wq->idxd->pdev->dev; + + if (wq->type != IDXD_WQT_KERNEL) + return; + + free_hw_descs(wq); + free_descs(wq); + dma_free_coherent(dev, wq->compls_size, wq->compls, wq->compls_addr); + sbitmap_free(&wq->sbmap); +} + +int idxd_wq_enable(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + u32 status; + int rc; + + lockdep_assert_held(&idxd->dev_lock); + + if (wq->state == IDXD_WQ_ENABLED) { + dev_dbg(dev, "WQ %d already enabled\n", wq->id); + return -ENXIO; + } + + rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_WQ, wq->id); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + if 
(status != IDXD_CMDSTS_SUCCESS && + status != IDXD_CMDSTS_ERR_WQ_ENABLED) { + dev_dbg(dev, "WQ enable failed: %#x\n", status); + return -ENXIO; + } + + wq->state = IDXD_WQ_ENABLED; + dev_dbg(dev, "WQ %d enabled\n", wq->id); + return 0; +} + +int idxd_wq_disable(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + u32 status, operand; + int rc; + + lockdep_assert_held(&idxd->dev_lock); + dev_dbg(dev, "Disabling WQ %d\n", wq->id); + + if (wq->state != IDXD_WQ_ENABLED) { + dev_dbg(dev, "WQ %d in wrong state: %d\n", wq->id, wq->state); + return 0; + } + + operand = BIT(wq->id % 16) | ((wq->id / 16) << 16); + rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_WQ, operand); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + if (status != IDXD_CMDSTS_SUCCESS) { + dev_dbg(dev, "WQ disable failed: %#x\n", status); + return -ENXIO; + } + + wq->state = IDXD_WQ_DISABLED; + dev_dbg(dev, "WQ %d disabled\n", wq->id); + return 0; +} + +int idxd_wq_map_portal(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + resource_size_t start; + + start = pci_resource_start(pdev, IDXD_WQ_BAR); + start = start + wq->id * IDXD_PORTAL_SIZE; + + wq->dportal = devm_ioremap(dev, start, IDXD_PORTAL_SIZE); + if (!wq->dportal) + return -ENOMEM; + dev_dbg(dev, "wq %d portal mapped at %p\n", wq->id, wq->dportal); + + return 0; +} + +void idxd_wq_unmap_portal(struct idxd_wq *wq) +{ + struct device *dev = &wq->idxd->pdev->dev; + + devm_iounmap(dev, wq->dportal); +} + +/* Device control bits */ +static inline bool idxd_is_enabled(struct idxd_device *idxd) +{ + union gensts_reg gensts; + + gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); + + if (gensts.state == IDXD_DEVICE_STATE_ENABLED) + return true; + return false; +} + +static int idxd_cmd_wait(struct idxd_device *idxd, u32 *status, int timeout) +{ + u32 sts, to = timeout; + + lockdep_assert_held(&idxd->dev_lock); + sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); + while (sts & IDXD_CMDSTS_ACTIVE && --to) { + cpu_relax(); + sts = ioread32(idxd->reg_base + IDXD_CMDSTS_OFFSET); + } + + if (to == 0 && sts & IDXD_CMDSTS_ACTIVE) { + dev_warn(&idxd->pdev->dev, "%s timed out!\n", __func__); + *status = 0; + return -EBUSY; + } + + *status = sts; + return 0; +} + +static int idxd_cmd_send(struct idxd_device *idxd, int cmd_code, u32 operand) +{ + union idxd_command_reg cmd; + int rc; + u32 status; + + lockdep_assert_held(&idxd->dev_lock); + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cmd = cmd_code; + cmd.operand = operand; + dev_dbg(&idxd->pdev->dev, "%s: sending cmd: %#x op: %#x\n", + __func__, cmd_code, operand); + iowrite32(cmd.bits, idxd->reg_base + IDXD_CMD_OFFSET); + + return 0; +} + +int idxd_device_enable(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + u32 status; + + lockdep_assert_held(&idxd->dev_lock); + if (idxd_is_enabled(idxd)) { + dev_dbg(dev, "Device already enabled\n"); + return -ENXIO; + } + + rc = idxd_cmd_send(idxd, IDXD_CMD_ENABLE_DEVICE, 0); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + /* If the command is successful or if the device was enabled */ + if (status != IDXD_CMDSTS_SUCCESS && + status != IDXD_CMDSTS_ERR_DEV_ENABLED) { + dev_dbg(dev, "%s: err_code: %#x\n", __func__, status); + 
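/* leave idxd->state untouched on failure */ +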
return -ENXIO; + } + + idxd->state = IDXD_DEV_ENABLED; + return 0; +} + +int idxd_device_disable(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + u32 status; + + lockdep_assert_held(&idxd->dev_lock); + if (!idxd_is_enabled(idxd)) { + dev_dbg(dev, "Device is not enabled\n"); + return 0; + } + + rc = idxd_cmd_send(idxd, IDXD_CMD_DISABLE_DEVICE, 0); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + /* If the command is successful or if the device was disabled */ + if (status != IDXD_CMDSTS_SUCCESS && + !(status & IDXD_CMDSTS_ERR_DIS_DEV_EN)) { + dev_dbg(dev, "%s: err_code: %#x\n", __func__, status); + rc = -ENXIO; + return rc; + } + + idxd->state = IDXD_DEV_CONF_READY; + return 0; +} + +int __idxd_device_reset(struct idxd_device *idxd) +{ + u32 status; + int rc; + + rc = idxd_cmd_send(idxd, IDXD_CMD_RESET_DEVICE, 0); + if (rc < 0) + return rc; + rc = idxd_cmd_wait(idxd, &status, IDXD_REG_TIMEOUT); + if (rc < 0) + return rc; + + return 0; +} + +int idxd_device_reset(struct idxd_device *idxd) +{ + unsigned long flags; + int rc; + + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = __idxd_device_reset(idxd); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + return rc; +} + +/* Device configuration bits */ +static void idxd_group_config_write(struct idxd_group *group) +{ + struct idxd_device *idxd = group->idxd; + struct device *dev = &idxd->pdev->dev; + int i; + u32 grpcfg_offset; + + dev_dbg(dev, "Writing group %d cfg registers\n", group->id); + + /* setup GRPWQCFG */ + for (i = 0; i < 4; i++) { + grpcfg_offset = idxd->grpcfg_offset + + group->id * 64 + i * sizeof(u64); + iowrite64(group->grpcfg.wqs[i], + idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPCFG wq[%d:%d: %#x]: %#llx\n", + group->id, i, grpcfg_offset, + ioread64(idxd->reg_base + grpcfg_offset)); + } + + /* setup GRPENGCFG */ + grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 32; + iowrite64(group->grpcfg.engines, idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPCFG engs[%d: %#x]: %#llx\n", group->id, + grpcfg_offset, ioread64(idxd->reg_base + grpcfg_offset)); + + /* setup GRPFLAGS */ + grpcfg_offset = idxd->grpcfg_offset + group->id * 64 + 40; + iowrite32(group->grpcfg.flags.bits, idxd->reg_base + grpcfg_offset); + dev_dbg(dev, "GRPFLAGS flags[%d: %#x]: %#x\n", + group->id, grpcfg_offset, + ioread32(idxd->reg_base + grpcfg_offset)); +} + +static int idxd_groups_config_write(struct idxd_device *idxd) + +{ + union gencfg_reg reg; + int i; + struct device *dev = &idxd->pdev->dev; + + /* Setup bandwidth token limit */ + if (idxd->token_limit) { + reg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET); + reg.token_limit = idxd->token_limit; + iowrite32(reg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET); + } + + dev_dbg(dev, "GENCFG(%#x): %#x\n", IDXD_GENCFG_OFFSET, + ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET)); + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + idxd_group_config_write(group); + } + + return 0; +} + +static int idxd_wq_config_write(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + u32 wq_offset; + int i; + + if (!wq->group) + return 0; + + memset(&wq->wqcfg, 0, sizeof(union wqcfg)); + + /* byte 0-3 */ + wq->wqcfg.wq_size = wq->size; + + if (wq->size == 0) { + dev_warn(dev, "Incorrect work queue size: 0\n"); + return -EINVAL; + } + + /* bytes 4-7 */ + wq->wqcfg.wq_thresh = wq->threshold; + + /* byte 8-11 */ + 
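/* priv marks kernel-owned WQs; mode 1 selects dedicated mode */ +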
wq->wqcfg.priv = !!(wq->type == IDXD_WQT_KERNEL); + wq->wqcfg.mode = 1; + + wq->wqcfg.priority = wq->priority; + + /* bytes 12-15 */ + wq->wqcfg.max_xfer_shift = idxd->hw.gen_cap.max_xfer_shift; + wq->wqcfg.max_batch_shift = idxd->hw.gen_cap.max_batch_shift; + + dev_dbg(dev, "WQ %d CFGs\n", wq->id); + for (i = 0; i < 8; i++) { + wq_offset = idxd->wqcfg_offset + wq->id * 32 + i * sizeof(u32); + iowrite32(wq->wqcfg.bits[i], idxd->reg_base + wq_offset); + dev_dbg(dev, "WQ[%d][%d][%#x]: %#x\n", + wq->id, i, wq_offset, + ioread32(idxd->reg_base + wq_offset)); + } + + return 0; +} + +static int idxd_wqs_config_write(struct idxd_device *idxd) +{ + int i, rc; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + rc = idxd_wq_config_write(wq); + if (rc < 0) + return rc; + } + + return 0; +} + +static void idxd_group_flags_setup(struct idxd_device *idxd) +{ + int i; + + /* TC-A 0 and TC-B 1 should be defaults */ + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + if (group->tc_a == -1) + group->grpcfg.flags.tc_a = 0; + else + group->grpcfg.flags.tc_a = group->tc_a; + if (group->tc_b == -1) + group->grpcfg.flags.tc_b = 1; + else + group->grpcfg.flags.tc_b = group->tc_b; + group->grpcfg.flags.use_token_limit = group->use_token_limit; + group->grpcfg.flags.tokens_reserved = group->tokens_reserved; + if (group->tokens_allowed) + group->grpcfg.flags.tokens_allowed = + group->tokens_allowed; + else + group->grpcfg.flags.tokens_allowed = idxd->max_tokens; + } +} + +static int idxd_engines_setup(struct idxd_device *idxd) +{ + int i, engines = 0; + struct idxd_engine *eng; + struct idxd_group *group; + + for (i = 0; i < idxd->max_groups; i++) { + group = &idxd->groups[i]; + group->grpcfg.engines = 0; + } + + for (i = 0; i < idxd->max_engines; i++) { + eng = &idxd->engines[i]; + group = eng->group; + + if (!group) + continue; + + group->grpcfg.engines |= BIT(eng->id); + engines++; + } + + if (!engines) + return -EINVAL; + + return 0; +} + +static int idxd_wqs_setup(struct idxd_device *idxd) +{ + struct idxd_wq *wq; + struct idxd_group *group; + int i, j, configured = 0; + struct device *dev = &idxd->pdev->dev; + + for (i = 0; i < idxd->max_groups; i++) { + group = &idxd->groups[i]; + for (j = 0; j < 4; j++) + group->grpcfg.wqs[j] = 0; + } + + for (i = 0; i < idxd->max_wqs; i++) { + wq = &idxd->wqs[i]; + group = wq->group; + + if (!wq->group) + continue; + if (!wq->size) + continue; + + if (!wq_dedicated(wq)) { + dev_warn(dev, "No shared workqueue support.\n"); + return -EINVAL; + } + + group->grpcfg.wqs[wq->id / 64] |= BIT(wq->id % 64); + configured++; + } + + if (configured == 0) + return -EINVAL; + + return 0; +} + +int idxd_device_config(struct idxd_device *idxd) +{ + int rc; + + lockdep_assert_held(&idxd->dev_lock); + rc = idxd_wqs_setup(idxd); + if (rc < 0) + return rc; + + rc = idxd_engines_setup(idxd); + if (rc < 0) + return rc; + + idxd_group_flags_setup(idxd); + + rc = idxd_wqs_config_write(idxd); + if (rc < 0) + return rc; + + rc = idxd_groups_config_write(idxd); + if (rc < 0) + return rc; + + return 0; +} diff --git a/drivers/dma/idxd/dma.c b/drivers/dma/idxd/dma.c new file mode 100644 index 000000000000..c64c1429d160 --- /dev/null +++ b/drivers/dma/idxd/dma.c @@ -0,0 +1,217 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
*/ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/dmaengine.h> +#include <uapi/linux/idxd.h> +#include "../dmaengine.h" +#include "registers.h" +#include "idxd.h" + +static inline struct idxd_wq *to_idxd_wq(struct dma_chan *c) +{ + return container_of(c, struct idxd_wq, dma_chan); +} + +void idxd_dma_complete_txd(struct idxd_desc *desc, + enum idxd_complete_type comp_type) +{ + struct dma_async_tx_descriptor *tx; + struct dmaengine_result res; + int complete = 1; + + if (desc->completion->status == DSA_COMP_SUCCESS) + res.result = DMA_TRANS_NOERROR; + else if (desc->completion->status) + res.result = DMA_TRANS_WRITE_FAILED; + else if (comp_type == IDXD_COMPLETE_ABORT) + res.result = DMA_TRANS_ABORTED; + else + complete = 0; + + tx = &desc->txd; + if (complete && tx->cookie) { + dma_cookie_complete(tx); + dma_descriptor_unmap(tx); + dmaengine_desc_get_callback_invoke(tx, &res); + tx->callback = NULL; + tx->callback_result = NULL; + } +} + +static void op_flag_setup(unsigned long flags, u32 *desc_flags) +{ + *desc_flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR; + if (flags & DMA_PREP_INTERRUPT) + *desc_flags |= IDXD_OP_FLAG_RCI; +} + +static inline void set_completion_address(struct idxd_desc *desc, + u64 *compl_addr) +{ + *compl_addr = desc->compl_dma; +} + +static inline void idxd_prep_desc_common(struct idxd_wq *wq, + struct dsa_hw_desc *hw, char opcode, + u64 addr_f1, u64 addr_f2, u64 len, + u64 compl, u32 flags) +{ + struct idxd_device *idxd = wq->idxd; + + hw->flags = flags; + hw->opcode = opcode; + hw->src_addr = addr_f1; + hw->dst_addr = addr_f2; + hw->xfer_size = len; + hw->priv = !!(wq->type == IDXD_WQT_KERNEL); + hw->completion_addr = compl; + + /* + * Descriptor completion vectors are 1-8 for MSIX. We will round + * robin through the 8 vectors. + */ + wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1; + hw->int_handle = wq->vec_ptr; +} + +static struct dma_async_tx_descriptor * +idxd_dma_submit_memcpy(struct dma_chan *c, dma_addr_t dma_dest, + dma_addr_t dma_src, size_t len, unsigned long flags) +{ + struct idxd_wq *wq = to_idxd_wq(c); + u32 desc_flags; + struct idxd_device *idxd = wq->idxd; + struct idxd_desc *desc; + + if (wq->state != IDXD_WQ_ENABLED) + return NULL; + + if (len > idxd->max_xfer_bytes) + return NULL; + + op_flag_setup(flags, &desc_flags); + desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK); + if (IS_ERR(desc)) + return NULL; + + idxd_prep_desc_common(wq, desc->hw, DSA_OPCODE_MEMMOVE, + dma_src, dma_dest, len, desc->compl_dma, + desc_flags); + + desc->txd.flags = flags; + + return &desc->txd; +} + +static int idxd_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct idxd_wq *wq = to_idxd_wq(chan); + struct device *dev = &wq->idxd->pdev->dev; + + idxd_wq_get(wq); + dev_dbg(dev, "%s: client_count: %d\n", __func__, + idxd_wq_refcount(wq)); + return 0; +} + +static void idxd_dma_free_chan_resources(struct dma_chan *chan) +{ + struct idxd_wq *wq = to_idxd_wq(chan); + struct device *dev = &wq->idxd->pdev->dev; + + idxd_wq_put(wq); + dev_dbg(dev, "%s: client_count: %d\n", __func__, + idxd_wq_refcount(wq)); +} + +static enum dma_status idxd_dma_tx_status(struct dma_chan *dma_chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + return dma_cookie_status(dma_chan, cookie, txstate); +} + +/* + * issue_pending() does not need to do anything since tx_submit() does the job + * already. 
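+ * (idxd_dma_tx_submit() calls idxd_submit_desc(), which writes the descriptor to the WQ portal immediately.)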
+ */ +static void idxd_dma_issue_pending(struct dma_chan *dma_chan) +{ +} + +dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct dma_chan *c = tx->chan; + struct idxd_wq *wq = to_idxd_wq(c); + dma_cookie_t cookie; + int rc; + struct idxd_desc *desc = container_of(tx, struct idxd_desc, txd); + + cookie = dma_cookie_assign(tx); + + rc = idxd_submit_desc(wq, desc); + if (rc < 0) { + idxd_free_desc(wq, desc); + return rc; + } + + return cookie; +} + +static void idxd_dma_release(struct dma_device *device) +{ +} + +int idxd_register_dma_device(struct idxd_device *idxd) +{ + struct dma_device *dma = &idxd->dma_dev; + + INIT_LIST_HEAD(&dma->channels); + dma->dev = &idxd->pdev->dev; + + dma->device_release = idxd_dma_release; + + if (idxd->hw.opcap.bits[0] & IDXD_OPCAP_MEMMOVE) { + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma->device_prep_dma_memcpy = idxd_dma_submit_memcpy; + } + + dma->device_tx_status = idxd_dma_tx_status; + dma->device_issue_pending = idxd_dma_issue_pending; + dma->device_alloc_chan_resources = idxd_dma_alloc_chan_resources; + dma->device_free_chan_resources = idxd_dma_free_chan_resources; + + return dma_async_device_register(&idxd->dma_dev); +} + +void idxd_unregister_dma_device(struct idxd_device *idxd) +{ + dma_async_device_unregister(&idxd->dma_dev); +} + +int idxd_register_dma_channel(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct dma_device *dma = &idxd->dma_dev; + struct dma_chan *chan = &wq->dma_chan; + int rc; + + memset(&wq->dma_chan, 0, sizeof(struct dma_chan)); + chan->device = dma; + list_add_tail(&chan->device_node, &dma->channels); + rc = dma_async_device_channel_register(dma, chan); + if (rc < 0) + return rc; + + return 0; +} + +void idxd_unregister_dma_channel(struct idxd_wq *wq) +{ + dma_async_device_channel_unregister(&wq->idxd->dma_dev, &wq->dma_chan); +} diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h new file mode 100644 index 000000000000..b8f8a363b4a7 --- /dev/null +++ b/drivers/dma/idxd/idxd.h @@ -0,0 +1,316 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. 
*/ +#ifndef _IDXD_H_ +#define _IDXD_H_ + +#include <linux/sbitmap.h> +#include <linux/dmaengine.h> +#include <linux/percpu-rwsem.h> +#include <linux/wait.h> +#include <linux/cdev.h> +#include "registers.h" + +#define IDXD_DRIVER_VERSION "1.00" + +extern struct kmem_cache *idxd_desc_pool; + +#define IDXD_REG_TIMEOUT 50 +#define IDXD_DRAIN_TIMEOUT 5000 + +enum idxd_type { + IDXD_TYPE_UNKNOWN = -1, + IDXD_TYPE_DSA = 0, + IDXD_TYPE_MAX +}; + +#define IDXD_NAME_SIZE 128 + +struct idxd_device_driver { + struct device_driver drv; +}; + +struct idxd_irq_entry { + struct idxd_device *idxd; + int id; + struct llist_head pending_llist; + struct list_head work_list; +}; + +struct idxd_group { + struct device conf_dev; + struct idxd_device *idxd; + struct grpcfg grpcfg; + int id; + int num_engines; + int num_wqs; + bool use_token_limit; + u8 tokens_allowed; + u8 tokens_reserved; + int tc_a; + int tc_b; +}; + +#define IDXD_MAX_PRIORITY 0xf + +enum idxd_wq_state { + IDXD_WQ_DISABLED = 0, + IDXD_WQ_ENABLED, +}; + +enum idxd_wq_flag { + WQ_FLAG_DEDICATED = 0, +}; + +enum idxd_wq_type { + IDXD_WQT_NONE = 0, + IDXD_WQT_KERNEL, + IDXD_WQT_USER, +}; + +struct idxd_cdev { + struct cdev cdev; + struct device *dev; + int minor; + struct wait_queue_head err_queue; +}; + +#define IDXD_ALLOCATED_BATCH_SIZE 128U +#define WQ_NAME_SIZE 1024 +#define WQ_TYPE_SIZE 10 + +enum idxd_op_type { + IDXD_OP_BLOCK = 0, + IDXD_OP_NONBLOCK = 1, +}; + +enum idxd_complete_type { + IDXD_COMPLETE_NORMAL = 0, + IDXD_COMPLETE_ABORT, +}; + +struct idxd_wq { + void __iomem *dportal; + struct device conf_dev; + struct idxd_cdev idxd_cdev; + struct idxd_device *idxd; + int id; + enum idxd_wq_type type; + struct idxd_group *group; + int client_count; + struct mutex wq_lock; /* mutex for workqueue */ + u32 size; + u32 threshold; + u32 priority; + enum idxd_wq_state state; + unsigned long flags; + union wqcfg wqcfg; + atomic_t dq_count; /* dedicated queue flow control */ + u32 vec_ptr; /* interrupt steering */ + struct dsa_hw_desc **hw_descs; + int num_descs; + struct dsa_completion_record *compls; + dma_addr_t compls_addr; + int compls_size; + struct idxd_desc **descs; + struct sbitmap sbmap; + struct dma_chan dma_chan; + struct percpu_rw_semaphore submit_lock; + wait_queue_head_t submit_waitq; + char name[WQ_NAME_SIZE + 1]; +}; + +struct idxd_engine { + struct device conf_dev; + int id; + struct idxd_group *group; + struct idxd_device *idxd; +}; + +/* shadow registers */ +struct idxd_hw { + u32 version; + union gen_cap_reg gen_cap; + union wq_cap_reg wq_cap; + union group_cap_reg group_cap; + union engine_cap_reg engine_cap; + struct opcap opcap; +}; + +enum idxd_device_state { + IDXD_DEV_HALTED = -1, + IDXD_DEV_DISABLED = 0, + IDXD_DEV_CONF_READY, + IDXD_DEV_ENABLED, +}; + +enum idxd_device_flag { + IDXD_FLAG_CONFIGURABLE = 0, +}; + +struct idxd_device { + enum idxd_type type; + struct device conf_dev; + struct list_head list; + struct idxd_hw hw; + enum idxd_device_state state; + unsigned long flags; + int id; + int major; + + struct pci_dev *pdev; + void __iomem *reg_base; + + spinlock_t dev_lock; /* spinlock for device */ + struct idxd_group *groups; + struct idxd_wq *wqs; + struct idxd_engine *engines; + + int num_groups; + + u32 msix_perm_offset; + u32 wqcfg_offset; + u32 grpcfg_offset; + u32 perfmon_offset; + + u64 max_xfer_bytes; + u32 max_batch_size; + int max_groups; + int max_engines; + int max_tokens; + int max_wqs; + int max_wq_size; + int token_limit; + int nr_tokens; /* non-reserved tokens */ + + union sw_err_reg sw_err; + + 
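/* MSI-X bookkeeping: vector 0 carries error/misc events, the rest carry WQ completions */ +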
struct msix_entry *msix_entries; + int num_wq_irqs; + struct idxd_irq_entry *irq_entries; + + struct dma_device dma_dev; +}; + +/* IDXD software descriptor */ +struct idxd_desc { + struct dsa_hw_desc *hw; + dma_addr_t desc_dma; + struct dsa_completion_record *completion; + dma_addr_t compl_dma; + struct dma_async_tx_descriptor txd; + struct llist_node llnode; + struct list_head list; + int id; + struct idxd_wq *wq; +}; + +#define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev) +#define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev) + +extern struct bus_type dsa_bus_type; + +static inline bool wq_dedicated(struct idxd_wq *wq) +{ + return test_bit(WQ_FLAG_DEDICATED, &wq->flags); +} + +enum idxd_portal_prot { + IDXD_PORTAL_UNLIMITED = 0, + IDXD_PORTAL_LIMITED, +}; + +static inline int idxd_get_wq_portal_offset(enum idxd_portal_prot prot) +{ + return prot * 0x1000; +} + +static inline int idxd_get_wq_portal_full_offset(int wq_id, + enum idxd_portal_prot prot) +{ + return ((wq_id * 4) << PAGE_SHIFT) + idxd_get_wq_portal_offset(prot); +} + +static inline void idxd_set_type(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + + if (pdev->device == PCI_DEVICE_ID_INTEL_DSA_SPR0) + idxd->type = IDXD_TYPE_DSA; + else + idxd->type = IDXD_TYPE_UNKNOWN; +} + +static inline void idxd_wq_get(struct idxd_wq *wq) +{ + wq->client_count++; +} + +static inline void idxd_wq_put(struct idxd_wq *wq) +{ + wq->client_count--; +} + +static inline int idxd_wq_refcount(struct idxd_wq *wq) +{ + return wq->client_count; +}; + +const char *idxd_get_dev_name(struct idxd_device *idxd); +int idxd_register_bus_type(void); +void idxd_unregister_bus_type(void); +int idxd_setup_sysfs(struct idxd_device *idxd); +void idxd_cleanup_sysfs(struct idxd_device *idxd); +int idxd_register_driver(void); +void idxd_unregister_driver(void); +struct bus_type *idxd_get_bus_type(struct idxd_device *idxd); + +/* device interrupt control */ +irqreturn_t idxd_irq_handler(int vec, void *data); +irqreturn_t idxd_misc_thread(int vec, void *data); +irqreturn_t idxd_wq_thread(int irq, void *data); +void idxd_mask_error_interrupts(struct idxd_device *idxd); +void idxd_unmask_error_interrupts(struct idxd_device *idxd); +void idxd_mask_msix_vectors(struct idxd_device *idxd); +int idxd_mask_msix_vector(struct idxd_device *idxd, int vec_id); +int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id); + +/* device control */ +int idxd_device_enable(struct idxd_device *idxd); +int idxd_device_disable(struct idxd_device *idxd); +int idxd_device_reset(struct idxd_device *idxd); +int __idxd_device_reset(struct idxd_device *idxd); +void idxd_device_cleanup(struct idxd_device *idxd); +int idxd_device_config(struct idxd_device *idxd); +void idxd_device_wqs_clear_state(struct idxd_device *idxd); + +/* work queue control */ +int idxd_wq_alloc_resources(struct idxd_wq *wq); +void idxd_wq_free_resources(struct idxd_wq *wq); +int idxd_wq_enable(struct idxd_wq *wq); +int idxd_wq_disable(struct idxd_wq *wq); +int idxd_wq_map_portal(struct idxd_wq *wq); +void idxd_wq_unmap_portal(struct idxd_wq *wq); + +/* submission */ +int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc); +struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype); +void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc); + +/* dmaengine */ +int idxd_register_dma_device(struct idxd_device *idxd); +void idxd_unregister_dma_device(struct idxd_device *idxd); +int idxd_register_dma_channel(struct idxd_wq 
*wq); +void idxd_unregister_dma_channel(struct idxd_wq *wq); +void idxd_parse_completion_status(u8 status, enum dmaengine_tx_result *res); +void idxd_dma_complete_txd(struct idxd_desc *desc, + enum idxd_complete_type comp_type); +dma_cookie_t idxd_dma_tx_submit(struct dma_async_tx_descriptor *tx); + +/* cdev */ +int idxd_cdev_register(void); +void idxd_cdev_remove(void); +int idxd_cdev_get_major(struct idxd_device *idxd); +int idxd_wq_add_cdev(struct idxd_wq *wq); +void idxd_wq_del_cdev(struct idxd_wq *wq); + +#endif diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c new file mode 100644 index 000000000000..7778c05deb5d --- /dev/null +++ b/drivers/dma/idxd/init.c @@ -0,0 +1,533 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/pci.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/workqueue.h> +#include <linux/aer.h> +#include <linux/fs.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/device.h> +#include <linux/idr.h> +#include <uapi/linux/idxd.h> +#include <linux/dmaengine.h> +#include "../dmaengine.h" +#include "registers.h" +#include "idxd.h" + +MODULE_VERSION(IDXD_DRIVER_VERSION); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Intel Corporation"); + +#define DRV_NAME "idxd" + +static struct idr idxd_idrs[IDXD_TYPE_MAX]; +static struct mutex idxd_idr_lock; + +static struct pci_device_id idxd_pci_tbl[] = { + /* DSA ver 1.0 platforms */ + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_DSA_SPR0) }, + { 0, } +}; +MODULE_DEVICE_TABLE(pci, idxd_pci_tbl); + +static char *idxd_name[] = { + "dsa", +}; + +const char *idxd_get_dev_name(struct idxd_device *idxd) +{ + return idxd_name[idxd->type]; +} + +static int idxd_setup_interrupts(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + struct msix_entry *msix; + struct idxd_irq_entry *irq_entry; + int i, msixcnt; + int rc = 0; + + msixcnt = pci_msix_vec_count(pdev); + if (msixcnt < 0) { + dev_err(dev, "Not MSI-X interrupt capable.\n"); + goto err_no_irq; + } + + idxd->msix_entries = devm_kzalloc(dev, sizeof(struct msix_entry) * + msixcnt, GFP_KERNEL); + if (!idxd->msix_entries) { + rc = -ENOMEM; + goto err_no_irq; + } + + for (i = 0; i < msixcnt; i++) + idxd->msix_entries[i].entry = i; + + rc = pci_enable_msix_exact(pdev, idxd->msix_entries, msixcnt); + if (rc) { + dev_err(dev, "Failed enabling %d MSIX entries.\n", msixcnt); + goto err_no_irq; + } + dev_dbg(dev, "Enabled %d msix vectors\n", msixcnt); + + /* + * We implement 1 completion list per MSI-X entry except for + * entry 0, which is for errors and others. 
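+ * Each WQ vector gets an irq_entry with a lockless pending llist and a work_list; idxd_wq_thread() drains both.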
+ */ + idxd->irq_entries = devm_kcalloc(dev, msixcnt, + sizeof(struct idxd_irq_entry), + GFP_KERNEL); + if (!idxd->irq_entries) { + rc = -ENOMEM; + goto err_no_irq; + } + + for (i = 0; i < msixcnt; i++) { + idxd->irq_entries[i].id = i; + idxd->irq_entries[i].idxd = idxd; + } + + msix = &idxd->msix_entries[0]; + irq_entry = &idxd->irq_entries[0]; + rc = devm_request_threaded_irq(dev, msix->vector, idxd_irq_handler, + idxd_misc_thread, 0, "idxd-misc", + irq_entry); + if (rc < 0) { + dev_err(dev, "Failed to allocate misc interrupt.\n"); + goto err_no_irq; + } + + dev_dbg(dev, "Allocated idxd-misc handler on msix vector %d\n", + msix->vector); + + /* first MSI-X entry is not for wq interrupts */ + idxd->num_wq_irqs = msixcnt - 1; + + for (i = 1; i < msixcnt; i++) { + msix = &idxd->msix_entries[i]; + irq_entry = &idxd->irq_entries[i]; + + init_llist_head(&idxd->irq_entries[i].pending_llist); + INIT_LIST_HEAD(&idxd->irq_entries[i].work_list); + rc = devm_request_threaded_irq(dev, msix->vector, + idxd_irq_handler, + idxd_wq_thread, 0, + "idxd-portal", irq_entry); + if (rc < 0) { + dev_err(dev, "Failed to allocate irq %d.\n", + msix->vector); + goto err_no_irq; + } + dev_dbg(dev, "Allocated idxd-msix %d for vector %d\n", + i, msix->vector); + } + + idxd_unmask_error_interrupts(idxd); + + return 0; + + err_no_irq: + /* Disable error interrupt generation */ + idxd_mask_error_interrupts(idxd); + pci_disable_msix(pdev); + dev_err(dev, "No usable interrupts\n"); + return rc; +} + +static void idxd_wqs_free_lock(struct idxd_device *idxd) +{ + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + percpu_free_rwsem(&wq->submit_lock); + } +} + +static int idxd_setup_internals(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i; + + idxd->groups = devm_kcalloc(dev, idxd->max_groups, + sizeof(struct idxd_group), GFP_KERNEL); + if (!idxd->groups) + return -ENOMEM; + + for (i = 0; i < idxd->max_groups; i++) { + idxd->groups[i].idxd = idxd; + idxd->groups[i].id = i; + idxd->groups[i].tc_a = -1; + idxd->groups[i].tc_b = -1; + } + + idxd->wqs = devm_kcalloc(dev, idxd->max_wqs, sizeof(struct idxd_wq), + GFP_KERNEL); + if (!idxd->wqs) + return -ENOMEM; + + idxd->engines = devm_kcalloc(dev, idxd->max_engines, + sizeof(struct idxd_engine), GFP_KERNEL); + if (!idxd->engines) + return -ENOMEM; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + int rc; + + wq->id = i; + wq->idxd = idxd; + mutex_init(&wq->wq_lock); + atomic_set(&wq->dq_count, 0); + init_waitqueue_head(&wq->submit_waitq); + wq->idxd_cdev.minor = -1; + rc = percpu_init_rwsem(&wq->submit_lock); + if (rc < 0) { + idxd_wqs_free_lock(idxd); + return rc; + } + } + + for (i = 0; i < idxd->max_engines; i++) { + idxd->engines[i].idxd = idxd; + idxd->engines[i].id = i; + } + + return 0; +} + +static void idxd_read_table_offsets(struct idxd_device *idxd) +{ + union offsets_reg offsets; + struct device *dev = &idxd->pdev->dev; + + offsets.bits[0] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET); + offsets.bits[1] = ioread64(idxd->reg_base + IDXD_TABLE_OFFSET + + sizeof(u64)); + idxd->grpcfg_offset = offsets.grpcfg * 0x100; + dev_dbg(dev, "IDXD Group Config Offset: %#x\n", idxd->grpcfg_offset); + idxd->wqcfg_offset = offsets.wqcfg * 0x100; + dev_dbg(dev, "IDXD Work Queue Config Offset: %#x\n", + idxd->wqcfg_offset); + idxd->msix_perm_offset = offsets.msix_perm * 0x100; + dev_dbg(dev, "IDXD MSIX Permission Offset: %#x\n", + idxd->msix_perm_offset); + idxd->perfmon_offset = 
offsets.perfmon * 0x100; + dev_dbg(dev, "IDXD Perfmon Offset: %#x\n", idxd->perfmon_offset); +} + +static void idxd_read_caps(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i; + + /* reading generic capabilities */ + idxd->hw.gen_cap.bits = ioread64(idxd->reg_base + IDXD_GENCAP_OFFSET); + dev_dbg(dev, "gen_cap: %#llx\n", idxd->hw.gen_cap.bits); + idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift; + dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes); + idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift; + dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size); + if (idxd->hw.gen_cap.config_en) + set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags); + + /* reading group capabilities */ + idxd->hw.group_cap.bits = + ioread64(idxd->reg_base + IDXD_GRPCAP_OFFSET); + dev_dbg(dev, "group_cap: %#llx\n", idxd->hw.group_cap.bits); + idxd->max_groups = idxd->hw.group_cap.num_groups; + dev_dbg(dev, "max groups: %u\n", idxd->max_groups); + idxd->max_tokens = idxd->hw.group_cap.total_tokens; + dev_dbg(dev, "max tokens: %u\n", idxd->max_tokens); + idxd->nr_tokens = idxd->max_tokens; + + /* read engine capabilities */ + idxd->hw.engine_cap.bits = + ioread64(idxd->reg_base + IDXD_ENGCAP_OFFSET); + dev_dbg(dev, "engine_cap: %#llx\n", idxd->hw.engine_cap.bits); + idxd->max_engines = idxd->hw.engine_cap.num_engines; + dev_dbg(dev, "max engines: %u\n", idxd->max_engines); + + /* read workqueue capabilities */ + idxd->hw.wq_cap.bits = ioread64(idxd->reg_base + IDXD_WQCAP_OFFSET); + dev_dbg(dev, "wq_cap: %#llx\n", idxd->hw.wq_cap.bits); + idxd->max_wq_size = idxd->hw.wq_cap.total_wq_size; + dev_dbg(dev, "total workqueue size: %u\n", idxd->max_wq_size); + idxd->max_wqs = idxd->hw.wq_cap.num_wqs; + dev_dbg(dev, "max workqueues: %u\n", idxd->max_wqs); + + /* reading operation capabilities */ + for (i = 0; i < 4; i++) { + idxd->hw.opcap.bits[i] = ioread64(idxd->reg_base + + IDXD_OPCAP_OFFSET + i * sizeof(u64)); + dev_dbg(dev, "opcap[%d]: %#llx\n", i, idxd->hw.opcap.bits[i]); + } +} + +static struct idxd_device *idxd_alloc(struct pci_dev *pdev, + void __iomem * const *iomap) +{ + struct device *dev = &pdev->dev; + struct idxd_device *idxd; + + idxd = devm_kzalloc(dev, sizeof(struct idxd_device), GFP_KERNEL); + if (!idxd) + return NULL; + + idxd->pdev = pdev; + idxd->reg_base = iomap[IDXD_MMIO_BAR]; + spin_lock_init(&idxd->dev_lock); + + return idxd; +} + +static int idxd_probe(struct idxd_device *idxd) +{ + struct pci_dev *pdev = idxd->pdev; + struct device *dev = &pdev->dev; + int rc; + + dev_dbg(dev, "%s entered and resetting device\n", __func__); + rc = idxd_device_reset(idxd); + if (rc < 0) + return rc; + dev_dbg(dev, "IDXD reset complete\n"); + + idxd_read_caps(idxd); + idxd_read_table_offsets(idxd); + + rc = idxd_setup_internals(idxd); + if (rc) + goto err_setup; + + rc = idxd_setup_interrupts(idxd); + if (rc) + goto err_setup; + + dev_dbg(dev, "IDXD interrupt setup complete.\n"); + + mutex_lock(&idxd_idr_lock); + idxd->id = idr_alloc(&idxd_idrs[idxd->type], idxd, 0, 0, GFP_KERNEL); + mutex_unlock(&idxd_idr_lock); + if (idxd->id < 0) { + rc = -ENOMEM; + goto err_idr_fail; + } + + idxd->major = idxd_cdev_get_major(idxd); + + dev_dbg(dev, "IDXD device %d probed successfully\n", idxd->id); + return 0; + + err_idr_fail: + idxd_mask_error_interrupts(idxd); + idxd_mask_msix_vectors(idxd); + err_setup: + return rc; +} + +static int idxd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + void __iomem * const *iomap; + struct device *dev 
= &pdev->dev; + struct idxd_device *idxd; + int rc; + unsigned int mask; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + dev_dbg(dev, "Mapping BARs\n"); + mask = (1 << IDXD_MMIO_BAR); + rc = pcim_iomap_regions(pdev, mask, DRV_NAME); + if (rc) + return rc; + + iomap = pcim_iomap_table(pdev); + if (!iomap) + return -ENOMEM; + + dev_dbg(dev, "Set DMA masks\n"); + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (rc) + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + dev_dbg(dev, "Alloc IDXD context\n"); + idxd = idxd_alloc(pdev, iomap); + if (!idxd) + return -ENOMEM; + + idxd_set_type(idxd); + + dev_dbg(dev, "Set PCI master\n"); + pci_set_master(pdev); + pci_set_drvdata(pdev, idxd); + + idxd->hw.version = ioread32(idxd->reg_base + IDXD_VER_OFFSET); + rc = idxd_probe(idxd); + if (rc) { + dev_err(dev, "Intel(R) IDXD DMA Engine init failed\n"); + return -ENODEV; + } + + rc = idxd_setup_sysfs(idxd); + if (rc) { + dev_err(dev, "IDXD sysfs setup failed\n"); + return -ENODEV; + } + + idxd->state = IDXD_DEV_CONF_READY; + + dev_info(&pdev->dev, "Intel(R) Accelerator Device (v%x)\n", + idxd->hw.version); + + return 0; +} + +static void idxd_flush_pending_llist(struct idxd_irq_entry *ie) +{ + struct idxd_desc *desc, *itr; + struct llist_node *head; + + head = llist_del_all(&ie->pending_llist); + if (!head) + return; + + llist_for_each_entry_safe(desc, itr, head, llnode) { + idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT); + idxd_free_desc(desc->wq, desc); + } +} + +static void idxd_flush_work_list(struct idxd_irq_entry *ie) +{ + struct idxd_desc *desc, *iter; + + list_for_each_entry_safe(desc, iter, &ie->work_list, list) { + list_del(&desc->list); + idxd_dma_complete_txd(desc, IDXD_COMPLETE_ABORT); + idxd_free_desc(desc->wq, desc); + } +} + +static void idxd_shutdown(struct pci_dev *pdev) +{ + struct idxd_device *idxd = pci_get_drvdata(pdev); + int rc, i; + struct idxd_irq_entry *irq_entry; + int msixcnt = pci_msix_vec_count(pdev); + unsigned long flags; + + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = idxd_device_disable(idxd); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + if (rc) + dev_err(&pdev->dev, "Disabling device failed\n"); + + dev_dbg(&pdev->dev, "%s called\n", __func__); + idxd_mask_msix_vectors(idxd); + idxd_mask_error_interrupts(idxd); + + for (i = 0; i < msixcnt; i++) { + irq_entry = &idxd->irq_entries[i]; + synchronize_irq(idxd->msix_entries[i].vector); + if (i == 0) + continue; + idxd_flush_pending_llist(irq_entry); + idxd_flush_work_list(irq_entry); + } +} + +static void idxd_remove(struct pci_dev *pdev) +{ + struct idxd_device *idxd = pci_get_drvdata(pdev); + + dev_dbg(&pdev->dev, "%s called\n", __func__); + idxd_cleanup_sysfs(idxd); + idxd_shutdown(pdev); + idxd_wqs_free_lock(idxd); + mutex_lock(&idxd_idr_lock); + idr_remove(&idxd_idrs[idxd->type], idxd->id); + mutex_unlock(&idxd_idr_lock); +} + +static struct pci_driver idxd_pci_driver = { + .name = DRV_NAME, + .id_table = idxd_pci_tbl, + .probe = idxd_pci_probe, + .remove = idxd_remove, + .shutdown = idxd_shutdown, +}; + +static int __init idxd_init_module(void) +{ + int err, i; + + /* + * If the CPU does not support write512, there's no point in + * enumerating the device. We can not utilize it. 
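+ * (Descriptor submission relies on iosubmit_cmds512(), which wraps the 64-byte atomic MOVDIR64B store.)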
+ */ + if (!boot_cpu_has(X86_FEATURE_MOVDIR64B)) { + pr_warn("idxd driver failed to load without MOVDIR64B.\n"); + return -ENODEV; + } + + pr_info("%s: Intel(R) Accelerator Devices Driver %s\n", + DRV_NAME, IDXD_DRIVER_VERSION); + + mutex_init(&idxd_idr_lock); + for (i = 0; i < IDXD_TYPE_MAX; i++) + idr_init(&idxd_idrs[i]); + + err = idxd_register_bus_type(); + if (err < 0) + return err; + + err = idxd_register_driver(); + if (err < 0) + goto err_idxd_driver_register; + + err = idxd_cdev_register(); + if (err) + goto err_cdev_register; + + err = pci_register_driver(&idxd_pci_driver); + if (err) + goto err_pci_register; + + return 0; + +err_pci_register: + idxd_cdev_remove(); +err_cdev_register: + idxd_unregister_driver(); +err_idxd_driver_register: + idxd_unregister_bus_type(); + return err; +} +module_init(idxd_init_module); + +static void __exit idxd_exit_module(void) +{ + pci_unregister_driver(&idxd_pci_driver); + idxd_cdev_remove(); + idxd_unregister_bus_type(); +} +module_exit(idxd_exit_module); diff --git a/drivers/dma/idxd/irq.c b/drivers/dma/idxd/irq.c new file mode 100644 index 000000000000..d6fcd2e60103 --- /dev/null +++ b/drivers/dma/idxd/irq.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <linux/dmaengine.h> +#include <uapi/linux/idxd.h> +#include "../dmaengine.h" +#include "idxd.h" +#include "registers.h" + +void idxd_device_wqs_clear_state(struct idxd_device *idxd) +{ + int i; + + lockdep_assert_held(&idxd->dev_lock); + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + wq->state = IDXD_WQ_DISABLED; + } +} + +static int idxd_restart(struct idxd_device *idxd) +{ + int i, rc; + + lockdep_assert_held(&idxd->dev_lock); + + rc = __idxd_device_reset(idxd); + if (rc < 0) + goto out; + + rc = idxd_device_config(idxd); + if (rc < 0) + goto out; + + rc = idxd_device_enable(idxd); + if (rc < 0) + goto out; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + if (wq->state == IDXD_WQ_ENABLED) { + rc = idxd_wq_enable(wq); + if (rc < 0) { + dev_warn(&idxd->pdev->dev, + "Unable to re-enable wq %s\n", + dev_name(&wq->conf_dev)); + } + } + } + + return 0; + + out: + idxd_device_wqs_clear_state(idxd); + idxd->state = IDXD_DEV_HALTED; + return rc; +} + +irqreturn_t idxd_irq_handler(int vec, void *data) +{ + struct idxd_irq_entry *irq_entry = data; + struct idxd_device *idxd = irq_entry->idxd; + + idxd_mask_msix_vector(idxd, irq_entry->id); + return IRQ_WAKE_THREAD; +} + +irqreturn_t idxd_misc_thread(int vec, void *data) +{ + struct idxd_irq_entry *irq_entry = data; + struct idxd_device *idxd = irq_entry->idxd; + struct device *dev = &idxd->pdev->dev; + union gensts_reg gensts; + u32 cause, val = 0; + int i, rc; + bool err = false; + + cause = ioread32(idxd->reg_base + IDXD_INTCAUSE_OFFSET); + + if (cause & IDXD_INTC_ERR) { + spin_lock_bh(&idxd->dev_lock); + for (i = 0; i < 4; i++) + idxd->sw_err.bits[i] = ioread64(idxd->reg_base + + IDXD_SWERR_OFFSET + i * sizeof(u64)); + iowrite64(IDXD_SWERR_ACK, idxd->reg_base + IDXD_SWERR_OFFSET); + + if (idxd->sw_err.valid && idxd->sw_err.wq_idx_valid) { + int id = idxd->sw_err.wq_idx; + struct idxd_wq *wq = &idxd->wqs[id]; + + if (wq->type == IDXD_WQT_USER) + wake_up_interruptible(&wq->idxd_cdev.err_queue); + } else { + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + struct 
idxd_wq *wq = &idxd->wqs[i]; + + if (wq->type == IDXD_WQT_USER) + wake_up_interruptible(&wq->idxd_cdev.err_queue); + } + } + + spin_unlock_bh(&idxd->dev_lock); + val |= IDXD_INTC_ERR; + + for (i = 0; i < 4; i++) + dev_warn(dev, "err[%d]: %#16.16llx\n", + i, idxd->sw_err.bits[i]); + err = true; + } + + if (cause & IDXD_INTC_CMD) { + /* Driver does not use command interrupts; commands are polled */ + val |= IDXD_INTC_CMD; + } + + if (cause & IDXD_INTC_OCCUPY) { + /* Driver does not utilize occupancy interrupt */ + val |= IDXD_INTC_OCCUPY; + } + + if (cause & IDXD_INTC_PERFMON_OVFL) { + /* + * Driver does not utilize perfmon counter overflow interrupt + * yet. + */ + val |= IDXD_INTC_PERFMON_OVFL; + } + + val ^= cause; + if (val) + dev_warn_once(dev, "Unexpected interrupt cause bits set: %#x\n", + val); + + iowrite32(cause, idxd->reg_base + IDXD_INTCAUSE_OFFSET); + if (!err) + return IRQ_HANDLED; + + gensts.bits = ioread32(idxd->reg_base + IDXD_GENSTATS_OFFSET); + if (gensts.state == IDXD_DEVICE_STATE_HALT) { + spin_lock_bh(&idxd->dev_lock); + if (gensts.reset_type == IDXD_DEVICE_RESET_SOFTWARE) { + rc = idxd_restart(idxd); + if (rc < 0) + dev_err(&idxd->pdev->dev, + "idxd restart failed, device halt.\n"); + } else { + idxd_device_wqs_clear_state(idxd); + idxd->state = IDXD_DEV_HALTED; + dev_err(&idxd->pdev->dev, + "idxd halted, need %s.\n", + gensts.reset_type == IDXD_DEVICE_RESET_FLR ? + "FLR" : "system reset"); + } + spin_unlock_bh(&idxd->dev_lock); + } + + idxd_unmask_msix_vector(idxd, irq_entry->id); + return IRQ_HANDLED; +} + +static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry, + int *processed) +{ + struct idxd_desc *desc, *t; + struct llist_node *head; + int queued = 0; + + head = llist_del_all(&irq_entry->pending_llist); + if (!head) + return 0; + + llist_for_each_entry_safe(desc, t, head, llnode) { + if (desc->completion->status) { + idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL); + idxd_free_desc(desc->wq, desc); + (*processed)++; + } else { + list_add_tail(&desc->list, &irq_entry->work_list); + queued++; + } + } + + return queued; +} + +static int irq_process_work_list(struct idxd_irq_entry *irq_entry, + int *processed) +{ + struct list_head *node, *next; + int queued = 0; + + if (list_empty(&irq_entry->work_list)) + return 0; + + list_for_each_safe(node, next, &irq_entry->work_list) { + struct idxd_desc *desc = + container_of(node, struct idxd_desc, list); + + if (desc->completion->status) { + list_del(&desc->list); + /* process and callback */ + idxd_dma_complete_txd(desc, IDXD_COMPLETE_NORMAL); + idxd_free_desc(desc->wq, desc); + (*processed)++; + } else { + queued++; + } + } + + return queued; +} + +irqreturn_t idxd_wq_thread(int irq, void *data) +{ + struct idxd_irq_entry *irq_entry = data; + int rc, processed = 0, retry = 0; + + /* + * There are two lists we are processing. The pending_llist is where + * submitters add all submitted descriptors after sending them to the + * device workqueue. It's a lockless singly linked list. The work_list + * is the common linux doubly linked list. We are in a scenario of + * multiple producers and a single consumer. The producers are all + * the kernel submitters of descriptors, and the consumer is the + * kernel irq handler thread for the msix vector when using threaded + * irq. To work with the restrictions of llist to remain lockless, + * we are doing the following steps: + * 1. Iterate through the work_list and process any completed + * descriptor. Delete the completed entries during iteration. + * 2. llist_del_all() from the pending list.
+ * 3. Iterate through the llist that was deleted from the pending list + * and process the completed entries. + * 4. If the entry is still waiting on hardware, list_add_tail() to + * the work_list. + * 5. Repeat until no more descriptors. + */ + do { + rc = irq_process_work_list(irq_entry, &processed); + if (rc != 0) { + retry++; + continue; + } + + rc = irq_process_pending_llist(irq_entry, &processed); + } while (rc != 0 && retry != 10); + + idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id); + + if (processed == 0) + return IRQ_NONE; + + return IRQ_HANDLED; +} diff --git a/drivers/dma/idxd/registers.h b/drivers/dma/idxd/registers.h new file mode 100644 index 000000000000..a39e7ae6b3d9 --- /dev/null +++ b/drivers/dma/idxd/registers.h @@ -0,0 +1,336 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#ifndef _IDXD_REGISTERS_H_ +#define _IDXD_REGISTERS_H_ + +/* PCI Config */ +#define PCI_DEVICE_ID_INTEL_DSA_SPR0 0x0b25 + +#define IDXD_MMIO_BAR 0 +#define IDXD_WQ_BAR 2 +#define IDXD_PORTAL_SIZE 0x4000 + +/* MMIO Device BAR0 Registers */ +#define IDXD_VER_OFFSET 0x00 +#define IDXD_VER_MAJOR_MASK 0xf0 +#define IDXD_VER_MINOR_MASK 0x0f +#define GET_IDXD_VER_MAJOR(x) (((x) & IDXD_VER_MAJOR_MASK) >> 4) +#define GET_IDXD_VER_MINOR(x) ((x) & IDXD_VER_MINOR_MASK) + +union gen_cap_reg { + struct { + u64 block_on_fault:1; + u64 overlap_copy:1; + u64 cache_control_mem:1; + u64 cache_control_cache:1; + u64 rsvd:3; + u64 int_handle_req:1; + u64 dest_readback:1; + u64 drain_readback:1; + u64 rsvd2:6; + u64 max_xfer_shift:5; + u64 max_batch_shift:4; + u64 max_ims_mult:6; + u64 config_en:1; + u64 max_descs_per_engine:8; + u64 rsvd3:24; + }; + u64 bits; +} __packed; +#define IDXD_GENCAP_OFFSET 0x10 + +union wq_cap_reg { + struct { + u64 total_wq_size:16; + u64 num_wqs:8; + u64 rsvd:24; + u64 shared_mode:1; + u64 dedicated_mode:1; + u64 rsvd2:1; + u64 priority:1; + u64 occupancy:1; + u64 occupancy_int:1; + u64 rsvd3:10; + }; + u64 bits; +} __packed; +#define IDXD_WQCAP_OFFSET 0x20 + +union group_cap_reg { + struct { + u64 num_groups:8; + u64 total_tokens:8; + u64 token_en:1; + u64 token_limit:1; + u64 rsvd:46; + }; + u64 bits; +} __packed; +#define IDXD_GRPCAP_OFFSET 0x30 + +union engine_cap_reg { + struct { + u64 num_engines:8; + u64 rsvd:56; + }; + u64 bits; +} __packed; + +#define IDXD_ENGCAP_OFFSET 0x38 + +#define IDXD_OPCAP_NOOP 0x0001 +#define IDXD_OPCAP_BATCH 0x0002 +#define IDXD_OPCAP_MEMMOVE 0x0008 +struct opcap { + u64 bits[4]; +}; + +#define IDXD_OPCAP_OFFSET 0x40 + +#define IDXD_TABLE_OFFSET 0x60 +union offsets_reg { + struct { + u64 grpcfg:16; + u64 wqcfg:16; + u64 msix_perm:16; + u64 ims:16; + u64 perfmon:16; + u64 rsvd:48; + }; + u64 bits[2]; +} __packed; + +#define IDXD_GENCFG_OFFSET 0x80 +union gencfg_reg { + struct { + u32 token_limit:8; + u32 rsvd:4; + u32 user_int_en:1; + u32 rsvd2:19; + }; + u32 bits; +} __packed; + +#define IDXD_GENCTRL_OFFSET 0x88 +union genctrl_reg { + struct { + u32 softerr_int_en:1; + u32 rsvd:31; + }; + u32 bits; +} __packed; + +#define IDXD_GENSTATS_OFFSET 0x90 +union gensts_reg { + struct { + u32 state:2; + u32 reset_type:2; + u32 rsvd:28; + }; + u32 bits; +} __packed; + +enum idxd_device_status_state { + IDXD_DEVICE_STATE_DISABLED = 0, + IDXD_DEVICE_STATE_ENABLED, + IDXD_DEVICE_STATE_DRAIN, + IDXD_DEVICE_STATE_HALT, +}; + +enum idxd_device_reset_type { + IDXD_DEVICE_RESET_SOFTWARE = 0, + IDXD_DEVICE_RESET_FLR, + IDXD_DEVICE_RESET_WARM, + IDXD_DEVICE_RESET_COLD, +}; + +#define IDXD_INTCAUSE_OFFSET 0x98 
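+/* interrupt cause register bits; the misc handler writes the read value back to acknowledge them */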
+#define IDXD_INTC_ERR 0x01 +#define IDXD_INTC_CMD 0x02 +#define IDXD_INTC_OCCUPY 0x04 +#define IDXD_INTC_PERFMON_OVFL 0x08 + +#define IDXD_CMD_OFFSET 0xa0 +union idxd_command_reg { + struct { + u32 operand:20; + u32 cmd:5; + u32 rsvd:6; + u32 int_req:1; + }; + u32 bits; +} __packed; + +enum idxd_cmd { + IDXD_CMD_ENABLE_DEVICE = 1, + IDXD_CMD_DISABLE_DEVICE, + IDXD_CMD_DRAIN_ALL, + IDXD_CMD_ABORT_ALL, + IDXD_CMD_RESET_DEVICE, + IDXD_CMD_ENABLE_WQ, + IDXD_CMD_DISABLE_WQ, + IDXD_CMD_DRAIN_WQ, + IDXD_CMD_ABORT_WQ, + IDXD_CMD_RESET_WQ, + IDXD_CMD_DRAIN_PASID, + IDXD_CMD_ABORT_PASID, + IDXD_CMD_REQUEST_INT_HANDLE, +}; + +#define IDXD_CMDSTS_OFFSET 0xa8 +union cmdsts_reg { + struct { + u8 err; + u16 result; + u8 rsvd:7; + u8 active:1; + }; + u32 bits; +} __packed; +#define IDXD_CMDSTS_ACTIVE 0x80000000 + +enum idxd_cmdsts_err { + IDXD_CMDSTS_SUCCESS = 0, + IDXD_CMDSTS_INVAL_CMD, + IDXD_CMDSTS_INVAL_WQIDX, + IDXD_CMDSTS_HW_ERR, + /* enable device errors */ + IDXD_CMDSTS_ERR_DEV_ENABLED = 0x10, + IDXD_CMDSTS_ERR_CONFIG, + IDXD_CMDSTS_ERR_BUSMASTER_EN, + IDXD_CMDSTS_ERR_PASID_INVAL, + IDXD_CMDSTS_ERR_WQ_SIZE_ERANGE, + IDXD_CMDSTS_ERR_GRP_CONFIG, + IDXD_CMDSTS_ERR_GRP_CONFIG2, + IDXD_CMDSTS_ERR_GRP_CONFIG3, + IDXD_CMDSTS_ERR_GRP_CONFIG4, + /* enable wq errors */ + IDXD_CMDSTS_ERR_DEV_NOTEN = 0x20, + IDXD_CMDSTS_ERR_WQ_ENABLED, + IDXD_CMDSTS_ERR_WQ_SIZE, + IDXD_CMDSTS_ERR_WQ_PRIOR, + IDXD_CMDSTS_ERR_WQ_MODE, + IDXD_CMDSTS_ERR_BOF_EN, + IDXD_CMDSTS_ERR_PASID_EN, + IDXD_CMDSTS_ERR_MAX_BATCH_SIZE, + IDXD_CMDSTS_ERR_MAX_XFER_SIZE, + /* disable device errors */ + IDXD_CMDSTS_ERR_DIS_DEV_EN = 0x31, + /* disable WQ, drain WQ, abort WQ, reset WQ */ + IDXD_CMDSTS_ERR_DEV_NOT_EN, + /* request interrupt handle */ + IDXD_CMDSTS_ERR_INVAL_INT_IDX = 0x41, + IDXD_CMDSTS_ERR_NO_HANDLE, +}; + +#define IDXD_SWERR_OFFSET 0xc0 +#define IDXD_SWERR_VALID 0x00000001 +#define IDXD_SWERR_OVERFLOW 0x00000002 +#define IDXD_SWERR_ACK (IDXD_SWERR_VALID | IDXD_SWERR_OVERFLOW) +union sw_err_reg { + struct { + u64 valid:1; + u64 overflow:1; + u64 desc_valid:1; + u64 wq_idx_valid:1; + u64 batch:1; + u64 fault_rw:1; + u64 priv:1; + u64 rsvd:1; + u64 error:8; + u64 wq_idx:8; + u64 rsvd2:8; + u64 operation:8; + u64 pasid:20; + u64 rsvd3:4; + + u64 batch_idx:16; + u64 rsvd4:16; + u64 invalid_flags:32; + + u64 fault_addr; + + u64 rsvd5; + }; + u64 bits[4]; +} __packed; + +union msix_perm { + struct { + u32 rsvd:2; + u32 ignore:1; + u32 pasid_en:1; + u32 rsvd2:8; + u32 pasid:20; + }; + u32 bits; +} __packed; + +union group_flags { + struct { + u32 tc_a:3; + u32 tc_b:3; + u32 rsvd:1; + u32 use_token_limit:1; + u32 tokens_reserved:8; + u32 rsvd2:4; + u32 tokens_allowed:8; + u32 rsvd3:4; + }; + u32 bits; +} __packed; + +struct grpcfg { + u64 wqs[4]; + u64 engines; + union group_flags flags; +} __packed; + +union wqcfg { + struct { + /* bytes 0-3 */ + u16 wq_size; + u16 rsvd; + + /* bytes 4-7 */ + u16 wq_thresh; + u16 rsvd1; + + /* bytes 8-11 */ + u32 mode:1; /* shared or dedicated */ + u32 bof:1; /* block on fault */ + u32 rsvd2:2; + u32 priority:4; + u32 pasid:20; + u32 pasid_en:1; + u32 priv:1; + u32 rsvd3:2; + + /* bytes 12-15 */ + u32 max_xfer_shift:5; + u32 max_batch_shift:4; + u32 rsvd4:23; + + /* bytes 16-19 */ + u16 occupancy_inth; + u16 occupancy_table_sel:1; + u16 rsvd5:15; + + /* bytes 20-23 */ + u16 occupancy_limit; + u16 occupancy_int_en:1; + u16 rsvd6:15; + + /* bytes 24-27 */ + u16 occupancy; + u16 occupancy_int:1; + u16 rsvd7:12; + u16 mode_support:1; + u16 wq_state:2; + + /* bytes 28-31 */ + u32 rsvd8; + }; + u32 bits[8]; +} 
__packed; +#endif diff --git a/drivers/dma/idxd/submit.c b/drivers/dma/idxd/submit.c new file mode 100644 index 000000000000..45a0c5869a0a --- /dev/null +++ b/drivers/dma/idxd/submit.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <uapi/linux/idxd.h> +#include "idxd.h" +#include "registers.h" + +struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype) +{ + struct idxd_desc *desc; + int idx; + struct idxd_device *idxd = wq->idxd; + + if (idxd->state != IDXD_DEV_ENABLED) + return ERR_PTR(-EIO); + + if (optype == IDXD_OP_BLOCK) + percpu_down_read(&wq->submit_lock); + else if (!percpu_down_read_trylock(&wq->submit_lock)) + return ERR_PTR(-EBUSY); + + if (!atomic_add_unless(&wq->dq_count, 1, wq->size)) { + int rc; + + if (optype == IDXD_OP_NONBLOCK) { + percpu_up_read(&wq->submit_lock); + return ERR_PTR(-EAGAIN); + } + + percpu_up_read(&wq->submit_lock); + percpu_down_write(&wq->submit_lock); + rc = wait_event_interruptible(wq->submit_waitq, + atomic_add_unless(&wq->dq_count, + 1, wq->size) || + idxd->state != IDXD_DEV_ENABLED); + percpu_up_write(&wq->submit_lock); + if (rc < 0) + return ERR_PTR(-EINTR); + if (idxd->state != IDXD_DEV_ENABLED) + return ERR_PTR(-EIO); + } else { + percpu_up_read(&wq->submit_lock); + } + + idx = sbitmap_get(&wq->sbmap, 0, false); + if (idx < 0) { + atomic_dec(&wq->dq_count); + return ERR_PTR(-EAGAIN); + } + + desc = wq->descs[idx]; + memset(desc->hw, 0, sizeof(struct dsa_hw_desc)); + memset(desc->completion, 0, sizeof(struct dsa_completion_record)); + return desc; +} + +void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc) +{ + atomic_dec(&wq->dq_count); + + sbitmap_clear_bit(&wq->sbmap, desc->id); + wake_up(&wq->submit_waitq); +} + +int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) +{ + struct idxd_device *idxd = wq->idxd; + int vec = desc->hw->int_handle; + void __iomem *portal; + + if (idxd->state != IDXD_DEV_ENABLED) + return -EIO; + + portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED); + /* + * The wmb() flushes writes to coherent DMA data before possibly + * triggering a DMA read. The wmb() is necessary even on UP because + * the recipient is a device. + */ + wmb(); + iosubmit_cmds512(portal, desc->hw, 1); + + /* + * Add the descriptor to the lockless list of the irq_entry that + * this descriptor was assigned to. + */ + if (desc->hw->flags & IDXD_OP_FLAG_RCI) + llist_add(&desc->llnode, + &idxd->irq_entries[vec].pending_llist); + + return 0; +} diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c new file mode 100644 index 000000000000..849c50ab939a --- /dev/null +++ b/drivers/dma/idxd/sysfs.c @@ -0,0 +1,1528 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2019 Intel Corporation. All rights rsvd.
*/ +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/pci.h> +#include <linux/device.h> +#include <linux/io-64-nonatomic-lo-hi.h> +#include <uapi/linux/idxd.h> +#include "registers.h" +#include "idxd.h" + +static char *idxd_wq_type_names[] = { + [IDXD_WQT_NONE] = "none", + [IDXD_WQT_KERNEL] = "kernel", + [IDXD_WQT_USER] = "user", +}; + +static void idxd_conf_device_release(struct device *dev) +{ + dev_dbg(dev, "%s for %s\n", __func__, dev_name(dev)); +} + +static struct device_type idxd_group_device_type = { + .name = "group", + .release = idxd_conf_device_release, +}; + +static struct device_type idxd_wq_device_type = { + .name = "wq", + .release = idxd_conf_device_release, +}; + +static struct device_type idxd_engine_device_type = { + .name = "engine", + .release = idxd_conf_device_release, +}; + +static struct device_type dsa_device_type = { + .name = "dsa", + .release = idxd_conf_device_release, +}; + +static inline bool is_dsa_dev(struct device *dev) +{ + return dev ? dev->type == &dsa_device_type : false; +} + +static inline bool is_idxd_dev(struct device *dev) +{ + return is_dsa_dev(dev); +} + +static inline bool is_idxd_wq_dev(struct device *dev) +{ + return dev ? dev->type == &idxd_wq_device_type : false; +} + +static inline bool is_idxd_wq_dmaengine(struct idxd_wq *wq) +{ + if (wq->type == IDXD_WQT_KERNEL && + strcmp(wq->name, "dmaengine") == 0) + return true; + return false; +} + +static inline bool is_idxd_wq_cdev(struct idxd_wq *wq) +{ + return wq->type == IDXD_WQT_USER ? true : false; +} + +static int idxd_config_bus_match(struct device *dev, + struct device_driver *drv) +{ + int matched = 0; + + if (is_idxd_dev(dev)) { + struct idxd_device *idxd = confdev_to_idxd(dev); + + if (idxd->state != IDXD_DEV_CONF_READY) + return 0; + matched = 1; + } else if (is_idxd_wq_dev(dev)) { + struct idxd_wq *wq = confdev_to_wq(dev); + struct idxd_device *idxd = wq->idxd; + + if (idxd->state < IDXD_DEV_CONF_READY) + return 0; + + if (wq->state != IDXD_WQ_DISABLED) { + dev_dbg(dev, "%s not disabled\n", dev_name(dev)); + return 0; + } + matched = 1; + } + + if (matched) + dev_dbg(dev, "%s matched\n", dev_name(dev)); + + return matched; +} + +static int idxd_config_bus_probe(struct device *dev) +{ + int rc; + unsigned long flags; + + dev_dbg(dev, "%s called\n", __func__); + + if (is_idxd_dev(dev)) { + struct idxd_device *idxd = confdev_to_idxd(dev); + + if (idxd->state != IDXD_DEV_CONF_READY) { + dev_warn(dev, "Device not ready for config\n"); + return -EBUSY; + } + + if (!try_module_get(THIS_MODULE)) + return -ENXIO; + + spin_lock_irqsave(&idxd->dev_lock, flags); + + /* Perform IDXD configuration and enabling */ + rc = idxd_device_config(idxd); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_warn(dev, "Device config failed: %d\n", rc); + return rc; + } + + /* start device */ + rc = idxd_device_enable(idxd); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_warn(dev, "Device enable failed: %d\n", rc); + return rc; + } + + spin_unlock_irqrestore(&idxd->dev_lock, flags); + dev_info(dev, "Device %s enabled\n", dev_name(dev)); + + rc = idxd_register_dma_device(idxd); + if (rc < 0) { + dev_dbg(dev, "Failed to register dmaengine device\n"); + return rc; + } + return 0; + } else if (is_idxd_wq_dev(dev)) { + struct idxd_wq *wq = confdev_to_wq(dev); + struct idxd_device *idxd = wq->idxd; + + mutex_lock(&wq->wq_lock); + + if (idxd->state != IDXD_DEV_ENABLED) { + 
mutex_unlock(&wq->wq_lock); + dev_warn(dev, "Enabling wq while device is not enabled.\n"); + return -EPERM; + } + + if (wq->state != IDXD_WQ_DISABLED) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ %d already enabled.\n", wq->id); + return -EBUSY; + } + + if (!wq->group) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ not attached to group.\n"); + return -EINVAL; + } + + if (strlen(wq->name) == 0) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ name not set.\n"); + return -EINVAL; + } + + rc = idxd_wq_alloc_resources(wq); + if (rc < 0) { + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ resource alloc failed\n"); + return rc; + } + + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = idxd_device_config(idxd); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "Writing WQ %d config failed: %d\n", + wq->id, rc); + return rc; + } + + rc = idxd_wq_enable(wq); + if (rc < 0) { + spin_unlock_irqrestore(&idxd->dev_lock, flags); + mutex_unlock(&wq->wq_lock); + dev_warn(dev, "WQ %d enabling failed: %d\n", + wq->id, rc); + return rc; + } + spin_unlock_irqrestore(&idxd->dev_lock, flags); + + rc = idxd_wq_map_portal(wq); + if (rc < 0) { + dev_warn(dev, "wq portal mapping failed: %d\n", rc); + rc = idxd_wq_disable(wq); + if (rc < 0) + dev_warn(dev, "IDXD wq disable failed\n"); + mutex_unlock(&wq->wq_lock); + return rc; + } + + wq->client_count = 0; + + dev_info(dev, "wq %s enabled\n", dev_name(&wq->conf_dev)); + + if (is_idxd_wq_dmaengine(wq)) { + rc = idxd_register_dma_channel(wq); + if (rc < 0) { + dev_dbg(dev, "DMA channel register failed\n"); + mutex_unlock(&wq->wq_lock); + return rc; + } + } else if (is_idxd_wq_cdev(wq)) { + rc = idxd_wq_add_cdev(wq); + if (rc < 0) { + dev_dbg(dev, "Cdev creation failed\n"); + mutex_unlock(&wq->wq_lock); + return rc; + } + } + + mutex_unlock(&wq->wq_lock); + return 0; + } + + return -ENODEV; +} + +static void disable_wq(struct idxd_wq *wq) +{ + struct idxd_device *idxd = wq->idxd; + struct device *dev = &idxd->pdev->dev; + unsigned long flags; + int rc; + + mutex_lock(&wq->wq_lock); + dev_dbg(dev, "%s removing WQ %s\n", __func__, dev_name(&wq->conf_dev)); + if (wq->state == IDXD_WQ_DISABLED) { + mutex_unlock(&wq->wq_lock); + return; + } + + if (is_idxd_wq_dmaengine(wq)) + idxd_unregister_dma_channel(wq); + else if (is_idxd_wq_cdev(wq)) + idxd_wq_del_cdev(wq); + + if (idxd_wq_refcount(wq)) + dev_warn(dev, "Clients have a claim on wq %d: %d\n", + wq->id, idxd_wq_refcount(wq)); + + idxd_wq_unmap_portal(wq); + + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = idxd_wq_disable(wq); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + + idxd_wq_free_resources(wq); + wq->client_count = 0; + mutex_unlock(&wq->wq_lock); + + if (rc < 0) + dev_warn(dev, "Failed to disable %s: %d\n", + dev_name(&wq->conf_dev), rc); + else + dev_info(dev, "wq %s disabled\n", dev_name(&wq->conf_dev)); +} + +static int idxd_config_bus_remove(struct device *dev) +{ + int rc; + unsigned long flags; + + dev_dbg(dev, "%s called for %s\n", __func__, dev_name(dev)); + + /* disable workqueue here */ + if (is_idxd_wq_dev(dev)) { + struct idxd_wq *wq = confdev_to_wq(dev); + + disable_wq(wq); + } else if (is_idxd_dev(dev)) { + struct idxd_device *idxd = confdev_to_idxd(dev); + int i; + + dev_dbg(dev, "%s removing dev %s\n", __func__, + dev_name(&idxd->conf_dev)); + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + if (wq->state == IDXD_WQ_DISABLED) + continue; + 
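A note on the probe paths above: every failure branch carries its own unlock-and-return sequence, which is exactly the pattern where an unbalanced unlock can slip in. Below is a minimal editorial sketch, not code from the patch, of the same wq enable sequence written with goto-based unwinding, reusing the helpers the patch itself introduces; wq_enable_sketch() is a hypothetical name.

/*
 * Sketch only: a single ordered exit path for the wq enable sequence,
 * built from the idxd helpers shown in this patch.
 */
static int wq_enable_sketch(struct idxd_wq *wq)
{
	struct idxd_device *idxd = wq->idxd;
	unsigned long flags;
	int rc;

	mutex_lock(&wq->wq_lock);

	rc = idxd_wq_alloc_resources(wq);
	if (rc < 0)
		goto out_unlock;

	spin_lock_irqsave(&idxd->dev_lock, flags);
	rc = idxd_device_config(idxd);
	if (!rc)
		rc = idxd_wq_enable(wq);
	spin_unlock_irqrestore(&idxd->dev_lock, flags);
	if (rc < 0)
		goto out_free;

	rc = idxd_wq_map_portal(wq);
	if (rc < 0)
		goto out_disable;

	mutex_unlock(&wq->wq_lock);
	return 0;

out_disable:
	idxd_wq_disable(wq);
out_free:
	idxd_wq_free_resources(wq);
out_unlock:
	mutex_unlock(&wq->wq_lock);
	return rc;
}

Each resource acquired so far is released in reverse order by falling through the labels, so every branch shares one provably balanced lock sequence.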
dev_warn(dev, "Active wq %d while disabling %s.\n", i, + dev_name(&idxd->conf_dev)); + device_release_driver(&wq->conf_dev); + } + + idxd_unregister_dma_device(idxd); + spin_lock_irqsave(&idxd->dev_lock, flags); + rc = idxd_device_disable(idxd); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + module_put(THIS_MODULE); + if (rc < 0) + dev_warn(dev, "Device disable failed\n"); + else + dev_info(dev, "Device %s disabled\n", dev_name(dev)); + + } + + return 0; +} + +static void idxd_config_bus_shutdown(struct device *dev) +{ + dev_dbg(dev, "%s called\n", __func__); +} + +struct bus_type dsa_bus_type = { + .name = "dsa", + .match = idxd_config_bus_match, + .probe = idxd_config_bus_probe, + .remove = idxd_config_bus_remove, + .shutdown = idxd_config_bus_shutdown, +}; + +static struct bus_type *idxd_bus_types[] = { + &dsa_bus_type +}; + +static struct idxd_device_driver dsa_drv = { + .drv = { + .name = "dsa", + .bus = &dsa_bus_type, + .owner = THIS_MODULE, + .mod_name = KBUILD_MODNAME, + }, +}; + +static struct idxd_device_driver *idxd_drvs[] = { + &dsa_drv +}; + +struct bus_type *idxd_get_bus_type(struct idxd_device *idxd) +{ + return idxd_bus_types[idxd->type]; +} + +static struct device_type *idxd_get_device_type(struct idxd_device *idxd) +{ + if (idxd->type == IDXD_TYPE_DSA) + return &dsa_device_type; + else + return NULL; +} + +/* IDXD generic driver setup */ +int idxd_register_driver(void) +{ + int i, rc; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + rc = driver_register(&idxd_drvs[i]->drv); + if (rc < 0) + goto drv_fail; + } + + return 0; + +drv_fail: + while (--i >= 0) + driver_unregister(&idxd_drvs[i]->drv); + return rc; +} + +void idxd_unregister_driver(void) +{ + int i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) + driver_unregister(&idxd_drvs[i]->drv); +} + +/* IDXD engine attributes */ +static ssize_t engine_group_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_engine *engine = + container_of(dev, struct idxd_engine, conf_dev); + + if (engine->group) + return sprintf(buf, "%d\n", engine->group->id); + else + return sprintf(buf, "%d\n", -1); +} + +static ssize_t engine_group_id_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_engine *engine = + container_of(dev, struct idxd_engine, conf_dev); + struct idxd_device *idxd = engine->idxd; + long id; + int rc; + struct idxd_group *prevg, *group; + + rc = kstrtol(buf, 10, &id); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (id > idxd->max_groups - 1 || id < -1) + return -EINVAL; + + if (id == -1) { + if (engine->group) { + engine->group->num_engines--; + engine->group = NULL; + } + return count; + } + + group = &idxd->groups[id]; + prevg = engine->group; + + if (prevg) + prevg->num_engines--; + engine->group = group; + engine->group->num_engines++; + + return count; +} + +static struct device_attribute dev_attr_engine_group = + __ATTR(group_id, 0644, engine_group_id_show, + engine_group_id_store); + +static struct attribute *idxd_engine_attributes[] = { + &dev_attr_engine_group.attr, + NULL, +}; + +static const struct attribute_group idxd_engine_attribute_group = { + .attrs = idxd_engine_attributes, +}; + +static const struct attribute_group *idxd_engine_attribute_groups[] = { + &idxd_engine_attribute_group, + NULL, +}; + +/* Group attributes */ + +static void idxd_set_free_tokens(struct idxd_device *idxd) +{ + int i, tokens; + + for (i = 0, tokens = 0; i <
idxd->max_groups; i++) { + struct idxd_group *g = &idxd->groups[i]; + + tokens += g->tokens_reserved; + } + + idxd->nr_tokens = idxd->max_tokens - tokens; +} + +static ssize_t group_tokens_reserved_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%u\n", group->tokens_reserved); +} + +static ssize_t group_tokens_reserved_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (idxd->token_limit == 0) + return -EPERM; + + if (val > idxd->max_tokens) + return -EINVAL; + + if (val > idxd->nr_tokens) + return -EINVAL; + + group->tokens_reserved = val; + idxd_set_free_tokens(idxd); + return count; +} + +static struct device_attribute dev_attr_group_tokens_reserved = + __ATTR(tokens_reserved, 0644, group_tokens_reserved_show, + group_tokens_reserved_store); + +static ssize_t group_tokens_allowed_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%u\n", group->tokens_allowed); +} + +static ssize_t group_tokens_allowed_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (idxd->token_limit == 0) + return -EPERM; + if (val < 4 * group->num_engines || + val > group->tokens_reserved + idxd->nr_tokens) + return -EINVAL; + + group->tokens_allowed = val; + return count; +} + +static struct device_attribute dev_attr_group_tokens_allowed = + __ATTR(tokens_allowed, 0644, group_tokens_allowed_show, + group_tokens_allowed_store); + +static ssize_t group_use_token_limit_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%u\n", group->use_token_limit); +} + +static ssize_t group_use_token_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (idxd->token_limit == 0) + return -EPERM; + + group->use_token_limit = !!val; + return count; +} + +static struct device_attribute dev_attr_group_use_token_limit = + __ATTR(use_token_limit, 0644, group_use_token_limit_show, + group_use_token_limit_store); + +static ssize_t group_engines_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_group *group = + container_of(dev, struct 
idxd_group, conf_dev); + int i, rc = 0; + char *tmp = buf; + struct idxd_device *idxd = group->idxd; + + for (i = 0; i < idxd->max_engines; i++) { + struct idxd_engine *engine = &idxd->engines[i]; + + if (!engine->group) + continue; + + if (engine->group->id == group->id) + rc += sprintf(tmp + rc, "engine%d.%d ", + idxd->id, engine->id); + } + + if (rc) + rc--; + rc += sprintf(tmp + rc, "\n"); + + return rc; +} + +static struct device_attribute dev_attr_group_engines = + __ATTR(engines, 0444, group_engines_show, NULL); + +static ssize_t group_work_queues_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + int i, rc = 0; + char *tmp = buf; + struct idxd_device *idxd = group->idxd; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + if (!wq->group) + continue; + + if (wq->group->id == group->id) + rc += sprintf(tmp + rc, "wq%d.%d ", + idxd->id, wq->id); + } + + if (rc) + rc--; + rc += sprintf(tmp + rc, "\n"); + + return rc; +} + +static struct device_attribute dev_attr_group_work_queues = + __ATTR(work_queues, 0444, group_work_queues_show, NULL); + +static ssize_t group_traffic_class_a_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%d\n", group->tc_a); +} + +static ssize_t group_traffic_class_a_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + long val; + int rc; + + rc = kstrtol(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (val < 0 || val > 7) + return -EINVAL; + + group->tc_a = val; + return count; +} + +static struct device_attribute dev_attr_group_traffic_class_a = + __ATTR(traffic_class_a, 0644, group_traffic_class_a_show, + group_traffic_class_a_store); + +static ssize_t group_traffic_class_b_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + + return sprintf(buf, "%d\n", group->tc_b); +} + +static ssize_t group_traffic_class_b_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_group *group = + container_of(dev, struct idxd_group, conf_dev); + struct idxd_device *idxd = group->idxd; + long val; + int rc; + + rc = kstrtol(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (val < 0 || val > 7) + return -EINVAL; + + group->tc_b = val; + return count; +} + +static struct device_attribute dev_attr_group_traffic_class_b = + __ATTR(traffic_class_b, 0644, group_traffic_class_b_show, + group_traffic_class_b_store); + +static struct attribute *idxd_group_attributes[] = { + &dev_attr_group_work_queues.attr, + &dev_attr_group_engines.attr, + &dev_attr_group_use_token_limit.attr, + &dev_attr_group_tokens_allowed.attr, + &dev_attr_group_tokens_reserved.attr, + &dev_attr_group_traffic_class_a.attr, + &dev_attr_group_traffic_class_b.attr, + NULL, +}; + +static const struct attribute_group idxd_group_attribute_group = { + .attrs = idxd_group_attributes, +}; + +static 
const struct attribute_group *idxd_group_attribute_groups[] = { + &idxd_group_attribute_group, + NULL, +}; + +/* IDXD work queue attribs */ +static ssize_t wq_clients_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%d\n", wq->client_count); +} + +static struct device_attribute dev_attr_wq_clients = + __ATTR(clients, 0444, wq_clients_show, NULL); + +static ssize_t wq_state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + switch (wq->state) { + case IDXD_WQ_DISABLED: + return sprintf(buf, "disabled\n"); + case IDXD_WQ_ENABLED: + return sprintf(buf, "enabled\n"); + } + + return sprintf(buf, "unknown\n"); +} + +static struct device_attribute dev_attr_wq_state = + __ATTR(state, 0444, wq_state_show, NULL); + +static ssize_t wq_group_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + if (wq->group) + return sprintf(buf, "%u\n", wq->group->id); + else + return sprintf(buf, "-1\n"); +} + +static ssize_t wq_group_id_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + struct idxd_device *idxd = wq->idxd; + long id; + int rc; + struct idxd_group *prevg, *group; + + rc = kstrtol(buf, 10, &id); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (id > idxd->max_groups - 1 || id < -1) + return -EINVAL; + + if (id == -1) { + if (wq->group) { + wq->group->num_wqs--; + wq->group = NULL; + } + return count; + } + + group = &idxd->groups[id]; + prevg = wq->group; + + if (prevg) + prevg->num_wqs--; + wq->group = group; + group->num_wqs++; + return count; +} + +static struct device_attribute dev_attr_wq_group_id = + __ATTR(group_id, 0644, wq_group_id_show, wq_group_id_store); + +static ssize_t wq_mode_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%s\n", + wq_dedicated(wq) ? 
"dedicated" : "shared"); +} + +static ssize_t wq_mode_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + struct idxd_device *idxd = wq->idxd; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (sysfs_streq(buf, "dedicated")) { + set_bit(WQ_FLAG_DEDICATED, &wq->flags); + wq->threshold = 0; + } else { + return -EINVAL; + } + + return count; +} + +static struct device_attribute dev_attr_wq_mode = + __ATTR(mode, 0644, wq_mode_show, wq_mode_store); + +static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%u\n", wq->size); +} + +static ssize_t wq_size_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + unsigned long size; + struct idxd_device *idxd = wq->idxd; + int rc; + + rc = kstrtoul(buf, 10, &size); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (size > idxd->max_wq_size) + return -EINVAL; + + wq->size = size; + return count; +} + +static struct device_attribute dev_attr_wq_size = + __ATTR(size, 0644, wq_size_show, wq_size_store); + +static ssize_t wq_priority_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%u\n", wq->priority); +} + +static ssize_t wq_priority_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + unsigned long prio; + struct idxd_device *idxd = wq->idxd; + int rc; + + rc = kstrtoul(buf, 10, &prio); + if (rc < 0) + return -EINVAL; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (prio > IDXD_MAX_PRIORITY) + return -EINVAL; + + wq->priority = prio; + return count; +} + +static struct device_attribute dev_attr_wq_priority = + __ATTR(priority, 0644, wq_priority_show, wq_priority_store); + +static ssize_t wq_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + switch (wq->type) { + case IDXD_WQT_KERNEL: + return sprintf(buf, "%s\n", + idxd_wq_type_names[IDXD_WQT_KERNEL]); + case IDXD_WQT_USER: + return sprintf(buf, "%s\n", + idxd_wq_type_names[IDXD_WQT_USER]); + case IDXD_WQT_NONE: + default: + return sprintf(buf, "%s\n", + idxd_wq_type_names[IDXD_WQT_NONE]); + } + + return -EINVAL; +} + +static ssize_t wq_type_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + enum idxd_wq_type old_type; + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + old_type = wq->type; + if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL])) + wq->type = IDXD_WQT_KERNEL; + else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER])) + wq->type = IDXD_WQT_USER; + else + wq->type = IDXD_WQT_NONE; + + /* If we are changing queue type, clear the name */ + if (wq->type != old_type) + memset(wq->name, 0, WQ_NAME_SIZE + 1); 
+ + return count; +} + +static struct device_attribute dev_attr_wq_type = + __ATTR(type, 0644, wq_type_show, wq_type_store); + +static ssize_t wq_name_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%s\n", wq->name); +} + +static ssize_t wq_name_store(struct device *dev, + struct device_attribute *attr, const char *buf, + size_t count) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + if (wq->state != IDXD_WQ_DISABLED) + return -EPERM; + + if (strlen(buf) > WQ_NAME_SIZE || strlen(buf) == 0) + return -EINVAL; + + memset(wq->name, 0, WQ_NAME_SIZE + 1); + strncpy(wq->name, buf, WQ_NAME_SIZE); + strreplace(wq->name, '\n', '\0'); + return count; +} + +static struct device_attribute dev_attr_wq_name = + __ATTR(name, 0644, wq_name_show, wq_name_store); + +static ssize_t wq_cdev_minor_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_wq *wq = container_of(dev, struct idxd_wq, conf_dev); + + return sprintf(buf, "%d\n", wq->idxd_cdev.minor); +} + +static struct device_attribute dev_attr_wq_cdev_minor = + __ATTR(cdev_minor, 0444, wq_cdev_minor_show, NULL); + +static struct attribute *idxd_wq_attributes[] = { + &dev_attr_wq_clients.attr, + &dev_attr_wq_state.attr, + &dev_attr_wq_group_id.attr, + &dev_attr_wq_mode.attr, + &dev_attr_wq_size.attr, + &dev_attr_wq_priority.attr, + &dev_attr_wq_type.attr, + &dev_attr_wq_name.attr, + &dev_attr_wq_cdev_minor.attr, + NULL, +}; + +static const struct attribute_group idxd_wq_attribute_group = { + .attrs = idxd_wq_attributes, +}; + +static const struct attribute_group *idxd_wq_attribute_groups[] = { + &idxd_wq_attribute_group, + NULL, +}; + +/* IDXD device attribs */ +static ssize_t max_work_queues_size_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_wq_size); +} +static DEVICE_ATTR_RO(max_work_queues_size); + +static ssize_t max_groups_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_groups); +} +static DEVICE_ATTR_RO(max_groups); + +static ssize_t max_work_queues_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_wqs); +} +static DEVICE_ATTR_RO(max_work_queues); + +static ssize_t max_engines_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_engines); +} +static DEVICE_ATTR_RO(max_engines); + +static ssize_t numa_node_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%d\n", dev_to_node(&idxd->pdev->dev)); +} +static DEVICE_ATTR_RO(numa_node); + +static ssize_t max_batch_size_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_batch_size); +} +static DEVICE_ATTR_RO(max_batch_size); + +static ssize_t max_transfer_size_show(struct device *dev, + struct 
device_attribute *attr, + char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%llu\n", idxd->max_xfer_bytes); +} +static DEVICE_ATTR_RO(max_transfer_size); + +static ssize_t op_cap_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%#llx\n", idxd->hw.opcap.bits[0]); +} +static DEVICE_ATTR_RO(op_cap); + +static ssize_t configurable_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", + test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)); +} +static DEVICE_ATTR_RO(configurable); + +static ssize_t clients_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + unsigned long flags; + int count = 0, i; + + spin_lock_irqsave(&idxd->dev_lock, flags); + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + count += wq->client_count; + } + spin_unlock_irqrestore(&idxd->dev_lock, flags); + + return sprintf(buf, "%d\n", count); +} +static DEVICE_ATTR_RO(clients); + +static ssize_t state_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + switch (idxd->state) { + case IDXD_DEV_DISABLED: + case IDXD_DEV_CONF_READY: + return sprintf(buf, "disabled\n"); + case IDXD_DEV_ENABLED: + return sprintf(buf, "enabled\n"); + case IDXD_DEV_HALTED: + return sprintf(buf, "halted\n"); + } + + return sprintf(buf, "unknown\n"); +} +static DEVICE_ATTR_RO(state); + +static ssize_t errors_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + int i, out = 0; + unsigned long flags; + + spin_lock_irqsave(&idxd->dev_lock, flags); + for (i = 0; i < 4; i++) + out += sprintf(buf + out, "%#018llx ", idxd->sw_err.bits[i]); + spin_unlock_irqrestore(&idxd->dev_lock, flags); + out--; + out += sprintf(buf + out, "\n"); + return out; +} +static DEVICE_ATTR_RO(errors); + +static ssize_t max_tokens_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->max_tokens); +} +static DEVICE_ATTR_RO(max_tokens); + +static ssize_t token_limit_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->token_limit); +} + +static ssize_t token_limit_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + unsigned long val; + int rc; + + rc = kstrtoul(buf, 10, &val); + if (rc < 0) + return -EINVAL; + + if (idxd->state == IDXD_DEV_ENABLED) + return -EPERM; + + if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) + return -EPERM; + + if (!idxd->hw.group_cap.token_limit) + return -EPERM; + + if (val > idxd->hw.group_cap.total_tokens) + return -EINVAL; + + idxd->token_limit = val; + return count; +} +static DEVICE_ATTR_RW(token_limit); + +static ssize_t cdev_major_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + 
struct idxd_device *idxd = + container_of(dev, struct idxd_device, conf_dev); + + return sprintf(buf, "%u\n", idxd->major); +} +static DEVICE_ATTR_RO(cdev_major); + +static struct attribute *idxd_device_attributes[] = { + &dev_attr_max_groups.attr, + &dev_attr_max_work_queues.attr, + &dev_attr_max_work_queues_size.attr, + &dev_attr_max_engines.attr, + &dev_attr_numa_node.attr, + &dev_attr_max_batch_size.attr, + &dev_attr_max_transfer_size.attr, + &dev_attr_op_cap.attr, + &dev_attr_configurable.attr, + &dev_attr_clients.attr, + &dev_attr_state.attr, + &dev_attr_errors.attr, + &dev_attr_max_tokens.attr, + &dev_attr_token_limit.attr, + &dev_attr_cdev_major.attr, + NULL, +}; + +static const struct attribute_group idxd_device_attribute_group = { + .attrs = idxd_device_attributes, +}; + +static const struct attribute_group *idxd_attribute_groups[] = { + &idxd_device_attribute_group, + NULL, +}; + +static int idxd_setup_engine_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i, rc; + + for (i = 0; i < idxd->max_engines; i++) { + struct idxd_engine *engine = &idxd->engines[i]; + + engine->conf_dev.parent = &idxd->conf_dev; + dev_set_name(&engine->conf_dev, "engine%d.%d", + idxd->id, engine->id); + engine->conf_dev.bus = idxd_get_bus_type(idxd); + engine->conf_dev.groups = idxd_engine_attribute_groups; + engine->conf_dev.type = &idxd_engine_device_type; + dev_dbg(dev, "Engine device register: %s\n", + dev_name(&engine->conf_dev)); + rc = device_register(&engine->conf_dev); + if (rc < 0) { + put_device(&engine->conf_dev); + goto cleanup; + } + } + + return 0; + +cleanup: + while (i--) { + struct idxd_engine *engine = &idxd->engines[i]; + + device_unregister(&engine->conf_dev); + } + return rc; +} + +static int idxd_setup_group_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i, rc; + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + group->conf_dev.parent = &idxd->conf_dev; + dev_set_name(&group->conf_dev, "group%d.%d", + idxd->id, group->id); + group->conf_dev.bus = idxd_get_bus_type(idxd); + group->conf_dev.groups = idxd_group_attribute_groups; + group->conf_dev.type = &idxd_group_device_type; + dev_dbg(dev, "Group device register: %s\n", + dev_name(&group->conf_dev)); + rc = device_register(&group->conf_dev); + if (rc < 0) { + put_device(&group->conf_dev); + goto cleanup; + } + } + + return 0; + +cleanup: + while (i--) { + struct idxd_group *group = &idxd->groups[i]; + + device_unregister(&group->conf_dev); + } + return rc; +} + +static int idxd_setup_wq_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int i, rc; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + wq->conf_dev.parent = &idxd->conf_dev; + dev_set_name(&wq->conf_dev, "wq%d.%d", idxd->id, wq->id); + wq->conf_dev.bus = idxd_get_bus_type(idxd); + wq->conf_dev.groups = idxd_wq_attribute_groups; + wq->conf_dev.type = &idxd_wq_device_type; + dev_dbg(dev, "WQ device register: %s\n", + dev_name(&wq->conf_dev)); + rc = device_register(&wq->conf_dev); + if (rc < 0) { + put_device(&wq->conf_dev); + goto cleanup; + } + } + + return 0; + +cleanup: + while (i--) { + struct idxd_wq *wq = &idxd->wqs[i]; + + device_unregister(&wq->conf_dev); + } + return rc; +} + +static int idxd_setup_device_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + char devname[IDXD_NAME_SIZE]; + + sprintf(devname, "%s%d", idxd_get_dev_name(idxd), idxd->id); + 
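The idxd_setup_*_sysfs() loops above rely on a driver-model rule worth spelling out: a failed device_register() has already taken a kobject reference, so the failing entry must be dropped with put_device(), while entries that did register are torn down with device_unregister(). A condensed sketch of that unwind, with a hypothetical function and array, not patch code:

/* Sketch: unwind rule for a partially registered device array. */
static int register_conf_devs_sketch(struct device *confs, int n)
{
	int i, rc;

	for (i = 0; i < n; i++) {
		rc = device_register(&confs[i]);
		if (rc < 0) {
			/* failed entry: drop the reference only */
			put_device(&confs[i]);
			/* registered siblings: full unregister */
			while (i--)
				device_unregister(&confs[i]);
			return rc;
		}
	}
	return 0;
}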
idxd->conf_dev.parent = dev; + dev_set_name(&idxd->conf_dev, "%s", devname); + idxd->conf_dev.bus = idxd_get_bus_type(idxd); + idxd->conf_dev.groups = idxd_attribute_groups; + idxd->conf_dev.type = idxd_get_device_type(idxd); + + dev_dbg(dev, "IDXD device register: %s\n", dev_name(&idxd->conf_dev)); + rc = device_register(&idxd->conf_dev); + if (rc < 0) { + put_device(&idxd->conf_dev); + return rc; + } + + return 0; +} + +int idxd_setup_sysfs(struct idxd_device *idxd) +{ + struct device *dev = &idxd->pdev->dev; + int rc; + + rc = idxd_setup_device_sysfs(idxd); + if (rc < 0) { + dev_dbg(dev, "Device sysfs registering failed: %d\n", rc); + return rc; + } + + rc = idxd_setup_wq_sysfs(idxd); + if (rc < 0) { + /* unregister conf dev */ + dev_dbg(dev, "Work Queue sysfs registering failed: %d\n", rc); + return rc; + } + + rc = idxd_setup_group_sysfs(idxd); + if (rc < 0) { + /* unregister conf dev */ + dev_dbg(dev, "Group sysfs registering failed: %d\n", rc); + return rc; + } + + rc = idxd_setup_engine_sysfs(idxd); + if (rc < 0) { + /* unregister conf dev */ + dev_dbg(dev, "Engine sysfs registering failed: %d\n", rc); + return rc; + } + + return 0; +} + +void idxd_cleanup_sysfs(struct idxd_device *idxd) +{ + int i; + + for (i = 0; i < idxd->max_wqs; i++) { + struct idxd_wq *wq = &idxd->wqs[i]; + + device_unregister(&wq->conf_dev); + } + + for (i = 0; i < idxd->max_engines; i++) { + struct idxd_engine *engine = &idxd->engines[i]; + + device_unregister(&engine->conf_dev); + } + + for (i = 0; i < idxd->max_groups; i++) { + struct idxd_group *group = &idxd->groups[i]; + + device_unregister(&group->conf_dev); + } + + device_unregister(&idxd->conf_dev); +} + +int idxd_register_bus_type(void) +{ + int i, rc; + + for (i = 0; i < IDXD_TYPE_MAX; i++) { + rc = bus_register(idxd_bus_types[i]); + if (rc < 0) + goto bus_err; + } + + return 0; + +bus_err: + while (--i >= 0) + bus_unregister(idxd_bus_types[i]); + return rc; +} + +void idxd_unregister_bus_type(void) +{ + int i; + + for (i = 0; i < IDXD_TYPE_MAX; i++) + bus_unregister(idxd_bus_types[i]); +} diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index c27e206a764c..066b21a32232 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -760,12 +760,8 @@ static void sdma_start_desc(struct sdma_channel *sdmac) return; } sdmac->desc = desc = to_sdma_desc(&vd->tx); - /* - * Do not delete the node in desc_issued list in cyclic mode, otherwise - * the desc allocated will never be freed in vchan_dma_desc_free_list - */ - if (!(sdmac->flags & IMX_DMA_SG_LOOP)) - list_del(&vd->node); + + list_del(&vd->node); sdma->channel_control[channel].base_bd_ptr = desc->bd_phys; sdma->channel_control[channel].current_bd_ptr = desc->bd_phys; @@ -1071,20 +1067,27 @@ static void sdma_channel_terminate_work(struct work_struct *work) spin_lock_irqsave(&sdmac->vc.lock, flags); vchan_get_all_descriptors(&sdmac->vc, &head); - sdmac->desc = NULL; spin_unlock_irqrestore(&sdmac->vc.lock, flags); vchan_dma_desc_free_list(&sdmac->vc, &head); sdmac->context_loaded = false; } -static int sdma_disable_channel_async(struct dma_chan *chan) +static int sdma_terminate_all(struct dma_chan *chan) { struct sdma_channel *sdmac = to_sdma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&sdmac->vc.lock, flags); sdma_disable_channel(chan); - if (sdmac->desc) + if (sdmac->desc) { + vchan_terminate_vdesc(&sdmac->desc->vd); + sdmac->desc = NULL; + schedule_work(&sdmac->terminate_worker); + } + + spin_unlock_irqrestore(&sdmac->vc.lock, flags); return 0; } @@ -1324,7 +1327,7 @@ 
static void sdma_free_chan_resources(struct dma_chan *chan) struct sdma_channel *sdmac = to_sdma_chan(chan); struct sdma_engine *sdma = sdmac->sdma; - sdma_disable_channel_async(chan); + sdma_terminate_all(chan); sdma_channel_synchronize(chan); @@ -1648,7 +1651,7 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, struct dma_tx_state *txstate) { struct sdma_channel *sdmac = to_sdma_chan(chan); - struct sdma_desc *desc; + struct sdma_desc *desc = NULL; u32 residue; struct virt_dma_desc *vd; enum dma_status ret; @@ -1659,19 +1662,23 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan, return ret; spin_lock_irqsave(&sdmac->vc.lock, flags); + vd = vchan_find_desc(&sdmac->vc, cookie); - if (vd) { + if (vd) desc = to_sdma_desc(&vd->tx); + else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) + desc = sdmac->desc; + + if (desc) { if (sdmac->flags & IMX_DMA_SG_LOOP) residue = (desc->num_bd - desc->buf_ptail) * desc->period_len - desc->chn_real_count; else residue = desc->chn_count - desc->chn_real_count; - } else if (sdmac->desc && sdmac->desc->vd.tx.cookie == cookie) { - residue = sdmac->desc->chn_count - sdmac->desc->chn_real_count; } else { residue = 0; } + spin_unlock_irqrestore(&sdmac->vc.lock, flags); dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, @@ -2103,7 +2110,7 @@ static int sdma_probe(struct platform_device *pdev) sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg; sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic; sdma->dma_device.device_config = sdma_config; - sdma->dma_device.device_terminate_all = sdma_disable_channel_async; + sdma->dma_device.device_terminate_all = sdma_terminate_all; sdma->dma_device.device_synchronize = sdma_channel_synchronize; sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS; sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS; diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index a6a6dc432db8..60e9afbb896c 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -556,10 +556,6 @@ static void ioat_dma_remove(struct ioatdma_device *ioat_dma) ioat_kobject_del(ioat_dma); dma_async_device_unregister(dma); - - dma_pool_destroy(ioat_dma->completion_pool); - - INIT_LIST_HEAD(&dma->channels); } /** @@ -589,7 +585,7 @@ static void ioat_enumerate_channels(struct ioatdma_device *ioat_dma) dev_dbg(dev, "%s: xfercap = %d\n", __func__, 1 << xfercap_log); for (i = 0; i < dma->chancnt; i++) { - ioat_chan = devm_kzalloc(dev, sizeof(*ioat_chan), GFP_KERNEL); + ioat_chan = kzalloc(sizeof(*ioat_chan), GFP_KERNEL); if (!ioat_chan) break; @@ -624,12 +620,16 @@ static void ioat_free_chan_resources(struct dma_chan *c) return; ioat_stop(ioat_chan); - ioat_reset_hw(ioat_chan); - /* Put LTR to idle */ - if (ioat_dma->version >= IOAT_VER_3_4) - writeb(IOAT_CHAN_LTR_SWSEL_IDLE, - ioat_chan->reg_base + IOAT_CHAN_LTR_SWSEL_OFFSET); + if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) { + ioat_reset_hw(ioat_chan); + + /* Put LTR to idle */ + if (ioat_dma->version >= IOAT_VER_3_4) + writeb(IOAT_CHAN_LTR_SWSEL_IDLE, + ioat_chan->reg_base + + IOAT_CHAN_LTR_SWSEL_OFFSET); + } spin_lock_bh(&ioat_chan->cleanup_lock); spin_lock_bh(&ioat_chan->prep_lock); @@ -1322,16 +1322,28 @@ static struct pci_driver ioat_pci_driver = { .err_handler = &ioat_err_handler, }; +static void release_ioatdma(struct dma_device *device) +{ + struct ioatdma_device *d = to_ioatdma_device(device); + int i; + + for (i = 0; i < IOAT_MAX_CHANS; i++) + kfree(d->idx[i]); + + dma_pool_destroy(d->completion_pool); + kfree(d); +} + 
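The ioat hunks above move the channel and device allocations off devm precisely so the memory can be freed from a dma_device->device_release callback once the dmaengine core drops its last reference, rather than vanishing at PCI unbind while a client may still hold the channel. A minimal sketch of that wiring follows; struct my_dma and its fields are hypothetical, not the ioat structures.

/* Sketch: tie driver memory lifetime to the dmaengine refcount. */
struct my_dma {
	struct dma_device dma_dev;
	void *chan_mem;
};

static void my_dma_release(struct dma_device *dma)
{
	struct my_dma *d = container_of(dma, struct my_dma, dma_dev);

	kfree(d->chan_mem);
	kfree(d);	/* no dmaengine user can reach it any more */
}

static struct my_dma *my_dma_alloc(void)
{
	struct my_dma *d = kzalloc(sizeof(*d), GFP_KERNEL);

	if (d)
		d->dma_dev.device_release = my_dma_release;
	return d;
}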
static struct ioatdma_device * alloc_ioatdma(struct pci_dev *pdev, void __iomem *iobase) { - struct device *dev = &pdev->dev; - struct ioatdma_device *d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL); + struct ioatdma_device *d = kzalloc(sizeof(*d), GFP_KERNEL); if (!d) return NULL; d->pdev = pdev; d->reg_base = iobase; + d->dma_dev.device_release = release_ioatdma; return d; } @@ -1400,6 +1412,8 @@ static void ioat_remove(struct pci_dev *pdev) if (!device) return; + ioat_shutdown(pdev); + dev_err(&pdev->dev, "Removing dma and dca services\n"); if (device->dca) { unregister_dca_provider(device->dca, &pdev->dev); diff --git a/drivers/dma/mediatek/mtk-uart-apdma.c b/drivers/dma/mediatek/mtk-uart-apdma.c index c20e6bd4e298..29f1223b285a 100644 --- a/drivers/dma/mediatek/mtk-uart-apdma.c +++ b/drivers/dma/mediatek/mtk-uart-apdma.c @@ -430,9 +430,10 @@ static int mtk_uart_apdma_terminate_all(struct dma_chan *chan) spin_lock_irqsave(&c->vc.lock, flags); vchan_get_all_descriptors(&c->vc, &head); - vchan_dma_desc_free_list(&c->vc, &head); spin_unlock_irqrestore(&c->vc.lock, flags); + vchan_dma_desc_free_list(&c->vc, &head); + return 0; } diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c index c2d779daa4b5..b2c2b5e8093c 100644 --- a/drivers/dma/of-dma.c +++ b/drivers/dma/of-dma.c @@ -15,6 +15,8 @@ #include <linux/of.h> #include <linux/of_dma.h> +#include "dmaengine.h" + static LIST_HEAD(of_dma_list); static DEFINE_MUTEX(of_dma_lock); diff --git a/drivers/dma/owl-dma.c b/drivers/dma/owl-dma.c index 023f951189a7..c683051257fd 100644 --- a/drivers/dma/owl-dma.c +++ b/drivers/dma/owl-dma.c @@ -674,10 +674,11 @@ static int owl_dma_terminate_all(struct dma_chan *chan) } vchan_get_all_descriptors(&vchan->vc, &head); - vchan_dma_desc_free_list(&vchan->vc, &head); spin_unlock_irqrestore(&vchan->vc.lock, flags); + vchan_dma_desc_free_list(&vchan->vc, &head); + return 0; } diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6cce9ef61b29..88b884cbb7c1 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -2961,12 +2961,7 @@ static int __maybe_unused pl330_suspend(struct device *dev) { struct amba_device *pcdev = to_amba_device(dev); - pm_runtime_disable(dev); - - if (!pm_runtime_status_suspended(dev)) { - /* amba did not disable the clock */ - amba_pclk_disable(pcdev); - } + pm_runtime_force_suspend(dev); amba_pclk_unprepare(pcdev); return 0; @@ -2981,15 +2976,14 @@ static int __maybe_unused pl330_resume(struct device *dev) if (ret) return ret; - if (!pm_runtime_status_suspended(dev)) - ret = amba_pclk_enable(pcdev); - - pm_runtime_enable(dev); + pm_runtime_force_resume(dev); return ret; } -static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume); +static const struct dev_pm_ops pl330_pm = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(pl330_suspend, pl330_resume) +}; static int pl330_probe(struct amba_device *adev, const struct amba_id *id) diff --git a/drivers/dma/plx_dma.c b/drivers/dma/plx_dma.c new file mode 100644 index 000000000000..db4c5fd453a9 --- /dev/null +++ b/drivers/dma/plx_dma.c @@ -0,0 +1,639 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microsemi Switchtec(tm) PCIe Management Driver + * Copyright (c) 2019, Logan Gunthorpe <logang@deltatee.com> + * Copyright (c) 2019, GigaIO Networks, Inc + */ + +#include "dmaengine.h" + +#include <linux/circ_buf.h> +#include <linux/dmaengine.h> +#include <linux/kref.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/pci.h> + +MODULE_DESCRIPTION("PLX ExpressLane PEX PCI Switch DMA Engine"); +MODULE_VERSION("0.1"); 
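The mtk-uart-apdma and owl-dma hunks above make the same one-line fix: vchan_dma_desc_free_list() can invoke descriptor callbacks and free memory, so it must run after vc.lock is dropped, not under it. The resulting terminate_all shape, as a sketch against the virt-dma helpers:

/* Sketch: detach descriptors under the lock, free them outside it. */
static int terminate_all_sketch(struct dma_chan *chan)
{
	struct virt_dma_chan *vc = to_virt_chan(chan);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);	/* detach under vc.lock */
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);	/* free with the lock dropped */
	return 0;
}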
+MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Logan Gunthorpe"); + +#define PLX_REG_DESC_RING_ADDR 0x214 +#define PLX_REG_DESC_RING_ADDR_HI 0x218 +#define PLX_REG_DESC_RING_NEXT_ADDR 0x21C +#define PLX_REG_DESC_RING_COUNT 0x220 +#define PLX_REG_DESC_RING_LAST_ADDR 0x224 +#define PLX_REG_DESC_RING_LAST_SIZE 0x228 +#define PLX_REG_PREF_LIMIT 0x234 +#define PLX_REG_CTRL 0x238 +#define PLX_REG_CTRL2 0x23A +#define PLX_REG_INTR_CTRL 0x23C +#define PLX_REG_INTR_STATUS 0x23E + +#define PLX_REG_PREF_LIMIT_PREF_FOUR 8 + +#define PLX_REG_CTRL_GRACEFUL_PAUSE BIT(0) +#define PLX_REG_CTRL_ABORT BIT(1) +#define PLX_REG_CTRL_WRITE_BACK_EN BIT(2) +#define PLX_REG_CTRL_START BIT(3) +#define PLX_REG_CTRL_RING_STOP_MODE BIT(4) +#define PLX_REG_CTRL_DESC_MODE_BLOCK (0 << 5) +#define PLX_REG_CTRL_DESC_MODE_ON_CHIP (1 << 5) +#define PLX_REG_CTRL_DESC_MODE_OFF_CHIP (2 << 5) +#define PLX_REG_CTRL_DESC_INVALID BIT(8) +#define PLX_REG_CTRL_GRACEFUL_PAUSE_DONE BIT(9) +#define PLX_REG_CTRL_ABORT_DONE BIT(10) +#define PLX_REG_CTRL_IMM_PAUSE_DONE BIT(12) +#define PLX_REG_CTRL_IN_PROGRESS BIT(30) + +#define PLX_REG_CTRL_RESET_VAL (PLX_REG_CTRL_DESC_INVALID | \ + PLX_REG_CTRL_GRACEFUL_PAUSE_DONE | \ + PLX_REG_CTRL_ABORT_DONE | \ + PLX_REG_CTRL_IMM_PAUSE_DONE) + +#define PLX_REG_CTRL_START_VAL (PLX_REG_CTRL_WRITE_BACK_EN | \ + PLX_REG_CTRL_DESC_MODE_OFF_CHIP | \ + PLX_REG_CTRL_START | \ + PLX_REG_CTRL_RESET_VAL) + +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_64B 0 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_128B 1 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_256B 2 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_512B 3 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_1KB 4 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_2KB 5 +#define PLX_REG_CTRL2_MAX_TXFR_SIZE_4B 7 + +#define PLX_REG_INTR_CRTL_ERROR_EN BIT(0) +#define PLX_REG_INTR_CRTL_INV_DESC_EN BIT(1) +#define PLX_REG_INTR_CRTL_ABORT_DONE_EN BIT(3) +#define PLX_REG_INTR_CRTL_PAUSE_DONE_EN BIT(4) +#define PLX_REG_INTR_CRTL_IMM_PAUSE_DONE_EN BIT(5) + +#define PLX_REG_INTR_STATUS_ERROR BIT(0) +#define PLX_REG_INTR_STATUS_INV_DESC BIT(1) +#define PLX_REG_INTR_STATUS_DESC_DONE BIT(2) +#define PLX_REG_INTR_CRTL_ABORT_DONE BIT(3) + +struct plx_dma_hw_std_desc { + __le32 flags_and_size; + __le16 dst_addr_hi; + __le16 src_addr_hi; + __le32 dst_addr_lo; + __le32 src_addr_lo; +}; + +#define PLX_DESC_SIZE_MASK 0x7ffffff +#define PLX_DESC_FLAG_VALID BIT(31) +#define PLX_DESC_FLAG_INT_WHEN_DONE BIT(30) + +#define PLX_DESC_WB_SUCCESS BIT(30) +#define PLX_DESC_WB_RD_FAIL BIT(29) +#define PLX_DESC_WB_WR_FAIL BIT(28) + +#define PLX_DMA_RING_COUNT 2048 + +struct plx_dma_desc { + struct dma_async_tx_descriptor txd; + struct plx_dma_hw_std_desc *hw; + u32 orig_size; +}; + +struct plx_dma_dev { + struct dma_device dma_dev; + struct dma_chan dma_chan; + struct pci_dev __rcu *pdev; + void __iomem *bar; + struct tasklet_struct desc_task; + + spinlock_t ring_lock; + bool ring_active; + int head; + int tail; + struct plx_dma_hw_std_desc *hw_ring; + dma_addr_t hw_ring_dma; + struct plx_dma_desc **desc_ring; +}; + +static struct plx_dma_dev *chan_to_plx_dma_dev(struct dma_chan *c) +{ + return container_of(c, struct plx_dma_dev, dma_chan); +} + +static struct plx_dma_desc *to_plx_desc(struct dma_async_tx_descriptor *txd) +{ + return container_of(txd, struct plx_dma_desc, txd); +} + +static struct plx_dma_desc *plx_dma_get_desc(struct plx_dma_dev *plxdev, int i) +{ + return plxdev->desc_ring[i & (PLX_DMA_RING_COUNT - 1)]; +} + +static void plx_dma_process_desc(struct plx_dma_dev *plxdev) +{ + struct dmaengine_result res; + struct plx_dma_desc *desc; + u32 
flags; + + spin_lock_bh(&plxdev->ring_lock); + + while (plxdev->tail != plxdev->head) { + desc = plx_dma_get_desc(plxdev, plxdev->tail); + + flags = le32_to_cpu(READ_ONCE(desc->hw->flags_and_size)); + + if (flags & PLX_DESC_FLAG_VALID) + break; + + res.residue = desc->orig_size - (flags & PLX_DESC_SIZE_MASK); + + if (flags & PLX_DESC_WB_SUCCESS) + res.result = DMA_TRANS_NOERROR; + else if (flags & PLX_DESC_WB_WR_FAIL) + res.result = DMA_TRANS_WRITE_FAILED; + else + res.result = DMA_TRANS_READ_FAILED; + + dma_cookie_complete(&desc->txd); + dma_descriptor_unmap(&desc->txd); + dmaengine_desc_get_callback_invoke(&desc->txd, &res); + desc->txd.callback = NULL; + desc->txd.callback_result = NULL; + + plxdev->tail++; + } + + spin_unlock_bh(&plxdev->ring_lock); +} + +static void plx_dma_abort_desc(struct plx_dma_dev *plxdev) +{ + struct dmaengine_result res; + struct plx_dma_desc *desc; + + plx_dma_process_desc(plxdev); + + spin_lock_bh(&plxdev->ring_lock); + + while (plxdev->tail != plxdev->head) { + desc = plx_dma_get_desc(plxdev, plxdev->tail); + + res.residue = desc->orig_size; + res.result = DMA_TRANS_ABORTED; + + dma_cookie_complete(&desc->txd); + dma_descriptor_unmap(&desc->txd); + dmaengine_desc_get_callback_invoke(&desc->txd, &res); + desc->txd.callback = NULL; + desc->txd.callback_result = NULL; + + plxdev->tail++; + } + + spin_unlock_bh(&plxdev->ring_lock); +} + +static void __plx_dma_stop(struct plx_dma_dev *plxdev) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + u32 val; + + val = readl(plxdev->bar + PLX_REG_CTRL); + if (!(val & ~PLX_REG_CTRL_GRACEFUL_PAUSE)) + return; + + writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE, + plxdev->bar + PLX_REG_CTRL); + + while (!time_after(jiffies, timeout)) { + val = readl(plxdev->bar + PLX_REG_CTRL); + if (val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE) + break; + + cpu_relax(); + } + + if (!(val & PLX_REG_CTRL_GRACEFUL_PAUSE_DONE)) + dev_err(plxdev->dma_dev.dev, + "Timeout waiting for graceful pause!\n"); + + writel(PLX_REG_CTRL_RESET_VAL | PLX_REG_CTRL_GRACEFUL_PAUSE, + plxdev->bar + PLX_REG_CTRL); + + writel(0, plxdev->bar + PLX_REG_DESC_RING_COUNT); + writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR); + writel(0, plxdev->bar + PLX_REG_DESC_RING_ADDR_HI); + writel(0, plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR); +} + +static void plx_dma_stop(struct plx_dma_dev *plxdev) +{ + rcu_read_lock(); + if (!rcu_dereference(plxdev->pdev)) { + rcu_read_unlock(); + return; + } + + __plx_dma_stop(plxdev); + + rcu_read_unlock(); +} + +static void plx_dma_desc_task(unsigned long data) +{ + struct plx_dma_dev *plxdev = (void *)data; + + plx_dma_process_desc(plxdev); +} + +static struct dma_async_tx_descriptor *plx_dma_prep_memcpy(struct dma_chan *c, + dma_addr_t dma_dst, dma_addr_t dma_src, size_t len, + unsigned long flags) + __acquires(plxdev->ring_lock) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(c); + struct plx_dma_desc *plxdesc; + + spin_lock_bh(&plxdev->ring_lock); + if (!plxdev->ring_active) + goto err_unlock; + + if (!CIRC_SPACE(plxdev->head, plxdev->tail, PLX_DMA_RING_COUNT)) + goto err_unlock; + + if (len > PLX_DESC_SIZE_MASK) + goto err_unlock; + + plxdesc = plx_dma_get_desc(plxdev, plxdev->head); + plxdev->head++; + + plxdesc->hw->dst_addr_lo = cpu_to_le32(lower_32_bits(dma_dst)); + plxdesc->hw->dst_addr_hi = cpu_to_le16(upper_32_bits(dma_dst)); + plxdesc->hw->src_addr_lo = cpu_to_le32(lower_32_bits(dma_src)); + plxdesc->hw->src_addr_hi = cpu_to_le16(upper_32_bits(dma_src)); + + plxdesc->orig_size = len; + + if (flags & 
DMA_PREP_INTERRUPT) + len |= PLX_DESC_FLAG_INT_WHEN_DONE; + + plxdesc->hw->flags_and_size = cpu_to_le32(len); + plxdesc->txd.flags = flags; + + /* return with the lock held, it will be released in tx_submit */ + + return &plxdesc->txd; + +err_unlock: + /* + * Keep sparse happy by restoring an even lock count on + * this lock. + */ + __acquire(plxdev->ring_lock); + + spin_unlock_bh(&plxdev->ring_lock); + return NULL; +} + +static dma_cookie_t plx_dma_tx_submit(struct dma_async_tx_descriptor *desc) + __releases(plxdev->ring_lock) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(desc->chan); + struct plx_dma_desc *plxdesc = to_plx_desc(desc); + dma_cookie_t cookie; + + cookie = dma_cookie_assign(desc); + + /* + * Ensure the descriptor updates are visible to the dma device + * before setting the valid bit. + */ + wmb(); + + plxdesc->hw->flags_and_size |= cpu_to_le32(PLX_DESC_FLAG_VALID); + + spin_unlock_bh(&plxdev->ring_lock); + + return cookie; +} + +static enum dma_status plx_dma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, struct dma_tx_state *txstate) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + enum dma_status ret; + + ret = dma_cookie_status(chan, cookie, txstate); + if (ret == DMA_COMPLETE) + return ret; + + plx_dma_process_desc(plxdev); + + return dma_cookie_status(chan, cookie, txstate); +} + +static void plx_dma_issue_pending(struct dma_chan *chan) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + + rcu_read_lock(); + if (!rcu_dereference(plxdev->pdev)) { + rcu_read_unlock(); + return; + } + + /* + * Ensure the valid bits are visible before starting the + * DMA engine. + */ + wmb(); + + writew(PLX_REG_CTRL_START_VAL, plxdev->bar + PLX_REG_CTRL); + + rcu_read_unlock(); +} + +static irqreturn_t plx_dma_isr(int irq, void *devid) +{ + struct plx_dma_dev *plxdev = devid; + u32 status; + + status = readw(plxdev->bar + PLX_REG_INTR_STATUS); + + if (!status) + return IRQ_NONE; + + if (status & PLX_REG_INTR_STATUS_DESC_DONE && plxdev->ring_active) + tasklet_schedule(&plxdev->desc_task); + + writew(status, plxdev->bar + PLX_REG_INTR_STATUS); + + return IRQ_HANDLED; +} + +static int plx_dma_alloc_desc(struct plx_dma_dev *plxdev) +{ + struct plx_dma_desc *desc; + int i; + + plxdev->desc_ring = kcalloc(PLX_DMA_RING_COUNT, + sizeof(*plxdev->desc_ring), GFP_KERNEL); + if (!plxdev->desc_ring) + return -ENOMEM; + + for (i = 0; i < PLX_DMA_RING_COUNT; i++) { + desc = kzalloc(sizeof(*desc), GFP_KERNEL); + if (!desc) + goto free_and_exit; + + dma_async_tx_descriptor_init(&desc->txd, &plxdev->dma_chan); + desc->txd.tx_submit = plx_dma_tx_submit; + desc->hw = &plxdev->hw_ring[i]; + + plxdev->desc_ring[i] = desc; + } + + return 0; + +free_and_exit: + for (i = 0; i < PLX_DMA_RING_COUNT; i++) + kfree(plxdev->desc_ring[i]); + kfree(plxdev->desc_ring); + return -ENOMEM; +} + +static int plx_dma_alloc_chan_resources(struct dma_chan *chan) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring); + int rc; + + plxdev->head = plxdev->tail = 0; + plxdev->hw_ring = dma_alloc_coherent(plxdev->dma_dev.dev, ring_sz, + &plxdev->hw_ring_dma, GFP_KERNEL); + if (!plxdev->hw_ring) + return -ENOMEM; + + rc = plx_dma_alloc_desc(plxdev); + if (rc) + goto out_free_hw_ring; + + rcu_read_lock(); + if (!rcu_dereference(plxdev->pdev)) { + rcu_read_unlock(); + rc = -ENODEV; + goto out_free_hw_ring; + } + + writel(PLX_REG_CTRL_RESET_VAL, plxdev->bar + PLX_REG_CTRL); + writel(lower_32_bits(plxdev->hw_ring_dma), + 
plxdev->bar + PLX_REG_DESC_RING_ADDR); + writel(upper_32_bits(plxdev->hw_ring_dma), + plxdev->bar + PLX_REG_DESC_RING_ADDR_HI); + writel(lower_32_bits(plxdev->hw_ring_dma), + plxdev->bar + PLX_REG_DESC_RING_NEXT_ADDR); + writel(PLX_DMA_RING_COUNT, plxdev->bar + PLX_REG_DESC_RING_COUNT); + writel(PLX_REG_PREF_LIMIT_PREF_FOUR, plxdev->bar + PLX_REG_PREF_LIMIT); + + plxdev->ring_active = true; + + rcu_read_unlock(); + + return PLX_DMA_RING_COUNT; + +out_free_hw_ring: + dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring, + plxdev->hw_ring_dma); + return rc; +} + +static void plx_dma_free_chan_resources(struct dma_chan *chan) +{ + struct plx_dma_dev *plxdev = chan_to_plx_dma_dev(chan); + size_t ring_sz = PLX_DMA_RING_COUNT * sizeof(*plxdev->hw_ring); + struct pci_dev *pdev; + int irq = -1; + int i; + + spin_lock_bh(&plxdev->ring_lock); + plxdev->ring_active = false; + spin_unlock_bh(&plxdev->ring_lock); + + plx_dma_stop(plxdev); + + rcu_read_lock(); + pdev = rcu_dereference(plxdev->pdev); + if (pdev) + irq = pci_irq_vector(pdev, 0); + rcu_read_unlock(); + + if (irq > 0) + synchronize_irq(irq); + + tasklet_kill(&plxdev->desc_task); + + plx_dma_abort_desc(plxdev); + + for (i = 0; i < PLX_DMA_RING_COUNT; i++) + kfree(plxdev->desc_ring[i]); + + kfree(plxdev->desc_ring); + dma_free_coherent(plxdev->dma_dev.dev, ring_sz, plxdev->hw_ring, + plxdev->hw_ring_dma); + +} + +static void plx_dma_release(struct dma_device *dma_dev) +{ + struct plx_dma_dev *plxdev = + container_of(dma_dev, struct plx_dma_dev, dma_dev); + + put_device(dma_dev->dev); + kfree(plxdev); +} + +static int plx_dma_create(struct pci_dev *pdev) +{ + struct plx_dma_dev *plxdev; + struct dma_device *dma; + struct dma_chan *chan; + int rc; + + plxdev = kzalloc(sizeof(*plxdev), GFP_KERNEL); + if (!plxdev) + return -ENOMEM; + + rc = request_irq(pci_irq_vector(pdev, 0), plx_dma_isr, 0, + KBUILD_MODNAME, plxdev); + if (rc) { + kfree(plxdev); + return rc; + } + + spin_lock_init(&plxdev->ring_lock); + tasklet_init(&plxdev->desc_task, plx_dma_desc_task, + (unsigned long)plxdev); + + RCU_INIT_POINTER(plxdev->pdev, pdev); + plxdev->bar = pcim_iomap_table(pdev)[0]; + + dma = &plxdev->dma_dev; + dma->chancnt = 1; + INIT_LIST_HEAD(&dma->channels); + dma_cap_set(DMA_MEMCPY, dma->cap_mask); + dma->copy_align = DMAENGINE_ALIGN_1_BYTE; + dma->dev = get_device(&pdev->dev); + + dma->device_alloc_chan_resources = plx_dma_alloc_chan_resources; + dma->device_free_chan_resources = plx_dma_free_chan_resources; + dma->device_prep_dma_memcpy = plx_dma_prep_memcpy; + dma->device_issue_pending = plx_dma_issue_pending; + dma->device_tx_status = plx_dma_tx_status; + dma->device_release = plx_dma_release; + + chan = &plxdev->dma_chan; + chan->device = dma; + dma_cookie_init(chan); + list_add_tail(&chan->device_node, &dma->channels); + + rc = dma_async_device_register(dma); + if (rc) { + pci_err(pdev, "Failed to register dma device: %d\n", rc); + free_irq(pci_irq_vector(pdev, 0), plxdev); + kfree(plxdev); + return rc; + } + + pci_set_drvdata(pdev, plxdev); + + return 0; +} + +static int plx_dma_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + int rc; + + rc = pcim_enable_device(pdev); + if (rc) + return rc; + + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + if (rc) + rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); + if (rc) + rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); + if (rc) + return rc; + + rc = pcim_iomap_regions(pdev, 1, 
KBUILD_MODNAME); + if (rc) + return rc; + + rc = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_ALL_TYPES); + if (rc <= 0) + return rc; + + pci_set_master(pdev); + + rc = plx_dma_create(pdev); + if (rc) + goto err_free_irq_vectors; + + pci_info(pdev, "PLX DMA Channel Registered\n"); + + return 0; + +err_free_irq_vectors: + pci_free_irq_vectors(pdev); + return rc; +} + +static void plx_dma_remove(struct pci_dev *pdev) +{ + struct plx_dma_dev *plxdev = pci_get_drvdata(pdev); + + free_irq(pci_irq_vector(pdev, 0), plxdev); + + rcu_assign_pointer(plxdev->pdev, NULL); + synchronize_rcu(); + + spin_lock_bh(&plxdev->ring_lock); + plxdev->ring_active = false; + spin_unlock_bh(&plxdev->ring_lock); + + __plx_dma_stop(plxdev); + plx_dma_abort_desc(plxdev); + + plxdev->bar = NULL; + dma_async_device_unregister(&plxdev->dma_dev); + + pci_free_irq_vectors(pdev); +} + +static const struct pci_device_id plx_dma_pci_tbl[] = { + { + .vendor = PCI_VENDOR_ID_PLX, + .device = 0x87D0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .class = PCI_CLASS_SYSTEM_OTHER << 8, + .class_mask = 0xFFFFFFFF, + }, + {0} +}; +MODULE_DEVICE_TABLE(pci, plx_dma_pci_tbl); + +static struct pci_driver plx_dma_pci_driver = { + .name = KBUILD_MODNAME, + .id_table = plx_dma_pci_tbl, + .probe = plx_dma_probe, + .remove = plx_dma_remove, +}; +module_pci_driver(plx_dma_pci_driver); diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c index 43da8eeb18ef..8e14c72d03f0 100644 --- a/drivers/dma/s3c24xx-dma.c +++ b/drivers/dma/s3c24xx-dma.c @@ -519,15 +519,6 @@ static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan) s3c24xx_dma_start_next_sg(s3cchan, txd); } -static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma, - struct s3c24xx_dma_chan *s3cchan) -{ - LIST_HEAD(head); - - vchan_get_all_descriptors(&s3cchan->vc, &head); - vchan_dma_desc_free_list(&s3cchan->vc, &head); -} - /* * Try to allocate a physical channel. When successful, assign it to * this virtual channel, and initiate the next descriptor. 
The @@ -709,8 +700,9 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan) { struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); struct s3c24xx_dma_engine *s3cdma = s3cchan->host; + LIST_HEAD(head); unsigned long flags; - int ret = 0; + int ret; spin_lock_irqsave(&s3cchan->vc.lock, flags); @@ -734,7 +726,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan) } /* Dequeue jobs not yet fired as well */ - s3c24xx_dma_free_txd_list(s3cdma, s3cchan); + + vchan_get_all_descriptors(&s3cchan->vc, &head); + + spin_unlock_irqrestore(&s3cchan->vc.lock, flags); + + vchan_dma_desc_free_list(&s3cchan->vc, &head); + + return 0; + unlock: spin_unlock_irqrestore(&s3cchan->vc.lock, flags); @@ -1198,7 +1198,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev) /* Basic sanity check */ if (pdata->num_phy_channels > MAX_DMA_CHANNELS) { - dev_err(&pdev->dev, "to many dma channels %d, max %d\n", + dev_err(&pdev->dev, "too many dma channels %d, max %d\n", pdata->num_phy_channels, MAX_DMA_CHANNELS); return -EINVAL; } diff --git a/drivers/dma/sf-pdma/sf-pdma.c b/drivers/dma/sf-pdma/sf-pdma.c index 465256fe8b1f..6d0bec947636 100644 --- a/drivers/dma/sf-pdma/sf-pdma.c +++ b/drivers/dma/sf-pdma/sf-pdma.c @@ -155,9 +155,9 @@ static void sf_pdma_free_chan_resources(struct dma_chan *dchan) kfree(chan->desc); chan->desc = NULL; vchan_get_all_descriptors(&chan->vchan, &head); - vchan_dma_desc_free_list(&chan->vchan, &head); sf_pdma_disclaim_chan(chan); spin_unlock_irqrestore(&chan->vchan.lock, flags); + vchan_dma_desc_free_list(&chan->vchan, &head); } static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan, @@ -220,8 +220,8 @@ static int sf_pdma_terminate_all(struct dma_chan *dchan) chan->desc = NULL; chan->xfer_err = false; vchan_get_all_descriptors(&chan->vchan, &head); - vchan_dma_desc_free_list(&chan->vchan, &head); spin_unlock_irqrestore(&chan->vchan.lock, flags); + vchan_dma_desc_free_list(&chan->vchan, &head); return 0; } diff --git a/drivers/dma/sun4i-dma.c b/drivers/dma/sun4i-dma.c index e397a50058c8..bbc2bda3b902 100644 --- a/drivers/dma/sun4i-dma.c +++ b/drivers/dma/sun4i-dma.c @@ -669,43 +669,41 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len, dma_addr_t src, dest; u32 endpoints; int nr_periods, offset, plength, i; + u8 ram_type, io_mode, linear_mode; if (!is_slave_direction(dir)) { dev_err(chan2dev(chan), "Invalid DMA direction\n"); return NULL; } - if (vchan->is_dedicated) { - /* - * As we are using this just for audio data, we need to use - * normal DMA. There is nothing stopping us from supporting - * dedicated DMA here as well, so if a client comes up and - * requires it, it will be simple to implement it. 
- */ - dev_err(chan2dev(chan), - "Cyclic transfers are only supported on Normal DMA\n"); - return NULL; - } - contract = generate_dma_contract(); if (!contract) return NULL; contract->is_cyclic = 1; - /* Figure out the endpoints and the address we need */ + if (vchan->is_dedicated) { + io_mode = SUN4I_DDMA_ADDR_MODE_IO; + linear_mode = SUN4I_DDMA_ADDR_MODE_LINEAR; + ram_type = SUN4I_DDMA_DRQ_TYPE_SDRAM; + } else { + io_mode = SUN4I_NDMA_ADDR_MODE_IO; + linear_mode = SUN4I_NDMA_ADDR_MODE_LINEAR; + ram_type = SUN4I_NDMA_DRQ_TYPE_SDRAM; + } + if (dir == DMA_MEM_TO_DEV) { src = buf; dest = sconfig->dst_addr; - endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM) | - SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) | - SUN4I_DMA_CFG_DST_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO); + endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(vchan->endpoint) | + SUN4I_DMA_CFG_DST_ADDR_MODE(io_mode) | + SUN4I_DMA_CFG_SRC_DRQ_TYPE(ram_type); } else { src = sconfig->src_addr; dest = buf; - endpoints = SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) | - SUN4I_DMA_CFG_SRC_ADDR_MODE(SUN4I_NDMA_ADDR_MODE_IO) | - SUN4I_DMA_CFG_DST_DRQ_TYPE(SUN4I_NDMA_DRQ_TYPE_SDRAM); + endpoints = SUN4I_DMA_CFG_DST_DRQ_TYPE(ram_type) | + SUN4I_DMA_CFG_SRC_DRQ_TYPE(vchan->endpoint) | + SUN4I_DMA_CFG_SRC_ADDR_MODE(io_mode); } /* @@ -747,8 +745,13 @@ sun4i_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf, size_t len, dest = buf + offset; /* Make the promise */ - promise = generate_ndma_promise(chan, src, dest, - plength, sconfig, dir); + if (vchan->is_dedicated) + promise = generate_ddma_promise(chan, src, dest, + plength, sconfig); + else + promise = generate_ndma_promise(chan, src, dest, + plength, sconfig, dir); + if (!promise) { /* TODO: should we free everything? */ return NULL; @@ -885,12 +888,13 @@ static int sun4i_dma_terminate_all(struct dma_chan *chan) } spin_lock_irqsave(&vchan->vc.lock, flags); - vchan_dma_desc_free_list(&vchan->vc, &head); /* Clear these so the vchan is usable again */ vchan->processing = NULL; vchan->pchan = NULL; spin_unlock_irqrestore(&vchan->vc.lock, flags); + vchan_dma_desc_free_list(&vchan->vc, &head); + return 0; } diff --git a/drivers/dma/ti/Kconfig b/drivers/dma/ti/Kconfig index d507c24fbf31..f76e06651f80 100644 --- a/drivers/dma/ti/Kconfig +++ b/drivers/dma/ti/Kconfig @@ -34,5 +34,29 @@ config DMA_OMAP Enable support for the TI sDMA (System DMA or DMA4) controller. This DMA engine is found on OMAP and DRA7xx parts. +config TI_K3_UDMA + bool "Texas Instruments UDMA support" + depends on ARCH_K3 || COMPILE_TEST + depends on TI_SCI_PROTOCOL + depends on TI_SCI_INTA_IRQCHIP + select DMA_ENGINE + select DMA_VIRTUAL_CHANNELS + select TI_K3_RINGACC + select TI_K3_PSIL + help + Enable support for the TI UDMA (Unified DMA) controller. This + DMA engine is used in AM65x and j721e. + +config TI_K3_UDMA_GLUE_LAYER + bool "Texas Instruments UDMA Glue layer for non DMAengine users" + depends on ARCH_K3 || COMPILE_TEST + depends on TI_K3_UDMA + help + Say y here to support the K3 NAVSS DMA glue interface + If unsure, say N. 
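For non-DMAengine users the glue layer is consumed directly as a C API rather than through dma_request_chan(). A minimal, hedged sketch of the TX side as a client might drive it, built only from the k3-udma-glue.c interfaces added later in this patch; the channel name "tx0" and the ring/swdata sizes are illustrative assumptions, not values from this commit:

/*
 * Hedged sketch of a glue-layer TX client. "tx0" must match an entry in
 * the client's "dma-names" DT property; the sizes below are assumptions.
 */
static int example_glue_tx(struct device *dev,
			   struct cppi5_host_desc_t *desc, dma_addr_t desc_dma)
{
	struct k3_udma_glue_tx_channel_cfg cfg = { };
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	cfg.swdata_size = 16;		/* assumed per-descriptor swdata bytes */
	cfg.tx_cfg.size = 128;		/* assumed TX ring depth */
	cfg.txcq_cfg.size = 128;	/* assumed completion ring depth */

	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
	if (IS_ERR(tx_chn))
		return PTR_ERR(tx_chn);

	ret = k3_udma_glue_enable_tx_chn(tx_chn);
	if (ret)
		return ret;

	/* queue one descriptor; completions are popped from the TXCQ ring */
	return k3_udma_glue_push_tx_chn(tx_chn, desc, desc_dma);
}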
+ +config TI_K3_PSIL + bool + config TI_DMA_CROSSBAR bool diff --git a/drivers/dma/ti/Makefile b/drivers/dma/ti/Makefile index 113e59ec9c32..9a29a107e374 100644 --- a/drivers/dma/ti/Makefile +++ b/drivers/dma/ti/Makefile @@ -2,4 +2,7 @@ obj-$(CONFIG_TI_CPPI41) += cppi41.o obj-$(CONFIG_TI_EDMA) += edma.o obj-$(CONFIG_DMA_OMAP) += omap-dma.o +obj-$(CONFIG_TI_K3_UDMA) += k3-udma.o +obj-$(CONFIG_TI_K3_UDMA_GLUE_LAYER) += k3-udma-glue.o +obj-$(CONFIG_TI_K3_PSIL) += k3-psil.o k3-psil-am654.o k3-psil-j721e.o obj-$(CONFIG_TI_DMA_CROSSBAR) += dma-crossbar.o diff --git a/drivers/dma/ti/edma.c b/drivers/dma/ti/edma.c index 756a3c951dc7..03a7f647f7b2 100644 --- a/drivers/dma/ti/edma.c +++ b/drivers/dma/ti/edma.c @@ -2289,13 +2289,6 @@ static int edma_probe(struct platform_device *pdev) if (!info) return -ENODEV; - pm_runtime_enable(dev); - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - dev_err(dev, "pm_runtime_get_sync() failed\n"); - return ret; - } - ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) return ret; @@ -2326,27 +2319,33 @@ static int edma_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ecc); + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync() failed\n"); + pm_runtime_disable(dev); + return ret; + } + /* Get eDMA3 configuration from IP */ ret = edma_setup_from_hw(dev, info, ecc); if (ret) - return ret; + goto err_disable_pm; /* Allocate memory based on the information we got from the IP */ ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, sizeof(*ecc->slave_chans), GFP_KERNEL); - if (!ecc->slave_chans) - return -ENOMEM; ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), sizeof(unsigned long), GFP_KERNEL); - if (!ecc->slot_inuse) - return -ENOMEM; ecc->channels_mask = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_channels), sizeof(unsigned long), GFP_KERNEL); - if (!ecc->channels_mask) - return -ENOMEM; + if (!ecc->slave_chans || !ecc->slot_inuse || !ecc->channels_mask) { + ret = -ENOMEM; + goto err_disable_pm; + } /* Mark all channels available initially */ bitmap_fill(ecc->channels_mask, ecc->num_channels); @@ -2388,7 +2387,7 @@ static int edma_probe(struct platform_device *pdev) ecc); if (ret) { dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); - return ret; + goto err_disable_pm; } ecc->ccint = irq; } @@ -2404,7 +2403,7 @@ static int edma_probe(struct platform_device *pdev) ecc); if (ret) { dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); - return ret; + goto err_disable_pm; } ecc->ccerrint = irq; } @@ -2412,7 +2411,8 @@ static int edma_probe(struct platform_device *pdev) ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); if (ecc->dummy_slot < 0) { dev_err(dev, "Can't allocate PaRAM dummy slot\n"); - return ecc->dummy_slot; + ret = ecc->dummy_slot; + goto err_disable_pm; } queue_priority_mapping = info->queue_priority_mapping; @@ -2512,6 +2512,9 @@ static int edma_probe(struct platform_device *pdev) err_reg1: edma_free_slot(ecc, ecc->dummy_slot); +err_disable_pm: + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return ret; } @@ -2542,6 +2545,8 @@ static int edma_remove(struct platform_device *pdev) if (ecc->dma_memcpy) dma_async_device_unregister(ecc->dma_memcpy); edma_free_slot(ecc, ecc->dummy_slot); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); return 0; } diff --git a/drivers/dma/ti/k3-psil-am654.c b/drivers/dma/ti/k3-psil-am654.c new file mode 100644 index 000000000000..a896a15908cf --- /dev/null +++ b/drivers/dma/ti/k3-psil-am654.c @@ 
-0,0 +1,175 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + }, \ + } + +#define PSIL_SA2UL(x, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .notdpkt = tx, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep am654_src_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0x4000, 0), + PSIL_SA2UL(0x4001, 0), + PSIL_SA2UL(0x4002, 0), + PSIL_SA2UL(0x4003, 0), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0x4100), + PSIL_ETHERNET(0x4101), + PSIL_ETHERNET(0x4102), + PSIL_ETHERNET(0x4103), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0x4200), + PSIL_ETHERNET(0x4201), + PSIL_ETHERNET(0x4202), + PSIL_ETHERNET(0x4203), + /* PRU_ICSSG2 */ + PSIL_ETHERNET(0x4300), + PSIL_ETHERNET(0x4301), + PSIL_ETHERNET(0x4302), + PSIL_ETHERNET(0x4303), + /* PDMA0 - McASPs */ + PSIL_PDMA_XY_TR(0x4400), + PSIL_PDMA_XY_TR(0x4401), + PSIL_PDMA_XY_TR(0x4402), + /* PDMA1 - SPI0-4 */ + PSIL_PDMA_XY_PKT(0x4500), + PSIL_PDMA_XY_PKT(0x4501), + PSIL_PDMA_XY_PKT(0x4502), + PSIL_PDMA_XY_PKT(0x4503), + PSIL_PDMA_XY_PKT(0x4504), + PSIL_PDMA_XY_PKT(0x4505), + PSIL_PDMA_XY_PKT(0x4506), + PSIL_PDMA_XY_PKT(0x4507), + PSIL_PDMA_XY_PKT(0x4508), + PSIL_PDMA_XY_PKT(0x4509), + PSIL_PDMA_XY_PKT(0x450a), + PSIL_PDMA_XY_PKT(0x450b), + PSIL_PDMA_XY_PKT(0x450c), + PSIL_PDMA_XY_PKT(0x450d), + PSIL_PDMA_XY_PKT(0x450e), + PSIL_PDMA_XY_PKT(0x450f), + PSIL_PDMA_XY_PKT(0x4510), + PSIL_PDMA_XY_PKT(0x4511), + PSIL_PDMA_XY_PKT(0x4512), + PSIL_PDMA_XY_PKT(0x4513), + /* PDMA1 - USART0-2 */ + PSIL_PDMA_XY_PKT(0x4514), + PSIL_PDMA_XY_PKT(0x4515), + PSIL_PDMA_XY_PKT(0x4516), + /* CPSW0 */ + PSIL_ETHERNET(0x7000), + /* MCU_PDMA0 - ADCs */ + PSIL_PDMA_XY_TR(0x7100), + PSIL_PDMA_XY_TR(0x7101), + PSIL_PDMA_XY_TR(0x7102), + PSIL_PDMA_XY_TR(0x7103), + /* MCU_PDMA1 - MCU_SPI0-2 */ + PSIL_PDMA_XY_PKT(0x7200), + PSIL_PDMA_XY_PKT(0x7201), + PSIL_PDMA_XY_PKT(0x7202), + PSIL_PDMA_XY_PKT(0x7203), + PSIL_PDMA_XY_PKT(0x7204), + PSIL_PDMA_XY_PKT(0x7205), + PSIL_PDMA_XY_PKT(0x7206), + PSIL_PDMA_XY_PKT(0x7207), + PSIL_PDMA_XY_PKT(0x7208), + PSIL_PDMA_XY_PKT(0x7209), + PSIL_PDMA_XY_PKT(0x720a), + PSIL_PDMA_XY_PKT(0x720b), + /* MCU_PDMA1 - MCU_USART0 */ + PSIL_PDMA_XY_PKT(0x7212), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep am654_dst_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0xc000, 1), + PSIL_SA2UL(0xc001, 1), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0xc100), + PSIL_ETHERNET(0xc101), + PSIL_ETHERNET(0xc102), + PSIL_ETHERNET(0xc103), + PSIL_ETHERNET(0xc104), + PSIL_ETHERNET(0xc105), + PSIL_ETHERNET(0xc106), + PSIL_ETHERNET(0xc107), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0xc200), + PSIL_ETHERNET(0xc201), + PSIL_ETHERNET(0xc202), + PSIL_ETHERNET(0xc203), + PSIL_ETHERNET(0xc204), + PSIL_ETHERNET(0xc205), + PSIL_ETHERNET(0xc206), + PSIL_ETHERNET(0xc207), + /* PRU_ICSSG2 */ + PSIL_ETHERNET(0xc300), + PSIL_ETHERNET(0xc301), + 
PSIL_ETHERNET(0xc302), + PSIL_ETHERNET(0xc303), + PSIL_ETHERNET(0xc304), + PSIL_ETHERNET(0xc305), + PSIL_ETHERNET(0xc306), + PSIL_ETHERNET(0xc307), + /* CPSW0 */ + PSIL_ETHERNET(0xf000), + PSIL_ETHERNET(0xf001), + PSIL_ETHERNET(0xf002), + PSIL_ETHERNET(0xf003), + PSIL_ETHERNET(0xf004), + PSIL_ETHERNET(0xf005), + PSIL_ETHERNET(0xf006), + PSIL_ETHERNET(0xf007), +}; + +struct psil_ep_map am654_ep_map = { + .name = "am654", + .src = am654_src_ep_map, + .src_count = ARRAY_SIZE(am654_src_ep_map), + .dst = am654_dst_ep_map, + .dst_count = ARRAY_SIZE(am654_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-j721e.c b/drivers/dma/ti/k3-psil-j721e.c new file mode 100644 index 000000000000..e3cfd5f66842 --- /dev/null +++ b/drivers/dma/ti/k3-psil-j721e.c @@ -0,0 +1,222 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> + +#include "k3-psil-priv.h" + +#define PSIL_PDMA_XY_TR(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + }, \ + } + +#define PSIL_PDMA_XY_PKT(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pkt_mode = 1, \ + }, \ + } + +#define PSIL_PDMA_MCASP(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_PDMA_XY, \ + .pdma_acc32 = 1, \ + .pdma_burst = 1, \ + }, \ + } + +#define PSIL_ETHERNET(x) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 16, \ + }, \ + } + +#define PSIL_SA2UL(x, tx) \ + { \ + .thread_id = x, \ + .ep_config = { \ + .ep_type = PSIL_EP_NATIVE, \ + .pkt_mode = 1, \ + .needs_epib = 1, \ + .psd_size = 64, \ + .notdpkt = tx, \ + }, \ + } + +/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */ +static struct psil_ep j721e_src_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0x4000, 0), + PSIL_SA2UL(0x4001, 0), + PSIL_SA2UL(0x4002, 0), + PSIL_SA2UL(0x4003, 0), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0x4100), + PSIL_ETHERNET(0x4101), + PSIL_ETHERNET(0x4102), + PSIL_ETHERNET(0x4103), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0x4200), + PSIL_ETHERNET(0x4201), + PSIL_ETHERNET(0x4202), + PSIL_ETHERNET(0x4203), + /* PDMA6 (PSIL_PDMA_MCASP_G0) - McASP0-2 */ + PSIL_PDMA_MCASP(0x4400), + PSIL_PDMA_MCASP(0x4401), + PSIL_PDMA_MCASP(0x4402), + /* PDMA7 (PSIL_PDMA_MCASP_G1) - McASP3-11 */ + PSIL_PDMA_MCASP(0x4500), + PSIL_PDMA_MCASP(0x4501), + PSIL_PDMA_MCASP(0x4502), + PSIL_PDMA_MCASP(0x4503), + PSIL_PDMA_MCASP(0x4504), + PSIL_PDMA_MCASP(0x4505), + PSIL_PDMA_MCASP(0x4506), + PSIL_PDMA_MCASP(0x4507), + PSIL_PDMA_MCASP(0x4508), + /* PDMA8 (PDMA_MISC_G0) - SPI0-1 */ + PSIL_PDMA_XY_PKT(0x4600), + PSIL_PDMA_XY_PKT(0x4601), + PSIL_PDMA_XY_PKT(0x4602), + PSIL_PDMA_XY_PKT(0x4603), + PSIL_PDMA_XY_PKT(0x4604), + PSIL_PDMA_XY_PKT(0x4605), + PSIL_PDMA_XY_PKT(0x4606), + PSIL_PDMA_XY_PKT(0x4607), + /* PDMA9 (PDMA_MISC_G1) - SPI2-3 */ + PSIL_PDMA_XY_PKT(0x460c), + PSIL_PDMA_XY_PKT(0x460d), + PSIL_PDMA_XY_PKT(0x460e), + PSIL_PDMA_XY_PKT(0x460f), + PSIL_PDMA_XY_PKT(0x4610), + PSIL_PDMA_XY_PKT(0x4611), + PSIL_PDMA_XY_PKT(0x4612), + PSIL_PDMA_XY_PKT(0x4613), + /* PDMA10 (PDMA_MISC_G2) - SPI4-5 */ + PSIL_PDMA_XY_PKT(0x4618), + PSIL_PDMA_XY_PKT(0x4619), + PSIL_PDMA_XY_PKT(0x461a), + PSIL_PDMA_XY_PKT(0x461b), + PSIL_PDMA_XY_PKT(0x461c), + PSIL_PDMA_XY_PKT(0x461d), + PSIL_PDMA_XY_PKT(0x461e), + PSIL_PDMA_XY_PKT(0x461f), + /* PDMA11 (PDMA_MISC_G3) */ + PSIL_PDMA_XY_PKT(0x4624), + PSIL_PDMA_XY_PKT(0x4625), + 
PSIL_PDMA_XY_PKT(0x4626), + PSIL_PDMA_XY_PKT(0x4627), + PSIL_PDMA_XY_PKT(0x4628), + PSIL_PDMA_XY_PKT(0x4629), + PSIL_PDMA_XY_PKT(0x4630), + PSIL_PDMA_XY_PKT(0x463a), + /* PDMA13 (PDMA_USART_G0) - UART0-1 */ + PSIL_PDMA_XY_PKT(0x4700), + PSIL_PDMA_XY_PKT(0x4701), + /* PDMA14 (PDMA_USART_G1) - UART2-3 */ + PSIL_PDMA_XY_PKT(0x4702), + PSIL_PDMA_XY_PKT(0x4703), + /* PDMA15 (PDMA_USART_G2) - UART4-9 */ + PSIL_PDMA_XY_PKT(0x4704), + PSIL_PDMA_XY_PKT(0x4705), + PSIL_PDMA_XY_PKT(0x4706), + PSIL_PDMA_XY_PKT(0x4707), + PSIL_PDMA_XY_PKT(0x4708), + PSIL_PDMA_XY_PKT(0x4709), + /* CPSW9 */ + PSIL_ETHERNET(0x4a00), + /* CPSW0 */ + PSIL_ETHERNET(0x7000), + /* MCU_PDMA0 (MCU_PDMA_MISC_G0) - SPI0 */ + PSIL_PDMA_XY_PKT(0x7100), + PSIL_PDMA_XY_PKT(0x7101), + PSIL_PDMA_XY_PKT(0x7102), + PSIL_PDMA_XY_PKT(0x7103), + /* MCU_PDMA1 (MCU_PDMA_MISC_G1) - SPI1-2 */ + PSIL_PDMA_XY_PKT(0x7200), + PSIL_PDMA_XY_PKT(0x7201), + PSIL_PDMA_XY_PKT(0x7202), + PSIL_PDMA_XY_PKT(0x7203), + PSIL_PDMA_XY_PKT(0x7204), + PSIL_PDMA_XY_PKT(0x7205), + PSIL_PDMA_XY_PKT(0x7206), + PSIL_PDMA_XY_PKT(0x7207), + /* MCU_PDMA2 (MCU_PDMA_MISC_G2) - UART0 */ + PSIL_PDMA_XY_PKT(0x7300), + /* MCU_PDMA_ADC - ADC0-1 */ + PSIL_PDMA_XY_TR(0x7400), + PSIL_PDMA_XY_TR(0x7401), + PSIL_PDMA_XY_TR(0x7402), + PSIL_PDMA_XY_TR(0x7403), + /* SA2UL */ + PSIL_SA2UL(0x7500, 0), + PSIL_SA2UL(0x7501, 0), +}; + +/* PSI-L destination thread IDs, used for TX (DMA_MEM_TO_DEV) */ +static struct psil_ep j721e_dst_ep_map[] = { + /* SA2UL */ + PSIL_SA2UL(0xc000, 1), + PSIL_SA2UL(0xc001, 1), + /* PRU_ICSSG0 */ + PSIL_ETHERNET(0xc100), + PSIL_ETHERNET(0xc101), + PSIL_ETHERNET(0xc102), + PSIL_ETHERNET(0xc103), + PSIL_ETHERNET(0xc104), + PSIL_ETHERNET(0xc105), + PSIL_ETHERNET(0xc106), + PSIL_ETHERNET(0xc107), + /* PRU_ICSSG1 */ + PSIL_ETHERNET(0xc200), + PSIL_ETHERNET(0xc201), + PSIL_ETHERNET(0xc202), + PSIL_ETHERNET(0xc203), + PSIL_ETHERNET(0xc204), + PSIL_ETHERNET(0xc205), + PSIL_ETHERNET(0xc206), + PSIL_ETHERNET(0xc207), + /* CPSW9 */ + PSIL_ETHERNET(0xca00), + PSIL_ETHERNET(0xca01), + PSIL_ETHERNET(0xca02), + PSIL_ETHERNET(0xca03), + PSIL_ETHERNET(0xca04), + PSIL_ETHERNET(0xca05), + PSIL_ETHERNET(0xca06), + PSIL_ETHERNET(0xca07), + /* CPSW0 */ + PSIL_ETHERNET(0xf000), + PSIL_ETHERNET(0xf001), + PSIL_ETHERNET(0xf002), + PSIL_ETHERNET(0xf003), + PSIL_ETHERNET(0xf004), + PSIL_ETHERNET(0xf005), + PSIL_ETHERNET(0xf006), + PSIL_ETHERNET(0xf007), + /* SA2UL */ + PSIL_SA2UL(0xf500, 1), +}; + +struct psil_ep_map j721e_ep_map = { + .name = "j721e", + .src = j721e_src_ep_map, + .src_count = ARRAY_SIZE(j721e_src_ep_map), + .dst = j721e_dst_ep_map, + .dst_count = ARRAY_SIZE(j721e_dst_ep_map), +}; diff --git a/drivers/dma/ti/k3-psil-priv.h b/drivers/dma/ti/k3-psil-priv.h new file mode 100644 index 000000000000..a1f389ca371e --- /dev/null +++ b/drivers/dma/ti/k3-psil-priv.h @@ -0,0 +1,43 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef K3_PSIL_PRIV_H_ +#define K3_PSIL_PRIV_H_ + +#include <linux/dma/k3-psil.h> + +struct psil_ep { + u32 thread_id; + struct psil_endpoint_config ep_config; +}; + +/** + * struct psil_ep_map - PSI-L thread ID configuration maps + * @name: Name of the map, set it to the name of the SoC + * @src: Array of source PSI-L thread configurations + * @src_count: Number of entries in the src array + * @dst: Array of destination PSI-L thread configurations + * @dst_count: Number of entries in the dst array + * + * In case of symmetric configuration for a matching src/dst thread (for 
example + * 0x4400 and 0xc400) only the src configuration can be present. If no dst + * configuration is found, the code will look for (dst_thread_id & ~0x8000) to find + * the symmetric match. + */ +struct psil_ep_map { + char *name; + struct psil_ep *src; + int src_count; + struct psil_ep *dst; + int dst_count; +}; + +struct psil_endpoint_config *psil_get_ep_config(u32 thread_id); + +/* SoC PSI-L endpoint maps */ +extern struct psil_ep_map am654_ep_map; +extern struct psil_ep_map j721e_ep_map; + +#endif /* K3_PSIL_PRIV_H_ */ diff --git a/drivers/dma/ti/k3-psil.c b/drivers/dma/ti/k3-psil.c new file mode 100644 index 000000000000..d7b965049ccb --- /dev/null +++ b/drivers/dma/ti/k3-psil.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> +#include <linux/device.h> +#include <linux/init.h> +#include <linux/mutex.h> +#include <linux/of.h> + +#include "k3-psil-priv.h" + +static DEFINE_MUTEX(ep_map_mutex); +static struct psil_ep_map *soc_ep_map; + +struct psil_endpoint_config *psil_get_ep_config(u32 thread_id) +{ + int i; + + mutex_lock(&ep_map_mutex); + if (!soc_ep_map) { + if (of_machine_is_compatible("ti,am654")) { + soc_ep_map = &am654_ep_map; + } else if (of_machine_is_compatible("ti,j721e")) { + soc_ep_map = &j721e_ep_map; + } else { + pr_err("PSIL: No compatible machine found for map\n"); + mutex_unlock(&ep_map_mutex); + return ERR_PTR(-ENOTSUPP); + } + pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name); + } + mutex_unlock(&ep_map_mutex); + + if (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET && soc_ep_map->dst) { + /* check in destination thread map */ + for (i = 0; i < soc_ep_map->dst_count; i++) { + if (soc_ep_map->dst[i].thread_id == thread_id) + return &soc_ep_map->dst[i].ep_config; + } + } + + thread_id &= ~K3_PSIL_DST_THREAD_ID_OFFSET; + if (soc_ep_map->src) { + for (i = 0; i < soc_ep_map->src_count; i++) { + if (soc_ep_map->src[i].thread_id == thread_id) + return &soc_ep_map->src[i].ep_config; + } + } + + return ERR_PTR(-ENOENT); +} +EXPORT_SYMBOL_GPL(psil_get_ep_config); + +int psil_set_new_ep_config(struct device *dev, const char *name, + struct psil_endpoint_config *ep_config) +{ + struct psil_endpoint_config *dst_ep_config; + struct of_phandle_args dma_spec; + u32 thread_id; + int index; + + if (!dev || !dev->of_node) + return -EINVAL; + + index = of_property_match_string(dev->of_node, "dma-names", name); + if (index < 0) + return index; + + if (of_parse_phandle_with_args(dev->of_node, "dmas", "#dma-cells", + index, &dma_spec)) + return -ENOENT; + + thread_id = dma_spec.args[0]; + + dst_ep_config = psil_get_ep_config(thread_id); + if (IS_ERR(dst_ep_config)) { + pr_err("PSIL: thread ID 0x%04x not defined in map\n", + thread_id); + of_node_put(dma_spec.np); + return PTR_ERR(dst_ep_config); + } + + memcpy(dst_ep_config, ep_config, sizeof(*dst_ep_config)); + + of_node_put(dma_spec.np); + return 0; +} +EXPORT_SYMBOL_GPL(psil_set_new_ep_config); diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c new file mode 100644 index 000000000000..c1511298ece2 --- /dev/null +++ b/drivers/dma/ti/k3-udma-glue.c @@ -0,0 +1,1198 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * K3 NAVSS DMA glue interface + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * + */ + +#include <linux/atomic.h> +#include <linux/delay.h> +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/init.h> +#include 
<linux/of.h> +#include <linux/platform_device.h> +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/dma/ti-cppi5.h> +#include <linux/dma/k3-udma-glue.h> + +#include "k3-udma.h" +#include "k3-psil-priv.h" + +struct k3_udma_glue_common { + struct device *dev; + struct udma_dev *udmax; + const struct udma_tisci_rm *tisci_rm; + struct k3_ringacc *ringacc; + u32 src_thread; + u32 dst_thread; + + u32 hdesc_size; + bool epib; + u32 psdata_size; + u32 swdata_size; +}; + +struct k3_udma_glue_tx_channel { + struct k3_udma_glue_common common; + + struct udma_tchan *udma_tchanx; + int udma_tchan_id; + + struct k3_ring *ringtx; + struct k3_ring *ringtxcq; + + bool psil_paired; + + int virq; + + atomic_t free_pkts; + bool tx_pause_on_err; + bool tx_filt_einfo; + bool tx_filt_pswords; + bool tx_supr_tdpkt; +}; + +struct k3_udma_glue_rx_flow { + struct udma_rflow *udma_rflow; + int udma_rflow_id; + struct k3_ring *ringrx; + struct k3_ring *ringrxfdq; + + int virq; +}; + +struct k3_udma_glue_rx_channel { + struct k3_udma_glue_common common; + + struct udma_rchan *udma_rchanx; + int udma_rchan_id; + bool remote; + + bool psil_paired; + + u32 swdata_size; + int flow_id_base; + + struct k3_udma_glue_rx_flow *flows; + u32 flow_num; + u32 flows_ready; +}; + +#define K3_UDMAX_TDOWN_TIMEOUT_US 1000 + +static int of_k3_udma_glue_parse(struct device_node *udmax_np, + struct k3_udma_glue_common *common) +{ + common->ringacc = of_k3_ringacc_get_by_phandle(udmax_np, + "ti,ringacc"); + if (IS_ERR(common->ringacc)) + return PTR_ERR(common->ringacc); + + common->udmax = of_xudma_dev_get(udmax_np, NULL); + if (IS_ERR(common->udmax)) + return PTR_ERR(common->udmax); + + common->tisci_rm = xudma_dev_get_tisci_rm(common->udmax); + + return 0; +} + +static int of_k3_udma_glue_parse_chn(struct device_node *chn_np, + const char *name, struct k3_udma_glue_common *common, + bool tx_chn) +{ + struct psil_endpoint_config *ep_config; + struct of_phandle_args dma_spec; + u32 thread_id; + int ret = 0; + int index; + + if (unlikely(!name)) + return -EINVAL; + + index = of_property_match_string(chn_np, "dma-names", name); + if (index < 0) + return index; + + if (of_parse_phandle_with_args(chn_np, "dmas", "#dma-cells", index, + &dma_spec)) + return -ENOENT; + + thread_id = dma_spec.args[0]; + + if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { + ret = -EINVAL; + goto out_put_spec; + } + + if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) { + ret = -EINVAL; + goto out_put_spec; + } + + /* get psil endpoint config */ + ep_config = psil_get_ep_config(thread_id); + if (IS_ERR(ep_config)) { + dev_err(common->dev, + "No configuration for psi-l thread 0x%04x\n", + thread_id); + ret = PTR_ERR(ep_config); + goto out_put_spec; + } + + common->epib = ep_config->needs_epib; + common->psdata_size = ep_config->psd_size; + + if (tx_chn) + common->dst_thread = thread_id; + else + common->src_thread = thread_id; + + ret = of_k3_udma_glue_parse(dma_spec.np, common); + +out_put_spec: + of_node_put(dma_spec.np); + return ret; +}; + +static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + struct device *dev = tx_chn->common.dev; + + dev_dbg(dev, "dump_tx_chn:\n" + "udma_tchan_id: %d\n" + "src_thread: %08x\n" + "dst_thread: %08x\n", + tx_chn->udma_tchan_id, + tx_chn->common.src_thread, + tx_chn->common.dst_thread); +} + +static void k3_udma_glue_dump_tx_rt_chn(struct k3_udma_glue_tx_channel *chn, + char *mark) +{ + struct device *dev = chn->common.dev; + + dev_dbg(dev, "=== dump ===> %s\n", mark); + 
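	/*
	 * The five reads below snapshot the channel realtime (RT) state:
	 * the CTL register (enable/teardown bits), the PSI-L peer enable,
	 * and the PCNT/BCNT/SBCNT transfer counters; the dump is emitted
	 * around enable/disable/teardown transitions, tagged by the "mark"
	 * string. (Register roles inferred from the names used here, not
	 * quoted from the TRM.)
	 */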
dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_CTL_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PEER_RT_EN_REG, + xudma_tchanrt_read(chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_PCNT_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_PCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_BCNT_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_BCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_TCHAN_RT_SBCNT_REG, + xudma_tchanrt_read(chn->udma_tchanx, UDMA_TCHAN_RT_SBCNT_REG)); +} + +static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + const struct udma_tisci_rm *tisci_rm = tx_chn->common.tisci_rm; + struct ti_sci_msg_rm_udmap_tx_ch_cfg req; + + memset(&req, 0, sizeof(req)); + + req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + req.index = tx_chn->udma_tchan_id; + if (tx_chn->tx_pause_on_err) + req.tx_pause_on_err = 1; + if (tx_chn->tx_filt_einfo) + req.tx_filt_einfo = 1; + if (tx_chn->tx_filt_pswords) + req.tx_filt_pswords = 1; + req.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + if (tx_chn->tx_supr_tdpkt) + req.tx_supr_tdpkt = 1; + req.tx_fetch_size = tx_chn->common.hdesc_size >> 2; + req.txcq_qnum = k3_ringacc_get_ring_id(tx_chn->ringtxcq); + + return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req); +} + +struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev, + const char *name, struct k3_udma_glue_tx_channel_cfg *cfg) +{ + struct k3_udma_glue_tx_channel *tx_chn; + int ret; + + tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL); + if (!tx_chn) + return ERR_PTR(-ENOMEM); + + tx_chn->common.dev = dev; + tx_chn->common.swdata_size = cfg->swdata_size; + tx_chn->tx_pause_on_err = cfg->tx_pause_on_err; + tx_chn->tx_filt_einfo = cfg->tx_filt_einfo; + tx_chn->tx_filt_pswords = cfg->tx_filt_pswords; + tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt; + + /* parse of udmap channel */ + ret = of_k3_udma_glue_parse_chn(dev->of_node, name, + &tx_chn->common, true); + if (ret) + goto err; + + tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib, + tx_chn->common.psdata_size, + tx_chn->common.swdata_size); + + /* request and cfg UDMAP TX channel */ + tx_chn->udma_tchanx = xudma_tchan_get(tx_chn->common.udmax, -1); + if (IS_ERR(tx_chn->udma_tchanx)) { + ret = PTR_ERR(tx_chn->udma_tchanx); + dev_err(dev, "UDMAX tchanx get err %d\n", ret); + goto err; + } + tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx); + + atomic_set(&tx_chn->free_pkts, cfg->txcq_cfg.size); + + /* request and cfg rings */ + tx_chn->ringtx = k3_ringacc_request_ring(tx_chn->common.ringacc, + tx_chn->udma_tchan_id, 0); + if (!tx_chn->ringtx) { + ret = -ENODEV; + dev_err(dev, "Failed to get TX ring %u\n", + tx_chn->udma_tchan_id); + goto err; + } + + tx_chn->ringtxcq = k3_ringacc_request_ring(tx_chn->common.ringacc, + -1, 0); + if (!tx_chn->ringtxcq) { + ret = -ENODEV; + dev_err(dev, "Failed to get TXCQ ring\n"); + goto err; + } + + ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringtx 
%d\n", ret); + goto err; + } + + ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringtxcq %d\n", ret); + goto err; + } + + /* request and cfg psi-l */ + tx_chn->common.src_thread = + xudma_dev_get_psil_base(tx_chn->common.udmax) + + tx_chn->udma_tchan_id; + + ret = k3_udma_glue_cfg_tx_chn(tx_chn); + if (ret) { + dev_err(dev, "Failed to cfg tchan %d\n", ret); + goto err; + } + + ret = xudma_navss_psil_pair(tx_chn->common.udmax, + tx_chn->common.src_thread, + tx_chn->common.dst_thread); + if (ret) { + dev_err(dev, "PSI-L request err %d\n", ret); + goto err; + } + + tx_chn->psil_paired = true; + + /* reset TX RT registers */ + k3_udma_glue_disable_tx_chn(tx_chn); + + k3_udma_glue_dump_tx_chn(tx_chn); + + return tx_chn; + +err: + k3_udma_glue_release_tx_chn(tx_chn); + return ERR_PTR(ret); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn); + +void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + if (tx_chn->psil_paired) { + xudma_navss_psil_unpair(tx_chn->common.udmax, + tx_chn->common.src_thread, + tx_chn->common.dst_thread); + tx_chn->psil_paired = false; + } + + if (!IS_ERR_OR_NULL(tx_chn->udma_tchanx)) + xudma_tchan_put(tx_chn->common.udmax, + tx_chn->udma_tchanx); + + if (tx_chn->ringtxcq) + k3_ringacc_ring_free(tx_chn->ringtxcq); + + if (tx_chn->ringtx) + k3_ringacc_ring_free(tx_chn->ringtx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_release_tx_chn); + +int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + struct cppi5_host_desc_t *desc_tx, + dma_addr_t desc_dma) +{ + u32 ringtxcq_id; + + if (!atomic_add_unless(&tx_chn->free_pkts, -1, 0)) + return -ENOMEM; + + ringtxcq_id = k3_ringacc_get_ring_id(tx_chn->ringtxcq); + cppi5_desc_set_retpolicy(&desc_tx->hdr, 0, ringtxcq_id); + + return k3_ringacc_ring_push(tx_chn->ringtx, &desc_dma); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_push_tx_chn); + +int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + dma_addr_t *desc_dma) +{ + int ret; + + ret = k3_ringacc_ring_pop(tx_chn->ringtxcq, desc_dma); + if (!ret) + atomic_inc(&tx_chn->free_pkts); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_pop_tx_chn); + +int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + u32 txrt_ctl; + + txrt_ctl = UDMA_PEER_RT_EN_ENABLE; + xudma_tchanrt_write(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG, + txrt_ctl); + + txrt_ctl = xudma_tchanrt_read(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_CTL_REG); + txrt_ctl |= UDMA_CHAN_RT_CTL_EN; + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, + txrt_ctl); + + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn en"); + return 0; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_enable_tx_chn); + +void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn) +{ + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis1"); + + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, 0); + + xudma_tchanrt_write(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG, 0); + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn dis2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_disable_tx_chn); + +void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + bool sync) +{ + int i = 0; + u32 val; + + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown1"); + + xudma_tchanrt_write(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | UDMA_CHAN_RT_CTL_TDOWN); + + val = xudma_tchanrt_read(tx_chn->udma_tchanx, UDMA_TCHAN_RT_CTL_REG); + + while (sync && (val & UDMA_CHAN_RT_CTL_EN)) { + val = 
xudma_tchanrt_read(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_CTL_REG); + udelay(1); + if (i > K3_UDMAX_TDOWN_TIMEOUT_US) { + dev_err(tx_chn->common.dev, "TX tdown timeout\n"); + break; + } + i++; + } + + val = xudma_tchanrt_read(tx_chn->udma_tchanx, + UDMA_TCHAN_RT_PEER_RT_EN_REG); + if (sync && (val & UDMA_PEER_RT_EN_ENABLE)) + dev_err(tx_chn->common.dev, "TX tdown peer not stopped\n"); + k3_udma_glue_dump_tx_rt_chn(tx_chn, "txchn tdown2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_tx_chn); + +void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn, + void *data, + void (*cleanup)(void *data, dma_addr_t desc_dma)) +{ + dma_addr_t desc_dma; + int occ_tx, i, ret; + + /* reset TXCQ as it is not input for udma - expected to be empty */ + if (tx_chn->ringtxcq) + k3_ringacc_ring_reset(tx_chn->ringtxcq); + + /* + * TXQ reset needs to be done in a special way, as it is input for udma + * and its state is cached by udma, so: + * 1) save TXQ occ + * 2) clean up TXQ and call callback .cleanup() for each desc + * 3) reset TXQ in a special way + */ + occ_tx = k3_ringacc_ring_get_occ(tx_chn->ringtx); + dev_dbg(tx_chn->common.dev, "TX reset occ_tx %u\n", occ_tx); + + for (i = 0; i < occ_tx; i++) { + ret = k3_ringacc_ring_pop(tx_chn->ringtx, &desc_dma); + if (ret) { + dev_err(tx_chn->common.dev, "TX reset pop %d\n", ret); + break; + } + cleanup(data, desc_dma); + } + + k3_ringacc_ring_reset_dma(tx_chn->ringtx, occ_tx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_reset_tx_chn); + +u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn) +{ + return tx_chn->common.hdesc_size; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_hdesc_size); + +u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn) +{ + return k3_ringacc_get_ring_id(tx_chn->ringtxcq); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_txcq_id); + +int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn) +{ + tx_chn->virq = k3_ringacc_get_ring_irq_num(tx_chn->ringtxcq); + + return tx_chn->virq; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq); + +static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; + struct ti_sci_msg_rm_udmap_rx_ch_cfg req; + int ret; + + memset(&req, 0, sizeof(req)); + + req.valid_params = TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID; + + req.nav_id = tisci_rm->tisci_dev_id; + req.index = rx_chn->udma_rchan_id; + req.rx_fetch_size = rx_chn->common.hdesc_size >> 2; + /* + * TODO: we can't support rxcq_qnum/RCHAN[a]_RCQ cfg with current sysfw + * and udmax impl, so just configure it to an invalid value. 
+ * req.rxcq_qnum = k3_ringacc_get_ring_id(rx_chn->flows[0].ringrx); + */ + req.rxcq_qnum = 0xFFFF; + if (rx_chn->flow_num && rx_chn->flow_id_base != rx_chn->udma_rchan_id) { + /* Default flow + extra ones */ + req.flowid_start = rx_chn->flow_id_base; + req.flowid_cnt = rx_chn->flow_num; + } + req.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + + ret = tisci_rm->tisci_udmap_ops->rx_ch_cfg(tisci_rm->tisci, &req); + if (ret) + dev_err(rx_chn->common.dev, "rchan%d cfg failed %d\n", + rx_chn->udma_rchan_id, ret); + + return ret; +} + +static void k3_udma_glue_release_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; + + if (IS_ERR_OR_NULL(flow->udma_rflow)) + return; + + if (flow->ringrxfdq) + k3_ringacc_ring_free(flow->ringrxfdq); + + if (flow->ringrx) + k3_ringacc_ring_free(flow->ringrx); + + xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); + flow->udma_rflow = NULL; + rx_chn->flows_ready--; +} + +static int k3_udma_glue_cfg_rx_flow(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx, + struct k3_udma_glue_rx_flow_cfg *flow_cfg) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; + const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; + struct device *dev = rx_chn->common.dev; + struct ti_sci_msg_rm_udmap_flow_cfg req; + int rx_ring_id; + int rx_ringfdq_id; + int ret = 0; + + flow->udma_rflow = xudma_rflow_get(rx_chn->common.udmax, + flow->udma_rflow_id); + if (IS_ERR(flow->udma_rflow)) { + ret = PTR_ERR(flow->udma_rflow); + dev_err(dev, "UDMAX rflow get err %d\n", ret); + goto err; + } + + if (flow->udma_rflow_id != xudma_rflow_get_id(flow->udma_rflow)) { + xudma_rflow_put(rx_chn->common.udmax, flow->udma_rflow); + return -ENODEV; + } + + /* request and cfg rings */ + flow->ringrx = k3_ringacc_request_ring(rx_chn->common.ringacc, + flow_cfg->ring_rxq_id, 0); + if (!flow->ringrx) { + ret = -ENODEV; + dev_err(dev, "Failed to get RX ring\n"); + goto err; + } + + flow->ringrxfdq = k3_ringacc_request_ring(rx_chn->common.ringacc, + flow_cfg->ring_rxfdq0_id, 0); + if (!flow->ringrxfdq) { + ret = -ENODEV; + dev_err(dev, "Failed to get RXFDQ ring\n"); + goto err; + } + + ret = k3_ringacc_ring_cfg(flow->ringrx, &flow_cfg->rx_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringrx %d\n", ret); + goto err; + } + + ret = k3_ringacc_ring_cfg(flow->ringrxfdq, &flow_cfg->rxfdq_cfg); + if (ret) { + dev_err(dev, "Failed to cfg ringrxfdq %d\n", ret); + goto err; + } + + if (rx_chn->remote) { + rx_ring_id = TI_SCI_RESOURCE_NULL; + rx_ringfdq_id = TI_SCI_RESOURCE_NULL; + } else { + rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); + rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); + } + + memset(&req, 0, sizeof(req)); + + req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + 
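	/*
	 * TI-SCI convention, visible throughout this file: sysfw only
	 * consumes a req field whose bit is set in valid_params, so the
	 * struct is zeroed by the memset above and every field written
	 * below is covered by one of the VALID flags listed.
	 */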
req.flow_index = flow->udma_rflow_id; + if (rx_chn->common.epib) + req.rx_einfo_present = 1; + if (rx_chn->common.psdata_size) + req.rx_psinfo_present = 1; + if (flow_cfg->rx_error_handling) + req.rx_error_handling = 1; + req.rx_desc_type = 0; + req.rx_dest_qnum = rx_ring_id; + req.rx_src_tag_hi_sel = 0; + req.rx_src_tag_lo_sel = flow_cfg->src_tag_lo_sel; + req.rx_dest_tag_hi_sel = 0; + req.rx_dest_tag_lo_sel = 0; + req.rx_fdq0_sz0_qnum = rx_ringfdq_id; + req.rx_fdq1_qnum = rx_ringfdq_id; + req.rx_fdq2_qnum = rx_ringfdq_id; + req.rx_fdq3_qnum = rx_ringfdq_id; + + ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); + if (ret) { + dev_err(dev, "flow%d config failed: %d\n", flow->udma_rflow_id, + ret); + goto err; + } + + rx_chn->flows_ready++; + dev_dbg(dev, "flow%d config done. ready:%d\n", + flow->udma_rflow_id, rx_chn->flows_ready); + + return 0; +err: + k3_udma_glue_release_rx_flow(rx_chn, flow_idx); + return ret; +} + +static void k3_udma_glue_dump_rx_chn(struct k3_udma_glue_rx_channel *chn) +{ + struct device *dev = chn->common.dev; + + dev_dbg(dev, "dump_rx_chn:\n" + "udma_rchan_id: %d\n" + "src_thread: %08x\n" + "dst_thread: %08x\n" + "epib: %d\n" + "hdesc_size: %u\n" + "psdata_size: %u\n" + "swdata_size: %u\n" + "flow_id_base: %d\n" + "flow_num: %d\n", + chn->udma_rchan_id, + chn->common.src_thread, + chn->common.dst_thread, + chn->common.epib, + chn->common.hdesc_size, + chn->common.psdata_size, + chn->common.swdata_size, + chn->flow_id_base, + chn->flow_num); +} + +static void k3_udma_glue_dump_rx_rt_chn(struct k3_udma_glue_rx_channel *chn, + char *mark) +{ + struct device *dev = chn->common.dev; + + dev_dbg(dev, "=== dump ===> %s\n", mark); + + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_CTL_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PEER_RT_EN_REG, + xudma_rchanrt_read(chn->udma_rchanx, + UDMA_RCHAN_RT_PEER_RT_EN_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_PCNT_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_PCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_BCNT_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_BCNT_REG)); + dev_dbg(dev, "0x%08X: %08X\n", UDMA_RCHAN_RT_SBCNT_REG, + xudma_rchanrt_read(chn->udma_rchanx, UDMA_RCHAN_RT_SBCNT_REG)); +} + +static int +k3_udma_glue_allocate_rx_flows(struct k3_udma_glue_rx_channel *rx_chn, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + int ret; + + /* default rflow */ + if (cfg->flow_id_use_rxchan_id) + return 0; + + /* not a GP rflows */ + if (rx_chn->flow_id_base != -1 && + !xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) + return 0; + + /* Allocate range of GP rflows */ + ret = xudma_alloc_gp_rflow_range(rx_chn->common.udmax, + rx_chn->flow_id_base, + rx_chn->flow_num); + if (ret < 0) { + dev_err(rx_chn->common.dev, "UDMAX reserve_rflow %d cnt:%d err: %d\n", + rx_chn->flow_id_base, rx_chn->flow_num, ret); + return ret; + } + rx_chn->flow_id_base = ret; + + return 0; +} + +static struct k3_udma_glue_rx_channel * +k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + struct k3_udma_glue_rx_channel *rx_chn; + int ret, i; + + if (cfg->flow_id_num <= 0) + return ERR_PTR(-EINVAL); + + if (cfg->flow_id_num != 1 && + (cfg->def_flow_cfg || cfg->flow_id_use_rxchan_id)) + return ERR_PTR(-EINVAL); + + rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); + if (!rx_chn) + return ERR_PTR(-ENOMEM); + + rx_chn->common.dev = dev; + 
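	/*
	 * Setup sequence below, summarized: parse the DT "dmas" entry and
	 * its PSI-L endpoint config, reserve an rchan, allocate the rflow
	 * range, configure the channel and (optionally) the default flow
	 * via TI-SCI, then pair the PSI-L threads. Any failure unwinds
	 * through k3_udma_glue_release_rx_chn(), which tolerates a
	 * partially initialized channel.
	 */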
rx_chn->common.swdata_size = cfg->swdata_size; + rx_chn->remote = false; + + /* parse of udmap channel */ + ret = of_k3_udma_glue_parse_chn(dev->of_node, name, + &rx_chn->common, false); + if (ret) + goto err; + + rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, + rx_chn->common.psdata_size, + rx_chn->common.swdata_size); + + /* request and cfg UDMAP RX channel */ + rx_chn->udma_rchanx = xudma_rchan_get(rx_chn->common.udmax, -1); + if (IS_ERR(rx_chn->udma_rchanx)) { + ret = PTR_ERR(rx_chn->udma_rchanx); + dev_err(dev, "UDMAX rchanx get err %d\n", ret); + goto err; + } + rx_chn->udma_rchan_id = xudma_rchan_get_id(rx_chn->udma_rchanx); + + rx_chn->flow_num = cfg->flow_id_num; + rx_chn->flow_id_base = cfg->flow_id_base; + + /* Use RX channel id as flow id: target dev can't generate flow_id */ + if (cfg->flow_id_use_rxchan_id) + rx_chn->flow_id_base = rx_chn->udma_rchan_id; + + rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, + sizeof(*rx_chn->flows), GFP_KERNEL); + if (!rx_chn->flows) { + ret = -ENOMEM; + goto err; + } + + ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); + if (ret) + goto err; + + for (i = 0; i < rx_chn->flow_num; i++) + rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; + + /* request and cfg psi-l */ + rx_chn->common.dst_thread = + xudma_dev_get_psil_base(rx_chn->common.udmax) + + rx_chn->udma_rchan_id; + + ret = k3_udma_glue_cfg_rx_chn(rx_chn); + if (ret) { + dev_err(dev, "Failed to cfg rchan %d\n", ret); + goto err; + } + + /* init default RX flow only if flow_num = 1 */ + if (cfg->def_flow_cfg) { + ret = k3_udma_glue_cfg_rx_flow(rx_chn, 0, cfg->def_flow_cfg); + if (ret) + goto err; + } + + ret = xudma_navss_psil_pair(rx_chn->common.udmax, + rx_chn->common.src_thread, + rx_chn->common.dst_thread); + if (ret) { + dev_err(dev, "PSI-L request err %d\n", ret); + goto err; + } + + rx_chn->psil_paired = true; + + /* reset RX RT registers */ + k3_udma_glue_disable_rx_chn(rx_chn); + + k3_udma_glue_dump_rx_chn(rx_chn); + + return rx_chn; + +err: + k3_udma_glue_release_rx_chn(rx_chn); + return ERR_PTR(ret); +} + +static struct k3_udma_glue_rx_channel * +k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + struct k3_udma_glue_rx_channel *rx_chn; + int ret, i; + + if (cfg->flow_id_num <= 0 || + cfg->flow_id_use_rxchan_id || + cfg->def_flow_cfg || + cfg->flow_id_base < 0) + return ERR_PTR(-EINVAL); + + /* + * A remote RX channel is under control of a remote CPU core, so + * Linux can only request it and manipulate it via dedicated RX flows + */ + + rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL); + if (!rx_chn) + return ERR_PTR(-ENOMEM); + + rx_chn->common.dev = dev; + rx_chn->common.swdata_size = cfg->swdata_size; + rx_chn->remote = true; + rx_chn->udma_rchan_id = -1; + rx_chn->flow_num = cfg->flow_id_num; + rx_chn->flow_id_base = cfg->flow_id_base; + rx_chn->psil_paired = false; + + /* parse of udmap channel */ + ret = of_k3_udma_glue_parse_chn(dev->of_node, name, + &rx_chn->common, false); + if (ret) + goto err; + + rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib, + rx_chn->common.psdata_size, + rx_chn->common.swdata_size); + + rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num, + sizeof(*rx_chn->flows), GFP_KERNEL); + if (!rx_chn->flows) { + ret = -ENOMEM; + goto err; + } + + ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg); + if (ret) + goto err; + + for (i = 0; i < rx_chn->flow_num; i++) + rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i; + + 
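	/*
	 * Contrast with the local path above: for a remote channel no
	 * rchan is reserved, no TI-SCI channel configuration is sent and
	 * no PSI-L pairing is done. The remote core owns the channel;
	 * Linux only provisions the flows and their rings.
	 */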
k3_udma_glue_dump_rx_chn(rx_chn); + + return rx_chn; + +err: + k3_udma_glue_release_rx_chn(rx_chn); + return ERR_PTR(ret); +} + +struct k3_udma_glue_rx_channel * +k3_udma_glue_request_rx_chn(struct device *dev, const char *name, + struct k3_udma_glue_rx_channel_cfg *cfg) +{ + if (cfg->remote) + return k3_udma_glue_request_remote_rx_chn(dev, name, cfg); + else + return k3_udma_glue_request_rx_chn_priv(dev, name, cfg); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_request_rx_chn); + +void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + int i; + + if (IS_ERR_OR_NULL(rx_chn->common.udmax)) + return; + + if (rx_chn->psil_paired) { + xudma_navss_psil_unpair(rx_chn->common.udmax, + rx_chn->common.src_thread, + rx_chn->common.dst_thread); + rx_chn->psil_paired = false; + } + + for (i = 0; i < rx_chn->flow_num; i++) + k3_udma_glue_release_rx_flow(rx_chn, i); + + if (xudma_rflow_is_gp(rx_chn->common.udmax, rx_chn->flow_id_base)) + xudma_free_gp_rflow_range(rx_chn->common.udmax, + rx_chn->flow_id_base, + rx_chn->flow_num); + + if (!IS_ERR_OR_NULL(rx_chn->udma_rchanx)) + xudma_rchan_put(rx_chn->common.udmax, + rx_chn->udma_rchanx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_release_rx_chn); + +int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx, + struct k3_udma_glue_rx_flow_cfg *flow_cfg) +{ + if (flow_idx >= rx_chn->flow_num) + return -EINVAL; + + return k3_udma_glue_cfg_rx_flow(rx_chn, flow_idx, flow_cfg); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_init); + +u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx) +{ + struct k3_udma_glue_rx_flow *flow; + + if (flow_idx >= rx_chn->flow_num) + return -EINVAL; + + flow = &rx_chn->flows[flow_idx]; + + return k3_ringacc_get_ring_id(flow->ringrxfdq); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_get_fdq_id); + +u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn) +{ + return rx_chn->flow_id_base; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_flow_id_base); + +int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; + const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm; + struct device *dev = rx_chn->common.dev; + struct ti_sci_msg_rm_udmap_flow_cfg req; + int rx_ring_id; + int rx_ringfdq_id; + int ret = 0; + + if (!rx_chn->remote) + return -EINVAL; + + rx_ring_id = k3_ringacc_get_ring_id(flow->ringrx); + rx_ringfdq_id = k3_ringacc_get_ring_id(flow->ringrxfdq); + + memset(&req, 0, sizeof(req)); + + req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + req.flow_index = flow->udma_rflow_id; + req.rx_dest_qnum = rx_ring_id; + req.rx_fdq0_sz0_qnum = rx_ringfdq_id; + req.rx_fdq1_qnum = rx_ringfdq_id; + req.rx_fdq2_qnum = rx_ringfdq_id; + req.rx_fdq3_qnum = rx_ringfdq_id; + + ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); + if (ret) { + dev_err(dev, "flow%d enable failed: %d\n", flow->udma_rflow_id, + ret); + } + + return ret; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_enable); + +int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_idx) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_idx]; + const struct udma_tisci_rm *tisci_rm = 
rx_chn->common.tisci_rm; + struct device *dev = rx_chn->common.dev; + struct ti_sci_msg_rm_udmap_flow_cfg req; + int ret = 0; + + if (!rx_chn->remote) + return -EINVAL; + + memset(&req, 0, sizeof(req)); + req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; + req.nav_id = tisci_rm->tisci_dev_id; + req.flow_index = flow->udma_rflow_id; + req.rx_dest_qnum = TI_SCI_RESOURCE_NULL; + req.rx_fdq0_sz0_qnum = TI_SCI_RESOURCE_NULL; + req.rx_fdq1_qnum = TI_SCI_RESOURCE_NULL; + req.rx_fdq2_qnum = TI_SCI_RESOURCE_NULL; + req.rx_fdq3_qnum = TI_SCI_RESOURCE_NULL; + + ret = tisci_rm->tisci_udmap_ops->rx_flow_cfg(tisci_rm->tisci, &req); + if (ret) { + dev_err(dev, "flow%d disable failed: %d\n", flow->udma_rflow_id, + ret); + } + + return ret; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_flow_disable); + +int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + u32 rxrt_ctl; + + if (rx_chn->remote) + return -EINVAL; + + if (rx_chn->flows_ready < rx_chn->flow_num) + return -EINVAL; + + rxrt_ctl = xudma_rchanrt_read(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_CTL_REG); + rxrt_ctl |= UDMA_CHAN_RT_CTL_EN; + xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, + rxrt_ctl); + + xudma_rchanrt_write(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt en"); + return 0; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_enable_rx_chn); + +void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn) +{ + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis1"); + + xudma_rchanrt_write(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + 0); + xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG, 0); + + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt dis2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_disable_rx_chn); + +void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + bool sync) +{ + int i = 0; + u32 val; + + if (rx_chn->remote) + return; + + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown1"); + + xudma_rchanrt_write(rx_chn->udma_rchanx, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE | UDMA_PEER_RT_EN_TEARDOWN); + + val = xudma_rchanrt_read(rx_chn->udma_rchanx, UDMA_RCHAN_RT_CTL_REG); + + while (sync && (val & UDMA_CHAN_RT_CTL_EN)) { + val = xudma_rchanrt_read(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_CTL_REG); + udelay(1); + if (i > K3_UDMAX_TDOWN_TIMEOUT_US) { + dev_err(rx_chn->common.dev, "RX tdown timeout\n"); + break; + } + i++; + } + + val = xudma_rchanrt_read(rx_chn->udma_rchanx, + UDMA_RCHAN_RT_PEER_RT_EN_REG); + if (sync && (val & UDMA_PEER_RT_EN_ENABLE)) + dev_err(rx_chn->common.dev, "RX tdown peer not stopped\n"); + k3_udma_glue_dump_rx_rt_chn(rx_chn, "rxrt tdown2"); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_tdown_rx_chn); + +void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num, void *data, + void (*cleanup)(void *data, dma_addr_t desc_dma), bool skip_fdq) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; + struct device *dev = rx_chn->common.dev; + dma_addr_t desc_dma; + int occ_rx, i, ret; + + /* reset RXCQ as it is not input for udma - expected to be empty */ + occ_rx = k3_ringacc_ring_get_occ(flow->ringrx); + dev_dbg(dev, "RX reset flow %u occ_rx %u\n", flow_num, occ_rx); + if (flow->ringrx) + k3_ringacc_ring_reset(flow->ringrx); + + /* 
Skip RX FDQ in case one FDQ is used for the set of flows */ + if (skip_fdq) + return; + + /* + * RX FDQ reset needs to be done in a special way as it is input for + * udma and its state is cached by udma, so: + * 1) save RX FDQ occ + * 2) clean up RX FDQ and call callback .cleanup() for each desc + * 3) reset RX FDQ in a special way + */ + occ_rx = k3_ringacc_ring_get_occ(flow->ringrxfdq); + dev_dbg(dev, "RX reset flow %u occ_rx_fdq %u\n", flow_num, occ_rx); + + for (i = 0; i < occ_rx; i++) { + ret = k3_ringacc_ring_pop(flow->ringrxfdq, &desc_dma); + if (ret) { + dev_err(dev, "RX reset pop %d\n", ret); + break; + } + cleanup(data, desc_dma); + } + + k3_ringacc_ring_reset_dma(flow->ringrxfdq, occ_rx); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_reset_rx_chn); + +int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num, struct cppi5_host_desc_t *desc_rx, + dma_addr_t desc_dma) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; + + return k3_ringacc_ring_push(flow->ringrxfdq, &desc_dma); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_push_rx_chn); + +int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num, dma_addr_t *desc_dma) +{ + struct k3_udma_glue_rx_flow *flow = &rx_chn->flows[flow_num]; + + return k3_ringacc_ring_pop(flow->ringrx, desc_dma); +} +EXPORT_SYMBOL_GPL(k3_udma_glue_pop_rx_chn); + +int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn, + u32 flow_num) +{ + struct k3_udma_glue_rx_flow *flow; + + flow = &rx_chn->flows[flow_num]; + + flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx); + + return flow->virq; +} +EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq); diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c new file mode 100644 index 000000000000..0b8f3dd6b146 --- /dev/null +++ b/drivers/dma/ti/k3-udma-private.c @@ -0,0 +1,133 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) +{ + return navss_psil_pair(ud, src_thread, dst_thread); +} +EXPORT_SYMBOL(xudma_navss_psil_pair); + +int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) +{ + return navss_psil_unpair(ud, src_thread, dst_thread); +} +EXPORT_SYMBOL(xudma_navss_psil_unpair); + +struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property) +{ + struct device_node *udma_node = np; + struct platform_device *pdev; + struct udma_dev *ud; + + if (property) { + udma_node = of_parse_phandle(np, property, 0); + if (!udma_node) { + pr_err("UDMA node is not found\n"); + return ERR_PTR(-ENODEV); + } + } + + pdev = of_find_device_by_node(udma_node); + if (!pdev) { + pr_debug("UDMA device not found\n"); + return ERR_PTR(-EPROBE_DEFER); + } + + if (np != udma_node) + of_node_put(udma_node); + + ud = platform_get_drvdata(pdev); + if (!ud) { + pr_debug("UDMA has not been probed\n"); + return ERR_PTR(-EPROBE_DEFER); + } + + return ud; +} +EXPORT_SYMBOL(of_xudma_dev_get); + +u32 xudma_dev_get_psil_base(struct udma_dev *ud) +{ + return ud->psil_base; +} +EXPORT_SYMBOL(xudma_dev_get_psil_base); + +struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud) +{ + return &ud->tisci_rm; +} +EXPORT_SYMBOL(xudma_dev_get_tisci_rm); + +int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt) +{ + return __udma_alloc_gp_rflow_range(ud, from, cnt); +} +EXPORT_SYMBOL(xudma_alloc_gp_rflow_range); + +int 
xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt) +{ + return __udma_free_gp_rflow_range(ud, from, cnt); +} +EXPORT_SYMBOL(xudma_free_gp_rflow_range); + +bool xudma_rflow_is_gp(struct udma_dev *ud, int id) +{ + return !test_bit(id, ud->rflow_gp_map); +} +EXPORT_SYMBOL(xudma_rflow_is_gp); + +#define XUDMA_GET_PUT_RESOURCE(res) \ +struct udma_##res *xudma_##res##_get(struct udma_dev *ud, int id) \ +{ \ + return __udma_reserve_##res(ud, false, id); \ +} \ +EXPORT_SYMBOL(xudma_##res##_get); \ + \ +void xudma_##res##_put(struct udma_dev *ud, struct udma_##res *p) \ +{ \ + clear_bit(p->id, ud->res##_map); \ +} \ +EXPORT_SYMBOL(xudma_##res##_put) +XUDMA_GET_PUT_RESOURCE(tchan); +XUDMA_GET_PUT_RESOURCE(rchan); + +struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id) +{ + return __udma_get_rflow(ud, id); +} +EXPORT_SYMBOL(xudma_rflow_get); + +void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p) +{ + __udma_put_rflow(ud, p); +} +EXPORT_SYMBOL(xudma_rflow_put); + +#define XUDMA_GET_RESOURCE_ID(res) \ +int xudma_##res##_get_id(struct udma_##res *p) \ +{ \ + return p->id; \ +} \ +EXPORT_SYMBOL(xudma_##res##_get_id) +XUDMA_GET_RESOURCE_ID(tchan); +XUDMA_GET_RESOURCE_ID(rchan); +XUDMA_GET_RESOURCE_ID(rflow); + +/* Exported register access functions */ +#define XUDMA_RT_IO_FUNCTIONS(res) \ +u32 xudma_##res##rt_read(struct udma_##res *p, int reg) \ +{ \ + return udma_##res##rt_read(p, reg); \ +} \ +EXPORT_SYMBOL(xudma_##res##rt_read); \ + \ +void xudma_##res##rt_write(struct udma_##res *p, int reg, u32 val) \ +{ \ + udma_##res##rt_write(p, reg, val); \ +} \ +EXPORT_SYMBOL(xudma_##res##rt_write) +XUDMA_RT_IO_FUNCTIONS(tchan); +XUDMA_RT_IO_FUNCTIONS(rchan); diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c new file mode 100644 index 000000000000..ea79c2df28e0 --- /dev/null +++ b/drivers/dma/ti/k3-udma.c @@ -0,0 +1,3432 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> + */ + +#include <linux/kernel.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/list.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/of.h> +#include <linux/of_dma.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/workqueue.h> +#include <linux/completion.h> +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/soc/ti/ti_sci_protocol.h> +#include <linux/soc/ti/ti_sci_inta_msi.h> +#include <linux/dma/ti-cppi5.h> + +#include "../virt-dma.h" +#include "k3-udma.h" +#include "k3-psil-priv.h" + +struct udma_static_tr { + u8 elsize; /* RPSTR0 */ + u16 elcnt; /* RPSTR0 */ + u16 bstcnt; /* RPSTR1 */ +}; + +#define K3_UDMA_MAX_RFLOWS 1024 +#define K3_UDMA_DEFAULT_RING_SIZE 16 + +/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */ +#define UDMA_RFLOW_SRCTAG_NONE 0 +#define UDMA_RFLOW_SRCTAG_CFG_TAG 1 +#define UDMA_RFLOW_SRCTAG_FLOW_ID 2 +#define UDMA_RFLOW_SRCTAG_SRC_TAG 4 + +#define UDMA_RFLOW_DSTTAG_NONE 0 +#define UDMA_RFLOW_DSTTAG_CFG_TAG 1 +#define UDMA_RFLOW_DSTTAG_FLOW_ID 2 +#define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4 +#define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5 + +struct udma_chan; + +enum udma_mmr { + MMR_GCFG = 0, + MMR_RCHANRT, + MMR_TCHANRT, + MMR_LAST, +}; + +static const char * const mmr_names[] = { "gcfg", "rchanrt", "tchanrt" }; + 
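For orientation, the XUDMA_GET_PUT_RESOURCE() macro in k3-udma-private.c above is plain token pasting; expanded for tchan it yields the following wrapper pair (a sketch of the preprocessor output only, relying on the __udma_reserve_tchan() helper that UDMA_RESERVE_RESOURCE(tchan) generates in k3-udma.c):

struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id)
{
	/* id < 0 asks __udma_reserve_tchan() to pick the first free channel */
	return __udma_reserve_tchan(ud, false, id);
}
EXPORT_SYMBOL(xudma_tchan_get);

void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p)
{
	/* releasing a channel only clears its bit in the allocation bitmap */
	clear_bit(p->id, ud->tchan_map);
}
EXPORT_SYMBOL(xudma_tchan_put);
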
+struct udma_tchan { + void __iomem *reg_rt; + + int id; + struct k3_ring *t_ring; /* Transmit ring */ + struct k3_ring *tc_ring; /* Transmit Completion ring */ +}; + +struct udma_rflow { + int id; + struct k3_ring *fd_ring; /* Free Descriptor ring */ + struct k3_ring *r_ring; /* Receive ring */ +}; + +struct udma_rchan { + void __iomem *reg_rt; + + int id; +}; + +#define UDMA_FLAG_PDMA_ACC32 BIT(0) +#define UDMA_FLAG_PDMA_BURST BIT(1) + +struct udma_match_data { + u32 psil_base; + bool enable_memcpy_support; + u32 flags; + u32 statictr_z_mask; + u32 rchan_oes_offset; + + u8 tpl_levels; + u32 level_start_idx[]; +}; + +struct udma_dev { + struct dma_device ddev; + struct device *dev; + void __iomem *mmrs[MMR_LAST]; + const struct udma_match_data *match_data; + + size_t desc_align; /* alignment to use for descriptors */ + + struct udma_tisci_rm tisci_rm; + + struct k3_ringacc *ringacc; + + struct work_struct purge_work; + struct list_head desc_to_purge; + spinlock_t lock; + + int tchan_cnt; + int echan_cnt; + int rchan_cnt; + int rflow_cnt; + unsigned long *tchan_map; + unsigned long *rchan_map; + unsigned long *rflow_gp_map; + unsigned long *rflow_gp_map_allocated; + unsigned long *rflow_in_use; + + struct udma_tchan *tchans; + struct udma_rchan *rchans; + struct udma_rflow *rflows; + + struct udma_chan *channels; + u32 psil_base; +}; + +struct udma_hwdesc { + size_t cppi5_desc_size; + void *cppi5_desc_vaddr; + dma_addr_t cppi5_desc_paddr; + + /* TR descriptor internal pointers */ + void *tr_req_base; + struct cppi5_tr_resp_t *tr_resp_base; +}; + +struct udma_desc { + struct virt_dma_desc vd; + + bool terminated; + + enum dma_transfer_direction dir; + + struct udma_static_tr static_tr; + u32 residue; + + unsigned int sglen; + unsigned int desc_idx; /* Only used for cyclic in packet mode */ + unsigned int tr_idx; + + u32 metadata_size; + void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */ + + unsigned int hwdesc_count; + struct udma_hwdesc hwdesc[0]; +}; + +enum udma_chan_state { + UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */ + UDMA_CHAN_IS_ACTIVE, /* Normal operation */ + UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */ +}; + +struct udma_tx_drain { + struct delayed_work work; + unsigned long jiffie; + u32 residue; +}; + +struct udma_chan_config { + bool pkt_mode; /* TR or packet */ + bool needs_epib; /* EPIB is needed for the communication or not */ + u32 psd_size; /* size of Protocol Specific Data */ + u32 metadata_size; /* (needs_epib ? 
16:0) + psd_size */ + u32 hdesc_size; /* Size of a packet descriptor in packet mode */ + bool notdpkt; /* Suppress sending TDC packet */ + int remote_thread_id; + u32 src_thread; + u32 dst_thread; + enum psil_endpoint_type ep_type; + bool enable_acc32; + bool enable_burst; + enum udma_tp_level channel_tpl; /* Channel Throughput Level */ + + enum dma_transfer_direction dir; +}; + +struct udma_chan { + struct virt_dma_chan vc; + struct dma_slave_config cfg; + struct udma_dev *ud; + struct udma_desc *desc; + struct udma_desc *terminated_desc; + struct udma_static_tr static_tr; + char *name; + + struct udma_tchan *tchan; + struct udma_rchan *rchan; + struct udma_rflow *rflow; + + bool psil_paired; + + int irq_num_ring; + int irq_num_udma; + + bool cyclic; + bool paused; + + enum udma_chan_state state; + struct completion teardown_completed; + + struct udma_tx_drain tx_drain; + + u32 bcnt; /* number of bytes completed since the start of the channel */ + u32 in_ring_cnt; /* number of descriptors in flight */ + + /* Channel configuration parameters */ + struct udma_chan_config config; + + /* dmapool for packet mode descriptors */ + bool use_dma_pool; + struct dma_pool *hdesc_pool; + + u32 id; +}; + +static inline struct udma_dev *to_udma_dev(struct dma_device *d) +{ + return container_of(d, struct udma_dev, ddev); +} + +static inline struct udma_chan *to_udma_chan(struct dma_chan *c) +{ + return container_of(c, struct udma_chan, vc.chan); +} + +static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t) +{ + return container_of(t, struct udma_desc, vd.tx); +} + +/* Generic register access functions */ +static inline u32 udma_read(void __iomem *base, int reg) +{ + return readl(base + reg); +} + +static inline void udma_write(void __iomem *base, int reg, u32 val) +{ + writel(val, base + reg); +} + +static inline void udma_update_bits(void __iomem *base, int reg, + u32 mask, u32 val) +{ + u32 tmp, orig; + + orig = readl(base + reg); + tmp = orig & ~mask; + tmp |= (val & mask); + + if (tmp != orig) + writel(tmp, base + reg); +} + +/* TCHANRT */ +static inline u32 udma_tchanrt_read(struct udma_tchan *tchan, int reg) +{ + if (!tchan) + return 0; + return udma_read(tchan->reg_rt, reg); +} + +static inline void udma_tchanrt_write(struct udma_tchan *tchan, int reg, + u32 val) +{ + if (!tchan) + return; + udma_write(tchan->reg_rt, reg, val); +} + +static inline void udma_tchanrt_update_bits(struct udma_tchan *tchan, int reg, + u32 mask, u32 val) +{ + if (!tchan) + return; + udma_update_bits(tchan->reg_rt, reg, mask, val); +} + +/* RCHANRT */ +static inline u32 udma_rchanrt_read(struct udma_rchan *rchan, int reg) +{ + if (!rchan) + return 0; + return udma_read(rchan->reg_rt, reg); +} + +static inline void udma_rchanrt_write(struct udma_rchan *rchan, int reg, + u32 val) +{ + if (!rchan) + return; + udma_write(rchan->reg_rt, reg, val); +} + +static inline void udma_rchanrt_update_bits(struct udma_rchan *rchan, int reg, + u32 mask, u32 val) +{ + if (!rchan) + return; + udma_update_bits(rchan->reg_rt, reg, mask, val); +} + +static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread) +{ + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + + dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; + return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci, + tisci_rm->tisci_navss_dev_id, + src_thread, dst_thread); +} + +static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread, + u32 dst_thread) +{ + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + + dst_thread |= 
K3_PSIL_DST_THREAD_ID_OFFSET; + return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci, + tisci_rm->tisci_navss_dev_id, + src_thread, dst_thread); +} + +static void udma_reset_uchan(struct udma_chan *uc) +{ + memset(&uc->config, 0, sizeof(uc->config)); + uc->config.remote_thread_id = -1; + uc->state = UDMA_CHAN_IS_IDLE; +} + +static void udma_dump_chan_stdata(struct udma_chan *uc) +{ + struct device *dev = uc->ud->dev; + u32 offset; + int i; + + if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) { + dev_dbg(dev, "TCHAN State data:\n"); + for (i = 0; i < 32; i++) { + offset = UDMA_TCHAN_RT_STDATA_REG + i * 4; + dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i, + udma_tchanrt_read(uc->tchan, offset)); + } + } + + if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) { + dev_dbg(dev, "RCHAN State data:\n"); + for (i = 0; i < 32; i++) { + offset = UDMA_RCHAN_RT_STDATA_REG + i * 4; + dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i, + udma_rchanrt_read(uc->rchan, offset)); + } + } +} + +static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d, + int idx) +{ + return d->hwdesc[idx].cppi5_desc_paddr; +} + +static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx) +{ + return d->hwdesc[idx].cppi5_desc_vaddr; +} + +static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc, + dma_addr_t paddr) +{ + struct udma_desc *d = uc->terminated_desc; + + if (d) { + dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, + d->desc_idx); + + if (desc_paddr != paddr) + d = NULL; + } + + if (!d) { + d = uc->desc; + if (d) { + dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, + d->desc_idx); + + if (desc_paddr != paddr) + d = NULL; + } + } + + return d; +} + +static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d) +{ + if (uc->use_dma_pool) { + int i; + + for (i = 0; i < d->hwdesc_count; i++) { + if (!d->hwdesc[i].cppi5_desc_vaddr) + continue; + + dma_pool_free(uc->hdesc_pool, + d->hwdesc[i].cppi5_desc_vaddr, + d->hwdesc[i].cppi5_desc_paddr); + + d->hwdesc[i].cppi5_desc_vaddr = NULL; + } + } else if (d->hwdesc[0].cppi5_desc_vaddr) { + struct udma_dev *ud = uc->ud; + + dma_free_coherent(ud->dev, d->hwdesc[0].cppi5_desc_size, + d->hwdesc[0].cppi5_desc_vaddr, + d->hwdesc[0].cppi5_desc_paddr); + + d->hwdesc[0].cppi5_desc_vaddr = NULL; + } +} + +static void udma_purge_desc_work(struct work_struct *work) +{ + struct udma_dev *ud = container_of(work, typeof(*ud), purge_work); + struct virt_dma_desc *vd, *_vd; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&ud->lock, flags); + list_splice_tail_init(&ud->desc_to_purge, &head); + spin_unlock_irqrestore(&ud->lock, flags); + + list_for_each_entry_safe(vd, _vd, &head, node) { + struct udma_chan *uc = to_udma_chan(vd->tx.chan); + struct udma_desc *d = to_udma_desc(&vd->tx); + + udma_free_hwdesc(uc, d); + list_del(&vd->node); + kfree(d); + } + + /* If more to purge, schedule the work again */ + if (!list_empty(&ud->desc_to_purge)) + schedule_work(&ud->purge_work); +} + +static void udma_desc_free(struct virt_dma_desc *vd) +{ + struct udma_dev *ud = to_udma_dev(vd->tx.chan->device); + struct udma_chan *uc = to_udma_chan(vd->tx.chan); + struct udma_desc *d = to_udma_desc(&vd->tx); + unsigned long flags; + + if (uc->terminated_desc == d) + uc->terminated_desc = NULL; + + if (uc->use_dma_pool) { + udma_free_hwdesc(uc, d); + kfree(d); + return; + } + + spin_lock_irqsave(&ud->lock, flags); + list_add_tail(&vd->node, &ud->desc_to_purge); + 
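/* Deferred free: this callback can run from the vchan tasklet (atomic context), where dma_free_coherent() must not be called, so non-pool descriptors are queued here and released from process context by udma_purge_desc_work(). */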
spin_unlock_irqrestore(&ud->lock, flags); + + schedule_work(&ud->purge_work); +} + +static bool udma_is_chan_running(struct udma_chan *uc) +{ + u32 trt_ctl = 0; + u32 rrt_ctl = 0; + + if (uc->tchan) + trt_ctl = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + if (uc->rchan) + rrt_ctl = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_CTL_REG); + + if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN) + return true; + + return false; +} + +static bool udma_is_chan_paused(struct udma_chan *uc) +{ + u32 val, pause_mask; + + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + val = udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG); + pause_mask = UDMA_PEER_RT_EN_PAUSE; + break; + case DMA_MEM_TO_DEV: + val = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG); + pause_mask = UDMA_PEER_RT_EN_PAUSE; + break; + case DMA_MEM_TO_MEM: + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_CTL_REG); + pause_mask = UDMA_CHAN_RT_CTL_PAUSE; + break; + default: + return false; + } + + if (val & pause_mask) + return true; + + return false; +} + +static void udma_sync_for_device(struct udma_chan *uc, int idx) +{ + struct udma_desc *d = uc->desc; + + if (uc->cyclic && uc->config.pkt_mode) { + dma_sync_single_for_device(uc->ud->dev, + d->hwdesc[idx].cppi5_desc_paddr, + d->hwdesc[idx].cppi5_desc_size, + DMA_TO_DEVICE); + } else { + int i; + + for (i = 0; i < d->hwdesc_count; i++) { + if (!d->hwdesc[i].cppi5_desc_vaddr) + continue; + + dma_sync_single_for_device(uc->ud->dev, + d->hwdesc[i].cppi5_desc_paddr, + d->hwdesc[i].cppi5_desc_size, + DMA_TO_DEVICE); + } + } +} + +static int udma_push_to_ring(struct udma_chan *uc, int idx) +{ + struct udma_desc *d = uc->desc; + + struct k3_ring *ring = NULL; + int ret = -EINVAL; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + ring = uc->rflow->fd_ring; + break; + case DMA_MEM_TO_DEV: + case DMA_MEM_TO_MEM: + ring = uc->tchan->t_ring; + break; + default: + break; + } + + if (ring) { + dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx); + + wmb(); /* Ensure that writes are not moved over this point */ + udma_sync_for_device(uc, idx); + ret = k3_ringacc_ring_push(ring, &desc_addr); + uc->in_ring_cnt++; + } + + return ret; +} + +static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) +{ + struct k3_ring *ring = NULL; + int ret = -ENOENT; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + ring = uc->rflow->r_ring; + break; + case DMA_MEM_TO_DEV: + case DMA_MEM_TO_MEM: + ring = uc->tchan->tc_ring; + break; + default: + break; + } + + if (ring && k3_ringacc_ring_get_occ(ring)) { + struct udma_desc *d = NULL; + + ret = k3_ringacc_ring_pop(ring, addr); + if (ret) + return ret; + + /* Teardown completion */ + if (cppi5_desc_is_tdcm(*addr)) + return ret; + + d = udma_udma_desc_from_paddr(uc, *addr); + + if (d) + dma_sync_single_for_cpu(uc->ud->dev, *addr, + d->hwdesc[0].cppi5_desc_size, + DMA_FROM_DEVICE); + rmb(); /* Ensure that reads are not moved before this point */ + + if (!ret) + uc->in_ring_cnt--; + } + + return ret; +} + +static void udma_reset_rings(struct udma_chan *uc) +{ + struct k3_ring *ring1 = NULL; + struct k3_ring *ring2 = NULL; + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + if (uc->rchan) { + ring1 = uc->rflow->fd_ring; + ring2 = uc->rflow->r_ring; + } + break; + case DMA_MEM_TO_DEV: + case DMA_MEM_TO_MEM: + if (uc->tchan) { + ring1 = uc->tchan->t_ring; + ring2 = uc->tchan->tc_ring; + } + break; + default: + break; + } + + if (ring1) + k3_ringacc_ring_reset_dma(ring1, + 
k3_ringacc_ring_get_occ(ring1)); + if (ring2) + k3_ringacc_ring_reset(ring2); + + /* make sure we are not leaking memory by stalled descriptor */ + if (uc->terminated_desc) { + udma_desc_free(&uc->terminated_desc->vd); + uc->terminated_desc = NULL; + } + + uc->in_ring_cnt = 0; +} + +static void udma_reset_counters(struct udma_chan *uc) +{ + u32 val; + + if (uc->tchan) { + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_BCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_SBCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PCNT_REG, val); + + val = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG, val); + } + + if (uc->rchan) { + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_BCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_BCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_SBCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PCNT_REG, val); + + val = udma_rchanrt_read(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_BCNT_REG, val); + } + + uc->bcnt = 0; +} + +static int udma_reset_chan(struct udma_chan *uc, bool hard) +{ + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, 0); + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, 0); + break; + case DMA_MEM_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, 0); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, 0); + break; + default: + return -EINVAL; + } + + /* Reset all counters */ + udma_reset_counters(uc); + + /* Hard reset: re-initialize the channel to reset */ + if (hard) { + struct udma_chan_config ucc_backup; + int ret; + + memcpy(&ucc_backup, &uc->config, sizeof(uc->config)); + uc->ud->ddev.device_free_chan_resources(&uc->vc.chan); + + /* restore the channel configuration */ + memcpy(&uc->config, &ucc_backup, sizeof(uc->config)); + ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan); + if (ret) + return ret; + + /* + * Setting forced teardown after forced reset helps recovering + * the rchan. 
+ */ + if (uc->config.dir == DMA_DEV_TO_MEM) + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | + UDMA_CHAN_RT_CTL_TDOWN | + UDMA_CHAN_RT_CTL_FTDOWN); + } + uc->state = UDMA_CHAN_IS_IDLE; + + return 0; +} + +static void udma_start_desc(struct udma_chan *uc) +{ + struct udma_chan_config *ucc = &uc->config; + + if (ucc->pkt_mode && (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) { + int i; + + /* Push all descriptors to ring for packet mode cyclic or RX */ + for (i = 0; i < uc->desc->sglen; i++) + udma_push_to_ring(uc, i); + } else { + udma_push_to_ring(uc, 0); + } +} + +static bool udma_chan_needs_reconfiguration(struct udma_chan *uc) +{ + /* Only PDMAs have staticTR */ + if (uc->config.ep_type == PSIL_EP_NATIVE) + return false; + + /* Check if the staticTR configuration has changed for TX */ + if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr))) + return true; + + return false; +} + +static int udma_start(struct udma_chan *uc) +{ + struct virt_dma_desc *vd = vchan_next_desc(&uc->vc); + + if (!vd) { + uc->desc = NULL; + return -ENOENT; + } + + list_del(&vd->node); + + uc->desc = to_udma_desc(&vd->tx); + + /* Channel is already running and does not need reconfiguration */ + if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) { + udma_start_desc(uc); + goto out; + } + + /* Make sure that we clear the teardown bit, if it is set */ + udma_reset_chan(uc, false); + + /* Push descriptors before we start the channel */ + udma_start_desc(uc); + + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + /* Config remote TR */ + if (uc->config.ep_type == PSIL_EP_PDMA_XY) { + u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | + PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); + const struct udma_match_data *match_data = + uc->ud->match_data; + + if (uc->config.enable_acc32) + val |= PDMA_STATIC_TR_XY_ACC32; + if (uc->config.enable_burst) + val |= PDMA_STATIC_TR_XY_BURST; + + udma_rchanrt_write(uc->rchan, + UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG, val); + + udma_rchanrt_write(uc->rchan, + UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG, + PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt, + match_data->statictr_z_mask)); + + /* save the current staticTR configuration */ + memcpy(&uc->static_tr, &uc->desc->static_tr, + sizeof(uc->static_tr)); + } + + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + /* Enable remote */ + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + break; + case DMA_MEM_TO_DEV: + /* Config remote TR */ + if (uc->config.ep_type == PSIL_EP_PDMA_XY) { + u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) | + PDMA_STATIC_TR_X(uc->desc->static_tr.elsize); + + if (uc->config.enable_acc32) + val |= PDMA_STATIC_TR_XY_ACC32; + if (uc->config.enable_burst) + val |= PDMA_STATIC_TR_XY_BURST; + + udma_tchanrt_write(uc->tchan, + UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG, val); + + /* save the current staticTR configuration */ + memcpy(&uc->static_tr, &uc->desc->static_tr, + sizeof(uc->static_tr)); + } + + /* Enable remote */ + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE); + + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + break; + case DMA_MEM_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN); + + break; + default: + return -EINVAL; + } + + uc->state = UDMA_CHAN_IS_ACTIVE; +out: + + return 0; +} + +static int 
udma_stop(struct udma_chan *uc) +{ + enum udma_chan_state old_state = uc->state; + + uc->state = UDMA_CHAN_IS_TERMINATING; + reinit_completion(&uc->teardown_completed); + + switch (uc->config.dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE | + UDMA_PEER_RT_EN_TEARDOWN); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_ENABLE | + UDMA_PEER_RT_EN_FLUSH); + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | + UDMA_CHAN_RT_CTL_TDOWN); + break; + case DMA_MEM_TO_MEM: + udma_tchanrt_write(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_EN | + UDMA_CHAN_RT_CTL_TDOWN); + break; + default: + uc->state = old_state; + complete_all(&uc->teardown_completed); + return -EINVAL; + } + + return 0; +} + +static void udma_cyclic_packet_elapsed(struct udma_chan *uc) +{ + struct udma_desc *d = uc->desc; + struct cppi5_host_desc_t *h_desc; + + h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr; + cppi5_hdesc_reset_to_original(h_desc); + udma_push_to_ring(uc, d->desc_idx); + d->desc_idx = (d->desc_idx + 1) % d->sglen; +} + +static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d) +{ + struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr; + + memcpy(d->metadata, h_desc->epib, d->metadata_size); +} + +static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d) +{ + u32 peer_bcnt, bcnt; + + /* Only TX towards PDMA is affected */ + if (uc->config.ep_type == PSIL_EP_NATIVE || + uc->config.dir != DMA_MEM_TO_DEV) + return true; + + peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG); + bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG); + + if (peer_bcnt < bcnt) { + uc->tx_drain.residue = bcnt - peer_bcnt; + uc->tx_drain.jiffie = jiffies; + return false; + } + + return true; +} + +static void udma_check_tx_completion(struct work_struct *work) +{ + struct udma_chan *uc = container_of(work, typeof(*uc), + tx_drain.work.work); + bool desc_done = true; + u32 residue_diff; + unsigned long jiffie_diff, delay; + + if (uc->desc) { + residue_diff = uc->tx_drain.residue; + jiffie_diff = uc->tx_drain.jiffie; + desc_done = udma_is_desc_really_done(uc, uc->desc); + } + + if (!desc_done) { + jiffie_diff = uc->tx_drain.jiffie - jiffie_diff; + residue_diff -= uc->tx_drain.residue; + if (residue_diff) { + /* Try to guess when we should check next time */ + residue_diff /= jiffie_diff; + delay = uc->tx_drain.residue / residue_diff / 3; + if (jiffies_to_msecs(delay) < 5) + delay = 0; + } else { + /* No progress, check again in 1 second */ + delay = HZ; + } + + schedule_delayed_work(&uc->tx_drain.work, delay); + } else if (uc->desc) { + struct udma_desc *d = uc->desc; + + uc->bcnt += d->residue; + udma_start(uc); + vchan_cookie_complete(&d->vd); + } +} + +static irqreturn_t udma_ring_irq_handler(int irq, void *data) +{ + struct udma_chan *uc = data; + struct udma_desc *d; + unsigned long flags; + dma_addr_t paddr = 0; + + if (udma_pop_from_ring(uc, &paddr) || !paddr) + return IRQ_HANDLED; + + spin_lock_irqsave(&uc->vc.lock, flags); + + /* Teardown completion message */ + if (cppi5_desc_is_tdcm(paddr)) { + /* Compensate our internal pop/push counter */ + uc->in_ring_cnt++; + + complete_all(&uc->teardown_completed); + + if (uc->terminated_desc) { + udma_desc_free(&uc->terminated_desc->vd); + uc->terminated_desc = NULL; + } + + if (!uc->desc) + udma_start(uc); + + goto out; + } + + d = 
udma_udma_desc_from_paddr(uc, paddr); + + if (d) { + dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d, + d->desc_idx); + if (desc_paddr != paddr) { + dev_err(uc->ud->dev, "not matching descriptors!\n"); + goto out; + } + + if (uc->cyclic) { + /* push the descriptor back to the ring */ + if (d == uc->desc) { + udma_cyclic_packet_elapsed(uc); + vchan_cyclic_callback(&d->vd); + } + } else { + bool desc_done = false; + + if (d == uc->desc) { + desc_done = udma_is_desc_really_done(uc, d); + + if (desc_done) { + uc->bcnt += d->residue; + udma_start(uc); + } else { + schedule_delayed_work(&uc->tx_drain.work, + 0); + } + } + + if (desc_done) + vchan_cookie_complete(&d->vd); + } + } +out: + spin_unlock_irqrestore(&uc->vc.lock, flags); + + return IRQ_HANDLED; +} + +static irqreturn_t udma_udma_irq_handler(int irq, void *data) +{ + struct udma_chan *uc = data; + struct udma_desc *d; + unsigned long flags; + + spin_lock_irqsave(&uc->vc.lock, flags); + d = uc->desc; + if (d) { + d->tr_idx = (d->tr_idx + 1) % d->sglen; + + if (uc->cyclic) { + vchan_cyclic_callback(&d->vd); + } else { + /* TODO: figure out the real amount of data */ + uc->bcnt += d->residue; + udma_start(uc); + vchan_cookie_complete(&d->vd); + } + } + + spin_unlock_irqrestore(&uc->vc.lock, flags); + + return IRQ_HANDLED; +} + +/** + * __udma_alloc_gp_rflow_range - alloc range of GP RX flows + * @ud: UDMA device + * @from: Start the search from this flow id number + * @cnt: Number of consecutive flow ids to allocate + * + * Allocate range of RX flow ids for future use, those flows can be requested + * only using explicit flow id number. if @from is set to -1 it will try to find + * first free range. if @from is positive value it will force allocation only + * of the specified range of flows. + * + * Returns -ENOMEM if can't find free range. + * -EEXIST if requested range is busy. + * -EINVAL if wrong input values passed. + * Returns flow id on success. + */ +static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt) +{ + int start, tmp_from; + DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS); + + tmp_from = from; + if (tmp_from < 0) + tmp_from = ud->rchan_cnt; + /* default flows can't be allocated and accessible only by id */ + if (tmp_from < ud->rchan_cnt) + return -EINVAL; + + if (tmp_from + cnt > ud->rflow_cnt) + return -EINVAL; + + bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated, + ud->rflow_cnt); + + start = bitmap_find_next_zero_area(tmp, + ud->rflow_cnt, + tmp_from, cnt, 0); + if (start >= ud->rflow_cnt) + return -ENOMEM; + + if (from >= 0 && start != from) + return -EEXIST; + + bitmap_set(ud->rflow_gp_map_allocated, start, cnt); + return start; +} + +static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt) +{ + if (from < ud->rchan_cnt) + return -EINVAL; + if (from + cnt > ud->rflow_cnt) + return -EINVAL; + + bitmap_clear(ud->rflow_gp_map_allocated, from, cnt); + return 0; +} + +static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id) +{ + /* + * Attempt to request rflow by ID can be made for any rflow + * if not in use with assumption that caller knows what's doing. 
+ * TI-SCI FW will perform an additional permission check anyway, so it's + * safe. + */ + + if (id < 0 || id >= ud->rflow_cnt) + return ERR_PTR(-ENOENT); + + if (test_bit(id, ud->rflow_in_use)) + return ERR_PTR(-ENOENT); + + /* GP rflow has to be allocated first */ + if (!test_bit(id, ud->rflow_gp_map) && + !test_bit(id, ud->rflow_gp_map_allocated)) + return ERR_PTR(-EINVAL); + + dev_dbg(ud->dev, "get rflow%d\n", id); + set_bit(id, ud->rflow_in_use); + return &ud->rflows[id]; +} + +static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow) +{ + if (!test_bit(rflow->id, ud->rflow_in_use)) { + dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id); + return; + } + + dev_dbg(ud->dev, "put rflow%d\n", rflow->id); + clear_bit(rflow->id, ud->rflow_in_use); +} + +#define UDMA_RESERVE_RESOURCE(res) \ +static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \ + enum udma_tp_level tpl, \ + int id) \ +{ \ + if (id >= 0) { \ + if (test_bit(id, ud->res##_map)) { \ + dev_err(ud->dev, "%s%d is in use\n", #res, id); \ + return ERR_PTR(-ENOENT); \ + } \ + } else { \ + int start; \ + \ + if (tpl >= ud->match_data->tpl_levels) \ + tpl = ud->match_data->tpl_levels - 1; \ + \ + start = ud->match_data->level_start_idx[tpl]; \ + \ + id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \ + start); \ + if (id == ud->res##_cnt) { \ + return ERR_PTR(-ENOENT); \ + } \ + } \ + \ + set_bit(id, ud->res##_map); \ + return &ud->res##s[id]; \ +} + +UDMA_RESERVE_RESOURCE(tchan); +UDMA_RESERVE_RESOURCE(rchan); + +static int udma_get_tchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->tchan) { + dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n", + uc->id, uc->tchan->id); + return 0; + } + + uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl, -1); + if (IS_ERR(uc->tchan)) + return PTR_ERR(uc->tchan); + + return 0; +} + +static int udma_get_rchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rchan) { + dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n", + uc->id, uc->rchan->id); + return 0; + } + + uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl, -1); + if (IS_ERR(uc->rchan)) + return PTR_ERR(uc->rchan); + + return 0; +} + +static int udma_get_chan_pair(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + const struct udma_match_data *match_data = ud->match_data; + int chan_id, end; + + if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) { + dev_info(ud->dev, "chan%d: already have %d pair allocated\n", + uc->id, uc->tchan->id); + return 0; + } + + if (uc->tchan) { + dev_err(ud->dev, "chan%d: already have tchan%d allocated\n", + uc->id, uc->tchan->id); + return -EBUSY; + } else if (uc->rchan) { + dev_err(ud->dev, "chan%d: already have rchan%d allocated\n", + uc->id, uc->rchan->id); + return -EBUSY; + } + + /* Can be optimized, but let's have it like this for now */ + end = min(ud->tchan_cnt, ud->rchan_cnt); + /* Try to use the highest TPL channel pair for MEM_TO_MEM channels */ + chan_id = match_data->level_start_idx[match_data->tpl_levels - 1]; + for (; chan_id < end; chan_id++) { + if (!test_bit(chan_id, ud->tchan_map) && + !test_bit(chan_id, ud->rchan_map)) + break; + } + + if (chan_id == end) + return -ENOENT; + + set_bit(chan_id, ud->tchan_map); + set_bit(chan_id, ud->rchan_map); + uc->tchan = &ud->tchans[chan_id]; + uc->rchan = &ud->rchans[chan_id]; + + return 0; +} + +static int udma_get_rflow(struct udma_chan *uc, int flow_id) +{ + struct udma_dev *ud = uc->ud; + + if (!uc->rchan) { + 
dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id); + return -EINVAL; + } + + if (uc->rflow) { + dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n", + uc->id, uc->rflow->id); + return 0; + } + + uc->rflow = __udma_get_rflow(ud, flow_id); + if (IS_ERR(uc->rflow)) + return PTR_ERR(uc->rflow); + + return 0; +} + +static void udma_put_rchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rchan) { + dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id, + uc->rchan->id); + clear_bit(uc->rchan->id, ud->rchan_map); + uc->rchan = NULL; + } +} + +static void udma_put_tchan(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->tchan) { + dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id, + uc->tchan->id); + clear_bit(uc->tchan->id, ud->tchan_map); + uc->tchan = NULL; + } +} + +static void udma_put_rflow(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + + if (uc->rflow) { + dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id, + uc->rflow->id); + __udma_put_rflow(ud, uc->rflow); + uc->rflow = NULL; + } +} + +static void udma_free_tx_resources(struct udma_chan *uc) +{ + if (!uc->tchan) + return; + + k3_ringacc_ring_free(uc->tchan->t_ring); + k3_ringacc_ring_free(uc->tchan->tc_ring); + uc->tchan->t_ring = NULL; + uc->tchan->tc_ring = NULL; + + udma_put_tchan(uc); +} + +static int udma_alloc_tx_resources(struct udma_chan *uc) +{ + struct k3_ring_cfg ring_cfg; + struct udma_dev *ud = uc->ud; + int ret; + + ret = udma_get_tchan(uc); + if (ret) + return ret; + + uc->tchan->t_ring = k3_ringacc_request_ring(ud->ringacc, + uc->tchan->id, 0); + if (!uc->tchan->t_ring) { + ret = -EBUSY; + goto err_tx_ring; + } + + uc->tchan->tc_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0); + if (!uc->tchan->tc_ring) { + ret = -EBUSY; + goto err_txc_ring; + } + + memset(&ring_cfg, 0, sizeof(ring_cfg)); + ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; + ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; + ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; + + ret = k3_ringacc_ring_cfg(uc->tchan->t_ring, &ring_cfg); + ret |= k3_ringacc_ring_cfg(uc->tchan->tc_ring, &ring_cfg); + + if (ret) + goto err_ringcfg; + + return 0; + +err_ringcfg: + k3_ringacc_ring_free(uc->tchan->tc_ring); + uc->tchan->tc_ring = NULL; +err_txc_ring: + k3_ringacc_ring_free(uc->tchan->t_ring); + uc->tchan->t_ring = NULL; +err_tx_ring: + udma_put_tchan(uc); + + return ret; +} + +static void udma_free_rx_resources(struct udma_chan *uc) +{ + if (!uc->rchan) + return; + + if (uc->rflow) { + struct udma_rflow *rflow = uc->rflow; + + k3_ringacc_ring_free(rflow->fd_ring); + k3_ringacc_ring_free(rflow->r_ring); + rflow->fd_ring = NULL; + rflow->r_ring = NULL; + + udma_put_rflow(uc); + } + + udma_put_rchan(uc); +} + +static int udma_alloc_rx_resources(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct k3_ring_cfg ring_cfg; + struct udma_rflow *rflow; + int fd_ring_id; + int ret; + + ret = udma_get_rchan(uc); + if (ret) + return ret; + + /* For MEM_TO_MEM we don't need rflow or rings */ + if (uc->config.dir == DMA_MEM_TO_MEM) + return 0; + + ret = udma_get_rflow(uc, uc->rchan->id); + if (ret) { + ret = -EBUSY; + goto err_rflow; + } + + rflow = uc->rflow; + fd_ring_id = ud->tchan_cnt + ud->echan_cnt + uc->rchan->id; + rflow->fd_ring = k3_ringacc_request_ring(ud->ringacc, fd_ring_id, 0); + if (!rflow->fd_ring) { + ret = -EBUSY; + goto err_rx_ring; + } + + rflow->r_ring = k3_ringacc_request_ring(ud->ringacc, -1, 0); + if (!rflow->r_ring) { + ret = -EBUSY; + goto err_rxc_ring; + } + + memset(&ring_cfg, 0, 
sizeof(ring_cfg)); + + if (uc->config.pkt_mode) + ring_cfg.size = SG_MAX_SEGMENTS; + else + ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; + + ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8; + ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE; + + ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg); + ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE; + ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg); + + if (ret) + goto err_ringcfg; + + return 0; + +err_ringcfg: + k3_ringacc_ring_free(rflow->r_ring); + rflow->r_ring = NULL; +err_rxc_ring: + k3_ringacc_ring_free(rflow->fd_ring); + rflow->fd_ring = NULL; +err_rx_ring: + udma_put_rflow(uc); +err_rflow: + udma_put_rchan(uc); + + return ret; +} + +#define TISCI_TCHAN_VALID_PARAMS ( \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID) + +#define TISCI_RCHAN_VALID_PARAMS ( \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \ + TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID) + +static int udma_tisci_m2m_channel_config(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; + struct udma_tchan *tchan = uc->tchan; + struct udma_rchan *rchan = uc->rchan; + int ret = 0; + + /* Non synchronized - mem to mem type of transfer */ + int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); + struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; + struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; + + req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS; + req_tx.nav_id = tisci_rm->tisci_dev_id; + req_tx.index = tchan->id; + req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; + req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + req_tx.txcq_qnum = tc_ring; + + ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); + if (ret) { + dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); + return ret; + } + + req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS; + req_rx.nav_id = tisci_rm->tisci_dev_id; + req_rx.index = rchan->id; + req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2; + req_rx.rxcq_qnum = tc_ring; + req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR; + + ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); + if (ret) + dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret); + + return ret; +} + +static int udma_tisci_tx_channel_config(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; + struct udma_tchan *tchan = uc->tchan; + int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring); + struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 }; + u32 mode, fetch_size; + int ret = 0; + + if (uc->config.pkt_mode) { + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, + uc->config.psd_size, 
0); + } else { + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; + fetch_size = sizeof(struct cppi5_desc_hdr_t); + } + + req_tx.valid_params = TISCI_TCHAN_VALID_PARAMS; + req_tx.nav_id = tisci_rm->tisci_dev_id; + req_tx.index = tchan->id; + req_tx.tx_chan_type = mode; + req_tx.tx_supr_tdpkt = uc->config.notdpkt; + req_tx.tx_fetch_size = fetch_size >> 2; + req_tx.txcq_qnum = tc_ring; + + ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx); + if (ret) + dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret); + + return ret; +} + +static int udma_tisci_rx_channel_config(struct udma_chan *uc) +{ + struct udma_dev *ud = uc->ud; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops; + struct udma_rchan *rchan = uc->rchan; + int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring); + int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring); + struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 }; + struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 }; + u32 mode, fetch_size; + int ret = 0; + + if (uc->config.pkt_mode) { + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR; + fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib, + uc->config.psd_size, 0); + } else { + mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR; + fetch_size = sizeof(struct cppi5_desc_hdr_t); + } + + req_rx.valid_params = TISCI_RCHAN_VALID_PARAMS; + req_rx.nav_id = tisci_rm->tisci_dev_id; + req_rx.index = rchan->id; + req_rx.rx_fetch_size = fetch_size >> 2; + req_rx.rxcq_qnum = rx_ring; + req_rx.rx_chan_type = mode; + + ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx); + if (ret) { + dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret); + return ret; + } + + flow_req.valid_params = + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID | + TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID; + + flow_req.nav_id = tisci_rm->tisci_dev_id; + flow_req.flow_index = rchan->id; + + if (uc->config.needs_epib) + flow_req.rx_einfo_present = 1; + else + flow_req.rx_einfo_present = 0; + if (uc->config.psd_size) + flow_req.rx_psinfo_present = 1; + else + flow_req.rx_psinfo_present = 0; + flow_req.rx_error_handling = 1; + flow_req.rx_dest_qnum = rx_ring; + flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE; + flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG; + flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI; + flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO; + flow_req.rx_fdq0_sz0_qnum = fd_ring; + flow_req.rx_fdq1_qnum = fd_ring; + flow_req.rx_fdq2_qnum = fd_ring; + flow_req.rx_fdq3_qnum = fd_ring; + + ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req); + + if (ret) + dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret); + + return ret; +} + +static int udma_alloc_chan_resources(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + struct udma_dev *ud = to_udma_dev(chan->device); + const struct udma_match_data *match_data = 
ud->match_data; + struct k3_ring *irq_ring; + u32 irq_udma_idx; + int ret; + + if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) { + uc->use_dma_pool = true; + /* in case of MEM_TO_MEM we have maximum of two TRs */ + if (uc->config.dir == DMA_MEM_TO_MEM) { + uc->config.hdesc_size = cppi5_trdesc_calc_size( + sizeof(struct cppi5_tr_type15_t), 2); + uc->config.pkt_mode = false; + } + } + + if (uc->use_dma_pool) { + uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev, + uc->config.hdesc_size, + ud->desc_align, + 0); + if (!uc->hdesc_pool) { + dev_err(ud->ddev.dev, + "Descriptor pool allocation failed\n"); + uc->use_dma_pool = false; + return -ENOMEM; + } + } + + /* + * Make sure that the completion is in a known state: + * No teardown, the channel is idle + */ + reinit_completion(&uc->teardown_completed); + complete_all(&uc->teardown_completed); + uc->state = UDMA_CHAN_IS_IDLE; + + switch (uc->config.dir) { + case DMA_MEM_TO_MEM: + /* Non synchronized - mem to mem type of transfer */ + dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__, + uc->id); + + ret = udma_get_chan_pair(uc); + if (ret) + return ret; + + ret = udma_alloc_tx_resources(uc); + if (ret) + return ret; + + ret = udma_alloc_rx_resources(uc); + if (ret) { + udma_free_tx_resources(uc); + return ret; + } + + uc->config.src_thread = ud->psil_base + uc->tchan->id; + uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | + K3_PSIL_DST_THREAD_ID_OFFSET; + + irq_ring = uc->tchan->tc_ring; + irq_udma_idx = uc->tchan->id; + + ret = udma_tisci_m2m_channel_config(uc); + break; + case DMA_MEM_TO_DEV: + /* Slave transfer synchronized - mem to dev (TX) transfer */ + dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__, + uc->id); + + ret = udma_alloc_tx_resources(uc); + if (ret) { + uc->config.remote_thread_id = -1; + return ret; + } + + uc->config.src_thread = ud->psil_base + uc->tchan->id; + uc->config.dst_thread = uc->config.remote_thread_id; + uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET; + + irq_ring = uc->tchan->tc_ring; + irq_udma_idx = uc->tchan->id; + + ret = udma_tisci_tx_channel_config(uc); + break; + case DMA_DEV_TO_MEM: + /* Slave transfer synchronized - dev to mem (RX) transfer */ + dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__, + uc->id); + + ret = udma_alloc_rx_resources(uc); + if (ret) { + uc->config.remote_thread_id = -1; + return ret; + } + + uc->config.src_thread = uc->config.remote_thread_id; + uc->config.dst_thread = (ud->psil_base + uc->rchan->id) | + K3_PSIL_DST_THREAD_ID_OFFSET; + + irq_ring = uc->rflow->r_ring; + irq_udma_idx = match_data->rchan_oes_offset + uc->rchan->id; + + ret = udma_tisci_rx_channel_config(uc); + break; + default: + /* Can not happen */ + dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n", + __func__, uc->id, uc->config.dir); + return -EINVAL; + } + + /* check if the channel configuration was successful */ + if (ret) + goto err_res_free; + + if (udma_is_chan_running(uc)) { + dev_warn(ud->dev, "chan%d: is running!\n", uc->id); + udma_stop(uc); + if (udma_is_chan_running(uc)) { + dev_err(ud->dev, "chan%d: won't stop!\n", uc->id); + goto err_res_free; + } + } + + /* PSI-L pairing */ + ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread); + if (ret) { + dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n", + uc->config.src_thread, uc->config.dst_thread); + goto err_res_free; + } + + uc->psil_paired = true; + + uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring); + if (uc->irq_num_ring <= 0) { + 
dev_err(ud->dev, "Failed to get ring irq (index: %u)\n", + k3_ringacc_get_ring_id(irq_ring)); + ret = -EINVAL; + goto err_psi_free; + } + + ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler, + IRQF_TRIGGER_HIGH, uc->name, uc); + if (ret) { + dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id); + goto err_irq_free; + } + + /* Event from UDMA (TR events) only needed for slave TR mode channels */ + if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) { + uc->irq_num_udma = ti_sci_inta_msi_get_virq(ud->dev, + irq_udma_idx); + if (uc->irq_num_udma <= 0) { + dev_err(ud->dev, "Failed to get udma irq (index: %u)\n", + irq_udma_idx); + free_irq(uc->irq_num_ring, uc); + ret = -EINVAL; + goto err_irq_free; + } + + ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0, + uc->name, uc); + if (ret) { + dev_err(ud->dev, "chan%d: UDMA irq request failed\n", + uc->id); + free_irq(uc->irq_num_ring, uc); + goto err_irq_free; + } + } else { + uc->irq_num_udma = 0; + } + + udma_reset_rings(uc); + + INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work, + udma_check_tx_completion); + return 0; + +err_irq_free: + uc->irq_num_ring = 0; + uc->irq_num_udma = 0; +err_psi_free: + navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread); + uc->psil_paired = false; +err_res_free: + udma_free_tx_resources(uc); + udma_free_rx_resources(uc); + + udma_reset_uchan(uc); + + if (uc->use_dma_pool) { + dma_pool_destroy(uc->hdesc_pool); + uc->use_dma_pool = false; + } + + return ret; +} + +static int udma_slave_config(struct dma_chan *chan, + struct dma_slave_config *cfg) +{ + struct udma_chan *uc = to_udma_chan(chan); + + memcpy(&uc->cfg, cfg, sizeof(uc->cfg)); + + return 0; +} + +static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc, + size_t tr_size, int tr_count, + enum dma_transfer_direction dir) +{ + struct udma_hwdesc *hwdesc; + struct cppi5_desc_hdr_t *tr_desc; + struct udma_desc *d; + u32 reload_count = 0; + u32 ring_id; + + switch (tr_size) { + case 16: + case 32: + case 64: + case 128: + break; + default: + dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size); + return NULL; + } + + /* We have only one descriptor containing multiple TRs */ + d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT); + if (!d) + return NULL; + + d->sglen = tr_count; + + d->hwdesc_count = 1; + hwdesc = &d->hwdesc[0]; + + /* Allocate memory for DMA ring descriptor */ + if (uc->use_dma_pool) { + hwdesc->cppi5_desc_size = uc->config.hdesc_size; + hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, + GFP_NOWAIT, + &hwdesc->cppi5_desc_paddr); + } else { + hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, + tr_count); + hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size, + uc->ud->desc_align); + hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev, + hwdesc->cppi5_desc_size, + &hwdesc->cppi5_desc_paddr, + GFP_NOWAIT); + } + + if (!hwdesc->cppi5_desc_vaddr) { + kfree(d); + return NULL; + } + + /* Start of the TR req records */ + hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size; + /* Start address of the TR response array */ + hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count; + + tr_desc = hwdesc->cppi5_desc_vaddr; + + if (uc->cyclic) + reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE; + + if (dir == DMA_DEV_TO_MEM) + ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); + else + ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); + + cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count); + cppi5_desc_set_pktids(tr_desc, uc->id, 
+ CPPI5_INFO1_DESC_FLOWID_DEFAULT); + cppi5_desc_set_retpolicy(tr_desc, 0, ring_id); + + return d; +} + +static struct udma_desc * +udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl, + unsigned int sglen, enum dma_transfer_direction dir, + unsigned long tx_flags, void *context) +{ + enum dma_slave_buswidth dev_width; + struct scatterlist *sgent; + struct udma_desc *d; + size_t tr_size; + struct cppi5_tr_type1_t *tr_req = NULL; + unsigned int i; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_width = uc->cfg.src_addr_width; + burst = uc->cfg.src_maxburst; + } else if (dir == DMA_MEM_TO_DEV) { + dev_width = uc->cfg.dst_addr_width; + burst = uc->cfg.dst_maxburst; + } else { + dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (!burst) + burst = 1; + + /* Now allocate and setup the descriptor. */ + tr_size = sizeof(struct cppi5_tr_type1_t); + d = udma_alloc_tr_desc(uc, tr_size, sglen, dir); + if (!d) + return NULL; + + d->sglen = sglen; + + tr_req = d->hwdesc[0].tr_req_base; + for_each_sg(sgl, sgent, sglen, i) { + d->residue += sg_dma_len(sgent); + + cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req[i].addr = sg_dma_address(sgent); + tr_req[i].icnt0 = burst * dev_width; + tr_req[i].dim1 = burst * dev_width; + tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0; + } + + cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP); + + return d; +} + +static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d, + enum dma_slave_buswidth dev_width, + u16 elcnt) +{ + if (uc->config.ep_type != PSIL_EP_PDMA_XY) + return 0; + + /* Bus width translates to the element size (ES) */ + switch (dev_width) { + case DMA_SLAVE_BUSWIDTH_1_BYTE: + d->static_tr.elsize = 0; + break; + case DMA_SLAVE_BUSWIDTH_2_BYTES: + d->static_tr.elsize = 1; + break; + case DMA_SLAVE_BUSWIDTH_3_BYTES: + d->static_tr.elsize = 2; + break; + case DMA_SLAVE_BUSWIDTH_4_BYTES: + d->static_tr.elsize = 3; + break; + case DMA_SLAVE_BUSWIDTH_8_BYTES: + d->static_tr.elsize = 4; + break; + default: /* not reached */ + return -EINVAL; + } + + d->static_tr.elcnt = elcnt; + + /* + * PDMA must close the packet when the channel is in packet mode. + * For TR mode when the channel is not cyclic we also need PDMA to close + * the packet, otherwise the transfer will stall because PDMA holds on to + * the data it has received from the peripheral. 
+ */ + if (uc->config.pkt_mode || !uc->cyclic) { + unsigned int div = dev_width * elcnt; + + if (uc->cyclic) + d->static_tr.bstcnt = d->residue / d->sglen / div; + else + d->static_tr.bstcnt = d->residue / div; + + if (uc->config.dir == DMA_DEV_TO_MEM && + d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask) + return -EINVAL; + } else { + d->static_tr.bstcnt = 0; + } + + return 0; +} + +static struct udma_desc * +udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl, + unsigned int sglen, enum dma_transfer_direction dir, + unsigned long tx_flags, void *context) +{ + struct scatterlist *sgent; + struct cppi5_host_desc_t *h_desc = NULL; + struct udma_desc *d; + u32 ring_id; + unsigned int i; + + d = kzalloc(sizeof(*d) + sglen * sizeof(d->hwdesc[0]), GFP_NOWAIT); + if (!d) + return NULL; + + d->sglen = sglen; + d->hwdesc_count = sglen; + + if (dir == DMA_DEV_TO_MEM) + ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); + else + ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); + + for_each_sg(sgl, sgent, sglen, i) { + struct udma_hwdesc *hwdesc = &d->hwdesc[i]; + dma_addr_t sg_addr = sg_dma_address(sgent); + struct cppi5_host_desc_t *desc; + size_t sg_len = sg_dma_len(sgent); + + hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, + GFP_NOWAIT, + &hwdesc->cppi5_desc_paddr); + if (!hwdesc->cppi5_desc_vaddr) { + dev_err(uc->ud->dev, + "descriptor%d allocation failed\n", i); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + d->residue += sg_len; + hwdesc->cppi5_desc_size = uc->config.hdesc_size; + desc = hwdesc->cppi5_desc_vaddr; + + if (i == 0) { + cppi5_hdesc_init(desc, 0, 0); + /* Flow and Packed ID */ + cppi5_desc_set_pktids(&desc->hdr, uc->id, + CPPI5_INFO1_DESC_FLOWID_DEFAULT); + cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id); + } else { + cppi5_hdesc_reset_hbdesc(desc); + cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff); + } + + /* attach the sg buffer to the descriptor */ + cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len); + + /* Attach link as host buffer descriptor */ + if (h_desc) + cppi5_hdesc_link_hbdesc(h_desc, + hwdesc->cppi5_desc_paddr); + + if (dir == DMA_MEM_TO_DEV) + h_desc = desc; + } + + if (d->residue >= SZ_4M) { + dev_err(uc->ud->dev, + "%s: Transfer size %u is over the supported 4M range\n", + __func__, d->residue); + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + cppi5_hdesc_set_pktlen(h_desc, d->residue); + + return d; +} + +static int udma_attach_metadata(struct dma_async_tx_descriptor *desc, + void *data, size_t len) +{ + struct udma_desc *d = to_udma_desc(desc); + struct udma_chan *uc = to_udma_chan(desc->chan); + struct cppi5_host_desc_t *h_desc; + u32 psd_size = len; + u32 flags = 0; + + if (!uc->config.pkt_mode || !uc->config.metadata_size) + return -ENOTSUPP; + + if (!data || len > uc->config.metadata_size) + return -EINVAL; + + if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE) + return -EINVAL; + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + if (d->dir == DMA_MEM_TO_DEV) + memcpy(h_desc->epib, data, len); + + if (uc->config.needs_epib) + psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; + + d->metadata = data; + d->metadata_size = len; + if (uc->config.needs_epib) + flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; + + cppi5_hdesc_update_flags(h_desc, flags); + cppi5_hdesc_update_psdata_size(h_desc, psd_size); + + return 0; +} + +static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc, + size_t *payload_len, size_t *max_len) +{ + struct 
udma_desc *d = to_udma_desc(desc); + struct udma_chan *uc = to_udma_chan(desc->chan); + struct cppi5_host_desc_t *h_desc; + + if (!uc->config.pkt_mode || !uc->config.metadata_size) + return ERR_PTR(-ENOTSUPP); + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + + *max_len = uc->config.metadata_size; + + *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ? + CPPI5_INFO0_HDESC_EPIB_SIZE : 0; + *payload_len += cppi5_hdesc_get_psdata_size(h_desc); + + return h_desc->epib; +} + +static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc, + size_t payload_len) +{ + struct udma_desc *d = to_udma_desc(desc); + struct udma_chan *uc = to_udma_chan(desc->chan); + struct cppi5_host_desc_t *h_desc; + u32 psd_size = payload_len; + u32 flags = 0; + + if (!uc->config.pkt_mode || !uc->config.metadata_size) + return -ENOTSUPP; + + if (payload_len > uc->config.metadata_size) + return -EINVAL; + + if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE) + return -EINVAL; + + h_desc = d->hwdesc[0].cppi5_desc_vaddr; + + if (uc->config.needs_epib) { + psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE; + flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT; + } + + cppi5_hdesc_update_flags(h_desc, flags); + cppi5_hdesc_update_psdata_size(h_desc, psd_size); + + return 0; +} + +static struct dma_descriptor_metadata_ops metadata_ops = { + .attach = udma_attach_metadata, + .get_ptr = udma_get_metadata_ptr, + .set_len = udma_set_metadata_len, +}; + +static struct dma_async_tx_descriptor * +udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, + unsigned int sglen, enum dma_transfer_direction dir, + unsigned long tx_flags, void *context) +{ + struct udma_chan *uc = to_udma_chan(chan); + enum dma_slave_buswidth dev_width; + struct udma_desc *d; + u32 burst; + + if (dir != uc->config.dir) { + dev_err(chan->device->dev, + "%s: chan%d is for %s, not supporting %s\n", + __func__, uc->id, + dmaengine_get_direction_text(uc->config.dir), + dmaengine_get_direction_text(dir)); + return NULL; + } + + if (dir == DMA_DEV_TO_MEM) { + dev_width = uc->cfg.src_addr_width; + burst = uc->cfg.src_maxburst; + } else if (dir == DMA_MEM_TO_DEV) { + dev_width = uc->cfg.dst_addr_width; + burst = uc->cfg.dst_maxburst; + } else { + dev_err(chan->device->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (!burst) + burst = 1; + + if (uc->config.pkt_mode) + d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags, + context); + else + d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags, + context); + + if (!d) + return NULL; + + d->dir = dir; + d->desc_idx = 0; + d->tr_idx = 0; + + /* static TR for remote PDMA */ + if (udma_configure_statictr(uc, d, dev_width, burst)) { + dev_err(uc->ud->dev, + "%s: StaticTR Z is limited to maximum 4095 (%u)\n", + __func__, d->static_tr.bstcnt); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + if (uc->config.metadata_size) + d->vd.tx.metadata_ops = &metadata_ops; + + return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); +} + +static struct udma_desc * +udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction dir, unsigned long flags) +{ + enum dma_slave_buswidth dev_width; + struct udma_desc *d; + size_t tr_size; + struct cppi5_tr_type1_t *tr_req; + unsigned int i; + unsigned int periods = buf_len / period_len; + u32 burst; + + if (dir == DMA_DEV_TO_MEM) { + dev_width = uc->cfg.src_addr_width; + burst = uc->cfg.src_maxburst; + } else if (dir == DMA_MEM_TO_DEV) { + dev_width = 
uc->cfg.dst_addr_width; + burst = uc->cfg.dst_maxburst; + } else { + dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (!burst) + burst = 1; + + /* Now allocate and setup the descriptor. */ + tr_size = sizeof(struct cppi5_tr_type1_t); + d = udma_alloc_tr_desc(uc, tr_size, periods, dir); + if (!d) + return NULL; + + tr_req = d->hwdesc[0].tr_req_base; + for (i = 0; i < periods; i++) { + cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + + tr_req[i].addr = buf_addr + period_len * i; + tr_req[i].icnt0 = dev_width; + tr_req[i].icnt1 = period_len / dev_width; + tr_req[i].dim1 = dev_width; + + if (!(flags & DMA_PREP_INTERRUPT)) + cppi5_tr_csf_set(&tr_req[i].flags, + CPPI5_TR_CSF_SUPR_EVT); + } + + return d; +} + +static struct udma_desc * +udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr, + size_t buf_len, size_t period_len, + enum dma_transfer_direction dir, unsigned long flags) +{ + struct udma_desc *d; + u32 ring_id; + int i; + int periods = buf_len / period_len; + + if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1)) + return NULL; + + if (period_len >= SZ_4M) + return NULL; + + d = kzalloc(sizeof(*d) + periods * sizeof(d->hwdesc[0]), GFP_NOWAIT); + if (!d) + return NULL; + + d->hwdesc_count = periods; + + /* TODO: re-check this... */ + if (dir == DMA_DEV_TO_MEM) + ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring); + else + ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring); + + for (i = 0; i < periods; i++) { + struct udma_hwdesc *hwdesc = &d->hwdesc[i]; + dma_addr_t period_addr = buf_addr + (period_len * i); + struct cppi5_host_desc_t *h_desc; + + hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool, + GFP_NOWAIT, + &hwdesc->cppi5_desc_paddr); + if (!hwdesc->cppi5_desc_vaddr) { + dev_err(uc->ud->dev, + "descriptor%d allocation failed\n", i); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + hwdesc->cppi5_desc_size = uc->config.hdesc_size; + h_desc = hwdesc->cppi5_desc_vaddr; + + cppi5_hdesc_init(h_desc, 0, 0); + cppi5_hdesc_set_pktlen(h_desc, period_len); + + /* Flow and Packed ID */ + cppi5_desc_set_pktids(&h_desc->hdr, uc->id, + CPPI5_INFO1_DESC_FLOWID_DEFAULT); + cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id); + + /* attach each period to a new descriptor */ + cppi5_hdesc_attach_buf(h_desc, + period_addr, period_len, + period_addr, period_len); + } + + return d; +} + +static struct dma_async_tx_descriptor * +udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, + size_t period_len, enum dma_transfer_direction dir, + unsigned long flags) +{ + struct udma_chan *uc = to_udma_chan(chan); + enum dma_slave_buswidth dev_width; + struct udma_desc *d; + u32 burst; + + if (dir != uc->config.dir) { + dev_err(chan->device->dev, + "%s: chan%d is for %s, not supporting %s\n", + __func__, uc->id, + dmaengine_get_direction_text(uc->config.dir), + dmaengine_get_direction_text(dir)); + return NULL; + } + + uc->cyclic = true; + + if (dir == DMA_DEV_TO_MEM) { + dev_width = uc->cfg.src_addr_width; + burst = uc->cfg.src_maxburst; + } else if (dir == DMA_MEM_TO_DEV) { + dev_width = uc->cfg.dst_addr_width; + burst = uc->cfg.dst_maxburst; + } else { + dev_err(uc->ud->dev, "%s: bad direction?\n", __func__); + return NULL; + } + + if (!burst) + burst = 1; + + if (uc->config.pkt_mode) + d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len, + dir, flags); + else + d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len, + dir, flags); + + if (!d) + 
return NULL; + + d->sglen = buf_len / period_len; + + d->dir = dir; + d->residue = buf_len; + + /* static TR for remote PDMA */ + if (udma_configure_statictr(uc, d, dev_width, burst)) { + dev_err(uc->ud->dev, + "%s: StaticTR Z is limited to maximum 4095 (%u)\n", + __func__, d->static_tr.bstcnt); + + udma_free_hwdesc(uc, d); + kfree(d); + return NULL; + } + + if (uc->config.metadata_size) + d->vd.tx.metadata_ops = &metadata_ops; + + return vchan_tx_prep(&uc->vc, &d->vd, flags); +} + +static struct dma_async_tx_descriptor * +udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, + size_t len, unsigned long tx_flags) +{ + struct udma_chan *uc = to_udma_chan(chan); + struct udma_desc *d; + struct cppi5_tr_type15_t *tr_req; + int num_tr; + size_t tr_size = sizeof(struct cppi5_tr_type15_t); + u16 tr0_cnt0, tr0_cnt1, tr1_cnt0; + + if (uc->config.dir != DMA_MEM_TO_MEM) { + dev_err(chan->device->dev, + "%s: chan%d is for %s, not supporting %s\n", + __func__, uc->id, + dmaengine_get_direction_text(uc->config.dir), + dmaengine_get_direction_text(DMA_MEM_TO_MEM)); + return NULL; + } + + if (len < SZ_64K) { + num_tr = 1; + tr0_cnt0 = len; + tr0_cnt1 = 1; + } else { + unsigned long align_to = __ffs(src | dest); + + if (align_to > 3) + align_to = 3; + /* + * Keep simple: tr0: SZ_64K-alignment blocks, + * tr1: the remaining + */ + num_tr = 2; + tr0_cnt0 = (SZ_64K - BIT(align_to)); + if (len / tr0_cnt0 >= SZ_64K) { + dev_err(uc->ud->dev, "size %zu is not supported\n", + len); + return NULL; + } + + tr0_cnt1 = len / tr0_cnt0; + tr1_cnt0 = len % tr0_cnt0; + } + + d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM); + if (!d) + return NULL; + + d->dir = DMA_MEM_TO_MEM; + d->desc_idx = 0; + d->tr_idx = 0; + d->residue = len; + + tr_req = d->hwdesc[0].tr_req_base; + + cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req[0].addr = src; + tr_req[0].icnt0 = tr0_cnt0; + tr_req[0].icnt1 = tr0_cnt1; + tr_req[0].icnt2 = 1; + tr_req[0].icnt3 = 1; + tr_req[0].dim1 = tr0_cnt0; + + tr_req[0].daddr = dest; + tr_req[0].dicnt0 = tr0_cnt0; + tr_req[0].dicnt1 = tr0_cnt1; + tr_req[0].dicnt2 = 1; + tr_req[0].dicnt3 = 1; + tr_req[0].ddim1 = tr0_cnt0; + + if (num_tr == 2) { + cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true, + CPPI5_TR_EVENT_SIZE_COMPLETION, 0); + cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT); + + tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0; + tr_req[1].icnt0 = tr1_cnt0; + tr_req[1].icnt1 = 1; + tr_req[1].icnt2 = 1; + tr_req[1].icnt3 = 1; + + tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0; + tr_req[1].dicnt0 = tr1_cnt0; + tr_req[1].dicnt1 = 1; + tr_req[1].dicnt2 = 1; + tr_req[1].dicnt3 = 1; + } + + cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP); + + if (uc->config.metadata_size) + d->vd.tx.metadata_ops = &metadata_ops; + + return vchan_tx_prep(&uc->vc, &d->vd, tx_flags); +} + +static void udma_issue_pending(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + unsigned long flags; + + spin_lock_irqsave(&uc->vc.lock, flags); + + /* If we have something pending and no active descriptor, then */ + if (vchan_issue_pending(&uc->vc) && !uc->desc) { + /* + * start a descriptor if the channel is NOT [marked as + * terminating _and_ it is still running (teardown has not + * completed yet)]. 
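+		 * In that case the start is deferred until the teardown
+		 * of the previous transfer has completed.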
+ */ + if (!(uc->state == UDMA_CHAN_IS_TERMINATING && + udma_is_chan_running(uc))) + udma_start(uc); + } + + spin_unlock_irqrestore(&uc->vc.lock, flags); +} + +static enum dma_status udma_tx_status(struct dma_chan *chan, + dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + struct udma_chan *uc = to_udma_chan(chan); + enum dma_status ret; + unsigned long flags; + + spin_lock_irqsave(&uc->vc.lock, flags); + + ret = dma_cookie_status(chan, cookie, txstate); + + if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc)) + ret = DMA_PAUSED; + + if (ret == DMA_COMPLETE || !txstate) + goto out; + + if (uc->desc && uc->desc->vd.tx.cookie == cookie) { + u32 peer_bcnt = 0; + u32 bcnt = 0; + u32 residue = uc->desc->residue; + u32 delay = 0; + + if (uc->desc->dir == DMA_MEM_TO_DEV) { + bcnt = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_SBCNT_REG); + + if (uc->config.ep_type != PSIL_EP_NATIVE) { + peer_bcnt = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_PEER_BCNT_REG); + + if (bcnt > peer_bcnt) + delay = bcnt - peer_bcnt; + } + } else if (uc->desc->dir == DMA_DEV_TO_MEM) { + bcnt = udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_BCNT_REG); + + if (uc->config.ep_type != PSIL_EP_NATIVE) { + peer_bcnt = udma_rchanrt_read(uc->rchan, + UDMA_RCHAN_RT_PEER_BCNT_REG); + + if (peer_bcnt > bcnt) + delay = peer_bcnt - bcnt; + } + } else { + bcnt = udma_tchanrt_read(uc->tchan, + UDMA_TCHAN_RT_BCNT_REG); + } + + bcnt -= uc->bcnt; + if (bcnt && !(bcnt % uc->desc->residue)) + residue = 0; + else + residue -= bcnt % uc->desc->residue; + + if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) { + ret = DMA_COMPLETE; + delay = 0; + } + + dma_set_residue(txstate, residue); + dma_set_in_flight_bytes(txstate, delay); + + } else { + ret = DMA_COMPLETE; + } + +out: + spin_unlock_irqrestore(&uc->vc.lock, flags); + return ret; +} + +static int udma_pause(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + + if (!uc->desc) + return -EINVAL; + + /* pause the channel */ + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_update_bits(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, + UDMA_PEER_RT_EN_PAUSE); + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_update_bits(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, + UDMA_PEER_RT_EN_PAUSE); + break; + case DMA_MEM_TO_MEM: + udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_PAUSE, + UDMA_CHAN_RT_CTL_PAUSE); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int udma_resume(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + + if (!uc->desc) + return -EINVAL; + + /* resume the channel */ + switch (uc->desc->dir) { + case DMA_DEV_TO_MEM: + udma_rchanrt_update_bits(uc->rchan, + UDMA_RCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, 0); + + break; + case DMA_MEM_TO_DEV: + udma_tchanrt_update_bits(uc->tchan, + UDMA_TCHAN_RT_PEER_RT_EN_REG, + UDMA_PEER_RT_EN_PAUSE, 0); + break; + case DMA_MEM_TO_MEM: + udma_tchanrt_update_bits(uc->tchan, UDMA_TCHAN_RT_CTL_REG, + UDMA_CHAN_RT_CTL_PAUSE, 0); + break; + default: + return -EINVAL; + } + + return 0; +} + +static int udma_terminate_all(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&uc->vc.lock, flags); + + if (udma_is_chan_running(uc)) + udma_stop(uc); + + if (uc->desc) { + uc->terminated_desc = uc->desc; + uc->desc = NULL; + uc->terminated_desc->terminated = true; + cancel_delayed_work(&uc->tx_drain.work); + } + 
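+	/* Termination also voids any paused state */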
+ uc->paused = false; + + vchan_get_all_descriptors(&uc->vc, &head); + spin_unlock_irqrestore(&uc->vc.lock, flags); + vchan_dma_desc_free_list(&uc->vc, &head); + + return 0; +} + +static void udma_synchronize(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + unsigned long timeout = msecs_to_jiffies(1000); + + vchan_synchronize(&uc->vc); + + if (uc->state == UDMA_CHAN_IS_TERMINATING) { + timeout = wait_for_completion_timeout(&uc->teardown_completed, + timeout); + if (!timeout) { + dev_warn(uc->ud->dev, "chan%d teardown timeout!\n", + uc->id); + udma_dump_chan_stdata(uc); + udma_reset_chan(uc, true); + } + } + + udma_reset_chan(uc, false); + if (udma_is_chan_running(uc)) + dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id); + + cancel_delayed_work_sync(&uc->tx_drain.work); + udma_reset_rings(uc); +} + +static void udma_desc_pre_callback(struct virt_dma_chan *vc, + struct virt_dma_desc *vd, + struct dmaengine_result *result) +{ + struct udma_chan *uc = to_udma_chan(&vc->chan); + struct udma_desc *d; + + if (!vd) + return; + + d = to_udma_desc(&vd->tx); + + if (d->metadata_size) + udma_fetch_epib(uc, d); + + /* Provide residue information for the client */ + if (result) { + void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx); + + if (cppi5_desc_get_type(desc_vaddr) == + CPPI5_INFO0_DESC_TYPE_VAL_HOST) { + result->residue = d->residue - + cppi5_hdesc_get_pktlen(desc_vaddr); + if (result->residue) + result->result = DMA_TRANS_ABORTED; + else + result->result = DMA_TRANS_NOERROR; + } else { + result->residue = 0; + result->result = DMA_TRANS_NOERROR; + } + } +} + +/* + * This tasklet handles the completion of a DMA descriptor by + * calling its callback and freeing it. + */ +static void udma_vchan_complete(unsigned long arg) +{ + struct virt_dma_chan *vc = (struct virt_dma_chan *)arg; + struct virt_dma_desc *vd, *_vd; + struct dmaengine_desc_callback cb; + LIST_HEAD(head); + + spin_lock_irq(&vc->lock); + list_splice_tail_init(&vc->desc_completed, &head); + vd = vc->cyclic; + if (vd) { + vc->cyclic = NULL; + dmaengine_desc_get_callback(&vd->tx, &cb); + } else { + memset(&cb, 0, sizeof(cb)); + } + spin_unlock_irq(&vc->lock); + + udma_desc_pre_callback(vc, vd, NULL); + dmaengine_desc_callback_invoke(&cb, NULL); + + list_for_each_entry_safe(vd, _vd, &head, node) { + struct dmaengine_result result; + + dmaengine_desc_get_callback(&vd->tx, &cb); + + list_del(&vd->node); + + udma_desc_pre_callback(vc, vd, &result); + dmaengine_desc_callback_invoke(&cb, &result); + + vchan_vdesc_fini(vd); + } +} + +static void udma_free_chan_resources(struct dma_chan *chan) +{ + struct udma_chan *uc = to_udma_chan(chan); + struct udma_dev *ud = to_udma_dev(chan->device); + + udma_terminate_all(chan); + if (uc->terminated_desc) { + udma_reset_chan(uc, false); + udma_reset_rings(uc); + } + + cancel_delayed_work_sync(&uc->tx_drain.work); + destroy_delayed_work_on_stack(&uc->tx_drain.work); + + if (uc->irq_num_ring > 0) { + free_irq(uc->irq_num_ring, uc); + + uc->irq_num_ring = 0; + } + if (uc->irq_num_udma > 0) { + free_irq(uc->irq_num_udma, uc); + + uc->irq_num_udma = 0; + } + + /* Release PSI-L pairing */ + if (uc->psil_paired) { + navss_psil_unpair(ud, uc->config.src_thread, + uc->config.dst_thread); + uc->psil_paired = false; + } + + vchan_free_chan_resources(&uc->vc); + tasklet_kill(&uc->vc.task); + + udma_free_tx_resources(uc); + udma_free_rx_resources(uc); + udma_reset_uchan(uc); + + if (uc->use_dma_pool) { + dma_pool_destroy(uc->hdesc_pool); + uc->use_dma_pool = false; + } +} 
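+
+/*
+ * Illustrative only, not part of this driver: clients consume these
+ * channels via the generic dmaengine slave API. The PSI-L thread ID is
+ * taken from the client's "dmas" DT phandle argument and resolved by
+ * udma_of_xlate()/udma_dma_filter_fn() below. A hypothetical RX client
+ * (fifo_paddr being its device FIFO bus address) would do roughly:
+ *
+ *	chan = dma_request_chan(dev, "rx");
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	cfg.direction = DMA_DEV_TO_MEM;
+ *	cfg.src_addr = fifo_paddr;
+ *	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ *	cfg.src_maxburst = 1;
+ *	dmaengine_slave_config(chan, &cfg);
+ */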
+ +static struct platform_driver udma_driver; + +static bool udma_dma_filter_fn(struct dma_chan *chan, void *param) +{ + struct udma_chan_config *ucc; + struct psil_endpoint_config *ep_config; + struct udma_chan *uc; + struct udma_dev *ud; + u32 *args; + + if (chan->device->dev->driver != &udma_driver.driver) + return false; + + uc = to_udma_chan(chan); + ucc = &uc->config; + ud = uc->ud; + args = param; + + ucc->remote_thread_id = args[0]; + + if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) + ucc->dir = DMA_MEM_TO_DEV; + else + ucc->dir = DMA_DEV_TO_MEM; + + ep_config = psil_get_ep_config(ucc->remote_thread_id); + if (IS_ERR(ep_config)) { + dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n", + ucc->remote_thread_id); + ucc->dir = DMA_MEM_TO_MEM; + ucc->remote_thread_id = -1; + return false; + } + + ucc->pkt_mode = ep_config->pkt_mode; + ucc->channel_tpl = ep_config->channel_tpl; + ucc->notdpkt = ep_config->notdpkt; + ucc->ep_type = ep_config->ep_type; + + if (ucc->ep_type != PSIL_EP_NATIVE) { + const struct udma_match_data *match_data = ud->match_data; + + if (match_data->flags & UDMA_FLAG_PDMA_ACC32) + ucc->enable_acc32 = ep_config->pdma_acc32; + if (match_data->flags & UDMA_FLAG_PDMA_BURST) + ucc->enable_burst = ep_config->pdma_burst; + } + + ucc->needs_epib = ep_config->needs_epib; + ucc->psd_size = ep_config->psd_size; + ucc->metadata_size = + (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) + + ucc->psd_size; + + if (ucc->pkt_mode) + ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) + + ucc->metadata_size, ud->desc_align); + + dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id, + ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir)); + + return true; +} + +static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct udma_dev *ud = ofdma->of_dma_data; + dma_cap_mask_t mask = ud->ddev.cap_mask; + struct dma_chan *chan; + + if (dma_spec->args_count != 1) + return NULL; + + chan = __dma_request_channel(&mask, udma_dma_filter_fn, + &dma_spec->args[0], ofdma->of_node); + if (!chan) { + dev_err(ud->dev, "get channel fail in %s.\n", __func__); + return ERR_PTR(-EINVAL); + } + + return chan; +} + +static struct udma_match_data am654_main_data = { + .psil_base = 0x1000, + .enable_memcpy_support = true, + .statictr_z_mask = GENMASK(11, 0), + .rchan_oes_offset = 0x2000, + .tpl_levels = 2, + .level_start_idx = { + [0] = 8, /* Normal channels */ + [1] = 0, /* High Throughput channels */ + }, +}; + +static struct udma_match_data am654_mcu_data = { + .psil_base = 0x6000, + .enable_memcpy_support = true, /* TEST: DMA domains */ + .statictr_z_mask = GENMASK(11, 0), + .rchan_oes_offset = 0x2000, + .tpl_levels = 2, + .level_start_idx = { + [0] = 2, /* Normal channels */ + [1] = 0, /* High Throughput channels */ + }, +}; + +static struct udma_match_data j721e_main_data = { + .psil_base = 0x1000, + .enable_memcpy_support = true, + .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST, + .statictr_z_mask = GENMASK(23, 0), + .rchan_oes_offset = 0x400, + .tpl_levels = 3, + .level_start_idx = { + [0] = 16, /* Normal channels */ + [1] = 4, /* High Throughput channels */ + [2] = 0, /* Ultra High Throughput channels */ + }, +}; + +static struct udma_match_data j721e_mcu_data = { + .psil_base = 0x6000, + .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */ + .flags = UDMA_FLAG_PDMA_ACC32 | UDMA_FLAG_PDMA_BURST, + .statictr_z_mask = GENMASK(23, 0), + .rchan_oes_offset = 0x400, + .tpl_levels = 2, + 
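+	/* No Ultra High Throughput channels on the MCU UDMA */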
.level_start_idx = { + [0] = 2, /* Normal channels */ + [1] = 0, /* High Throughput channels */ + }, +}; + +static const struct of_device_id udma_of_match[] = { + { + .compatible = "ti,am654-navss-main-udmap", + .data = &am654_main_data, + }, + { + .compatible = "ti,am654-navss-mcu-udmap", + .data = &am654_mcu_data, + }, { + .compatible = "ti,j721e-navss-main-udmap", + .data = &j721e_main_data, + }, { + .compatible = "ti,j721e-navss-mcu-udmap", + .data = &j721e_mcu_data, + }, + { /* Sentinel */ }, +}; + +static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud) +{ + struct resource *res; + int i; + + for (i = 0; i < MMR_LAST; i++) { + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + mmr_names[i]); + ud->mmrs[i] = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ud->mmrs[i])) + return PTR_ERR(ud->mmrs[i]); + } + + return 0; +} + +static int udma_setup_resources(struct udma_dev *ud) +{ + struct device *dev = ud->dev; + int ch_count, ret, i, j; + u32 cap2, cap3; + struct ti_sci_resource_desc *rm_desc; + struct ti_sci_resource *rm_res, irq_res; + struct udma_tisci_rm *tisci_rm = &ud->tisci_rm; + static const char * const range_names[] = { "ti,sci-rm-range-tchan", + "ti,sci-rm-range-rchan", + "ti,sci-rm-range-rflow" }; + + cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28); + cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c); + + ud->rflow_cnt = cap3 & 0x3fff; + ud->tchan_cnt = cap2 & 0x1ff; + ud->echan_cnt = (cap2 >> 9) & 0x1ff; + ud->rchan_cnt = (cap2 >> 18) & 0x1ff; + ch_count = ud->tchan_cnt + ud->rchan_cnt; + + ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans), + GFP_KERNEL); + ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt), + sizeof(unsigned long), GFP_KERNEL); + ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans), + GFP_KERNEL); + ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflow_gp_map_allocated = devm_kcalloc(dev, + BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt), + sizeof(unsigned long), + GFP_KERNEL); + ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows), + GFP_KERNEL); + + if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map || + !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans || + !ud->rflows || !ud->rflow_in_use) + return -ENOMEM; + + /* + * RX flows with the same Ids as RX channels are reserved to be used + * as default flows if remote HW can't generate flow_ids. Those + * RX flows can be requested only explicitly by id. 
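+	 * In other words, rflow N is the dedicated default flow of rchan N
+	 * and is never handed out from the general purpose (GP) pool.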
+ */ + bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt); + + /* by default no GP rflows are assigned to Linux */ + bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt); + + /* Get resource ranges from tisci */ + for (i = 0; i < RM_RANGE_LAST; i++) + tisci_rm->rm_ranges[i] = + devm_ti_sci_get_of_resource(tisci_rm->tisci, dev, + tisci_rm->tisci_dev_id, + (char *)range_names[i]); + + /* tchan ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->tchan_map, ud->tchan_cnt); + } else { + bitmap_fill(ud->tchan_map, ud->tchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->tchan_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: tchan: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + irq_res.sets = rm_res->sets; + + /* rchan and matching default flow ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; + if (IS_ERR(rm_res)) { + bitmap_zero(ud->rchan_map, ud->rchan_cnt); + } else { + bitmap_fill(ud->rchan_map, ud->rchan_cnt); + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->rchan_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: rchan: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + + irq_res.sets += rm_res->sets; + irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL); + rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN]; + for (i = 0; i < rm_res->sets; i++) { + irq_res.desc[i].start = rm_res->desc[i].start; + irq_res.desc[i].num = rm_res->desc[i].num; + } + rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN]; + for (j = 0; j < rm_res->sets; j++, i++) { + irq_res.desc[i].start = rm_res->desc[j].start + + ud->match_data->rchan_oes_offset; + irq_res.desc[i].num = rm_res->desc[j].num; + } + ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res); + kfree(irq_res.desc); + if (ret) { + dev_err(ud->dev, "Failed to allocate MSI interrupts\n"); + return ret; + } + + /* GP rflow ranges */ + rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW]; + if (IS_ERR(rm_res)) { + /* all gp flows are assigned exclusively to Linux */ + bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt, + ud->rflow_cnt - ud->rchan_cnt); + } else { + for (i = 0; i < rm_res->sets; i++) { + rm_desc = &rm_res->desc[i]; + bitmap_clear(ud->rflow_gp_map, rm_desc->start, + rm_desc->num); + dev_dbg(dev, "ti-sci-res: rflow: %d:%d\n", + rm_desc->start, rm_desc->num); + } + } + + ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt); + ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt); + if (!ch_count) + return -ENODEV; + + ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels), + GFP_KERNEL); + if (!ud->channels) + return -ENOMEM; + + dev_info(dev, "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n", + ch_count, + ud->tchan_cnt - bitmap_weight(ud->tchan_map, ud->tchan_cnt), + ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt), + ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map, + ud->rflow_cnt)); + + return ch_count; +} + +#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)) + +static int udma_probe(struct platform_device *pdev) +{ + struct device_node *navss_node = pdev->dev.parent->of_node; + struct device *dev = &pdev->dev; + struct udma_dev *ud; + const struct of_device_id *match; + int i, ret; + int ch_count; + + ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48)); + if (ret) + 
dev_err(dev, "failed to set dma mask stuff\n"); + + ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL); + if (!ud) + return -ENOMEM; + + ret = udma_get_mmrs(pdev, ud); + if (ret) + return ret; + + ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci"); + if (IS_ERR(ud->tisci_rm.tisci)) + return PTR_ERR(ud->tisci_rm.tisci); + + ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id", + &ud->tisci_rm.tisci_dev_id); + if (ret) { + dev_err(dev, "ti,sci-dev-id read failure %d\n", ret); + return ret; + } + pdev->id = ud->tisci_rm.tisci_dev_id; + + ret = of_property_read_u32(navss_node, "ti,sci-dev-id", + &ud->tisci_rm.tisci_navss_dev_id); + if (ret) { + dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret); + return ret; + } + + ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops; + ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops; + + ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc"); + if (IS_ERR(ud->ringacc)) + return PTR_ERR(ud->ringacc); + + dev->msi_domain = of_msi_get_domain(dev, dev->of_node, + DOMAIN_BUS_TI_SCI_INTA_MSI); + if (!dev->msi_domain) { + dev_err(dev, "Failed to get MSI domain\n"); + return -EPROBE_DEFER; + } + + match = of_match_node(udma_of_match, dev->of_node); + if (!match) { + dev_err(dev, "No compatible match found\n"); + return -ENODEV; + } + ud->match_data = match->data; + + dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask); + dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask); + + ud->ddev.device_alloc_chan_resources = udma_alloc_chan_resources; + ud->ddev.device_config = udma_slave_config; + ud->ddev.device_prep_slave_sg = udma_prep_slave_sg; + ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic; + ud->ddev.device_issue_pending = udma_issue_pending; + ud->ddev.device_tx_status = udma_tx_status; + ud->ddev.device_pause = udma_pause; + ud->ddev.device_resume = udma_resume; + ud->ddev.device_terminate_all = udma_terminate_all; + ud->ddev.device_synchronize = udma_synchronize; + + ud->ddev.device_free_chan_resources = udma_free_chan_resources; + ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS; + ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS; + ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); + ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + ud->ddev.copy_align = DMAENGINE_ALIGN_8_BYTES; + ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT | + DESC_METADATA_ENGINE; + if (ud->match_data->enable_memcpy_support) { + dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask); + ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy; + ud->ddev.directions |= BIT(DMA_MEM_TO_MEM); + } + + ud->ddev.dev = dev; + ud->dev = dev; + ud->psil_base = ud->match_data->psil_base; + + INIT_LIST_HEAD(&ud->ddev.channels); + INIT_LIST_HEAD(&ud->desc_to_purge); + + ch_count = udma_setup_resources(ud); + if (ch_count <= 0) + return ch_count; + + spin_lock_init(&ud->lock); + INIT_WORK(&ud->purge_work, udma_purge_desc_work); + + ud->desc_align = 64; + if (ud->desc_align < dma_get_cache_alignment()) + ud->desc_align = dma_get_cache_alignment(); + + for (i = 0; i < ud->tchan_cnt; i++) { + struct udma_tchan *tchan = &ud->tchans[i]; + + tchan->id = i; + tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000; + } + + for (i = 0; i < ud->rchan_cnt; i++) { + struct udma_rchan *rchan = &ud->rchans[i]; + + rchan->id = i; + rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000; + } + + for (i = 0; i < ud->rflow_cnt; i++) { + struct udma_rflow *rflow = &ud->rflows[i]; + + rflow->id = i; + } + + for (i = 0; i < ch_count; i++) { + struct 
udma_chan *uc = &ud->channels[i]; + + uc->ud = ud; + uc->vc.desc_free = udma_desc_free; + uc->id = i; + uc->tchan = NULL; + uc->rchan = NULL; + uc->config.remote_thread_id = -1; + uc->config.dir = DMA_MEM_TO_MEM; + uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d", + dev_name(dev), i); + + vchan_init(&uc->vc, &ud->ddev); + /* Use custom vchan completion handling */ + tasklet_init(&uc->vc.task, udma_vchan_complete, + (unsigned long)&uc->vc); + init_completion(&uc->teardown_completed); + } + + ret = dma_async_device_register(&ud->ddev); + if (ret) { + dev_err(dev, "failed to register slave DMA engine: %d\n", ret); + return ret; + } + + platform_set_drvdata(pdev, ud); + + ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud); + if (ret) { + dev_err(dev, "failed to register of_dma controller\n"); + dma_async_device_unregister(&ud->ddev); + } + + return ret; +} + +static struct platform_driver udma_driver = { + .driver = { + .name = "ti-udma", + .of_match_table = udma_of_match, + .suppress_bind_attrs = true, + }, + .probe = udma_probe, +}; +builtin_platform_driver(udma_driver); + +/* Private interfaces to UDMA */ +#include "k3-udma-private.c" diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h new file mode 100644 index 000000000000..128d8744a435 --- /dev/null +++ b/drivers/dma/ti/k3-udma.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#ifndef K3_UDMA_H_ +#define K3_UDMA_H_ + +#include <linux/soc/ti/ti_sci_protocol.h> + +/* Global registers */ +#define UDMA_REV_REG 0x0 +#define UDMA_PERF_CTL_REG 0x4 +#define UDMA_EMU_CTL_REG 0x8 +#define UDMA_PSIL_TO_REG 0x10 +#define UDMA_UTC_CTL_REG 0x1c +#define UDMA_CAP_REG(i) (0x20 + ((i) * 4)) +#define UDMA_RX_FLOW_ID_FW_OES_REG 0x80 +#define UDMA_RX_FLOW_ID_FW_STATUS_REG 0x88 + +/* TX chan RT regs */ +#define UDMA_TCHAN_RT_CTL_REG 0x0 +#define UDMA_TCHAN_RT_SWTRIG_REG 0x8 +#define UDMA_TCHAN_RT_STDATA_REG 0x80 + +#define UDMA_TCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4)) +#define UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG \ + UDMA_TCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */ +#define UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG \ + UDMA_TCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */ +#define UDMA_TCHAN_RT_PEER_BCNT_REG \ + UDMA_TCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */ +#define UDMA_TCHAN_RT_PEER_RT_EN_REG \ + UDMA_TCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */ + +#define UDMA_TCHAN_RT_PCNT_REG 0x400 +#define UDMA_TCHAN_RT_BCNT_REG 0x408 +#define UDMA_TCHAN_RT_SBCNT_REG 0x410 + +/* RX chan RT regs */ +#define UDMA_RCHAN_RT_CTL_REG 0x0 +#define UDMA_RCHAN_RT_SWTRIG_REG 0x8 +#define UDMA_RCHAN_RT_STDATA_REG 0x80 + +#define UDMA_RCHAN_RT_PEER_REG(i) (0x200 + ((i) * 0x4)) +#define UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG \ + UDMA_RCHAN_RT_PEER_REG(0) /* PSI-L: 0x400 */ +#define UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG \ + UDMA_RCHAN_RT_PEER_REG(1) /* PSI-L: 0x401 */ +#define UDMA_RCHAN_RT_PEER_BCNT_REG \ + UDMA_RCHAN_RT_PEER_REG(4) /* PSI-L: 0x404 */ +#define UDMA_RCHAN_RT_PEER_RT_EN_REG \ + UDMA_RCHAN_RT_PEER_REG(8) /* PSI-L: 0x408 */ + +#define UDMA_RCHAN_RT_PCNT_REG 0x400 +#define UDMA_RCHAN_RT_BCNT_REG 0x408 +#define UDMA_RCHAN_RT_SBCNT_REG 0x410 + +/* UDMA_TCHAN_RT_CTL_REG/UDMA_RCHAN_RT_CTL_REG */ +#define UDMA_CHAN_RT_CTL_EN BIT(31) +#define UDMA_CHAN_RT_CTL_TDOWN BIT(30) +#define UDMA_CHAN_RT_CTL_PAUSE BIT(29) +#define UDMA_CHAN_RT_CTL_FTDOWN BIT(28) +#define UDMA_CHAN_RT_CTL_ERROR BIT(0) + +/* UDMA_TCHAN_RT_PEER_RT_EN_REG/UDMA_RCHAN_RT_PEER_RT_EN_REG (PSI-L: 0x408) */ 
+#define UDMA_PEER_RT_EN_ENABLE BIT(31) +#define UDMA_PEER_RT_EN_TEARDOWN BIT(30) +#define UDMA_PEER_RT_EN_PAUSE BIT(29) +#define UDMA_PEER_RT_EN_FLUSH BIT(28) +#define UDMA_PEER_RT_EN_IDLE BIT(1) + +/* + * UDMA_TCHAN_RT_PEER_STATIC_TR_XY_REG / + * UDMA_RCHAN_RT_PEER_STATIC_TR_XY_REG + */ +#define PDMA_STATIC_TR_X_MASK GENMASK(26, 24) +#define PDMA_STATIC_TR_X_SHIFT (24) +#define PDMA_STATIC_TR_Y_MASK GENMASK(11, 0) +#define PDMA_STATIC_TR_Y_SHIFT (0) + +#define PDMA_STATIC_TR_Y(x) \ + (((x) << PDMA_STATIC_TR_Y_SHIFT) & PDMA_STATIC_TR_Y_MASK) +#define PDMA_STATIC_TR_X(x) \ + (((x) << PDMA_STATIC_TR_X_SHIFT) & PDMA_STATIC_TR_X_MASK) + +#define PDMA_STATIC_TR_XY_ACC32 BIT(30) +#define PDMA_STATIC_TR_XY_BURST BIT(31) + +/* + * UDMA_TCHAN_RT_PEER_STATIC_TR_Z_REG / + * UDMA_RCHAN_RT_PEER_STATIC_TR_Z_REG + */ +#define PDMA_STATIC_TR_Z(x, mask) ((x) & (mask)) + +struct udma_dev; +struct udma_tchan; +struct udma_rchan; +struct udma_rflow; + +enum udma_rm_range { + RM_RANGE_TCHAN = 0, + RM_RANGE_RCHAN, + RM_RANGE_RFLOW, + RM_RANGE_LAST, +}; + +struct udma_tisci_rm { + const struct ti_sci_handle *tisci; + const struct ti_sci_rm_udmap_ops *tisci_udmap_ops; + u32 tisci_dev_id; + + /* tisci information for PSI-L thread pairing/unpairing */ + const struct ti_sci_rm_psil_ops *tisci_psil_ops; + u32 tisci_navss_dev_id; + + struct ti_sci_resource *rm_ranges[RM_RANGE_LAST]; +}; + +/* Direct access to UDMA low lever resources for the glue layer */ +int xudma_navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread); +int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread, + u32 dst_thread); + +struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property); +void xudma_dev_put(struct udma_dev *ud); +u32 xudma_dev_get_psil_base(struct udma_dev *ud); +struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud); + +int xudma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt); +int xudma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt); + +struct udma_tchan *xudma_tchan_get(struct udma_dev *ud, int id); +struct udma_rchan *xudma_rchan_get(struct udma_dev *ud, int id); +struct udma_rflow *xudma_rflow_get(struct udma_dev *ud, int id); + +void xudma_tchan_put(struct udma_dev *ud, struct udma_tchan *p); +void xudma_rchan_put(struct udma_dev *ud, struct udma_rchan *p); +void xudma_rflow_put(struct udma_dev *ud, struct udma_rflow *p); + +int xudma_tchan_get_id(struct udma_tchan *p); +int xudma_rchan_get_id(struct udma_rchan *p); +int xudma_rflow_get_id(struct udma_rflow *p); + +u32 xudma_tchanrt_read(struct udma_tchan *tchan, int reg); +void xudma_tchanrt_write(struct udma_tchan *tchan, int reg, u32 val); +u32 xudma_rchanrt_read(struct udma_rchan *rchan, int reg); +void xudma_rchanrt_write(struct udma_rchan *rchan, int reg, u32 val); +bool xudma_rflow_is_gp(struct udma_dev *ud, int id); + +#endif /* K3_UDMA_H_ */ diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c index 256fc662c500..23e33a85f033 100644 --- a/drivers/dma/virt-dma.c +++ b/drivers/dma/virt-dma.c @@ -114,13 +114,8 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) struct virt_dma_desc *vd, *_vd; list_for_each_entry_safe(vd, _vd, head, node) { - if (dmaengine_desc_test_reuse(&vd->tx)) { - list_move_tail(&vd->node, &vc->desc_allocated); - } else { - dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); - list_del(&vd->node); - vc->desc_free(vd); - } + list_del(&vd->node); + vchan_vdesc_fini(vd); } } EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); @@ -134,6 
+129,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev) INIT_LIST_HEAD(&vc->desc_submitted); INIT_LIST_HEAD(&vc->desc_issued); INIT_LIST_HEAD(&vc->desc_completed); + INIT_LIST_HEAD(&vc->desc_terminated); tasklet_init(&vc->task, vchan_complete, (unsigned long)vc); diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index ab158bac03a7..e9f5250fbe4d 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -31,9 +31,9 @@ struct virt_dma_chan { struct list_head desc_submitted; struct list_head desc_issued; struct list_head desc_completed; + struct list_head desc_terminated; struct virt_dma_desc *cyclic; - struct virt_dma_desc *vd_terminated; }; static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan) @@ -113,10 +113,15 @@ static inline void vchan_vdesc_fini(struct virt_dma_desc *vd) { struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); - if (dmaengine_desc_test_reuse(&vd->tx)) + if (dmaengine_desc_test_reuse(&vd->tx)) { + unsigned long flags; + + spin_lock_irqsave(&vc->lock, flags); list_add(&vd->node, &vc->desc_allocated); - else + spin_unlock_irqrestore(&vc->lock, flags); + } else { vc->desc_free(vd); + } } /** @@ -141,11 +146,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd) { struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); - /* free up stuck descriptor */ - if (vc->vd_terminated) - vchan_vdesc_fini(vc->vd_terminated); + list_add_tail(&vd->node, &vc->desc_terminated); - vc->vd_terminated = vd; if (vc->cyclic == vd) vc->cyclic = NULL; } @@ -179,6 +181,7 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, list_splice_tail_init(&vc->desc_submitted, head); list_splice_tail_init(&vc->desc_issued, head); list_splice_tail_init(&vc->desc_completed, head); + list_splice_tail_init(&vc->desc_terminated, head); } static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) @@ -207,16 +210,18 @@ static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) */ static inline void vchan_synchronize(struct virt_dma_chan *vc) { + LIST_HEAD(head); unsigned long flags; tasklet_kill(&vc->task); spin_lock_irqsave(&vc->lock, flags); - if (vc->vd_terminated) { - vchan_vdesc_fini(vc->vd_terminated); - vc->vd_terminated = NULL; - } + + list_splice_tail_init(&vc->desc_terminated, &head); + spin_unlock_irqrestore(&vc->lock, flags); + + vchan_dma_desc_free_list(vc, &head); } #endif diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c index 9c845c07b107..d47749a35863 100644 --- a/drivers/dma/xilinx/zynqmp_dma.c +++ b/drivers/dma/xilinx/zynqmp_dma.c @@ -123,10 +123,12 @@ /* Max transfer size per descriptor */ #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000 +/* Max burst lengths */ +#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U +#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U + /* Reset values for data attributes */ #define ZYNQMP_DMA_AXCACHE_VAL 0xF -#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF -#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F @@ -534,17 +536,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status) static void zynqmp_dma_config(struct zynqmp_dma_chan *chan) { - u32 val; + u32 val, burst_val; val = readl(chan->regs + ZYNQMP_DMA_CTRL0); val |= ZYNQMP_DMA_POINT_TYPE_SG; writel(val, chan->regs + ZYNQMP_DMA_CTRL0); val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR); + burst_val = __ilog2_u32(chan->src_burst_len); val = (val & ~ZYNQMP_DMA_ARLEN) | - (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST); + ((burst_val << 
ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN); + burst_val = __ilog2_u32(chan->dst_burst_len); val = (val & ~ZYNQMP_DMA_AWLEN) | - (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST); + ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN); writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR); } @@ -560,8 +564,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan, { struct zynqmp_dma_chan *chan = to_chan(dchan); - chan->src_burst_len = config->src_maxburst; - chan->dst_burst_len = config->dst_maxburst; + chan->src_burst_len = clamp(config->src_maxburst, 1U, + ZYNQMP_DMA_MAX_SRC_BURST_LEN); + chan->dst_burst_len = clamp(config->dst_maxburst, 1U, + ZYNQMP_DMA_MAX_DST_BURST_LEN); return 0; } @@ -887,8 +893,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev, return PTR_ERR(chan->regs); chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64; - chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL; - chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL; + chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN; + chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN; err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width); if (err < 0) { dev_err(&pdev->dev, "missing xlnx,bus-width property\n"); diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 5c8272329a65..b3c99bb5fe77 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -491,8 +491,7 @@ config EDAC_TI tristate "Texas Instruments DDR3 ECC Controller" depends on ARCH_KEYSTONE || SOC_DRA7XX help - Support for error detection and correction on the - TI SoCs. + Support for error detection and correction on the TI SoCs. config EDAC_QCOM tristate "QCOM EDAC Controller" diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 428ce98f6776..9fbad908a854 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -214,7 +214,7 @@ static int __set_scrub_rate(struct amd64_pvt *pvt, u32 new_bw, u32 min_rate) scrubval = scrubrates[i].scrubval; - if (pvt->fam == 0x17 || pvt->fam == 0x18) { + if (pvt->umc) { __f17h_set_scrubval(pvt, scrubval); } else if (pvt->fam == 0x15 && pvt->model == 0x60) { f15h_select_dct(pvt, 0); @@ -256,18 +256,7 @@ static int get_scrub_rate(struct mem_ctl_info *mci) int i, retval = -EINVAL; u32 scrubval = 0; - switch (pvt->fam) { - case 0x15: - /* Erratum #505 */ - if (pvt->model < 0x10) - f15h_select_dct(pvt, 0); - - if (pvt->model == 0x60) - amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); - break; - - case 0x17: - case 0x18: + if (pvt->umc) { amd64_read_pci_cfg(pvt->F6, F17H_SCR_BASE_ADDR, &scrubval); if (scrubval & BIT(0)) { amd64_read_pci_cfg(pvt->F6, F17H_SCR_LIMIT_ADDR, &scrubval); @@ -276,11 +265,15 @@ static int get_scrub_rate(struct mem_ctl_info *mci) } else { scrubval = 0; } - break; + } else if (pvt->fam == 0x15) { + /* Erratum #505 */ + if (pvt->model < 0x10) + f15h_select_dct(pvt, 0); - default: + if (pvt->model == 0x60) + amd64_read_pci_cfg(pvt->F2, F15H_M60H_SCRCTRL, &scrubval); + } else { amd64_read_pci_cfg(pvt->F3, SCRCTRL, &scrubval); - break; } scrubval = scrubval & 0x001F; @@ -1055,6 +1048,16 @@ static void determine_memory_type(struct amd64_pvt *pvt) { u32 dram_ctrl, dcsm; + if (pvt->umc) { + if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) + pvt->dram_type = MEM_LRDDR4; + else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) + pvt->dram_type = MEM_RDDR4; + else + pvt->dram_type = MEM_DDR4; + return; + } + switch (pvt->fam) { case 0xf: if (pvt->ext_model >= K8_REV_F) @@ -1100,16 +1103,6 @@ static void determine_memory_type(struct 
amd64_pvt *pvt) case 0x16: goto ddr3; - case 0x17: - case 0x18: - if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(5)) - pvt->dram_type = MEM_LRDDR4; - else if ((pvt->umc[0].dimm_cfg | pvt->umc[1].dimm_cfg) & BIT(4)) - pvt->dram_type = MEM_RDDR4; - else - pvt->dram_type = MEM_DDR4; - return; - default: WARN(1, KERN_ERR "%s: Family??? 0x%x\n", __func__, pvt->fam); pvt->dram_type = MEM_EMPTY; @@ -2336,6 +2329,16 @@ static struct amd64_family_type family_types[] = { .dbam_to_cs = f17_addr_mask_to_cs_size, } }, + [F19_CPUS] = { + .ctl_name = "F19h", + .f0_id = PCI_DEVICE_ID_AMD_19H_DF_F0, + .f6_id = PCI_DEVICE_ID_AMD_19H_DF_F6, + .max_mcs = 8, + .ops = { + .early_channel_count = f17_early_channel_count, + .dbam_to_cs = f17_addr_mask_to_cs_size, + } + }, }; /* @@ -3368,6 +3371,12 @@ static struct amd64_family_type *per_family_init(struct amd64_pvt *pvt) family_types[F17_CPUS].ctl_name = "F18h"; break; + case 0x19: + fam_type = &family_types[F19_CPUS]; + pvt->ops = &family_types[F19_CPUS].ops; + family_types[F19_CPUS].ctl_name = "F19h"; + break; + default: amd64_err("Unsupported family!\n"); return NULL; @@ -3573,9 +3582,6 @@ static void remove_one_instance(unsigned int nid) struct mem_ctl_info *mci; struct amd64_pvt *pvt; - mci = find_mci_by_dev(&F3->dev); - WARN_ON(!mci); - /* Remove from EDAC CORE tracking list */ mci = edac_mc_del_mc(&F3->dev); if (!mci) @@ -3626,6 +3632,7 @@ static const struct x86_cpu_id amd64_cpuids[] = { { X86_VENDOR_AMD, 0x16, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { X86_VENDOR_AMD, 0x17, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { X86_VENDOR_HYGON, 0x18, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, + { X86_VENDOR_AMD, 0x19, X86_MODEL_ANY, X86_FEATURE_ANY, 0 }, { } }; MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids); diff --git a/drivers/edac/amd64_edac.h b/drivers/edac/amd64_edac.h index 9be31688110b..abbf3c274d74 100644 --- a/drivers/edac/amd64_edac.h +++ b/drivers/edac/amd64_edac.h @@ -122,6 +122,8 @@ #define PCI_DEVICE_ID_AMD_17H_M30H_DF_F6 0x1496 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F0 0x1440 #define PCI_DEVICE_ID_AMD_17H_M70H_DF_F6 0x1446 +#define PCI_DEVICE_ID_AMD_19H_DF_F0 0x1650 +#define PCI_DEVICE_ID_AMD_19H_DF_F6 0x1656 /* * Function 1 - Address Map @@ -292,6 +294,7 @@ enum amd_families { F17_M10H_CPUS, F17_M30H_CPUS, F17_M70H_CPUS, + F19_CPUS, NUM_FAMILIES, }; diff --git a/drivers/edac/aspeed_edac.c b/drivers/edac/aspeed_edac.c index 09a9e3de9595..b194658b8b5c 100644 --- a/drivers/edac/aspeed_edac.c +++ b/drivers/edac/aspeed_edac.c @@ -243,7 +243,7 @@ static int init_csrows(struct mem_ctl_info *mci) if (!np) { dev_err(mci->pdev, "dt: missing /memory node\n"); return -ENODEV; - }; + } rc = of_address_to_resource(np, 0, &r); @@ -252,7 +252,7 @@ static int init_csrows(struct mem_ctl_info *mci) if (rc) { dev_err(mci->pdev, "dt: failed requesting resource for /memory node\n"); return rc; - }; + } dev_dbg(mci->pdev, "dt: /memory node resources: first page r.start=0x%x, resource_size=0x%x, PAGE_SHIFT macro=0x%x\n", r.start, resource_size(&r), PAGE_SHIFT); diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c index 0ddc41e47a96..191aa7c19ded 100644 --- a/drivers/edac/i5100_edac.c +++ b/drivers/edac/i5100_edac.c @@ -259,11 +259,6 @@ static inline u32 i5100_nrecmemb_ras(u32 a) return a & ((1 << 16) - 1); } -static inline u32 i5100_redmemb_ecc_locator(u32 a) -{ - return a & ((1 << 18) - 1); -} - static inline u32 i5100_recmema_merr(u32 a) { return i5100_nrecmema_merr(a); @@ -486,7 +481,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan, u32 dw; u32 dw2; 
unsigned syndrome = 0; - unsigned ecc_loc = 0; unsigned merr; unsigned bank; unsigned rank; @@ -499,7 +493,6 @@ static void i5100_read_log(struct mem_ctl_info *mci, int chan, pci_read_config_dword(pdev, I5100_REDMEMA, &dw2); syndrome = dw2; pci_read_config_dword(pdev, I5100_REDMEMB, &dw2); - ecc_loc = i5100_redmemb_ecc_locator(dw2); } if (i5100_validlog_recmemvalid(dw)) { diff --git a/drivers/edac/mce_amd.c b/drivers/edac/mce_amd.c index ea622c6f3a39..ea980c556f2e 100644 --- a/drivers/edac/mce_amd.c +++ b/drivers/edac/mce_amd.c @@ -6,7 +6,7 @@ #include "mce_amd.h" -static struct amd_decoder_ops *fam_ops; +static struct amd_decoder_ops fam_ops; static u8 xec_mask = 0xf; @@ -175,6 +175,33 @@ static const char * const smca_ls_mce_desc[] = { "L2 Fill Data error", }; +static const char * const smca_ls2_mce_desc[] = { + "An ECC error was detected on a data cache read by a probe or victimization", + "An ECC error or L2 poison was detected on a data cache read by a load", + "An ECC error was detected on a data cache read-modify-write by a store", + "An ECC error or poison bit mismatch was detected on a tag read by a probe or victimization", + "An ECC error or poison bit mismatch was detected on a tag read by a load", + "An ECC error or poison bit mismatch was detected on a tag read by a store", + "An ECC error was detected on an EMEM read by a load", + "An ECC error was detected on an EMEM read-modify-write by a store", + "A parity error was detected in an L1 TLB entry by any access", + "A parity error was detected in an L2 TLB entry by any access", + "A parity error was detected in a PWC entry by any access", + "A parity error was detected in an STQ entry by any access", + "A parity error was detected in an LDQ entry by any access", + "A parity error was detected in a MAB entry by any access", + "A parity error was detected in an SCB entry state field by any access", + "A parity error was detected in an SCB entry address field by any access", + "A parity error was detected in an SCB entry data field by any access", + "A parity error was detected in a WCB entry by any access", + "A poisoned line was detected in an SCB entry by any access", + "A SystemReadDataError error was reported on read data returned from L2 for a load", + "A SystemReadDataError error was reported on read data returned from L2 for an SCB store", + "A SystemReadDataError error was reported on read data returned from L2 for a WCB store", + "A hardware assertion error was reported", + "A parity error was detected in an STLF, SCB EMEM entry or SRB store data by any access", +}; + static const char * const smca_if_mce_desc[] = { "Op Cache Microtag Probe Port Parity Error", "IC Microtag or Full Tag Multi-hit Error", @@ -378,6 +405,7 @@ struct smca_mce_desc { static struct smca_mce_desc smca_mce_descs[] = { [SMCA_LS] = { smca_ls_mce_desc, ARRAY_SIZE(smca_ls_mce_desc) }, + [SMCA_LS_V2] = { smca_ls2_mce_desc, ARRAY_SIZE(smca_ls2_mce_desc) }, [SMCA_IF] = { smca_if_mce_desc, ARRAY_SIZE(smca_if_mce_desc) }, [SMCA_L2_CACHE] = { smca_l2_mce_desc, ARRAY_SIZE(smca_l2_mce_desc) }, [SMCA_DE] = { smca_de_mce_desc, ARRAY_SIZE(smca_de_mce_desc) }, @@ -555,7 +583,7 @@ static void decode_mc0_mce(struct mce *m) : (xec ? 
"multimatch" : "parity"))); return; } - } else if (fam_ops->mc0_mce(ec, xec)) + } else if (fam_ops.mc0_mce(ec, xec)) ; else pr_emerg(HW_ERR "Corrupted MC0 MCE info?\n"); @@ -669,7 +697,7 @@ static void decode_mc1_mce(struct mce *m) pr_cont("Hardware Assert.\n"); else goto wrong_mc1_mce; - } else if (fam_ops->mc1_mce(ec, xec)) + } else if (fam_ops.mc1_mce(ec, xec)) ; else goto wrong_mc1_mce; @@ -803,7 +831,7 @@ static void decode_mc2_mce(struct mce *m) pr_emerg(HW_ERR "MC2 Error: "); - if (!fam_ops->mc2_mce(ec, xec)) + if (!fam_ops.mc2_mce(ec, xec)) pr_cont(HW_ERR "Corrupted MC2 MCE info?\n"); } @@ -1102,7 +1130,8 @@ amd_decode_mce(struct notifier_block *nb, unsigned long val, void *data) if (m->tsc) pr_emerg(HW_ERR "TSC: %llu\n", m->tsc); - if (!fam_ops) + /* Doesn't matter which member to test. */ + if (!fam_ops.mc0_mce) goto err_code; switch (m->bank) { @@ -1157,80 +1186,73 @@ static int __init mce_amd_init(void) c->x86_vendor != X86_VENDOR_HYGON) return -ENODEV; - fam_ops = kzalloc(sizeof(struct amd_decoder_ops), GFP_KERNEL); - if (!fam_ops) - return -ENOMEM; + if (boot_cpu_has(X86_FEATURE_SMCA)) { + xec_mask = 0x3f; + goto out; + } switch (c->x86) { case 0xf: - fam_ops->mc0_mce = k8_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = k8_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x10: - fam_ops->mc0_mce = f10h_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = f10h_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x11: - fam_ops->mc0_mce = k8_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = k8_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x12: - fam_ops->mc0_mce = f12h_mc0_mce; - fam_ops->mc1_mce = k8_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = f12h_mc0_mce; + fam_ops.mc1_mce = k8_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x14: - fam_ops->mc0_mce = cat_mc0_mce; - fam_ops->mc1_mce = cat_mc1_mce; - fam_ops->mc2_mce = k8_mc2_mce; + fam_ops.mc0_mce = cat_mc0_mce; + fam_ops.mc1_mce = cat_mc1_mce; + fam_ops.mc2_mce = k8_mc2_mce; break; case 0x15: xec_mask = c->x86_model == 0x60 ? 0x3f : 0x1f; - fam_ops->mc0_mce = f15h_mc0_mce; - fam_ops->mc1_mce = f15h_mc1_mce; - fam_ops->mc2_mce = f15h_mc2_mce; + fam_ops.mc0_mce = f15h_mc0_mce; + fam_ops.mc1_mce = f15h_mc1_mce; + fam_ops.mc2_mce = f15h_mc2_mce; break; case 0x16: xec_mask = 0x1f; - fam_ops->mc0_mce = cat_mc0_mce; - fam_ops->mc1_mce = cat_mc1_mce; - fam_ops->mc2_mce = f16h_mc2_mce; + fam_ops.mc0_mce = cat_mc0_mce; + fam_ops.mc1_mce = cat_mc1_mce; + fam_ops.mc2_mce = f16h_mc2_mce; break; case 0x17: case 0x18: - xec_mask = 0x3f; - if (!boot_cpu_has(X86_FEATURE_SMCA)) { - printk(KERN_WARNING "Decoding supported only on Scalable MCA processors.\n"); - goto err_out; - } - break; + pr_warn("Decoding supported only on Scalable MCA processors.\n"); + return -EINVAL; default: printk(KERN_WARNING "Huh? 
What family is it: 0x%x?!\n", c->x86); - goto err_out; + return -EINVAL; } +out: pr_info("MCE: In-kernel MCE decoding enabled.\n"); mce_register_decode_chain(&amd_mce_dec_nb); return 0; - -err_out: - kfree(fam_ops); - fam_ops = NULL; - return -EINVAL; } early_initcall(mce_amd_init); @@ -1238,7 +1260,6 @@ early_initcall(mce_amd_init); static void __exit mce_amd_exit(void) { mce_unregister_decode_chain(&amd_mce_dec_nb); - kfree(fam_ops); } MODULE_DESCRIPTION("AMD MCE decoder"); diff --git a/drivers/edac/sifive_edac.c b/drivers/edac/sifive_edac.c index c0cc72a3b2be..3a3dcb14ed99 100644 --- a/drivers/edac/sifive_edac.c +++ b/drivers/edac/sifive_edac.c @@ -54,8 +54,8 @@ static int ecc_register(struct platform_device *pdev) p->dci = edac_device_alloc_ctl_info(0, "sifive_ecc", 1, "sifive_ecc", 1, 1, NULL, 0, edac_device_alloc_index()); - if (IS_ERR(p->dci)) - return PTR_ERR(p->dci); + if (!p->dci) + return -ENOMEM; p->dci->dev = &pdev->dev; p->dci->mod_name = "Sifive ECC Manager"; diff --git a/drivers/edac/skx_common.c b/drivers/edac/skx_common.c index 95662a4ff4c4..99bbaf629b8d 100644 --- a/drivers/edac/skx_common.c +++ b/drivers/edac/skx_common.c @@ -256,7 +256,7 @@ int skx_get_hi_lo(unsigned int did, int off[], u64 *tolm, u64 *tohm) pdev = pci_get_device(PCI_VENDOR_ID_INTEL, did, NULL); if (!pdev) { - skx_printk(KERN_ERR, "Can't get tolm/tohm\n"); + edac_dbg(2, "Can't get tolm/tohm\n"); return -ENODEV; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index b696e4598a24..1b3f217a35e2 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -132,27 +132,6 @@ static void of_gpio_flags_quirks(struct device_node *np, int index) { /* - * Handle MMC "cd-inverted" and "wp-inverted" semantics. - */ - if (IS_ENABLED(CONFIG_MMC)) { - /* - * Active low is the default according to the - * SDHCI specification and the device tree - * bindings. However the code in the current - * kernel was written such that the phandle - * flags were always respected, and "cd-inverted" - * would invert the flag from the device phandle. - */ - if (!strcmp(propname, "cd-gpios")) { - if (of_property_read_bool(np, "cd-inverted")) - *flags ^= OF_GPIO_ACTIVE_LOW; - } - if (!strcmp(propname, "wp-gpios")) { - if (of_property_read_bool(np, "wp-inverted")) - *flags ^= OF_GPIO_ACTIVE_LOW; - } - } - /* * Some GPIO fixed regulator quirks. * Note that active low is the default. */ diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 78a16e42f222..bcfbfded9ba3 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -3371,6 +3371,17 @@ int gpiod_is_active_low(const struct gpio_desc *desc) } EXPORT_SYMBOL_GPL(gpiod_is_active_low); +/** + * gpiod_toggle_active_low - toggle whether a GPIO is active-low or not + * @desc: the gpio descriptor to change + */ +void gpiod_toggle_active_low(struct gpio_desc *desc) +{ + VALIDATE_DESC_VOID(desc); + change_bit(FLAG_ACTIVE_LOW, &desc->flags); +} +EXPORT_SYMBOL_GPL(gpiod_toggle_active_low); + /* I/O calls are only valid after configuration completed; the relevant * "is this a valid GPIO" error checks should already have been done. 
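As a usage note for the new helper above: a consumer that still needs the old "cd-inverted"/"wp-inverted" behaviour is now expected to flip the polarity itself. A minimal sketch of that pattern, with assumed variable names ("dev", "cd_gpiod"); the real consumer-side change lives in the MMC core and is not part of this diff:

/*
 * Sketch only: honor "cd-inverted" in the consumer now that gpiolib-of
 * no longer applies it. "dev" and "cd_gpiod" are placeholders for the
 * host controller's device and its card-detect descriptor.
 */
static void example_apply_cd_inverted(struct device *dev,
				      struct gpio_desc *cd_gpiod)
{
	if (device_property_read_bool(dev, "cd-inverted"))
		gpiod_toggle_active_low(cd_gpiod);
}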
* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 01a793a0cbf7..30a1e3ac21d6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1004,7 +1004,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, /* Renoir */ - {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, /* Navi12 */ {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 5a61a5596912..e6afe4faeca6 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -1916,73 +1916,90 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port, return parent_lct + 1; } -static int drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt) +static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs) +{ + switch (pdt) { + case DP_PEER_DEVICE_DP_LEGACY_CONV: + case DP_PEER_DEVICE_SST_SINK: + return true; + case DP_PEER_DEVICE_MST_BRANCHING: + /* For sst branch device */ + if (!mcs) + return true; + + return false; + } + return true; +} + +static int +drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt, + bool new_mcs) { struct drm_dp_mst_topology_mgr *mgr = port->mgr; struct drm_dp_mst_branch *mstb; u8 rad[8], lct; int ret = 0; - if (port->pdt == new_pdt) + if (port->pdt == new_pdt && port->mcs == new_mcs) return 0; /* Teardown the old pdt, if there is one */ - switch (port->pdt) { - case DP_PEER_DEVICE_DP_LEGACY_CONV: - case DP_PEER_DEVICE_SST_SINK: - /* - * If the new PDT would also have an i2c bus, don't bother - * with reregistering it - */ - if (new_pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || - new_pdt == DP_PEER_DEVICE_SST_SINK) { - port->pdt = new_pdt; - return 0; - } + if (port->pdt != DP_PEER_DEVICE_NONE) { + if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { + /* + * If the new PDT would also have an i2c bus, + * don't bother with reregistering it + */ + if (new_pdt != DP_PEER_DEVICE_NONE && + drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) { + port->pdt = new_pdt; + port->mcs = new_mcs; + return 0; + } - /* remove i2c over sideband */ - drm_dp_mst_unregister_i2c_bus(&port->aux); - break; - case DP_PEER_DEVICE_MST_BRANCHING: - mutex_lock(&mgr->lock); - drm_dp_mst_topology_put_mstb(port->mstb); - port->mstb = NULL; - mutex_unlock(&mgr->lock); - break; + /* remove i2c over sideband */ + drm_dp_mst_unregister_i2c_bus(&port->aux); + } else { + mutex_lock(&mgr->lock); + drm_dp_mst_topology_put_mstb(port->mstb); + port->mstb = NULL; + mutex_unlock(&mgr->lock); + } } port->pdt = new_pdt; - switch (port->pdt) { - case DP_PEER_DEVICE_DP_LEGACY_CONV: - case DP_PEER_DEVICE_SST_SINK: - /* add i2c over sideband */ - ret = drm_dp_mst_register_i2c_bus(&port->aux); - break; + port->mcs = new_mcs; - case DP_PEER_DEVICE_MST_BRANCHING: - lct = drm_dp_calculate_rad(port, rad); - mstb = drm_dp_add_mst_branch_device(lct, rad); - if (!mstb) { - ret = -ENOMEM; - DRM_ERROR("Failed to create MSTB for port %p", port); - goto out; - } + if (port->pdt != DP_PEER_DEVICE_NONE) { + if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { + /* add i2c over sideband */ + ret = drm_dp_mst_register_i2c_bus(&port->aux); + } else { + lct = drm_dp_calculate_rad(port, rad); + mstb = 
drm_dp_add_mst_branch_device(lct, rad); + if (!mstb) { + ret = -ENOMEM; + DRM_ERROR("Failed to create MSTB for port %p", + port); + goto out; + } - mutex_lock(&mgr->lock); - port->mstb = mstb; - mstb->mgr = port->mgr; - mstb->port_parent = port; + mutex_lock(&mgr->lock); + port->mstb = mstb; + mstb->mgr = port->mgr; + mstb->port_parent = port; - /* - * Make sure this port's memory allocation stays - * around until its child MSTB releases it - */ - drm_dp_mst_get_port_malloc(port); - mutex_unlock(&mgr->lock); + /* + * Make sure this port's memory allocation stays + * around until its child MSTB releases it + */ + drm_dp_mst_get_port_malloc(port); + mutex_unlock(&mgr->lock); - /* And make sure we send a link address for this */ - ret = 1; - break; + /* And make sure we send a link address for this */ + ret = 1; + } } out: @@ -2135,9 +2152,8 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb, goto error; } - if ((port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV || - port->pdt == DP_PEER_DEVICE_SST_SINK) && - port->port_num >= DP_MST_LOGICAL_PORT_0) { + if (port->pdt != DP_PEER_DEVICE_NONE && + drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) { port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc); drm_connector_set_tile_property(port->connector); @@ -2201,6 +2217,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port; int old_ddps = 0, ret; u8 new_pdt = DP_PEER_DEVICE_NONE; + bool new_mcs = 0; bool created = false, send_link_addr = false, changed = false; port = drm_dp_get_port(mstb, port_msg->port_number); @@ -2245,7 +2262,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, port->input = port_msg->input_port; if (!port->input) new_pdt = port_msg->peer_device_type; - port->mcs = port_msg->mcs; + new_mcs = port_msg->mcs; port->ddps = port_msg->ddps; port->ldps = port_msg->legacy_device_plug_status; port->dpcd_rev = port_msg->dpcd_revision; @@ -2272,7 +2289,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, } } - ret = drm_dp_port_set_pdt(port, new_pdt); + ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs); if (ret == 1) { send_link_addr = true; } else if (ret < 0) { @@ -2286,7 +2303,8 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb, * we're coming out of suspend. In this case, always resend the link * address if there's an MSTB on this port */ - if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING) + if (!created && port->pdt == DP_PEER_DEVICE_MST_BRANCHING && + port->mcs) send_link_addr = true; if (port->connector) @@ -2323,6 +2341,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port; int old_ddps, old_input, ret, i; u8 new_pdt; + bool new_mcs; bool dowork = false, create_connector = false; port = drm_dp_get_port(mstb, conn_stat->port_number); @@ -2354,7 +2373,6 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, old_ddps = port->ddps; old_input = port->input; port->input = conn_stat->input_port; - port->mcs = conn_stat->message_capability_status; port->ldps = conn_stat->legacy_device_plug_status; port->ddps = conn_stat->displayport_device_plug_status; @@ -2367,8 +2385,8 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb, } new_pdt = port->input ? 
DP_PEER_DEVICE_NONE : conn_stat->peer_device_type; - - ret = drm_dp_port_set_pdt(port, new_pdt); + new_mcs = conn_stat->message_capability_status; + ret = drm_dp_port_set_pdt(port, new_pdt, new_mcs); if (ret == 1) { dowork = true; } else if (ret < 0) { @@ -3929,6 +3947,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector, switch (port->pdt) { case DP_PEER_DEVICE_NONE: case DP_PEER_DEVICE_MST_BRANCHING: + if (!port->mcs) + ret = connector_status_connected; break; case DP_PEER_DEVICE_SST_SINK: @@ -4541,7 +4561,7 @@ drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port) if (port->connector) port->mgr->cbs->destroy_connector(port->mgr, port->connector); - drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE); + drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs); drm_dp_mst_put_port_malloc(port); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_busy.c b/drivers/gpu/drm/i915/gem/i915_gem_busy.c index 3d4f5775a4ba..25235ef630c1 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_busy.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_busy.c @@ -9,16 +9,16 @@ #include "i915_gem_ioctls.h" #include "i915_gem_object.h" -static __always_inline u32 __busy_read_flag(u8 id) +static __always_inline u32 __busy_read_flag(u16 id) { - if (id == (u8)I915_ENGINE_CLASS_INVALID) + if (id == (u16)I915_ENGINE_CLASS_INVALID) return 0xffff0000u; GEM_BUG_ON(id >= 16); return 0x10000u << id; } -static __always_inline u32 __busy_write_id(u8 id) +static __always_inline u32 __busy_write_id(u16 id) { /* * The uABI guarantees an active writer is also amongst the read @@ -29,14 +29,14 @@ static __always_inline u32 __busy_write_id(u8 id) * last_read - hence we always set both read and write busy for * last_write. */ - if (id == (u8)I915_ENGINE_CLASS_INVALID) + if (id == (u16)I915_ENGINE_CLASS_INVALID) return 0xffffffffu; return (id + 1) | __busy_read_flag(id); } static __always_inline unsigned int -__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id)) +__busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u16 id)) { const struct i915_request *rq; @@ -57,7 +57,7 @@ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id)) return 0; /* Beware type-expansion follies! 
*/ - BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class)); + BUILD_BUG_ON(!typecheck(u16, rq->engine->uabi_class)); return flag(rq->engine->uabi_class); } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 4c72d74d6576..0dbb44d30885 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -402,7 +402,7 @@ struct get_pages_work { static struct sg_table * __i915_gem_userptr_alloc_pages(struct drm_i915_gem_object *obj, - struct page **pvec, int num_pages) + struct page **pvec, unsigned long num_pages) { unsigned int max_segment = i915_sg_segment_size(); struct sg_table *st; @@ -448,9 +448,10 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work) { struct get_pages_work *work = container_of(_work, typeof(*work), work); struct drm_i915_gem_object *obj = work->obj; - const int npages = obj->base.size >> PAGE_SHIFT; + const unsigned long npages = obj->base.size >> PAGE_SHIFT; + unsigned long pinned; struct page **pvec; - int pinned, ret; + int ret; ret = -ENOMEM; pinned = 0; @@ -553,7 +554,7 @@ __i915_gem_userptr_get_pages_schedule(struct drm_i915_gem_object *obj) static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) { - const int num_pages = obj->base.size >> PAGE_SHIFT; + const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; struct mm_struct *mm = obj->userptr.mm->mm; struct page **pvec; struct sg_table *pages; diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h index 17f1f1441efc..2b446474e010 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_types.h +++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h @@ -274,8 +274,8 @@ struct intel_engine_cs { u8 class; u8 instance; - u8 uabi_class; - u8 uabi_instance; + u16 uabi_class; + u16 uabi_instance; u32 uabi_capabilities; u32 context_size; diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index c083f516fd35..44727806dfd7 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -1177,6 +1177,7 @@ gen8_ppgtt_insert_pte(struct i915_ppgtt *ppgtt, pd = i915_pd_entry(pdp, gen8_pd_index(idx, 2)); vaddr = kmap_atomic_px(i915_pt_entry(pd, gen8_pd_index(idx, 1))); do { + GEM_BUG_ON(iter->sg->length < I915_GTT_PAGE_SIZE); vaddr[gen8_pd_index(idx, 0)] = pte_encode | iter->dma; iter->dma += I915_GTT_PAGE_SIZE; @@ -1660,6 +1661,7 @@ static void gen6_ppgtt_insert_entries(struct i915_address_space *vm, vaddr = kmap_atomic_px(i915_pt_entry(pd, act_pt)); do { + GEM_BUG_ON(iter.sg->length < I915_GTT_PAGE_SIZE); vaddr[act_pte] = pte_encode | GEN6_PTE_ADDR_ENCODE(iter.dma); iter.dma += I915_GTT_PAGE_SIZE; diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index f61364f7c471..88b431a267af 100644 --- a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -78,8 +78,10 @@ static int panfrost_ioctl_get_param(struct drm_device *ddev, void *data, struct static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, struct drm_file *file) { + struct panfrost_file_priv *priv = file->driver_priv; struct panfrost_gem_object *bo; struct drm_panfrost_create_bo *args = data; + struct panfrost_gem_mapping *mapping; if (!args->size || args->pad || (args->flags & ~(PANFROST_BO_NOEXEC | PANFROST_BO_HEAP))) @@ -95,7 +97,14 @@ static int panfrost_ioctl_create_bo(struct drm_device *dev, void *data, if (IS_ERR(bo)) return PTR_ERR(bo); - 
args->offset = bo->node.start << PAGE_SHIFT; + mapping = panfrost_gem_mapping_get(bo, priv); + if (!mapping) { + drm_gem_object_put_unlocked(&bo->base.base); + return -EINVAL; + } + + args->offset = mapping->mmnode.start << PAGE_SHIFT; + panfrost_gem_mapping_put(mapping); return 0; } @@ -119,6 +128,11 @@ panfrost_lookup_bos(struct drm_device *dev, struct drm_panfrost_submit *args, struct panfrost_job *job) { + struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_object *bo; + unsigned int i; + int ret; + job->bo_count = args->bo_handle_count; if (!job->bo_count) @@ -130,9 +144,32 @@ panfrost_lookup_bos(struct drm_device *dev, if (!job->implicit_fences) return -ENOMEM; - return drm_gem_objects_lookup(file_priv, - (void __user *)(uintptr_t)args->bo_handles, - job->bo_count, &job->bos); + ret = drm_gem_objects_lookup(file_priv, + (void __user *)(uintptr_t)args->bo_handles, + job->bo_count, &job->bos); + if (ret) + return ret; + + job->mappings = kvmalloc_array(job->bo_count, + sizeof(struct panfrost_gem_mapping *), + GFP_KERNEL | __GFP_ZERO); + if (!job->mappings) + return -ENOMEM; + + for (i = 0; i < job->bo_count; i++) { + struct panfrost_gem_mapping *mapping; + + bo = to_panfrost_bo(job->bos[i]); + mapping = panfrost_gem_mapping_get(bo, priv); + if (!mapping) { + ret = -EINVAL; + break; + } + + job->mappings[i] = mapping; + } + + return ret; } /** @@ -320,7 +357,9 @@ out: static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct panfrost_file_priv *priv = file_priv->driver_priv; struct drm_panfrost_get_bo_offset *args = data; + struct panfrost_gem_mapping *mapping; struct drm_gem_object *gem_obj; struct panfrost_gem_object *bo; @@ -331,18 +370,26 @@ static int panfrost_ioctl_get_bo_offset(struct drm_device *dev, void *data, } bo = to_panfrost_bo(gem_obj); - args->offset = bo->node.start << PAGE_SHIFT; - + mapping = panfrost_gem_mapping_get(bo, priv); drm_gem_object_put_unlocked(gem_obj); + + if (!mapping) + return -EINVAL; + + args->offset = mapping->mmnode.start << PAGE_SHIFT; + panfrost_gem_mapping_put(mapping); return 0; } static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct panfrost_file_priv *priv = file_priv->driver_priv; struct drm_panfrost_madvise *args = data; struct panfrost_device *pfdev = dev->dev_private; struct drm_gem_object *gem_obj; + struct panfrost_gem_object *bo; + int ret = 0; gem_obj = drm_gem_object_lookup(file_priv, args->handle); if (!gem_obj) { @@ -350,22 +397,48 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, return -ENOENT; } + bo = to_panfrost_bo(gem_obj); + mutex_lock(&pfdev->shrinker_lock); + mutex_lock(&bo->mappings.lock); + if (args->madv == PANFROST_MADV_DONTNEED) { + struct panfrost_gem_mapping *first; + + first = list_first_entry(&bo->mappings.list, + struct panfrost_gem_mapping, + node); + + /* + * If we want to mark the BO purgeable, there must be only one + * user: the caller FD. + * We could do something smarter and mark the BO purgeable only + * when all its users have marked it purgeable, but globally + * visible/shared BOs are likely to never be marked purgeable + * anyway, so let's not bother. 
+ */ + if (!list_is_singular(&bo->mappings.list) || + WARN_ON_ONCE(first->mmu != &priv->mmu)) { + ret = -EINVAL; + goto out_unlock_mappings; + } + } + args->retained = drm_gem_shmem_madvise(gem_obj, args->madv); if (args->retained) { - struct panfrost_gem_object *bo = to_panfrost_bo(gem_obj); - if (args->madv == PANFROST_MADV_DONTNEED) list_add_tail(&bo->base.madv_list, &pfdev->shrinker_list); else if (args->madv == PANFROST_MADV_WILLNEED) list_del_init(&bo->base.madv_list); } + +out_unlock_mappings: + mutex_unlock(&bo->mappings.lock); mutex_unlock(&pfdev->shrinker_lock); drm_gem_object_put_unlocked(gem_obj); - return 0; + return ret; } int panfrost_unstable_ioctl_check(void) diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.c b/drivers/gpu/drm/panfrost/panfrost_gem.c index fd766b1395fb..17b654e1eb94 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem.c @@ -29,6 +29,12 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) list_del_init(&bo->base.madv_list); mutex_unlock(&pfdev->shrinker_lock); + /* + * If we still have mappings attached to the BO, there's a problem in + * our refcounting. + */ + WARN_ON_ONCE(!list_empty(&bo->mappings.list)); + if (bo->sgts) { int i; int n_sgt = bo->base.base.size / SZ_2M; @@ -46,6 +52,69 @@ static void panfrost_gem_free_object(struct drm_gem_object *obj) drm_gem_shmem_free_object(obj); } +struct panfrost_gem_mapping * +panfrost_gem_mapping_get(struct panfrost_gem_object *bo, + struct panfrost_file_priv *priv) +{ + struct panfrost_gem_mapping *iter, *mapping = NULL; + + mutex_lock(&bo->mappings.lock); + list_for_each_entry(iter, &bo->mappings.list, node) { + if (iter->mmu == &priv->mmu) { + kref_get(&iter->refcount); + mapping = iter; + break; + } + } + mutex_unlock(&bo->mappings.lock); + + return mapping; +} + +static void +panfrost_gem_teardown_mapping(struct panfrost_gem_mapping *mapping) +{ + struct panfrost_file_priv *priv; + + if (mapping->active) + panfrost_mmu_unmap(mapping); + + priv = container_of(mapping->mmu, struct panfrost_file_priv, mmu); + spin_lock(&priv->mm_lock); + if (drm_mm_node_allocated(&mapping->mmnode)) + drm_mm_remove_node(&mapping->mmnode); + spin_unlock(&priv->mm_lock); +} + +static void panfrost_gem_mapping_release(struct kref *kref) +{ + struct panfrost_gem_mapping *mapping; + + mapping = container_of(kref, struct panfrost_gem_mapping, refcount); + + panfrost_gem_teardown_mapping(mapping); + drm_gem_object_put_unlocked(&mapping->obj->base.base); + kfree(mapping); +} + +void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping) +{ + if (!mapping) + return; + + kref_put(&mapping->refcount, panfrost_gem_mapping_release); +} + +void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo) +{ + struct panfrost_gem_mapping *mapping; + + mutex_lock(&bo->mappings.lock); + list_for_each_entry(mapping, &bo->mappings.list, node) + panfrost_gem_teardown_mapping(mapping); + mutex_unlock(&bo->mappings.lock); +} + int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) { int ret; @@ -54,6 +123,16 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) struct panfrost_gem_object *bo = to_panfrost_bo(obj); unsigned long color = bo->noexec ? 
PANFROST_BO_NOEXEC : 0; struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_mapping *mapping; + + mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); + if (!mapping) + return -ENOMEM; + + INIT_LIST_HEAD(&mapping->node); + kref_init(&mapping->refcount); + drm_gem_object_get(obj); + mapping->obj = bo; /* * Executable buffers cannot cross a 16MB boundary as the program @@ -66,37 +145,48 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv) else align = size >= SZ_2M ? SZ_2M >> PAGE_SHIFT : 0; - bo->mmu = &priv->mmu; + mapping->mmu = &priv->mmu; spin_lock(&priv->mm_lock); - ret = drm_mm_insert_node_generic(&priv->mm, &bo->node, + ret = drm_mm_insert_node_generic(&priv->mm, &mapping->mmnode, size >> PAGE_SHIFT, align, color, 0); spin_unlock(&priv->mm_lock); if (ret) - return ret; + goto err; if (!bo->is_heap) { - ret = panfrost_mmu_map(bo); - if (ret) { - spin_lock(&priv->mm_lock); - drm_mm_remove_node(&bo->node); - spin_unlock(&priv->mm_lock); - } + ret = panfrost_mmu_map(mapping); + if (ret) + goto err; } + + mutex_lock(&bo->mappings.lock); + WARN_ON(bo->base.madv != PANFROST_MADV_WILLNEED); + list_add_tail(&mapping->node, &bo->mappings.list); + mutex_unlock(&bo->mappings.lock); + +err: + if (ret) + panfrost_gem_mapping_put(mapping); return ret; } void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv) { - struct panfrost_gem_object *bo = to_panfrost_bo(obj); struct panfrost_file_priv *priv = file_priv->driver_priv; + struct panfrost_gem_object *bo = to_panfrost_bo(obj); + struct panfrost_gem_mapping *mapping = NULL, *iter; - if (bo->is_mapped) - panfrost_mmu_unmap(bo); + mutex_lock(&bo->mappings.lock); + list_for_each_entry(iter, &bo->mappings.list, node) { + if (iter->mmu == &priv->mmu) { + mapping = iter; + list_del(&iter->node); + break; + } + } + mutex_unlock(&bo->mappings.lock); - spin_lock(&priv->mm_lock); - if (drm_mm_node_allocated(&bo->node)) - drm_mm_remove_node(&bo->node); - spin_unlock(&priv->mm_lock); + panfrost_gem_mapping_put(mapping); } static int panfrost_gem_pin(struct drm_gem_object *obj) @@ -136,6 +226,8 @@ struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t if (!obj) return NULL; + INIT_LIST_HEAD(&obj->mappings.list); + mutex_init(&obj->mappings.lock); obj->base.base.funcs = &panfrost_gem_funcs; return &obj->base.base; diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h index 4b17e7308764..ca1bc9019600 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem.h +++ b/drivers/gpu/drm/panfrost/panfrost_gem.h @@ -13,23 +13,46 @@ struct panfrost_gem_object { struct drm_gem_shmem_object base; struct sg_table *sgts; - struct panfrost_mmu *mmu; - struct drm_mm_node node; - bool is_mapped :1; + /* + * Use a list for now. If searching a mapping ever becomes the + * bottleneck, we should consider using an RB-tree, or even better, + * let the core store drm_gem_object_mapping entries (where we + * could place driver specific data) instead of drm_gem_object ones + * in its drm_file->object_idr table. 
+ * + * struct drm_gem_object_mapping { + * struct drm_gem_object *obj; + * void *driver_priv; + * }; + */ + struct { + struct list_head list; + struct mutex lock; + } mappings; + bool noexec :1; bool is_heap :1; }; +struct panfrost_gem_mapping { + struct list_head node; + struct kref refcount; + struct panfrost_gem_object *obj; + struct drm_mm_node mmnode; + struct panfrost_mmu *mmu; + bool active :1; +}; + static inline struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj) { return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base); } -static inline -struct panfrost_gem_object *drm_mm_node_to_panfrost_bo(struct drm_mm_node *node) +static inline struct panfrost_gem_mapping * +drm_mm_node_to_panfrost_mapping(struct drm_mm_node *node) { - return container_of(node, struct panfrost_gem_object, node); + return container_of(node, struct panfrost_gem_mapping, mmnode); } struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size); @@ -49,6 +72,12 @@ int panfrost_gem_open(struct drm_gem_object *obj, struct drm_file *file_priv); void panfrost_gem_close(struct drm_gem_object *obj, struct drm_file *file_priv); +struct panfrost_gem_mapping * +panfrost_gem_mapping_get(struct panfrost_gem_object *bo, + struct panfrost_file_priv *priv); +void panfrost_gem_mapping_put(struct panfrost_gem_mapping *mapping); +void panfrost_gem_teardown_mappings(struct panfrost_gem_object *bo); + void panfrost_gem_shrinker_init(struct drm_device *dev); void panfrost_gem_shrinker_cleanup(struct drm_device *dev); diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index 458f0fa68111..f5dd7b29bc95 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -39,11 +39,12 @@ panfrost_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc static bool panfrost_gem_purge(struct drm_gem_object *obj) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); + struct panfrost_gem_object *bo = to_panfrost_bo(obj); if (!mutex_trylock(&shmem->pages_lock)) return false; - panfrost_mmu_unmap(to_panfrost_bo(obj)); + panfrost_gem_teardown_mappings(bo); drm_gem_shmem_purge_locked(obj); mutex_unlock(&shmem->pages_lock); diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c index d411eb6c8eb9..e364ee00f3d0 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.c +++ b/drivers/gpu/drm/panfrost/panfrost_job.c @@ -268,9 +268,20 @@ static void panfrost_job_cleanup(struct kref *ref) dma_fence_put(job->done_fence); dma_fence_put(job->render_done_fence); - if (job->bos) { + if (job->mappings) { for (i = 0; i < job->bo_count; i++) + panfrost_gem_mapping_put(job->mappings[i]); + kvfree(job->mappings); + } + + if (job->bos) { + struct panfrost_gem_object *bo; + + for (i = 0; i < job->bo_count; i++) { + bo = to_panfrost_bo(job->bos[i]); drm_gem_object_put_unlocked(job->bos[i]); + } + kvfree(job->bos); } diff --git a/drivers/gpu/drm/panfrost/panfrost_job.h b/drivers/gpu/drm/panfrost/panfrost_job.h index 62454128a792..bbd3ba97ff67 100644 --- a/drivers/gpu/drm/panfrost/panfrost_job.h +++ b/drivers/gpu/drm/panfrost/panfrost_job.h @@ -32,6 +32,7 @@ struct panfrost_job { /* Exclusive fences we have taken from the BOs to wait for */ struct dma_fence **implicit_fences; + struct panfrost_gem_mapping **mappings; struct drm_gem_object **bos; u32 bo_count; diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c 
b/drivers/gpu/drm/panfrost/panfrost_mmu.c index a3ed64a1f15e..763cfca886a7 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -269,14 +269,15 @@ static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, return 0; } -int panfrost_mmu_map(struct panfrost_gem_object *bo) +int panfrost_mmu_map(struct panfrost_gem_mapping *mapping) { + struct panfrost_gem_object *bo = mapping->obj; struct drm_gem_object *obj = &bo->base.base; struct panfrost_device *pfdev = to_panfrost_device(obj->dev); struct sg_table *sgt; int prot = IOMMU_READ | IOMMU_WRITE; - if (WARN_ON(bo->is_mapped)) + if (WARN_ON(mapping->active)) return 0; if (bo->noexec) @@ -286,25 +287,28 @@ int panfrost_mmu_map(struct panfrost_gem_object *bo) if (WARN_ON(IS_ERR(sgt))) return PTR_ERR(sgt); - mmu_map_sg(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, prot, sgt); - bo->is_mapped = true; + mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT, + prot, sgt); + mapping->active = true; return 0; } -void panfrost_mmu_unmap(struct panfrost_gem_object *bo) +void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping) { + struct panfrost_gem_object *bo = mapping->obj; struct drm_gem_object *obj = &bo->base.base; struct panfrost_device *pfdev = to_panfrost_device(obj->dev); - struct io_pgtable_ops *ops = bo->mmu->pgtbl_ops; - u64 iova = bo->node.start << PAGE_SHIFT; - size_t len = bo->node.size << PAGE_SHIFT; + struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops; + u64 iova = mapping->mmnode.start << PAGE_SHIFT; + size_t len = mapping->mmnode.size << PAGE_SHIFT; size_t unmapped_len = 0; - if (WARN_ON(!bo->is_mapped)) + if (WARN_ON(!mapping->active)) return; - dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", bo->mmu->as, iova, len); + dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx", + mapping->mmu->as, iova, len); while (unmapped_len < len) { size_t unmapped_page; @@ -318,8 +322,9 @@ void panfrost_mmu_unmap(struct panfrost_gem_object *bo) unmapped_len += pgsize; } - panfrost_mmu_flush_range(pfdev, bo->mmu, bo->node.start << PAGE_SHIFT, len); - bo->is_mapped = false; + panfrost_mmu_flush_range(pfdev, mapping->mmu, + mapping->mmnode.start << PAGE_SHIFT, len); + mapping->active = false; } static void mmu_tlb_inv_context_s1(void *cookie) @@ -394,10 +399,10 @@ void panfrost_mmu_pgtable_free(struct panfrost_file_priv *priv) free_io_pgtable_ops(mmu->pgtbl_ops); } -static struct panfrost_gem_object * -addr_to_drm_mm_node(struct panfrost_device *pfdev, int as, u64 addr) +static struct panfrost_gem_mapping * +addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr) { - struct panfrost_gem_object *bo = NULL; + struct panfrost_gem_mapping *mapping = NULL; struct panfrost_file_priv *priv; struct drm_mm_node *node; u64 offset = addr >> PAGE_SHIFT; @@ -418,8 +423,9 @@ found_mmu: drm_mm_for_each_node(node, &priv->mm) { if (offset >= node->start && offset < (node->start + node->size)) { - bo = drm_mm_node_to_panfrost_bo(node); - drm_gem_object_get(&bo->base.base); + mapping = drm_mm_node_to_panfrost_mapping(node); + + kref_get(&mapping->refcount); break; } } @@ -427,7 +433,7 @@ found_mmu: spin_unlock(&priv->mm_lock); out: spin_unlock(&pfdev->as_lock); - return bo; + return mapping; } #define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE) @@ -436,28 +442,30 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, u64 addr) { int ret, i; + struct panfrost_gem_mapping *bomapping; struct panfrost_gem_object *bo; struct address_space *mapping; 
pgoff_t page_offset; struct sg_table *sgt; struct page **pages; - bo = addr_to_drm_mm_node(pfdev, as, addr); - if (!bo) + bomapping = addr_to_mapping(pfdev, as, addr); + if (!bomapping) return -ENOENT; + bo = bomapping->obj; if (!bo->is_heap) { dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)", - bo->node.start << PAGE_SHIFT); + bomapping->mmnode.start << PAGE_SHIFT); ret = -EINVAL; goto err_bo; } - WARN_ON(bo->mmu->as != as); + WARN_ON(bomapping->mmu->as != as); /* Assume 2MB alignment and size multiple */ addr &= ~((u64)SZ_2M - 1); page_offset = addr >> PAGE_SHIFT; - page_offset -= bo->node.start; + page_offset -= bomapping->mmnode.start; mutex_lock(&bo->base.pages_lock); @@ -509,13 +517,14 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, goto err_map; } - mmu_map_sg(pfdev, bo->mmu, addr, IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); + mmu_map_sg(pfdev, bomapping->mmu, addr, + IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt); - bo->is_mapped = true; + bomapping->active = true; dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx", as, addr); - drm_gem_object_put_unlocked(&bo->base.base); + panfrost_gem_mapping_put(bomapping); return 0; diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.h b/drivers/gpu/drm/panfrost/panfrost_mmu.h index 7c5b6775ae23..44fc2edf63ce 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.h +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.h @@ -4,12 +4,12 @@ #ifndef __PANFROST_MMU_H__ #define __PANFROST_MMU_H__ -struct panfrost_gem_object; +struct panfrost_gem_mapping; struct panfrost_file_priv; struct panfrost_mmu; -int panfrost_mmu_map(struct panfrost_gem_object *bo); -void panfrost_mmu_unmap(struct panfrost_gem_object *bo); +int panfrost_mmu_map(struct panfrost_gem_mapping *mapping); +void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping); int panfrost_mmu_init(struct panfrost_device *pfdev); void panfrost_mmu_fini(struct panfrost_device *pfdev); diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index 2c04e858c50a..684820448be3 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -25,7 +25,7 @@ #define V4_SHADERS_PER_COREGROUP 4 struct panfrost_perfcnt { - struct panfrost_gem_object *bo; + struct panfrost_gem_mapping *mapping; size_t bosize; void *buf; struct panfrost_file_priv *user; @@ -49,7 +49,7 @@ static int panfrost_perfcnt_dump_locked(struct panfrost_device *pfdev) int ret; reinit_completion(&pfdev->perfcnt->dump_comp); - gpuva = pfdev->perfcnt->bo->node.start << PAGE_SHIFT; + gpuva = pfdev->perfcnt->mapping->mmnode.start << PAGE_SHIFT; gpu_write(pfdev, GPU_PERFCNT_BASE_LO, gpuva); gpu_write(pfdev, GPU_PERFCNT_BASE_HI, gpuva >> 32); gpu_write(pfdev, GPU_INT_CLEAR, @@ -89,17 +89,22 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, if (IS_ERR(bo)) return PTR_ERR(bo); - perfcnt->bo = to_panfrost_bo(&bo->base); - /* Map the perfcnt buf in the address space attached to file_priv. 
*/ - ret = panfrost_gem_open(&perfcnt->bo->base.base, file_priv); + ret = panfrost_gem_open(&bo->base, file_priv); if (ret) goto err_put_bo; + perfcnt->mapping = panfrost_gem_mapping_get(to_panfrost_bo(&bo->base), + user); + if (!perfcnt->mapping) { + ret = -EINVAL; + goto err_close_bo; + } + perfcnt->buf = drm_gem_shmem_vmap(&bo->base); if (IS_ERR(perfcnt->buf)) { ret = PTR_ERR(perfcnt->buf); - goto err_close_bo; + goto err_put_mapping; } /* @@ -154,12 +159,17 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, if (panfrost_has_hw_issue(pfdev, HW_ISSUE_8186)) gpu_write(pfdev, GPU_PRFCNT_TILER_EN, 0xffffffff); + /* The BO ref is retained by the mapping. */ + drm_gem_object_put_unlocked(&bo->base); + return 0; err_vunmap: - drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf); + drm_gem_shmem_vunmap(&bo->base, perfcnt->buf); +err_put_mapping: + panfrost_gem_mapping_put(perfcnt->mapping); err_close_bo: - panfrost_gem_close(&perfcnt->bo->base.base, file_priv); + panfrost_gem_close(&bo->base, file_priv); err_put_bo: drm_gem_object_put_unlocked(&bo->base); return ret; @@ -182,11 +192,11 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF)); perfcnt->user = NULL; - drm_gem_shmem_vunmap(&perfcnt->bo->base.base, perfcnt->buf); + drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf); perfcnt->buf = NULL; - panfrost_gem_close(&perfcnt->bo->base.base, file_priv); - drm_gem_object_put_unlocked(&perfcnt->bo->base.base); - perfcnt->bo = NULL; + panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); + panfrost_gem_mapping_put(perfcnt->mapping); + perfcnt->mapping = NULL; pm_runtime_mark_last_busy(pfdev->dev); pm_runtime_put_autosuspend(pfdev->dev); diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index cd9193078525..70e1cb928bf0 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -49,6 +49,10 @@ MODULE_PARM_DESC(disable_tap_to_click, #define HIDPP_REPORT_LONG_LENGTH 20 #define HIDPP_REPORT_VERY_LONG_MAX_LENGTH 64 +#define HIDPP_REPORT_SHORT_SUPPORTED BIT(0) +#define HIDPP_REPORT_LONG_SUPPORTED BIT(1) +#define HIDPP_REPORT_VERY_LONG_SUPPORTED BIT(2) + #define HIDPP_SUB_ID_CONSUMER_VENDOR_KEYS 0x03 #define HIDPP_SUB_ID_ROLLER 0x05 #define HIDPP_SUB_ID_MOUSE_EXTRA_BTNS 0x06 @@ -87,6 +91,7 @@ MODULE_PARM_DESC(disable_tap_to_click, #define HIDPP_CAPABILITY_HIDPP20_BATTERY BIT(1) #define HIDPP_CAPABILITY_BATTERY_MILEAGE BIT(2) #define HIDPP_CAPABILITY_BATTERY_LEVEL_STATUS BIT(3) +#define HIDPP_CAPABILITY_BATTERY_VOLTAGE BIT(4) /* * There are two hidpp protocols in use, the first version hidpp10 is known @@ -135,12 +140,15 @@ struct hidpp_report { struct hidpp_battery { u8 feature_index; u8 solar_feature_index; + u8 voltage_feature_index; struct power_supply_desc desc; struct power_supply *ps; char name[64]; int status; int capacity; int level; + int voltage; + int charge_type; bool online; }; @@ -183,9 +191,12 @@ struct hidpp_device { unsigned long quirks; unsigned long capabilities; + u8 supported_reports; struct hidpp_battery battery; struct hidpp_scroll_counter vertical_wheel_counter; + + u8 wireless_feature_index; }; /* HID++ 1.0 error codes */ @@ -340,6 +351,11 @@ static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev, struct hidpp_report *message; int ret, max_count; + /* Send as long report if short reports are not supported. 
*/ + if (report_id == REPORT_ID_HIDPP_SHORT && + !(hidpp_dev->supported_reports & HIDPP_REPORT_SHORT_SUPPORTED)) + report_id = REPORT_ID_HIDPP_LONG; + switch (report_id) { case REPORT_ID_HIDPP_SHORT: max_count = HIDPP_REPORT_SHORT_LENGTH - 4; @@ -393,10 +409,13 @@ static inline bool hidpp_match_error(struct hidpp_report *question, (answer->fap.params[0] == question->fap.funcindex_clientid); } -static inline bool hidpp_report_is_connect_event(struct hidpp_report *report) +static inline bool hidpp_report_is_connect_event(struct hidpp_device *hidpp, + struct hidpp_report *report) { - return (report->report_id == REPORT_ID_HIDPP_SHORT) && - (report->rap.sub_id == 0x41); + return (hidpp->wireless_feature_index && + (report->fap.feature_index == hidpp->wireless_feature_index)) || + ((report->report_id == REPORT_ID_HIDPP_SHORT) && + (report->rap.sub_id == 0x41)); } /** @@ -1222,6 +1241,144 @@ static int hidpp20_battery_event(struct hidpp_device *hidpp, return 0; } +/* -------------------------------------------------------------------------- */ +/* 0x1001: Battery voltage */ +/* -------------------------------------------------------------------------- */ + +#define HIDPP_PAGE_BATTERY_VOLTAGE 0x1001 + +#define CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE 0x00 + +#define EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST 0x00 + +static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage, + int *level, int *charge_type) +{ + int status; + + long charge_sts = (long)data[2]; + + *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN; + switch (data[2] & 0xe0) { + case 0x00: + status = POWER_SUPPLY_STATUS_CHARGING; + break; + case 0x20: + status = POWER_SUPPLY_STATUS_FULL; + *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL; + break; + case 0x40: + status = POWER_SUPPLY_STATUS_DISCHARGING; + break; + case 0xe0: + status = POWER_SUPPLY_STATUS_NOT_CHARGING; + break; + default: + status = POWER_SUPPLY_STATUS_UNKNOWN; + } + + *charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD; + if (test_bit(3, &charge_sts)) { + *charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST; + } + if (test_bit(4, &charge_sts)) { + *charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE; + } + + if (test_bit(5, &charge_sts)) { + *level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL; + } + + *voltage = get_unaligned_be16(data); + + return status; +} + +static int hidpp20_battery_get_battery_voltage(struct hidpp_device *hidpp, + u8 feature_index, + int *status, int *voltage, + int *level, int *charge_type) +{ + struct hidpp_report response; + int ret; + u8 *params = (u8 *)response.fap.params; + + ret = hidpp_send_fap_command_sync(hidpp, feature_index, + CMD_BATTERY_VOLTAGE_GET_BATTERY_VOLTAGE, + NULL, 0, &response); + + if (ret > 0) { + hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n", + __func__, ret); + return -EPROTO; + } + if (ret) + return ret; + + hidpp->capabilities |= HIDPP_CAPABILITY_BATTERY_VOLTAGE; + + *status = hidpp20_battery_map_status_voltage(params, voltage, + level, charge_type); + + return 0; +} + +static int hidpp20_query_battery_voltage_info(struct hidpp_device *hidpp) +{ + u8 feature_type; + int ret; + int status, voltage, level, charge_type; + + if (hidpp->battery.voltage_feature_index == 0xff) { + ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_BATTERY_VOLTAGE, + &hidpp->battery.voltage_feature_index, + &feature_type); + if (ret) + return ret; + } + + ret = hidpp20_battery_get_battery_voltage(hidpp, + hidpp->battery.voltage_feature_index, + &status, &voltage, &level, &charge_type); + + if (ret) + return ret; + + hidpp->battery.status = status; + 
hidpp->battery.voltage = voltage; + hidpp->battery.level = level; + hidpp->battery.charge_type = charge_type; + hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING; + + return 0; +} + +static int hidpp20_battery_voltage_event(struct hidpp_device *hidpp, + u8 *data, int size) +{ + struct hidpp_report *report = (struct hidpp_report *)data; + int status, voltage, level, charge_type; + + if (report->fap.feature_index != hidpp->battery.voltage_feature_index || + report->fap.funcindex_clientid != EVENT_BATTERY_VOLTAGE_STATUS_BROADCAST) + return 0; + + status = hidpp20_battery_map_status_voltage(report->fap.params, &voltage, + &level, &charge_type); + + hidpp->battery.online = status != POWER_SUPPLY_STATUS_NOT_CHARGING; + + if (voltage != hidpp->battery.voltage || status != hidpp->battery.status) { + hidpp->battery.voltage = voltage; + hidpp->battery.status = status; + hidpp->battery.level = level; + hidpp->battery.charge_type = charge_type; + if (hidpp->battery.ps) + power_supply_changed(hidpp->battery.ps); + } + return 0; +} + static enum power_supply_property hidpp_battery_props[] = { POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_STATUS, @@ -1231,6 +1388,7 @@ static enum power_supply_property hidpp_battery_props[] = { POWER_SUPPLY_PROP_SERIAL_NUMBER, 0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY, */ 0, /* placeholder for POWER_SUPPLY_PROP_CAPACITY_LEVEL, */ + 0, /* placeholder for POWER_SUPPLY_PROP_VOLTAGE_NOW, */ }; static int hidpp_battery_get_property(struct power_supply *psy, @@ -1268,6 +1426,13 @@ static int hidpp_battery_get_property(struct power_supply *psy, case POWER_SUPPLY_PROP_SERIAL_NUMBER: val->strval = hidpp->hid_dev->uniq; break; + case POWER_SUPPLY_PROP_VOLTAGE_NOW: + /* hardware reports voltage in mV; sysfs expects uV */ + val->intval = hidpp->battery.voltage * 1000; + break; + case POWER_SUPPLY_PROP_CHARGE_TYPE: + val->intval = hidpp->battery.charge_type; + break; default: ret = -EINVAL; break; @@ -1277,6 +1442,24 @@ static int hidpp_battery_get_property(struct power_supply *psy, } /* -------------------------------------------------------------------------- */ +/* 0x1d4b: Wireless device status */ +/* -------------------------------------------------------------------------- */ +#define HIDPP_PAGE_WIRELESS_DEVICE_STATUS 0x1d4b + +static int hidpp_set_wireless_feature_index(struct hidpp_device *hidpp) +{ + u8 feature_type; + int ret; + + ret = hidpp_root_get_feature(hidpp, + HIDPP_PAGE_WIRELESS_DEVICE_STATUS, + &hidpp->wireless_feature_index, + &feature_type); + + return ret; +} + +/* -------------------------------------------------------------------------- */ /* 0x2120: Hi-resolution scrolling */ /* -------------------------------------------------------------------------- */ @@ -3091,7 +3274,7 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, } } - if (unlikely(hidpp_report_is_connect_event(report))) { + if (unlikely(hidpp_report_is_connect_event(hidpp, report))) { atomic_set(&hidpp->connected, !(report->rap.params[0] & (1 << 6))); if (schedule_work(&hidpp->work) == 0) @@ -3106,6 +3289,9 @@ static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data, ret = hidpp_solar_battery_event(hidpp, data, size); if (ret != 0) return ret; + ret = hidpp20_battery_voltage_event(hidpp, data, size); + if (ret != 0) + return ret; } if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP10_BATTERY) { @@ -3227,12 +3413,16 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp) hidpp->battery.feature_index = 0xff; 
hidpp->battery.solar_feature_index = 0xff; + hidpp->battery.voltage_feature_index = 0xff; if (hidpp->protocol_major >= 2) { if (hidpp->quirks & HIDPP_QUIRK_CLASS_K750) ret = hidpp_solar_request_battery_event(hidpp); - else - ret = hidpp20_query_battery_info(hidpp); + else { + ret = hidpp20_query_battery_voltage_info(hidpp); + if (ret) + ret = hidpp20_query_battery_info(hidpp); + } if (ret) return ret; @@ -3257,7 +3447,7 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp) if (!battery_props) return -ENOMEM; - num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 2; + num_battery_props = ARRAY_SIZE(hidpp_battery_props) - 3; if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_MILEAGE) battery_props[num_battery_props++] = @@ -3267,6 +3457,10 @@ static int hidpp_initialize_battery(struct hidpp_device *hidpp) battery_props[num_battery_props++] = POWER_SUPPLY_PROP_CAPACITY_LEVEL; + if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE) + battery_props[num_battery_props++] = + POWER_SUPPLY_PROP_VOLTAGE_NOW; + battery = &hidpp->battery; n = atomic_inc_return(&battery_no) - 1; @@ -3430,7 +3624,10 @@ static void hidpp_connect_event(struct hidpp_device *hidpp) else hidpp10_query_battery_status(hidpp); } else if (hidpp->capabilities & HIDPP_CAPABILITY_HIDPP20_BATTERY) { - hidpp20_query_battery_info(hidpp); + if (hidpp->capabilities & HIDPP_CAPABILITY_BATTERY_VOLTAGE) + hidpp20_query_battery_voltage_info(hidpp); + else + hidpp20_query_battery_info(hidpp); } if (hidpp->battery.ps) power_supply_changed(hidpp->battery.ps); @@ -3481,10 +3678,11 @@ static int hidpp_get_report_length(struct hid_device *hdev, int id) return report->field[0]->report_count + 1; } -static bool hidpp_validate_device(struct hid_device *hdev) +static u8 hidpp_validate_device(struct hid_device *hdev) { struct hidpp_device *hidpp = hid_get_drvdata(hdev); - int id, report_length, supported_reports = 0; + int id, report_length; + u8 supported_reports = 0; id = REPORT_ID_HIDPP_SHORT; report_length = hidpp_get_report_length(hdev, id); @@ -3492,7 +3690,7 @@ static bool hidpp_validate_device(struct hid_device *hdev) if (report_length < HIDPP_REPORT_SHORT_LENGTH) goto bad_device; - supported_reports++; + supported_reports |= HIDPP_REPORT_SHORT_SUPPORTED; } id = REPORT_ID_HIDPP_LONG; @@ -3501,7 +3699,7 @@ static bool hidpp_validate_device(struct hid_device *hdev) if (report_length < HIDPP_REPORT_LONG_LENGTH) goto bad_device; - supported_reports++; + supported_reports |= HIDPP_REPORT_LONG_SUPPORTED; } id = REPORT_ID_HIDPP_VERY_LONG; @@ -3511,7 +3709,7 @@ static bool hidpp_validate_device(struct hid_device *hdev) report_length > HIDPP_REPORT_VERY_LONG_MAX_LENGTH) goto bad_device; - supported_reports++; + supported_reports |= HIDPP_REPORT_VERY_LONG_SUPPORTED; hidpp->very_long_report_length = report_length; } @@ -3560,7 +3758,9 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) /* * Make sure the device is HID++ capable, otherwise treat as generic HID */ - if (!hidpp_validate_device(hdev)) { + hidpp->supported_reports = hidpp_validate_device(hdev); + + if (!hidpp->supported_reports) { hid_set_drvdata(hdev, NULL); devm_kfree(&hdev->dev, hidpp); return hid_hw_start(hdev, HID_CONNECT_DEFAULT); @@ -3617,7 +3817,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id) if (ret < 0) { dev_err(&hdev->dev, "%s:hid_hw_open returned error:%d\n", __func__, ret); - hid_hw_stop(hdev); goto hid_hw_open_fail; } @@ -3639,6 +3838,14 @@ static int hidpp_probe(struct hid_device *hdev, 
const struct hid_device_id *id) hidpp_overwrite_name(hdev); } + if (connected && hidpp->protocol_major >= 2) { + ret = hidpp_set_wireless_feature_index(hidpp); + if (ret == -ENOENT) + hidpp->wireless_feature_index = 0; + else if (ret) + goto hid_hw_init_fail; + } + if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) { ret = wtp_get_config(hidpp); if (ret) @@ -3752,6 +3959,8 @@ static const struct hid_device_id hidpp_devices[] = { { LDJ_DEVICE(0x4071), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, { /* Mouse Logitech MX Master 2S */ LDJ_DEVICE(0x4069), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* Mouse Logitech MX Master 3 */ + LDJ_DEVICE(0x4082), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, { /* Mouse Logitech Performance MX */ LDJ_DEVICE(0x101a), .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_1P0 }, { /* Keyboard logitech K400 */ @@ -3808,6 +4017,14 @@ static const struct hid_device_id hidpp_devices[] = { { /* MX5500 keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, + { /* MX Master mouse over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb012), + .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb01e), + .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, + { /* MX Master 3 mouse over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb023), + .driver_data = HIDPP_QUIRK_HI_RES_SCROLL_X2121 }, {} }; diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c index 7a75aff78388..2eee5e31c2b7 100644 --- a/drivers/hid/hidraw.c +++ b/drivers/hid/hidraw.c @@ -451,6 +451,15 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd, -EFAULT : len; break; } + + if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWUNIQ(0))) { + int len = strlen(hid->uniq) + 1; + if (len > _IOC_SIZE(cmd)) + len = _IOC_SIZE(cmd); + ret = copy_to_user(user_arg, hid->uniq, len) ? + -EFAULT : len; + break; + } } ret = -ENOTTY; diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index 23dfe848979a..47ac20aee06f 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -164,6 +164,16 @@ config SENSORS_ADM1031 This driver can also be built as a module. If so, the module will be called adm1031. +config SENSORS_ADM1177 + tristate "Analog Devices ADM1177 and compatibles" + depends on I2C + help + If you say yes here you get support for Analog Devices ADM1177 + sensor chips. + + This driver can also be built as a module. If so, the module + will be called adm1177. + config SENSORS_ADM9240 tristate "Analog Devices ADM9240 and compatibles" depends on I2C @@ -385,6 +395,16 @@ config SENSORS_ATXP1 This driver can also be built as a module. If so, the module will be called atxp1. +config SENSORS_DRIVETEMP + tristate "Hard disk drives with temperature sensors" + depends on SCSI && ATA + help + If you say yes you get support for the temperature sensor on + hard disk drives. + + This driver can also be built as a module. If so, the module + will be called drivetemp. + config SENSORS_DS620 tristate "Dallas Semiconductor DS620" depends on I2C @@ -889,7 +909,7 @@ config SENSORS_MAX197 will be called max197. config SENSORS_MAX31722 -tristate "MAX31722 temperature sensor" + tristate "MAX31722 temperature sensor" depends on SPI help Support for the Maxim Integrated MAX31722/MAX31723 digital @@ -898,6 +918,16 @@ tristate "MAX31722 temperature sensor" This driver can also be built as a module. If so, the module will be called max31722. 
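As a companion to the hidraw HIDIOCGRAWUNIQ handler added above, userspace would query the uniq string roughly as follows. This is a sketch: it assumes the matching HIDIOCGRAWUNIQ(len) macro from <linux/hidraw.h> introduced alongside the handler, and the device node path is illustrative:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/hidraw.h>

int main(void)
{
	char uniq[64] = "";
	int fd = open("/dev/hidraw0", O_RDONLY);	/* hypothetical node */

	if (fd < 0)
		return 1;
	/* On success the handler returns the number of bytes copied. */
	if (ioctl(fd, HIDIOCGRAWUNIQ(sizeof(uniq)), uniq) < 0)
		return 1;
	printf("uniq: %s\n", uniq);
	return 0;
}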
+config SENSORS_MAX31730 + tristate "MAX31730 temperature sensor" + depends on I2C + help + Support for the Maxim Integrated MAX31730 3-Channel Remote + Temperature Sensor. + + This driver can also be built as a module. If so, the module + will be called max31730. + config SENSORS_MAX6621 tristate "Maxim MAX6621 sensor chip" depends on I2C @@ -1905,7 +1935,7 @@ config SENSORS_W83627HF will be called w83627hf. config SENSORS_W83627EHF - tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG, NCT6775F, NCT6776F" + tristate "Winbond W83627EHF/EHG/DHG/UHG, W83667HG" depends on !PPC select HWMON_VID help @@ -1918,8 +1948,7 @@ config SENSORS_W83627EHF the Core 2 Duo. And also the W83627UHG, which is a stripped down version of the W83627DHG (as far as hardware monitoring goes.) - This driver also supports Nuvoton W83667HG, W83667HG-B, NCT6775F - (also known as W83667HG-I), and NCT6776F. + This driver also supports Nuvoton W83667HG and W83667HG-B. This driver can also be built as a module. If so, the module will be called w83627ehf. diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 6db5db9cdc29..613f50987965 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -34,6 +34,7 @@ obj-$(CONFIG_SENSORS_ADM1025) += adm1025.o obj-$(CONFIG_SENSORS_ADM1026) += adm1026.o obj-$(CONFIG_SENSORS_ADM1029) += adm1029.o obj-$(CONFIG_SENSORS_ADM1031) += adm1031.o +obj-$(CONFIG_SENSORS_ADM1177) += adm1177.o obj-$(CONFIG_SENSORS_ADM9240) += adm9240.o obj-$(CONFIG_SENSORS_ADS7828) += ads7828.o obj-$(CONFIG_SENSORS_ADS7871) += ads7871.o @@ -56,6 +57,7 @@ obj-$(CONFIG_SENSORS_DA9052_ADC)+= da9052-hwmon.o obj-$(CONFIG_SENSORS_DA9055)+= da9055-hwmon.o obj-$(CONFIG_SENSORS_DELL_SMM) += dell-smm-hwmon.o obj-$(CONFIG_SENSORS_DME1737) += dme1737.o +obj-$(CONFIG_SENSORS_DRIVETEMP) += drivetemp.o obj-$(CONFIG_SENSORS_DS620) += ds620.o obj-$(CONFIG_SENSORS_DS1621) += ds1621.o obj-$(CONFIG_SENSORS_EMC1403) += emc1403.o @@ -123,6 +125,7 @@ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o obj-$(CONFIG_SENSORS_MAX1668) += max1668.o obj-$(CONFIG_SENSORS_MAX197) += max197.o obj-$(CONFIG_SENSORS_MAX31722) += max31722.o +obj-$(CONFIG_SENSORS_MAX31730) += max31730.o obj-$(CONFIG_SENSORS_MAX6621) += max6621.o obj-$(CONFIG_SENSORS_MAX6639) += max6639.o obj-$(CONFIG_SENSORS_MAX6642) += max6642.o diff --git a/drivers/hwmon/adm1177.c b/drivers/hwmon/adm1177.c new file mode 100644 index 000000000000..d314223a404a --- /dev/null +++ b/drivers/hwmon/adm1177.c @@ -0,0 +1,288 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ADM1177 Hot Swap Controller and Digital Power Monitor with Soft Start Pin + * + * Copyright 2015-2019 Analog Devices Inc. 
+ */ + +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/regulator/consumer.h> + +/* Command Byte Operations */ +#define ADM1177_CMD_V_CONT BIT(0) +#define ADM1177_CMD_I_CONT BIT(2) +#define ADM1177_CMD_VRANGE BIT(4) + +/* Extended Register */ +#define ADM1177_REG_ALERT_TH 2 + +#define ADM1177_BITS 12 + +/** + * struct adm1177_state - driver instance specific data + * @client: pointer to i2c client + * @reg: regulator info for the power supply of the device + * @r_sense_uohm: current sense resistor value + * @alert_threshold_ua: current limit for shutdown + * @vrange_high: internal voltage divider + */ +struct adm1177_state { + struct i2c_client *client; + struct regulator *reg; + u32 r_sense_uohm; + u32 alert_threshold_ua; + bool vrange_high; +}; + +static int adm1177_read_raw(struct adm1177_state *st, u8 num, u8 *data) +{ + return i2c_master_recv(st->client, data, num); +} + +static int adm1177_write_cmd(struct adm1177_state *st, u8 cmd) +{ + return i2c_smbus_write_byte(st->client, cmd); +} + +static int adm1177_write_alert_thr(struct adm1177_state *st, + u32 alert_threshold_ua) +{ + u64 val; + int ret; + + val = 0xFFULL * alert_threshold_ua * st->r_sense_uohm; + val = div_u64(val, 105840000U); + val = div_u64(val, 1000U); + if (val > 0xFF) + val = 0xFF; + + ret = i2c_smbus_write_byte_data(st->client, ADM1177_REG_ALERT_TH, + val); + if (ret) + return ret; + + st->alert_threshold_ua = alert_threshold_ua; + return 0; +} + +static int adm1177_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct adm1177_state *st = dev_get_drvdata(dev); + u8 data[3]; + long dummy; + int ret; + + switch (type) { + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + ret = adm1177_read_raw(st, 3, data); + if (ret < 0) + return ret; + dummy = (data[1] << 4) | (data[2] & 0xF); + /* + * convert to milliamperes + * ((105.84mV / 4096) x raw) / senseResistor(ohm) + */ + *val = div_u64((105840000ull * dummy), + 4096 * st->r_sense_uohm); + return 0; + case hwmon_curr_max_alarm: + *val = st->alert_threshold_ua; + return 0; + default: + return -EOPNOTSUPP; + } + case hwmon_in: + ret = adm1177_read_raw(st, 3, data); + if (ret < 0) + return ret; + dummy = (data[0] << 4) | (data[2] >> 4); + /* + * convert to millivolts based on resistor division + * (V_fullscale / 4096) * raw + */ + if (st->vrange_high) + dummy *= 26350; + else + dummy *= 6650; + + *val = DIV_ROUND_CLOSEST(dummy, 4096); + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int adm1177_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct adm1177_state *st = dev_get_drvdata(dev); + + switch (type) { + case hwmon_curr: + switch (attr) { + case hwmon_curr_max_alarm: + adm1177_write_alert_thr(st, val); + return 0; + default: + return -EOPNOTSUPP; + } + default: + return -EOPNOTSUPP; + } +} + +static umode_t adm1177_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct adm1177_state *st = data; + + switch (type) { + case hwmon_in: + switch (attr) { + case hwmon_in_input: + return 0444; + } + break; + case hwmon_curr: + switch (attr) { + case hwmon_curr_input: + if (st->r_sense_uohm) + return 0444; + return 0; + case hwmon_curr_max_alarm: + if (st->r_sense_uohm) + return 0644; + return 0; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info 
+static const struct hwmon_channel_info *adm1177_info[] = { + HWMON_CHANNEL_INFO(curr, + HWMON_C_INPUT | HWMON_C_MAX_ALARM), + HWMON_CHANNEL_INFO(in, + HWMON_I_INPUT), + NULL +}; + +static const struct hwmon_ops adm1177_hwmon_ops = { + .is_visible = adm1177_is_visible, + .read = adm1177_read, + .write = adm1177_write, +}; + +static const struct hwmon_chip_info adm1177_chip_info = { + .ops = &adm1177_hwmon_ops, + .info = adm1177_info, +}; + +static void adm1177_remove(void *data) +{ + struct adm1177_state *st = data; + + regulator_disable(st->reg); +} + +static int adm1177_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device *hwmon_dev; + struct adm1177_state *st; + u32 alert_threshold_ua; + int ret; + + st = devm_kzalloc(dev, sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->client = client; + + st->reg = devm_regulator_get_optional(&client->dev, "vref"); + if (IS_ERR(st->reg)) { + if (PTR_ERR(st->reg) == -EPROBE_DEFER) + return -EPROBE_DEFER; + + st->reg = NULL; + } else { + ret = regulator_enable(st->reg); + if (ret) + return ret; + ret = devm_add_action_or_reset(&client->dev, adm1177_remove, + st); + if (ret) + return ret; + } + + if (device_property_read_u32(dev, "shunt-resistor-micro-ohms", + &st->r_sense_uohm)) + st->r_sense_uohm = 0; + if (device_property_read_u32(dev, "adi,shutdown-threshold-microamp", + &alert_threshold_ua)) { + if (st->r_sense_uohm) + /* + * set maximum default value from datasheet based on + * shunt-resistor + */ + alert_threshold_ua = div_u64(105840000000, + st->r_sense_uohm); + else + alert_threshold_ua = 0; + } + st->vrange_high = device_property_read_bool(dev, + "adi,vrange-high-enable"); + if (alert_threshold_ua && st->r_sense_uohm) { + ret = adm1177_write_alert_thr(st, alert_threshold_ua); + if (ret) + return ret; + } + + ret = adm1177_write_cmd(st, ADM1177_CMD_V_CONT | + ADM1177_CMD_I_CONT | + (st->vrange_high ? 0 : ADM1177_CMD_VRANGE)); + if (ret) + return ret; + + hwmon_dev = + devm_hwmon_device_register_with_info(dev, client->name, st, + &adm1177_chip_info, NULL); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct i2c_device_id adm1177_id[] = { + {"adm1177", 0}, + {} +}; +MODULE_DEVICE_TABLE(i2c, adm1177_id); + +static const struct of_device_id adm1177_dt_ids[] = { + { .compatible = "adi,adm1177" }, + {}, +}; +MODULE_DEVICE_TABLE(of, adm1177_dt_ids); + +static struct i2c_driver adm1177_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "adm1177", + .of_match_table = adm1177_dt_ids, + }, + .probe = adm1177_probe, + .id_table = adm1177_id, +}; +module_i2c_driver(adm1177_driver); + +MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>"); +MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>"); +MODULE_DESCRIPTION("Analog Devices ADM1177 ADC driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c new file mode 100644 index 000000000000..370d0c74eb01 --- /dev/null +++ b/drivers/hwmon/drivetemp.c @@ -0,0 +1,574 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Hwmon client for disk and solid state drives with temperature sensors + * Copyright (C) 2019 Zodiac Inflight Innovations + * + * With input from: + * Hwmon client for S.M.A.R.T. hard disk drives with temperature sensors. + * (C) 2018 Linus Walleij + * + * hwmon: Driver for SCSI/ATA temperature sensors + * by Constantin Baranov <const@mimas.ru>, submitted September 2009 + * + * This driver supports reporting the temperature of SATA drives. It can be + * easily extended to report the temperature of SCSI drives.
+ * + * The primary means to read drive temperatures and temperature limits + * for ATA drives is the SCT Command Transport feature set as specified in + * ATA8-ACS. + * It can be used to read the current drive temperature, temperature limits, + * and historic minimum and maximum temperatures. The SCT Command Transport + * feature set is documented in "AT Attachment 8 - ATA/ATAPI Command Set + * (ATA8-ACS)". + * + * If the SCT Command Transport feature set is not available, drive temperatures + * may be readable through SMART attributes. Since SMART attributes are not well + * defined, this method is only used as a fallback mechanism. + * + * There are three SMART attributes which may report drive temperatures. + * Those are defined as follows (from + * http://www.cropel.com/library/smart-attribute-list.aspx). + * + * 190 Temperature Temperature, monitored by a sensor somewhere inside + * the drive. Raw value typically holds the actual + * temperature (hexadecimal) in its rightmost two digits. + * + * 194 Temperature Temperature, monitored by a sensor somewhere inside + * the drive. Raw value typically holds the actual + * temperature (hexadecimal) in its rightmost two digits. + * + * 231 Temperature Temperature, monitored by a sensor somewhere inside + * the drive. Raw value typically holds the actual + * temperature (hexadecimal) in its rightmost two digits. + * + * Wikipedia defines attributes a bit differently. + * + * 190 Temperature Value is equal to (100-temp. °C), allowing manufacturer + * Difference or to set a minimum threshold which corresponds to a + * Airflow maximum temperature. This also follows the convention of + * Temperature 100 being a best-case value and lower values being + * undesirable. However, some older drives may instead + * report raw Temperature (identical to 0xC2) or + * Temperature minus 50 here. + * 194 Temperature or Indicates the device temperature, if the appropriate + * Temperature sensor is fitted. Lowest byte of the raw value contains + * Celsius the exact temperature value (Celsius degrees). + * 231 Life Left Indicates the approximate SSD life left, in terms of + * (SSDs) or program/erase cycles or available reserved blocks. + * Temperature A normalized value of 100 represents a new drive, with + * a threshold value at 10 indicating a need for + * replacement. A value of 0 may mean that the drive is + * operating in read-only mode to allow data recovery. + * Previously (pre-2010) occasionally used for Drive + * Temperature (more typically reported at 0xC2). + * + * The common denominator is that the first raw byte reports the temperature + * in degrees C on almost all drives. Some drives may report a fractional + * temperature in the second raw byte. + * + * Known exceptions (from libatasmart): + * - SAMSUNG SV0412H and SAMSUNG SV1204H report the temperature in 10ths + * of a degree C in the first two raw bytes. + * - A few Maxtor drives report an unknown or bad value in attribute 194. + * - Certain Apple SSD drives report an unknown value in attribute 190. + * Only certain firmware versions are affected. + * + * Those exceptions affect older ATA drives and are currently ignored. + * Also, the second raw byte (possibly reporting the fractional temperature) + * is currently ignored. + * + * Many drives also report temperature limits in additional SMART data raw + * bytes. The format of those is not well defined and varies widely. + * The driver does not currently attempt to report those limits. + * + * According to data in smartmontools, attribute 231 is rarely used to report + * drive temperatures. At the same time, several drives report SSD life left + * in attribute 231, but do not support temperature sensors. For this reason, + * attribute 231 is currently ignored. + * + * Following the above definitions, temperatures are reported as follows. + * - If SCT Command Transport is supported, it is used to read the + * temperature and, if available, temperature limits. + * - Otherwise, if SMART attribute 194 is supported, it is used to read + * the temperature. + * - Otherwise, if SMART attribute 190 is supported, it is used to read + * the temperature. + */
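To make the SMART fallback described above concrete, here is a short standalone sketch of the attribute-table walk; the function name and bare numeric constants are illustrative only (the driver itself uses the SMART_TEMP_PROP_* and ATA_MAX_SMART_ATTRS defines that follow).

#include <stdint.h>

/*
 * Walk the 512-byte SMART values page: attribute slots are 12 bytes
 * apart, with the attribute id at offset 2 of each slot and the first
 * raw byte (temperature in degrees C) at offset 7. Attribute 194 is
 * preferred over 190, matching drivetemp_get_smarttemp() below.
 */
static int smart_page_to_temp(const uint8_t *buf, long *temp_mc)
{
	uint8_t temp_raw = 0;
	int have_temp = 0;
	int i;

	for (i = 0; i < 30; i++) {
		const uint8_t *attr = buf + i * 12;
		int id = attr[2];

		if (!id)
			continue;
		if (id == 190) {		/* airflow temperature */
			temp_raw = attr[7];
			have_temp = 1;
		}
		if (id == 194) {		/* device temperature */
			temp_raw = attr[7];
			have_temp = 1;
			break;
		}
	}
	if (!have_temp)
		return -1;
	*temp_mc = temp_raw * 1000L;	/* hwmon reports millidegrees */
	return 0;
}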
+ +#include <linux/ata.h> +#include <linux/bits.h> +#include <linux/device.h> +#include <linux/hwmon.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <scsi/scsi_cmnd.h> +#include <scsi/scsi_device.h> +#include <scsi/scsi_driver.h> +#include <scsi/scsi_proto.h> + +struct drivetemp_data { + struct list_head list; /* list of instantiated devices */ + struct mutex lock; /* protect data buffer accesses */ + struct scsi_device *sdev; /* SCSI device */ + struct device *dev; /* instantiating device */ + struct device *hwdev; /* hardware monitoring device */ + u8 smartdata[ATA_SECT_SIZE]; /* local buffer */ + int (*get_temp)(struct drivetemp_data *st, u32 attr, long *val); + bool have_temp_lowest; /* lowest temp in SCT status */ + bool have_temp_highest; /* highest temp in SCT status */ + bool have_temp_min; /* have min temp */ + bool have_temp_max; /* have max temp */ + bool have_temp_lcrit; /* have lower critical limit */ + bool have_temp_crit; /* have critical limit */ + int temp_min; /* min temp */ + int temp_max; /* max temp */ + int temp_lcrit; /* lower critical limit */ + int temp_crit; /* critical limit */ +}; + +static LIST_HEAD(drivetemp_devlist); + +#define ATA_MAX_SMART_ATTRS 30 +#define SMART_TEMP_PROP_190 190 +#define SMART_TEMP_PROP_194 194 + +#define SCT_STATUS_REQ_ADDR 0xe0 +#define SCT_STATUS_VERSION_LOW 0 /* log byte offsets */ +#define SCT_STATUS_VERSION_HIGH 1 +#define SCT_STATUS_TEMP 200 +#define SCT_STATUS_TEMP_LOWEST 201 +#define SCT_STATUS_TEMP_HIGHEST 202 +#define SCT_READ_LOG_ADDR 0xe1 +#define SMART_READ_LOG 0xd5 +#define SMART_WRITE_LOG 0xd6 + +#define INVALID_TEMP 0x80 + +#define temp_is_valid(temp) ((temp) != INVALID_TEMP) +#define temp_from_sct(temp) (((s8)(temp)) * 1000) + +static inline bool ata_id_smart_supported(u16 *id) +{ + return id[ATA_ID_COMMAND_SET_1] & BIT(0); +} + +static inline bool ata_id_smart_enabled(u16 *id) +{ + return id[ATA_ID_CFS_ENABLE_1] & BIT(0); +} + +static int drivetemp_scsi_command(struct drivetemp_data *st, + u8 ata_command, u8 feature, + u8 lba_low, u8 lba_mid, u8 lba_high) +{ + u8 scsi_cmd[MAX_COMMAND_SIZE]; + int data_dir; + + memset(scsi_cmd, 0, sizeof(scsi_cmd)); + scsi_cmd[0] = ATA_16; + if (ata_command == ATA_CMD_SMART && feature == SMART_WRITE_LOG) { + scsi_cmd[1] = (5 << 1); /* PIO Data-out */ + /* + * No off.line or cc, write to dev, block count in sector count + * field. + */ + scsi_cmd[2] = 0x06; + data_dir = DMA_TO_DEVICE; + } else { + scsi_cmd[1] = (4 << 1); /* PIO Data-in */ + /* + * No off.line or cc, read from dev, block count in sector count + * field.
+ */ + scsi_cmd[2] = 0x0e; + data_dir = DMA_FROM_DEVICE; + } + scsi_cmd[4] = feature; + scsi_cmd[6] = 1; /* 1 sector */ + scsi_cmd[8] = lba_low; + scsi_cmd[10] = lba_mid; + scsi_cmd[12] = lba_high; + scsi_cmd[14] = ata_command; + + return scsi_execute_req(st->sdev, scsi_cmd, data_dir, + st->smartdata, ATA_SECT_SIZE, NULL, HZ, 5, + NULL); +} + +static int drivetemp_ata_command(struct drivetemp_data *st, u8 feature, + u8 select) +{ + return drivetemp_scsi_command(st, ATA_CMD_SMART, feature, select, + ATA_SMART_LBAM_PASS, ATA_SMART_LBAH_PASS); +} + +static int drivetemp_get_smarttemp(struct drivetemp_data *st, u32 attr, + long *temp) +{ + u8 *buf = st->smartdata; + bool have_temp = false; + u8 temp_raw; + u8 csum; + int err; + int i; + + err = drivetemp_ata_command(st, ATA_SMART_READ_VALUES, 0); + if (err) + return err; + + /* Checksum the read value table */ + csum = 0; + for (i = 0; i < ATA_SECT_SIZE; i++) + csum += buf[i]; + if (csum) { + dev_dbg(&st->sdev->sdev_gendev, + "checksum error reading SMART values\n"); + return -EIO; + } + + for (i = 0; i < ATA_MAX_SMART_ATTRS; i++) { + u8 *attr = buf + i * 12; + int id = attr[2]; + + if (!id) + continue; + + if (id == SMART_TEMP_PROP_190) { + temp_raw = attr[7]; + have_temp = true; + } + if (id == SMART_TEMP_PROP_194) { + temp_raw = attr[7]; + have_temp = true; + break; + } + } + + if (have_temp) { + *temp = temp_raw * 1000; + return 0; + } + + return -ENXIO; +} + +static int drivetemp_get_scttemp(struct drivetemp_data *st, u32 attr, long *val) +{ + u8 *buf = st->smartdata; + int err; + + err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR); + if (err) + return err; + switch (attr) { + case hwmon_temp_input: + *val = temp_from_sct(buf[SCT_STATUS_TEMP]); + break; + case hwmon_temp_lowest: + *val = temp_from_sct(buf[SCT_STATUS_TEMP_LOWEST]); + break; + case hwmon_temp_highest: + *val = temp_from_sct(buf[SCT_STATUS_TEMP_HIGHEST]); + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static int drivetemp_identify_sata(struct drivetemp_data *st) +{ + struct scsi_device *sdev = st->sdev; + u8 *buf = st->smartdata; + struct scsi_vpd *vpd; + bool is_ata, is_sata; + bool have_sct_data_table; + bool have_sct_temp; + bool have_smart; + bool have_sct; + u16 *ata_id; + u16 version; + long temp; + int err; + + /* SCSI-ATA Translation present? */ + rcu_read_lock(); + vpd = rcu_dereference(sdev->vpd_pg89); + + /* + * Verify that ATA IDENTIFY DEVICE data is included in ATA Information + * VPD and that the drive implements the SATA protocol. 
+ */ + if (!vpd || vpd->len < 572 || vpd->data[56] != ATA_CMD_ID_ATA || + vpd->data[36] != 0x34) { + rcu_read_unlock(); + return -ENODEV; + } + ata_id = (u16 *)&vpd->data[60]; + is_ata = ata_id_is_ata(ata_id); + is_sata = ata_id_is_sata(ata_id); + have_sct = ata_id_sct_supported(ata_id); + have_sct_data_table = ata_id_sct_data_tables(ata_id); + have_smart = ata_id_smart_supported(ata_id) && + ata_id_smart_enabled(ata_id); + + rcu_read_unlock(); + + /* bail out if this is not a SATA device */ + if (!is_ata || !is_sata) + return -ENODEV; + if (!have_sct) + goto skip_sct; + + err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_STATUS_REQ_ADDR); + if (err) + goto skip_sct; + + version = (buf[SCT_STATUS_VERSION_HIGH] << 8) | + buf[SCT_STATUS_VERSION_LOW]; + if (version != 2 && version != 3) + goto skip_sct; + + have_sct_temp = temp_is_valid(buf[SCT_STATUS_TEMP]); + if (!have_sct_temp) + goto skip_sct; + + st->have_temp_lowest = temp_is_valid(buf[SCT_STATUS_TEMP_LOWEST]); + st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]); + + if (!have_sct_data_table) + goto skip_sct; + + /* Request and read temperature history table */ + memset(buf, '\0', sizeof(st->smartdata)); + buf[0] = 5; /* data table command */ + buf[2] = 1; /* read table */ + buf[4] = 2; /* temperature history table */ + + err = drivetemp_ata_command(st, SMART_WRITE_LOG, SCT_STATUS_REQ_ADDR); + if (err) + goto skip_sct_data; + + err = drivetemp_ata_command(st, SMART_READ_LOG, SCT_READ_LOG_ADDR); + if (err) + goto skip_sct_data; + + /* + * Temperature limits per AT Attachment 8 - + * ATA/ATAPI Command Set (ATA8-ACS) + */ + st->have_temp_max = temp_is_valid(buf[6]); + st->have_temp_crit = temp_is_valid(buf[7]); + st->have_temp_min = temp_is_valid(buf[8]); + st->have_temp_lcrit = temp_is_valid(buf[9]); + + st->temp_max = temp_from_sct(buf[6]); + st->temp_crit = temp_from_sct(buf[7]); + st->temp_min = temp_from_sct(buf[8]); + st->temp_lcrit = temp_from_sct(buf[9]); + +skip_sct_data: + if (have_sct_temp) { + st->get_temp = drivetemp_get_scttemp; + return 0; + } +skip_sct: + if (!have_smart) + return -ENODEV; + st->get_temp = drivetemp_get_smarttemp; + return drivetemp_get_smarttemp(st, hwmon_temp_input, &temp); +} + +static int drivetemp_identify(struct drivetemp_data *st) +{ + struct scsi_device *sdev = st->sdev; + + /* Bail out immediately if there is no inquiry data */ + if (!sdev->inquiry || sdev->inquiry_len < 16) + return -ENODEV; + + /* Disk device? 
*/ + if (sdev->type != TYPE_DISK && sdev->type != TYPE_ZBC) + return -ENODEV; + + return drivetemp_identify_sata(st); +} + +static int drivetemp_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct drivetemp_data *st = dev_get_drvdata(dev); + int err = 0; + + if (type != hwmon_temp) + return -EINVAL; + + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_lowest: + case hwmon_temp_highest: + mutex_lock(&st->lock); + err = st->get_temp(st, attr, val); + mutex_unlock(&st->lock); + break; + case hwmon_temp_lcrit: + *val = st->temp_lcrit; + break; + case hwmon_temp_min: + *val = st->temp_min; + break; + case hwmon_temp_max: + *val = st->temp_max; + break; + case hwmon_temp_crit: + *val = st->temp_crit; + break; + default: + err = -EINVAL; + break; + } + return err; +} + +static umode_t drivetemp_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct drivetemp_data *st = data; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + return 0444; + case hwmon_temp_lowest: + if (st->have_temp_lowest) + return 0444; + break; + case hwmon_temp_highest: + if (st->have_temp_highest) + return 0444; + break; + case hwmon_temp_min: + if (st->have_temp_min) + return 0444; + break; + case hwmon_temp_max: + if (st->have_temp_max) + return 0444; + break; + case hwmon_temp_lcrit: + if (st->have_temp_lcrit) + return 0444; + break; + case hwmon_temp_crit: + if (st->have_temp_crit) + return 0444; + break; + default: + break; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info *drivetemp_info[] = { + HWMON_CHANNEL_INFO(chip, + HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | + HWMON_T_LOWEST | HWMON_T_HIGHEST | + HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_LCRIT | HWMON_T_CRIT), + NULL +}; + +static const struct hwmon_ops drivetemp_ops = { + .is_visible = drivetemp_is_visible, + .read = drivetemp_read, +}; + +static const struct hwmon_chip_info drivetemp_chip_info = { + .ops = &drivetemp_ops, + .info = drivetemp_info, +}; + +/* + * The device argument points to sdev->sdev_dev. Its parent is + * sdev->sdev_gendev, which we can use to get the scsi_device pointer. 
+ */ +static int drivetemp_add(struct device *dev, struct class_interface *intf) +{ + struct scsi_device *sdev = to_scsi_device(dev->parent); + struct drivetemp_data *st; + int err; + + st = kzalloc(sizeof(*st), GFP_KERNEL); + if (!st) + return -ENOMEM; + + st->sdev = sdev; + st->dev = dev; + mutex_init(&st->lock); + + if (drivetemp_identify(st)) { + err = -ENODEV; + goto abort; + } + + st->hwdev = hwmon_device_register_with_info(dev->parent, "drivetemp", + st, &drivetemp_chip_info, + NULL); + if (IS_ERR(st->hwdev)) { + err = PTR_ERR(st->hwdev); + goto abort; + } + + list_add(&st->list, &drivetemp_devlist); + return 0; + +abort: + kfree(st); + return err; +} + +static void drivetemp_remove(struct device *dev, struct class_interface *intf) +{ + struct drivetemp_data *st, *tmp; + + list_for_each_entry_safe(st, tmp, &drivetemp_devlist, list) { + if (st->dev == dev) { + list_del(&st->list); + hwmon_device_unregister(st->hwdev); + kfree(st); + break; + } + } +} + +static struct class_interface drivetemp_interface = { + .add_dev = drivetemp_add, + .remove_dev = drivetemp_remove, +}; + +static int __init drivetemp_init(void) +{ + return scsi_register_interface(&drivetemp_interface); +} + +static void __exit drivetemp_exit(void) +{ + scsi_unregister_interface(&drivetemp_interface); +} + +module_init(drivetemp_init); +module_exit(drivetemp_exit); + +MODULE_AUTHOR("Guenter Roeck <linus@roeck-us.net>"); +MODULE_DESCRIPTION("Hard drive temperature monitor"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/hwmon.c b/drivers/hwmon/hwmon.c index d018b20089ec..6a30fb453f7a 100644 --- a/drivers/hwmon/hwmon.c +++ b/drivers/hwmon/hwmon.c @@ -188,7 +188,7 @@ static int hwmon_thermal_add_sensor(struct device *dev, int index) static int hwmon_attr_base(enum hwmon_sensor_types type) { - if (type == hwmon_in) + if (type == hwmon_in || type == hwmon_intrusion) return 0; return 1; } @@ -343,6 +343,7 @@ static const char * const hwmon_chip_attrs[] = { }; static const char * const hwmon_temp_attr_templates[] = { + [hwmon_temp_enable] = "temp%d_enable", [hwmon_temp_input] = "temp%d_input", [hwmon_temp_type] = "temp%d_type", [hwmon_temp_lcrit] = "temp%d_lcrit", @@ -370,6 +371,7 @@ static const char * const hwmon_temp_attr_templates[] = { }; static const char * const hwmon_in_attr_templates[] = { + [hwmon_in_enable] = "in%d_enable", [hwmon_in_input] = "in%d_input", [hwmon_in_min] = "in%d_min", [hwmon_in_max] = "in%d_max", @@ -385,10 +387,10 @@ static const char * const hwmon_in_attr_templates[] = { [hwmon_in_max_alarm] = "in%d_max_alarm", [hwmon_in_lcrit_alarm] = "in%d_lcrit_alarm", [hwmon_in_crit_alarm] = "in%d_crit_alarm", - [hwmon_in_enable] = "in%d_enable", }; static const char * const hwmon_curr_attr_templates[] = { + [hwmon_curr_enable] = "curr%d_enable", [hwmon_curr_input] = "curr%d_input", [hwmon_curr_min] = "curr%d_min", [hwmon_curr_max] = "curr%d_max", @@ -407,6 +409,7 @@ static const char * const hwmon_curr_attr_templates[] = { }; static const char * const hwmon_power_attr_templates[] = { + [hwmon_power_enable] = "power%d_enable", [hwmon_power_average] = "power%d_average", [hwmon_power_average_interval] = "power%d_average_interval", [hwmon_power_average_interval_max] = "power%d_interval_max", @@ -438,11 +441,13 @@ static const char * const hwmon_power_attr_templates[] = { }; static const char * const hwmon_energy_attr_templates[] = { + [hwmon_energy_enable] = "energy%d_enable", [hwmon_energy_input] = "energy%d_input", [hwmon_energy_label] = "energy%d_label", }; static const char * const 
hwmon_humidity_attr_templates[] = { + [hwmon_humidity_enable] = "humidity%d_enable", [hwmon_humidity_input] = "humidity%d_input", [hwmon_humidity_label] = "humidity%d_label", [hwmon_humidity_min] = "humidity%d_min", @@ -454,6 +459,7 @@ static const char * const hwmon_humidity_attr_templates[] = { }; static const char * const hwmon_fan_attr_templates[] = { + [hwmon_fan_enable] = "fan%d_enable", [hwmon_fan_input] = "fan%d_input", [hwmon_fan_label] = "fan%d_label", [hwmon_fan_min] = "fan%d_min", @@ -474,6 +480,11 @@ static const char * const hwmon_pwm_attr_templates[] = { [hwmon_pwm_freq] = "pwm%d_freq", }; +static const char * const hwmon_intrusion_attr_templates[] = { + [hwmon_intrusion_alarm] = "intrusion%d_alarm", + [hwmon_intrusion_beep] = "intrusion%d_beep", +}; + static const char * const *__templates[] = { [hwmon_chip] = hwmon_chip_attrs, [hwmon_temp] = hwmon_temp_attr_templates, @@ -484,6 +495,7 @@ static const char * const *__templates[] = { [hwmon_humidity] = hwmon_humidity_attr_templates, [hwmon_fan] = hwmon_fan_attr_templates, [hwmon_pwm] = hwmon_pwm_attr_templates, + [hwmon_intrusion] = hwmon_intrusion_attr_templates, }; static const int __templates_size[] = { @@ -496,6 +508,7 @@ static const int __templates_size[] = { [hwmon_humidity] = ARRAY_SIZE(hwmon_humidity_attr_templates), [hwmon_fan] = ARRAY_SIZE(hwmon_fan_attr_templates), [hwmon_pwm] = ARRAY_SIZE(hwmon_pwm_attr_templates), + [hwmon_intrusion] = ARRAY_SIZE(hwmon_intrusion_attr_templates), }; static int hwmon_num_channel_attrs(const struct hwmon_channel_info *info) diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index 5c1dddde193c..e39354ffe973 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -1,13 +1,29 @@ // SPDX-License-Identifier: GPL-2.0-or-later /* - * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h processor hardware monitoring + * k10temp.c - AMD Family 10h/11h/12h/14h/15h/16h/17h + * processor hardware monitoring * * Copyright (c) 2009 Clemens Ladisch <clemens@ladisch.de> + * Copyright (c) 2020 Guenter Roeck <linux@roeck-us.net> + * + * Implementation notes: + * - CCD register address information as well as the calculation to + * convert raw register values is from https://github.com/ocerman/zenpower. + * The information is not confirmed from chip datasheets, but experiments + * suggest that it provides reasonable temperature values. + * - Register addresses to read chip voltage and current are also from + * https://github.com/ocerman/zenpower, and not confirmed from chip + * datasheets. Current calibration is board specific and not typically + * shared by board vendors. For this reason, current values are + * normalized to report 1A/LSB for core current and 0.25A/LSB for SoC + * current. Reported values can be adjusted using the sensors configuration + * file. */
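The conversions implied by these notes are simple linear mappings. Below is a standalone sketch, with invented helper names, of how the SVI telemetry and CCD temperature registers decode; since the register layouts come from the zenpower observations cited above rather than datasheets, the scale factors are carried-over assumptions, not confirmed values.

#include <stdint.h>

/* SVI telemetry: VID code in bits 23:16, current code in bits 7:0. */
static long svi_to_mv(uint32_t regval)
{
	uint32_t vid = (regval >> 16) & 0xff;

	/* 1.55 V base, 6.25 mV per VID step, rounded to mV */
	return (155000 - (long)vid * 625 + 50) / 100;
}

static long svi_to_ma(uint32_t regval, long cfactor)
{
	/* cfactor is CFACTOR_ICORE or CFACTOR_ISOC (uA per code) */
	return (cfactor * (regval & 0xff)) / 1000;
}

/* CCD temperature: 11-bit code, 0.125 degC per LSB, -49 degC offset. */
static long ccd_to_mc(uint32_t regval)
{
	return (regval & 0x7ff) * 125L - 49000;
}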
+#include <linux/bitops.h> +#include <linux/debugfs.h> #include <linux/err.h> #include <linux/hwmon.h> -#include <linux/hwmon-sysfs.h> #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> @@ -31,22 +47,22 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); #endif /* CPUID function 0x80000001, ebx */ -#define CPUID_PKGTYPE_MASK 0xf0000000 +#define CPUID_PKGTYPE_MASK GENMASK(31, 28) #define CPUID_PKGTYPE_F 0x00000000 #define CPUID_PKGTYPE_AM2R2_AM3 0x10000000 /* DRAM controller (PCI function 2) */ #define REG_DCT0_CONFIG_HIGH 0x094 -#define DDR3_MODE 0x00000100 +#define DDR3_MODE BIT(8) /* miscellaneous (PCI function 3) */ #define REG_HARDWARE_THERMAL_CONTROL 0x64 -#define HTC_ENABLE 0x00000001 +#define HTC_ENABLE BIT(0) #define REG_REPORTED_TEMPERATURE 0xa4 #define REG_NORTHBRIDGE_CAPABILITIES 0xe8 -#define NB_CAP_HTC 0x00000400 +#define NB_CAP_HTC BIT(10) /* * For F15h M60h and M70h, REG_HARDWARE_THERMAL_CONTROL @@ -60,6 +76,20 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); /* F17h M01h Access through SMN */ #define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800 +#define F17H_M70H_CCD_TEMP(x) (0x00059954 + ((x) * 4)) +#define F17H_M70H_CCD_TEMP_VALID BIT(11) +#define F17H_M70H_CCD_TEMP_MASK GENMASK(10, 0) + +#define F17H_M01H_SVI 0x0005A000 +#define F17H_M01H_SVI_TEL_PLANE0 (F17H_M01H_SVI + 0xc) +#define F17H_M01H_SVI_TEL_PLANE1 (F17H_M01H_SVI + 0x10) + +#define CUR_TEMP_SHIFT 21 +#define CUR_TEMP_RANGE_SEL_MASK BIT(19) + +#define CFACTOR_ICORE 1000000 /* 1A / LSB */ +#define CFACTOR_ISOC 250000 /* 0.25A / LSB */ + struct k10temp_data { struct pci_dev *pdev; void (*read_htcreg)(struct pci_dev *pdev, u32 *regval); @@ -67,6 +97,10 @@ struct k10temp_data { int temp_offset; u32 temp_adjust_mask; bool show_tdie; + u32 show_tccd; + u32 svi_addr[2]; + bool show_current; + int cfactor[2]; }; struct tctl_offset { @@ -84,6 +118,16 @@ static const struct tctl_offset tctl_offset_table[] = { { 0x17, "AMD Ryzen Threadripper 29", 27000 }, /* 29{20,50,70,90}[W]X */ }; +static bool is_threadripper(void) +{ + return strstr(boot_cpu_data.x86_model_id, "Threadripper"); +} + +static bool is_epyc(void) +{ + return strstr(boot_cpu_data.x86_model_id, "EPYC"); +} + static void read_htcreg_pci(struct pci_dev *pdev, u32 *regval) { pci_read_config_dword(pdev, REG_HARDWARE_THERMAL_CONTROL, regval); @@ -123,130 +167,237 @@ static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval) F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval); } -static unsigned int get_raw_temp(struct k10temp_data *data) +static long get_raw_temp(struct k10temp_data *data) { - unsigned int temp; u32 regval; + long temp; data->read_tempreg(data->pdev, &regval); - temp = (regval >> 21) * 125; + temp = (regval >> CUR_TEMP_SHIFT) * 125; if (regval & data->temp_adjust_mask) temp -= 49000; return temp; } -static ssize_t temp1_input_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct k10temp_data *data = dev_get_drvdata(dev); - unsigned int temp = get_raw_temp(data); +static const char *k10temp_temp_label[] = { + "Tdie", + "Tctl", + "Tccd1", + "Tccd2", + "Tccd3", + "Tccd4", + "Tccd5", + "Tccd6", + "Tccd7", + "Tccd8", +}; - if (temp > data->temp_offset) - temp -= data->temp_offset; - else - temp = 0; +static const char *k10temp_in_label[] = { + "Vcore", + "Vsoc", +}; - return sprintf(buf, "%u\n", temp); -} +static const char *k10temp_curr_label[] = { + "Icore", + "Isoc", +}; -static ssize_t temp2_input_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static int k10temp_read_labels(struct device *dev, + enum
hwmon_sensor_types type, + u32 attr, int channel, const char **str) { - struct k10temp_data *data = dev_get_drvdata(dev); - unsigned int temp = get_raw_temp(data); - - return sprintf(buf, "%u\n", temp); + switch (type) { + case hwmon_temp: + *str = k10temp_temp_label[channel]; + break; + case hwmon_in: + *str = k10temp_in_label[channel]; + break; + case hwmon_curr: + *str = k10temp_curr_label[channel]; + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t temp_label_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static int k10temp_read_curr(struct device *dev, u32 attr, int channel, + long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct k10temp_data *data = dev_get_drvdata(dev); + u32 regval; - return sprintf(buf, "%s\n", attr->index ? "Tctl" : "Tdie"); + switch (attr) { + case hwmon_curr_input: + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + data->svi_addr[channel], &regval); + *val = DIV_ROUND_CLOSEST(data->cfactor[channel] * + (regval & 0xff), + 1000); + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t temp1_max_show(struct device *dev, - struct device_attribute *attr, char *buf) +static int k10temp_read_in(struct device *dev, u32 attr, int channel, long *val) { - return sprintf(buf, "%d\n", 70 * 1000); + struct k10temp_data *data = dev_get_drvdata(dev); + u32 regval; + + switch (attr) { + case hwmon_in_input: + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + data->svi_addr[channel], &regval); + regval = (regval >> 16) & 0xff; + *val = DIV_ROUND_CLOSEST(155000 - regval * 625, 100); + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static ssize_t temp_crit_show(struct device *dev, - struct device_attribute *devattr, char *buf) +static int k10temp_read_temp(struct device *dev, u32 attr, int channel, + long *val) { - struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct k10temp_data *data = dev_get_drvdata(dev); - int show_hyst = attr->index; u32 regval; - int value; - data->read_htcreg(data->pdev, &regval); - value = ((regval >> 16) & 0x7f) * 500 + 52000; - if (show_hyst) - value -= ((regval >> 24) & 0xf) * 500; - return sprintf(buf, "%d\n", value); + switch (attr) { + case hwmon_temp_input: + switch (channel) { + case 0: /* Tdie */ + *val = get_raw_temp(data) - data->temp_offset; + if (*val < 0) + *val = 0; + break; + case 1: /* Tctl */ + *val = get_raw_temp(data); + if (*val < 0) + *val = 0; + break; + case 2 ...
9: /* Tccd{1-8} */ + amd_smn_read(amd_pci_dev_to_node_id(data->pdev), + F17H_M70H_CCD_TEMP(channel - 2), &regval); + *val = (regval & F17H_M70H_CCD_TEMP_MASK) * 125 - 49000; + break; + default: + return -EOPNOTSUPP; + } + break; + case hwmon_temp_max: + *val = 70 * 1000; + break; + case hwmon_temp_crit: + data->read_htcreg(data->pdev, &regval); + *val = ((regval >> 16) & 0x7f) * 500 + 52000; + break; + case hwmon_temp_crit_hyst: + data->read_htcreg(data->pdev, &regval); + *val = (((regval >> 16) & 0x7f) + - ((regval >> 24) & 0xf)) * 500 + 52000; + break; + default: + return -EOPNOTSUPP; + } + return 0; } -static DEVICE_ATTR_RO(temp1_input); -static DEVICE_ATTR_RO(temp1_max); -static SENSOR_DEVICE_ATTR_RO(temp1_crit, temp_crit, 0); -static SENSOR_DEVICE_ATTR_RO(temp1_crit_hyst, temp_crit, 1); - -static SENSOR_DEVICE_ATTR_RO(temp1_label, temp_label, 0); -static DEVICE_ATTR_RO(temp2_input); -static SENSOR_DEVICE_ATTR_RO(temp2_label, temp_label, 1); +static int k10temp_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + switch (type) { + case hwmon_temp: + return k10temp_read_temp(dev, attr, channel, val); + case hwmon_in: + return k10temp_read_in(dev, attr, channel, val); + case hwmon_curr: + return k10temp_read_curr(dev, attr, channel, val); + default: + return -EOPNOTSUPP; + } +} -static umode_t k10temp_is_visible(struct kobject *kobj, - struct attribute *attr, int index) +static umode_t k10temp_is_visible(const void *_data, + enum hwmon_sensor_types type, + u32 attr, int channel) { - struct device *dev = container_of(kobj, struct device, kobj); - struct k10temp_data *data = dev_get_drvdata(dev); + const struct k10temp_data *data = _data; struct pci_dev *pdev = data->pdev; u32 reg; - switch (index) { - case 0 ... 1: /* temp1_input, temp1_max */ - default: - break; - case 2 ... 3: /* temp1_crit, temp1_crit_hyst */ - if (!data->read_htcreg) - return 0; - - pci_read_config_dword(pdev, REG_NORTHBRIDGE_CAPABILITIES, - &reg); - if (!(reg & NB_CAP_HTC)) - return 0; - - data->read_htcreg(data->pdev, &reg); - if (!(reg & HTC_ENABLE)) + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + switch (channel) { + case 0: /* Tdie, or Tctl if we don't show it */ + break; + case 1: /* Tctl */ + if (!data->show_tdie) + return 0; + break; + case 2 ... 9: /* Tccd{1-8} */ + if (!(data->show_tccd & BIT(channel - 2))) + return 0; + break; + default: + return 0; + } + break; + case hwmon_temp_max: + if (channel || data->show_tdie) + return 0; + break; + case hwmon_temp_crit: + case hwmon_temp_crit_hyst: + if (channel || !data->read_htcreg) + return 0; + + pci_read_config_dword(pdev, + REG_NORTHBRIDGE_CAPABILITIES, + &reg); + if (!(reg & NB_CAP_HTC)) + return 0; + + data->read_htcreg(data->pdev, &reg); + if (!(reg & HTC_ENABLE)) + return 0; + break; + case hwmon_temp_label: + /* No labels if we don't show the die temperature */ + if (!data->show_tdie) + return 0; + switch (channel) { + case 0: /* Tdie */ + case 1: /* Tctl */ + break; + case 2 ... 9: /* Tccd{1-8} */ + if (!(data->show_tccd & BIT(channel - 2))) + return 0; + break; + default: + return 0; + } + break; + default: return 0; + } break; - case 4 ...
6: /* temp1_label, temp2_input, temp2_label */ - if (!data->show_tdie) + case hwmon_in: + case hwmon_curr: + if (!data->show_current) return 0; break; + default: + return 0; } - return attr->mode; + return 0444; } -static struct attribute *k10temp_attrs[] = { - &dev_attr_temp1_input.attr, - &dev_attr_temp1_max.attr, - &sensor_dev_attr_temp1_crit.dev_attr.attr, - &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, - &sensor_dev_attr_temp1_label.dev_attr.attr, - &dev_attr_temp2_input.attr, - &sensor_dev_attr_temp2_label.dev_attr.attr, - NULL -}; - -static const struct attribute_group k10temp_group = { - .attrs = k10temp_attrs, - .is_visible = k10temp_is_visible, -}; -__ATTRIBUTE_GROUPS(k10temp); - static bool has_erratum_319(struct pci_dev *pdev) { u32 pkg_type, reg_dram_cfg; @@ -281,8 +432,125 @@ static bool has_erratum_319(struct pci_dev *pdev) (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_stepping <= 2); } -static int k10temp_probe(struct pci_dev *pdev, - const struct pci_device_id *id) +#ifdef CONFIG_DEBUG_FS + +static void k10temp_smn_regs_show(struct seq_file *s, struct pci_dev *pdev, + u32 addr, int count) +{ + u32 reg; + int i; + + for (i = 0; i < count; i++) { + if (!(i & 3)) + seq_printf(s, "0x%06x: ", addr + i * 4); + amd_smn_read(amd_pci_dev_to_node_id(pdev), addr + i * 4, &reg); + seq_printf(s, "%08x ", reg); + if ((i & 3) == 3) + seq_puts(s, "\n"); + } +} + +static int svi_show(struct seq_file *s, void *unused) +{ + struct k10temp_data *data = s->private; + + k10temp_smn_regs_show(s, data->pdev, F17H_M01H_SVI, 32); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(svi); + +static int thm_show(struct seq_file *s, void *unused) +{ + struct k10temp_data *data = s->private; + + k10temp_smn_regs_show(s, data->pdev, + F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, 256); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(thm); + +static void k10temp_debugfs_cleanup(void *ddir) +{ + debugfs_remove_recursive(ddir); +} + +static void k10temp_init_debugfs(struct k10temp_data *data) +{ + struct dentry *debugfs; + char name[32]; + + /* Only show debugfs data for Family 17h/18h CPUs */ + if (!data->show_tdie) + return; + + scnprintf(name, sizeof(name), "k10temp-%s", pci_name(data->pdev)); + + debugfs = debugfs_create_dir(name, NULL); + if (debugfs) { + debugfs_create_file("svi", 0444, debugfs, data, &svi_fops); + debugfs_create_file("thm", 0444, debugfs, data, &thm_fops); + devm_add_action_or_reset(&data->pdev->dev, + k10temp_debugfs_cleanup, debugfs); + } +} + +#else + +static void k10temp_init_debugfs(struct k10temp_data *data) +{ +} + +#endif + +static const struct hwmon_channel_info *k10temp_info[] = { + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MAX | + HWMON_T_CRIT | HWMON_T_CRIT_HYST | + HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL), + HWMON_CHANNEL_INFO(in, + HWMON_I_INPUT | HWMON_I_LABEL, + HWMON_I_INPUT | HWMON_I_LABEL), + HWMON_CHANNEL_INFO(curr, + HWMON_C_INPUT | HWMON_C_LABEL, + HWMON_C_INPUT | HWMON_C_LABEL), + NULL +}; + +static const struct hwmon_ops k10temp_hwmon_ops = { + .is_visible = k10temp_is_visible, + .read = k10temp_read, + .read_string = k10temp_read_labels, +}; + +static const struct hwmon_chip_info k10temp_chip_info = { + .ops = &k10temp_hwmon_ops, + .info = k10temp_info, +}; + +static void k10temp_get_ccd_support(struct pci_dev
*pdev, + struct k10temp_data *data, int limit) +{ + u32 regval; + int i; + + for (i = 0; i < limit; i++) { + amd_smn_read(amd_pci_dev_to_node_id(pdev), + F17H_M70H_CCD_TEMP(i), &regval); + if (regval & F17H_M70H_CCD_TEMP_VALID) + data->show_tccd |= BIT(i); + } +} + +static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int unreliable = has_erratum_319(pdev); struct device *dev = &pdev->dev; @@ -312,9 +580,32 @@ static int k10temp_probe(struct pci_dev *pdev, data->read_htcreg = read_htcreg_nb_f15; data->read_tempreg = read_tempreg_nb_f15; } else if (boot_cpu_data.x86 == 0x17 || boot_cpu_data.x86 == 0x18) { - data->temp_adjust_mask = 0x80000; + data->temp_adjust_mask = CUR_TEMP_RANGE_SEL_MASK; data->read_tempreg = read_tempreg_nb_f17; data->show_tdie = true; + + switch (boot_cpu_data.x86_model) { + case 0x1: /* Zen */ + case 0x8: /* Zen+ */ + case 0x11: /* Zen APU */ + case 0x18: /* Zen+ APU */ + data->show_current = !is_threadripper() && !is_epyc(); + data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE0; + data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE1; + data->cfactor[0] = CFACTOR_ICORE; + data->cfactor[1] = CFACTOR_ISOC; + k10temp_get_ccd_support(pdev, data, 4); + break; + case 0x31: /* Zen2 Threadripper */ + case 0x71: /* Zen2 */ + data->show_current = !is_threadripper() && !is_epyc(); + data->cfactor[0] = CFACTOR_ICORE; + data->cfactor[1] = CFACTOR_ISOC; + data->svi_addr[0] = F17H_M01H_SVI_TEL_PLANE1; + data->svi_addr[1] = F17H_M01H_SVI_TEL_PLANE0; + k10temp_get_ccd_support(pdev, data, 8); + break; + } } else { data->read_htcreg = read_htcreg_pci; data->read_tempreg = read_tempreg_pci; @@ -330,9 +621,15 @@ static int k10temp_probe(struct pci_dev *pdev, } } - hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data, - k10temp_groups); - return PTR_ERR_OR_ZERO(hwmon_dev); + hwmon_dev = devm_hwmon_device_register_with_info(dev, "k10temp", data, + &k10temp_chip_info, + NULL); + if (IS_ERR(hwmon_dev)) + return PTR_ERR(hwmon_dev); + + k10temp_init_debugfs(data); + + return 0; } static const struct pci_device_id k10temp_id_table[] = { diff --git a/drivers/hwmon/max31730.c b/drivers/hwmon/max31730.c new file mode 100644 index 000000000000..eb22a34dc36b --- /dev/null +++ b/drivers/hwmon/max31730.c @@ -0,0 +1,440 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for MAX31730 3-Channel Remote Temperature Sensor + * + * Copyright (c) 2019 Guenter Roeck <linux@roeck-us.net> + */ + +#include <linux/bits.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/hwmon.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/slab.h> + +/* Addresses scanned */ +static const unsigned short normal_i2c[] = { 0x1c, 0x1d, 0x1e, 0x1f, 0x4c, + 0x4d, 0x4e, 0x4f, I2C_CLIENT_END }; + +/* The MAX31730 registers */ +#define MAX31730_REG_TEMP 0x00 +#define MAX31730_REG_CONF 0x13 +#define MAX31730_STOP BIT(7) +#define MAX31730_EXTRANGE BIT(1) +#define MAX31730_REG_TEMP_OFFSET 0x16 +#define MAX31730_TEMP_OFFSET_BASELINE 0x77 +#define MAX31730_REG_OFFSET_ENABLE 0x17 +#define MAX31730_REG_TEMP_MAX 0x20 +#define MAX31730_REG_TEMP_MIN 0x30 +#define MAX31730_REG_STATUS_HIGH 0x32 +#define MAX31730_REG_STATUS_LOW 0x33 +#define MAX31730_REG_CHANNEL_ENABLE 0x35 +#define MAX31730_REG_TEMP_FAULT 0x36 + +#define MAX31730_REG_MFG_ID 0x50 +#define MAX31730_MFG_ID 0x4d +#define MAX31730_REG_MFG_REV 0x51 +#define MAX31730_MFG_REV 0x01 + +#define MAX31730_TEMP_MIN (-128000) +#define MAX31730_TEMP_MAX 127937 + +/*
Each client has this additional data */ +struct max31730_data { + struct i2c_client *client; + u8 orig_conf; + u8 current_conf; + u8 offset_enable; + u8 channel_enable; +}; + +/*-----------------------------------------------------------------------*/ + +static inline long max31730_reg_to_mc(s16 temp) +{ + return DIV_ROUND_CLOSEST((temp >> 4) * 1000, 16); +} + +static int max31730_write_config(struct max31730_data *data, u8 set_mask, + u8 clr_mask) +{ + u8 value; + + clr_mask |= MAX31730_EXTRANGE; + value = data->current_conf & ~clr_mask; + value |= set_mask; + + if (data->current_conf != value) { + s32 err; + + err = i2c_smbus_write_byte_data(data->client, MAX31730_REG_CONF, + value); + if (err) + return err; + data->current_conf = value; + } + return 0; +} + +static int max31730_set_enable(struct i2c_client *client, int reg, + u8 *confdata, int channel, bool enable) +{ + u8 regval = *confdata; + int err; + + if (enable) + regval |= BIT(channel); + else + regval &= ~BIT(channel); + + if (regval != *confdata) { + err = i2c_smbus_write_byte_data(client, reg, regval); + if (err) + return err; + *confdata = regval; + } + return 0; +} + +static int max31730_set_offset_enable(struct max31730_data *data, int channel, + bool enable) +{ + return max31730_set_enable(data->client, MAX31730_REG_OFFSET_ENABLE, + &data->offset_enable, channel, enable); +} + +static int max31730_set_channel_enable(struct max31730_data *data, int channel, + bool enable) +{ + return max31730_set_enable(data->client, MAX31730_REG_CHANNEL_ENABLE, + &data->channel_enable, channel, enable); +} + +static int max31730_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct max31730_data *data = dev_get_drvdata(dev); + int regval, reg, offset; + + if (type != hwmon_temp) + return -EINVAL; + + switch (attr) { + case hwmon_temp_input: + if (!(data->channel_enable & BIT(channel))) + return -ENODATA; + reg = MAX31730_REG_TEMP + (channel * 2); + break; + case hwmon_temp_max: + reg = MAX31730_REG_TEMP_MAX + (channel * 2); + break; + case hwmon_temp_min: + reg = MAX31730_REG_TEMP_MIN; + break; + case hwmon_temp_enable: + *val = !!(data->channel_enable & BIT(channel)); + return 0; + case hwmon_temp_offset: + if (!channel) + return -EINVAL; + if (!(data->offset_enable & BIT(channel))) { + *val = 0; + return 0; + } + offset = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_TEMP_OFFSET); + if (offset < 0) + return offset; + *val = (offset - MAX31730_TEMP_OFFSET_BASELINE) * 125; + return 0; + case hwmon_temp_fault: + regval = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_TEMP_FAULT); + if (regval < 0) + return regval; + *val = !!(regval & BIT(channel)); + return 0; + case hwmon_temp_min_alarm: + regval = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_STATUS_LOW); + if (regval < 0) + return regval; + *val = !!(regval & BIT(channel)); + return 0; + case hwmon_temp_max_alarm: + regval = i2c_smbus_read_byte_data(data->client, + MAX31730_REG_STATUS_HIGH); + if (regval < 0) + return regval; + *val = !!(regval & BIT(channel)); + return 0; + default: + return -EINVAL; + } + regval = i2c_smbus_read_word_swapped(data->client, reg); + if (regval < 0) + return regval; + + *val = max31730_reg_to_mc(regval); + + return 0; +} + +static int max31730_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct max31730_data *data = dev_get_drvdata(dev); + int reg, err; + + if (type != hwmon_temp) + return -EINVAL; + + switch (attr) { + case 
hwmon_temp_max: + reg = MAX31730_REG_TEMP_MAX + channel * 2; + break; + case hwmon_temp_min: + reg = MAX31730_REG_TEMP_MIN; + break; + case hwmon_temp_enable: + if (val != 0 && val != 1) + return -EINVAL; + return max31730_set_channel_enable(data, channel, val); + case hwmon_temp_offset: + val = clamp_val(val, -14875, 17000) + 14875; + val = DIV_ROUND_CLOSEST(val, 125); + err = max31730_set_offset_enable(data, channel, + val != MAX31730_TEMP_OFFSET_BASELINE); + if (err) + return err; + return i2c_smbus_write_byte_data(data->client, + MAX31730_REG_TEMP_OFFSET, val); + default: + return -EINVAL; + } + + val = clamp_val(val, MAX31730_TEMP_MIN, MAX31730_TEMP_MAX); + val = DIV_ROUND_CLOSEST(val << 4, 1000) << 4; + + return i2c_smbus_write_word_swapped(data->client, reg, (u16)val); +} + +static umode_t max31730_is_visible(const void *data, + enum hwmon_sensor_types type, + u32 attr, int channel) +{ + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_min_alarm: + case hwmon_temp_max_alarm: + case hwmon_temp_fault: + return 0444; + case hwmon_temp_min: + return channel ? 0444 : 0644; + case hwmon_temp_offset: + case hwmon_temp_enable: + case hwmon_temp_max: + return 0644; + } + break; + default: + break; + } + return 0; +} + +static const struct hwmon_channel_info *max31730_info[] = { + HWMON_CHANNEL_INFO(chip, + HWMON_C_REGISTER_TZ), + HWMON_CHANNEL_INFO(temp, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_OFFSET | HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_OFFSET | HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT, + HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX | + HWMON_T_OFFSET | HWMON_T_ENABLE | + HWMON_T_MIN_ALARM | HWMON_T_MAX_ALARM | + HWMON_T_FAULT + ), + NULL +}; + +static const struct hwmon_ops max31730_hwmon_ops = { + .is_visible = max31730_is_visible, + .read = max31730_read, + .write = max31730_write, +}; + +static const struct hwmon_chip_info max31730_chip_info = { + .ops = &max31730_hwmon_ops, + .info = max31730_info, +}; + +static void max31730_remove(void *data) +{ + struct max31730_data *max31730 = data; + struct i2c_client *client = max31730->client; + + i2c_smbus_write_byte_data(client, MAX31730_REG_CONF, + max31730->orig_conf); +} + +static int +max31730_probe(struct i2c_client *client, const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct device *hwmon_dev; + struct max31730_data *data; + int status, err; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) + return -EIO; + + data = devm_kzalloc(dev, sizeof(struct max31730_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->client = client; + + /* Cache original configuration and enable status */ + status = i2c_smbus_read_byte_data(client, MAX31730_REG_CHANNEL_ENABLE); + if (status < 0) + return status; + data->channel_enable = status; + + status = i2c_smbus_read_byte_data(client, MAX31730_REG_OFFSET_ENABLE); + if (status < 0) + return status; + data->offset_enable = status; + + status = i2c_smbus_read_byte_data(client, MAX31730_REG_CONF); + if (status < 0) + return status; + data->orig_conf = status; + data->current_conf = status; + + err = max31730_write_config(data, + data->channel_enable ? 0 : MAX31730_STOP, + data->channel_enable ? 
MAX31730_STOP : 0); + if (err) + return err; + + dev_set_drvdata(dev, data); + + err = devm_add_action_or_reset(dev, max31730_remove, data); + if (err) + return err; + + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, + data, + &max31730_chip_info, + NULL); + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct i2c_device_id max31730_ids[] = { + { "max31730", 0, }, + { } +}; +MODULE_DEVICE_TABLE(i2c, max31730_ids); + +static const struct of_device_id __maybe_unused max31730_of_match[] = { + { + .compatible = "maxim,max31730", + }, + { }, +}; +MODULE_DEVICE_TABLE(of, max31730_of_match); + +static bool max31730_check_reg_temp(struct i2c_client *client, + int reg) +{ + int regval; + + regval = i2c_smbus_read_byte_data(client, reg + 1); + return regval < 0 || (regval & 0x0f); +} + +/* Return 0 if detection is successful, -ENODEV otherwise */ +static int max31730_detect(struct i2c_client *client, + struct i2c_board_info *info) +{ + struct i2c_adapter *adapter = client->adapter; + int regval; + int i; + + if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | + I2C_FUNC_SMBUS_WORD_DATA)) + return -ENODEV; + + regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_ID); + if (regval != MAX31730_MFG_ID) + return -ENODEV; + regval = i2c_smbus_read_byte_data(client, MAX31730_REG_MFG_REV); + if (regval != MAX31730_MFG_REV) + return -ENODEV; + + /* lower 4 bits of temperature and limit registers must be 0 */ + if (max31730_check_reg_temp(client, MAX31730_REG_TEMP_MIN)) + return -ENODEV; + + for (i = 0; i < 4; i++) { + if (max31730_check_reg_temp(client, MAX31730_REG_TEMP + i * 2)) + return -ENODEV; + if (max31730_check_reg_temp(client, + MAX31730_REG_TEMP_MAX + i * 2)) + return -ENODEV; + } + + strlcpy(info->type, "max31730", I2C_NAME_SIZE); + + return 0; +} + +static int __maybe_unused max31730_suspend(struct device *dev) +{ + struct max31730_data *data = dev_get_drvdata(dev); + + return max31730_write_config(data, MAX31730_STOP, 0); +} + +static int __maybe_unused max31730_resume(struct device *dev) +{ + struct max31730_data *data = dev_get_drvdata(dev); + + return max31730_write_config(data, 0, MAX31730_STOP); +} + +static SIMPLE_DEV_PM_OPS(max31730_pm_ops, max31730_suspend, max31730_resume); + +static struct i2c_driver max31730_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "max31730", + .of_match_table = of_match_ptr(max31730_of_match), + .pm = &max31730_pm_ops, + }, + .probe = max31730_probe, + .id_table = max31730_ids, + .detect = max31730_detect, + .address_list = normal_i2c, +}; + +module_i2c_driver(max31730_driver); + +MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); +MODULE_DESCRIPTION("MAX31730 driver"); +MODULE_LICENSE("GPL");
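As a side note on the fixed-point formats handled by this driver: temperatures are stored left-justified at 0.0625 degC per LSB, and the offset register counts 0.125 degC steps around the 0x77 baseline. A standalone sketch of those mappings follows (helper names invented; the driver's DIV_ROUND_CLOSEST rounding is simplified to truncation here).

#include <stdint.h>

/* 16-bit temperature register, low nibble unused: 1/16 degC per step. */
static long max31730_reg_to_millideg(int16_t regval)
{
	return ((long)(regval >> 4) * 1000) / 16;
}

static uint16_t max31730_millideg_to_reg(long mc)
{
	/* inverse mapping, keeping the low nibble clear as the chip expects */
	return (uint16_t)(((mc * 16) / 1000) * 16);
}

/* Offset register: 0x77 means 0 degC, 0.125 degC per step. */
static uint8_t max31730_offset_to_reg(long mc)
{
	/* clamp to the representable -14.875..+17.0 degC window */
	if (mc < -14875)
		mc = -14875;
	if (mc > 17000)
		mc = 17000;
	return (uint8_t)((mc + 14875) / 125);
}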
If so, the module will be called max16064. +config SENSORS_MAX20730 + tristate "Maxim MAX20730, MAX20734, MAX20743" + help + If you say yes here you get hardware monitoring support for Maxim + MAX20730, MAX20734, and MAX20743. + + This driver can also be built as a module. If so, the module will + be called max20730. + config SENSORS_MAX20751 tristate "Maxim MAX20751" help @@ -200,20 +209,20 @@ config SENSORS_TPS40422 be called tps40422. config SENSORS_TPS53679 - tristate "TI TPS53679" + tristate "TI TPS53679, TPS53688" help If you say yes here you get hardware monitoring support for TI - TPS53679. + TPS53679, TPS53688 This driver can also be built as a module. If so, the module will be called tps53679. config SENSORS_UCD9000 - tristate "TI UCD90120, UCD90124, UCD90160, UCD9090, UCD90910" + tristate "TI UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910" help If you say yes here you get hardware monitoring support for TI - UCD90120, UCD90124, UCD90160, UCD9090, UCD90910, Sequencer and System - Health Controllers. + UCD90120, UCD90124, UCD90160, UCD90320, UCD9090, UCD90910, Sequencer + and System Health Controllers. This driver can also be built as a module. If so, the module will be called ucd9000. @@ -228,6 +237,15 @@ config SENSORS_UCD9200 This driver can also be built as a module. If so, the module will be called ucd9200. +config SENSORS_XDPE122 + tristate "Infineon XDPE122 family" + help + If you say yes here you get hardware monitoring support for Infineon + XDPE12254, XDPE12284, device. + + This driver can also be built as a module. If so, the module will + be called xdpe12284. + config SENSORS_ZL6100 tristate "Intersil ZL6100 and compatibles" help diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile index 3f8c1014938b..5feb45806123 100644 --- a/drivers/hwmon/pmbus/Makefile +++ b/drivers/hwmon/pmbus/Makefile @@ -17,6 +17,7 @@ obj-$(CONFIG_SENSORS_LM25066) += lm25066.o obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o obj-$(CONFIG_SENSORS_MAX16064) += max16064.o +obj-$(CONFIG_SENSORS_MAX20730) += max20730.o obj-$(CONFIG_SENSORS_MAX20751) += max20751.o obj-$(CONFIG_SENSORS_MAX31785) += max31785.o obj-$(CONFIG_SENSORS_MAX34440) += max34440.o @@ -26,4 +27,5 @@ obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o obj-$(CONFIG_SENSORS_TPS53679) += tps53679.o obj-$(CONFIG_SENSORS_UCD9000) += ucd9000.o obj-$(CONFIG_SENSORS_UCD9200) += ucd9200.o +obj-$(CONFIG_SENSORS_XDPE122) += xdpe12284.o obj-$(CONFIG_SENSORS_ZL6100) += zl6100.o diff --git a/drivers/hwmon/pmbus/ibm-cffps.c b/drivers/hwmon/pmbus/ibm-cffps.c index d359b76bcb36..3795fe55b84f 100644 --- a/drivers/hwmon/pmbus/ibm-cffps.c +++ b/drivers/hwmon/pmbus/ibm-cffps.c @@ -20,12 +20,15 @@ #define CFFPS_FRU_CMD 0x9A #define CFFPS_PN_CMD 0x9B +#define CFFPS_HEADER_CMD 0x9C #define CFFPS_SN_CMD 0x9E +#define CFFPS_MAX_POWER_OUT_CMD 0xA7 #define CFFPS_CCIN_CMD 0xBD #define CFFPS_FW_CMD 0xFA #define CFFPS1_FW_NUM_BYTES 4 #define CFFPS2_FW_NUM_WORDS 3 #define CFFPS_SYS_CONFIG_CMD 0xDA +#define CFFPS_12VCS_VOUT_CMD 0xDE #define CFFPS_INPUT_HISTORY_CMD 0xD6 #define CFFPS_INPUT_HISTORY_SIZE 100 @@ -44,22 +47,21 @@ #define CFFPS_MFR_VAUX_FAULT BIT(6) #define CFFPS_MFR_CURRENT_SHARE_WARNING BIT(7) -/* - * LED off state actually relinquishes LED control to PSU firmware, so it can - * turn on the LED for faults. 
- */ -#define CFFPS_LED_OFF 0 #define CFFPS_LED_BLINK BIT(0) #define CFFPS_LED_ON BIT(1) +#define CFFPS_LED_OFF BIT(2) #define CFFPS_BLINK_RATE_MS 250 enum { CFFPS_DEBUGFS_INPUT_HISTORY = 0, CFFPS_DEBUGFS_FRU, CFFPS_DEBUGFS_PN, + CFFPS_DEBUGFS_HEADER, CFFPS_DEBUGFS_SN, + CFFPS_DEBUGFS_MAX_POWER_OUT, CFFPS_DEBUGFS_CCIN, CFFPS_DEBUGFS_FW, + CFFPS_DEBUGFS_ON_OFF_CONFIG, CFFPS_DEBUGFS_NUM_ENTRIES }; @@ -136,15 +138,15 @@ static ssize_t ibm_cffps_read_input_history(struct ibm_cffps *psu, psu->input_history.byte_count); } -static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf, - size_t count, loff_t *ppos) +static ssize_t ibm_cffps_debugfs_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) { u8 cmd; int i, rc; int *idxp = file->private_data; int idx = *idxp; struct ibm_cffps *psu = to_psu(idxp, idx); - char data[I2C_SMBUS_BLOCK_MAX] = { 0 }; + char data[I2C_SMBUS_BLOCK_MAX + 2] = { 0 }; pmbus_set_page(psu->client, 0); @@ -157,9 +159,20 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf, case CFFPS_DEBUGFS_PN: cmd = CFFPS_PN_CMD; break; + case CFFPS_DEBUGFS_HEADER: + cmd = CFFPS_HEADER_CMD; + break; case CFFPS_DEBUGFS_SN: cmd = CFFPS_SN_CMD; break; + case CFFPS_DEBUGFS_MAX_POWER_OUT: + rc = i2c_smbus_read_word_swapped(psu->client, + CFFPS_MAX_POWER_OUT_CMD); + if (rc < 0) + return rc; + + rc = snprintf(data, I2C_SMBUS_BLOCK_MAX, "%d", rc); + goto done; case CFFPS_DEBUGFS_CCIN: rc = i2c_smbus_read_word_swapped(psu->client, CFFPS_CCIN_CMD); if (rc < 0) @@ -199,6 +212,14 @@ static ssize_t ibm_cffps_debugfs_op(struct file *file, char __user *buf, return -EOPNOTSUPP; } goto done; + case CFFPS_DEBUGFS_ON_OFF_CONFIG: + rc = i2c_smbus_read_byte_data(psu->client, + PMBUS_ON_OFF_CONFIG); + if (rc < 0) + return rc; + + rc = snprintf(data, 3, "%02x", rc); + goto done; default: return -EINVAL; } @@ -214,9 +235,42 @@ done: return simple_read_from_buffer(buf, count, ppos, data, rc); } +static ssize_t ibm_cffps_debugfs_write(struct file *file, + const char __user *buf, size_t count, + loff_t *ppos) +{ + u8 data; + ssize_t rc; + int *idxp = file->private_data; + int idx = *idxp; + struct ibm_cffps *psu = to_psu(idxp, idx); + + switch (idx) { + case CFFPS_DEBUGFS_ON_OFF_CONFIG: + pmbus_set_page(psu->client, 0); + + rc = simple_write_to_buffer(&data, 1, ppos, buf, count); + if (rc <= 0) + return rc; + + rc = i2c_smbus_write_byte_data(psu->client, + PMBUS_ON_OFF_CONFIG, data); + if (rc) + return rc; + + rc = 1; + break; + default: + return -EINVAL; + } + + return rc; +} + static const struct file_operations ibm_cffps_fops = { .llseek = noop_llseek, - .read = ibm_cffps_debugfs_op, + .read = ibm_cffps_debugfs_read, + .write = ibm_cffps_debugfs_write, .open = simple_open, }; @@ -293,6 +347,9 @@ static int ibm_cffps_read_word_data(struct i2c_client *client, int page, if (mfr & CFFPS_MFR_PS_KILL) rc |= PB_STATUS_OFF; break; + case PMBUS_VIRT_READ_VMON: + rc = pmbus_read_word_data(client, page, CFFPS_12VCS_VOUT_CMD); + break; default: rc = -ENODATA; break; @@ -375,6 +432,9 @@ static void ibm_cffps_create_led_class(struct ibm_cffps *psu) rc = devm_led_classdev_register(dev, &psu->led); if (rc) dev_warn(dev, "failed to register led class: %d\n", rc); + else + i2c_smbus_write_byte_data(client, CFFPS_SYS_CONFIG_CMD, + CFFPS_LED_OFF); } static struct pmbus_driver_info ibm_cffps_info[] = { @@ -396,7 +456,7 @@ static struct pmbus_driver_info ibm_cffps_info[] = { PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT | 
PMBUS_HAVE_STATUS_INPUT | PMBUS_HAVE_STATUS_TEMP | - PMBUS_HAVE_STATUS_FAN12, + PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_VMON, .func[1] = PMBUS_HAVE_VOUT | PMBUS_HAVE_IOUT | PMBUS_HAVE_TEMP | PMBUS_HAVE_TEMP2 | PMBUS_HAVE_TEMP3 | PMBUS_HAVE_STATUS_VOUT | PMBUS_HAVE_STATUS_IOUT, @@ -486,15 +546,24 @@ static int ibm_cffps_probe(struct i2c_client *client, debugfs_create_file("part_number", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_PN], &ibm_cffps_fops); + debugfs_create_file("header", 0444, ibm_cffps_dir, + &psu->debugfs_entries[CFFPS_DEBUGFS_HEADER], + &ibm_cffps_fops); debugfs_create_file("serial_number", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_SN], &ibm_cffps_fops); + debugfs_create_file("max_power_out", 0444, ibm_cffps_dir, + &psu->debugfs_entries[CFFPS_DEBUGFS_MAX_POWER_OUT], + &ibm_cffps_fops); debugfs_create_file("ccin", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_CCIN], &ibm_cffps_fops); debugfs_create_file("fw_version", 0444, ibm_cffps_dir, &psu->debugfs_entries[CFFPS_DEBUGFS_FW], &ibm_cffps_fops); + debugfs_create_file("on_off_config", 0644, ibm_cffps_dir, + &psu->debugfs_entries[CFFPS_DEBUGFS_ON_OFF_CONFIG], + &ibm_cffps_fops); return 0; } diff --git a/drivers/hwmon/pmbus/max20730.c b/drivers/hwmon/pmbus/max20730.c new file mode 100644 index 000000000000..294e2212f61e --- /dev/null +++ b/drivers/hwmon/pmbus/max20730.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Driver for MAX20730, MAX20734, and MAX20743 Integrated, Step-Down + * Switching Regulators + * + * Copyright 2019 Google LLC. + */ + +#include <linux/bits.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/of_device.h> +#include <linux/pmbus.h> +#include <linux/util_macros.h> +#include "pmbus.h" + +enum chips { + max20730, + max20734, + max20743 +}; + +struct max20730_data { + enum chips id; + struct pmbus_driver_info info; + struct mutex lock; /* Used to protect against parallel writes */ + u16 mfr_devset1; +}; + +#define to_max20730_data(x) container_of(x, struct max20730_data, info) + +#define MAX20730_MFR_DEVSET1 0xd2 + +/* + * Convert discrete value to direct data format. Strictly speaking, all passed + * values are constants, so we could do that calculation manually. On the + * downside, that would make the driver more difficult to maintain, so let's + * use this approach. + */ +static u16 val_to_direct(int v, enum pmbus_sensor_classes class, + const struct pmbus_driver_info *info) +{ + int R = info->R[class] - 3; /* take milli-units into account */ + int b = info->b[class] * 1000; + long d; + + d = v * info->m[class] + b; + /* + * R < 0 is true for all callers, so we don't need to bother + * about the R > 0 case.
+ */ + while (R < 0) { + d = DIV_ROUND_CLOSEST(d, 10); + R++; + } + return (u16)d; +} + +static long direct_to_val(u16 w, enum pmbus_sensor_classes class, + const struct pmbus_driver_info *info) +{ + int R = info->R[class] - 3; + int b = info->b[class] * 1000; + int m = info->m[class]; + long d = (s16)w; + + if (m == 0) + return 0; + + while (R < 0) { + d *= 10; + R++; + } + d = (d - b) / m; + return d; +} + +static u32 max_current[][5] = { + [max20730] = { 13000, 16600, 20100, 23600 }, + [max20734] = { 21000, 27000, 32000, 38000 }, + [max20743] = { 18900, 24100, 29200, 34100 }, +}; + +static int max20730_read_word_data(struct i2c_client *client, int page, int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + const struct max20730_data *data = to_max20730_data(info); + int ret = 0; + u32 max_c; + + switch (reg) { + case PMBUS_OT_FAULT_LIMIT: + switch ((data->mfr_devset1 >> 11) & 0x3) { + case 0x0: + ret = val_to_direct(150000, PSC_TEMPERATURE, info); + break; + case 0x1: + ret = val_to_direct(130000, PSC_TEMPERATURE, info); + break; + default: + ret = -ENODATA; + break; + } + break; + case PMBUS_IOUT_OC_FAULT_LIMIT: + max_c = max_current[data->id][(data->mfr_devset1 >> 5) & 0x3]; + ret = val_to_direct(max_c, PSC_CURRENT_OUT, info); + break; + default: + ret = -ENODATA; + break; + } + return ret; +} + +static int max20730_write_word_data(struct i2c_client *client, int page, + int reg, u16 word) +{ + struct pmbus_driver_info *info; + struct max20730_data *data; + u16 devset1; + int ret = 0; + int idx; + + info = (struct pmbus_driver_info *)pmbus_get_driver_info(client); + data = to_max20730_data(info); + + mutex_lock(&data->lock); + devset1 = data->mfr_devset1; + + switch (reg) { + case PMBUS_OT_FAULT_LIMIT: + devset1 &= ~(BIT(11) | BIT(12)); + if (direct_to_val(word, PSC_TEMPERATURE, info) < 140000) + devset1 |= BIT(11); + break; + case PMBUS_IOUT_OC_FAULT_LIMIT: + devset1 &= ~(BIT(5) | BIT(6)); + + idx = find_closest(direct_to_val(word, PSC_CURRENT_OUT, info), + max_current[data->id], 4); + devset1 |= (idx << 5); + break; + default: + ret = -ENODATA; + break; + } + + if (!ret && devset1 != data->mfr_devset1) { + ret = i2c_smbus_write_word_data(client, MAX20730_MFR_DEVSET1, + devset1); + if (!ret) { + data->mfr_devset1 = devset1; + pmbus_clear_cache(client); + } + } + mutex_unlock(&data->lock); + return ret; +} + +static const struct pmbus_driver_info max20730_info[] = { + [max20730] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6042 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3609, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + /* + * Values in the datasheet are adjusted for temperature and + * for the relationship between Vin and Vout. + * Unfortunately, the data sheet suggests that Vout measurement + * may be scaled with a resistor array. This is indeed the case + * at least on the evaluation boards. As a result, any in-driver + * adjustments would either be wrong or require elaborate means + * to configure the scaling. Instead of doing that, just report + * raw values and let userspace handle adjustments.
+ */ + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 153, + .b[PSC_CURRENT_OUT] = 4976, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + }, + [max20734] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6209 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3592, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 111, + .b[PSC_CURRENT_OUT] = 3461, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + }, + [max20743] = { + .pages = 1, + .read_word_data = max20730_read_word_data, + .write_word_data = max20730_write_word_data, + + /* Source : Maxim AN6042 */ + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 21, + .b[PSC_TEMPERATURE] = 5887, + .R[PSC_TEMPERATURE] = -1, + + .format[PSC_VOLTAGE_IN] = direct, + .m[PSC_VOLTAGE_IN] = 3597, + .b[PSC_VOLTAGE_IN] = 0, + .R[PSC_VOLTAGE_IN] = -2, + + .format[PSC_CURRENT_OUT] = direct, + .m[PSC_CURRENT_OUT] = 95, + .b[PSC_CURRENT_OUT] = 5014, + .R[PSC_CURRENT_OUT] = -1, + + .format[PSC_VOLTAGE_OUT] = linear, + + .func[0] = PMBUS_HAVE_VIN | + PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP, + }, +}; + +static int max20730_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + u8 buf[I2C_SMBUS_BLOCK_MAX + 1]; + struct max20730_data *data; + enum chips chip_id; + int ret; + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_READ_BYTE_DATA | + I2C_FUNC_SMBUS_READ_WORD_DATA | + I2C_FUNC_SMBUS_BLOCK_DATA)) + return -ENODEV; + + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_ID, buf); + if (ret < 0) { + dev_err(&client->dev, "Failed to read Manufacturer ID\n"); + return ret; + } + if (ret != 5 || strncmp(buf, "MAXIM", 5)) { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer ID '%s'\n", buf); + return -ENODEV; + } + + /* + * The chips support reading PMBUS_MFR_MODEL. On both MAX20730 + * and MAX20734, reading it returns M20743. Presumably that is + * the reason why the command is not documented. Unfortunately, + * that means that there is no reliable means to detect the chip. + * However, we can at least detect the chip series. Compare + * the returned value against 'M20743' and bail out if there is + * a mismatch. If that doesn't work for all chips, we may have + * to remove this check. 
+ */ + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_MODEL, buf); + if (ret < 0) { + dev_err(dev, "Failed to read Manufacturer Model\n"); + return ret; + } + if (ret != 6 || strncmp(buf, "M20743", 6)) { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer Model '%s'\n", buf); + return -ENODEV; + } + + ret = i2c_smbus_read_block_data(client, PMBUS_MFR_REVISION, buf); + if (ret < 0) { + dev_err(dev, "Failed to read Manufacturer Revision\n"); + return ret; + } + if (ret != 1 || buf[0] != 'F') { + buf[ret] = '\0'; + dev_err(dev, "Unsupported Manufacturer Revision '%s'\n", buf); + return -ENODEV; + } + + if (client->dev.of_node) + chip_id = (enum chips)of_device_get_match_data(dev); + else + chip_id = id->driver_data; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + data->id = chip_id; + mutex_init(&data->lock); + memcpy(&data->info, &max20730_info[chip_id], sizeof(data->info)); + + ret = i2c_smbus_read_word_data(client, MAX20730_MFR_DEVSET1); + if (ret < 0) + return ret; + data->mfr_devset1 = ret; + + return pmbus_do_probe(client, id, &data->info); +} + +static const struct i2c_device_id max20730_id[] = { + { "max20730", max20730 }, + { "max20734", max20734 }, + { "max20743", max20743 }, + { }, +}; + +MODULE_DEVICE_TABLE(i2c, max20730_id); + +static const struct of_device_id max20730_of_match[] = { + { .compatible = "maxim,max20730", .data = (void *)max20730 }, + { .compatible = "maxim,max20734", .data = (void *)max20734 }, + { .compatible = "maxim,max20743", .data = (void *)max20743 }, + { }, +}; + +MODULE_DEVICE_TABLE(of, max20730_of_match); + +static struct i2c_driver max20730_driver = { + .driver = { + .name = "max20730", + .of_match_table = max20730_of_match, + }, + .probe = max20730_probe, + .remove = pmbus_do_remove, + .id_table = max20730_id, +}; + +module_i2c_driver(max20730_driver); + +MODULE_AUTHOR("Guenter Roeck <linux@roeck-us.net>"); +MODULE_DESCRIPTION("PMBus driver for Maxim MAX20730 / MAX20734 / MAX20743"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/pmbus/max20751.c b/drivers/hwmon/pmbus/max20751.c index ee5f0cdbde06..da3c38cb9a5c 100644 --- a/drivers/hwmon/pmbus/max20751.c +++ b/drivers/hwmon/pmbus/max20751.c @@ -16,7 +16,7 @@ static struct pmbus_driver_info max20751_info = { .pages = 1, .format[PSC_VOLTAGE_IN] = linear, .format[PSC_VOLTAGE_OUT] = vid, - .vrm_version = vr12, + .vrm_version[0] = vr12, .format[PSC_TEMPERATURE] = linear, .format[PSC_CURRENT_OUT] = linear, .format[PSC_POWER] = linear, diff --git a/drivers/hwmon/pmbus/pmbus.c b/drivers/hwmon/pmbus/pmbus.c index c0bc43d01018..51e8312b6c2d 100644 --- a/drivers/hwmon/pmbus/pmbus.c +++ b/drivers/hwmon/pmbus/pmbus.c @@ -115,7 +115,7 @@ static int pmbus_identify(struct i2c_client *client, } if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) { - int vout_mode; + int vout_mode, i; vout_mode = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); if (vout_mode >= 0 && vout_mode != 0xff) { @@ -124,7 +124,8 @@ static int pmbus_identify(struct i2c_client *client, break; case 1: info->format[PSC_VOLTAGE_OUT] = vid; - info->vrm_version = vr11; + for (i = 0; i < info->pages; i++) + info->vrm_version[i] = vr11; break; case 2: info->format[PSC_VOLTAGE_OUT] = direct; @@ -210,6 +211,7 @@ static const struct i2c_device_id pmbus_id[] = { {"dps460", (kernel_ulong_t)&pmbus_info_one_skip}, {"dps650ab", (kernel_ulong_t)&pmbus_info_one_skip}, {"dps800", (kernel_ulong_t)&pmbus_info_one_skip}, + {"max20796", (kernel_ulong_t)&pmbus_info_one}, {"mdt040", 
(kernel_ulong_t)&pmbus_info_one}, {"ncp4200", (kernel_ulong_t)&pmbus_info_one}, {"ncp4208", (kernel_ulong_t)&pmbus_info_one}, diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index d198af3a92b6..13b34bd67f23 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -22,6 +22,8 @@ enum pmbus_regs { PMBUS_CLEAR_FAULTS = 0x03, PMBUS_PHASE = 0x04, + PMBUS_WRITE_PROTECT = 0x10, + PMBUS_CAPABILITY = 0x19, PMBUS_QUERY = 0x1A, @@ -226,6 +228,15 @@ enum pmbus_regs { #define PB_OPERATION_CONTROL_ON BIT(7) /* + * WRITE_PROTECT + */ +#define PB_WP_ALL BIT(7) /* all but WRITE_PROTECT */ +#define PB_WP_OP BIT(6) /* all but WP, OPERATION, PAGE */ +#define PB_WP_VOUT BIT(5) /* all but WP, OPERATION, PAGE, VOUT, ON_OFF */ + +#define PB_WP_ANY (PB_WP_ALL | PB_WP_OP | PB_WP_VOUT) + +/* * CAPABILITY */ #define PB_CAPABILITY_SMBALERT BIT(4) @@ -377,12 +388,12 @@ enum pmbus_sensor_classes { #define PMBUS_PAGE_VIRTUAL BIT(31) enum pmbus_data_format { linear = 0, direct, vid }; -enum vrm_version { vr11 = 0, vr12, vr13 }; +enum vrm_version { vr11 = 0, vr12, vr13, imvp9, amd625mv }; struct pmbus_driver_info { int pages; /* Total number of pages */ enum pmbus_data_format format[PSC_NUM_CLASSES]; - enum vrm_version vrm_version; + enum vrm_version vrm_version[PMBUS_PAGES]; /* vrm version per page */ /* * Support one set of coefficients for each sensor type * Used for chips providing data in direct mode. diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 8470097907bc..d9c17feb7b4a 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -696,7 +696,7 @@ static long pmbus_reg2data_vid(struct pmbus_data *data, long val = sensor->data; long rv = 0; - switch (data->info->vrm_version) { + switch (data->info->vrm_version[sensor->page]) { case vr11: if (val >= 0x02 && val <= 0xb2) rv = DIV_ROUND_CLOSEST(160000 - (val - 2) * 625, 100); @@ -709,6 +709,14 @@ static long pmbus_reg2data_vid(struct pmbus_data *data, if (val >= 0x01) rv = 500 + (val - 1) * 10; break; + case imvp9: + if (val >= 0x01) + rv = 200 + (val - 1) * 10; + break; + case amd625mv: + if (val >= 0x0 && val <= 0xd8) + rv = DIV_ROUND_CLOSEST(155000 - val * 625, 100); + break; } return rv; } @@ -1088,6 +1096,9 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data, snprintf(sensor->name, sizeof(sensor->name), "%s%d", name, seq); + if (data->flags & PMBUS_WRITE_PROTECTED) + readonly = true; + sensor->page = page; sensor->reg = reg; sensor->class = class; @@ -2141,6 +2152,15 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data, if (ret >= 0 && (ret & PB_CAPABILITY_ERROR_CHECK)) client->flags |= I2C_CLIENT_PEC; + /* + * Check if the chip is write protected. If it is, we cannot clear + * faults, and we should not try it. Also, in that case, writes into + * limit registers need to be disabled.
+ */ + ret = i2c_smbus_read_byte_data(client, PMBUS_WRITE_PROTECT); + if (ret > 0 && (ret & PB_WP_ANY)) + data->flags |= PMBUS_WRITE_PROTECTED | PMBUS_SKIP_STATUS_CHECK; + if (data->info->pages) pmbus_clear_faults(client); else diff --git a/drivers/hwmon/pmbus/pxe1610.c b/drivers/hwmon/pmbus/pxe1610.c index ebe3f023f840..517584cff3de 100644 --- a/drivers/hwmon/pmbus/pxe1610.c +++ b/drivers/hwmon/pmbus/pxe1610.c @@ -19,26 +19,30 @@ static int pxe1610_identify(struct i2c_client *client, struct pmbus_driver_info *info) { - if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE)) { - u8 vout_mode; - int ret; - - /* Read the register with VOUT scaling value.*/ - ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); - if (ret < 0) - return ret; - - vout_mode = ret & GENMASK(4, 0); - - switch (vout_mode) { - case 1: - info->vrm_version = vr12; - break; - case 2: - info->vrm_version = vr13; - break; - default: - return -ENODEV; + int i; + + for (i = 0; i < PXE1610_NUM_PAGES; i++) { + if (pmbus_check_byte_register(client, i, PMBUS_VOUT_MODE)) { + u8 vout_mode; + int ret; + + /* Read the register with VOUT scaling value.*/ + ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE); + if (ret < 0) + return ret; + + vout_mode = ret & GENMASK(4, 0); + + switch (vout_mode) { + case 1: + info->vrm_version[i] = vr12; + break; + case 2: + info->vrm_version[i] = vr13; + break; + default: + return -ENODEV; + } } } diff --git a/drivers/hwmon/pmbus/tps53679.c b/drivers/hwmon/pmbus/tps53679.c index 86bb3aca09ed..9c22e9013dd7 100644 --- a/drivers/hwmon/pmbus/tps53679.c +++ b/drivers/hwmon/pmbus/tps53679.c @@ -24,27 +24,29 @@ static int tps53679_identify(struct i2c_client *client, struct pmbus_driver_info *info) { u8 vout_params; - int ret; - - /* Read the register with VOUT scaling value.*/ - ret = pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE); - if (ret < 0) - return ret; - - vout_params = ret & GENMASK(4, 0); - - switch (vout_params) { - case TPS53679_PROT_VR13_10MV: - case TPS53679_PROT_VR12_5_10MV: - info->vrm_version = vr13; - break; - case TPS53679_PROT_VR13_5MV: - case TPS53679_PROT_VR12_5MV: - case TPS53679_PROT_IMVP8_5MV: - info->vrm_version = vr12; - break; - default: - return -EINVAL; + int i, ret; + + for (i = 0; i < TPS53679_PAGE_NUM; i++) { + /* Read the register with VOUT scaling value.*/ + ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE); + if (ret < 0) + return ret; + + vout_params = ret & GENMASK(4, 0); + + switch (vout_params) { + case TPS53679_PROT_VR13_10MV: + case TPS53679_PROT_VR12_5_10MV: + info->vrm_version[i] = vr13; + break; + case TPS53679_PROT_VR13_5MV: + case TPS53679_PROT_VR12_5MV: + case TPS53679_PROT_IMVP8_5MV: + info->vrm_version[i] = vr12; + break; + default: + return -EINVAL; + } } return 0; @@ -83,6 +85,7 @@ static int tps53679_probe(struct i2c_client *client, static const struct i2c_device_id tps53679_id[] = { {"tps53679", 0}, + {"tps53688", 0}, {} }; @@ -90,6 +93,7 @@ MODULE_DEVICE_TABLE(i2c, tps53679_id); static const struct of_device_id __maybe_unused tps53679_of_match[] = { {.compatible = "ti,tps53679"}, + {.compatible = "ti,tps53688"}, {} }; MODULE_DEVICE_TABLE(of, tps53679_of_match); diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c index a9229c6b0e84..23ea3415f166 100644 --- a/drivers/hwmon/pmbus/ucd9000.c +++ b/drivers/hwmon/pmbus/ucd9000.c @@ -18,7 +18,8 @@ #include <linux/gpio/driver.h> #include "pmbus.h" -enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 }; +enum chips { ucd9000, ucd90120, ucd90124, ucd90160, 
ucd90320, ucd9090, + ucd90910 }; #define UCD9000_MONITOR_CONFIG 0xd5 #define UCD9000_NUM_PAGES 0xd6 @@ -38,7 +39,7 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 }; #define UCD9000_GPIO_OUTPUT 1 #define UCD9000_MON_TYPE(x) (((x) >> 5) & 0x07) -#define UCD9000_MON_PAGE(x) ((x) & 0x0f) +#define UCD9000_MON_PAGE(x) ((x) & 0x1f) #define UCD9000_MON_VOLTAGE 1 #define UCD9000_MON_TEMPERATURE 2 @@ -50,10 +51,12 @@ enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd9090, ucd90910 }; #define UCD9000_GPIO_NAME_LEN 16 #define UCD9090_NUM_GPIOS 23 #define UCD901XX_NUM_GPIOS 26 +#define UCD90320_NUM_GPIOS 84 #define UCD90910_NUM_GPIOS 26 #define UCD9000_DEBUGFS_NAME_LEN 24 #define UCD9000_GPI_COUNT 8 +#define UCD90320_GPI_COUNT 32 struct ucd9000_data { u8 fan_data[UCD9000_NUM_FAN][I2C_SMBUS_BLOCK_MAX]; @@ -131,6 +134,7 @@ static const struct i2c_device_id ucd9000_id[] = { {"ucd90120", ucd90120}, {"ucd90124", ucd90124}, {"ucd90160", ucd90160}, + {"ucd90320", ucd90320}, {"ucd9090", ucd9090}, {"ucd90910", ucd90910}, {} @@ -155,6 +159,10 @@ static const struct of_device_id __maybe_unused ucd9000_of_match[] = { .data = (void *)ucd90160 }, { + .compatible = "ti,ucd90320", + .data = (void *)ucd90320 + }, + { .compatible = "ti,ucd9090", .data = (void *)ucd9090 }, @@ -322,6 +330,9 @@ static void ucd9000_probe_gpio(struct i2c_client *client, case ucd90160: data->gpio.ngpio = UCD901XX_NUM_GPIOS; break; + case ucd90320: + data->gpio.ngpio = UCD90320_NUM_GPIOS; + break; case ucd90910: data->gpio.ngpio = UCD90910_NUM_GPIOS; break; @@ -372,17 +383,18 @@ static int ucd9000_debugfs_show_mfr_status_bit(void *data, u64 *val) struct ucd9000_debugfs_entry *entry = data; struct i2c_client *client = entry->client; u8 buffer[I2C_SMBUS_BLOCK_MAX]; - int ret; + int ret, i; ret = ucd9000_get_mfr_status(client, buffer); if (ret < 0) return ret; /* - * Attribute only created for devices with gpi fault bits at bits - * 16-23, which is the second byte of the response. + * GPI fault bits are in sets of 8, two bytes from end of response. */ - *val = !!(buffer[1] & BIT(entry->index)); + i = ret - 3 - entry->index / 8; + if (i >= 0) + *val = !!(buffer[i] & BIT(entry->index % 8)); return 0; } @@ -422,7 +434,7 @@ static int ucd9000_init_debugfs(struct i2c_client *client, { struct dentry *debugfs; struct ucd9000_debugfs_entry *entries; - int i; + int i, gpi_count; char name[UCD9000_DEBUGFS_NAME_LEN]; debugfs = pmbus_get_debugfs_dir(client); @@ -435,18 +447,21 @@ static int ucd9000_init_debugfs(struct i2c_client *client, /* * Of the chips this driver supports, only the UCD9090, UCD90160, - * and UCD90910 report GPI faults in their MFR_STATUS register, so only - * create the GPI fault debugfs attributes for those chips. + * UCD90320, and UCD90910 report GPI faults in their MFR_STATUS + * register, so only create the GPI fault debugfs attributes for those + * chips. */ if (mid->driver_data == ucd9090 || mid->driver_data == ucd90160 || - mid->driver_data == ucd90910) { + mid->driver_data == ucd90320 || mid->driver_data == ucd90910) { + gpi_count = mid->driver_data == ucd90320 ? 
UCD90320_GPI_COUNT + : UCD9000_GPI_COUNT; entries = devm_kcalloc(&client->dev, - UCD9000_GPI_COUNT, sizeof(*entries), + gpi_count, sizeof(*entries), GFP_KERNEL); if (!entries) return -ENOMEM; - for (i = 0; i < UCD9000_GPI_COUNT; i++) { + for (i = 0; i < gpi_count; i++) { entries[i].client = client; entries[i].index = i; scnprintf(name, UCD9000_DEBUGFS_NAME_LEN, diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c new file mode 100644 index 000000000000..3d47806ff4d3 --- /dev/null +++ b/drivers/hwmon/pmbus/xdpe12284.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Hardware monitoring driver for Infineon Multi-phase Digital VR Controllers + * + * Copyright (c) 2020 Mellanox Technologies. All rights reserved. + */ + +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include "pmbus.h" + +#define XDPE122_PROT_VR12_5MV 0x01 /* VR12.0 mode, 5-mV DAC */ +#define XDPE122_PROT_VR12_5_10MV 0x02 /* VR12.5 mode, 10-mV DAC */ +#define XDPE122_PROT_IMVP9_10MV 0x03 /* IMVP9 mode, 10-mV DAC */ +#define XDPE122_AMD_625MV 0x10 /* AMD mode 6.25mV */ +#define XDPE122_PAGE_NUM 2 + +static int xdpe122_identify(struct i2c_client *client, + struct pmbus_driver_info *info) +{ + u8 vout_params; + int i, ret; + + for (i = 0; i < XDPE122_PAGE_NUM; i++) { + /* Read the register with VOUT scaling value. */ + ret = pmbus_read_byte_data(client, i, PMBUS_VOUT_MODE); + if (ret < 0) + return ret; + + vout_params = ret & GENMASK(4, 0); + + switch (vout_params) { + case XDPE122_PROT_VR12_5_10MV: + info->vrm_version[i] = vr13; + break; + case XDPE122_PROT_VR12_5MV: + info->vrm_version[i] = vr12; + break; + case XDPE122_PROT_IMVP9_10MV: + info->vrm_version[i] = imvp9; + break; + case XDPE122_AMD_625MV: + info->vrm_version[i] = amd625mv; + break; + default: + return -EINVAL; + } + } + + return 0; +} + +static struct pmbus_driver_info xdpe122_info = { + .pages = XDPE122_PAGE_NUM, + .format[PSC_VOLTAGE_IN] = linear, + .format[PSC_VOLTAGE_OUT] = vid, + .format[PSC_TEMPERATURE] = linear, + .format[PSC_CURRENT_IN] = linear, + .format[PSC_CURRENT_OUT] = linear, + .format[PSC_POWER] = linear, + .func[0] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | + PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT, + .func[1] = PMBUS_HAVE_VIN | PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT | + PMBUS_HAVE_IIN | PMBUS_HAVE_IOUT | PMBUS_HAVE_STATUS_IOUT | + PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP | + PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT, + .identify = xdpe122_identify, +}; + +static int xdpe122_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct pmbus_driver_info *info; + + info = devm_kmemdup(&client->dev, &xdpe122_info, sizeof(*info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + + return pmbus_do_probe(client, id, info); +} + +static const struct i2c_device_id xdpe122_id[] = { + {"xdpe12254", 0}, + {"xdpe12284", 0}, + {} +}; + +MODULE_DEVICE_TABLE(i2c, xdpe122_id); + +static const struct of_device_id __maybe_unused xdpe122_of_match[] = { + {.compatible = "infineon,xdpe12254"}, + {.compatible = "infineon,xdpe12284"}, + {} +}; +MODULE_DEVICE_TABLE(of, xdpe122_of_match); + +static struct i2c_driver xdpe122_driver = { + .driver = { + .name = "xdpe12284", + .of_match_table = of_match_ptr(xdpe122_of_match), + }, + .probe = xdpe122_probe, + .remove =
pmbus_do_remove, + .id_table = xdpe122_id, +}; + +module_i2c_driver(xdpe122_driver); + +MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>"); +MODULE_DESCRIPTION("PMBus driver for Infineon XDPE122 family"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index 42ffd2e5182d..30b7b3ea8836 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -390,8 +390,7 @@ static int pwm_fan_probe(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int pwm_fan_suspend(struct device *dev) +static int pwm_fan_disable(struct device *dev) { struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); struct pwm_args args; @@ -418,6 +417,17 @@ static int pwm_fan_suspend(struct device *dev) return 0; } +static void pwm_fan_shutdown(struct platform_device *pdev) +{ + pwm_fan_disable(&pdev->dev); +} + +#ifdef CONFIG_PM_SLEEP +static int pwm_fan_suspend(struct device *dev) +{ + return pwm_fan_disable(dev); +} + static int pwm_fan_resume(struct device *dev) { struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); @@ -455,6 +465,7 @@ MODULE_DEVICE_TABLE(of, of_pwm_fan_match); static struct platform_driver pwm_fan_driver = { .probe = pwm_fan_probe, + .shutdown = pwm_fan_shutdown, .driver = { .name = "pwm-fan", .pm = &pwm_fan_pm, diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c index eb171d15ac48..7ffadc2da57b 100644 --- a/drivers/hwmon/w83627ehf.c +++ b/drivers/hwmon/w83627ehf.c @@ -28,8 +28,6 @@ * w83627uhg 8 2 2 3 0xa230 0xc1 0x5ca3 * w83667hg 9 5 3 3 0xa510 0xc1 0x5ca3 * w83667hg-b 9 5 3 4 0xb350 0xc1 0x5ca3 - * nct6775f 9 4 3 9 0xb470 0xc1 0x5ca3 - * nct6776f 9 5 3 9 0xC330 0xc1 0x5ca3 */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt @@ -50,7 +48,7 @@ enum kinds { w83627ehf, w83627dhg, w83627dhg_p, w83627uhg, - w83667hg, w83667hg_b, nct6775, nct6776, + w83667hg, w83667hg_b, }; /* used to set data->name = w83627ehf_device_names[data->sio_kind] */ @@ -61,18 +59,12 @@ static const char * const w83627ehf_device_names[] = { "w83627uhg", "w83667hg", "w83667hg", - "nct6775", - "nct6776", }; static unsigned short force_id; module_param(force_id, ushort, 0); MODULE_PARM_DESC(force_id, "Override the detected device ID"); -static unsigned short fan_debounce; -module_param(fan_debounce, ushort, 0); -MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); - #define DRVNAME "w83627ehf" /* @@ -97,8 +89,6 @@ MODULE_PARM_DESC(fan_debounce, "Enable debouncing for fan RPM signal"); #define SIO_W83627UHG_ID 0xa230 #define SIO_W83667HG_ID 0xa510 #define SIO_W83667HG_B_ID 0xb350 -#define SIO_NCT6775_ID 0xb470 -#define SIO_NCT6776_ID 0xc330 #define SIO_ID_MASK 0xFFF0 static inline void @@ -187,11 +177,6 @@ static const u16 W83627EHF_REG_TEMP_CONFIG[] = { 0, 0x152, 0x252, 0 }; #define W83627EHF_REG_DIODE 0x59 #define W83627EHF_REG_SMI_OVT 0x4C -/* NCT6775F has its own fan divider registers */ -#define NCT6775_REG_FANDIV1 0x506 -#define NCT6775_REG_FANDIV2 0x507 -#define NCT6775_REG_FAN_DEBOUNCE 0xf0 - #define W83627EHF_REG_ALARM1 0x459 #define W83627EHF_REG_ALARM2 0x45A #define W83627EHF_REG_ALARM3 0x45B @@ -235,28 +220,6 @@ static const u16 W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B[] static const u16 W83627EHF_REG_TEMP_OFFSET[] = { 0x454, 0x455, 0x456 }; -static const u16 NCT6775_REG_TARGET[] = { 0x101, 0x201, 0x301 }; -static const u16 NCT6775_REG_FAN_MODE[] = { 0x102, 0x202, 0x302 }; -static const u16 NCT6775_REG_FAN_STOP_OUTPUT[] = { 0x105, 0x205, 0x305 }; -static const u16 NCT6775_REG_FAN_START_OUTPUT[] = { 0x106, 0x206, 0x306 }; -static const 
u16 NCT6775_REG_FAN_STOP_TIME[] = { 0x107, 0x207, 0x307 }; -static const u16 NCT6775_REG_PWM[] = { 0x109, 0x209, 0x309 }; -static const u16 NCT6775_REG_FAN_MAX_OUTPUT[] = { 0x10a, 0x20a, 0x30a }; -static const u16 NCT6775_REG_FAN_STEP_OUTPUT[] = { 0x10b, 0x20b, 0x30b }; -static const u16 NCT6775_REG_FAN[] = { 0x630, 0x632, 0x634, 0x636, 0x638 }; -static const u16 NCT6776_REG_FAN_MIN[] = { 0x63a, 0x63c, 0x63e, 0x640, 0x642}; - -static const u16 NCT6775_REG_TEMP[] - = { 0x27, 0x150, 0x250, 0x73, 0x75, 0x77, 0x62b, 0x62c, 0x62d }; -static const u16 NCT6775_REG_TEMP_CONFIG[] - = { 0, 0x152, 0x252, 0, 0, 0, 0x628, 0x629, 0x62A }; -static const u16 NCT6775_REG_TEMP_HYST[] - = { 0x3a, 0x153, 0x253, 0, 0, 0, 0x673, 0x678, 0x67D }; -static const u16 NCT6775_REG_TEMP_OVER[] - = { 0x39, 0x155, 0x255, 0, 0, 0, 0x672, 0x677, 0x67C }; -static const u16 NCT6775_REG_TEMP_SOURCE[] - = { 0x621, 0x622, 0x623, 0x100, 0x200, 0x300, 0x624, 0x625, 0x626 }; - static const char *const w83667hg_b_temp_label[] = { "SYSTIN", "CPUTIN", @@ -268,57 +231,7 @@ static const char *const w83667hg_b_temp_label[] = { "PECI Agent 4" }; -static const char *const nct6775_temp_label[] = { - "", - "SYSTIN", - "CPUTIN", - "AUXTIN", - "AMD SB-TSI", - "PECI Agent 0", - "PECI Agent 1", - "PECI Agent 2", - "PECI Agent 3", - "PECI Agent 4", - "PECI Agent 5", - "PECI Agent 6", - "PECI Agent 7", - "PCH_CHIP_CPU_MAX_TEMP", - "PCH_CHIP_TEMP", - "PCH_CPU_TEMP", - "PCH_MCH_TEMP", - "PCH_DIM0_TEMP", - "PCH_DIM1_TEMP", - "PCH_DIM2_TEMP", - "PCH_DIM3_TEMP" -}; - -static const char *const nct6776_temp_label[] = { - "", - "SYSTIN", - "CPUTIN", - "AUXTIN", - "SMBUSMASTER 0", - "SMBUSMASTER 1", - "SMBUSMASTER 2", - "SMBUSMASTER 3", - "SMBUSMASTER 4", - "SMBUSMASTER 5", - "SMBUSMASTER 6", - "SMBUSMASTER 7", - "PECI Agent 0", - "PECI Agent 1", - "PCH_CHIP_CPU_MAX_TEMP", - "PCH_CHIP_TEMP", - "PCH_CPU_TEMP", - "PCH_MCH_TEMP", - "PCH_DIM0_TEMP", - "PCH_DIM1_TEMP", - "PCH_DIM2_TEMP", - "PCH_DIM3_TEMP", - "BYTE_TEMP" -}; - -#define NUM_REG_TEMP ARRAY_SIZE(NCT6775_REG_TEMP) +#define NUM_REG_TEMP ARRAY_SIZE(W83627EHF_REG_TEMP) static int is_word_sized(u16 reg) { @@ -358,31 +271,6 @@ static unsigned int fan_from_reg8(u16 reg, unsigned int divreg) return 1350000U / (reg << divreg); } -static unsigned int fan_from_reg13(u16 reg, unsigned int divreg) -{ - if ((reg & 0xff1f) == 0xff1f) - return 0; - - reg = (reg & 0x1f) | ((reg & 0xff00) >> 3); - - if (reg == 0) - return 0; - - return 1350000U / reg; -} - -static unsigned int fan_from_reg16(u16 reg, unsigned int divreg) -{ - if (reg == 0 || reg == 0xffff) - return 0; - - /* - * Even though the registers are 16 bit wide, the fan divisor - * still applies. 
- */ - return 1350000U / (reg << divreg); -} - static inline unsigned int div_from_reg(u8 reg) { @@ -418,7 +306,6 @@ struct w83627ehf_data { int addr; /* IO base of hw monitor block */ const char *name; - struct device *hwmon_dev; struct mutex lock; u16 reg_temp[NUM_REG_TEMP]; @@ -428,20 +315,10 @@ struct w83627ehf_data { u8 temp_src[NUM_REG_TEMP]; const char * const *temp_label; - const u16 *REG_PWM; - const u16 *REG_TARGET; - const u16 *REG_FAN; - const u16 *REG_FAN_MIN; - const u16 *REG_FAN_START_OUTPUT; - const u16 *REG_FAN_STOP_OUTPUT; - const u16 *REG_FAN_STOP_TIME; const u16 *REG_FAN_MAX_OUTPUT; const u16 *REG_FAN_STEP_OUTPUT; const u16 *scale_in; - unsigned int (*fan_from_reg)(u16 reg, unsigned int divreg); - unsigned int (*fan_from_reg_min)(u16 reg, unsigned int divreg); - struct mutex update_lock; char valid; /* !=0 if following fields are valid */ unsigned long last_updated; /* In jiffies */ @@ -457,7 +334,6 @@ struct w83627ehf_data { u8 fan_div[5]; u8 has_fan; /* some fan inputs can be disabled */ u8 has_fan_min; /* some fans don't have min register */ - bool has_fan_div; u8 temp_type[3]; s8 temp_offset[3]; s16 temp[9]; @@ -494,6 +370,7 @@ struct w83627ehf_data { u16 have_temp_offset; u8 in6_skip:1; u8 temp3_val_only:1; + u8 have_vid:1; #ifdef CONFIG_PM /* Remember extra register values over suspend/resume */ @@ -584,35 +461,6 @@ static int w83627ehf_write_temp(struct w83627ehf_data *data, u16 reg, } /* This function assumes that the caller holds data->update_lock */ -static void nct6775_write_fan_div(struct w83627ehf_data *data, int nr) -{ - u8 reg; - - switch (nr) { - case 0: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x70) - | (data->fan_div[0] & 0x7); - w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg); - break; - case 1: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV1) & 0x7) - | ((data->fan_div[1] << 4) & 0x70); - w83627ehf_write_value(data, NCT6775_REG_FANDIV1, reg); - break; - case 2: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x70) - | (data->fan_div[2] & 0x7); - w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg); - break; - case 3: - reg = (w83627ehf_read_value(data, NCT6775_REG_FANDIV2) & 0x7) - | ((data->fan_div[3] << 4) & 0x70); - w83627ehf_write_value(data, NCT6775_REG_FANDIV2, reg); - break; - } -} - -/* This function assumes that the caller holds data->update_lock */ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr) { u8 reg; @@ -663,32 +511,6 @@ static void w83627ehf_write_fan_div(struct w83627ehf_data *data, int nr) } } -static void w83627ehf_write_fan_div_common(struct device *dev, - struct w83627ehf_data *data, int nr) -{ - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - - if (sio_data->kind == nct6776) - ; /* no dividers, do nothing */ - else if (sio_data->kind == nct6775) - nct6775_write_fan_div(data, nr); - else - w83627ehf_write_fan_div(data, nr); -} - -static void nct6775_update_fan_div(struct w83627ehf_data *data) -{ - u8 i; - - i = w83627ehf_read_value(data, NCT6775_REG_FANDIV1); - data->fan_div[0] = i & 0x7; - data->fan_div[1] = (i & 0x70) >> 4; - i = w83627ehf_read_value(data, NCT6775_REG_FANDIV2); - data->fan_div[2] = i & 0x7; - if (data->has_fan & (1<<3)) - data->fan_div[3] = (i & 0x70) >> 4; -} - static void w83627ehf_update_fan_div(struct w83627ehf_data *data) { int i; @@ -714,37 +536,6 @@ static void w83627ehf_update_fan_div(struct w83627ehf_data *data) } } -static void w83627ehf_update_fan_div_common(struct device *dev, - struct w83627ehf_data *data) -{ - 
struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - - if (sio_data->kind == nct6776) - ; /* no dividers, do nothing */ - else if (sio_data->kind == nct6775) - nct6775_update_fan_div(data); - else - w83627ehf_update_fan_div(data); -} - -static void nct6775_update_pwm(struct w83627ehf_data *data) -{ - int i; - int pwmcfg, fanmodecfg; - - for (i = 0; i < data->pwm_num; i++) { - pwmcfg = w83627ehf_read_value(data, - W83627EHF_REG_PWM_ENABLE[i]); - fanmodecfg = w83627ehf_read_value(data, - NCT6775_REG_FAN_MODE[i]); - data->pwm_mode[i] = - ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1; - data->pwm_enable[i] = ((fanmodecfg >> 4) & 7) + 1; - data->tolerance[i] = fanmodecfg & 0x0f; - data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]); - } -} - static void w83627ehf_update_pwm(struct w83627ehf_data *data) { int i; @@ -765,28 +556,15 @@ static void w83627ehf_update_pwm(struct w83627ehf_data *data) ((pwmcfg >> W83627EHF_PWM_MODE_SHIFT[i]) & 1) ? 0 : 1; data->pwm_enable[i] = ((pwmcfg >> W83627EHF_PWM_ENABLE_SHIFT[i]) & 3) + 1; - data->pwm[i] = w83627ehf_read_value(data, data->REG_PWM[i]); + data->pwm[i] = w83627ehf_read_value(data, W83627EHF_REG_PWM[i]); data->tolerance[i] = (tolerance >> (i == 1 ? 4 : 0)) & 0x0f; } } -static void w83627ehf_update_pwm_common(struct device *dev, - struct w83627ehf_data *data) -{ - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) - nct6775_update_pwm(data); - else - w83627ehf_update_pwm(data); -} - static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) { struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - int i; mutex_lock(&data->update_lock); @@ -794,7 +572,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) if (time_after(jiffies, data->last_updated + HZ + HZ/2) || !data->valid) { /* Fan clock dividers */ - w83627ehf_update_fan_div_common(dev, data); + w83627ehf_update_fan_div(data); /* Measured voltages and limits */ for (i = 0; i < data->in_num; i++) { @@ -816,40 +594,36 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) if (!(data->has_fan & (1 << i))) continue; - reg = w83627ehf_read_value(data, data->REG_FAN[i]); - data->rpm[i] = data->fan_from_reg(reg, - data->fan_div[i]); + reg = w83627ehf_read_value(data, W83627EHF_REG_FAN[i]); + data->rpm[i] = fan_from_reg8(reg, data->fan_div[i]); if (data->has_fan_min & (1 << i)) data->fan_min[i] = w83627ehf_read_value(data, - data->REG_FAN_MIN[i]); + W83627EHF_REG_FAN_MIN[i]); /* * If we failed to measure the fan speed and clock * divider can be increased, let's try that for next * time */ - if (data->has_fan_div - && (reg >= 0xff || (sio_data->kind == nct6775 - && reg == 0x00)) - && data->fan_div[i] < 0x07) { + if (reg >= 0xff && data->fan_div[i] < 0x07) { dev_dbg(dev, "Increasing fan%d clock divider from %u to %u\n", i + 1, div_from_reg(data->fan_div[i]), div_from_reg(data->fan_div[i] + 1)); data->fan_div[i]++; - w83627ehf_write_fan_div_common(dev, data, i); + w83627ehf_write_fan_div(data, i); /* Preserve min limit if possible */ if ((data->has_fan_min & (1 << i)) && data->fan_min[i] >= 2 && data->fan_min[i] != 255) w83627ehf_write_value(data, - data->REG_FAN_MIN[i], + W83627EHF_REG_FAN_MIN[i], (data->fan_min[i] /= 2)); } } - w83627ehf_update_pwm_common(dev, data); + w83627ehf_update_pwm(data); for (i = 0; i < data->pwm_num; i++) { if (!(data->has_fan & (1 << i))) @@ -857,13 +631,13 @@ 
static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) data->fan_start_output[i] = w83627ehf_read_value(data, - data->REG_FAN_START_OUTPUT[i]); + W83627EHF_REG_FAN_START_OUTPUT[i]); data->fan_stop_output[i] = w83627ehf_read_value(data, - data->REG_FAN_STOP_OUTPUT[i]); + W83627EHF_REG_FAN_STOP_OUTPUT[i]); data->fan_stop_time[i] = w83627ehf_read_value(data, - data->REG_FAN_STOP_TIME[i]); + W83627EHF_REG_FAN_STOP_TIME[i]); if (data->REG_FAN_MAX_OUTPUT && data->REG_FAN_MAX_OUTPUT[i] != 0xff) @@ -879,7 +653,7 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) data->target_temp[i] = w83627ehf_read_value(data, - data->REG_TARGET[i]) & + W83627EHF_REG_TARGET[i]) & (data->pwm_mode[i] == 1 ? 0x7f : 0xff); } @@ -923,199 +697,61 @@ static struct w83627ehf_data *w83627ehf_update_device(struct device *dev) return data; } -/* - * Sysfs callback functions - */ -#define show_in_reg(reg) \ -static ssize_t \ -show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - return sprintf(buf, "%ld\n", in_from_reg(data->reg[nr], nr, \ - data->scale_in)); \ -} -show_in_reg(in) -show_in_reg(in_min) -show_in_reg(in_max) - #define store_in_reg(REG, reg) \ -static ssize_t \ -store_in_##reg(struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ +static int \ +store_in_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \ + long val) \ { \ - struct w83627ehf_data *data = dev_get_drvdata(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - unsigned long val; \ - int err; \ - err = kstrtoul(buf, 10, &val); \ - if (err < 0) \ - return err; \ + if (val < 0) \ + return -EINVAL; \ mutex_lock(&data->update_lock); \ - data->in_##reg[nr] = in_to_reg(val, nr, data->scale_in); \ - w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(nr), \ - data->in_##reg[nr]); \ + data->in_##reg[channel] = in_to_reg(val, channel, data->scale_in); \ + w83627ehf_write_value(data, W83627EHF_REG_IN_##REG(channel), \ + data->in_##reg[channel]); \ mutex_unlock(&data->update_lock); \ - return count; \ + return 0; \ } store_in_reg(MIN, min) store_in_reg(MAX, max) -static ssize_t show_alarm(struct device *dev, struct device_attribute *attr, - char *buf) +static int +store_fan_min(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%u\n", (data->alarms >> nr) & 0x01); -} - -static struct sensor_device_attribute sda_in_input[] = { - SENSOR_ATTR(in0_input, S_IRUGO, show_in, NULL, 0), - SENSOR_ATTR(in1_input, S_IRUGO, show_in, NULL, 1), - SENSOR_ATTR(in2_input, S_IRUGO, show_in, NULL, 2), - SENSOR_ATTR(in3_input, S_IRUGO, show_in, NULL, 3), - SENSOR_ATTR(in4_input, S_IRUGO, show_in, NULL, 4), - SENSOR_ATTR(in5_input, S_IRUGO, show_in, NULL, 5), - SENSOR_ATTR(in6_input, S_IRUGO, show_in, NULL, 6), - SENSOR_ATTR(in7_input, S_IRUGO, show_in, NULL, 7), - SENSOR_ATTR(in8_input, S_IRUGO, show_in, NULL, 8), - SENSOR_ATTR(in9_input, S_IRUGO, show_in, NULL, 9), -}; - -static struct sensor_device_attribute sda_in_alarm[] = { - SENSOR_ATTR(in0_alarm, S_IRUGO, show_alarm, NULL, 0), - 
SENSOR_ATTR(in1_alarm, S_IRUGO, show_alarm, NULL, 1), - SENSOR_ATTR(in2_alarm, S_IRUGO, show_alarm, NULL, 2), - SENSOR_ATTR(in3_alarm, S_IRUGO, show_alarm, NULL, 3), - SENSOR_ATTR(in4_alarm, S_IRUGO, show_alarm, NULL, 8), - SENSOR_ATTR(in5_alarm, S_IRUGO, show_alarm, NULL, 21), - SENSOR_ATTR(in6_alarm, S_IRUGO, show_alarm, NULL, 20), - SENSOR_ATTR(in7_alarm, S_IRUGO, show_alarm, NULL, 16), - SENSOR_ATTR(in8_alarm, S_IRUGO, show_alarm, NULL, 17), - SENSOR_ATTR(in9_alarm, S_IRUGO, show_alarm, NULL, 19), -}; - -static struct sensor_device_attribute sda_in_min[] = { - SENSOR_ATTR(in0_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 0), - SENSOR_ATTR(in1_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 1), - SENSOR_ATTR(in2_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 2), - SENSOR_ATTR(in3_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 3), - SENSOR_ATTR(in4_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 4), - SENSOR_ATTR(in5_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 5), - SENSOR_ATTR(in6_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 6), - SENSOR_ATTR(in7_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 7), - SENSOR_ATTR(in8_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 8), - SENSOR_ATTR(in9_min, S_IWUSR | S_IRUGO, show_in_min, store_in_min, 9), -}; - -static struct sensor_device_attribute sda_in_max[] = { - SENSOR_ATTR(in0_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 0), - SENSOR_ATTR(in1_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 1), - SENSOR_ATTR(in2_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 2), - SENSOR_ATTR(in3_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 3), - SENSOR_ATTR(in4_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 4), - SENSOR_ATTR(in5_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 5), - SENSOR_ATTR(in6_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 6), - SENSOR_ATTR(in7_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 7), - SENSOR_ATTR(in8_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 8), - SENSOR_ATTR(in9_max, S_IWUSR | S_IRUGO, show_in_max, store_in_max, 9), -}; - -static ssize_t -show_fan(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%d\n", data->rpm[nr]); -} - -static ssize_t -show_fan_min(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%d\n", - data->fan_from_reg_min(data->fan_min[nr], - data->fan_div[nr])); -} - -static ssize_t -show_fan_div(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%u\n", div_from_reg(data->fan_div[nr])); -} - -static ssize_t -store_fan_min(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - unsigned long val; - int err; unsigned int reg; u8 new_div; - err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; + if (val < 0) + return -EINVAL; 
mutex_lock(&data->update_lock); - if (!data->has_fan_div) { - /* - * Only NCT6776F for now, so we know that this is a 13 bit - * register - */ - if (!val) { - val = 0xff1f; - } else { - if (val > 1350000U) - val = 135000U; - val = 1350000U / val; - val = (val & 0x1f) | ((val << 3) & 0xff00); - } - data->fan_min[nr] = val; - goto done; /* Leave fan divider alone */ - } if (!val) { /* No min limit, alarm disabled */ - data->fan_min[nr] = 255; - new_div = data->fan_div[nr]; /* No change */ - dev_info(dev, "fan%u low limit and alarm disabled\n", nr + 1); + data->fan_min[channel] = 255; + new_div = data->fan_div[channel]; /* No change */ + dev_info(dev, "fan%u low limit and alarm disabled\n", + channel + 1); } else if ((reg = 1350000U / val) >= 128 * 255) { /* * Speed below this value cannot possibly be represented, * even with the highest divider (128) */ - data->fan_min[nr] = 254; + data->fan_min[channel] = 254; new_div = 7; /* 128 == (1 << 7) */ dev_warn(dev, "fan%u low limit %lu below minimum %u, set to minimum\n", - nr + 1, val, data->fan_from_reg_min(254, 7)); + channel + 1, val, fan_from_reg8(254, 7)); } else if (!reg) { /* * Speed above this value cannot possibly be represented, * even with the lowest divider (1) */ - data->fan_min[nr] = 1; + data->fan_min[channel] = 1; new_div = 0; /* 1 == (1 << 0) */ dev_warn(dev, "fan%u low limit %lu above maximum %u, set to maximum\n", - nr + 1, val, data->fan_from_reg_min(1, 0)); + channel + 1, val, fan_from_reg8(1, 0)); } else { /* * Automatically pick the best divider, i.e. the one such @@ -1127,362 +763,117 @@ store_fan_min(struct device *dev, struct device_attribute *attr, reg >>= 1; new_div++; } - data->fan_min[nr] = reg; + data->fan_min[channel] = reg; } /* * Write both the fan clock divider (if it changed) and the new * fan min (unconditionally) */ - if (new_div != data->fan_div[nr]) { + if (new_div != data->fan_div[channel]) { dev_dbg(dev, "fan%u clock divider changed from %u to %u\n", - nr + 1, div_from_reg(data->fan_div[nr]), + channel + 1, div_from_reg(data->fan_div[channel]), div_from_reg(new_div)); - data->fan_div[nr] = new_div; - w83627ehf_write_fan_div_common(dev, data, nr); + data->fan_div[channel] = new_div; + w83627ehf_write_fan_div(data, channel); /* Give the chip time to sample a new speed value */ data->last_updated = jiffies; } -done: - w83627ehf_write_value(data, data->REG_FAN_MIN[nr], - data->fan_min[nr]); - mutex_unlock(&data->update_lock); - return count; -} - -static struct sensor_device_attribute sda_fan_input[] = { - SENSOR_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0), - SENSOR_ATTR(fan2_input, S_IRUGO, show_fan, NULL, 1), - SENSOR_ATTR(fan3_input, S_IRUGO, show_fan, NULL, 2), - SENSOR_ATTR(fan4_input, S_IRUGO, show_fan, NULL, 3), - SENSOR_ATTR(fan5_input, S_IRUGO, show_fan, NULL, 4), -}; - -static struct sensor_device_attribute sda_fan_alarm[] = { - SENSOR_ATTR(fan1_alarm, S_IRUGO, show_alarm, NULL, 6), - SENSOR_ATTR(fan2_alarm, S_IRUGO, show_alarm, NULL, 7), - SENSOR_ATTR(fan3_alarm, S_IRUGO, show_alarm, NULL, 11), - SENSOR_ATTR(fan4_alarm, S_IRUGO, show_alarm, NULL, 10), - SENSOR_ATTR(fan5_alarm, S_IRUGO, show_alarm, NULL, 23), -}; - -static struct sensor_device_attribute sda_fan_min[] = { - SENSOR_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 0), - SENSOR_ATTR(fan2_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 1), - SENSOR_ATTR(fan3_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 2), - SENSOR_ATTR(fan4_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 3), - 
SENSOR_ATTR(fan5_min, S_IWUSR | S_IRUGO, show_fan_min, - store_fan_min, 4), -}; - -static struct sensor_device_attribute sda_fan_div[] = { - SENSOR_ATTR(fan1_div, S_IRUGO, show_fan_div, NULL, 0), - SENSOR_ATTR(fan2_div, S_IRUGO, show_fan_div, NULL, 1), - SENSOR_ATTR(fan3_div, S_IRUGO, show_fan_div, NULL, 2), - SENSOR_ATTR(fan4_div, S_IRUGO, show_fan_div, NULL, 3), - SENSOR_ATTR(fan5_div, S_IRUGO, show_fan_div, NULL, 4), -}; - -static ssize_t -show_temp_label(struct device *dev, struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%s\n", data->temp_label[data->temp_src[nr]]); -} + w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[channel], + data->fan_min[channel]); + mutex_unlock(&data->update_lock); -#define show_temp_reg(addr, reg) \ -static ssize_t \ -show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - return sprintf(buf, "%d\n", LM75_TEMP_FROM_REG(data->reg[nr])); \ + return 0; } -show_temp_reg(reg_temp, temp); -show_temp_reg(reg_temp_over, temp_max); -show_temp_reg(reg_temp_hyst, temp_max_hyst); #define store_temp_reg(addr, reg) \ -static ssize_t \ -store_##reg(struct device *dev, struct device_attribute *attr, \ - const char *buf, size_t count) \ +static int \ +store_##reg(struct device *dev, struct w83627ehf_data *data, int channel, \ + long val) \ { \ - struct w83627ehf_data *data = dev_get_drvdata(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - int err; \ - long val; \ - err = kstrtol(buf, 10, &val); \ - if (err < 0) \ - return err; \ mutex_lock(&data->update_lock); \ - data->reg[nr] = LM75_TEMP_TO_REG(val); \ - w83627ehf_write_temp(data, data->addr[nr], data->reg[nr]); \ + data->reg[channel] = LM75_TEMP_TO_REG(val); \ + w83627ehf_write_temp(data, data->addr[channel], data->reg[channel]); \ mutex_unlock(&data->update_lock); \ - return count; \ + return 0; \ } store_temp_reg(reg_temp_over, temp_max); store_temp_reg(reg_temp_hyst, temp_max_hyst); -static ssize_t -show_temp_offset(struct device *dev, struct device_attribute *attr, char *buf) +static int +store_temp_offset(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - - return sprintf(buf, "%d\n", - data->temp_offset[sensor_attr->index] * 1000); -} - -static ssize_t -store_temp_offset(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - long val; - int err; - - err = kstrtol(buf, 10, &val); - if (err < 0) - return err; - val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -128, 127); mutex_lock(&data->update_lock); - data->temp_offset[nr] = val; - w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[nr], val); + data->temp_offset[channel] = val; + w83627ehf_write_value(data, W83627EHF_REG_TEMP_OFFSET[channel], val); mutex_unlock(&data->update_lock); - return count; -} - -static ssize_t -show_temp_type(struct device *dev, 
struct device_attribute *attr, char *buf) -{ - struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - return sprintf(buf, "%d\n", (int)data->temp_type[nr]); -} - -static struct sensor_device_attribute sda_temp_input[] = { - SENSOR_ATTR(temp1_input, S_IRUGO, show_temp, NULL, 0), - SENSOR_ATTR(temp2_input, S_IRUGO, show_temp, NULL, 1), - SENSOR_ATTR(temp3_input, S_IRUGO, show_temp, NULL, 2), - SENSOR_ATTR(temp4_input, S_IRUGO, show_temp, NULL, 3), - SENSOR_ATTR(temp5_input, S_IRUGO, show_temp, NULL, 4), - SENSOR_ATTR(temp6_input, S_IRUGO, show_temp, NULL, 5), - SENSOR_ATTR(temp7_input, S_IRUGO, show_temp, NULL, 6), - SENSOR_ATTR(temp8_input, S_IRUGO, show_temp, NULL, 7), - SENSOR_ATTR(temp9_input, S_IRUGO, show_temp, NULL, 8), -}; - -static struct sensor_device_attribute sda_temp_label[] = { - SENSOR_ATTR(temp1_label, S_IRUGO, show_temp_label, NULL, 0), - SENSOR_ATTR(temp2_label, S_IRUGO, show_temp_label, NULL, 1), - SENSOR_ATTR(temp3_label, S_IRUGO, show_temp_label, NULL, 2), - SENSOR_ATTR(temp4_label, S_IRUGO, show_temp_label, NULL, 3), - SENSOR_ATTR(temp5_label, S_IRUGO, show_temp_label, NULL, 4), - SENSOR_ATTR(temp6_label, S_IRUGO, show_temp_label, NULL, 5), - SENSOR_ATTR(temp7_label, S_IRUGO, show_temp_label, NULL, 6), - SENSOR_ATTR(temp8_label, S_IRUGO, show_temp_label, NULL, 7), - SENSOR_ATTR(temp9_label, S_IRUGO, show_temp_label, NULL, 8), -}; - -static struct sensor_device_attribute sda_temp_max[] = { - SENSOR_ATTR(temp1_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 0), - SENSOR_ATTR(temp2_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 1), - SENSOR_ATTR(temp3_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 2), - SENSOR_ATTR(temp4_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 3), - SENSOR_ATTR(temp5_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 4), - SENSOR_ATTR(temp6_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 5), - SENSOR_ATTR(temp7_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 6), - SENSOR_ATTR(temp8_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 7), - SENSOR_ATTR(temp9_max, S_IRUGO | S_IWUSR, show_temp_max, - store_temp_max, 8), -}; - -static struct sensor_device_attribute sda_temp_max_hyst[] = { - SENSOR_ATTR(temp1_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 0), - SENSOR_ATTR(temp2_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 1), - SENSOR_ATTR(temp3_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 2), - SENSOR_ATTR(temp4_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 3), - SENSOR_ATTR(temp5_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 4), - SENSOR_ATTR(temp6_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 5), - SENSOR_ATTR(temp7_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 6), - SENSOR_ATTR(temp8_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 7), - SENSOR_ATTR(temp9_max_hyst, S_IRUGO | S_IWUSR, show_temp_max_hyst, - store_temp_max_hyst, 8), -}; - -static struct sensor_device_attribute sda_temp_alarm[] = { - SENSOR_ATTR(temp1_alarm, S_IRUGO, show_alarm, NULL, 4), - SENSOR_ATTR(temp2_alarm, S_IRUGO, show_alarm, NULL, 5), - SENSOR_ATTR(temp3_alarm, S_IRUGO, show_alarm, NULL, 13), -}; - -static struct sensor_device_attribute sda_temp_type[] = { - SENSOR_ATTR(temp1_type, S_IRUGO, 
show_temp_type, NULL, 0), - SENSOR_ATTR(temp2_type, S_IRUGO, show_temp_type, NULL, 1), - SENSOR_ATTR(temp3_type, S_IRUGO, show_temp_type, NULL, 2), -}; - -static struct sensor_device_attribute sda_temp_offset[] = { - SENSOR_ATTR(temp1_offset, S_IRUGO | S_IWUSR, show_temp_offset, - store_temp_offset, 0), - SENSOR_ATTR(temp2_offset, S_IRUGO | S_IWUSR, show_temp_offset, - store_temp_offset, 1), - SENSOR_ATTR(temp3_offset, S_IRUGO | S_IWUSR, show_temp_offset, - store_temp_offset, 2), -}; - -#define show_pwm_reg(reg) \ -static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ - char *buf) \ -{ \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ - struct sensor_device_attribute *sensor_attr = \ - to_sensor_dev_attr(attr); \ - int nr = sensor_attr->index; \ - return sprintf(buf, "%d\n", data->reg[nr]); \ + return 0; } -show_pwm_reg(pwm_mode) -show_pwm_reg(pwm_enable) -show_pwm_reg(pwm) - -static ssize_t -store_pwm_mode(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int +store_pwm_mode(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - int nr = sensor_attr->index; - unsigned long val; - int err; u16 reg; - err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; - - if (val > 1) - return -EINVAL; - - /* On NCT67766F, DC mode is only supported for pwm1 */ - if (sio_data->kind == nct6776 && nr && val != 1) + if (val < 0 || val > 1) return -EINVAL; mutex_lock(&data->update_lock); - reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); - data->pwm_mode[nr] = val; - reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[nr]); + reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[channel]); + data->pwm_mode[channel] = val; + reg &= ~(1 << W83627EHF_PWM_MODE_SHIFT[channel]); if (!val) - reg |= 1 << W83627EHF_PWM_MODE_SHIFT[nr]; - w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg); + reg |= 1 << W83627EHF_PWM_MODE_SHIFT[channel]; + w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], reg); mutex_unlock(&data->update_lock); - return count; + return 0; } -static ssize_t -store_pwm(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int +store_pwm(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - unsigned long val; - int err; - - err = kstrtoul(buf, 10, &val); - if (err < 0) - return err; - val = clamp_val(val, 0, 255); mutex_lock(&data->update_lock); - data->pwm[nr] = val; - w83627ehf_write_value(data, data->REG_PWM[nr], val); + data->pwm[channel] = val; + w83627ehf_write_value(data, W83627EHF_REG_PWM[channel], val); mutex_unlock(&data->update_lock); - return count; + return 0; } -static ssize_t -store_pwm_enable(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) +static int +store_pwm_enable(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); - struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); - int nr = sensor_attr->index; - unsigned long val; - int err; u16 reg; - err = 
kstrtoul(buf, 10, &val); - if (err < 0) - return err; - - if (!val || (val > 4 && val != data->pwm_enable_orig[nr])) - return -EINVAL; - /* SmartFan III mode is not supported on NCT6776F */ - if (sio_data->kind == nct6776 && val == 4) + if (!val || val < 0 || + (val > 4 && val != data->pwm_enable_orig[channel])) return -EINVAL; mutex_lock(&data->update_lock); - data->pwm_enable[nr] = val; - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - reg = w83627ehf_read_value(data, - NCT6775_REG_FAN_MODE[nr]); - reg &= 0x0f; - reg |= (val - 1) << 4; - w83627ehf_write_value(data, - NCT6775_REG_FAN_MODE[nr], reg); - } else { - reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]); - reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[nr]); - reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[nr]; - w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[nr], reg); - } + data->pwm_enable[channel] = val; + reg = w83627ehf_read_value(data, + W83627EHF_REG_PWM_ENABLE[channel]); + reg &= ~(0x03 << W83627EHF_PWM_ENABLE_SHIFT[channel]); + reg |= (val - 1) << W83627EHF_PWM_ENABLE_SHIFT[channel]; + w83627ehf_write_value(data, W83627EHF_REG_PWM_ENABLE[channel], + reg); mutex_unlock(&data->update_lock); - return count; + return 0; } - #define show_tol_temp(reg) \ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ @@ -1510,7 +901,7 @@ store_target_temp(struct device *dev, struct device_attribute *attr, mutex_lock(&data->update_lock); data->target_temp[nr] = val; - w83627ehf_write_value(data, data->REG_TARGET[nr], val); + w83627ehf_write_value(data, W83627EHF_REG_TARGET[nr], val); mutex_unlock(&data->update_lock); return count; } @@ -1520,7 +911,6 @@ store_tolerance(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr); int nr = sensor_attr->index; u16 reg; @@ -1535,76 +925,34 @@ store_tolerance(struct device *dev, struct device_attribute *attr, val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), 0, 15); mutex_lock(&data->update_lock); - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - /* Limit tolerance further for NCT6776F */ - if (sio_data->kind == nct6776 && val > 7) - val = 7; - reg = w83627ehf_read_value(data, NCT6775_REG_FAN_MODE[nr]); + reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]); + if (nr == 1) + reg = (reg & 0x0f) | (val << 4); + else reg = (reg & 0xf0) | val; - w83627ehf_write_value(data, NCT6775_REG_FAN_MODE[nr], reg); - } else { - reg = w83627ehf_read_value(data, W83627EHF_REG_TOLERANCE[nr]); - if (nr == 1) - reg = (reg & 0x0f) | (val << 4); - else - reg = (reg & 0xf0) | val; - w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg); - } + w83627ehf_write_value(data, W83627EHF_REG_TOLERANCE[nr], reg); data->tolerance[nr] = val; mutex_unlock(&data->update_lock); return count; } -static struct sensor_device_attribute sda_pwm[] = { - SENSOR_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 0), - SENSOR_ATTR(pwm2, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 1), - SENSOR_ATTR(pwm3, S_IWUSR | S_IRUGO, show_pwm, store_pwm, 2), - SENSOR_ATTR(pwm4, S_IWUSR | S_IRUGO, show_pwm, 
store_pwm, 3), -}; - -static struct sensor_device_attribute sda_pwm_mode[] = { - SENSOR_ATTR(pwm1_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 0), - SENSOR_ATTR(pwm2_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 1), - SENSOR_ATTR(pwm3_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 2), - SENSOR_ATTR(pwm4_mode, S_IWUSR | S_IRUGO, show_pwm_mode, - store_pwm_mode, 3), -}; - -static struct sensor_device_attribute sda_pwm_enable[] = { - SENSOR_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 0), - SENSOR_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 1), - SENSOR_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 2), - SENSOR_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, show_pwm_enable, - store_pwm_enable, 3), -}; - -static struct sensor_device_attribute sda_target_temp[] = { - SENSOR_ATTR(pwm1_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 0), - SENSOR_ATTR(pwm2_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 1), - SENSOR_ATTR(pwm3_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 2), - SENSOR_ATTR(pwm4_target, S_IWUSR | S_IRUGO, show_target_temp, - store_target_temp, 3), -}; - -static struct sensor_device_attribute sda_tolerance[] = { - SENSOR_ATTR(pwm1_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 0), - SENSOR_ATTR(pwm2_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 1), - SENSOR_ATTR(pwm3_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 2), - SENSOR_ATTR(pwm4_tolerance, S_IWUSR | S_IRUGO, show_tolerance, - store_tolerance, 3), -}; +static SENSOR_DEVICE_ATTR(pwm1_target, 0644, show_target_temp, + store_target_temp, 0); +static SENSOR_DEVICE_ATTR(pwm2_target, 0644, show_target_temp, + store_target_temp, 1); +static SENSOR_DEVICE_ATTR(pwm3_target, 0644, show_target_temp, + store_target_temp, 2); +static SENSOR_DEVICE_ATTR(pwm4_target, 0644, show_target_temp, + store_target_temp, 3); + +static SENSOR_DEVICE_ATTR(pwm1_tolerance, 0644, show_tolerance, + store_tolerance, 0); +static SENSOR_DEVICE_ATTR(pwm2_tolerance, 0644, show_tolerance, + store_tolerance, 1); +static SENSOR_DEVICE_ATTR(pwm3_tolerance, 0644, show_tolerance, + store_tolerance, 2); +static SENSOR_DEVICE_ATTR(pwm4_tolerance, 0644, show_tolerance, + store_tolerance, 3); /* Smart Fan registers */ @@ -1612,7 +960,7 @@ static struct sensor_device_attribute sda_tolerance[] = { static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ @@ -1634,21 +982,21 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ val = clamp_val(val, 1, 255); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ - w83627ehf_write_value(data, data->REG_##REG[nr], val); \ + w83627ehf_write_value(data, REG[nr], val); \ mutex_unlock(&data->update_lock); \ return count; \ } -fan_functions(fan_start_output, FAN_START_OUTPUT) -fan_functions(fan_stop_output, FAN_STOP_OUTPUT) -fan_functions(fan_max_output, FAN_MAX_OUTPUT) -fan_functions(fan_step_output, FAN_STEP_OUTPUT) +fan_functions(fan_start_output, W83627EHF_REG_FAN_START_OUTPUT) +fan_functions(fan_stop_output, W83627EHF_REG_FAN_STOP_OUTPUT) +fan_functions(fan_max_output, data->REG_FAN_MAX_OUTPUT) 
+fan_functions(fan_step_output, data->REG_FAN_STEP_OUTPUT) #define fan_time_functions(reg, REG) \ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ - struct w83627ehf_data *data = w83627ehf_update_device(dev); \ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); \ struct sensor_device_attribute *sensor_attr = \ to_sensor_dev_attr(attr); \ int nr = sensor_attr->index; \ @@ -1673,78 +1021,61 @@ store_##reg(struct device *dev, struct device_attribute *attr, \ val = step_time_to_reg(val, data->pwm_mode[nr]); \ mutex_lock(&data->update_lock); \ data->reg[nr] = val; \ - w83627ehf_write_value(data, data->REG_##REG[nr], val); \ + w83627ehf_write_value(data, REG[nr], val); \ mutex_unlock(&data->update_lock); \ return count; \ } \ -fan_time_functions(fan_stop_time, FAN_STOP_TIME) - -static ssize_t name_show(struct device *dev, struct device_attribute *attr, - char *buf) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - - return sprintf(buf, "%s\n", data->name); -} -static DEVICE_ATTR_RO(name); - -static struct sensor_device_attribute sda_sf3_arrays_fan4[] = { - SENSOR_ATTR(pwm4_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 3), - SENSOR_ATTR(pwm4_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 3), - SENSOR_ATTR(pwm4_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 3), - SENSOR_ATTR(pwm4_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 3), - SENSOR_ATTR(pwm4_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 3), -}; - -static struct sensor_device_attribute sda_sf3_arrays_fan3[] = { - SENSOR_ATTR(pwm3_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 2), - SENSOR_ATTR(pwm3_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 2), - SENSOR_ATTR(pwm3_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 2), -}; - -static struct sensor_device_attribute sda_sf3_arrays[] = { - SENSOR_ATTR(pwm1_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 0), - SENSOR_ATTR(pwm2_stop_time, S_IWUSR | S_IRUGO, show_fan_stop_time, - store_fan_stop_time, 1), - SENSOR_ATTR(pwm1_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 0), - SENSOR_ATTR(pwm2_start_output, S_IWUSR | S_IRUGO, show_fan_start_output, - store_fan_start_output, 1), - SENSOR_ATTR(pwm1_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 0), - SENSOR_ATTR(pwm2_stop_output, S_IWUSR | S_IRUGO, show_fan_stop_output, - store_fan_stop_output, 1), -}; +fan_time_functions(fan_stop_time, W83627EHF_REG_FAN_STOP_TIME) + +static SENSOR_DEVICE_ATTR(pwm4_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 3); +static SENSOR_DEVICE_ATTR(pwm4_start_output, 0644, show_fan_start_output, + store_fan_start_output, 3); +static SENSOR_DEVICE_ATTR(pwm4_stop_output, 0644, show_fan_stop_output, + store_fan_stop_output, 3); +static SENSOR_DEVICE_ATTR(pwm4_max_output, 0644, show_fan_max_output, + store_fan_max_output, 3); +static SENSOR_DEVICE_ATTR(pwm4_step_output, 0644, show_fan_step_output, + store_fan_step_output, 3); + +static SENSOR_DEVICE_ATTR(pwm3_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 2); +static SENSOR_DEVICE_ATTR(pwm3_start_output, 0644, show_fan_start_output, + store_fan_start_output, 2); +static SENSOR_DEVICE_ATTR(pwm3_stop_output, 0644, show_fan_stop_output, + 
store_fan_stop_output, 2); + +static SENSOR_DEVICE_ATTR(pwm1_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 0); +static SENSOR_DEVICE_ATTR(pwm2_stop_time, 0644, show_fan_stop_time, + store_fan_stop_time, 1); +static SENSOR_DEVICE_ATTR(pwm1_start_output, 0644, show_fan_start_output, + store_fan_start_output, 0); +static SENSOR_DEVICE_ATTR(pwm2_start_output, 0644, show_fan_start_output, + store_fan_start_output, 1); +static SENSOR_DEVICE_ATTR(pwm1_stop_output, 0644, show_fan_stop_output, + store_fan_stop_output, 0); +static SENSOR_DEVICE_ATTR(pwm2_stop_output, 0644, show_fan_stop_output, + store_fan_stop_output, 1); /* * pwm1 and pwm3 don't support max and step settings on all chips. * Need to check support while generating/removing attribute files. */ -static struct sensor_device_attribute sda_sf3_max_step_arrays[] = { - SENSOR_ATTR(pwm1_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 0), - SENSOR_ATTR(pwm1_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 0), - SENSOR_ATTR(pwm2_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 1), - SENSOR_ATTR(pwm2_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 1), - SENSOR_ATTR(pwm3_max_output, S_IWUSR | S_IRUGO, show_fan_max_output, - store_fan_max_output, 2), - SENSOR_ATTR(pwm3_step_output, S_IWUSR | S_IRUGO, show_fan_step_output, - store_fan_step_output, 2), -}; +static SENSOR_DEVICE_ATTR(pwm1_max_output, 0644, show_fan_max_output, + store_fan_max_output, 0); +static SENSOR_DEVICE_ATTR(pwm1_step_output, 0644, show_fan_step_output, + store_fan_step_output, 0); +static SENSOR_DEVICE_ATTR(pwm2_max_output, 0644, show_fan_max_output, + store_fan_max_output, 1); +static SENSOR_DEVICE_ATTR(pwm2_step_output, 0644, show_fan_step_output, + store_fan_step_output, 1); +static SENSOR_DEVICE_ATTR(pwm3_max_output, 0644, show_fan_max_output, + store_fan_max_output, 2); +static SENSOR_DEVICE_ATTR(pwm3_step_output, 0644, show_fan_step_output, + store_fan_step_output, 2); static ssize_t cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -1752,33 +1083,20 @@ cpu0_vid_show(struct device *dev, struct device_attribute *attr, char *buf) struct w83627ehf_data *data = dev_get_drvdata(dev); return sprintf(buf, "%d\n", vid_from_reg(data->vid, data->vrm)); } -static DEVICE_ATTR_RO(cpu0_vid); +DEVICE_ATTR_RO(cpu0_vid); /* Case open detection */ - -static ssize_t -show_caseopen(struct device *dev, struct device_attribute *attr, char *buf) +static int +clear_caseopen(struct device *dev, struct w83627ehf_data *data, int channel, + long val) { - struct w83627ehf_data *data = w83627ehf_update_device(dev); - - return sprintf(buf, "%d\n", - !!(data->caseopen & to_sensor_dev_attr_2(attr)->index)); -} - -static ssize_t -clear_caseopen(struct device *dev, struct device_attribute *attr, - const char *buf, size_t count) -{ - struct w83627ehf_data *data = dev_get_drvdata(dev); - unsigned long val; - u16 reg, mask; + const u16 mask = 0x80; + u16 reg; - if (kstrtoul(buf, 10, &val) || val != 0) + if (val != 0 || channel != 0) return -EINVAL; - mask = to_sensor_dev_attr_2(attr)->nr; - mutex_lock(&data->update_lock); reg = w83627ehf_read_value(data, W83627EHF_REG_CASEOPEN_CLR); w83627ehf_write_value(data, W83627EHF_REG_CASEOPEN_CLR, reg | mask); @@ -1786,85 +1104,116 @@ clear_caseopen(struct device *dev, struct device_attribute *attr, data->valid = 0; /* Force cache refresh */ mutex_unlock(&data->update_lock); - return count; + return 0; } 
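
The w83627ehf hunks above and below follow one pattern: each sysfs store function loses its kstrtoul() and attribute-parsing boilerplate and becomes a channel-indexed helper returning 0 or -errno, which a single hwmon .write callback then dispatches (see the w83627ehf_write hunk further down). A minimal sketch of that pattern, using hypothetical foo_* names; the hwmon_ops .write signature and the hwmon_pwm/hwmon_pwm_input enums are the real kernel API:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/hwmon.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct foo_data {                       /* hypothetical driver state */
        struct mutex update_lock;
        u8 pwm[4];
};

/* Channel-indexed helper: no sysfs parsing, just 0 or -errno. */
static int foo_store_pwm(struct foo_data *data, int channel, long val)
{
        if (val < 0 || val > 255)
                return -EINVAL;

        mutex_lock(&data->update_lock);
        data->pwm[channel] = val;
        /* a real driver would mirror the value to the chip here */
        mutex_unlock(&data->update_lock);
        return 0;
}

/* Single .write entry point; the hwmon core owns the sysfs layer. */
static int foo_write(struct device *dev, enum hwmon_sensor_types type,
                     u32 attr, int channel, long val)
{
        struct foo_data *data = dev_get_drvdata(dev);

        if (type == hwmon_pwm && attr == hwmon_pwm_input)
                return foo_store_pwm(data, channel, val);

        return -EOPNOTSUPP;     /* unhandled attributes */
}

Keeping the helpers free of sysfs details is what lets the later hunks delete the per-attribute device_create_file() calls wholesale.
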
-static struct sensor_device_attribute_2 sda_caseopen[] = { - SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_caseopen, - clear_caseopen, 0x80, 0x10), - SENSOR_ATTR_2(intrusion1_alarm, S_IWUSR | S_IRUGO, show_caseopen, - clear_caseopen, 0x40, 0x40), -}; - -/* - * Driver and device management - */ - -static void w83627ehf_device_remove_files(struct device *dev) +static umode_t w83627ehf_attrs_visible(struct kobject *kobj, + struct attribute *a, int n) { - /* - * some entries in the following arrays may not have been used in - * device_create_file(), but device_remove_file() will ignore them - */ - int i; + struct device *dev = container_of(kobj, struct device, kobj); struct w83627ehf_data *data = dev_get_drvdata(dev); + struct device_attribute *devattr; + struct sensor_device_attribute *sda; + + devattr = container_of(a, struct device_attribute, attr); + + /* Not sensor */ + if (devattr->show == cpu0_vid_show && data->have_vid) + return a->mode; + + sda = (struct sensor_device_attribute *)devattr; + + if (sda->index < 2 && + (devattr->show == show_fan_stop_time || + devattr->show == show_fan_start_output || + devattr->show == show_fan_stop_output)) + return a->mode; + + if (sda->index < 3 && + (devattr->show == show_fan_max_output || + devattr->show == show_fan_step_output) && + data->REG_FAN_STEP_OUTPUT && + data->REG_FAN_STEP_OUTPUT[sda->index] != 0xff) + return a->mode; + + /* if fan3 and fan4 are enabled create the files for them */ + if (sda->index == 2 && + (data->has_fan & (1 << 2)) && data->pwm_num >= 3 && + (devattr->show == show_fan_stop_time || + devattr->show == show_fan_start_output || + devattr->show == show_fan_stop_output)) + return a->mode; + + if (sda->index == 3 && + (data->has_fan & (1 << 3)) && data->pwm_num >= 4 && + (devattr->show == show_fan_stop_time || + devattr->show == show_fan_start_output || + devattr->show == show_fan_stop_output || + devattr->show == show_fan_max_output || + devattr->show == show_fan_step_output)) + return a->mode; + + if ((devattr->show == show_target_temp || + devattr->show == show_tolerance) && + (data->has_fan & (1 << sda->index)) && + sda->index < data->pwm_num) + return a->mode; - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) - device_remove_file(dev, &sda_sf3_arrays[i].dev_attr); - for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { - struct sensor_device_attribute *attr = - &sda_sf3_max_step_arrays[i]; - if (data->REG_FAN_STEP_OUTPUT && - data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) - device_remove_file(dev, &attr->dev_attr); - } - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) - device_remove_file(dev, &sda_sf3_arrays_fan3[i].dev_attr); - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) - device_remove_file(dev, &sda_sf3_arrays_fan4[i].dev_attr); - for (i = 0; i < data->in_num; i++) { - if ((i == 6) && data->in6_skip) - continue; - device_remove_file(dev, &sda_in_input[i].dev_attr); - device_remove_file(dev, &sda_in_alarm[i].dev_attr); - device_remove_file(dev, &sda_in_min[i].dev_attr); - device_remove_file(dev, &sda_in_max[i].dev_attr); - } - for (i = 0; i < 5; i++) { - device_remove_file(dev, &sda_fan_input[i].dev_attr); - device_remove_file(dev, &sda_fan_alarm[i].dev_attr); - device_remove_file(dev, &sda_fan_div[i].dev_attr); - device_remove_file(dev, &sda_fan_min[i].dev_attr); - } - for (i = 0; i < data->pwm_num; i++) { - device_remove_file(dev, &sda_pwm[i].dev_attr); - device_remove_file(dev, &sda_pwm_mode[i].dev_attr); - device_remove_file(dev, &sda_pwm_enable[i].dev_attr); - 
device_remove_file(dev, &sda_target_temp[i].dev_attr); - device_remove_file(dev, &sda_tolerance[i].dev_attr); - } - for (i = 0; i < NUM_REG_TEMP; i++) { - if (!(data->have_temp & (1 << i))) - continue; - device_remove_file(dev, &sda_temp_input[i].dev_attr); - device_remove_file(dev, &sda_temp_label[i].dev_attr); - if (i == 2 && data->temp3_val_only) - continue; - device_remove_file(dev, &sda_temp_max[i].dev_attr); - device_remove_file(dev, &sda_temp_max_hyst[i].dev_attr); - if (i > 2) - continue; - device_remove_file(dev, &sda_temp_alarm[i].dev_attr); - device_remove_file(dev, &sda_temp_type[i].dev_attr); - device_remove_file(dev, &sda_temp_offset[i].dev_attr); - } + return 0; +} + +/* These groups handle non-standard attributes used in this device */ +static struct attribute *w83627ehf_attrs[] = { + + &sensor_dev_attr_pwm1_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm1_start_output.dev_attr.attr, + &sensor_dev_attr_pwm1_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm1_max_output.dev_attr.attr, + &sensor_dev_attr_pwm1_step_output.dev_attr.attr, + &sensor_dev_attr_pwm1_target.dev_attr.attr, + &sensor_dev_attr_pwm1_tolerance.dev_attr.attr, + + &sensor_dev_attr_pwm2_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm2_start_output.dev_attr.attr, + &sensor_dev_attr_pwm2_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm2_max_output.dev_attr.attr, + &sensor_dev_attr_pwm2_step_output.dev_attr.attr, + &sensor_dev_attr_pwm2_target.dev_attr.attr, + &sensor_dev_attr_pwm2_tolerance.dev_attr.attr, + + &sensor_dev_attr_pwm3_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm3_start_output.dev_attr.attr, + &sensor_dev_attr_pwm3_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm3_max_output.dev_attr.attr, + &sensor_dev_attr_pwm3_step_output.dev_attr.attr, + &sensor_dev_attr_pwm3_target.dev_attr.attr, + &sensor_dev_attr_pwm3_tolerance.dev_attr.attr, + + &sensor_dev_attr_pwm4_stop_time.dev_attr.attr, + &sensor_dev_attr_pwm4_start_output.dev_attr.attr, + &sensor_dev_attr_pwm4_stop_output.dev_attr.attr, + &sensor_dev_attr_pwm4_max_output.dev_attr.attr, + &sensor_dev_attr_pwm4_step_output.dev_attr.attr, + &sensor_dev_attr_pwm4_target.dev_attr.attr, + &sensor_dev_attr_pwm4_tolerance.dev_attr.attr, + + &dev_attr_cpu0_vid.attr, + NULL +}; - device_remove_file(dev, &sda_caseopen[0].dev_attr); - device_remove_file(dev, &sda_caseopen[1].dev_attr); +static const struct attribute_group w83627ehf_group = { + .attrs = w83627ehf_attrs, + .is_visible = w83627ehf_attrs_visible, +}; - device_remove_file(dev, &dev_attr_name); - device_remove_file(dev, &dev_attr_cpu0_vid); -} +static const struct attribute_group *w83627ehf_groups[] = { + &w83627ehf_group, + NULL +}; + +/* + * Driver and device management + */ /* Get the monitoring functions started */ static inline void w83627ehf_init_device(struct w83627ehf_data *data, @@ -1927,16 +1276,6 @@ static inline void w83627ehf_init_device(struct w83627ehf_data *data, } } -static void w82627ehf_swap_tempreg(struct w83627ehf_data *data, - int r1, int r2) -{ - swap(data->temp_src[r1], data->temp_src[r2]); - swap(data->reg_temp[r1], data->reg_temp[r2]); - swap(data->reg_temp_over[r1], data->reg_temp_over[r2]); - swap(data->reg_temp_hyst[r1], data->reg_temp_hyst[r2]); - swap(data->reg_temp_config[r1], data->reg_temp_config[r2]); -} - static void w83627ehf_set_temp_reg_ehf(struct w83627ehf_data *data, int n_temp) { @@ -1954,7 +1293,7 @@ static void w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data, struct w83627ehf_data *data) { - int fan3pin, fan4pin, fan4min, fan5pin, 
regval; + int fan3pin, fan4pin, fan5pin, regval; /* The W83627UHG is simple, only two fan inputs, no config */ if (sio_data->kind == w83627uhg) { @@ -1964,77 +1303,392 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data, } /* fan4 and fan5 share some pins with the GPIO and serial flash */ - if (sio_data->kind == nct6775) { - /* On NCT6775, fan4 shares pins with the fdc interface */ - fan3pin = 1; - fan4pin = !(superio_inb(sio_data->sioreg, 0x2A) & 0x80); - fan4min = 0; - fan5pin = 0; - } else if (sio_data->kind == nct6776) { - bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80; - - superio_select(sio_data->sioreg, W83627EHF_LD_HWM); - regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE); - - if (regval & 0x80) - fan3pin = gpok; - else - fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40); - - if (regval & 0x40) - fan4pin = gpok; - else - fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01); - - if (regval & 0x20) - fan5pin = gpok; - else - fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02); - - fan4min = fan4pin; - } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { + if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { fan3pin = 1; fan4pin = superio_inb(sio_data->sioreg, 0x27) & 0x40; fan5pin = superio_inb(sio_data->sioreg, 0x27) & 0x20; - fan4min = fan4pin; } else { fan3pin = 1; fan4pin = !(superio_inb(sio_data->sioreg, 0x29) & 0x06); fan5pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x02); - fan4min = fan4pin; } data->has_fan = data->has_fan_min = 0x03; /* fan1 and fan2 */ data->has_fan |= (fan3pin << 2); data->has_fan_min |= (fan3pin << 2); - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - /* - * NCT6775F and NCT6776F don't have the W83627EHF_REG_FANDIV1 - * register - */ - data->has_fan |= (fan4pin << 3) | (fan5pin << 4); - data->has_fan_min |= (fan4min << 3) | (fan5pin << 4); - } else { - /* - * It looks like fan4 and fan5 pins can be alternatively used - * as fan on/off switches, but fan5 control is write only :/ - * We assume that if the serial interface is disabled, designers - * connected fan5 as input unless they are emitting log 1, which - * is not the default. - */ - regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1); - if ((regval & (1 << 2)) && fan4pin) { - data->has_fan |= (1 << 3); - data->has_fan_min |= (1 << 3); + /* + * It looks like fan4 and fan5 pins can be alternatively used + * as fan on/off switches, but fan5 control is write only :/ + * We assume that if the serial interface is disabled, designers + * connected fan5 as input unless they are emitting log 1, which + * is not the default. + */ + regval = w83627ehf_read_value(data, W83627EHF_REG_FANDIV1); + if ((regval & (1 << 2)) && fan4pin) { + data->has_fan |= (1 << 3); + data->has_fan_min |= (1 << 3); + } + if (!(regval & (1 << 1)) && fan5pin) { + data->has_fan |= (1 << 4); + data->has_fan_min |= (1 << 4); + } +} + +static umode_t +w83627ehf_is_visible(const void *drvdata, enum hwmon_sensor_types type, + u32 attr, int channel) +{ + const struct w83627ehf_data *data = drvdata; + + switch (type) { + case hwmon_temp: + /* channel 0.., name 1.. 
*/ + if (!(data->have_temp & (1 << channel))) + return 0; + if (attr == hwmon_temp_input || attr == hwmon_temp_label) + return 0444; + if (channel == 2 && data->temp3_val_only) + return 0; + if (attr == hwmon_temp_max) { + if (data->reg_temp_over[channel]) + return 0644; + else + return 0; + } + if (attr == hwmon_temp_max_hyst) { + if (data->reg_temp_hyst[channel]) + return 0644; + else + return 0; + } + if (channel > 2) + return 0; + if (attr == hwmon_temp_alarm || attr == hwmon_temp_type) + return 0444; + if (attr == hwmon_temp_offset) { + if (data->have_temp_offset & (1 << channel)) + return 0644; + else + return 0; + } + break; + + case hwmon_fan: + /* channel 0.., name 1.. */ + if (!(data->has_fan & (1 << channel))) + return 0; + if (attr == hwmon_fan_input || attr == hwmon_fan_alarm) + return 0444; + if (attr == hwmon_fan_div) { + return 0444; } - if (!(regval & (1 << 1)) && fan5pin) { - data->has_fan |= (1 << 4); - data->has_fan_min |= (1 << 4); + if (attr == hwmon_fan_min) { + if (data->has_fan_min & (1 << channel)) + return 0644; + else + return 0; } + break; + + case hwmon_in: + /* channel 0.., name 0.. */ + if (channel >= data->in_num) + return 0; + if (channel == 6 && data->in6_skip) + return 0; + if (attr == hwmon_in_alarm || attr == hwmon_in_input) + return 0444; + if (attr == hwmon_in_min || attr == hwmon_in_max) + return 0644; + break; + + case hwmon_pwm: + /* channel 0.., name 1.. */ + if (!(data->has_fan & (1 << channel)) || + channel >= data->pwm_num) + return 0; + if (attr == hwmon_pwm_mode || attr == hwmon_pwm_enable || + attr == hwmon_pwm_input) + return 0644; + break; + + case hwmon_intrusion: + return 0644; + + default: /* Shouldn't happen */ + return 0; } + + return 0; /* Shouldn't happen */ } +static int +w83627ehf_do_read_temp(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_temp_input: + *val = LM75_TEMP_FROM_REG(data->temp[channel]); + return 0; + case hwmon_temp_max: + *val = LM75_TEMP_FROM_REG(data->temp_max[channel]); + return 0; + case hwmon_temp_max_hyst: + *val = LM75_TEMP_FROM_REG(data->temp_max_hyst[channel]); + return 0; + case hwmon_temp_offset: + *val = data->temp_offset[channel] * 1000; + return 0; + case hwmon_temp_type: + *val = (int)data->temp_type[channel]; + return 0; + case hwmon_temp_alarm: + if (channel < 3) { + int bit[] = { 4, 5, 13 }; + *val = (data->alarms >> bit[channel]) & 1; + return 0; + } + break; + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_in(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_in_input: + *val = in_from_reg(data->in[channel], channel, data->scale_in); + return 0; + case hwmon_in_min: + *val = in_from_reg(data->in_min[channel], channel, + data->scale_in); + return 0; + case hwmon_in_max: + *val = in_from_reg(data->in_max[channel], channel, + data->scale_in); + return 0; + case hwmon_in_alarm: + if (channel < 10) { + int bit[] = { 0, 1, 2, 3, 8, 21, 20, 16, 17, 19 }; + *val = (data->alarms >> bit[channel]) & 1; + return 0; + } + break; + default: + break; + } + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_fan(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_fan_input: + *val = data->rpm[channel]; + return 0; + case hwmon_fan_min: + *val = fan_from_reg8(data->fan_min[channel], + data->fan_div[channel]); + return 0; + case hwmon_fan_div: + *val = div_from_reg(data->fan_div[channel]); + return 0; + case hwmon_fan_alarm: + 
if (channel < 5) { + int bit[] = { 6, 7, 11, 10, 23 }; + *val = (data->alarms >> bit[channel]) & 1; + return 0; + } + break; + default: + break; + } + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_pwm(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + switch (attr) { + case hwmon_pwm_input: + *val = data->pwm[channel]; + return 0; + case hwmon_pwm_enable: + *val = data->pwm_enable[channel]; + return 0; + case hwmon_pwm_mode: + *val = data->pwm_enable[channel]; + return 0; + default: + break; + } + return -EOPNOTSUPP; +} + +static int +w83627ehf_do_read_intrusion(struct w83627ehf_data *data, u32 attr, + int channel, long *val) +{ + if (attr != hwmon_intrusion_alarm || channel != 0) + return -EOPNOTSUPP; /* shouldn't happen */ + + *val = !!(data->caseopen & 0x10); + return 0; +} + +static int +w83627ehf_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + struct w83627ehf_data *data = w83627ehf_update_device(dev->parent); + + switch (type) { + case hwmon_fan: + return w83627ehf_do_read_fan(data, attr, channel, val); + + case hwmon_in: + return w83627ehf_do_read_in(data, attr, channel, val); + + case hwmon_pwm: + return w83627ehf_do_read_pwm(data, attr, channel, val); + + case hwmon_temp: + return w83627ehf_do_read_temp(data, attr, channel, val); + + case hwmon_intrusion: + return w83627ehf_do_read_intrusion(data, attr, channel, val); + + default: + break; + } + + return -EOPNOTSUPP; +} + +static int +w83627ehf_read_string(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, const char **str) +{ + struct w83627ehf_data *data = dev_get_drvdata(dev); + + switch (type) { + case hwmon_temp: + if (attr == hwmon_temp_label) { + *str = data->temp_label[data->temp_src[channel]]; + return 0; + } + break; + + default: + break; + } + /* Nothing else should be read as a string */ + return -EOPNOTSUPP; +} + +static int +w83627ehf_write(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long val) +{ + struct w83627ehf_data *data = dev_get_drvdata(dev); + + if (type == hwmon_in && attr == hwmon_in_min) + return store_in_min(dev, data, channel, val); + if (type == hwmon_in && attr == hwmon_in_max) + return store_in_max(dev, data, channel, val); + + if (type == hwmon_fan && attr == hwmon_fan_min) + return store_fan_min(dev, data, channel, val); + + if (type == hwmon_temp && attr == hwmon_temp_max) + return store_temp_max(dev, data, channel, val); + if (type == hwmon_temp && attr == hwmon_temp_max_hyst) + return store_temp_max_hyst(dev, data, channel, val); + if (type == hwmon_temp && attr == hwmon_temp_offset) + return store_temp_offset(dev, data, channel, val); + + if (type == hwmon_pwm && attr == hwmon_pwm_mode) + return store_pwm_mode(dev, data, channel, val); + if (type == hwmon_pwm && attr == hwmon_pwm_enable) + return store_pwm_enable(dev, data, channel, val); + if (type == hwmon_pwm && attr == hwmon_pwm_input) + return store_pwm(dev, data, channel, val); + + if (type == hwmon_intrusion && attr == hwmon_intrusion_alarm) + return clear_caseopen(dev, data, channel, val); + + return -EOPNOTSUPP; +} + +static const struct hwmon_ops w83627ehf_ops = { + .is_visible = w83627ehf_is_visible, + .read = w83627ehf_read, + .read_string = w83627ehf_read_string, + .write = w83627ehf_write, +}; + +static const struct hwmon_channel_info *w83627ehf_info[] = { + HWMON_CHANNEL_INFO(fan, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, 
+ HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN, + HWMON_F_ALARM | HWMON_F_DIV | HWMON_F_INPUT | HWMON_F_MIN), + HWMON_CHANNEL_INFO(in, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN, + HWMON_I_ALARM | HWMON_I_INPUT | HWMON_I_MAX | HWMON_I_MIN), + HWMON_CHANNEL_INFO(pwm, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE, + HWMON_PWM_ENABLE | HWMON_PWM_INPUT | HWMON_PWM_MODE), + HWMON_CHANNEL_INFO(temp, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE, + HWMON_T_ALARM | HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_MAX | + HWMON_T_MAX_HYST | HWMON_T_OFFSET | HWMON_T_TYPE), + HWMON_CHANNEL_INFO(intrusion, + HWMON_INTRUSION_ALARM), + NULL +}; + +static const struct hwmon_chip_info w83627ehf_chip_info = { + .ops = &w83627ehf_ops, + .info = w83627ehf_info, +}; + static int w83627ehf_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -2043,6 +1697,7 @@ static int w83627ehf_probe(struct platform_device *pdev) struct resource *res; u8 en_vrm10; int i, err = 0; + struct device *hwmon_dev; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (!request_region(res->start, IOREGION_LENGTH, DRVNAME)) { @@ -2069,15 +1724,13 @@ static int w83627ehf_probe(struct platform_device *pdev) /* 627EHG and 627EHF have 10 voltage inputs; 627DHG and 667HG have 9 */ data->in_num = (sio_data->kind == w83627ehf) ? 10 : 9; - /* 667HG, NCT6775F, and NCT6776F have 3 pwms, and 627UHG has only 2 */ + /* 667HG has 3 pwms, and 627UHG has only 2 */ switch (sio_data->kind) { default: data->pwm_num = 4; break; case w83667hg: case w83667hg_b: - case nct6775: - case nct6776: data->pwm_num = 3; break; case w83627uhg: @@ -2089,83 +1742,7 @@ static int w83627ehf_probe(struct platform_device *pdev) data->have_temp = 0x07; /* Deal with temperature register setup first. */ - if (sio_data->kind == nct6775 || sio_data->kind == nct6776) { - int mask = 0; - - /* - * Display temperature sensor output only if it monitors - * a source other than one already reported. 
Always display - * first three temperature registers, though. - */ - for (i = 0; i < NUM_REG_TEMP; i++) { - u8 src; - - data->reg_temp[i] = NCT6775_REG_TEMP[i]; - data->reg_temp_over[i] = NCT6775_REG_TEMP_OVER[i]; - data->reg_temp_hyst[i] = NCT6775_REG_TEMP_HYST[i]; - data->reg_temp_config[i] = NCT6775_REG_TEMP_CONFIG[i]; - - src = w83627ehf_read_value(data, - NCT6775_REG_TEMP_SOURCE[i]); - src &= 0x1f; - if (src && !(mask & (1 << src))) { - data->have_temp |= 1 << i; - mask |= 1 << src; - } - - data->temp_src[i] = src; - - /* - * Now do some register swapping if index 0..2 don't - * point to SYSTIN(1), CPUIN(2), and AUXIN(3). - * Idea is to have the first three attributes - * report SYSTIN, CPUIN, and AUXIN if possible - * without overriding the basic system configuration. - */ - if (i > 0 && data->temp_src[0] != 1 - && data->temp_src[i] == 1) - w82627ehf_swap_tempreg(data, 0, i); - if (i > 1 && data->temp_src[1] != 2 - && data->temp_src[i] == 2) - w82627ehf_swap_tempreg(data, 1, i); - if (i > 2 && data->temp_src[2] != 3 - && data->temp_src[i] == 3) - w82627ehf_swap_tempreg(data, 2, i); - } - if (sio_data->kind == nct6776) { - /* - * On NCT6776, AUXTIN and VIN3 pins are shared. - * Only way to detect it is to check if AUXTIN is used - * as a temperature source, and if that source is - * enabled. - * - * If that is the case, disable in6, which reports VIN3. - * Otherwise disable temp3. - */ - if (data->temp_src[2] == 3) { - u8 reg; - - if (data->reg_temp_config[2]) - reg = w83627ehf_read_value(data, - data->reg_temp_config[2]); - else - reg = 0; /* Assume AUXTIN is used */ - - if (reg & 0x01) - data->have_temp &= ~(1 << 2); - else - data->in6_skip = 1; - } - data->temp_label = nct6776_temp_label; - } else { - data->temp_label = nct6775_temp_label; - } - data->have_temp_offset = data->have_temp & 0x07; - for (i = 0; i < 3; i++) { - if (data->temp_src[i] > 3) - data->have_temp_offset &= ~(1 << i); - } - } else if (sio_data->kind == w83667hg_b) { + if (sio_data->kind == w83667hg_b) { u8 reg; w83627ehf_set_temp_reg_ehf(data, 4); @@ -2275,56 +1852,12 @@ static int w83627ehf_probe(struct platform_device *pdev) data->have_temp_offset = data->have_temp & 0x07; } - if (sio_data->kind == nct6775) { - data->has_fan_div = true; - data->fan_from_reg = fan_from_reg16; - data->fan_from_reg_min = fan_from_reg8; - data->REG_PWM = NCT6775_REG_PWM; - data->REG_TARGET = NCT6775_REG_TARGET; - data->REG_FAN = NCT6775_REG_FAN; - data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME; - data->REG_FAN_MAX_OUTPUT = NCT6775_REG_FAN_MAX_OUTPUT; - data->REG_FAN_STEP_OUTPUT = NCT6775_REG_FAN_STEP_OUTPUT; - } else if (sio_data->kind == nct6776) { - data->has_fan_div = false; - data->fan_from_reg = fan_from_reg13; - data->fan_from_reg_min = fan_from_reg13; - data->REG_PWM = NCT6775_REG_PWM; - data->REG_TARGET = NCT6775_REG_TARGET; - data->REG_FAN = NCT6775_REG_FAN; - data->REG_FAN_MIN = NCT6776_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = NCT6775_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = NCT6775_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = NCT6775_REG_FAN_STOP_TIME; - } else if (sio_data->kind == w83667hg_b) { - data->has_fan_div = true; - data->fan_from_reg = fan_from_reg8; - data->fan_from_reg_min = fan_from_reg8; - data->REG_PWM = W83627EHF_REG_PWM; - data->REG_TARGET = W83627EHF_REG_TARGET; - data->REG_FAN = W83627EHF_REG_FAN; - 
data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME; + if (sio_data->kind == w83667hg_b) { data->REG_FAN_MAX_OUTPUT = W83627EHF_REG_FAN_MAX_OUTPUT_W83667_B; data->REG_FAN_STEP_OUTPUT = W83627EHF_REG_FAN_STEP_OUTPUT_W83667_B; } else { - data->has_fan_div = true; - data->fan_from_reg = fan_from_reg8; - data->fan_from_reg_min = fan_from_reg8; - data->REG_PWM = W83627EHF_REG_PWM; - data->REG_TARGET = W83627EHF_REG_TARGET; - data->REG_FAN = W83627EHF_REG_FAN; - data->REG_FAN_MIN = W83627EHF_REG_FAN_MIN; - data->REG_FAN_START_OUTPUT = W83627EHF_REG_FAN_START_OUTPUT; - data->REG_FAN_STOP_OUTPUT = W83627EHF_REG_FAN_STOP_OUTPUT; - data->REG_FAN_STOP_TIME = W83627EHF_REG_FAN_STOP_TIME; data->REG_FAN_MAX_OUTPUT = W83627EHF_REG_FAN_MAX_OUTPUT_COMMON; data->REG_FAN_STEP_OUTPUT = @@ -2347,8 +1880,7 @@ static int w83627ehf_probe(struct platform_device *pdev) goto exit_release; /* Read VID value */ - if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b || - sio_data->kind == nct6775 || sio_data->kind == nct6776) { + if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) { /* * W83667HG has different pins for VID input and output, so * we can get the VID input values directly at logical device D @@ -2356,11 +1888,7 @@ static int w83627ehf_probe(struct platform_device *pdev) */ superio_select(sio_data->sioreg, W83667HG_LD_VID); data->vid = superio_inb(sio_data->sioreg, 0xe3); - err = device_create_file(dev, &dev_attr_cpu0_vid); - if (err) { - superio_exit(sio_data->sioreg); - goto exit_release; - } + data->have_vid = true; } else if (sio_data->kind != w83627uhg) { superio_select(sio_data->sioreg, W83627EHF_LD_HWM); if (superio_inb(sio_data->sioreg, SIO_REG_VID_CTRL) & 0x80) { @@ -2394,190 +1922,33 @@ static int w83627ehf_probe(struct platform_device *pdev) SIO_REG_VID_DATA); if (sio_data->kind == w83627ehf) /* 6 VID pins only */ data->vid &= 0x3f; - - err = device_create_file(dev, &dev_attr_cpu0_vid); - if (err) { - superio_exit(sio_data->sioreg); - goto exit_release; - } + data->have_vid = true; } else { dev_info(dev, "VID pins in output mode, CPU VID not available\n"); } } - if (fan_debounce && - (sio_data->kind == nct6775 || sio_data->kind == nct6776)) { - u8 tmp; - - superio_select(sio_data->sioreg, W83627EHF_LD_HWM); - tmp = superio_inb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE); - if (sio_data->kind == nct6776) - superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE, - 0x3e | tmp); - else - superio_outb(sio_data->sioreg, NCT6775_REG_FAN_DEBOUNCE, - 0x1e | tmp); - pr_info("Enabled fan debounce for chip %s\n", data->name); - } - w83627ehf_check_fan_inputs(sio_data, data); superio_exit(sio_data->sioreg); /* Read fan clock dividers immediately */ - w83627ehf_update_fan_div_common(dev, data); + w83627ehf_update_fan_div(data); /* Read pwm data to save original values */ - w83627ehf_update_pwm_common(dev, data); + w83627ehf_update_pwm(data); for (i = 0; i < data->pwm_num; i++) data->pwm_enable_orig[i] = data->pwm_enable[i]; - /* Register sysfs hooks */ - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays); i++) { - err = device_create_file(dev, &sda_sf3_arrays[i].dev_attr); - if (err) - goto exit_remove; - } - - for (i = 0; i < ARRAY_SIZE(sda_sf3_max_step_arrays); i++) { - struct sensor_device_attribute *attr = - &sda_sf3_max_step_arrays[i]; - if (data->REG_FAN_STEP_OUTPUT && - data->REG_FAN_STEP_OUTPUT[attr->index] != 0xff) { 
- err = device_create_file(dev, &attr->dev_attr); - if (err) - goto exit_remove; - } - } - /* if fan3 and fan4 are enabled create the sf3 files for them */ - if ((data->has_fan & (1 << 2)) && data->pwm_num >= 3) - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan3); i++) { - err = device_create_file(dev, - &sda_sf3_arrays_fan3[i].dev_attr); - if (err) - goto exit_remove; - } - if ((data->has_fan & (1 << 3)) && data->pwm_num >= 4) - for (i = 0; i < ARRAY_SIZE(sda_sf3_arrays_fan4); i++) { - err = device_create_file(dev, - &sda_sf3_arrays_fan4[i].dev_attr); - if (err) - goto exit_remove; - } - - for (i = 0; i < data->in_num; i++) { - if ((i == 6) && data->in6_skip) - continue; - if ((err = device_create_file(dev, &sda_in_input[i].dev_attr)) - || (err = device_create_file(dev, - &sda_in_alarm[i].dev_attr)) - || (err = device_create_file(dev, - &sda_in_min[i].dev_attr)) - || (err = device_create_file(dev, - &sda_in_max[i].dev_attr))) - goto exit_remove; - } - - for (i = 0; i < 5; i++) { - if (data->has_fan & (1 << i)) { - if ((err = device_create_file(dev, - &sda_fan_input[i].dev_attr)) - || (err = device_create_file(dev, - &sda_fan_alarm[i].dev_attr))) - goto exit_remove; - if (sio_data->kind != nct6776) { - err = device_create_file(dev, - &sda_fan_div[i].dev_attr); - if (err) - goto exit_remove; - } - if (data->has_fan_min & (1 << i)) { - err = device_create_file(dev, - &sda_fan_min[i].dev_attr); - if (err) - goto exit_remove; - } - if (i < data->pwm_num && - ((err = device_create_file(dev, - &sda_pwm[i].dev_attr)) - || (err = device_create_file(dev, - &sda_pwm_mode[i].dev_attr)) - || (err = device_create_file(dev, - &sda_pwm_enable[i].dev_attr)) - || (err = device_create_file(dev, - &sda_target_temp[i].dev_attr)) - || (err = device_create_file(dev, - &sda_tolerance[i].dev_attr)))) - goto exit_remove; - } - } + hwmon_dev = devm_hwmon_device_register_with_info(&pdev->dev, + data->name, + data, + &w83627ehf_chip_info, + w83627ehf_groups); - for (i = 0; i < NUM_REG_TEMP; i++) { - if (!(data->have_temp & (1 << i))) - continue; - err = device_create_file(dev, &sda_temp_input[i].dev_attr); - if (err) - goto exit_remove; - if (data->temp_label) { - err = device_create_file(dev, - &sda_temp_label[i].dev_attr); - if (err) - goto exit_remove; - } - if (i == 2 && data->temp3_val_only) - continue; - if (data->reg_temp_over[i]) { - err = device_create_file(dev, - &sda_temp_max[i].dev_attr); - if (err) - goto exit_remove; - } - if (data->reg_temp_hyst[i]) { - err = device_create_file(dev, - &sda_temp_max_hyst[i].dev_attr); - if (err) - goto exit_remove; - } - if (i > 2) - continue; - if ((err = device_create_file(dev, - &sda_temp_alarm[i].dev_attr)) - || (err = device_create_file(dev, - &sda_temp_type[i].dev_attr))) - goto exit_remove; - if (data->have_temp_offset & (1 << i)) { - err = device_create_file(dev, - &sda_temp_offset[i].dev_attr); - if (err) - goto exit_remove; - } - } - - err = device_create_file(dev, &sda_caseopen[0].dev_attr); - if (err) - goto exit_remove; - - if (sio_data->kind == nct6776) { - err = device_create_file(dev, &sda_caseopen[1].dev_attr); - if (err) - goto exit_remove; - } - - err = device_create_file(dev, &dev_attr_name); - if (err) - goto exit_remove; - - data->hwmon_dev = hwmon_device_register(dev); - if (IS_ERR(data->hwmon_dev)) { - err = PTR_ERR(data->hwmon_dev); - goto exit_remove; - } + return PTR_ERR_OR_ZERO(hwmon_dev); - return 0; - -exit_remove: - w83627ehf_device_remove_files(dev); exit_release: release_region(res->start, IOREGION_LENGTH); exit: @@ -2588,8 +1959,6 @@ 
static int w83627ehf_remove(struct platform_device *pdev) { struct w83627ehf_data *data = platform_get_drvdata(pdev); - hwmon_device_unregister(data->hwmon_dev); - w83627ehf_device_remove_files(&pdev->dev); release_region(data->addr, IOREGION_LENGTH); return 0; @@ -2599,14 +1968,9 @@ static int w83627ehf_remove(struct platform_device *pdev) static int w83627ehf_suspend(struct device *dev) { struct w83627ehf_data *data = w83627ehf_update_device(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); mutex_lock(&data->update_lock); data->vbat = w83627ehf_read_value(data, W83627EHF_REG_VBAT); - if (sio_data->kind == nct6775) { - data->fandiv1 = w83627ehf_read_value(data, NCT6775_REG_FANDIV1); - data->fandiv2 = w83627ehf_read_value(data, NCT6775_REG_FANDIV2); - } mutex_unlock(&data->update_lock); return 0; @@ -2615,7 +1979,6 @@ static int w83627ehf_suspend(struct device *dev) static int w83627ehf_resume(struct device *dev) { struct w83627ehf_data *data = dev_get_drvdata(dev); - struct w83627ehf_sio_data *sio_data = dev_get_platdata(dev); int i; mutex_lock(&data->update_lock); @@ -2636,7 +1999,7 @@ static int w83627ehf_resume(struct device *dev) if (!(data->has_fan_min & (1 << i))) continue; - w83627ehf_write_value(data, data->REG_FAN_MIN[i], + w83627ehf_write_value(data, W83627EHF_REG_FAN_MIN[i], data->fan_min[i]); } @@ -2660,10 +2023,6 @@ static int w83627ehf_resume(struct device *dev) /* Restore other settings */ w83627ehf_write_value(data, W83627EHF_REG_VBAT, data->vbat); - if (sio_data->kind == nct6775) { - w83627ehf_write_value(data, NCT6775_REG_FANDIV1, data->fandiv1); - w83627ehf_write_value(data, NCT6775_REG_FANDIV2, data->fandiv2); - } /* Force re-reading all values */ data->valid = 0; @@ -2704,8 +2063,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr, static const char sio_name_W83627UHG[] __initconst = "W83627UHG"; static const char sio_name_W83667HG[] __initconst = "W83667HG"; static const char sio_name_W83667HG_B[] __initconst = "W83667HG-B"; - static const char sio_name_NCT6775[] __initconst = "NCT6775F"; - static const char sio_name_NCT6776[] __initconst = "NCT6776F"; u16 val; const char *sio_name; @@ -2749,14 +2106,6 @@ static int __init w83627ehf_find(int sioaddr, unsigned short *addr, sio_data->kind = w83667hg_b; sio_name = sio_name_W83667HG_B; break; - case SIO_NCT6775_ID: - sio_data->kind = nct6775; - sio_name = sio_name_NCT6775; - break; - case SIO_NCT6776_ID: - sio_data->kind = nct6776; - sio_name = sio_name_NCT6776; - break; default: if (val != 0xffff) pr_debug("unsupported chip ID: 0x%04x\n", val); diff --git a/drivers/i3c/master.c b/drivers/i3c/master.c index 043691656245..7f8f896fa0c3 100644 --- a/drivers/i3c/master.c +++ b/drivers/i3c/master.c @@ -527,8 +527,8 @@ static const struct device_type i3c_masterdev_type = { .groups = i3c_masterdev_groups, }; -int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode, - unsigned long max_i2c_scl_rate) +static int i3c_bus_set_mode(struct i3c_bus *i3cbus, enum i3c_bus_mode mode, + unsigned long max_i2c_scl_rate) { struct i3c_master_controller *master = i3c_bus_to_i3c_master(i3cbus); diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c index b0ff0e12d84c..bd26c3b9634e 100644 --- a/drivers/i3c/master/dw-i3c-master.c +++ b/drivers/i3c/master/dw-i3c-master.c @@ -899,6 +899,22 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev, struct dw_i3c_i2c_dev_data *data = i3c_dev_get_master_data(dev); struct i3c_master_controller *m = 
i3c_dev_get_master(dev); struct dw_i3c_master *master = to_dw_i3c_master(m); + int pos; + + pos = dw_i3c_master_get_free_pos(master); + + if (data->index > pos && pos > 0) { + writel(0, + master->regs + + DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index)); + + master->addrs[data->index] = 0; + master->free_pos |= BIT(data->index); + + data->index = pos; + master->addrs[pos] = dev->info.dyn_addr; + master->free_pos &= ~BIT(pos); + } writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr), master->regs + @@ -1100,15 +1116,13 @@ static const struct i3c_master_controller_ops dw_mipi_i3c_ops = { static int dw_i3c_probe(struct platform_device *pdev) { struct dw_i3c_master *master; - struct resource *res; int ret, irq; master = devm_kzalloc(&pdev->dev, sizeof(*master), GFP_KERNEL); if (!master) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - master->regs = devm_ioremap_resource(&pdev->dev, res); + master->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(master->regs)) return PTR_ERR(master->regs); diff --git a/drivers/i3c/master/i3c-master-cdns.c b/drivers/i3c/master/i3c-master-cdns.c index 10db0bf0655a..54712793709e 100644 --- a/drivers/i3c/master/i3c-master-cdns.c +++ b/drivers/i3c/master/i3c-master-cdns.c @@ -22,6 +22,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/workqueue.h> +#include <linux/of_device.h> #define DEV_ID 0x0 #define DEV_ID_I3C_MASTER 0x5034 @@ -60,6 +61,7 @@ #define CTRL_HALT_EN BIT(30) #define CTRL_MCS BIT(29) #define CTRL_MCS_EN BIT(28) +#define CTRL_THD_DELAY(x) (((x) << 24) & GENMASK(25, 24)) #define CTRL_HJ_DISEC BIT(8) #define CTRL_MST_ACK BIT(7) #define CTRL_HJ_ACK BIT(6) @@ -70,6 +72,7 @@ #define CTRL_MIXED_FAST_BUS_MODE 2 #define CTRL_MIXED_SLOW_BUS_MODE 3 #define CTRL_BUS_MODE_MASK GENMASK(1, 0) +#define THD_DELAY_MAX 3 #define PRESCL_CTRL0 0x14 #define PRESCL_CTRL0_I2C(x) ((x) << 16) @@ -388,6 +391,10 @@ struct cdns_i3c_xfer { struct cdns_i3c_cmd cmds[0]; }; +struct cdns_i3c_data { + u8 thd_delay_ns; +}; + struct cdns_i3c_master { struct work_struct hj_work; struct i3c_master_controller base; @@ -408,6 +415,7 @@ struct cdns_i3c_master { struct clk *pclk; struct cdns_i3c_master_caps caps; unsigned long i3c_scl_lim; + const struct cdns_i3c_data *devdata; }; static inline struct cdns_i3c_master * @@ -1181,6 +1189,20 @@ static int cdns_i3c_master_do_daa(struct i3c_master_controller *m) return 0; } +static u8 cdns_i3c_master_calculate_thd_delay(struct cdns_i3c_master *master) +{ + unsigned long sysclk_rate = clk_get_rate(master->sysclk); + u8 thd_delay = DIV_ROUND_UP(master->devdata->thd_delay_ns, + (NSEC_PER_SEC / sysclk_rate)); + + /* Every value greater than 3 is not valid. */ + if (thd_delay > THD_DELAY_MAX) + thd_delay = THD_DELAY_MAX; + + /* CTLR_THD_DEL value is encoded. */ + return (THD_DELAY_MAX - thd_delay); +} + static int cdns_i3c_master_bus_init(struct i3c_master_controller *m) { struct cdns_i3c_master *master = to_cdns_i3c_master(m); @@ -1264,6 +1286,15 @@ static int cdns_i3c_master_bus_init(struct i3c_master_controller *m) * We will issue ENTDAA afterwards from the threaded IRQ handler. */ ctrl |= CTRL_HJ_ACK | CTRL_HJ_DISEC | CTRL_HALT_EN | CTRL_MCS_EN; + + /* + * Configure data hold delay based on device-specific data. + * + * MIPI I3C Specification 1.0 defines non-zero minimal tHD_PP timing on + * master output. This setting allows to meet this timing on master's + * SoC outputs, regardless of PCB balancing. 
+ */ + ctrl |= CTRL_THD_DELAY(cdns_i3c_master_calculate_thd_delay(master)); writel(ctrl, master->regs + CTRL); cdns_i3c_master_enable(master); @@ -1521,10 +1552,18 @@ static void cdns_i3c_master_hj(struct work_struct *work) i3c_master_do_daa(&master->base); } +static struct cdns_i3c_data cdns_i3c_devdata = { + .thd_delay_ns = 10, +}; + +static const struct of_device_id cdns_i3c_master_of_ids[] = { + { .compatible = "cdns,i3c-master", .data = &cdns_i3c_devdata }, + { /* sentinel */ }, +}; + static int cdns_i3c_master_probe(struct platform_device *pdev) { struct cdns_i3c_master *master; - struct resource *res; int ret, irq; u32 val; @@ -1532,8 +1571,11 @@ static int cdns_i3c_master_probe(struct platform_device *pdev) if (!master) return -ENOMEM; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - master->regs = devm_ioremap_resource(&pdev->dev, res); + master->devdata = of_device_get_match_data(&pdev->dev); + if (!master->devdata) + return -EINVAL; + + master->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(master->regs)) return PTR_ERR(master->regs); @@ -1631,11 +1673,6 @@ static int cdns_i3c_master_remove(struct platform_device *pdev) return 0; } -static const struct of_device_id cdns_i3c_master_of_ids[] = { - { .compatible = "cdns,i3c-master" }, - { /* sentinel */ }, -}; - static struct platform_driver cdns_i3c_master = { .probe = cdns_i3c_master_probe, .remove = cdns_i3c_master_remove, diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index a1a035270cab..b273e421e910 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -2575,17 +2575,6 @@ isert_wait4logout(struct isert_conn *isert_conn) } } -static void -isert_wait4cmds(struct iscsi_conn *conn) -{ - isert_info("iscsi_conn %p\n", conn); - - if (conn->sess) { - target_sess_cmd_list_set_waiting(conn->sess->se_sess); - target_wait_for_sess_cmds(conn->sess->se_sess); - } -} - /** * isert_put_unsol_pending_cmds() - Drop commands waiting for * unsolicitate dataout @@ -2633,7 +2622,6 @@ static void isert_wait_conn(struct iscsi_conn *conn) ib_drain_qp(isert_conn->qp); isert_put_unsol_pending_cmds(conn); - isert_wait4cmds(conn); isert_wait4logout(isert_conn); queue_work(isert_release_wq, &isert_conn->release_work); diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c index f918fca9ada3..cb6e3a5f509c 100644 --- a/drivers/input/evdev.c +++ b/drivers/input/evdev.c @@ -484,10 +484,7 @@ static int evdev_open(struct inode *inode, struct file *file) struct evdev_client *client; int error; - client = kzalloc(struct_size(client, buffer, bufsize), - GFP_KERNEL | __GFP_NOWARN); - if (!client) - client = vzalloc(struct_size(client, buffer, bufsize)); + client = kvzalloc(struct_size(client, buffer, bufsize), GFP_KERNEL); if (!client) return -ENOMEM; diff --git a/drivers/input/misc/keyspan_remote.c b/drivers/input/misc/keyspan_remote.c index 83368f1e7c4e..4650f4a94989 100644 --- a/drivers/input/misc/keyspan_remote.c +++ b/drivers/input/misc/keyspan_remote.c @@ -336,7 +336,8 @@ static int keyspan_setup(struct usb_device* dev) int retval = 0; retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - 0x11, 0x40, 0x5601, 0x0, NULL, 0, 0); + 0x11, 0x40, 0x5601, 0x0, NULL, 0, + USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to set bit rate due to error: %d\n", __func__, retval); @@ -344,7 +345,8 @@ static int keyspan_setup(struct usb_device* dev) } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - 0x44, 0x40, 0x0, 0x0, NULL, 
0, 0); + 0x44, 0x40, 0x0, 0x0, NULL, 0, + USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to set resume sensitivity due to error: %d\n", __func__, retval); @@ -352,7 +354,8 @@ static int keyspan_setup(struct usb_device* dev) } retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - 0x22, 0x40, 0x0, 0x0, NULL, 0, 0); + 0x22, 0x40, 0x0, 0x0, NULL, 0, + USB_CTRL_SET_TIMEOUT); if (retval) { dev_dbg(&dev->dev, "%s - failed to turn receive on due to error: %d\n", __func__, retval); diff --git a/drivers/input/misc/max77650-onkey.c b/drivers/input/misc/max77650-onkey.c index 4d875f2ac13d..ee55f22dbca5 100644 --- a/drivers/input/misc/max77650-onkey.c +++ b/drivers/input/misc/max77650-onkey.c @@ -108,9 +108,16 @@ static int max77650_onkey_probe(struct platform_device *pdev) return input_register_device(onkey->input); } +static const struct of_device_id max77650_onkey_of_match[] = { + { .compatible = "maxim,max77650-onkey" }, + { } +}; +MODULE_DEVICE_TABLE(of, max77650_onkey_of_match); + static struct platform_driver max77650_onkey_driver = { .driver = { .name = "max77650-onkey", + .of_match_table = max77650_onkey_of_match, }, .probe = max77650_onkey_probe, }; diff --git a/drivers/input/misc/pm8xxx-vibrator.c b/drivers/input/misc/pm8xxx-vibrator.c index ecd762f93732..53ad25eaf1a2 100644 --- a/drivers/input/misc/pm8xxx-vibrator.c +++ b/drivers/input/misc/pm8xxx-vibrator.c @@ -90,7 +90,7 @@ static int pm8xxx_vib_set(struct pm8xxx_vib *vib, bool on) if (regs->enable_mask) rc = regmap_update_bits(vib->regmap, regs->enable_addr, - on ? regs->enable_mask : 0, val); + regs->enable_mask, on ? ~0 : 0); return rc; } diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c index 0bc01cfc2b51..6b23e679606e 100644 --- a/drivers/input/rmi4/rmi_f54.c +++ b/drivers/input/rmi4/rmi_f54.c @@ -24,6 +24,12 @@ #define F54_NUM_TX_OFFSET 1 #define F54_NUM_RX_OFFSET 0 +/* + * The smbus protocol can read only 32 bytes max at a time. + * But this should be fine for i2c/spi as well. 
+ */ +#define F54_REPORT_DATA_SIZE 32 + /* F54 commands */ #define F54_GET_REPORT 1 #define F54_FORCE_CAL 2 @@ -526,6 +532,7 @@ static void rmi_f54_work(struct work_struct *work) int report_size; u8 command; int error; + int i; report_size = rmi_f54_get_report_size(f54); if (report_size == 0) { @@ -558,23 +565,27 @@ static void rmi_f54_work(struct work_struct *work) rmi_dbg(RMI_DEBUG_FN, &fn->dev, "Get report command completed, reading data\n"); - fifo[0] = 0; - fifo[1] = 0; - error = rmi_write_block(fn->rmi_dev, - fn->fd.data_base_addr + F54_FIFO_OFFSET, - fifo, sizeof(fifo)); - if (error) { - dev_err(&fn->dev, "Failed to set fifo start offset\n"); - goto abort; - } + for (i = 0; i < report_size; i += F54_REPORT_DATA_SIZE) { + int size = min(F54_REPORT_DATA_SIZE, report_size - i); + + fifo[0] = i & 0xff; + fifo[1] = i >> 8; + error = rmi_write_block(fn->rmi_dev, + fn->fd.data_base_addr + F54_FIFO_OFFSET, + fifo, sizeof(fifo)); + if (error) { + dev_err(&fn->dev, "Failed to set fifo start offset\n"); + goto abort; + } - error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr + - F54_REPORT_DATA_OFFSET, f54->report_data, - report_size); - if (error) { - dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n", - __func__, report_size, error); - goto abort; + error = rmi_read_block(fn->rmi_dev, fn->fd.data_base_addr + + F54_REPORT_DATA_OFFSET, + f54->report_data + i, size); + if (error) { + dev_err(&fn->dev, "%s: read [%d bytes] returned %d\n", + __func__, size, error); + goto abort; + } } abort: diff --git a/drivers/input/rmi4/rmi_smbus.c b/drivers/input/rmi4/rmi_smbus.c index b313c579914f..2407ea43de59 100644 --- a/drivers/input/rmi4/rmi_smbus.c +++ b/drivers/input/rmi4/rmi_smbus.c @@ -163,6 +163,7 @@ static int rmi_smb_write_block(struct rmi_transport_dev *xport, u16 rmiaddr, /* prepare to write next block of bytes */ cur_len -= SMB_MAX_COUNT; databuff += SMB_MAX_COUNT; + rmiaddr += SMB_MAX_COUNT; } exit: mutex_unlock(&rmi_smb->page_mutex); @@ -214,6 +215,7 @@ static int rmi_smb_read_block(struct rmi_transport_dev *xport, u16 rmiaddr, /* prepare to read next block of bytes */ cur_len -= SMB_MAX_COUNT; databuff += SMB_MAX_COUNT; + rmiaddr += SMB_MAX_COUNT; } retval = 0; diff --git a/drivers/input/tablet/aiptek.c b/drivers/input/tablet/aiptek.c index 2ca586fb914f..e08b0ef078e8 100644 --- a/drivers/input/tablet/aiptek.c +++ b/drivers/input/tablet/aiptek.c @@ -1713,7 +1713,7 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) aiptek->inputdev = inputdev; aiptek->intf = intf; - aiptek->ifnum = intf->altsetting[0].desc.bInterfaceNumber; + aiptek->ifnum = intf->cur_altsetting->desc.bInterfaceNumber; aiptek->inDelay = 0; aiptek->endDelay = 0; aiptek->previousJitterable = 0; @@ -1802,14 +1802,14 @@ aiptek_probe(struct usb_interface *intf, const struct usb_device_id *id) input_set_abs_params(inputdev, ABS_WHEEL, AIPTEK_WHEEL_MIN, AIPTEK_WHEEL_MAX - 1, 0, 0); /* Verify that a device really has an endpoint */ - if (intf->altsetting[0].desc.bNumEndpoints < 1) { + if (intf->cur_altsetting->desc.bNumEndpoints < 1) { dev_err(&intf->dev, "interface has %d endpoints, but must have minimum 1\n", - intf->altsetting[0].desc.bNumEndpoints); + intf->cur_altsetting->desc.bNumEndpoints); err = -EINVAL; goto fail3; } - endpoint = &intf->altsetting[0].endpoint[0].desc; + endpoint = &intf->cur_altsetting->endpoint[0].desc; /* Go set up our URB, which is called when the tablet receives * input. 
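The rmi_f54 and rmi_smbus hunks above share one pattern: when a large transfer is split into SMBus-sized chunks, the device-side address and the destination offset have to advance in lock-step, otherwise every iteration rereads the first block. A minimal kernel-style sketch of that pattern (the helper name and the CHUNK constant are illustrative; rmi_read_block() is the primitive used by the code above):

#define CHUNK 32	/* SMBus block transfers top out at 32 bytes */

/* Illustrative only: read 'total' bytes starting at 'addr' in CHUNK-sized pieces. */
static int read_chunked(struct rmi_device *rmi_dev, u16 addr, u8 *buf, int total)
{
	int i;

	for (i = 0; i < total; i += CHUNK) {
		int size = min(CHUNK, total - i);
		int error;

		/* device address and buffer offset advance together */
		error = rmi_read_block(rmi_dev, addr + i, buf + i, size);
		if (error)
			return error;
	}

	return 0;
}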
diff --git a/drivers/input/tablet/gtco.c b/drivers/input/tablet/gtco.c index 35031228a6d0..96d65575f75a 100644 --- a/drivers/input/tablet/gtco.c +++ b/drivers/input/tablet/gtco.c @@ -875,18 +875,14 @@ static int gtco_probe(struct usb_interface *usbinterface, } /* Sanity check that a device has an endpoint */ - if (usbinterface->altsetting[0].desc.bNumEndpoints < 1) { + if (usbinterface->cur_altsetting->desc.bNumEndpoints < 1) { dev_err(&usbinterface->dev, "Invalid number of endpoints\n"); error = -EINVAL; goto err_free_urb; } - /* - * The endpoint is always altsetting 0, we know this since we know - * this device only has one interrupt endpoint - */ - endpoint = &usbinterface->altsetting[0].endpoint[0].desc; + endpoint = &usbinterface->cur_altsetting->endpoint[0].desc; /* Some debug */ dev_dbg(&usbinterface->dev, "gtco # interfaces: %d\n", usbinterface->num_altsetting); @@ -896,7 +892,8 @@ static int gtco_probe(struct usb_interface *usbinterface, if (usb_endpoint_xfer_int(endpoint)) dev_dbg(&usbinterface->dev, "endpoint: we have interrupt endpoint\n"); - dev_dbg(&usbinterface->dev, "endpoint extra len:%d\n", usbinterface->altsetting[0].extralen); + dev_dbg(&usbinterface->dev, "interface extra len:%d\n", + usbinterface->cur_altsetting->extralen); /* * Find the HID descriptor so we can find out the size of the @@ -973,8 +970,6 @@ static int gtco_probe(struct usb_interface *usbinterface, input_dev->dev.parent = &usbinterface->dev; /* Setup the URB, it will be posted later on open of input device */ - endpoint = &usbinterface->altsetting[0].endpoint[0].desc; - usb_fill_int_urb(gtco->urbinfo, udev, usb_rcvintpipe(udev, diff --git a/drivers/input/tablet/pegasus_notetaker.c b/drivers/input/tablet/pegasus_notetaker.c index a1f3a0cb197e..38f087404f7a 100644 --- a/drivers/input/tablet/pegasus_notetaker.c +++ b/drivers/input/tablet/pegasus_notetaker.c @@ -275,7 +275,7 @@ static int pegasus_probe(struct usb_interface *intf, return -ENODEV; /* Sanity check that the device has an endpoint */ - if (intf->altsetting[0].desc.bNumEndpoints < 1) { + if (intf->cur_altsetting->desc.bNumEndpoints < 1) { dev_err(&intf->dev, "Invalid number of endpoints\n"); return -EINVAL; } diff --git a/drivers/input/touchscreen/sun4i-ts.c b/drivers/input/touchscreen/sun4i-ts.c index 0af0fe8c40d7..742a7e96c1b5 100644 --- a/drivers/input/touchscreen/sun4i-ts.c +++ b/drivers/input/touchscreen/sun4i-ts.c @@ -237,6 +237,7 @@ static int sun4i_ts_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct device *hwmon; + struct thermal_zone_device *thermal; int error; u32 reg; bool ts_attached; @@ -355,7 +356,10 @@ static int sun4i_ts_probe(struct platform_device *pdev) if (IS_ERR(hwmon)) return PTR_ERR(hwmon); - devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, &sun4i_ts_tz_ops); + thermal = devm_thermal_zone_of_sensor_register(ts->dev, 0, ts, + &sun4i_ts_tz_ops); + if (IS_ERR(thermal)) + return PTR_ERR(thermal); writel(TEMP_IRQ_EN(1), ts->base + TP_INT_FIFOC); diff --git a/drivers/input/touchscreen/sur40.c b/drivers/input/touchscreen/sur40.c index 1dd47dda71cd..34d31c7ec8ba 100644 --- a/drivers/input/touchscreen/sur40.c +++ b/drivers/input/touchscreen/sur40.c @@ -661,7 +661,7 @@ static int sur40_probe(struct usb_interface *interface, int error; /* Check if we really have the right interface. 
*/ - iface_desc = &interface->altsetting[0]; + iface_desc = interface->cur_altsetting; if (iface_desc->desc.bInterfaceClass != 0xFF) return -ENODEV; diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c index 568c52317757..483f7bc379fa 100644 --- a/drivers/iommu/amd_iommu_init.c +++ b/drivers/iommu/amd_iommu_init.c @@ -1655,27 +1655,39 @@ static int iommu_pc_get_set_reg(struct amd_iommu *iommu, u8 bank, u8 cntr, static void init_iommu_perf_ctr(struct amd_iommu *iommu) { struct pci_dev *pdev = iommu->dev; - u64 val = 0xabcd, val2 = 0; + u64 val = 0xabcd, val2 = 0, save_reg = 0; if (!iommu_feature(iommu, FEATURE_PC)) return; amd_iommu_pc_present = true; + /* save the value to restore, if writable */ + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, false)) + goto pc_false; + /* Check if the performance counters can be written to */ if ((iommu_pc_get_set_reg(iommu, 0, 0, 0, &val, true)) || (iommu_pc_get_set_reg(iommu, 0, 0, 0, &val2, false)) || - (val != val2)) { - pci_err(pdev, "Unable to write to IOMMU perf counter.\n"); - amd_iommu_pc_present = false; - return; - } + (val != val2)) + goto pc_false; + + /* restore */ + if (iommu_pc_get_set_reg(iommu, 0, 0, 0, &save_reg, true)) + goto pc_false; pci_info(pdev, "IOMMU performance counters supported\n"); val = readl(iommu->mmio_base + MMIO_CNTR_CONF_OFFSET); iommu->max_banks = (u8) ((val >> 12) & 0x3f); iommu->max_counters = (u8) ((val >> 7) & 0xf); + + return; + +pc_false: + pci_err(pdev, "Unable to read/write to IOMMU perf counter.\n"); + amd_iommu_pc_present = false; + return; } static ssize_t amd_iommu_show_cap(struct device *dev, diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 1801f0aaf013..932267f49f9a 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -5163,7 +5163,8 @@ static void dmar_remove_one_dev_info(struct device *dev) spin_lock_irqsave(&device_domain_lock, flags); info = dev->archdata.iommu; - if (info) + if (info && info != DEFER_DEVICE_DOMAIN_INFO + && info != DUMMY_DEVICE_DOMAIN_INFO) __dmar_remove_one_dev_info(info); spin_unlock_irqrestore(&device_domain_lock, flags); } diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 95b41c0891d0..663d87924e5e 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -1107,7 +1107,7 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) card->erase_arg == MMC_TRIM_ARG ? INAND_CMD38_ARG_TRIM : INAND_CMD38_ARG_ERASE, - 0); + card->ext_csd.generic_cmd6_time); } if (!err) err = mmc_erase(card, from, nr, card->erase_arg); @@ -1149,7 +1149,7 @@ retry: arg == MMC_SECURE_TRIM1_ARG ? 
INAND_CMD38_ARG_SECTRIM1 : INAND_CMD38_ARG_SECERASE, - 0); + card->ext_csd.generic_cmd6_time); if (err) goto out_retry; } @@ -1167,7 +1167,7 @@ retry: err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, INAND_CMD38_ARG_SECTRIM2, - 0); + card->ext_csd.generic_cmd6_time); if (err) goto out_retry; } diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index abf8f5eb0a1c..aa54d359dab7 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -2330,7 +2330,13 @@ void mmc_rescan(struct work_struct *work) } for (i = 0; i < ARRAY_SIZE(freqs); i++) { - if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) + unsigned int freq = freqs[i]; + if (freq > host->f_max) { + if (i + 1 < ARRAY_SIZE(freqs)) + continue; + freq = host->f_max; + } + if (!mmc_rescan_try_freq(host, max(freq, host->f_min))) break; if (freqs[i] <= host->f_min) break; @@ -2344,7 +2350,7 @@ void mmc_rescan(struct work_struct *work) void mmc_start_host(struct mmc_host *host) { - host->f_init = max(freqs[0], host->f_min); + host->f_init = max(min(freqs[0], host->f_max), host->f_min); host->rescan_disable = 0; host->ios.power_mode = MMC_POWER_UNDEFINED; diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 105b7a7c0251..c8768726d925 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -175,8 +175,6 @@ int mmc_of_parse(struct mmc_host *host) struct device *dev = host->parent; u32 bus_width, drv_type, cd_debounce_delay_ms; int ret; - bool cd_cap_invert, cd_gpio_invert = false; - bool ro_cap_invert, ro_gpio_invert = false; if (!dev || !dev_fwnode(dev)) return 0; @@ -219,10 +217,12 @@ int mmc_of_parse(struct mmc_host *host) */ /* Parse Card Detection */ + if (device_property_read_bool(dev, "non-removable")) { host->caps |= MMC_CAP_NONREMOVABLE; } else { - cd_cap_invert = device_property_read_bool(dev, "cd-inverted"); + if (device_property_read_bool(dev, "cd-inverted")) + host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; if (device_property_read_u32(dev, "cd-debounce-delay-ms", &cd_debounce_delay_ms)) @@ -232,32 +232,19 @@ int mmc_of_parse(struct mmc_host *host) host->caps |= MMC_CAP_NEEDS_POLL; ret = mmc_gpiod_request_cd(host, "cd", 0, false, - cd_debounce_delay_ms * 1000, - &cd_gpio_invert); + cd_debounce_delay_ms * 1000); if (!ret) dev_info(host->parent, "Got CD GPIO\n"); else if (ret != -ENOENT && ret != -ENOSYS) return ret; - - /* - * There are two ways to flag that the CD line is inverted: - * through the cd-inverted flag and by the GPIO line itself - * being inverted from the GPIO subsystem. This is a leftover - * from the times when the GPIO subsystem did not make it - * possible to flag a line as inverted. - * - * If the capability on the host AND the GPIO line are - * both inverted, the end result is that the CD line is - * not inverted. 
- */ - if (cd_cap_invert ^ cd_gpio_invert) - host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; } /* Parse Write Protection */ - ro_cap_invert = device_property_read_bool(dev, "wp-inverted"); - ret = mmc_gpiod_request_ro(host, "wp", 0, 0, &ro_gpio_invert); + if (device_property_read_bool(dev, "wp-inverted")) + host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + ret = mmc_gpiod_request_ro(host, "wp", 0, 0); if (!ret) dev_info(host->parent, "Got WP GPIO\n"); else if (ret != -ENOENT && ret != -ENOSYS) @@ -266,10 +253,6 @@ int mmc_of_parse(struct mmc_host *host) if (device_property_read_bool(dev, "disable-wp")) host->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; - /* See the comment on CD inversion above */ - if (ro_cap_invert ^ ro_gpio_invert) - host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - if (device_property_read_bool(dev, "cap-sd-highspeed")) host->caps |= MMC_CAP_SD_HIGHSPEED; if (device_property_read_bool(dev, "cap-mmc-highspeed")) diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 09113b9ad679..da425ee2d9bf 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -19,7 +19,9 @@ #include "host.h" #include "mmc_ops.h" -#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ +#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10min*/ +#define MMC_BKOPS_TIMEOUT_MS (120 * 1000) /* 120s */ +#define MMC_CACHE_FLUSH_TIMEOUT_MS (30 * 1000) /* 30s */ static const u8 tuning_blk_pattern_4bit[] = { 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, @@ -458,10 +460,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms, bool expired = false; bool busy = false; - /* We have an unspecified cmd timeout, use the fallback value. */ - if (!timeout_ms) - timeout_ms = MMC_OPS_TIMEOUT_MS; - /* * In cases when not allowed to poll by using CMD13 or because we aren't * capable of polling by using ->card_busy(), then rely on waiting the @@ -534,14 +532,19 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, mmc_retune_hold(host); + if (!timeout_ms) { + pr_warn("%s: unspecified timeout for CMD6 - use generic\n", + mmc_hostname(host)); + timeout_ms = card->ext_csd.generic_cmd6_time; + } + /* - * If the cmd timeout and the max_busy_timeout of the host are both - * specified, let's validate them. A failure means we need to prevent - * the host from doing hw busy detection, which is done by converting - * to a R1 response instead of a R1B. + * If the max_busy_timeout of the host is specified, make sure it's + * enough to fit the used timeout_ms. In case it's not, let's instruct + * the host to avoid HW busy detection, by converting to a R1 response + * instead of a R1B. */ - if (timeout_ms && host->max_busy_timeout && - (timeout_ms > host->max_busy_timeout)) + if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) use_r1b_resp = false; cmd.opcode = MMC_SWITCH; @@ -552,10 +555,6 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, cmd.flags = MMC_CMD_AC; if (use_r1b_resp) { cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B; - /* - * A busy_timeout of zero means the host can decide to use - * whatever value it finds suitable. - */ cmd.busy_timeout = timeout_ms; } else { cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1; @@ -941,7 +940,7 @@ void mmc_run_bkops(struct mmc_card *card) * urgent levels by using an asynchronous background task, when idle. 
*/ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_BKOPS_START, 1, MMC_OPS_TIMEOUT_MS); + EXT_CSD_BKOPS_START, 1, MMC_BKOPS_TIMEOUT_MS); if (err) pr_warn("%s: Error %d starting bkops\n", mmc_hostname(card->host), err); @@ -961,7 +960,8 @@ int mmc_flush_cache(struct mmc_card *card) (card->ext_csd.cache_size > 0) && (card->ext_csd.cache_ctrl & 1)) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_FLUSH_CACHE, 1, 0); + EXT_CSD_FLUSH_CACHE, 1, + MMC_CACHE_FLUSH_TIMEOUT_MS); if (err) pr_err("%s: cache flush error %d\n", mmc_hostname(card->host), err); diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c index da2596c5fa28..05e907451df9 100644 --- a/drivers/mmc/core/slot-gpio.c +++ b/drivers/mmc/core/slot-gpio.c @@ -19,7 +19,6 @@ struct mmc_gpio { struct gpio_desc *ro_gpio; struct gpio_desc *cd_gpio; - bool override_cd_active_level; irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id); char *ro_label; char *cd_label; @@ -80,13 +79,6 @@ int mmc_gpio_get_cd(struct mmc_host *host) return -ENOSYS; cansleep = gpiod_cansleep(ctx->cd_gpio); - if (ctx->override_cd_active_level) { - int value = cansleep ? - gpiod_get_raw_value_cansleep(ctx->cd_gpio) : - gpiod_get_raw_value(ctx->cd_gpio); - return !value ^ !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); - } - return cansleep ? gpiod_get_value_cansleep(ctx->cd_gpio) : gpiod_get_value(ctx->cd_gpio); @@ -168,8 +160,6 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr); * @idx: index of the GPIO to obtain in the consumer * @override_active_level: ignore %GPIO_ACTIVE_LOW flag * @debounce: debounce time in microseconds - * @gpio_invert: will return whether the GPIO line is inverted or not, set - * to NULL to ignore * * Note that this must be called prior to mmc_add_host() * otherwise the caller must also call mmc_gpiod_request_cd_irq(). @@ -178,7 +168,7 @@ EXPORT_SYMBOL(mmc_gpio_set_cd_isr); */ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, unsigned int idx, bool override_active_level, - unsigned int debounce, bool *gpio_invert) + unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; @@ -194,10 +184,14 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, ctx->cd_debounce_delay_ms = debounce / 1000; } - if (gpio_invert) - *gpio_invert = !gpiod_is_active_low(desc); + /* override forces default (active-low) polarity ... */ + if (override_active_level && !gpiod_is_active_low(desc)) + gpiod_toggle_active_low(desc); + + /* ... or active-high */ + if (host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) + gpiod_toggle_active_low(desc); - ctx->override_cd_active_level = override_active_level; ctx->cd_gpio = desc; return 0; @@ -218,14 +212,11 @@ EXPORT_SYMBOL(mmc_can_gpio_cd); * @con_id: function within the GPIO consumer * @idx: index of the GPIO to obtain in the consumer * @debounce: debounce time in microseconds - * @gpio_invert: will return whether the GPIO line is inverted or not, - * set to NULL to ignore * * Returns zero on success, else an error. 
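 *
 * A typical call now looks like this (illustrative; it mirrors the
 * updated callers elsewhere in this diff, e.g. davinci_mmc.c):
 *
 *	ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0);
 *	if (ret == -EPROBE_DEFER)
 *		return ret;
 *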
*/ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, - unsigned int idx, - unsigned int debounce, bool *gpio_invert) + unsigned int idx, unsigned int debounce) { struct mmc_gpio *ctx = host->slot.handler_priv; struct gpio_desc *desc; @@ -241,8 +232,8 @@ int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, return ret; } - if (gpio_invert) - *gpio_invert = !gpiod_is_active_low(desc); + if (host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH) + gpiod_toggle_active_low(desc); ctx->ro_gpio = desc; diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index d06b2dfe3c95..3a5089f0332c 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -501,6 +501,7 @@ config MMC_SDHCI_MSM depends on ARCH_QCOM || (ARM && COMPILE_TEST) depends on MMC_SDHCI_PLTFM select MMC_SDHCI_IO_ACCESSORS + select MMC_CQHCI help This selects the Secure Digital Host Controller Interface (SDHCI) support present in Qualcomm SOCs. The controller supports @@ -990,6 +991,7 @@ config MMC_SDHCI_BRCMSTB tristate "Broadcom SDIO/SD/MMC support" depends on ARCH_BRCMSTB || BMIPS_GENERIC depends on MMC_SDHCI_PLTFM + select MMC_CQHCI default y help This selects support for the SDIO/SD/MMC Host Controller on @@ -1010,6 +1012,7 @@ config MMC_SDHCI_OMAP depends on MMC_SDHCI_PLTFM && OF select THERMAL imply TI_SOC_THERMAL + select MMC_SDHCI_EXTERNAL_DMA if DMA_ENGINE help This selects the Secure Digital Host Controller Interface (SDHCI) support present in TI's DRA7 SOCs. The controller supports @@ -1040,3 +1043,6 @@ config MMC_OWL help This selects support for the SD/MMC Host Controller on Actions Semi Owl SoCs. + +config MMC_SDHCI_EXTERNAL_DMA + bool diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 6f065bb5c55a..aeaaa5314924 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -2645,7 +2645,7 @@ static int atmci_runtime_resume(struct device *dev) { struct atmel_mci *host = dev_get_drvdata(dev); - pinctrl_pm_select_default_state(dev); + pinctrl_select_default_state(dev); return clk_prepare_enable(host->mck); } diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index bc8aeb47a7b4..8823680ca42c 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -984,12 +984,9 @@ static int au1xmmc_probe(struct platform_device *pdev) goto out2; } - r = platform_get_resource(pdev, IORESOURCE_IRQ, 0); - if (!r) { - dev_err(&pdev->dev, "no IRQ defined\n"); + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) goto out3; - } - host->irq = r->start; mmc->ops = &au1xmmc_ops; diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 99f61fd2a658..c3d949847cbd 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1393,7 +1393,17 @@ static int bcm2835_probe(struct platform_device *pdev) host->dma_chan = NULL; host->dma_desc = NULL; - host->dma_chan_rxtx = dma_request_slave_channel(dev, "rx-tx"); + host->dma_chan_rxtx = dma_request_chan(dev, "rx-tx"); + if (IS_ERR(host->dma_chan_rxtx)) { + ret = PTR_ERR(host->dma_chan_rxtx); + host->dma_chan_rxtx = NULL; + + if (ret == -EPROBE_DEFER) + goto err; + + /* Ignore errors to fall back to PIO mode */ + } + clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) { diff --git a/drivers/mmc/host/cavium-thunderx.c b/drivers/mmc/host/cavium-thunderx.c index eee08d81b242..76013bbbcff3 100644 --- a/drivers/mmc/host/cavium-thunderx.c +++ b/drivers/mmc/host/cavium-thunderx.c @@ -76,8 +76,10 @@ static int thunder_mmc_probe(struct pci_dev *pdev, return ret; 
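/*
 * A minimal sketch of the cleanup pattern the cavium-thunderx hunks below
 * move to: early returns become "goto error" so that the PCI regions
 * requested at probe time are released on every failure path, and remove()
 * now releases them too. probe_sketch() and do_setup() are hypothetical;
 * only pci_request_regions()/pci_release_regions() are real kernel APIs.
 */
static int probe_sketch(struct pci_dev *pdev)
{
	int ret;

	ret = pci_request_regions(pdev, "probe_sketch");
	if (ret)
		return ret;		/* nothing to unwind yet */

	ret = do_setup(pdev);		/* hypothetical later init step */
	if (ret)
		goto error;		/* unwind in reverse order */

	return 0;

error:
	pci_release_regions(pdev);
	return ret;
}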
host->base = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); - if (!host->base) - return -EINVAL; + if (!host->base) { + ret = -EINVAL; + goto error; + } /* On ThunderX these are identical */ host->dma_base = host->base; @@ -86,12 +88,14 @@ static int thunder_mmc_probe(struct pci_dev *pdev, host->reg_off_dma = 0x160; host->clk = devm_clk_get(dev, NULL); - if (IS_ERR(host->clk)) - return PTR_ERR(host->clk); + if (IS_ERR(host->clk)) { + ret = PTR_ERR(host->clk); + goto error; + } ret = clk_prepare_enable(host->clk); if (ret) - return ret; + goto error; host->sys_freq = clk_get_rate(host->clk); spin_lock_init(&host->irq_handler_lock); @@ -157,6 +161,7 @@ error: } } clk_disable_unprepare(host->clk); + pci_release_regions(pdev); return ret; } @@ -175,6 +180,7 @@ static void thunder_mmc_remove(struct pci_dev *pdev) writeq(dma_cfg, host->dma_base + MIO_EMM_DMA_CFG(host)); clk_disable_unprepare(host->clk); + pci_release_regions(pdev); } static const struct pci_device_id thunder_mmc_id_table[] = { diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index ebfaeb33bc8c..f01fecd75833 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -1174,13 +1174,13 @@ static int mmc_davinci_parse_pdata(struct mmc_host *mmc) mmc->caps |= pdata->caps; /* Register a cd gpio, if there is not one, enable polling */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) return ret; else if (ret) mmc->caps |= MMC_CAP_NEEDS_POLL; - ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index fc9d4d000f97..bc5278ab5707 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -833,12 +833,14 @@ static int dw_mci_edmac_init(struct dw_mci *host) if (!host->dms) return -ENOMEM; - host->dms->ch = dma_request_slave_channel(host->dev, "rx-tx"); - if (!host->dms->ch) { + host->dms->ch = dma_request_chan(host->dev, "rx-tx"); + if (IS_ERR(host->dms->ch)) { + int ret = PTR_ERR(host->dms->ch); + dev_err(host->dev, "Failed to get external DMA channel.\n"); kfree(host->dms); host->dms = NULL; - return -ENXIO; + return ret; } return 0; diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 78383f60a3dc..fbae87d1f017 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -1108,7 +1108,7 @@ static int jz4740_mmc_suspend(struct device *dev) static int jz4740_mmc_resume(struct device *dev) { - return pinctrl_pm_select_default_state(dev); + return pinctrl_select_default_state(dev); } static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index e712315c7e8d..35400cf2a2e4 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -161,7 +161,6 @@ struct meson_host { bool dram_access_quirk; struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_clk_gate; unsigned int bounce_buf_size; @@ -327,7 +326,7 @@ static void meson_mmc_clk_ungate(struct meson_host *host) u32 cfg; if (host->pins_clk_gate) - pinctrl_select_state(host->pinctrl, host->pins_default); + pinctrl_select_default_state(host->dev); /* Make sure the clock is not stopped in the controller */ cfg = readl(host->regs + SD_EMMC_CFG); @@ -1101,13 +1100,6 @@ static int meson_mmc_probe(struct 
platform_device *pdev) goto free_host; } - host->pins_default = pinctrl_lookup_state(host->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(host->pins_default)) { - ret = PTR_ERR(host->pins_default); - goto free_host; - } - host->pins_clk_gate = pinctrl_lookup_state(host->pinctrl, "clk-gate"); if (IS_ERR(host->pins_clk_gate)) { diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c index ba9a63db73da..8b038e7b2cd3 100644 --- a/drivers/mmc/host/meson-mx-sdio.c +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -638,7 +638,6 @@ static int meson_mx_mmc_probe(struct platform_device *pdev) struct platform_device *slot_pdev; struct mmc_host *mmc; struct meson_mx_mmc_host *host; - struct resource *res; int ret, irq; u32 conf; @@ -663,8 +662,7 @@ static int meson_mx_mmc_probe(struct platform_device *pdev) platform_set_drvdata(pdev, host); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->base = devm_ioremap_resource(host->controller_dev, res); + host->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto error_free_mmc; } diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 74c6cfbf9172..951f76dc1ddd 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1134,17 +1134,22 @@ static void mmc_spi_initsequence(struct mmc_spi_host *host) * SPI protocol. Another is that when chipselect is released while * the card returns BUSY status, the clock must issue several cycles * with chipselect high before the card will stop driving its output. + * + * SPI_CS_HIGH means "asserted" here. In some cases, like when using + * GPIOs for chip select, SPI_CS_HIGH is set but this will be logically + * inverted by gpiolib, so if we want to make sure it is driven high + * we should toggle the default with an XOR as we do here. */ - host->spi->mode |= SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; if (spi_setup(host->spi) != 0) { /* Just warn; most cards work without it. */ dev_warn(&host->spi->dev, "can't change chip-select polarity\n"); - host->spi->mode &= ~SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; } else { mmc_spi_readbytes(host, 18); - host->spi->mode &= ~SPI_CS_HIGH; + host->spi->mode ^= SPI_CS_HIGH; if (spi_setup(host->spi) != 0) { /* Wot, we can't get the same setup we had before? 
*/ dev_err(&host->spi->dev, @@ -1421,7 +1426,7 @@ static int mmc_spi_probe(struct spi_device *spi) * Index 0 is card detect * Old boardfiles were specifying 1 ms as debounce */ - status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000, NULL); + status = mmc_gpiod_request_cd(mmc, NULL, 0, false, 1000); if (status == -EPROBE_DEFER) goto fail_add_host; if (!status) { @@ -1436,7 +1441,7 @@ static int mmc_spi_probe(struct spi_device *spi) mmc_detect_change(mmc, 0); /* Index 1 is write protect/read only */ - status = mmc_gpiod_request_ro(mmc, NULL, 1, 0, NULL); + status = mmc_gpiod_request_ro(mmc, NULL, 1, 0); if (status == -EPROBE_DEFER) goto fail_add_host; if (!status) diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 40e72c30ea84..e9ffce8d41ea 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -169,6 +169,8 @@ static struct variant_data variant_ux500 = { .cmdreg_srsp = MCI_CPSM_RESPONSE, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .dma_power_of_2 = true, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -202,6 +204,8 @@ static struct variant_data variant_ux500v2 = { .datactrl_mask_ddrmode = MCI_DPSM_ST_DDRMODE, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, + .dma_power_of_2 = true, .datactrl_mask_sdio = MCI_DPSM_ST_SDIOEN, .st_sdio = true, .st_clkdiv = true, @@ -261,6 +265,7 @@ static struct variant_data variant_stm32_sdmmc = { .datacnt_useless = true, .datalength_bits = 25, .datactrl_blocksz = 14, + .datactrl_any_blocksz = true, .stm32_idmabsize_mask = GENMASK(12, 5), .busy_timeout = true, .busy_detect = true, @@ -284,6 +289,7 @@ static struct variant_data variant_qcom = { .data_cmd_enable = MCI_CPSM_QCOM_DATCMD, .datalength_bits = 24, .datactrl_blocksz = 11, + .datactrl_any_blocksz = true, .pwrreg_powerup = MCI_PWR_UP, .f_max = 208000000, .explicit_mclk_control = true, @@ -452,10 +458,11 @@ static void mmci_dma_setup(struct mmci_host *host) static int mmci_validate_data(struct mmci_host *host, struct mmc_data *data) { + struct variant_data *variant = host->variant; + if (!data) return 0; - - if (!is_power_of_2(data->blksz)) { + if (!is_power_of_2(data->blksz) && !variant->datactrl_any_blocksz) { dev_err(mmc_dev(host->mmc), "unsupported block size (%d bytes)\n", data->blksz); return -EINVAL; @@ -520,7 +527,9 @@ static int mmci_dma_start(struct mmci_host *host, unsigned int datactrl) "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", data->sg_len, data->blksz, data->blocks, data->flags); - host->ops->dma_start(host, &datactrl); + ret = host->ops->dma_start(host, &datactrl); + if (ret) + return ret; /* Trigger the DMA transfer */ mmci_write_datactrlreg(host, datactrl); @@ -706,10 +715,20 @@ int mmci_dmae_setup(struct mmci_host *host) host->dma_priv = dmae; - dmae->rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), - "rx"); - dmae->tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), - "tx"); + dmae->rx_channel = dma_request_chan(mmc_dev(host->mmc), "rx"); + if (IS_ERR(dmae->rx_channel)) { + int ret = PTR_ERR(dmae->rx_channel); + dmae->rx_channel = NULL; + return ret; + } + + dmae->tx_channel = dma_request_chan(mmc_dev(host->mmc), "tx"); + if (IS_ERR(dmae->tx_channel)) { + if (PTR_ERR(dmae->tx_channel) == -EPROBE_DEFER) + dev_warn(mmc_dev(host->mmc), + "Deferred probe for TX channel ignored\n"); + dmae->tx_channel = NULL; + } /* * If only an RX channel is specified, the driver will @@ -888,6 +907,18 @@ static int 
_mmci_dmae_prep_data(struct mmci_host *host, struct mmc_data *data, if (data->blksz * data->blocks <= variant->fifosize) return -EINVAL; + /* + * This is necessary to get SDIO working on the Ux500. We do not yet + * know if this is a bug in: + * - The Ux500 DMA controller (DMA40) + * - The MMCI DMA interface on the Ux500 + * However, some power-of-two block sizes (such as 64 bytes) are sent + * regularly during SDIO traffic and those work fine, so for these we + * enable DMA transfers. + */ + if (host->variant->dma_power_of_2 && !is_power_of_2(data->blksz)) + return -EINVAL; + device = chan->device; nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, mmc_get_dma_dir(data)); @@ -938,9 +969,14 @@ int mmci_dmae_prep_data(struct mmci_host *host, int mmci_dmae_start(struct mmci_host *host, unsigned int *datactrl) { struct mmci_dmae_priv *dmae = host->dma_priv; + int ret; host->dma_in_progress = true; - dmaengine_submit(dmae->desc_current); + ret = dma_submit_error(dmaengine_submit(dmae->desc_current)); + if (ret < 0) { + host->dma_in_progress = false; + return ret; + } dma_async_issue_pending(dmae->cur); *datactrl |= MCI_DPSM_DMAENABLE; @@ -1321,6 +1357,7 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, } else if (host->variant->busy_timeout && busy_resp && status & MCI_DATATIMEOUT) { cmd->error = -ETIMEDOUT; + host->irq_action = IRQ_WAKE_THREAD; } else { cmd->resp[0] = readl(base + MMCIRESPONSE0); cmd->resp[1] = readl(base + MMCIRESPONSE1); @@ -1339,7 +1376,10 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, return; } } - mmci_request_end(host, host->mrq); + + if (host->irq_action != IRQ_WAKE_THREAD) + mmci_request_end(host, host->mrq); + } else if (sbc) { mmci_start_command(host, host->mrq->cmd, 0); } else if (!host->variant->datactrl_first && @@ -1532,9 +1572,9 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) { struct mmci_host *host = dev_id; u32 status; - int ret = 0; spin_lock(&host->lock); + host->irq_action = IRQ_HANDLED; do { status = readl(host->base + MMCISTATUS); @@ -1574,12 +1614,41 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) if (host->variant->busy_detect_flag) status &= ~host->variant->busy_detect_flag; - ret = 1; } while (status); spin_unlock(&host->lock); - return IRQ_RETVAL(ret); + return host->irq_action; +} + +/* + * mmci_irq_thread() - A threaded IRQ handler that manages a reset of the HW. + * + * A reset is needed for some variants, where a datatimeout for an R1B request + * causes the DPSM to stay busy (non-functional). 
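+ *
+ * (Illustration, taken from other hunks in this patch rather than new
+ * behaviour: the thread only runs when the primary handler returns
+ * IRQ_WAKE_THREAD, which mmci_cmd_irq() now does on a busy-response
+ * datatimeout, and it is registered in mmci_probe() via:
+ *
+ *	devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq,
+ *				  mmci_irq_thread, IRQF_SHARED,
+ *				  DRIVER_NAME " (cmd)", host);
+ * )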
+ */ +static irqreturn_t mmci_irq_thread(int irq, void *dev_id) +{ + struct mmci_host *host = dev_id; + unsigned long flags; + + if (host->rst) { + reset_control_assert(host->rst); + udelay(2); + reset_control_deassert(host->rst); + } + + spin_lock_irqsave(&host->lock, flags); + writel(host->clk_reg, host->base + MMCICLOCK); + writel(host->pwr_reg, host->base + MMCIPOWER); + writel(MCI_IRQENABLE | host->variant->start_err, + host->base + MMCIMASK0); + + host->irq_action = IRQ_HANDLED; + mmci_request_end(host, host->mrq); + spin_unlock_irqrestore(&host->lock, flags); + + return host->irq_action; } static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) @@ -1704,7 +1773,7 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) pinctrl_select_state(host->pinctrl, host->pins_opendrain); else - pinctrl_select_state(host->pinctrl, host->pins_default); + pinctrl_select_default_state(mmc_dev(mmc)); } /* @@ -1877,14 +1946,6 @@ static int mmci_probe(struct amba_device *dev, goto host_free; } - host->pins_default = pinctrl_lookup_state(host->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(host->pins_default)) { - dev_err(mmc_dev(mmc), "Can't select default pins\n"); - ret = PTR_ERR(host->pins_default); - goto host_free; - } - host->pins_opendrain = pinctrl_lookup_state(host->pinctrl, MMCI_PINCTRL_STATE_OPENDRAIN); if (IS_ERR(host->pins_opendrain)) { @@ -2062,17 +2123,18 @@ static int mmci_probe(struct amba_device *dev, * silently if these do not exist */ if (!np) { - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) goto clk_disable; - ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); if (ret == -EPROBE_DEFER) goto clk_disable; } - ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED, - DRIVER_NAME " (cmd)", host); + ret = devm_request_threaded_irq(&dev->dev, dev->irq[0], mmci_irq, + mmci_irq_thread, IRQF_SHARED, + DRIVER_NAME " (cmd)", host); if (ret) goto clk_disable; @@ -2203,7 +2265,7 @@ static int mmci_runtime_resume(struct device *dev) struct mmci_host *host = mmc_priv(mmc); clk_prepare_enable(host->clk); mmci_restore(host); - pinctrl_pm_select_default_state(dev); + pinctrl_select_default_state(dev); } return 0; diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 158e1231aa23..ea6a0b5779d4 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -279,7 +279,11 @@ struct mmci_host; * @stm32_clkdiv: true if using a STM32-specific clock divider algorithm * @datactrl_mask_ddrmode: ddr mode mask in datactrl register. * @datactrl_mask_sdio: SDIO enable mask in datactrl register - * @datactrl_blksz: block size in power of two + * @datactrl_blocksz: block size in power of two + * @datactrl_any_blocksz: true if any block sizes are accepted by the + * hardware, such as with some SDIO traffic that sends + * odd packets. + * @dma_power_of_2: DMA only works with blocks that are a power of 2. 
* @datactrl_first: true if data must be set up before the command is sent * @datacnt_useless: true if the datacnt register cannot be used to read remaining data @@ -326,6 +330,8 @@ struct variant_data { unsigned int datactrl_mask_ddrmode; unsigned int datactrl_mask_sdio; unsigned int datactrl_blocksz; + u8 datactrl_any_blocksz:1; + u8 dma_power_of_2:1; u8 datactrl_first:1; u8 datacnt_useless:1; u8 st_sdio:1; @@ -404,7 +410,6 @@ struct mmci_host { struct mmci_host_ops *ops; struct variant_data *variant; struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_opendrain; u8 hw_designer; @@ -412,6 +417,7 @@ struct mmci_host { struct timer_list timer; unsigned int oldstat; + u32 irq_action; /* pio stuff */ struct sg_mapping_iter sg_miter; diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 010fe29a4888..7726dcf48f2c 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -2194,8 +2194,7 @@ static int msdc_drv_probe(struct platform_device *pdev) if (ret) goto host_free; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->base = devm_ioremap_resource(&pdev->dev, res); + host->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto host_free; } diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index 74a0a7fbbf7f..203b61712601 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -696,16 +696,14 @@ static int mvsd_probe(struct platform_device *pdev) struct mmc_host *mmc = NULL; struct mvsd_host *host = NULL; const struct mbus_dram_target_info *dram; - struct resource *r; int ret, irq; if (!np) { dev_err(&pdev->dev, "no DT node\n"); return -ENODEV; } - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (!r || irq < 0) + if (irq < 0) return -ENXIO; mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); @@ -758,7 +756,7 @@ static int mvsd_probe(struct platform_device *pdev) spin_lock_init(&host->lock); - host->base = devm_ioremap_resource(&pdev->dev, r); + host->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->base)) { ret = PTR_ERR(host->base); goto out; } diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 011b59a3602e..b3d654c688e5 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -1121,7 +1121,16 @@ static int mxcmci_probe(struct platform_device *pdev) mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR); if (!host->pdata) { - host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx"); + host->dma = dma_request_chan(&pdev->dev, "rx-tx"); + if (IS_ERR(host->dma)) { + if (PTR_ERR(host->dma) == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out_clk_put; + } + + /* Ignore errors to fall back to PIO mode */ + host->dma = NULL; + } } else { res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (res) { diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 4031217d21c3..d82674aed447 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -623,11 +623,11 @@ static int mxs_mmc_probe(struct platform_device *pdev) goto out_clk_disable; } - ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx"); - if (!ssp->dmach) { + ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx"); + if (IS_ERR(ssp->dmach)) { dev_err(mmc_dev(host->mmc), "%s: failed to request dma\n", __func__); - ret = -ENODEV; + ret = PTR_ERR(ssp->dmach); goto out_clk_disable; } diff --git a/drivers/mmc/host/omap_hsmmc.c 
b/drivers/mmc/host/omap_hsmmc.c index 767e964ca5a2..a379c45b985c 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -1605,12 +1605,6 @@ static int omap_hsmmc_configure_wake_irq(struct omap_hsmmc_host *host) ret = PTR_ERR(p); goto err_free_irq; } - if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_DEFAULT))) { - dev_info(host->dev, "missing default pinctrl state\n"); - devm_pinctrl_put(p); - ret = -EINVAL; - goto err_free_irq; - } if (IS_ERR(pinctrl_lookup_state(p, PINCTRL_STATE_IDLE))) { dev_info(host->dev, "missing idle pinctrl state\n"); @@ -2153,14 +2147,14 @@ static int omap_hsmmc_runtime_resume(struct device *dev) if ((host->mmc->caps & MMC_CAP_SDIO_IRQ) && (host->flags & HSMMC_SDIO_IRQ_ENABLED)) { - pinctrl_pm_select_default_state(host->dev); + pinctrl_select_default_state(host->dev); /* irq lost, if pinmux incorrect */ OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); OMAP_HSMMC_WRITE(host->base, ISE, CIRQ_EN); OMAP_HSMMC_WRITE(host->base, IE, CIRQ_EN); } else { - pinctrl_pm_select_default_state(host->dev); + pinctrl_select_default_state(host->dev); } spin_unlock_irqrestore(&host->irq_lock, flags); return 0; diff --git a/drivers/mmc/host/owl-mmc.c b/drivers/mmc/host/owl-mmc.c index 771e3d00f1bb..01ffe51f413d 100644 --- a/drivers/mmc/host/owl-mmc.c +++ b/drivers/mmc/host/owl-mmc.c @@ -616,10 +616,10 @@ static int owl_mmc_probe(struct platform_device *pdev) pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32); pdev->dev.dma_mask = &pdev->dev.coherent_dma_mask; - owl_host->dma = dma_request_slave_channel(&pdev->dev, "mmc"); - if (!owl_host->dma) { + owl_host->dma = dma_request_chan(&pdev->dev, "mmc"); + if (IS_ERR(owl_host->dma)) { dev_err(owl_host->dev, "Failed to get external DMA channel.\n"); - ret = -ENXIO; + ret = PTR_ERR(owl_host->dma); goto err_free_host; } diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 024acc1b0a2e..3a9333475a2b 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -710,17 +710,19 @@ static int pxamci_probe(struct platform_device *pdev) platform_set_drvdata(pdev, mmc); - host->dma_chan_rx = dma_request_slave_channel(dev, "rx"); - if (host->dma_chan_rx == NULL) { + host->dma_chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(host->dma_chan_rx)) { dev_err(dev, "unable to request rx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(host->dma_chan_rx); + host->dma_chan_rx = NULL; goto out; } - host->dma_chan_tx = dma_request_slave_channel(dev, "tx"); - if (host->dma_chan_tx == NULL) { + host->dma_chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(host->dma_chan_tx)) { dev_err(dev, "unable to request tx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(host->dma_chan_tx); + host->dma_chan_tx = NULL; goto out; } @@ -734,22 +736,22 @@ static int pxamci_probe(struct platform_device *pdev) } /* FIXME: should we pass detection delay to debounce? */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret && ret != -ENOENT) { dev_err(dev, "Failed requesting gpio_cd\n"); goto out; } - ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0, NULL); + if (!host->pdata->gpio_card_ro_invert) + mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + ret = mmc_gpiod_request_ro(mmc, "wp", 0, 0); if (ret && ret != -ENOENT) { dev_err(dev, "Failed requesting gpio_ro\n"); goto out; } - if (!ret) { + if (!ret) host->use_ro_gpio = true; - mmc->caps2 |= host->pdata->gpio_card_ro_invert ? 
- 0 : MMC_CAP2_RO_ACTIVE_HIGH; - } if (host->pdata->init) host->pdata->init(dev, pxamci_detect_irq, mmc); diff --git a/drivers/mmc/host/renesas_sdhi.h b/drivers/mmc/host/renesas_sdhi.h index c0504aa90857..f524251d5113 100644 --- a/drivers/mmc/host/renesas_sdhi.h +++ b/drivers/mmc/host/renesas_sdhi.h @@ -14,8 +14,8 @@ struct renesas_sdhi_scc { unsigned long clk_rate; /* clock rate for SDR104 */ - u32 tap; /* sampling clock position for SDR104 */ - u32 tap_hs400; /* sampling clock position for HS400 */ + u32 tap; /* sampling clock position for SDR104/HS400 (8 TAP) */ + u32 tap_hs400_4tap; /* sampling clock position for HS400 (4 TAP) */ }; struct renesas_sdhi_of_data { @@ -33,6 +33,11 @@ struct renesas_sdhi_of_data { unsigned short max_segs; }; +struct renesas_sdhi_quirks { + bool hs400_disabled; + bool hs400_4taps; +}; + struct tmio_mmc_dma { enum dma_slave_buswidth dma_buswidth; bool (*filter)(struct dma_chan *chan, void *arg); @@ -46,6 +51,7 @@ struct renesas_sdhi { struct clk *clk_cd; struct tmio_mmc_data mmc_data; struct tmio_mmc_dma dma_priv; + const struct renesas_sdhi_quirks *quirks; struct pinctrl *pinctrl; struct pinctrl_state *pins_default, *pins_uhs; void __iomem *scc_ctl; diff --git a/drivers/mmc/host/renesas_sdhi_core.c b/drivers/mmc/host/renesas_sdhi_core.c index 234551a68739..35cb24cd45b4 100644 --- a/drivers/mmc/host/renesas_sdhi_core.c +++ b/drivers/mmc/host/renesas_sdhi_core.c @@ -46,11 +46,6 @@ #define SDHI_VER_GEN3_SD 0xcc10 #define SDHI_VER_GEN3_SDMMC 0xcd10 -struct renesas_sdhi_quirks { - bool hs400_disabled; - bool hs400_4taps; -}; - static void renesas_sdhi_sdbuf_width(struct tmio_mmc_host *host, int width) { u32 val; @@ -355,7 +350,7 @@ static void renesas_sdhi_hs400_complete(struct tmio_mmc_host *host) 0x4 << SH_MOBILE_SDHI_SCC_DTCNTL_TAPNUM_SHIFT); - if (host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400) + if (priv->quirks && priv->quirks->hs400_4taps) sd_scc_write32(host, priv, SH_MOBILE_SDHI_SCC_TAPSET, host->tap_set / 2); @@ -493,7 +488,7 @@ static int renesas_sdhi_select_tuning(struct tmio_mmc_host *host) static bool renesas_sdhi_check_scc_error(struct tmio_mmc_host *host) { struct renesas_sdhi *priv = host_to_priv(host); - bool use_4tap = host->pdata->flags & TMIO_MMC_HAVE_4TAP_HS400; + bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; /* * Skip checking SCC errors when running on 4 taps in HS400 mode as @@ -627,10 +622,10 @@ static const struct renesas_sdhi_quirks sdhi_quirks_nohs400 = { }; static const struct soc_device_attribute sdhi_quirks_match[] = { + { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, { .soc_id = "r8a7795", .revision = "ES1.*", .data = &sdhi_quirks_4tap_nohs400 }, { .soc_id = "r8a7795", .revision = "ES2.0", .data = &sdhi_quirks_4tap }, { .soc_id = "r8a7796", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, - { .soc_id = "r8a774a1", .revision = "ES1.[012]", .data = &sdhi_quirks_4tap_nohs400 }, { .soc_id = "r8a77980", .data = &sdhi_quirks_nohs400 }, { /* Sentinel. */ }, }; @@ -665,6 +660,7 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (!priv) return -ENOMEM; + priv->quirks = quirks; mmc_data = &priv->mmc_data; dma_priv = &priv->dma_priv; @@ -724,9 +720,6 @@ int renesas_sdhi_probe(struct platform_device *pdev, if (quirks && quirks->hs400_disabled) host->mmc->caps2 &= ~(MMC_CAP2_HS400 | MMC_CAP2_HS400_ES); - if (quirks && quirks->hs400_4taps) - mmc_data->flags |= TMIO_MMC_HAVE_4TAP_HS400; - /* For some SoC, we disable internal WP. 
GPIO may override this */ if (mmc_can_gpio_ro(host->mmc)) mmc_data->capabilities2 &= ~MMC_CAP2_NO_WRITE_PROTECT; @@ -800,20 +793,23 @@ int renesas_sdhi_probe(struct platform_device *pdev, host->mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V))) { const struct renesas_sdhi_scc *taps = of_data->taps; + bool use_4tap = priv->quirks && priv->quirks->hs400_4taps; bool hit = false; for (i = 0; i < of_data->taps_num; i++) { if (taps[i].clk_rate == 0 || taps[i].clk_rate == host->mmc->f_max) { priv->scc_tappos = taps->tap; - priv->scc_tappos_hs400 = taps->tap_hs400; + priv->scc_tappos_hs400 = use_4tap ? + taps->tap_hs400_4tap : + taps->tap; hit = true; break; } } if (!hit) - dev_warn(&host->pdev->dev, "Unknown clock rate for SDR104\n"); + dev_warn(&host->pdev->dev, "Unknown clock rate for tuning\n"); host->init_tuning = renesas_sdhi_init_tuning; host->prepare_tuning = renesas_sdhi_prepare_tuning; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 18839a10594c..47ac53e91241 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -82,7 +82,7 @@ static struct renesas_sdhi_scc rcar_gen3_scc_taps[] = { { .clk_rate = 0, .tap = 0x00000300, - .tap_hs400 = 0x00000704, + .tap_hs400_4tap = 0x00000100, }, }; @@ -298,38 +298,23 @@ static const struct tmio_mmc_dma_ops renesas_sdhi_internal_dmac_dma_ops = { * Whitelist of specific R-Car Gen3 SoC ES versions to use this DMAC * implementation as others may use a different implementation. */ -static const struct soc_device_attribute soc_whitelist[] = { - /* specific ones */ +static const struct soc_device_attribute soc_dma_quirks[] = { { .soc_id = "r7s9210", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ADDR_MODE_FIXED_ONLY) }, { .soc_id = "r8a7795", .revision = "ES1.*", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, { .soc_id = "r8a7796", .revision = "ES1.0", .data = (void *)BIT(SDHI_INTERNAL_DMAC_ONE_RX_ONLY) }, - /* generic ones */ - { .soc_id = "r8a774a1" }, - { .soc_id = "r8a774b1" }, - { .soc_id = "r8a774c0" }, - { .soc_id = "r8a77470" }, - { .soc_id = "r8a7795" }, - { .soc_id = "r8a7796" }, - { .soc_id = "r8a77965" }, - { .soc_id = "r8a77970" }, - { .soc_id = "r8a77980" }, - { .soc_id = "r8a77990" }, - { .soc_id = "r8a77995" }, { /* sentinel */ } }; static int renesas_sdhi_internal_dmac_probe(struct platform_device *pdev) { - const struct soc_device_attribute *soc = soc_device_match(soc_whitelist); + const struct soc_device_attribute *soc = soc_device_match(soc_dma_quirks); struct device *dev = &pdev->dev; - if (!soc) - return -ENODEV; - - global_flags |= (unsigned long)soc->data; + if (soc) + global_flags |= (unsigned long)soc->data; dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms), GFP_KERNEL); if (!dev->dma_parms) diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index bce9c33bc4b5..1e616ae56b13 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -1505,14 +1505,14 @@ static int s3cmci_probe_pdata(struct s3cmci_host *host) mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; /* If we get -ENOENT we have no card detect GPIO line */ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret != -ENOENT) { dev_err(&pdev->dev, "error requesting GPIO for CD %d\n", ret); return ret; } - ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); + ret = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0); if (ret != -ENOENT) { dev_err(&pdev->dev, 
"error requesting GPIO for WP %d\n", ret); diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 105e73d4a3b9..dd908577a9dc 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -752,7 +752,7 @@ static int sdhci_acpi_probe(struct platform_device *pdev) if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) { bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL); - err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0, NULL); + err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0); if (err) { if (err == -EPROBE_DEFER) goto err_free; diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index 73bb440aaf93..ad01f6451a95 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -9,29 +9,236 @@ #include <linux/mmc/host.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/bitops.h> +#include <linux/delay.h> #include "sdhci-pltfm.h" +#include "cqhci.h" -static const struct sdhci_ops sdhci_brcmstb_ops = { +#define SDHCI_VENDOR 0x78 +#define SDHCI_VENDOR_ENHANCED_STRB 0x1 + +#define BRCMSTB_PRIV_FLAGS_NO_64BIT BIT(0) +#define BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT BIT(1) + +#define SDHCI_ARASAN_CQE_BASE_ADDR 0x200 + +struct sdhci_brcmstb_priv { + void __iomem *cfg_regs; + bool has_cqe; +}; + +struct brcmstb_match_priv { + void (*hs400es)(struct mmc_host *mmc, struct mmc_ios *ios); + struct sdhci_ops *ops; + unsigned int flags; +}; + +static void sdhci_brcmstb_hs400es(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + + u32 reg; + + dev_dbg(mmc_dev(mmc), "%s(): Setting HS400-Enhanced-Strobe mode\n", + __func__); + reg = readl(host->ioaddr + SDHCI_VENDOR); + if (ios->enhanced_strobe) + reg |= SDHCI_VENDOR_ENHANCED_STRB; + else + reg &= ~SDHCI_VENDOR_ENHANCED_STRB; + writel(reg, host->ioaddr + SDHCI_VENDOR); +} + +static void sdhci_brcmstb_set_clock(struct sdhci_host *host, unsigned int clock) +{ + u16 clk; + + host->mmc->actual_clock = 0; + + clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock); + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + if (clock == 0) + return; + + sdhci_enable_clk(host, clk); +} + +static void sdhci_brcmstb_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) +{ + u16 ctrl_2; + + dev_dbg(mmc_dev(host->mmc), "%s: Setting UHS signaling for %d timing\n", + __func__, timing); + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + if ((timing == MMC_TIMING_MMC_HS200) || + (timing == MMC_TIMING_UHS_SDR104)) + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + else if (timing == MMC_TIMING_UHS_SDR12) + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + else if (timing == MMC_TIMING_SD_HS || + timing == MMC_TIMING_MMC_HS || + timing == MMC_TIMING_UHS_SDR25) + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + else if (timing == MMC_TIMING_UHS_SDR50) + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + else if ((timing == MMC_TIMING_UHS_DDR50) || + (timing == MMC_TIMING_MMC_DDR52)) + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + else if (timing == MMC_TIMING_MMC_HS400) + ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */ + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} + +static void sdhci_brcmstb_dumpregs(struct mmc_host *mmc) +{ + sdhci_dumpregs(mmc_priv(mmc)); +} + +static void sdhci_brcmstb_cqe_enable(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 reg; + + reg = sdhci_readl(host, SDHCI_PRESENT_STATE); + while (reg & SDHCI_DATA_AVAILABLE) { + sdhci_readl(host, SDHCI_BUFFER); + reg = 
sdhci_readl(host, SDHCI_PRESENT_STATE); + } + + sdhci_cqe_enable(mmc); +} + +static const struct cqhci_host_ops sdhci_brcmstb_cqhci_ops = { + .enable = sdhci_brcmstb_cqe_enable, + .disable = sdhci_cqe_disable, + .dumpregs = sdhci_brcmstb_dumpregs, +}; + +static struct sdhci_ops sdhci_brcmstb_ops = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static const struct sdhci_pltfm_data sdhci_brcmstb_pdata = { +static struct sdhci_ops sdhci_brcmstb_ops_7216 = { + .set_clock = sdhci_brcmstb_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_brcmstb_set_uhs_signaling, +}; + +static struct brcmstb_match_priv match_priv_7425 = { + .flags = BRCMSTB_PRIV_FLAGS_NO_64BIT | + BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT, .ops = &sdhci_brcmstb_ops, }; +static struct brcmstb_match_priv match_priv_7445 = { + .flags = BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT, + .ops = &sdhci_brcmstb_ops, +}; + +static const struct brcmstb_match_priv match_priv_7216 = { + .hs400es = sdhci_brcmstb_hs400es, + .ops = &sdhci_brcmstb_ops_7216, +}; + +static const struct of_device_id sdhci_brcm_of_match[] = { + { .compatible = "brcm,bcm7425-sdhci", .data = &match_priv_7425 }, + { .compatible = "brcm,bcm7445-sdhci", .data = &match_priv_7445 }, + { .compatible = "brcm,bcm7216-sdhci", .data = &match_priv_7216 }, + {}, +}; + +static u32 sdhci_brcmstb_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + +static int sdhci_brcmstb_add_host(struct sdhci_host *host, + struct sdhci_brcmstb_priv *priv) +{ + struct cqhci_host *cq_host; + bool dma64; + int ret; + + if (!priv->has_cqe) + return sdhci_add_host(host); + + dev_dbg(mmc_dev(host->mmc), "CQE is enabled\n"); + host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = devm_kzalloc(mmc_dev(host->mmc), + sizeof(*cq_host), GFP_KERNEL); + if (!cq_host) { + ret = -ENOMEM; + goto cleanup; + } + + cq_host->mmio = host->ioaddr + SDHCI_ARASAN_CQE_BASE_ADDR; + cq_host->ops = &sdhci_brcmstb_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + if (dma64) { + dev_dbg(mmc_dev(host->mmc), "Using 64 bit DMA\n"); + cq_host->caps |= CQHCI_TASK_DESC_SZ_128; + cq_host->quirks |= CQHCI_QUIRK_SHORT_TXFR_DESC_SZ; + } + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) + goto cleanup; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + return 0; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + static int sdhci_brcmstb_probe(struct platform_device *pdev) { - struct sdhci_host *host; + const struct brcmstb_match_priv *match_priv; + struct sdhci_pltfm_data brcmstb_pdata; struct sdhci_pltfm_host *pltfm_host; + const struct of_device_id *match; + struct sdhci_brcmstb_priv *priv; + struct sdhci_host *host; + struct resource *iomem; + bool has_cqe = false; struct clk *clk; int res; + match = of_match_node(sdhci_brcm_of_match, pdev->dev.of_node); + match_priv = match->data; + + dev_dbg(&pdev->dev, "Probe found match for %s\n", match->compatible); + clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { + if (PTR_ERR(clk) == -EPROBE_DEFER) + return -EPROBE_DEFER; dev_err(&pdev->dev, "Clock not found in Device Tree\n"); clk = NULL; } @@ -39,36 +246,64 @@ static int sdhci_brcmstb_probe(struct 
platform_device *pdev) if (res) return res; - host = sdhci_pltfm_init(pdev, &sdhci_brcmstb_pdata, 0); + memset(&brcmstb_pdata, 0, sizeof(brcmstb_pdata)); + if (device_property_read_bool(&pdev->dev, "supports-cqe")) { + has_cqe = true; + match_priv->ops->irq = sdhci_brcmstb_cqhci_irq; + } + brcmstb_pdata.ops = match_priv->ops; + host = sdhci_pltfm_init(pdev, &brcmstb_pdata, + sizeof(struct sdhci_brcmstb_priv)); if (IS_ERR(host)) { res = PTR_ERR(host); goto err_clk; } + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); + priv->has_cqe = has_cqe; + + /* Map in the non-standard CFG registers */ + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 1); + priv->cfg_regs = devm_ioremap_resource(&pdev->dev, iomem); + if (IS_ERR(priv->cfg_regs)) { + res = PTR_ERR(priv->cfg_regs); + goto err; + } + sdhci_get_of_property(pdev); res = mmc_of_parse(host->mmc); if (res) goto err; /* + * If the chip has enhanced strobe and it's enabled, add + * callback + */ + if (match_priv->hs400es && + (host->mmc->caps2 & MMC_CAP2_HS400_ES)) + host->mmc_host_ops.hs400_enhanced_strobe = match_priv->hs400es; + + /* * Supply the existing CAPS, but clear the UHS modes. This * will allow these modes to be specified by device tree * properties through mmc_of_parse(). */ host->caps = sdhci_readl(host, SDHCI_CAPABILITIES); - if (of_device_is_compatible(pdev->dev.of_node, "brcm,bcm7425-sdhci")) + if (match_priv->flags & BRCMSTB_PRIV_FLAGS_NO_64BIT) host->caps &= ~SDHCI_CAN_64BIT; host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1); host->caps1 &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_SDR104 | - SDHCI_SUPPORT_DDR50); - host->quirks |= SDHCI_QUIRK_MISSING_CAPS | - SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + SDHCI_SUPPORT_DDR50); + host->quirks |= SDHCI_QUIRK_MISSING_CAPS; + + if (match_priv->flags & BRCMSTB_PRIV_FLAGS_BROKEN_TIMEOUT) + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; - res = sdhci_add_host(host); + res = sdhci_brcmstb_add_host(host, priv); if (res) goto err; - pltfm_host = sdhci_priv(host); pltfm_host->clk = clk; return res; @@ -79,11 +314,15 @@ err_clk: return res; } -static const struct of_device_id sdhci_brcm_of_match[] = { - { .compatible = "brcm,bcm7425-sdhci" }, - { .compatible = "brcm,bcm7445-sdhci" }, - {}, -}; +static void sdhci_brcmstb_shutdown(struct platform_device *pdev) +{ + int ret; + + ret = sdhci_pltfm_unregister(pdev); + if (ret) + dev_err(&pdev->dev, "failed to shutdown\n"); +} + MODULE_DEVICE_TABLE(of, sdhci_brcm_of_match); static struct platform_driver sdhci_brcmstb_driver = { @@ -94,6 +333,7 @@ static struct platform_driver sdhci_brcmstb_driver = { }, .probe = sdhci_brcmstb_probe, .remove = sdhci_pltfm_unregister, + .shutdown = sdhci_brcmstb_shutdown, }; module_platform_driver(sdhci_brcmstb_driver); diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index ae0ec27dd7cc..5827d3751b81 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -158,7 +158,7 @@ static int sdhci_cdns_phy_init(struct sdhci_cdns_priv *priv) return 0; } -static inline void *sdhci_cdns_priv(struct sdhci_host *host) +static void *sdhci_cdns_priv(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 1c988d6a2433..382f25b2fa45 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -224,7 +224,6 @@ static struct esdhc_soc_data usdhc_imx8qxp_data = { struct pltfm_imx_data { u32 scratchpad; struct 
pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_100mhz; struct pinctrl_state *pins_200mhz; const struct esdhc_soc_data *socdata; @@ -951,7 +950,6 @@ static int esdhc_change_pinstate(struct sdhci_host *host, dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs); if (IS_ERR(imx_data->pinctrl) || - IS_ERR(imx_data->pins_default) || IS_ERR(imx_data->pins_100mhz) || IS_ERR(imx_data->pins_200mhz)) return -EINVAL; @@ -968,7 +966,7 @@ static int esdhc_change_pinstate(struct sdhci_host *host, break; default: /* back to default state for other legacy timing */ - pinctrl = imx_data->pins_default; + return pinctrl_select_default_state(mmc_dev(host->mmc)); } return pinctrl_select_state(imx_data->pinctrl, pinctrl); @@ -1338,7 +1336,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, mmc_of_parse_voltage(np, &host->ocr_mask); - if (esdhc_is_usdhc(imx_data) && !IS_ERR(imx_data->pins_default)) { + if (esdhc_is_usdhc(imx_data)) { imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, ESDHC_PINCTRL_STATE_100MHZ); imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, @@ -1381,19 +1379,20 @@ static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev, host->mmc->parent->platform_data); /* write_protect */ if (boarddata->wp_type == ESDHC_WP_GPIO) { - err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0, NULL); + host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + err = mmc_gpiod_request_ro(host->mmc, "wp", 0, 0); if (err) { dev_err(mmc_dev(host->mmc), "failed to request write-protect gpio!\n"); return err; } - host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; } /* card_detect */ switch (boarddata->cd_type) { case ESDHC_CD_GPIO: - err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + err = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); if (err) { dev_err(mmc_dev(host->mmc), "failed to request card-detect gpio!\n"); @@ -1492,11 +1491,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) goto disable_ahb_clk; } - imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(imx_data->pins_default)) - dev_warn(mmc_dev(host->mmc), "could not get default state\n"); - if (esdhc_is_usdhc(imx_data)) { host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; host->mmc->caps |= MMC_CAP_1_8V_DDR | MMC_CAP_3_3V_DDR; diff --git a/drivers/mmc/host/sdhci-milbeaut.c b/drivers/mmc/host/sdhci-milbeaut.c index a1aa21b9ae1c..92f30a1db435 100644 --- a/drivers/mmc/host/sdhci-milbeaut.c +++ b/drivers/mmc/host/sdhci-milbeaut.c @@ -242,15 +242,12 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device *dev = &pdev->dev; - struct resource *res; int irq, ret = 0; struct f_sdhost_priv *priv; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(dev, "%s: no irq specified\n", __func__); + if (irq < 0) return irq; - } host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv)); if (IS_ERR(host)) @@ -280,8 +277,7 @@ static int sdhci_milbeaut_probe(struct platform_device *pdev) host->ops = &sdhci_milbeaut_ops; host->irq = irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, res); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); goto err; diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index 3d0bb5e2e09b..c3a160c18047 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -15,6 
+15,7 @@ #include <linux/regulator/consumer.h> #include "sdhci-pltfm.h" +#include "cqhci.h" #define CORE_MCI_VERSION 0x50 #define CORE_VERSION_MAJOR_SHIFT 28 @@ -122,6 +123,10 @@ #define msm_host_writel(msm_host, val, host, offset) \ msm_host->var_ops->msm_writel_relaxed(val, host, offset) +/* CQHCI vendor specific registers */ +#define CQHCI_VENDOR_CFG1 0xA00 +#define CQHCI_VENDOR_DIS_RST_ON_CQ_EN (0x3 << 13) + struct sdhci_msm_offset { u32 core_hc_mode; u32 core_mci_data_cnt; @@ -1567,6 +1572,127 @@ out: __sdhci_msm_set_clock(host, clock); } +/*****************************************************************************\ + * * + * MSM Command Queue Engine (CQE) * + * * +\*****************************************************************************/ + +static u32 sdhci_msm_cqe_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + return 0; +} + +void sdhci_msm_cqe_disable(struct mmc_host *mmc, bool recovery) +{ + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + u32 ctrl; + + /* + * When CQE is halted, the legacy SDHCI path operates only + * on 16-byte descriptors in 64bit mode. + */ + if (host->flags & SDHCI_USE_64_BIT_DMA) + host->desc_sz = 16; + + spin_lock_irqsave(&host->lock, flags); + + /* + * During CQE command transfers, command complete bit gets latched. + * So s/w should clear command complete interrupt status when CQE is + * either halted or disabled. Otherwise unexpected SDCHI legacy + * interrupt gets triggered when CQE is halted/disabled. + */ + ctrl = sdhci_readl(host, SDHCI_INT_ENABLE); + ctrl |= SDHCI_INT_RESPONSE; + sdhci_writel(host, ctrl, SDHCI_INT_ENABLE); + sdhci_writel(host, SDHCI_INT_RESPONSE, SDHCI_INT_STATUS); + + spin_unlock_irqrestore(&host->lock, flags); + + sdhci_cqe_disable(mmc, recovery); +} + +static const struct cqhci_host_ops sdhci_msm_cqhci_ops = { + .enable = sdhci_cqe_enable, + .disable = sdhci_msm_cqe_disable, +}; + +static int sdhci_msm_cqe_add_host(struct sdhci_host *host, + struct platform_device *pdev) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct cqhci_host *cq_host; + bool dma64; + u32 cqcfg; + int ret; + + /* + * When CQE is halted, SDHC operates only on 16byte ADMA descriptors. + * So ensure ADMA table is allocated for 16byte descriptors. + */ + if (host->caps & SDHCI_CAN_64BIT) + host->alloc_desc_sz = 16; + + ret = sdhci_setup_host(host); + if (ret) + return ret; + + cq_host = cqhci_pltfm_init(pdev); + if (IS_ERR(cq_host)) { + ret = PTR_ERR(cq_host); + dev_err(&pdev->dev, "cqhci-pltfm init: failed: %d\n", ret); + goto cleanup; + } + + msm_host->mmc->caps2 |= MMC_CAP2_CQE | MMC_CAP2_CQE_DCMD; + cq_host->ops = &sdhci_msm_cqhci_ops; + + dma64 = host->flags & SDHCI_USE_64_BIT_DMA; + + ret = cqhci_init(cq_host, host->mmc, dma64); + if (ret) { + dev_err(&pdev->dev, "%s: CQE init: failed (%d)\n", + mmc_hostname(host->mmc), ret); + goto cleanup; + } + + /* Disable cqe reset due to cqe enable signal */ + cqcfg = cqhci_readl(cq_host, CQHCI_VENDOR_CFG1); + cqcfg |= CQHCI_VENDOR_DIS_RST_ON_CQ_EN; + cqhci_writel(cq_host, cqcfg, CQHCI_VENDOR_CFG1); + + /* + * SDHC expects 12byte ADMA descriptors till CQE is enabled. 
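A note on the descriptor sizing at work in this hunk and in sdhci_setup_host() further down: the ADMA table is allocated once for the largest descriptor the host will ever use (alloc_desc_sz), while desc_sz tracks the size currently programmed. A condensed sketch of the three stages, using the values sdhci-msm picks in this patch:

	/* 1. Probe, before sdhci_setup_host(): reserve room for the
	 *    largest descriptor CQE will need (16 bytes in 64-bit mode),
	 *    since the core allocates adma_table_cnt * alloc_desc_sz.
	 */
	if (host->caps & SDHCI_CAN_64BIT)
		host->alloc_desc_sz = 16;

	/* 2. Until CQE is enabled, legacy data commands must run on
	 *    12-byte descriptors.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 12;

	/* 3. Whenever CQE is halted at runtime (sdhci_msm_cqe_disable),
	 *    the legacy path needs 16-byte descriptors again.
	 */
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		host->desc_sz = 16;
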
+ * So limit desc_sz to 12 so that the data commands that are sent + * during card initialization (before CQE gets enabled) would + * get executed without any issues. + */ + if (host->flags & SDHCI_USE_64_BIT_DMA) + host->desc_sz = 12; + + ret = __sdhci_add_host(host); + if (ret) + goto cleanup; + + dev_info(&pdev->dev, "%s: CQE init: success\n", + mmc_hostname(host->mmc)); + return ret; + +cleanup: + sdhci_cleanup_host(host); + return ret; +} + /* * Platform specific register write functions. This is so that, if any * register write needs to be followed up by platform specific actions, @@ -1731,6 +1857,7 @@ static const struct sdhci_ops sdhci_msm_ops = { .set_uhs_signaling = sdhci_msm_set_uhs_signaling, .write_w = sdhci_msm_writew, .write_b = sdhci_msm_writeb, + .irq = sdhci_msm_cqe_irq, }; static const struct sdhci_pltfm_data sdhci_msm_pdata = { @@ -1746,7 +1873,6 @@ static int sdhci_msm_probe(struct platform_device *pdev) struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct sdhci_msm_host *msm_host; - struct resource *core_memres; struct clk *clk; int ret; u16 host_version, core_minor; @@ -1754,6 +1880,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) u8 core_major; const struct sdhci_msm_offset *msm_offset; const struct sdhci_msm_variant_info *var_info; + struct device_node *node = pdev->dev.of_node; host = sdhci_pltfm_init(pdev, &sdhci_msm_pdata, sizeof(*msm_host)); if (IS_ERR(host)) @@ -1847,10 +1974,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) } if (!msm_host->mci_removed) { - core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1); - msm_host->core_mem = devm_ioremap_resource(&pdev->dev, - core_memres); - + msm_host->core_mem = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(msm_host->core_mem)) { ret = PTR_ERR(msm_host->core_mem); goto clk_disable; @@ -1952,7 +2076,10 @@ static int sdhci_msm_probe(struct platform_device *pdev) pm_runtime_use_autosuspend(&pdev->dev); host->mmc_host_ops.execute_tuning = sdhci_msm_execute_tuning; - ret = sdhci_add_host(host); + if (of_property_read_bool(node, "supports-cqe")) + ret = sdhci_msm_cqe_add_host(host, pdev); + else + ret = sdhci_add_host(host); if (ret) goto pm_runtime_disable; sdhci_msm_set_regulator_caps(msm_host); diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c index 5959e394b416..ab2bd314a390 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -33,7 +33,14 @@ #define SDHCI_AT91_PRESET_COMMON_CONF 0x400 /* drv type B, programmable clock mode */ +struct sdhci_at91_soc_data { + const struct sdhci_pltfm_data *pdata; + bool baseclk_is_generated_internally; + unsigned int divider_for_baseclk; +}; + struct sdhci_at91_priv { + const struct sdhci_at91_soc_data *soc_data; struct clk *hclock; struct clk *gck; struct clk *mainck; @@ -141,12 +148,24 @@ static const struct sdhci_ops sdhci_at91_sama5d2_ops = { .set_power = sdhci_at91_set_power, }; -static const struct sdhci_pltfm_data soc_data_sama5d2 = { +static const struct sdhci_pltfm_data sdhci_sama5d2_pdata = { .ops = &sdhci_at91_sama5d2_ops, }; +static const struct sdhci_at91_soc_data soc_data_sama5d2 = { + .pdata = &sdhci_sama5d2_pdata, + .baseclk_is_generated_internally = false, +}; + +static const struct sdhci_at91_soc_data soc_data_sam9x60 = { + .pdata = &sdhci_sama5d2_pdata, + .baseclk_is_generated_internally = true, + .divider_for_baseclk = 2, +}; + static const struct of_device_id sdhci_at91_dt_match[] = { { .compatible = "atmel,sama5d2-sdhci", .data = 
&soc_data_sama5d2 }, + { .compatible = "microchip,sam9x60-sdhci", .data = &soc_data_sam9x60 }, {} }; MODULE_DEVICE_TABLE(of, sdhci_at91_dt_match); @@ -156,50 +175,37 @@ static int sdhci_at91_set_clks_presets(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_at91_priv *priv = sdhci_pltfm_priv(pltfm_host); - int ret; unsigned int caps0, caps1; unsigned int clk_base, clk_mul; - unsigned int gck_rate, real_gck_rate; + unsigned int gck_rate, clk_base_rate; unsigned int preset_div; - /* - * The mult clock is provided by as a generated clock by the PMC - * controller. In order to set the rate of gck, we have to get the - * base clock rate and the clock mult from capabilities. - */ clk_prepare_enable(priv->hclock); caps0 = readl(host->ioaddr + SDHCI_CAPABILITIES); caps1 = readl(host->ioaddr + SDHCI_CAPABILITIES_1); - clk_base = (caps0 & SDHCI_CLOCK_V3_BASE_MASK) >> SDHCI_CLOCK_BASE_SHIFT; - clk_mul = (caps1 & SDHCI_CLOCK_MUL_MASK) >> SDHCI_CLOCK_MUL_SHIFT; - gck_rate = clk_base * 1000000 * (clk_mul + 1); - ret = clk_set_rate(priv->gck, gck_rate); - if (ret < 0) { - dev_err(dev, "failed to set gck"); - clk_disable_unprepare(priv->hclock); - return ret; - } - /* - * We need to check if we have the requested rate for gck because in - * some cases this rate could be not supported. If it happens, the rate - * is the closest one gck can provide. We have to update the value - * of clk mul. - */ - real_gck_rate = clk_get_rate(priv->gck); - if (real_gck_rate != gck_rate) { - clk_mul = real_gck_rate / (clk_base * 1000000) - 1; - caps1 &= (~SDHCI_CLOCK_MUL_MASK); - caps1 |= ((clk_mul << SDHCI_CLOCK_MUL_SHIFT) & - SDHCI_CLOCK_MUL_MASK); - /* Set capabilities in r/w mode. */ - writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, - host->ioaddr + SDMMC_CACR); - writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); - /* Set capabilities in ro mode. */ - writel(0, host->ioaddr + SDMMC_CACR); - dev_info(dev, "update clk mul to %u as gck rate is %u Hz\n", - clk_mul, real_gck_rate); - } + + gck_rate = clk_get_rate(priv->gck); + if (priv->soc_data->baseclk_is_generated_internally) + clk_base_rate = gck_rate / priv->soc_data->divider_for_baseclk; + else + clk_base_rate = clk_get_rate(priv->mainck); + + clk_base = clk_base_rate / 1000000; + clk_mul = gck_rate / clk_base_rate - 1; + + caps0 &= ~SDHCI_CLOCK_V3_BASE_MASK; + caps0 |= (clk_base << SDHCI_CLOCK_BASE_SHIFT) & SDHCI_CLOCK_V3_BASE_MASK; + caps1 &= ~SDHCI_CLOCK_MUL_MASK; + caps1 |= (clk_mul << SDHCI_CLOCK_MUL_SHIFT) & SDHCI_CLOCK_MUL_MASK; + /* Set capabilities in r/w mode. */ + writel(SDMMC_CACR_KEY | SDMMC_CACR_CAPWREN, host->ioaddr + SDMMC_CACR); + writel(caps0, host->ioaddr + SDHCI_CAPABILITIES); + writel(caps1, host->ioaddr + SDHCI_CAPABILITIES_1); + /* Set capabilities in ro mode. */ + writel(0, host->ioaddr + SDMMC_CACR); + + dev_info(dev, "update clk mul to %u as gck rate is %u Hz and clk base is %u Hz\n", + clk_mul, gck_rate, clk_base_rate); /* * We have to set preset values because it depends on the clk_mul @@ -207,19 +213,19 @@ static int sdhci_at91_set_clks_presets(struct device *dev) * maximum sd clock value is 120 MHz instead of 208 MHz. For that * reason, we need to use presets to support SDR104. 
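To make the new capability rewrite above concrete, here is the arithmetic for a hypothetical sam9x60-style setup (the 240 MHz gck rate is illustrative, not taken from the patch):

	/*
	 * baseclk_is_generated_internally = true, divider_for_baseclk = 2
	 *
	 *   gck_rate      = 240000000                       (240 MHz)
	 *   clk_base_rate = 240000000 / 2   = 120000000     (120 MHz)
	 *   clk_base      = 120000000 / 1000000 = 120       -> CAPABILITIES
	 *   clk_mul       = 240000000 / 120000000 - 1 = 1   -> CAPABILITIES_1
	 *
	 * and the preset dividers written just below:
	 *
	 *   SDR104: DIV_ROUND_UP(240000000, 120000000) - 1 = 1
	 *   SDR50:  DIV_ROUND_UP(240000000, 100000000) - 1 = 2
	 */
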
*/ - preset_div = DIV_ROUND_UP(real_gck_rate, 24000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 24000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR12); - preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR25); - preset_div = DIV_ROUND_UP(real_gck_rate, 100000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 100000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR50); - preset_div = DIV_ROUND_UP(real_gck_rate, 120000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 120000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_SDR104); - preset_div = DIV_ROUND_UP(real_gck_rate, 50000000) - 1; + preset_div = DIV_ROUND_UP(gck_rate, 50000000) - 1; writew(SDHCI_AT91_PRESET_COMMON_CONF | preset_div, host->ioaddr + SDHCI_PRESET_FOR_DDR50); @@ -314,7 +320,7 @@ static const struct dev_pm_ops sdhci_at91_dev_pm_ops = { static int sdhci_at91_probe(struct platform_device *pdev) { const struct of_device_id *match; - const struct sdhci_pltfm_data *soc_data; + const struct sdhci_at91_soc_data *soc_data; struct sdhci_host *host; struct sdhci_pltfm_host *pltfm_host; struct sdhci_at91_priv *priv; @@ -325,29 +331,37 @@ static int sdhci_at91_probe(struct platform_device *pdev) return -EINVAL; soc_data = match->data; - host = sdhci_pltfm_init(pdev, soc_data, sizeof(*priv)); + host = sdhci_pltfm_init(pdev, soc_data->pdata, sizeof(*priv)); if (IS_ERR(host)) return PTR_ERR(host); pltfm_host = sdhci_priv(host); priv = sdhci_pltfm_priv(pltfm_host); + priv->soc_data = soc_data; priv->mainck = devm_clk_get(&pdev->dev, "baseclk"); if (IS_ERR(priv->mainck)) { - dev_err(&pdev->dev, "failed to get baseclk\n"); - return PTR_ERR(priv->mainck); + if (soc_data->baseclk_is_generated_internally) { + priv->mainck = NULL; + } else { + dev_err(&pdev->dev, "failed to get baseclk\n"); + ret = PTR_ERR(priv->mainck); + goto sdhci_pltfm_free; + } } priv->hclock = devm_clk_get(&pdev->dev, "hclock"); if (IS_ERR(priv->hclock)) { dev_err(&pdev->dev, "failed to get hclock\n"); - return PTR_ERR(priv->hclock); + ret = PTR_ERR(priv->hclock); + goto sdhci_pltfm_free; } priv->gck = devm_clk_get(&pdev->dev, "multclk"); if (IS_ERR(priv->gck)) { dev_err(&pdev->dev, "failed to get multclk\n"); - return PTR_ERR(priv->gck); + ret = PTR_ERR(priv->gck); + goto sdhci_pltfm_free; } ret = sdhci_at91_set_clks_presets(&pdev->dev); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 500f70a6ee42..5d8dd870bd44 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -173,6 +173,9 @@ static u16 esdhc_readw_fixup(struct sdhci_host *host, u16 ret; int shift = (spec_reg & 0x2) * 8; + if (spec_reg == SDHCI_TRANSFER_MODE) + return pltfm_host->xfer_mode_shadow; + if (spec_reg == SDHCI_HOST_VERSION) ret = value & 0xffff; else @@ -562,32 +565,46 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) static void esdhc_clock_enable(struct sdhci_host *host, bool enable) { - u32 val; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); ktime_t timeout; + u32 val, clk_en; + + clk_en = ESDHC_CLOCK_SDCLKEN; + + /* + * IPGEN/HCKEN/PEREN bits exist on eSDHC whose vendor version + * is 2.2 or lower. 
+ */ + if (esdhc->vendor_ver <= VENDOR_V_22) + clk_en |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | + ESDHC_CLOCK_PEREN); val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); if (enable) - val |= ESDHC_CLOCK_SDCLKEN; + val |= clk_en; else - val &= ~ESDHC_CLOCK_SDCLKEN; + val &= ~clk_en; sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); - /* Wait max 20 ms */ + /* + * Wait max 20 ms. If vendor version is 2.2 or lower, do not + * wait clock stable bit which does not exist. + */ timeout = ktime_add_ms(ktime_get(), 20); - val = ESDHC_CLOCK_STABLE; - while (1) { + while (esdhc->vendor_ver > VENDOR_V_22) { bool timedout = ktime_after(ktime_get(), timeout); - if (sdhci_readl(host, ESDHC_PRSSTAT) & val) + if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) break; if (timedout) { pr_err("%s: Internal clock never stabilised.\n", mmc_hostname(host->mmc)); break; } - udelay(10); + usleep_range(10, 20); } } @@ -621,77 +638,97 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); - int pre_div = 1; - int div = 1; - int division; + unsigned int pre_div = 1, div = 1; + unsigned int clock_fixup = 0; ktime_t timeout; - long fixup = 0; u32 temp; - host->mmc->actual_clock = 0; - if (clock == 0) { + host->mmc->actual_clock = 0; esdhc_clock_enable(host, false); return; } - /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ + /* Start pre_div at 2 for vendor version < 2.3. */ if (esdhc->vendor_ver < VENDOR_V_23) pre_div = 2; + /* Fix clock value. */ if (host->mmc->card && mmc_card_sd(host->mmc->card) && - esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY) - fixup = esdhc->clk_fixup->sd_dflt_max_clk; + esdhc->clk_fixup && host->mmc->ios.timing == MMC_TIMING_LEGACY) + clock_fixup = esdhc->clk_fixup->sd_dflt_max_clk; else if (esdhc->clk_fixup) - fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing]; - - if (fixup && clock > fixup) - clock = fixup; + clock_fixup = esdhc->clk_fixup->max_clk[host->mmc->ios.timing]; - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp &= ~(ESDHC_CLOCK_SDCLKEN | ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | - ESDHC_CLOCK_PEREN | ESDHC_CLOCK_MASK); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + if (clock_fixup == 0 || clock < clock_fixup) + clock_fixup = clock; - while (host->max_clk / pre_div / 16 > clock && pre_div < 256) + /* Calculate pre_div and div. */ + while (host->max_clk / pre_div / 16 > clock_fixup && pre_div < 256) pre_div *= 2; - while (host->max_clk / pre_div / div > clock && div < 16) + while (host->max_clk / pre_div / div > clock_fixup && div < 16) div++; + esdhc->div_ratio = pre_div * div; + + /* Limit clock division for HS400 200MHz clock for quirk. 
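The pre_div/div search above is easier to follow outside the driver. A standalone model under hypothetical rates (and note that vendor versions below 2.3 start pre_div at 2, as the code says):

	/* Model of eSDHC clock division: a power-of-two prescaler (1..256)
	 * followed by a linear divider (1..16).
	 */
	static unsigned int esdhc_model_clock(unsigned int max_clk,
					      unsigned int target)
	{
		unsigned int pre_div = 1, div = 1;

		while (max_clk / pre_div / 16 > target && pre_div < 256)
			pre_div *= 2;
		while (max_clk / pre_div / div > target && div < 16)
			div++;

		/* e.g. max_clk = 200 MHz, target = 25 MHz:
		 * pre_div = 1, div = 8, actual clock = 25 MHz
		 */
		return max_clk / pre_div / div;
	}
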
*/ if (esdhc->quirk_limited_clk_division && clock == MMC_HS200_MAX_DTR && (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 || host->flags & SDHCI_HS400_TUNING)) { - division = pre_div * div; - if (division <= 4) { + if (esdhc->div_ratio <= 4) { pre_div = 4; div = 1; - } else if (division <= 8) { + } else if (esdhc->div_ratio <= 8) { pre_div = 4; div = 2; - } else if (division <= 12) { + } else if (esdhc->div_ratio <= 12) { pre_div = 4; div = 3; } else { pr_warn("%s: using unsupported clock division.\n", mmc_hostname(host->mmc)); } + esdhc->div_ratio = pre_div * div; } + host->mmc->actual_clock = host->max_clk / esdhc->div_ratio; + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", - clock, host->max_clk / pre_div / div); - host->mmc->actual_clock = host->max_clk / pre_div / div; - esdhc->div_ratio = pre_div * div; + clock, host->mmc->actual_clock); + + /* Set clock division into register. */ pre_div >>= 1; div--; + esdhc_clock_enable(host, false); + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN - | (div << ESDHC_DIVIDER_SHIFT) - | (pre_div << ESDHC_PREDIV_SHIFT)); + temp &= ~ESDHC_CLOCK_MASK; + temp |= ((div << ESDHC_DIVIDER_SHIFT) | + (pre_div << ESDHC_PREDIV_SHIFT)); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + /* + * Wait max 20 ms. If vendor version is 2.2 or lower, do not + * wait clock stable bit which does not exist. + */ + timeout = ktime_add_ms(ktime_get(), 20); + while (esdhc->vendor_ver > VENDOR_V_22) { + bool timedout = ktime_after(ktime_get(), timeout); + + if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) + break; + if (timedout) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + break; + } + usleep_range(10, 20); + } + + /* Additional setting for HS400. */ if (host->mmc->ios.timing == MMC_TIMING_MMC_HS400 && clock == MMC_HS200_MAX_DTR) { temp = sdhci_readl(host, ESDHC_TBCTL); @@ -711,25 +748,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) esdhc_clock_enable(host, false); esdhc_flush_async_fifo(host); } - - /* Wait max 20 ms */ - timeout = ktime_add_ms(ktime_get(), 20); - while (1) { - bool timedout = ktime_after(ktime_get(), timeout); - - if (sdhci_readl(host, ESDHC_PRSSTAT) & ESDHC_CLOCK_STABLE) - break; - if (timedout) { - pr_err("%s: Internal clock never stabilised.\n", - mmc_hostname(host->mmc)); - return; - } - udelay(10); - } - - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp |= ESDHC_CLOCK_SDCLKEN; - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + esdhc_clock_enable(host, true); } static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) @@ -758,23 +777,58 @@ static void esdhc_reset(struct sdhci_host *host, u8 mask) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); - u32 val; + u32 val, bus_width = 0; + /* + * Add delay to make sure all the DMA transfers are finished + * for quirk. + */ if (esdhc->quirk_delay_before_data_reset && (mask & SDHCI_RESET_DATA) && (host->flags & SDHCI_REQ_USE_DMA)) mdelay(5); + /* + * Save bus-width for eSDHC whose vendor version is 2.2 + * or lower for data reset. 
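The data-reset workaround that follows is spread across several hunk fragments; condensed, its shape is a save/restore around the core reset (register and flag names exactly as below):

	/* eSDHC <= v2.2: a data reset clobbers PROCTL's bus-width field
	 * and the interrupt enables, so preserve them across sdhci_reset().
	 */
	bool fixup = (mask & SDHCI_RESET_DATA) &&
		     esdhc->vendor_ver <= VENDOR_V_22;
	u32 proctl = fixup ? sdhci_readl(host, ESDHC_PROCTL) &
			     ESDHC_CTRL_BUSWIDTH_MASK : 0;

	sdhci_reset(host, mask);

	if (fixup) {
		u32 val = sdhci_readl(host, ESDHC_PROCTL);

		val &= ~ESDHC_CTRL_BUSWIDTH_MASK;
		sdhci_writel(host, val | proctl, ESDHC_PROCTL);
		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
	}
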
+ */ + if ((mask & SDHCI_RESET_DATA) && + (esdhc->vendor_ver <= VENDOR_V_22)) { + val = sdhci_readl(host, ESDHC_PROCTL); + bus_width = val & ESDHC_CTRL_BUSWIDTH_MASK; + } + sdhci_reset(host, mask); - sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); - sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + /* + * Restore bus-width setting and interrupt registers for eSDHC + * whose vendor version is 2.2 or lower for data reset. + */ + if ((mask & SDHCI_RESET_DATA) && + (esdhc->vendor_ver <= VENDOR_V_22)) { + val = sdhci_readl(host, ESDHC_PROCTL); + val &= ~ESDHC_CTRL_BUSWIDTH_MASK; + val |= bus_width; + sdhci_writel(host, val, ESDHC_PROCTL); - if (mask & SDHCI_RESET_ALL) { + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + } + + /* + * Some bits have to be cleaned manually for eSDHC whose spec + * version is higher than 3.0 for all reset. + */ + if ((mask & SDHCI_RESET_ALL) && + (esdhc->spec_ver >= SDHCI_SPEC_300)) { val = sdhci_readl(host, ESDHC_TBCTL); val &= ~ESDHC_TB_EN; sdhci_writel(host, val, ESDHC_TBCTL); + /* + * Initialize eSDHC_DLLCFG1[DLL_PD_PULSE_STRETCH_SEL] to + * 0 for quirk. + */ if (esdhc->quirk_unreliable_pulse_detection) { val = sdhci_readl(host, ESDHC_DLLCFG1); val &= ~ESDHC_DLL_PD_PULSE_STRETCH_SEL; @@ -854,20 +908,20 @@ static int esdhc_signal_voltage_switch(struct mmc_host *mmc, } static struct soc_device_attribute soc_tuning_erratum_type1[] = { - { .family = "QorIQ T1023", .revision = "1.0", }, - { .family = "QorIQ T1040", .revision = "1.0", }, - { .family = "QorIQ T2080", .revision = "1.0", }, - { .family = "QorIQ LS1021A", .revision = "1.0", }, + { .family = "QorIQ T1023", }, + { .family = "QorIQ T1040", }, + { .family = "QorIQ T2080", }, + { .family = "QorIQ LS1021A", }, { }, }; static struct soc_device_attribute soc_tuning_erratum_type2[] = { - { .family = "QorIQ LS1012A", .revision = "1.0", }, - { .family = "QorIQ LS1043A", .revision = "1.*", }, - { .family = "QorIQ LS1046A", .revision = "1.0", }, - { .family = "QorIQ LS1080A", .revision = "1.0", }, - { .family = "QorIQ LS2080A", .revision = "1.0", }, - { .family = "QorIQ LA1575A", .revision = "1.0", }, + { .family = "QorIQ LS1012A", }, + { .family = "QorIQ LS1043A", }, + { .family = "QorIQ LS1046A", }, + { .family = "QorIQ LS1080A", }, + { .family = "QorIQ LS2080A", }, + { .family = "QorIQ LA1575A", }, { }, }; @@ -888,20 +942,11 @@ static void esdhc_tuning_block_enable(struct sdhci_host *host, bool enable) esdhc_clock_enable(host, true); } -static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, +static void esdhc_tuning_window_ptr(struct sdhci_host *host, u8 *window_start, u8 *window_end) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); - u8 tbstat_15_8, tbstat_7_0; u32 val; - if (esdhc->quirk_tuning_erratum_type1) { - *window_start = 5 * esdhc->div_ratio; - *window_end = 3 * esdhc->div_ratio; - return; - } - /* Write TBCTL[11:8]=4'h8 */ val = sdhci_readl(host, ESDHC_TBCTL); val &= ~(0xf << 8); @@ -920,20 +965,37 @@ static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, val = sdhci_readl(host, ESDHC_TBSTAT); val = sdhci_readl(host, ESDHC_TBSTAT); + *window_end = val & 0xff; + *window_start = (val >> 8) & 0xff; +} + +static void esdhc_prepare_sw_tuning(struct sdhci_host *host, u8 *window_start, + u8 *window_end) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_esdhc *esdhc = sdhci_pltfm_priv(pltfm_host); + u8 
start_ptr, end_ptr; + + if (esdhc->quirk_tuning_erratum_type1) { + *window_start = 5 * esdhc->div_ratio; + *window_end = 3 * esdhc->div_ratio; + return; + } + + esdhc_tuning_window_ptr(host, &start_ptr, &end_ptr); + /* Reset data lines by setting ESDHCCTL[RSTD] */ sdhci_reset(host, SDHCI_RESET_DATA); /* Write 32'hFFFF_FFFF to IRQSTAT register */ sdhci_writel(host, 0xFFFFFFFF, SDHCI_INT_STATUS); - /* If TBSTAT[15:8]-TBSTAT[7:0] > 4 * div_ratio - * or TBSTAT[7:0]-TBSTAT[15:8] > 4 * div_ratio, + /* If TBSTAT[15:8]-TBSTAT[7:0] > (4 * div_ratio) + 2 + * or TBSTAT[7:0]-TBSTAT[15:8] > (4 * div_ratio) + 2, * then program TBPTR[TB_WNDW_END_PTR] = 4 * div_ratio * and program TBPTR[TB_WNDW_START_PTR] = 8 * div_ratio. */ - tbstat_7_0 = val & 0xff; - tbstat_15_8 = (val >> 8) & 0xff; - if (abs(tbstat_15_8 - tbstat_7_0) > (4 * esdhc->div_ratio)) { + if (abs(start_ptr - end_ptr) > (4 * esdhc->div_ratio + 2)) { *window_start = 8 * esdhc->div_ratio; *window_end = 4 * esdhc->div_ratio; } else { @@ -1006,6 +1068,19 @@ static int esdhc_execute_tuning(struct mmc_host *mmc, u32 opcode) if (ret) break; + /* For type2 affected platforms of the tuning erratum, + * tuning may succeed although eSDHC might not have + * tuned properly. Need to check tuning window. + */ + if (esdhc->quirk_tuning_erratum_type2 && + !host->tuning_err) { + esdhc_tuning_window_ptr(host, &window_start, + &window_end); + if (abs(window_start - window_end) > + (4 * esdhc->div_ratio + 2)) + host->tuning_err = -EAGAIN; + } + /* If HW tuning fails and triggers erratum, * try workaround. */ @@ -1238,7 +1313,8 @@ static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) * 1/2 peripheral clock. */ if (of_device_is_compatible(np, "fsl,ls1046a-esdhc") || - of_device_is_compatible(np, "fsl,ls1028a-esdhc")) + of_device_is_compatible(np, "fsl,ls1028a-esdhc") || + of_device_is_compatible(np, "fsl,ls1088a-esdhc")) esdhc->peripheral_clock = clk_get_rate(clk) / 2; else esdhc->peripheral_clock = clk_get_rate(clk); diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c index 083e7e053c95..882053151a47 100644 --- a/drivers/mmc/host/sdhci-omap.c +++ b/drivers/mmc/host/sdhci-omap.c @@ -7,6 +7,7 @@ */ #include <linux/delay.h> +#include <linux/mmc/mmc.h> #include <linux/mmc/slot-gpio.h> #include <linux/module.h> #include <linux/of.h> @@ -85,6 +86,7 @@ /* sdhci-omap controller flags */ #define SDHCI_OMAP_REQUIRE_IODELAY BIT(0) +#define SDHCI_OMAP_SPECIAL_RESET BIT(1) struct sdhci_omap_data { u32 offset; @@ -685,7 +687,11 @@ static int sdhci_omap_enable_dma(struct sdhci_host *host) struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); - reg |= CON_DMA_MASTER; + reg &= ~CON_DMA_MASTER; + /* Switch to DMA slave mode when using external DMA */ + if (!host->use_external_dma) + reg |= CON_DMA_MASTER; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); return 0; @@ -774,15 +780,35 @@ static void sdhci_omap_set_uhs_signaling(struct sdhci_host *host, sdhci_omap_start_clock(omap_host); } +#define MMC_TIMEOUT_US 20000 /* 20000 micro Sec */ static void sdhci_omap_reset(struct sdhci_host *host, u8 mask) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + unsigned long limit = MMC_TIMEOUT_US; + unsigned long i = 0; /* Don't reset data lines during tuning operation */ if (omap_host->is_tuning) mask &= ~SDHCI_RESET_DATA; + if (omap_host->flags & SDHCI_OMAP_SPECIAL_RESET) { + sdhci_writeb(host, mask, 
SDHCI_SOFTWARE_RESET); + while ((!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask)) && + (i++ < limit)) + udelay(1); + i = 0; + while ((sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) && + (i++ < limit)) + udelay(1); + + if (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) + dev_err(mmc_dev(host->mmc), + "Timeout waiting on controller reset in %s\n", + __func__); + return; + } + sdhci_reset(host, mask); } @@ -823,6 +849,15 @@ static u32 sdhci_omap_irq(struct sdhci_host *host, u32 intmask) return intmask; } +static void sdhci_omap_set_timeout(struct sdhci_host *host, + struct mmc_command *cmd) +{ + if (cmd->opcode == MMC_ERASE) + sdhci_set_data_timeout_irq(host, false); + + __sdhci_set_timeout(host, cmd); +} + static struct sdhci_ops sdhci_omap_ops = { .set_clock = sdhci_omap_set_clock, .set_power = sdhci_omap_set_power, @@ -834,6 +869,7 @@ static struct sdhci_ops sdhci_omap_ops = { .reset = sdhci_omap_reset, .set_uhs_signaling = sdhci_omap_set_uhs_signaling, .irq = sdhci_omap_irq, + .set_timeout = sdhci_omap_set_timeout, }; static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host) @@ -883,6 +919,16 @@ static const struct sdhci_omap_data k2g_data = { .offset = 0x200, }; +static const struct sdhci_omap_data am335_data = { + .offset = 0x200, + .flags = SDHCI_OMAP_SPECIAL_RESET, +}; + +static const struct sdhci_omap_data am437_data = { + .offset = 0x200, + .flags = SDHCI_OMAP_SPECIAL_RESET, +}; + static const struct sdhci_omap_data dra7_data = { .offset = 0x200, .flags = SDHCI_OMAP_REQUIRE_IODELAY, @@ -891,6 +937,8 @@ static const struct sdhci_omap_data dra7_data = { static const struct of_device_id omap_sdhci_match[] = { { .compatible = "ti,dra7-sdhci", .data = &dra7_data }, { .compatible = "ti,k2g-sdhci", .data = &k2g_data }, + { .compatible = "ti,am335-sdhci", .data = &am335_data }, + { .compatible = "ti,am437-sdhci", .data = &am437_data }, {}, }; MODULE_DEVICE_TABLE(of, omap_sdhci_match); @@ -1037,6 +1085,7 @@ static int sdhci_omap_probe(struct platform_device *pdev) const struct of_device_id *match; struct sdhci_omap_data *data; const struct soc_device_attribute *soc; + struct resource *regs; match = of_match_device(omap_sdhci_match, dev); if (!match) @@ -1049,6 +1098,10 @@ static int sdhci_omap_probe(struct platform_device *pdev) } offset = data->offset; + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!regs) + return -ENXIO; + host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata, sizeof(*omap_host)); if (IS_ERR(host)) { @@ -1065,6 +1118,7 @@ static int sdhci_omap_probe(struct platform_device *pdev) omap_host->timing = MMC_TIMING_LEGACY; omap_host->flags = data->flags; host->ioaddr += offset; + host->mapbase = regs->start + offset; mmc = host->mmc; sdhci_get_of_property(pdev); @@ -1134,6 +1188,10 @@ static int sdhci_omap_probe(struct platform_device *pdev) host->mmc_host_ops.execute_tuning = sdhci_omap_execute_tuning; host->mmc_host_ops.enable_sdio_irq = sdhci_omap_enable_sdio_irq; + /* Switch to external DMA only if there is the "dmas" property */ + if (of_find_property(dev->of_node, "dmas", NULL)) + sdhci_switch_external_dma(host, true); + ret = sdhci_setup_host(host); if (ret) goto err_put_sync; diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 5091e2c1c0e5..525de2454a4d 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -1991,12 +1991,12 @@ static struct sdhci_pci_slot *sdhci_pci_probe_slot( if (slot->cd_idx >= 0) { ret = mmc_gpiod_request_cd(host->mmc, "cd", slot->cd_idx, - 
slot->cd_override_level, 0, NULL); + slot->cd_override_level, 0); if (ret && ret != -EPROBE_DEFER) ret = mmc_gpiod_request_cd(host->mmc, NULL, slot->cd_idx, slot->cd_override_level, - 0, NULL); + 0); if (ret == -EPROBE_DEFER) goto remove; diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 51e096f27388..64200c78e90d 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -117,7 +117,6 @@ struct sdhci_s3c { struct s3c_sdhci_platdata *pdata; int cur_clk; int ext_cd_irq; - int ext_cd_gpio; struct clk *clk_io; struct clk *clk_bus[MAX_BUS_CLK]; @@ -481,7 +480,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct sdhci_host *host; struct sdhci_s3c *sc; - struct resource *res; int ret, irq, ptr, clks; if (!pdev->dev.platform_data && !pdev->dev.of_node) { @@ -512,7 +510,6 @@ static int sdhci_s3c_probe(struct platform_device *pdev) goto err_pdata_io_clk; } else { memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata)); - sc->ext_cd_gpio = -1; /* invalid gpio number */ } drv_data = sdhci_s3c_get_driver_data(pdev); @@ -555,8 +552,7 @@ static int sdhci_s3c_probe(struct platform_device *pdev) goto err_no_busclks; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, res); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); goto err_req_regs; diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c index e43143223320..f4b05dd6c20a 100644 --- a/drivers/mmc/host/sdhci-sirf.c +++ b/drivers/mmc/host/sdhci-sirf.c @@ -194,7 +194,7 @@ static int sdhci_sirf_probe(struct platform_device *pdev) * We must request the IRQ after sdhci_add_host(), as the tasklet only * gets setup in sdhci_add_host() and we oops. */ - ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) goto err_request_cd; if (!ret) diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index 916b5b09c3d1..b4b63089a4e2 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c @@ -43,7 +43,6 @@ static const struct sdhci_ops sdhci_pltfm_ops = { static int sdhci_probe(struct platform_device *pdev) { struct sdhci_host *host; - struct resource *iomem; struct spear_sdhci *sdhci; struct device *dev; int ret; @@ -56,8 +55,7 @@ static int sdhci_probe(struct platform_device *pdev) goto err; } - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret); @@ -98,7 +96,7 @@ static int sdhci_probe(struct platform_device *pdev) * It is optional to use GPIOs for sdhci card detection. If we * find a descriptor using slot GPIO, we use it. 
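The slot-GPIO changes repeated across esdhc-imx, sdhci-pci, sdhci-sirf and sdhci-spear in this patch are the same mechanical edit: the unused trailing argument (passed as NULL everywhere) is dropped from the request helpers. After the series an optional card-detect GPIO is requested like this (error handling condensed from the sirf/spear hunks):

	/* Optional slot card-detect GPIO; only probe deferral is fatal. */
	ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0);
	if (ret == -EPROBE_DEFER)
		goto disable_clk;	/* GPIO provider not ready yet */
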
*/ - ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(host->mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) goto disable_clk; diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 7bc950520fd9..403ac44a7378 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -386,7 +386,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_DDR50; if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR104) misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDR104; - if (soc_data->nvquirks & SDHCI_MISC_CTRL_ENABLE_SDR50) + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDR50) clk_ctrl |= SDHCI_CLOCK_CTRL_SDR50_TUNING_OVERRIDE; } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 1b1c26da3fe0..63db84481dff 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -10,6 +10,7 @@ */ #include <linux/delay.h> +#include <linux/dmaengine.h> #include <linux/ktime.h> #include <linux/highmem.h> #include <linux/io.h> @@ -992,7 +993,7 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host) sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } -static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) { if (enable) host->ier |= SDHCI_INT_DATA_TIMEOUT; @@ -1001,42 +1002,36 @@ static void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable) sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } +EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq); -static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) { - u8 count; - - if (host->ops->set_timeout) { - host->ops->set_timeout(host, cmd); - } else { - bool too_big = false; - - count = sdhci_calc_timeout(host, cmd, &too_big); - - if (too_big && - host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { - sdhci_calc_sw_timeout(host, cmd); - sdhci_set_data_timeout_irq(host, false); - } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { - sdhci_set_data_timeout_irq(host, true); - } - - sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); + bool too_big = false; + u8 count = sdhci_calc_timeout(host, cmd, &too_big); + + if (too_big && + host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) { + sdhci_calc_sw_timeout(host, cmd); + sdhci_set_data_timeout_irq(host, false); + } else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) { + sdhci_set_data_timeout_irq(host, true); } + + sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); } +EXPORT_SYMBOL_GPL(__sdhci_set_timeout); -static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) +static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd) { - struct mmc_data *data = cmd->data; - - host->data_timeout = 0; - - if (sdhci_data_line_cmd(cmd)) - sdhci_set_timeout(host, cmd); - - if (!data) - return; + if (host->ops->set_timeout) + host->ops->set_timeout(host, cmd); + else + __sdhci_set_timeout(host, cmd); +} +static void sdhci_initialize_data(struct sdhci_host *host, + struct mmc_data *data) +{ WARN_ON(host->data); /* Sanity checks */ @@ -1047,6 +1042,34 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) host->data = data; host->data_early = 0; host->data->bytes_xfered = 0; +} + +static inline void sdhci_set_block_info(struct sdhci_host *host, + struct mmc_data *data) +{ + /* Set the 
DMA boundary value and block size */ + sdhci_writew(host, + SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), + SDHCI_BLOCK_SIZE); + /* + * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count + * can be supported, in that case 16-bit block count register must be 0. + */ + if (host->version >= SDHCI_SPEC_410 && host->v4_mode && + (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { + if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) + sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); + sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); + } else { + sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); + } +} + +static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) +{ + struct mmc_data *data = cmd->data; + + sdhci_initialize_data(host, data); if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { struct scatterlist *sg; @@ -1133,24 +1156,192 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) sdhci_set_transfer_irqs(host); - /* Set the DMA boundary value and block size */ - sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz), - SDHCI_BLOCK_SIZE); + sdhci_set_block_info(host, data); +} - /* - * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count - * can be supported, in that case 16-bit block count register must be 0. - */ - if (host->version >= SDHCI_SPEC_410 && host->v4_mode && - (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) { - if (sdhci_readw(host, SDHCI_BLOCK_COUNT)) - sdhci_writew(host, 0, SDHCI_BLOCK_COUNT); - sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT); +#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) + +static int sdhci_external_dma_init(struct sdhci_host *host) +{ + int ret = 0; + struct mmc_host *mmc = host->mmc; + + host->tx_chan = dma_request_chan(mmc->parent, "tx"); + if (IS_ERR(host->tx_chan)) { + ret = PTR_ERR(host->tx_chan); + if (ret != -EPROBE_DEFER) + pr_warn("Failed to request TX DMA channel.\n"); + host->tx_chan = NULL; + return ret; + } + + host->rx_chan = dma_request_chan(mmc->parent, "rx"); + if (IS_ERR(host->rx_chan)) { + if (host->tx_chan) { + dma_release_channel(host->tx_chan); + host->tx_chan = NULL; + } + + ret = PTR_ERR(host->rx_chan); + if (ret != -EPROBE_DEFER) + pr_warn("Failed to request RX DMA channel.\n"); + host->rx_chan = NULL; + } + + return ret; +} + +static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, + struct mmc_data *data) +{ + return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; +} + +static int sdhci_external_dma_setup(struct sdhci_host *host, + struct mmc_command *cmd) +{ + int ret, i; + enum dma_transfer_direction dir; + struct dma_async_tx_descriptor *desc; + struct mmc_data *data = cmd->data; + struct dma_chan *chan; + struct dma_slave_config cfg; + dma_cookie_t cookie; + int sg_cnt; + + if (!host->mapbase) + return -EINVAL; + + cfg.src_addr = host->mapbase + SDHCI_BUFFER; + cfg.dst_addr = host->mapbase + SDHCI_BUFFER; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = data->blksz / 4; + cfg.dst_maxburst = data->blksz / 4; + + /* Sanity check: all the SG entries must be aligned by block size. 
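For readability, the slave-config block just above, restated with designated initializers and annotations (values identical to the hunk):

	struct dma_slave_config cfg = {
		/* both directions target the SDHCI data port register */
		.src_addr	= host->mapbase + SDHCI_BUFFER,
		.dst_addr	= host->mapbase + SDHCI_BUFFER,
		/* the BUFFER register is 32 bits wide */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		/* burst one block at a time, expressed in 32-bit words */
		.src_maxburst	= data->blksz / 4,
		.dst_maxburst	= data->blksz / 4,
	};
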
*/ + for (i = 0; i < data->sg_len; i++) { + if ((data->sg + i)->length % data->blksz) + return -EINVAL; + } + + chan = sdhci_external_dma_channel(host, data); + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) + return ret; + + sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED); + if (sg_cnt <= 0) + return -EINVAL; + + dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM; + desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) + return -EINVAL; + + desc->callback = NULL; + desc->callback_param = NULL; + + cookie = dmaengine_submit(desc); + if (dma_submit_error(cookie)) + ret = cookie; + + return ret; +} + +static void sdhci_external_dma_release(struct sdhci_host *host) +{ + if (host->tx_chan) { + dma_release_channel(host->tx_chan); + host->tx_chan = NULL; + } + + if (host->rx_chan) { + dma_release_channel(host->rx_chan); + host->rx_chan = NULL; + } + + sdhci_switch_external_dma(host, false); +} + +static void __sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + struct mmc_data *data = cmd->data; + + sdhci_initialize_data(host, data); + + host->flags |= SDHCI_REQ_USE_DMA; + sdhci_set_transfer_irqs(host); + + sdhci_set_block_info(host, data); +} + +static void sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + if (!sdhci_external_dma_setup(host, cmd)) { + __sdhci_external_dma_prepare_data(host, cmd); } else { - sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT); + sdhci_external_dma_release(host); + pr_err("%s: Cannot use external DMA, switch to the DMA/PIO which standard SDHCI provides.\n", + mmc_hostname(host->mmc)); + sdhci_prepare_data(host, cmd); } } +static void sdhci_external_dma_pre_transfer(struct sdhci_host *host, + struct mmc_command *cmd) +{ + struct dma_chan *chan; + + if (!cmd->data) + return; + + chan = sdhci_external_dma_channel(host, cmd->data); + if (chan) + dma_async_issue_pending(chan); +} + +#else + +static inline int sdhci_external_dma_init(struct sdhci_host *host) +{ + return -EOPNOTSUPP; +} + +static inline void sdhci_external_dma_release(struct sdhci_host *host) +{ +} + +static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host, + struct mmc_command *cmd) +{ + /* This should never happen */ + WARN_ON_ONCE(1); +} + +static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host, + struct mmc_command *cmd) +{ +} + +static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host, + struct mmc_data *data) +{ + return NULL; +} + +#endif + +void sdhci_switch_external_dma(struct sdhci_host *host, bool en) +{ + host->use_external_dma = en; +} +EXPORT_SYMBOL_GPL(sdhci_switch_external_dma); + static inline bool sdhci_auto_cmd12(struct sdhci_host *host, struct mmc_request *mrq) { @@ -1245,22 +1436,10 @@ static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq) (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))); } -static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) +static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq) { int i; - if (host->cmd && host->cmd->mrq == mrq) - host->cmd = NULL; - - if (host->data_cmd && host->data_cmd->mrq == mrq) - host->data_cmd = NULL; - - if (host->data && host->data->mrq == mrq) - host->data = NULL; - - if (sdhci_needs_reset(host, mrq)) - host->pending_reset = true; - for (i = 0; i < SDHCI_MAX_MRQS; i++) { if (host->mrqs_done[i] == mrq) { WARN_ON(1); @@ 
-1276,6 +1455,23 @@ static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) } WARN_ON(i >= SDHCI_MAX_MRQS); +} + +static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq) +{ + if (host->cmd && host->cmd->mrq == mrq) + host->cmd = NULL; + + if (host->data_cmd && host->data_cmd->mrq == mrq) + host->data_cmd = NULL; + + if (host->data && host->data->mrq == mrq) + host->data = NULL; + + if (sdhci_needs_reset(host, mrq)) + host->pending_reset = true; + + sdhci_set_mrq_done(host, mrq); sdhci_del_timer(host, mrq); @@ -1326,12 +1522,12 @@ static void sdhci_finish_data(struct sdhci_host *host) /* * Need to send CMD12 if - - * a) open-ended multiblock transfer (no CMD23) + * a) open-ended multiblock transfer not using auto CMD12 (no CMD23) * b) error in multiblock transfer */ if (data->stop && - (data->error || - !data->mrq->sbc)) { + ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) || + data->error)) { /* * 'cap_cmd_during_tfr' request must not use the command line * after mmc_command_done() has been called. It is upper layer's @@ -1390,12 +1586,19 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) } host->cmd = cmd; + host->data_timeout = 0; if (sdhci_data_line_cmd(cmd)) { WARN_ON(host->data_cmd); host->data_cmd = cmd; + sdhci_set_timeout(host, cmd); } - sdhci_prepare_data(host, cmd); + if (cmd->data) { + if (host->use_external_dma) + sdhci_external_dma_prepare_data(host, cmd); + else + sdhci_prepare_data(host, cmd); + } sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); @@ -1437,6 +1640,9 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) timeout += 10 * HZ; sdhci_mod_timer(host, cmd->mrq, timeout); + if (host->use_external_dma) + sdhci_external_dma_pre_transfer(host, cmd); + sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); } EXPORT_SYMBOL_GPL(sdhci_send_command); @@ -1825,17 +2031,6 @@ void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) sdhci_led_activate(host); - /* - * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED - * requests if Auto-CMD12 is enabled. - */ - if (sdhci_auto_cmd12(host, mrq)) { - if (mrq->stop) { - mrq->data->stop = NULL; - mrq->stop = NULL; - } - } - if (!present || host->flags & SDHCI_DEVICE_DEAD) { mrq->cmd->error = -ENOMEDIUM; sdhci_finish_mrq(host, mrq); @@ -2661,6 +2856,17 @@ static bool sdhci_request_done(struct sdhci_host *host) if (host->flags & SDHCI_REQ_USE_DMA) { struct mmc_data *data = mrq->data; + if (host->use_external_dma && data && + (mrq->cmd->error || data->error)) { + struct dma_chan *chan = sdhci_external_dma_channel(host, data); + + host->mrqs_done[i] = NULL; + spin_unlock_irqrestore(&host->lock, flags); + dmaengine_terminate_sync(chan); + spin_lock_irqsave(&host->lock, flags); + sdhci_set_mrq_done(host, mrq); + } + if (data && data->host_cookie == COOKIE_MAPPED) { if (host->bounce_buffer) { /* @@ -3796,6 +4002,21 @@ int sdhci_setup_host(struct sdhci_host *host) if (sdhci_can_64bit_dma(host)) host->flags |= SDHCI_USE_64_BIT_DMA; + if (host->use_external_dma) { + ret = sdhci_external_dma_init(host); + if (ret == -EPROBE_DEFER) + goto unreg; + /* + * Fall back to use the DMA/PIO integrated in standard SDHCI + * instead of external DMA devices. 
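The consumer side of this ladder appears in the sdhci-omap hunk earlier in the patch: a platform driver opts in before calling the core setup, and the core then defers, falls back, or masks off its internal engines. A sketch of the opt-in, taken from the omap probe change:

	/* Use external DMA only when the DT node actually wires up "dmas";
	 * sdhci_setup_host() handles -EPROBE_DEFER and quiet fallback.
	 */
	if (of_find_property(dev->of_node, "dmas", NULL))
		sdhci_switch_external_dma(host, true);

	ret = sdhci_setup_host(host);
	if (ret)
		goto err_put_sync;
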
+ */ + else if (ret) + sdhci_switch_external_dma(host, false); + /* Disable internal DMA sources */ + else + host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA); + } + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { if (host->ops->set_dma_mask) ret = host->ops->set_dma_mask(host); @@ -3822,15 +4043,13 @@ int sdhci_setup_host(struct sdhci_host *host) dma_addr_t dma; void *buf; - if (host->flags & SDHCI_USE_64_BIT_DMA) { - host->adma_table_sz = host->adma_table_cnt * - SDHCI_ADMA2_64_DESC_SZ(host); - host->desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); - } else { - host->adma_table_sz = host->adma_table_cnt * - SDHCI_ADMA2_32_DESC_SZ; - host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; - } + if (!(host->flags & SDHCI_USE_64_BIT_DMA)) + host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ; + else if (!host->alloc_desc_sz) + host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host); + + host->desc_sz = host->alloc_desc_sz; + host->adma_table_sz = host->adma_table_cnt * host->desc_sz; host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN; /* @@ -3913,11 +4132,13 @@ int sdhci_setup_host(struct sdhci_host *host) if (host->ops->get_min_clock) mmc->f_min = host->ops->get_min_clock(host); else if (host->version >= SDHCI_SPEC_300) { - if (host->clk_mul) { - mmc->f_min = (host->max_clk * host->clk_mul) / 1024; + if (host->clk_mul) max_clk = host->max_clk * host->clk_mul; - } else - mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; + /* + * Divided Clock Mode minimum clock rate is always less than + * Programmable Clock Mode minimum clock rate. + */ + mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; } else mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; @@ -4276,6 +4497,10 @@ void sdhci_cleanup_host(struct sdhci_host *host) dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz + host->adma_table_sz, host->align_buffer, host->align_addr); + + if (host->use_external_dma) + sdhci_external_dma_release(host); + host->adma_table = NULL; host->align_buffer = NULL; } @@ -4321,6 +4546,7 @@ int __sdhci_add_host(struct sdhci_host *host) pr_info("%s: SDHCI controller on %s [%s] using %s\n", mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), + host->use_external_dma ? "External DMA" : (host->flags & SDHCI_USE_ADMA) ? (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO"); @@ -4409,6 +4635,9 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) host->adma_table_sz, host->align_buffer, host->align_addr); + if (host->use_external_dma) + sdhci_external_dma_release(host); + host->adma_table = NULL; host->align_buffer = NULL; } diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index fe83ece6965b..a6a3ddcf97e7 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -487,6 +487,7 @@ struct sdhci_host { int irq; /* Device IRQ */ void __iomem *ioaddr; /* Mapped address */ + phys_addr_t mapbase; /* physical address base */ char *bounce_buffer; /* For packing SDMA reads/writes */ dma_addr_t bounce_addr; unsigned int bounce_buffer_size; @@ -535,6 +536,7 @@ struct sdhci_host { bool pending_reset; /* Cmd/data reset is pending */ bool irq_wake_enabled; /* IRQ wakeup is enabled */ bool v4_mode; /* Host Version 4 Enable */ + bool use_external_dma; /* Host selects to use external DMA */ struct mmc_request *mrqs_done[SDHCI_MAX_MRQS]; /* Requests done */ struct mmc_command *cmd; /* Current command */ @@ -556,7 +558,8 @@ struct sdhci_host { dma_addr_t adma_addr; /* Mapped ADMA descr. 
table */ dma_addr_t align_addr; /* Mapped bounce buffer */ - unsigned int desc_sz; /* ADMA descriptor size */ + unsigned int desc_sz; /* ADMA current descriptor size */ + unsigned int alloc_desc_sz; /* ADMA descr. max size host supports */ struct workqueue_struct *complete_wq; /* Request completion wq */ struct work_struct complete_work; /* Request completion work */ @@ -564,6 +567,11 @@ struct sdhci_host { struct timer_list timer; /* Timer for timeouts */ struct timer_list data_timer; /* Timer for data timeouts */ +#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA) + struct dma_chan *rx_chan; + struct dma_chan *tx_chan; +#endif + u32 caps; /* CAPABILITY_0 */ u32 caps1; /* CAPABILITY_1 */ bool read_caps; /* Capability flags have been read */ @@ -795,5 +803,8 @@ void sdhci_end_tuning(struct sdhci_host *host); void sdhci_reset_tuning(struct sdhci_host *host); void sdhci_send_tuning(struct sdhci_host *host, u32 opcode); void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode); +void sdhci_switch_external_dma(struct sdhci_host *host, bool en); +void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable); +void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd); #endif /* __SDHCI_HW_H */ diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index b8e897e31e2e..3afea580fbea 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -240,6 +240,35 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg) writeb(val, host->ioaddr + reg); } +static int sdhci_am654_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct sdhci_host *host = mmc_priv(mmc); + int err = sdhci_execute_tuning(mmc, opcode); + + if (err) + return err; + /* + * Tuning data remains in the buffer after tuning. 
+ * Do a command and data reset to get rid of it + */ + sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + + return 0; +} + +static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask) +{ + int cmd_error = 0; + int data_error = 0; + + if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) + return intmask; + + cqhci_irq(host->mmc, intmask, cmd_error, data_error); + + return 0; +} + static struct sdhci_ops sdhci_am654_ops = { .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, @@ -248,13 +277,13 @@ static struct sdhci_ops sdhci_am654_ops = { .set_power = sdhci_am654_set_power, .set_clock = sdhci_am654_set_clock, .write_b = sdhci_am654_write_b, + .irq = sdhci_am654_cqhci_irq, .reset = sdhci_reset, }; static const struct sdhci_pltfm_data sdhci_am654_pdata = { .ops = &sdhci_am654_ops, - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; @@ -263,19 +292,6 @@ static const struct sdhci_am654_driver_data sdhci_am654_drvdata = { .flags = IOMUX_PRESENT | FREQSEL_2_BIT | STRBSEL_4_BIT | DLL_PRESENT, }; -static u32 sdhci_am654_cqhci_irq(struct sdhci_host *host, u32 intmask) -{ - int cmd_error = 0; - int data_error = 0; - - if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error)) - return intmask; - - cqhci_irq(host->mmc, intmask, cmd_error, data_error); - - return 0; -} - static struct sdhci_ops sdhci_j721e_8bit_ops = { .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_pltfm_clk_get_max_clock, @@ -290,8 +306,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = { .ops = &sdhci_j721e_8bit_ops, - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; @@ -314,8 +329,7 @@ static struct sdhci_ops sdhci_j721e_4bit_ops = { static const struct sdhci_pltfm_data sdhci_j721e_4bit_pdata = { .ops = &sdhci_j721e_4bit_ops, - .quirks = SDHCI_QUIRK_INVERTED_WRITE_PROTECT | - SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, + .quirks = SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, }; @@ -491,7 +505,6 @@ static int sdhci_am654_probe(struct platform_device *pdev) struct sdhci_am654_data *sdhci_am654; const struct of_device_id *match; struct sdhci_host *host; - struct resource *res; struct clk *clk_xin; struct device *dev = &pdev->dev; void __iomem *base; @@ -524,8 +537,7 @@ static int sdhci_am654_probe(struct platform_device *pdev) goto pm_runtime_disable; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 1); - base = devm_ioremap_resource(dev, res); + base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(base)) { ret = PTR_ERR(base); goto pm_runtime_put; @@ -549,6 +561,8 @@ static int sdhci_am654_probe(struct platform_device *pdev) goto pm_runtime_put; } + host->mmc_host_ops.execute_tuning = sdhci_am654_execute_tuning; + ret = sdhci_am654_init(host); if (ret) goto pm_runtime_put; diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index fa0dfc657c22..4625cc071b61 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -89,7 +89,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device *dev = &pdev->dev; - struct resource *res; int irq, ctrl = 0, ret = 0; struct 
f_sdhost_priv *priv; u32 reg = 0; @@ -123,8 +122,7 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) host->ops = &sdhci_f_sdh30_ops; host->irq = irq; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - host->ioaddr = devm_ioremap_resource(&pdev->dev, res); + host->ioaddr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->ioaddr)) { ret = PTR_ERR(host->ioaddr); goto err; diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 98c575de43c7..7e1fd557109c 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -432,8 +432,12 @@ static void sh_mmcif_request_dma(struct sh_mmcif_host *host) host->chan_rx = sh_mmcif_request_dma_pdata(host, pdata->slave_id_rx); } else { - host->chan_tx = dma_request_slave_channel(dev, "tx"); - host->chan_rx = dma_request_slave_channel(dev, "rx"); + host->chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(host->chan_tx)) + host->chan_tx = NULL; + host->chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(host->chan_rx)) + host->chan_rx = NULL; } dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx, host->chan_rx); @@ -1388,7 +1392,6 @@ static int sh_mmcif_probe(struct platform_device *pdev) struct sh_mmcif_host *host; struct device *dev = &pdev->dev; struct sh_mmcif_plat_data *pd = dev->platform_data; - struct resource *res; void __iomem *reg; const char *name; @@ -1397,8 +1400,7 @@ static int sh_mmcif_probe(struct platform_device *pdev) if (irq[0] < 0) return -ENXIO; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - reg = devm_ioremap_resource(dev, res); + reg = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(reg)) return PTR_ERR(reg); diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index d577a6b0ceae..f87d7967457f 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1273,8 +1273,7 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, if (ret) return ret; - host->reg_base = devm_ioremap_resource(&pdev->dev, - platform_get_resource(pdev, IORESOURCE_MEM, 0)); + host->reg_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(host->reg_base)) return PTR_ERR(host->reg_base); diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index c4a1d49fbea4..1e424bcdbd5f 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -1109,12 +1109,10 @@ struct tmio_mmc_host *tmio_mmc_host_alloc(struct platform_device *pdev, { struct tmio_mmc_host *host; struct mmc_host *mmc; - struct resource *res; void __iomem *ctl; int ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - ctl = devm_ioremap_resource(&pdev->dev, res); + ctl = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(ctl)) return ERR_CAST(ctl); @@ -1181,7 +1179,7 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host) * Look for a card detect GPIO, if it fails with anything * else than a probe deferral, just live without it. 
*/ - ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0, NULL); + ret = mmc_gpiod_request_cd(mmc, "cd", 0, false, 0); if (ret == -EPROBE_DEFER) return ret; diff --git a/drivers/mmc/host/uniphier-sd.c b/drivers/mmc/host/uniphier-sd.c index 0c72ec5546c3..a1683c49cb90 100644 --- a/drivers/mmc/host/uniphier-sd.c +++ b/drivers/mmc/host/uniphier-sd.c @@ -59,7 +59,6 @@ struct uniphier_sd_priv { struct tmio_mmc_data tmio_data; struct pinctrl *pinctrl; - struct pinctrl_state *pinstate_default; struct pinctrl_state *pinstate_uhs; struct clk *clk; struct reset_control *rst; @@ -500,13 +499,12 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc, { struct tmio_mmc_host *host = mmc_priv(mmc); struct uniphier_sd_priv *priv = uniphier_sd_priv(host); - struct pinctrl_state *pinstate; + struct pinctrl_state *pinstate = NULL; u32 val, tmp; switch (ios->signal_voltage) { case MMC_SIGNAL_VOLTAGE_330: val = UNIPHIER_SD_VOLT_330; - pinstate = priv->pinstate_default; break; case MMC_SIGNAL_VOLTAGE_180: val = UNIPHIER_SD_VOLT_180; @@ -521,7 +519,10 @@ static int uniphier_sd_start_signal_voltage_switch(struct mmc_host *mmc, tmp |= FIELD_PREP(UNIPHIER_SD_VOLT_MASK, val); writel(tmp, host->ctl + UNIPHIER_SD_VOLT); - pinctrl_select_state(priv->pinctrl, pinstate); + if (pinstate) + pinctrl_select_state(priv->pinctrl, pinstate); + else + pinctrl_select_default_state(mmc_dev(mmc)); return 0; } @@ -533,11 +534,6 @@ static int uniphier_sd_uhs_init(struct tmio_mmc_host *host, if (IS_ERR(priv->pinctrl)) return PTR_ERR(priv->pinctrl); - priv->pinstate_default = pinctrl_lookup_state(priv->pinctrl, - PINCTRL_STATE_DEFAULT); - if (IS_ERR(priv->pinstate_default)) - return PTR_ERR(priv->pinstate_default); - priv->pinstate_uhs = pinctrl_lookup_state(priv->pinctrl, "uhs"); if (IS_ERR(priv->pinstate_uhs)) return PTR_ERR(priv->pinstate_uhs); diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index b11ac2314328..9a0b1e4e405d 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -199,7 +199,6 @@ struct usdhi6_host { /* Pin control */ struct pinctrl *pinctrl; - struct pinctrl_state *pins_default; struct pinctrl_state *pins_uhs; }; @@ -677,12 +676,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) }; int ret; - host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); + host->chan_tx = dma_request_chan(mmc_dev(host->mmc), "tx"); dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__, host->chan_tx); - if (!host->chan_tx) + if (IS_ERR(host->chan_tx)) { + host->chan_tx = NULL; return; + } cfg.direction = DMA_MEM_TO_DEV; cfg.dst_addr = start + USDHI6_SD_BUF0; @@ -692,12 +693,14 @@ static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) if (ret < 0) goto e_release_tx; - host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); + host->chan_rx = dma_request_chan(mmc_dev(host->mmc), "rx"); dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__, host->chan_rx); - if (!host->chan_rx) + if (IS_ERR(host->chan_rx)) { + host->chan_rx = NULL; goto e_release_tx; + } cfg.direction = DMA_DEV_TO_MEM; cfg.src_addr = cfg.dst_addr; @@ -1162,8 +1165,7 @@ static int usdhi6_set_pinstates(struct usdhi6_host *host, int voltage) host->pins_uhs); default: - return pinctrl_select_state(host->pinctrl, - host->pins_default); + return pinctrl_select_default_state(mmc_dev(host->mmc)); } } @@ -1770,17 +1772,6 @@ static int usdhi6_probe(struct platform_device *pdev) } host->pins_uhs = 
pinctrl_lookup_state(host->pinctrl, "state_uhs"); - if (!IS_ERR(host->pins_uhs)) { - host->pins_default = pinctrl_lookup_state(host->pinctrl, - PINCTRL_STATE_DEFAULT); - - if (IS_ERR(host->pins_default)) { - dev_err(dev, - "UHS pinctrl requires a default pin state.\n"); - ret = PTR_ERR(host->pins_default); - goto e_free_mmc; - } - } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); host->base = devm_ioremap_resource(dev, res); diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c index 2e57122f02fb..2f5c287eac95 100644 --- a/drivers/net/can/slcan.c +++ b/drivers/net/can/slcan.c @@ -344,9 +344,16 @@ static void slcan_transmit(struct work_struct *work) */ static void slcan_write_wakeup(struct tty_struct *tty) { - struct slcan *sl = tty->disc_data; + struct slcan *sl; + + rcu_read_lock(); + sl = rcu_dereference(tty->disc_data); + if (!sl) + goto out; schedule_work(&sl->tx_work); +out: + rcu_read_unlock(); } /* Send a can_frame to a TTY queue. */ @@ -644,10 +651,11 @@ static void slcan_close(struct tty_struct *tty) return; spin_lock_bh(&sl->lock); - tty->disc_data = NULL; + rcu_assign_pointer(tty->disc_data, NULL); sl->tty = NULL; spin_unlock_bh(&sl->lock); + synchronize_rcu(); flush_work(&sl->tx_work); /* Flush network side */ diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 120fa05a39ff..0a8624be44a9 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -2164,8 +2164,8 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv, DMA_END_ADDR); /* Initialize Tx NAPI */ - netif_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, - NAPI_POLL_WEIGHT); + netif_tx_napi_add(priv->dev, &ring->napi, bcmgenet_tx_poll, + NAPI_POLL_WEIGHT); } /* Initialize a RDMA ring */ diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 58f89f6a040f..97ff8608f0ab 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -2448,6 +2448,8 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) if (!is_offload(adapter)) return -EOPNOTSUPP; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; if (!(adapter->flags & FULL_INIT_DONE)) return -EIO; /* need the memory controllers */ if (copy_from_user(&t, useraddr, sizeof(t))) diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index aca9f7a20a2a..4144c230dc97 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -70,8 +70,7 @@ static void *seq_tab_start(struct seq_file *seq, loff_t *pos) static void *seq_tab_next(struct seq_file *seq, void *v, loff_t *pos) { v = seq_tab_get_idx(seq->private, *pos + 1); - if (v) - ++*pos; + ++(*pos); return v; } diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index e9e45006632d..1a16449e9deb 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -678,8 +678,7 @@ static void *l2t_seq_start(struct seq_file *seq, loff_t *pos) static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos) { v = l2t_get_idx(seq, *pos); - if (v) - ++*pos; + ++(*pos); return v; } diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 41c6fa200e74..e1901874c19f 100644 --- 
a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -110,7 +110,7 @@ do { \ /* Interface Mode Register (IF_MODE) */ #define IF_MODE_MASK 0x00000003 /* 30-31 Mask on i/f mode bits */ -#define IF_MODE_XGMII 0x00000000 /* 30-31 XGMII (10G) interface */ +#define IF_MODE_10G 0x00000000 /* 30-31 10G interface */ #define IF_MODE_GMII 0x00000002 /* 30-31 GMII (1G) interface */ #define IF_MODE_RGMII 0x00000004 #define IF_MODE_RGMII_AUTO 0x00008000 @@ -440,7 +440,7 @@ static int init(struct memac_regs __iomem *regs, struct memac_cfg *cfg, tmp = 0; switch (phy_if) { case PHY_INTERFACE_MODE_XGMII: - tmp |= IF_MODE_XGMII; + tmp |= IF_MODE_10G; break; default: tmp |= IF_MODE_GMII; diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c index e03b30c60dcf..c82c85ef5fb3 100644 --- a/drivers/net/ethernet/freescale/xgmac_mdio.c +++ b/drivers/net/ethernet/freescale/xgmac_mdio.c @@ -49,6 +49,7 @@ struct tgec_mdio_controller { struct mdio_fsl_priv { struct tgec_mdio_controller __iomem *mdio_base; bool is_little_endian; + bool has_a011043; }; static u32 xgmac_read32(void __iomem *regs, @@ -226,7 +227,8 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) return ret; /* Return all Fs if nothing was there */ - if (xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) { + if ((xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) && + !priv->has_a011043) { dev_err(&bus->dev, "Error while reading PHY%d reg at %d.%hhu\n", phy_id, dev_addr, regnum); @@ -274,6 +276,9 @@ static int xgmac_mdio_probe(struct platform_device *pdev) priv->is_little_endian = of_property_read_bool(pdev->dev.of_node, "little-endian"); + priv->has_a011043 = of_property_read_bool(pdev->dev.of_node, + "fsl,erratum-a011043"); + ret = of_mdiobus_register(bus, np); if (ret) { dev_err(&pdev->dev, "cannot register MDIO bus\n"); diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index d4055037af89..45b90eb11adb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -1113,7 +1113,7 @@ i40e_status i40e_read_pba_string(struct i40e_hw *hw, u8 *pba_num, */ pba_size--; if (pba_num_size < (((u32)pba_size * 2) + 1)) { - hw_dbg(hw, "Buffer to small for PBA data.\n"); + hw_dbg(hw, "Buffer too small for PBA data.\n"); return I40E_ERR_PARAM; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 778dab1af8fc..f260dd96873b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -180,7 +180,7 @@ mlx5e_ktls_tx_post_param_wqes(struct mlx5e_txqsq *sq, struct tx_sync_info { u64 rcd_sn; - s32 sync_len; + u32 sync_len; int nr_frags; skb_frag_t frags[MAX_SKB_FRAGS]; }; @@ -193,13 +193,14 @@ enum mlx5e_ktls_sync_retval { static enum mlx5e_ktls_sync_retval tx_sync_info_get(struct mlx5e_ktls_offload_context_tx *priv_tx, - u32 tcp_seq, struct tx_sync_info *info) + u32 tcp_seq, int datalen, struct tx_sync_info *info) { struct tls_offload_context_tx *tx_ctx = priv_tx->tx_ctx; enum mlx5e_ktls_sync_retval ret = MLX5E_KTLS_SYNC_DONE; struct tls_record_info *record; int remaining, i = 0; unsigned long flags; + bool ends_before; spin_lock_irqsave(&tx_ctx->lock, flags); record = tls_get_record(tx_ctx, tcp_seq, &info->rcd_sn); @@ -209,9 +210,21 @@ tx_sync_info_get(struct 
mlx5e_ktls_offload_context_tx *priv_tx, goto out; } - if (unlikely(tcp_seq < tls_record_start_seq(record))) { - ret = tls_record_is_start_marker(record) ? - MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; + /* There are the following cases: + * 1. packet ends before start marker: bypass offload. + * 2. packet starts before start marker and ends after it: drop, + * not supported, breaks contract with kernel. + * 3. packet ends before tls record info starts: drop, + * this packet was already acknowledged and its record info + * was released. + */ + ends_before = before(tcp_seq + datalen, tls_record_start_seq(record)); + + if (unlikely(tls_record_is_start_marker(record))) { + ret = ends_before ? MLX5E_KTLS_SYNC_SKIP_NO_DATA : MLX5E_KTLS_SYNC_FAIL; + goto out; + } else if (ends_before) { + ret = MLX5E_KTLS_SYNC_FAIL; goto out; } @@ -337,7 +350,7 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, u8 num_wqebbs; int i = 0; - ret = tx_sync_info_get(priv_tx, seq, &info); + ret = tx_sync_info_get(priv_tx, seq, datalen, &info); if (unlikely(ret != MLX5E_KTLS_SYNC_DONE)) { if (ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA) { stats->tls_skip_no_sync_data++; @@ -351,14 +364,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, goto err_out; } - if (unlikely(info.sync_len < 0)) { - if (likely(datalen <= -info.sync_len)) - return MLX5E_KTLS_SYNC_DONE; - - stats->tls_drop_bypass_req++; - goto err_out; - } - stats->tls_ooo++; tx_post_resync_params(sq, priv_tx, info.rcd_sn); @@ -378,8 +383,6 @@ mlx5e_ktls_tx_handle_ooo(struct mlx5e_ktls_offload_context_tx *priv_tx, if (unlikely(contig_wqebbs_room < num_wqebbs)) mlx5e_fill_sq_frag_edge(sq, wq, pi, contig_wqebbs_room); - tx_post_resync_params(sq, priv_tx, info.rcd_sn); - for (; i < info.nr_frags; i++) { unsigned int orig_fsz, frag_offset = 0, n = 0; skb_frag_t *f = &info.frags[i]; @@ -455,12 +458,18 @@ struct sk_buff *mlx5e_ktls_handle_tx_skb(struct net_device *netdev, enum mlx5e_ktls_sync_retval ret = mlx5e_ktls_tx_handle_ooo(priv_tx, sq, datalen, seq); - if (likely(ret == MLX5E_KTLS_SYNC_DONE)) + switch (ret) { + case MLX5E_KTLS_SYNC_DONE: *wqe = mlx5e_sq_fetch_wqe(sq, sizeof(**wqe), pi); - else if (ret == MLX5E_KTLS_SYNC_FAIL) + break; + case MLX5E_KTLS_SYNC_SKIP_NO_DATA: + if (likely(!skb->decrypted)) + goto out; + WARN_ON_ONCE(1); + /* fall-through */ + default: /* MLX5E_KTLS_SYNC_FAIL */ goto err_out; - else /* ret == MLX5E_KTLS_SYNC_SKIP_NO_DATA */ - goto out; + } } priv_tx->expected_seq = seq + datalen; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 024e1cddfd0e..7e32b9e3667c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -4036,6 +4036,13 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, u32 rate_mbps; int err; + vport_num = rpriv->rep->vport; + if (vport_num >= MLX5_VPORT_ECPF) { + NL_SET_ERR_MSG_MOD(extack, + "Ingress rate limit is supported only for Eswitch ports connected to VFs"); + return -EOPNOTSUPP; + } + esw = priv->mdev->priv.eswitch; /* rate is given in bytes/sec. * First convert to bits/sec and then round to the nearest mbit/secs. @@ -4044,8 +4051,6 @@ static int apply_police_params(struct mlx5e_priv *priv, u32 rate, * 1 mbit/sec. */ rate_mbps = rate ? 
max_t(u32, (rate * 8 + 500000) / 1000000, 1) : 0; - vport_num = rpriv->rep->vport; - err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); if (err) NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 2c965ad0d744..3df3604e8929 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1928,8 +1928,10 @@ static void mlx5_eswitch_clear_vf_vports_info(struct mlx5_eswitch *esw) struct mlx5_vport *vport; int i; - mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) { memset(&vport->info, 0, sizeof(vport->info)); + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO; + } } /* Public E-Switch API */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 243a5440867e..3e6412783078 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -866,7 +866,7 @@ out: */ #define ESW_SIZE (16 * 1024 * 1024) const unsigned int ESW_POOLS[4] = { 4 * 1024 * 1024, 1 * 1024 * 1024, - 64 * 1024, 4 * 1024 }; + 64 * 1024, 128 }; static int get_sz_from_pool(struct mlx5_eswitch *esw) @@ -1377,7 +1377,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, return -EINVAL; } - mlx5_eswitch_disable(esw, false); + mlx5_eswitch_disable(esw, true); mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs); err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS); if (err) { @@ -2220,7 +2220,8 @@ int mlx5_esw_funcs_changed_handler(struct notifier_block *nb, unsigned long type int esw_offloads_enable(struct mlx5_eswitch *esw) { - int err; + struct mlx5_vport *vport; + int err, i; if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) && MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, decap)) @@ -2237,6 +2238,10 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_vport_metadata; + /* Representor will control the vport link state */ + mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs) + vport->info.link_state = MLX5_VPORT_ADMIN_STATE_DOWN; + err = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_VPORT_UC_ADDR_CHANGE); if (err) goto err_vports; @@ -2266,7 +2271,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, { int err, err1; - mlx5_eswitch_disable(esw, false); + mlx5_eswitch_disable(esw, true); err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index cf7b8da0f010..f554cfddcf4e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1563,6 +1563,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = { { PCI_VDEVICE(MELLANOX, 0x101d) }, /* ConnectX-6 Dx */ { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF}, /* ConnectX Family mlx5Gen Virtual Function */ { PCI_VDEVICE(MELLANOX, 0x101f) }, /* ConnectX-6 LX */ + { PCI_VDEVICE(MELLANOX, 0x1021) }, /* ConnectX-7 */ { PCI_VDEVICE(MELLANOX, 0xa2d2) }, /* BlueField integrated ConnectX-5 network controller */ { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF}, /* BlueField integrated ConnectX-5 network controller VF */ { PCI_VDEVICE(MELLANOX, 0xa2d6) }, /* 
BlueField-2 integrated ConnectX-6 Dx network controller */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 51803eef13dd..c7f10d4f8f8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2019 Mellanox Technologies. */ +#include <linux/smp.h> #include "dr_types.h" #define QUEUE_SIZE 128 @@ -729,7 +730,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, if (!in) goto err_cqwq; - vector = smp_processor_id() % mlx5_comp_vectors_count(mdev); + vector = raw_smp_processor_id() % mlx5_comp_vectors_count(mdev); err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn); if (err) { kvfree(in); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index 3d587d0bdbbe..1e32e2443f73 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -352,26 +352,16 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { list_for_each_entry(dst, &fte->node.children, node.list) { enum mlx5_flow_destination_type type = dst->dest_attr.type; - u32 id; if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { err = -ENOSPC; goto free_actions; } - switch (type) { - case MLX5_FLOW_DESTINATION_TYPE_COUNTER: - id = dst->dest_attr.counter_id; + if (type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; - tmp_action = - mlx5dr_action_create_flow_counter(id); - if (!tmp_action) { - err = -ENOMEM; - goto free_actions; - } - fs_dr_actions[fs_dr_num_actions++] = tmp_action; - actions[num_actions++] = tmp_action; - break; + switch (type) { case MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE: tmp_action = create_ft_action(dev, dst); if (!tmp_action) { @@ -397,6 +387,32 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, } } + if (fte->action.action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { + list_for_each_entry(dst, &fte->node.children, node.list) { + u32 id; + + if (dst->dest_attr.type != + MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + + if (num_actions == MLX5_FLOW_CONTEXT_ACTION_MAX) { + err = -ENOSPC; + goto free_actions; + } + + id = dst->dest_attr.counter_id; + tmp_action = + mlx5dr_action_create_flow_counter(id); + if (!tmp_action) { + err = -ENOMEM; + goto free_actions; + } + + fs_dr_actions[fs_dr_num_actions++] = tmp_action; + actions[num_actions++] = tmp_action; + } + } + params.match_sz = match_sz; params.match_buf = (u64 *)fte->val; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index 150b3a144b83..3d3cca596116 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -8,6 +8,7 @@ #include <linux/string.h> #include <linux/rhashtable.h> #include <linux/netdevice.h> +#include <linux/mutex.h> #include <net/net_namespace.h> #include <net/tc_act/tc_vlan.h> @@ -25,6 +26,7 @@ struct mlxsw_sp_acl { struct mlxsw_sp_fid *dummy_fid; struct rhashtable ruleset_ht; struct list_head rules; + struct mutex rules_lock; /* Protects rules list */ struct { struct delayed_work dw; unsigned long interval; /* ms */ @@ -701,7 +703,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, goto 
err_ruleset_block_bind; } + mutex_lock(&mlxsw_sp->acl->rules_lock); list_add_tail(&rule->list, &mlxsw_sp->acl->rules); + mutex_unlock(&mlxsw_sp->acl->rules_lock); block->rule_count++; block->egress_blocker_rule_count += rule->rulei->egress_bind_blocker; return 0; @@ -723,7 +727,9 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, block->egress_blocker_rule_count -= rule->rulei->egress_bind_blocker; ruleset->ht_key.block->rule_count--; + mutex_lock(&mlxsw_sp->acl->rules_lock); list_del(&rule->list); + mutex_unlock(&mlxsw_sp->acl->rules_lock); if (!ruleset->ht_key.chain_index && mlxsw_sp_acl_ruleset_is_singular(ruleset)) mlxsw_sp_acl_ruleset_block_unbind(mlxsw_sp, ruleset, @@ -783,19 +789,18 @@ static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl) struct mlxsw_sp_acl_rule *rule; int err; - /* Protect internal structures from changes */ - rtnl_lock(); + mutex_lock(&acl->rules_lock); list_for_each_entry(rule, &acl->rules, list) { err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp, rule); if (err) goto err_rule_update; } - rtnl_unlock(); + mutex_unlock(&acl->rules_lock); return 0; err_rule_update: - rtnl_unlock(); + mutex_unlock(&acl->rules_lock); return err; } @@ -880,6 +885,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) acl->dummy_fid = fid; INIT_LIST_HEAD(&acl->rules); + mutex_init(&acl->rules_lock); err = mlxsw_sp_acl_tcam_init(mlxsw_sp, &acl->tcam); if (err) goto err_acl_ops_init; @@ -892,6 +898,7 @@ int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp) return 0; err_acl_ops_init: + mutex_destroy(&acl->rules_lock); mlxsw_sp_fid_put(fid); err_fid_get: rhashtable_destroy(&acl->ruleset_ht); @@ -908,6 +915,7 @@ void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp) cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw); mlxsw_sp_acl_tcam_fini(mlxsw_sp, &acl->tcam); + mutex_destroy(&acl->rules_lock); WARN_ON(!list_empty(&acl->rules)); mlxsw_sp_fid_put(acl->dummy_fid); rhashtable_destroy(&acl->ruleset_ht); diff --git a/drivers/net/ethernet/natsemi/sonic.c b/drivers/net/ethernet/natsemi/sonic.c index b339125b2f09..05e760444a92 100644 --- a/drivers/net/ethernet/natsemi/sonic.c +++ b/drivers/net/ethernet/natsemi/sonic.c @@ -64,6 +64,8 @@ static int sonic_open(struct net_device *dev) netif_dbg(lp, ifup, dev, "%s: initializing sonic driver\n", __func__); + spin_lock_init(&lp->lock); + for (i = 0; i < SONIC_NUM_RRS; i++) { struct sk_buff *skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); if (skb == NULL) { @@ -114,6 +116,24 @@ static int sonic_open(struct net_device *dev) return 0; } +/* Wait for the SONIC to become idle. */ +static void sonic_quiesce(struct net_device *dev, u16 mask) +{ + struct sonic_local * __maybe_unused lp = netdev_priv(dev); + int i; + u16 bits; + + for (i = 0; i < 1000; ++i) { + bits = SONIC_READ(SONIC_CMD) & mask; + if (!bits) + return; + if (irqs_disabled() || in_interrupt()) + udelay(20); + else + usleep_range(100, 200); + } + WARN_ONCE(1, "command deadline expired! 
0x%04x\n", bits); +} /* * Close the SONIC device @@ -130,6 +150,9 @@ static int sonic_close(struct net_device *dev) /* * stop the SONIC, disable interrupts */ + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + sonic_quiesce(dev, SONIC_CR_ALL); + SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); @@ -169,6 +192,9 @@ static void sonic_tx_timeout(struct net_device *dev) * put the Sonic into software-reset mode and * disable all interrupts before releasing DMA buffers */ + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + sonic_quiesce(dev, SONIC_CR_ALL); + SONIC_WRITE(SONIC_IMR, 0); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); @@ -206,8 +232,6 @@ static void sonic_tx_timeout(struct net_device *dev) * wake the tx queue * Concurrently with all of this, the SONIC is potentially writing to * the status flags of the TDs. - * Until some mutual exclusion is added, this code will not work with SMP. However, - * MIPS Jazz machines and m68k Macs were all uni-processor machines. */ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) @@ -215,7 +239,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) struct sonic_local *lp = netdev_priv(dev); dma_addr_t laddr; int length; - int entry = lp->next_tx; + int entry; + unsigned long flags; netif_dbg(lp, tx_queued, dev, "%s: skb=%p\n", __func__, skb); @@ -237,6 +262,10 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } + spin_lock_irqsave(&lp->lock, flags); + + entry = lp->next_tx; + sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0); /* clear status */ sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1); /* single fragment */ sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */ @@ -246,10 +275,6 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) sonic_tda_put(dev, entry, SONIC_TD_LINK, sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL); - /* - * Must set tx_skb[entry] only after clearing status, and - * before clearing EOL and before stopping queue - */ wmb(); lp->tx_len[entry] = length; lp->tx_laddr[entry] = laddr; @@ -272,6 +297,8 @@ static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev) SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); + spin_unlock_irqrestore(&lp->lock, flags); + return NETDEV_TX_OK; } @@ -284,15 +311,28 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) struct net_device *dev = dev_id; struct sonic_local *lp = netdev_priv(dev); int status; + unsigned long flags; + + /* The lock has two purposes. Firstly, it synchronizes sonic_interrupt() + * with sonic_send_packet() so that the two functions can share state. + * Secondly, it makes sonic_interrupt() re-entrant, as that is required + * by macsonic which must use two IRQs with different priority levels. 
+ */ + spin_lock_irqsave(&lp->lock, flags); + + status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; + if (!status) { + spin_unlock_irqrestore(&lp->lock, flags); - if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)) return IRQ_NONE; + } do { + SONIC_WRITE(SONIC_ISR, status); /* clear the interrupt(s) */ + if (status & SONIC_INT_PKTRX) { netif_dbg(lp, intr, dev, "%s: packet rx\n", __func__); sonic_rx(dev); /* got packet(s) */ - SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */ } if (status & SONIC_INT_TXDN) { @@ -300,11 +340,12 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) int td_status; int freed_some = 0; - /* At this point, cur_tx is the index of a TD that is one of: - * unallocated/freed (status set & tx_skb[entry] clear) - * allocated and sent (status set & tx_skb[entry] set ) - * allocated and not yet sent (status clear & tx_skb[entry] set ) - * still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear) + /* The state of a Transmit Descriptor may be inferred + * from { tx_skb[entry], td_status } as follows. + * { clear, clear } => the TD has never been used + * { set, clear } => the TD was handed to SONIC + * { set, set } => the TD was handed back + * { clear, set } => the TD is available for re-use */ netif_dbg(lp, intr, dev, "%s: tx done\n", __func__); @@ -313,18 +354,19 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0) break; - if (td_status & 0x0001) { + if (td_status & SONIC_TCR_PTX) { lp->stats.tx_packets++; lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE); } else { - lp->stats.tx_errors++; - if (td_status & 0x0642) + if (td_status & (SONIC_TCR_EXD | + SONIC_TCR_EXC | SONIC_TCR_BCM)) lp->stats.tx_aborted_errors++; - if (td_status & 0x0180) + if (td_status & + (SONIC_TCR_NCRS | SONIC_TCR_CRLS)) lp->stats.tx_carrier_errors++; - if (td_status & 0x0020) + if (td_status & SONIC_TCR_OWC) lp->stats.tx_window_errors++; - if (td_status & 0x0004) + if (td_status & SONIC_TCR_FU) lp->stats.tx_fifo_errors++; } @@ -346,7 +388,6 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) if (freed_some || lp->tx_skb[entry] == NULL) netif_wake_queue(dev); /* The ring is no longer full */ lp->cur_tx = entry; - SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */ } /* @@ -355,42 +396,37 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) if (status & SONIC_INT_RFO) { netif_dbg(lp, rx_err, dev, "%s: rx fifo overrun\n", __func__); - lp->stats.rx_fifo_errors++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */ } if (status & SONIC_INT_RDE) { netif_dbg(lp, rx_err, dev, "%s: rx descriptors exhausted\n", __func__); - lp->stats.rx_dropped++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */ } if (status & SONIC_INT_RBAE) { netif_dbg(lp, rx_err, dev, "%s: rx buffer area exceeded\n", __func__); - lp->stats.rx_dropped++; - SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */ } /* counter overruns; all counters are 16bit wide */ - if (status & SONIC_INT_FAE) { + if (status & SONIC_INT_FAE) lp->stats.rx_frame_errors += 65536; - SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */ - } - if (status & SONIC_INT_CRC) { + if (status & SONIC_INT_CRC) lp->stats.rx_crc_errors += 65536; - SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */ - } - if (status & SONIC_INT_MP) { + if (status & SONIC_INT_MP) lp->stats.rx_missed_errors += 65536; - SONIC_WRITE(SONIC_ISR, 
SONIC_INT_MP); /* clear the interrupt */ - } /* transmit error */ if (status & SONIC_INT_TXER) { - if (SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) - netif_dbg(lp, tx_err, dev, "%s: tx fifo underrun\n", - __func__); - SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */ + u16 tcr = SONIC_READ(SONIC_TCR); + + netif_dbg(lp, tx_err, dev, "%s: TXER intr, TCR %04x\n", + __func__, tcr); + + if (tcr & (SONIC_TCR_EXD | SONIC_TCR_EXC | + SONIC_TCR_FU | SONIC_TCR_BCM)) { + /* Aborted transmission. Try again. */ + netif_stop_queue(dev); + SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP); + } } /* bus retry */ @@ -400,107 +436,164 @@ static irqreturn_t sonic_interrupt(int irq, void *dev_id) /* ... to help debug DMA problems causing endless interrupts. */ /* Bounce the eth interface to turn on the interrupt again. */ SONIC_WRITE(SONIC_IMR, 0); - SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */ } - /* load CAM done */ - if (status & SONIC_INT_LCD) - SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */ - } while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT)); + status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT; + } while (status); + + spin_unlock_irqrestore(&lp->lock, flags); + return IRQ_HANDLED; } +/* Return the array index corresponding to a given Receive Buffer pointer. */ +static int index_from_addr(struct sonic_local *lp, dma_addr_t addr, + unsigned int last) +{ + unsigned int i = last; + + do { + i = (i + 1) & SONIC_RRS_MASK; + if (addr == lp->rx_laddr[i]) + return i; + } while (i != last); + + return -ENOENT; +} + +/* Allocate and map a new skb to be used as a receive buffer. */ +static bool sonic_alloc_rb(struct net_device *dev, struct sonic_local *lp, + struct sk_buff **new_skb, dma_addr_t *new_addr) +{ + *new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); + if (!*new_skb) + return false; + + if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2) + skb_reserve(*new_skb, 2); + + *new_addr = dma_map_single(lp->device, skb_put(*new_skb, SONIC_RBSIZE), + SONIC_RBSIZE, DMA_FROM_DEVICE); + if (!*new_addr) { + dev_kfree_skb(*new_skb); + *new_skb = NULL; + return false; + } + + return true; +} + +/* Place a new receive resource in the Receive Resource Area and update RWP. */ +static void sonic_update_rra(struct net_device *dev, struct sonic_local *lp, + dma_addr_t old_addr, dma_addr_t new_addr) +{ + unsigned int entry = sonic_rr_entry(dev, SONIC_READ(SONIC_RWP)); + unsigned int end = sonic_rr_entry(dev, SONIC_READ(SONIC_RRP)); + u32 buf; + + /* The resources in the range [RRP, RWP) belong to the SONIC. This loop + * scans the other resources in the RRA, those in the range [RWP, RRP). + */ + do { + buf = (sonic_rra_get(dev, entry, SONIC_RR_BUFADR_H) << 16) | + sonic_rra_get(dev, entry, SONIC_RR_BUFADR_L); + + if (buf == old_addr) + break; + + entry = (entry + 1) & SONIC_RRS_MASK; + } while (entry != end); + + WARN_ONCE(buf != old_addr, "failed to find resource!\n"); + + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, new_addr >> 16); + sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, new_addr & 0xffff); + + entry = (entry + 1) & SONIC_RRS_MASK; + + SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, entry)); +} + /* * We have a good packet(s), pass it/them up the network stack. 
*/ static void sonic_rx(struct net_device *dev) { struct sonic_local *lp = netdev_priv(dev); - int status; int entry = lp->cur_rx; + int prev_entry = lp->eol_rx; + bool rbe = false; while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) { - struct sk_buff *used_skb; - struct sk_buff *new_skb; - dma_addr_t new_laddr; - u16 bufadr_l; - u16 bufadr_h; - int pkt_len; - - status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); - if (status & SONIC_RCR_PRX) { - /* Malloc up new buffer. */ - new_skb = netdev_alloc_skb(dev, SONIC_RBSIZE + 2); - if (new_skb == NULL) { - lp->stats.rx_dropped++; + u16 status = sonic_rda_get(dev, entry, SONIC_RD_STATUS); + + /* If the RD has LPKT set, the chip has finished with the RB */ + if ((status & SONIC_RCR_PRX) && (status & SONIC_RCR_LPKT)) { + struct sk_buff *new_skb; + dma_addr_t new_laddr; + u32 addr = (sonic_rda_get(dev, entry, + SONIC_RD_PKTPTR_H) << 16) | + sonic_rda_get(dev, entry, SONIC_RD_PKTPTR_L); + int i = index_from_addr(lp, addr, entry); + + if (i < 0) { + WARN_ONCE(1, "failed to find buffer!\n"); break; } - /* provide 16 byte IP header alignment unless DMA requires otherwise */ - if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2) - skb_reserve(new_skb, 2); - - new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE), - SONIC_RBSIZE, DMA_FROM_DEVICE); - if (!new_laddr) { - dev_kfree_skb(new_skb); - printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name); + + if (sonic_alloc_rb(dev, lp, &new_skb, &new_laddr)) { + struct sk_buff *used_skb = lp->rx_skb[i]; + int pkt_len; + + /* Pass the used buffer up the stack */ + dma_unmap_single(lp->device, addr, SONIC_RBSIZE, + DMA_FROM_DEVICE); + + pkt_len = sonic_rda_get(dev, entry, + SONIC_RD_PKTLEN); + skb_trim(used_skb, pkt_len); + used_skb->protocol = eth_type_trans(used_skb, + dev); + netif_rx(used_skb); + lp->stats.rx_packets++; + lp->stats.rx_bytes += pkt_len; + + lp->rx_skb[i] = new_skb; + lp->rx_laddr[i] = new_laddr; + } else { + /* Failed to obtain a new buffer so re-use it */ + new_laddr = addr; lp->stats.rx_dropped++; - break; } - - /* now we have a new skb to replace it, pass the used one up the stack */ - dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE); - used_skb = lp->rx_skb[entry]; - pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN); - skb_trim(used_skb, pkt_len); - used_skb->protocol = eth_type_trans(used_skb, dev); - netif_rx(used_skb); - lp->stats.rx_packets++; - lp->stats.rx_bytes += pkt_len; - - /* and insert the new skb */ - lp->rx_laddr[entry] = new_laddr; - lp->rx_skb[entry] = new_skb; - - bufadr_l = (unsigned long)new_laddr & 0xffff; - bufadr_h = (unsigned long)new_laddr >> 16; - sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l); - sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h); - } else { - /* This should only happen, if we enable accepting broken packets. */ - lp->stats.rx_errors++; - if (status & SONIC_RCR_FAER) - lp->stats.rx_frame_errors++; - if (status & SONIC_RCR_CRCR) - lp->stats.rx_crc_errors++; - } - if (status & SONIC_RCR_LPKT) { - /* - * this was the last packet out of the current receive buffer - * give the buffer back to the SONIC + /* If RBE is already asserted when RWP advances then + * it's safe to clear RBE after processing this packet. 
*/ - lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode); - if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff; - SONIC_WRITE(SONIC_RWP, lp->cur_rwp); - if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) { - netif_dbg(lp, rx_err, dev, "%s: rx buffer exhausted\n", - __func__); - SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */ - } - } else - printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n", - dev->name); + rbe = rbe || SONIC_READ(SONIC_ISR) & SONIC_INT_RBE; + sonic_update_rra(dev, lp, addr, new_laddr); + } /* * give back the descriptor */ - sonic_rda_put(dev, entry, SONIC_RD_LINK, - sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL); + sonic_rda_put(dev, entry, SONIC_RD_STATUS, 0); sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1); - sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, - sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL); - lp->eol_rx = entry; - lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK; + + prev_entry = entry; + entry = (entry + 1) & SONIC_RDS_MASK; + } + + lp->cur_rx = entry; + + if (prev_entry != lp->eol_rx) { + /* Advance the EOL flag to put descriptors back into service */ + sonic_rda_put(dev, prev_entry, SONIC_RD_LINK, SONIC_EOL | + sonic_rda_get(dev, prev_entry, SONIC_RD_LINK)); + sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK, ~SONIC_EOL & + sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK)); + lp->eol_rx = prev_entry; } + + if (rbe) + SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* * If any worth-while packets have been received, netif_rx() * has done a mark_bh(NET_BH) for us and will work on them @@ -550,6 +643,8 @@ static void sonic_multicast_list(struct net_device *dev) (netdev_mc_count(dev) > 15)) { rcr |= SONIC_RCR_AMC; } else { + unsigned long flags; + netif_dbg(lp, ifup, dev, "%s: mc_count %d\n", __func__, netdev_mc_count(dev)); sonic_set_cam_enable(dev, 1); /* always enable our own address */ @@ -563,9 +658,14 @@ static void sonic_multicast_list(struct net_device *dev) i++; } SONIC_WRITE(SONIC_CDC, 16); - /* issue Load CAM command */ SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff); + + /* LCAM and TXP commands can't be used simultaneously */ + spin_lock_irqsave(&lp->lock, flags); + sonic_quiesce(dev, SONIC_CR_TXP); SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); + sonic_quiesce(dev, SONIC_CR_LCAM); + spin_unlock_irqrestore(&lp->lock, flags); } } @@ -580,7 +680,6 @@ static void sonic_multicast_list(struct net_device *dev) */ static int sonic_init(struct net_device *dev) { - unsigned int cmd; struct sonic_local *lp = netdev_priv(dev); int i; @@ -592,12 +691,16 @@ static int sonic_init(struct net_device *dev) SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_CMD, SONIC_CR_RST); + /* While in reset mode, clear CAM Enable register */ + SONIC_WRITE(SONIC_CE, 0); + /* * clear software reset flag, disable receiver, clear and * enable interrupts, then completely initialize the SONIC */ SONIC_WRITE(SONIC_CMD, 0); - SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS); + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS | SONIC_CR_STP); + sonic_quiesce(dev, SONIC_CR_ALL); /* * initialize the receive resource area @@ -615,15 +718,10 @@ static int sonic_init(struct net_device *dev) } /* initialize all RRA registers */ - lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR * - SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; - lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR * - SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff; - - SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff); - SONIC_WRITE(SONIC_REA, lp->rra_end); - 
SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff); - SONIC_WRITE(SONIC_RWP, lp->cur_rwp); + SONIC_WRITE(SONIC_RSA, sonic_rr_addr(dev, 0)); + SONIC_WRITE(SONIC_REA, sonic_rr_addr(dev, SONIC_NUM_RRS)); + SONIC_WRITE(SONIC_RRP, sonic_rr_addr(dev, 0)); + SONIC_WRITE(SONIC_RWP, sonic_rr_addr(dev, SONIC_NUM_RRS - 1)); SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16); SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1)); @@ -631,14 +729,7 @@ static int sonic_init(struct net_device *dev) netif_dbg(lp, ifup, dev, "%s: issuing RRRA command\n", __func__); SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA); - i = 0; - while (i++ < 100) { - if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA) - break; - } - - netif_dbg(lp, ifup, dev, "%s: status=%x, i=%d\n", __func__, - SONIC_READ(SONIC_CMD), i); + sonic_quiesce(dev, SONIC_CR_RRRA); /* * Initialize the receive descriptors so that they @@ -713,28 +804,17 @@ static int sonic_init(struct net_device *dev) * load the CAM */ SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM); - - i = 0; - while (i++ < 100) { - if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD) - break; - } - netif_dbg(lp, ifup, dev, "%s: CMD=%x, ISR=%x, i=%d\n", __func__, - SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i); + sonic_quiesce(dev, SONIC_CR_LCAM); /* * enable receiver, disable loopback * and enable all interrupts */ - SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP); SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT); SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT); SONIC_WRITE(SONIC_ISR, 0x7fff); SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT); - - cmd = SONIC_READ(SONIC_CMD); - if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0) - printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd); + SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN); netif_dbg(lp, ifup, dev, "%s: new status=%x\n", __func__, SONIC_READ(SONIC_CMD)); diff --git a/drivers/net/ethernet/natsemi/sonic.h b/drivers/net/ethernet/natsemi/sonic.h index 2b27f7049acb..1df6d2f06cc4 100644 --- a/drivers/net/ethernet/natsemi/sonic.h +++ b/drivers/net/ethernet/natsemi/sonic.h @@ -110,6 +110,9 @@ #define SONIC_CR_TXP 0x0002 #define SONIC_CR_HTX 0x0001 +#define SONIC_CR_ALL (SONIC_CR_LCAM | SONIC_CR_RRRA | \ + SONIC_CR_RXEN | SONIC_CR_TXP) + /* * SONIC data configuration bits */ @@ -175,6 +178,7 @@ #define SONIC_TCR_NCRS 0x0100 #define SONIC_TCR_CRLS 0x0080 #define SONIC_TCR_EXC 0x0040 +#define SONIC_TCR_OWC 0x0020 #define SONIC_TCR_PMB 0x0008 #define SONIC_TCR_FU 0x0004 #define SONIC_TCR_BCM 0x0002 @@ -274,8 +278,9 @@ #define SONIC_NUM_RDS SONIC_NUM_RRS /* number of receive descriptors */ #define SONIC_NUM_TDS 16 /* number of transmit descriptors */ -#define SONIC_RDS_MASK (SONIC_NUM_RDS-1) -#define SONIC_TDS_MASK (SONIC_NUM_TDS-1) +#define SONIC_RRS_MASK (SONIC_NUM_RRS - 1) +#define SONIC_RDS_MASK (SONIC_NUM_RDS - 1) +#define SONIC_TDS_MASK (SONIC_NUM_TDS - 1) #define SONIC_RBSIZE 1520 /* size of one resource buffer */ @@ -312,8 +317,6 @@ struct sonic_local { u32 rda_laddr; /* logical DMA address of RDA */ dma_addr_t rx_laddr[SONIC_NUM_RRS]; /* logical DMA addresses of rx skbuffs */ dma_addr_t tx_laddr[SONIC_NUM_TDS]; /* logical DMA addresses of tx skbuffs */ - unsigned int rra_end; - unsigned int cur_rwp; unsigned int cur_rx; unsigned int cur_tx; /* first unacked transmit packet */ unsigned int eol_rx; @@ -322,6 +325,7 @@ struct sonic_local { int msg_enable; struct device *device; /* generic device */ struct net_device_stats stats; + spinlock_t lock; }; #define TX_TIMEOUT (3 * HZ) @@ -344,30 +348,30 @@ static void sonic_msg_init(struct net_device *dev); as far as we can 
tell. */ /* OpenBSD calls this "SWO". I'd like to think that sonic_buf_put() is a much better name. */ -static inline void sonic_buf_put(void* base, int bitmode, +static inline void sonic_buf_put(u16 *base, int bitmode, int offset, __u16 val) { if (bitmode) #ifdef __BIG_ENDIAN - ((__u16 *) base + (offset*2))[1] = val; + __raw_writew(val, base + (offset * 2) + 1); #else - ((__u16 *) base + (offset*2))[0] = val; + __raw_writew(val, base + (offset * 2) + 0); #endif else - ((__u16 *) base)[offset] = val; + __raw_writew(val, base + (offset * 1) + 0); } -static inline __u16 sonic_buf_get(void* base, int bitmode, +static inline __u16 sonic_buf_get(u16 *base, int bitmode, int offset) { if (bitmode) #ifdef __BIG_ENDIAN - return ((volatile __u16 *) base + (offset*2))[1]; + return __raw_readw(base + (offset * 2) + 1); #else - return ((volatile __u16 *) base + (offset*2))[0]; + return __raw_readw(base + (offset * 2) + 0); #endif else - return ((volatile __u16 *) base)[offset]; + return __raw_readw(base + (offset * 1) + 0); } /* Inlines that you should actually use for reading/writing DMA buffers */ @@ -447,6 +451,22 @@ static inline __u16 sonic_rra_get(struct net_device* dev, int entry, (entry * SIZEOF_SONIC_RR) + offset); } +static inline u16 sonic_rr_addr(struct net_device *dev, int entry) +{ + struct sonic_local *lp = netdev_priv(dev); + + return lp->rra_laddr + + entry * SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode); +} + +static inline u16 sonic_rr_entry(struct net_device *dev, u16 addr) +{ + struct sonic_local *lp = netdev_priv(dev); + + return (addr - (u16)lp->rra_laddr) / (SIZEOF_SONIC_RR * + SONIC_BUS_SCALE(lp->dma_bitmode)); +} + static const char version[] = "sonic.c:v0.92 20.9.98 tsbogend@alpha.franken.de\n"; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c index a496390b8632..07f9067affc6 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c @@ -2043,6 +2043,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev, break; } entry += p_hdr->size; + cond_resched(); } p_dev->ahw->reset.seq_index = index; } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c index afa10a163da1..f34ae8c75bc5 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_minidump.c @@ -703,6 +703,7 @@ static u32 qlcnic_read_memory_test_agent(struct qlcnic_adapter *adapter, addr += 16; reg_read -= 16; ret += 16; + cond_resched(); } out: mutex_unlock(&adapter->ahw->mem_lock); @@ -1383,6 +1384,7 @@ int qlcnic_dump_fw(struct qlcnic_adapter *adapter) buf_offset += entry->hdr.cap_size; entry_offset += entry->hdr.offset; buffer = fw_dump->data + buf_offset; + cond_resched(); } fw_dump->clr = 1; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 4775f49d7f3b..d10ac54bf385 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -412,9 +412,9 @@ stmmac_probe_config_dt(struct platform_device *pdev, const char **mac) *mac = NULL; } - rc = of_get_phy_mode(np, &plat->phy_interface); - if (rc) - return ERR_PTR(rc); + plat->phy_interface = device_get_phy_mode(&pdev->dev); + if (plat->phy_interface < 0) + return ERR_PTR(plat->phy_interface); plat->interface = stmmac_of_get_mac_mode(np); if 
(plat->interface < 0) diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c index f6222ada6818..9b3ba98726d7 100644 --- a/drivers/net/gtp.c +++ b/drivers/net/gtp.c @@ -804,19 +804,21 @@ static struct sock *gtp_encap_enable_socket(int fd, int type, return NULL; } - if (sock->sk->sk_protocol != IPPROTO_UDP) { + sk = sock->sk; + if (sk->sk_protocol != IPPROTO_UDP || + sk->sk_type != SOCK_DGRAM || + (sk->sk_family != AF_INET && sk->sk_family != AF_INET6)) { pr_debug("socket fd=%d not UDP\n", fd); sk = ERR_PTR(-EINVAL); goto out_sock; } - lock_sock(sock->sk); - if (sock->sk->sk_user_data) { + lock_sock(sk); + if (sk->sk_user_data) { sk = ERR_PTR(-EBUSY); goto out_rel_sock; } - sk = sock->sk; sock_hold(sk); tuncfg.sk_user_data = gtp; diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c index 2a91c192659f..61d7e0d1d77d 100644 --- a/drivers/net/slip/slip.c +++ b/drivers/net/slip/slip.c @@ -452,9 +452,16 @@ static void slip_transmit(struct work_struct *work) */ static void slip_write_wakeup(struct tty_struct *tty) { - struct slip *sl = tty->disc_data; + struct slip *sl; + + rcu_read_lock(); + sl = rcu_dereference(tty->disc_data); + if (!sl) + goto out; schedule_work(&sl->tx_work); +out: + rcu_read_unlock(); } static void sl_tx_timeout(struct net_device *dev) @@ -882,10 +889,11 @@ static void slip_close(struct tty_struct *tty) return; spin_lock_bh(&sl->lock); - tty->disc_data = NULL; + rcu_assign_pointer(tty->disc_data, NULL); sl->tty = NULL; spin_unlock_bh(&sl->lock); + synchronize_rcu(); flush_work(&sl->tx_work); /* VSV = very important to remove timers */ diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 683d371e6e82..35e884a8242d 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1936,6 +1936,10 @@ drop: if (ret != XDP_PASS) { rcu_read_unlock(); local_bh_enable(); + if (frags) { + tfile->napi.skb = NULL; + mutex_unlock(&tfile->napi_mutex); + } return total_len; } } diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 75bdfae5f3e2..c2a58f05b9a1 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -20,6 +20,7 @@ #include <linux/mdio.h> #include <linux/phy.h> #include <net/ip6_checksum.h> +#include <net/vxlan.h> #include <linux/interrupt.h> #include <linux/irqdomain.h> #include <linux/irq.h> @@ -3668,6 +3669,19 @@ static void lan78xx_tx_timeout(struct net_device *net) tasklet_schedule(&dev->bh); } +static netdev_features_t lan78xx_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features) +{ + if (skb->len + TX_OVERHEAD > MAX_SINGLE_PACKET_SIZE) + features &= ~NETIF_F_GSO_MASK; + + features = vlan_features_check(skb, features); + features = vxlan_features_check(skb, features); + + return features; +} + static const struct net_device_ops lan78xx_netdev_ops = { .ndo_open = lan78xx_open, .ndo_stop = lan78xx_stop, @@ -3681,6 +3695,7 @@ static const struct net_device_ops lan78xx_netdev_ops = { .ndo_set_features = lan78xx_set_features, .ndo_vlan_rx_add_vid = lan78xx_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = lan78xx_vlan_rx_kill_vid, + .ndo_features_check = lan78xx_features_check, }; static void lan78xx_stat_monitor(struct timer_list *t) diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 031cb8fff909..3f425f974d03 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -31,7 +31,7 @@ #define NETNEXT_VERSION "11" /* Information for net */ -#define NET_VERSION "10" +#define NET_VERSION "11" #define DRIVER_VERSION "v1." NETNEXT_VERSION "." 
NET_VERSION #define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" @@ -68,6 +68,7 @@ #define PLA_LED_FEATURE 0xdd92 #define PLA_PHYAR 0xde00 #define PLA_BOOT_CTRL 0xe004 +#define PLA_LWAKE_CTRL_REG 0xe007 #define PLA_GPHY_INTR_IMR 0xe022 #define PLA_EEE_CR 0xe040 #define PLA_EEEP_CR 0xe080 @@ -95,6 +96,7 @@ #define PLA_TALLYCNT 0xe890 #define PLA_SFF_STS_7 0xe8de #define PLA_PHYSTATUS 0xe908 +#define PLA_CONFIG6 0xe90a /* CONFIG6 */ #define PLA_BP_BA 0xfc26 #define PLA_BP_0 0xfc28 #define PLA_BP_1 0xfc2a @@ -107,6 +109,7 @@ #define PLA_BP_EN 0xfc38 #define USB_USB2PHY 0xb41e +#define USB_SSPHYLINK1 0xb426 #define USB_SSPHYLINK2 0xb428 #define USB_U2P3_CTRL 0xb460 #define USB_CSR_DUMMY1 0xb464 @@ -300,6 +303,9 @@ #define LINK_ON_WAKE_EN 0x0010 #define LINK_OFF_WAKE_EN 0x0008 +/* PLA_CONFIG6 */ +#define LANWAKE_CLR_EN BIT(0) + /* PLA_CONFIG5 */ #define BWF_EN 0x0040 #define MWF_EN 0x0020 @@ -312,6 +318,7 @@ /* PLA_PHY_PWR */ #define TX_10M_IDLE_EN 0x0080 #define PFM_PWM_SWITCH 0x0040 +#define TEST_IO_OFF BIT(4) /* PLA_MAC_PWR_CTRL */ #define D3_CLK_GATED_EN 0x00004000 @@ -324,6 +331,7 @@ #define MAC_CLK_SPDWN_EN BIT(15) /* PLA_MAC_PWR_CTRL3 */ +#define PLA_MCU_SPDWN_EN BIT(14) #define PKT_AVAIL_SPDWN_EN 0x0100 #define SUSPEND_SPDWN_EN 0x0004 #define U1U2_SPDWN_EN 0x0002 @@ -354,6 +362,9 @@ /* PLA_BOOT_CTRL */ #define AUTOLOAD_DONE 0x0002 +/* PLA_LWAKE_CTRL_REG */ +#define LANWAKE_PIN BIT(7) + /* PLA_SUSPEND_FLAG */ #define LINK_CHG_EVENT BIT(0) @@ -365,13 +376,18 @@ #define DEBUG_LTSSM 0x0082 /* PLA_EXTRA_STATUS */ +#define CUR_LINK_OK BIT(15) #define U3P3_CHECK_EN BIT(7) /* RTL_VER_05 only */ #define LINK_CHANGE_FLAG BIT(8) +#define POLL_LINK_CHG BIT(0) /* USB_USB2PHY */ #define USB2PHY_SUSPEND 0x0001 #define USB2PHY_L1 0x0002 +/* USB_SSPHYLINK1 */ +#define DELAY_PHY_PWR_CHG BIT(1) + /* USB_SSPHYLINK2 */ #define pwd_dn_scale_mask 0x3ffe #define pwd_dn_scale(x) ((x) << 1) @@ -2863,6 +2879,17 @@ static int rtl8153_enable(struct r8152 *tp) r8153_set_rx_early_timeout(tp); r8153_set_rx_early_size(tp); + if (tp->version == RTL_VER_09) { + u32 ocp_data; + + ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_FW_TASK); + ocp_data &= ~FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + usleep_range(1000, 2000); + ocp_data |= FC_PATCH_TASK; + ocp_write_word(tp, MCU_TYPE_USB, USB_FW_TASK, ocp_data); + } + return rtl_enable(tp); } @@ -3376,8 +3403,8 @@ static void rtl8153b_runtime_enable(struct r8152 *tp, bool enable) r8153b_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); - r8153_u2p3en(tp, true); - r8153b_u1u2en(tp, true); + if (tp->udev->speed != USB_SPEED_HIGH) + r8153b_u1u2en(tp, true); } } @@ -4675,7 +4702,6 @@ static void r8153b_hw_phy_cfg(struct r8152 *tp) r8153_aldps_en(tp, true); r8152b_enable_fc(tp); - r8153_u2p3en(tp, true); set_bit(PHY_RESET, &tp->flags); } @@ -4954,6 +4980,8 @@ static void rtl8152_down(struct r8152 *tp) static void rtl8153_up(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; @@ -4961,6 +4989,19 @@ static void rtl8153_up(struct r8152 *tp) r8153_u2p3en(tp, false); r8153_aldps_en(tp, false); r8153_first_init(tp); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); + ocp_data |= LANWAKE_CLR_EN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG); + ocp_data &= ~LANWAKE_PIN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data); + + ocp_data = ocp_read_word(tp, 
MCU_TYPE_USB, USB_SSPHYLINK1); + ocp_data &= ~DELAY_PHY_PWR_CHG; + ocp_write_word(tp, MCU_TYPE_USB, USB_SSPHYLINK1, ocp_data); + r8153_aldps_en(tp, true); switch (tp->version) { @@ -4979,11 +5020,17 @@ static void rtl8153_up(struct r8152 *tp) static void rtl8153_down(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { rtl_drop_queued_tx(tp); return; } + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); + ocp_data &= ~LANWAKE_CLR_EN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); + r8153_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153_power_cut_en(tp, false); @@ -4994,6 +5041,8 @@ static void rtl8153_down(struct r8152 *tp) static void rtl8153b_up(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) return; @@ -5004,18 +5053,29 @@ static void rtl8153b_up(struct r8152 *tp) r8153_first_init(tp); ocp_write_dword(tp, MCU_TYPE_USB, USB_RX_BUF_TH, RX_THR_B); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + r8153_aldps_en(tp, true); - r8153_u2p3en(tp, true); - r8153b_u1u2en(tp, true); + + if (tp->udev->speed != USB_SPEED_HIGH) + r8153b_u1u2en(tp, true); } static void rtl8153b_down(struct r8152 *tp) { + u32 ocp_data; + if (test_bit(RTL8152_UNPLUG, &tp->flags)) { rtl_drop_queued_tx(tp); return; } + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data |= PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + r8153b_u1u2en(tp, false); r8153_u2p3en(tp, false); r8153b_power_cut_en(tp, false); @@ -5387,6 +5447,16 @@ static void r8153_init(struct r8152 *tp) else ocp_data |= DYNAMIC_BURST; ocp_write_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY1, ocp_data); + + r8153_queue_wake(tp, false); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= CUR_LINK_OK; + else + ocp_data &= ~CUR_LINK_OK; + ocp_data |= POLL_LINK_CHG; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); } ocp_data = ocp_read_byte(tp, MCU_TYPE_USB, USB_CSR_DUMMY2); @@ -5416,10 +5486,19 @@ static void r8153_init(struct r8152 *tp) ocp_write_word(tp, MCU_TYPE_USB, USB_CONNECT_TIMER, 0x0001); r8153_power_cut_en(tp, false); + rtl_runtime_suspend_enable(tp, false); r8153_u1u2en(tp, true); r8153_mac_clk_spd(tp, false); usb_enable_lpm(tp->udev); + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6); + ocp_data |= LANWAKE_CLR_EN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CONFIG6, ocp_data); + + ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG); + ocp_data &= ~LANWAKE_PIN; + ocp_write_byte(tp, MCU_TYPE_PLA, PLA_LWAKE_CTRL_REG, ocp_data); + /* rx aggregation */ ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_USB_CTRL); ocp_data &= ~(RX_AGG_DISABLE | RX_ZERO_EN); @@ -5484,7 +5563,17 @@ static void r8153b_init(struct r8152 *tp) r8153b_ups_en(tp, false); r8153_queue_wake(tp, false); rtl_runtime_suspend_enable(tp, false); - r8153b_u1u2en(tp, true); + + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS); + if (rtl8152_get_speed(tp) & LINK_STATUS) + ocp_data |= CUR_LINK_OK; + else + ocp_data &= ~CUR_LINK_OK; + ocp_data |= POLL_LINK_CHG; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_EXTRA_STATUS, ocp_data); + + if (tp->udev->speed != USB_SPEED_HIGH) + r8153b_u1u2en(tp, true); usb_enable_lpm(tp->udev); /* MAC clock speed down */ @@ -5492,6 +5581,19 @@ static void r8153b_init(struct r8152 *tp) ocp_data |= MAC_CLK_SPDWN_EN; 
ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL2, ocp_data); + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3); + ocp_data &= ~PLA_MCU_SPDWN_EN; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_MAC_PWR_CTRL3, ocp_data); + + if (tp->version == RTL_VER_09) { + /* Disable Test IO for 32QFN */ + if (ocp_read_byte(tp, MCU_TYPE_PLA, 0xdc00) & BIT(5)) { + ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); + ocp_data |= TEST_IO_OFF; + ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); + } + } + set_bit(GREEN_ETHERNET, &tp->flags); /* rx aggregation */ @@ -6707,6 +6809,11 @@ static int rtl8152_probe(struct usb_interface *intf, intf->needs_remote_wakeup = 1; + if (!rtl_can_wakeup(tp)) + __rtl_set_wol(tp, 0); + else + tp->saved_wolopts = __rtl_get_wol(tp); + tp->rtl_ops.init(tp); #if IS_BUILTIN(CONFIG_USB_RTL8152) /* Retry in case request_firmware() is not ready yet. */ @@ -6724,10 +6831,6 @@ static int rtl8152_probe(struct usb_interface *intf, goto out1; } - if (!rtl_can_wakeup(tp)) - __rtl_set_wol(tp, 0); - - tp->saved_wolopts = __rtl_get_wol(tp); if (tp->saved_wolopts) device_set_wakeup_enable(&udev->dev, true); else diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index f43c06569ea1..c4c8f1b62e1e 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -7790,16 +7790,8 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { case AIROGVLIST: ridcode = RID_APLIST; break; case AIROGDRVNAM: ridcode = RID_DRVNAME; break; case AIROGEHTENC: ridcode = RID_ETHERENCAP; break; - case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; - /* Only super-user can read WEP keys */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - break; - case AIROGWEPKNV: ridcode = RID_WEP_PERM; - /* Only super-user can read WEP keys */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - break; + case AIROGWEPKTMP: ridcode = RID_WEP_TEMP; break; + case AIROGWEPKNV: ridcode = RID_WEP_PERM; break; case AIROGSTAT: ridcode = RID_STATUS; break; case AIROGSTATSD32: ridcode = RID_STATSDELTA; break; case AIROGSTATSC32: ridcode = RID_STATS; break; @@ -7813,7 +7805,13 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) { return -EINVAL; } - if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL) + if (ridcode == RID_WEP_TEMP || ridcode == RID_WEP_PERM) { + /* Only super-user can read WEP keys */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + } + + if ((iobuf = kzalloc(RIDSIZE, GFP_KERNEL)) == NULL) return -ENOMEM; PC4500_readrid(ai,ridcode,iobuf,RIDSIZE, 1); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c index cd73fc5cfcbb..fd454836adbe 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tx.c @@ -267,7 +267,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct iwl_station_priv *sta_priv = NULL; struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; __le16 fc; u8 hdr_len; @@ -348,7 +348,6 @@ int iwlagn_tx_skb(struct iwl_priv *priv, if (unlikely(!dev_cmd)) goto drop_unlock_priv; - memset(dev_cmd, 0, sizeof(*dev_cmd)); dev_cmd->hdr.cmd = REPLY_TX; tx_cmd = (struct iwl_tx_cmd *) dev_cmd->payload; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index 40fe2d667622..48d375a86d86 100644 --- 
a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -357,8 +357,8 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) { union acpi_object *wifi_pkg, *data; bool enabled; - int i, n_profiles, tbl_rev; - int ret = 0; + int i, n_profiles, tbl_rev, pos; + int ret = 0; data = iwl_acpi_get_object(fwrt->dev, ACPI_EWRD_METHOD); if (IS_ERR(data)) @@ -390,10 +390,10 @@ int iwl_sar_get_ewrd_table(struct iwl_fw_runtime *fwrt) goto out_free; } - for (i = 0; i < n_profiles; i++) { - /* the tables start at element 3 */ - int pos = 3; + /* the tables start at element 3 */ + pos = 3; + for (i = 0; i < n_profiles; i++) { /* The EWRD profiles officially go from 2 to 4, but we * save them in sar_profiles[1-3] (because we don't * have profile 0). So in the array we start from 1. diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index ed90dd104366..4c60f9959f7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -2669,12 +2669,7 @@ int iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt, { int ret = 0; - /* if the FW crashed or not debug monitor cfg was given, there is - * no point in changing the recording state - */ - if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status) || - (!fwrt->trans->dbg.dest_tlv && - fwrt->trans->dbg.ini_dest == IWL_FW_INI_LOCATION_INVALID)) + if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) return 0; if (fw_has_capa(&fwrt->fw->ucode_capa, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 92d9898ab7c2..c2f7252ae4e7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -379,7 +379,7 @@ enum { /* CSR GIO */ -#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) +#define CSR_GIO_REG_VAL_L0S_DISABLED (0x00000002) /* * UCODE-DRIVER GP (general purpose) mailbox register 1 diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index f266647dc08c..ce8f248c33ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -480,7 +480,14 @@ static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt, if (!frag || frag->size || !pages) return -EIO; - while (pages) { + /* + * We try to allocate as many pages as we can, starting with + * the requested amount and going down until we can allocate + * something. Because of DIV_ROUND_UP(), pages will never go + * down to 0 and stop the loop, so stop when pages reaches 1, + * which is too small anyway. 
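+ *
+ * For example, a request for 13 pages is retried as 7, 4 and then 2
+ * pages; after that, pages becomes 1 and the loop stops. Without the
+ * "pages > 1" bound, DIV_ROUND_UP(1, 2) == 1 would make a persistent
+ * allocation failure retry a single page forever.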
+ */ + while (pages > 1) { block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE, &physical, GFP_KERNEL | __GFP_NOWARN); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index 4096ccf58b07..bc8c959588ca 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -1817,9 +1817,6 @@ MODULE_PARM_DESC(antenna_coupling, module_param_named(nvm_file, iwlwifi_mod_params.nvm_file, charp, 0444); MODULE_PARM_DESC(nvm_file, "NVM file name"); -module_param_named(lar_disable, iwlwifi_mod_params.lar_disable, bool, 0444); -MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)"); - module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644); MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index ebea3f308b5d..82e5cac23d8d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -115,7 +115,6 @@ enum iwl_uapsd_disable { * @nvm_file: specifies a external NVM file * @uapsd_disable: disable U-APSD, see &enum iwl_uapsd_disable, default = * IWL_DISABLE_UAPSD_BSS | IWL_DISABLE_UAPSD_P2P_CLIENT - * @lar_disable: disable LAR (regulatory), default = 0 * @fw_monitor: allow to use firmware monitor * @disable_11ac: disable VHT capabilities, default = false. * @remove_when_gone: remove an inaccessible device from the PCIe bus. @@ -136,7 +135,6 @@ struct iwl_mod_params { int antenna_coupling; char *nvm_file; u32 uapsd_disable; - bool lar_disable; bool fw_monitor; bool disable_11ac; /** diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 1e240a2a8329..d4f834b52f50 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -224,6 +224,34 @@ enum iwl_nvm_channel_flags { NVM_CHANNEL_DC_HIGH = BIT(12), }; +/** + * enum iwl_reg_capa_flags - global flags applied for the whole regulatory + * domain. + * @REG_CAPA_BF_CCD_LOW_BAND: Beam-forming or Cyclic Delay Diversity in the + * 2.4Ghz band is allowed. + * @REG_CAPA_BF_CCD_HIGH_BAND: Beam-forming or Cyclic Delay Diversity in the + * 5Ghz band is allowed. + * @REG_CAPA_160MHZ_ALLOWED: 11ac channel with a width of 160Mhz is allowed + * for this regulatory domain (valid only in 5Ghz). + * @REG_CAPA_80MHZ_ALLOWED: 11ac channel with a width of 80Mhz is allowed + * for this regulatory domain (valid only in 5Ghz). + * @REG_CAPA_MCS_8_ALLOWED: 11ac with MCS 8 is allowed. + * @REG_CAPA_MCS_9_ALLOWED: 11ac with MCS 9 is allowed. + * @REG_CAPA_40MHZ_FORBIDDEN: 11n channel with a width of 40Mhz is forbidden + * for this regulatory domain (valid only in 5Ghz). + * @REG_CAPA_DC_HIGH_ENABLED: DC HIGH allowed. 
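+ *
+ * These flags describe the whole regulatory domain rather than a
+ * single channel; iwl_nvm_get_regdom_bw_flags() below translates them
+ * into NL80211_RRF_* restrictions for every channel outside the
+ * 2.4GHz range (e.g. a cleared REG_CAPA_160MHZ_ALLOWED becomes
+ * NL80211_RRF_NO_160MHZ).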
+ */ +enum iwl_reg_capa_flags { + REG_CAPA_BF_CCD_LOW_BAND = BIT(0), + REG_CAPA_BF_CCD_HIGH_BAND = BIT(1), + REG_CAPA_160MHZ_ALLOWED = BIT(2), + REG_CAPA_80MHZ_ALLOWED = BIT(3), + REG_CAPA_MCS_8_ALLOWED = BIT(4), + REG_CAPA_MCS_9_ALLOWED = BIT(5), + REG_CAPA_40MHZ_FORBIDDEN = BIT(7), + REG_CAPA_DC_HIGH_ENABLED = BIT(9), +}; + static inline void iwl_nvm_print_channel_flags(struct device *dev, u32 level, int chan, u32 flags) { @@ -939,10 +967,11 @@ iwl_nvm_no_wide_in_5ghz(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, - u8 tx_chains, u8 rx_chains, bool lar_fw_supported) + u8 tx_chains, u8 rx_chains) { struct iwl_nvm_data *data; bool lar_enabled; @@ -1022,7 +1051,8 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, return NULL; } - if (lar_fw_supported && lar_enabled) + if (lar_enabled && + fw_has_capa(&fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; if (iwl_nvm_no_wide_in_5ghz(trans, cfg, nvm_hw)) @@ -1038,6 +1068,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, int ch_idx, u16 nvm_flags, + u16 cap_flags, const struct iwl_cfg *cfg) { u32 flags = NL80211_RRF_NO_HT40; @@ -1076,13 +1107,27 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, (flags & NL80211_RRF_NO_IR)) flags |= NL80211_RRF_GO_CONCURRENT; + /* + * cap_flags is per regulatory domain so apply it for every channel + */ + if (ch_idx >= NUM_2GHZ_CHANNELS) { + if (cap_flags & REG_CAPA_40MHZ_FORBIDDEN) + flags |= NL80211_RRF_NO_HT40; + + if (!(cap_flags & REG_CAPA_80MHZ_ALLOWED)) + flags |= NL80211_RRF_NO_80MHZ; + + if (!(cap_flags & REG_CAPA_160MHZ_ALLOWED)) + flags |= NL80211_RRF_NO_160MHZ; + } + return flags; } struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info) + u16 geo_info, u16 cap) { int ch_idx; u16 ch_flags; @@ -1140,7 +1185,8 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, } reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, - ch_flags, cfg); + ch_flags, cap, + cfg); /* we can't continue the same rule */ if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags || @@ -1405,9 +1451,6 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, .id = WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_GET_INFO) }; int ret; - bool lar_fw_supported = !iwlwifi_mod_params.lar_disable && - fw_has_capa(&fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); bool empty_otp; u32 mac_flags; u32 sbands_flags = 0; @@ -1485,7 +1528,9 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, nvm->valid_tx_ant = (u8)le32_to_cpu(rsp->phy_sku.tx_chains); nvm->valid_rx_ant = (u8)le32_to_cpu(rsp->phy_sku.rx_chains); - if (le32_to_cpu(rsp->regulatory.lar_enabled) && lar_fw_supported) { + if (le32_to_cpu(rsp->regulatory.lar_enabled) && + fw_has_capa(&fw->ucode_capa, + IWL_UCODE_TLV_CAPA_LAR_SUPPORT)) { nvm->lar_enabled = true; sbands_flags |= IWL_NVM_SBANDS_FLAGS_LAR; } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index b7e1ddf8f177..fb0b385d10fd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ 
b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -7,7 +7,7 @@ * * Copyright(c) 2008 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -29,7 +29,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018 Intel Corporation + * Copyright(c) 2018 - 2019 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -85,10 +85,11 @@ enum iwl_nvm_sbands_flags { */ struct iwl_nvm_data * iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const struct iwl_fw *fw, const __be16 *nvm_hw, const __le16 *nvm_sw, const __le16 *nvm_calib, const __le16 *regulatory, const __le16 *mac_override, const __le16 *phy_sku, - u8 tx_chains, u8 rx_chains, bool lar_fw_supported); + u8 tx_chains, u8 rx_chains); /** * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW @@ -103,7 +104,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info); + u16 geo_info, u16 cap); /** * struct iwl_nvm_section - describes an NVM section in memory. diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index 28bdc9a9617e..f91197e4ae40 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -66,7 +66,9 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_trans_ops *ops) + const struct iwl_trans_ops *ops, + unsigned int cmd_pool_size, + unsigned int cmd_pool_align) { struct iwl_trans *trans; #ifdef CONFIG_LOCKDEP @@ -90,10 +92,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, "iwl_cmd_pool:%s", dev_name(trans->dev)); trans->dev_cmd_pool = kmem_cache_create(trans->dev_cmd_pool_name, - sizeof(struct iwl_device_cmd), - sizeof(void *), - SLAB_HWCACHE_ALIGN, - NULL); + cmd_pool_size, cmd_pool_align, + SLAB_HWCACHE_ALIGN, NULL); if (!trans->dev_cmd_pool) return NULL; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 8cadad7364ac..e33df5ad00e0 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -193,6 +193,18 @@ struct iwl_device_cmd { }; } __packed; +/** + * struct iwl_device_tx_cmd - buffer for TX command + * @hdr: the header + * @payload: the payload placeholder + * + * The actual structure is sized dynamically according to need. 
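+ *
+ * The size is fixed per transport at allocation time: iwl_trans_alloc()
+ * now takes cmd_pool_size/cmd_pool_align, and the PCIe code sizes the
+ * pool for the largest TX command variant plus this header and a
+ * worst-case 802.11 header (see iwl_trans_pcie_alloc()).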
+ */ +struct iwl_device_tx_cmd { + struct iwl_cmd_header hdr; + u8 payload[]; +} __packed; + #define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd)) /* @@ -544,7 +556,7 @@ struct iwl_trans_ops { int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int queue); + struct iwl_device_tx_cmd *dev_cmd, int queue); void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, struct sk_buff_head *skbs); @@ -948,22 +960,22 @@ iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask) return trans->ops->dump_data(trans, dump_mask); } -static inline struct iwl_device_cmd * +static inline struct iwl_device_tx_cmd * iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) { - return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC); + return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC); } int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans, - struct iwl_device_cmd *dev_cmd) + struct iwl_device_tx_cmd *dev_cmd) { kmem_cache_free(trans->dev_cmd_pool, dev_cmd); } static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int queue) + struct iwl_device_tx_cmd *dev_cmd, int queue) { if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) return -EIO; @@ -1271,7 +1283,9 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans) *****************************************************/ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_trans_ops *ops); + const struct iwl_trans_ops *ops, + unsigned int cmd_pool_size, + unsigned int cmd_pool_align); void iwl_trans_free(struct iwl_trans *trans); /***************************************************** diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 60aff2ecec12..58df25e2fb32 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -154,5 +154,6 @@ #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT false #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 10 +#define IWL_MVM_USE_NSSN_SYNC 0 #endif /* __MVM_CONSTANTS_H */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index dd685f7eb410..c09624d8d7ee 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -841,9 +841,13 @@ int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm) return 0; } + if (!mvm->fwrt.ppag_table.enabled) { + IWL_DEBUG_RADIO(mvm, + "PPAG not enabled, command not sent.\n"); + return 0; + } + IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n"); - IWL_DEBUG_RADIO(mvm, "PPAG is %s\n", - mvm->fwrt.ppag_table.enabled ? 
"enabled" : "disabled"); for (i = 0; i < ACPI_PPAG_NUM_CHAINS; i++) { for (j = 0; j < ACPI_PPAG_NUM_SUB_BANDS; j++) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 32dc9d6f0fb6..6717f25c46b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -256,7 +256,8 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, __le32_to_cpu(resp->n_channels), resp->channels, __le16_to_cpu(resp->mcc), - __le16_to_cpu(resp->geo_info)); + __le16_to_cpu(resp->geo_info), + __le16_to_cpu(resp->cap)); /* Store the return source id */ src_id = resp->source_id; kfree(resp); @@ -754,6 +755,20 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) return ret; } +static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta) +{ + if (likely(sta)) { + if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0)) + return; + } else { + if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0)) + return; + } + + ieee80211_free_txskb(mvm->hw, skb); +} + static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) @@ -797,14 +812,7 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, } } - if (sta) { - if (iwl_mvm_tx_skb(mvm, skb, sta)) - goto drop; - return; - } - - if (iwl_mvm_tx_skb_non_sta(mvm, skb)) - goto drop; + iwl_mvm_tx_skb(mvm, skb, sta); return; drop: ieee80211_free_txskb(hw, skb); @@ -854,10 +862,7 @@ void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) break; } - if (!txq->sta) - iwl_mvm_tx_skb_non_sta(mvm, skb); - else - iwl_mvm_tx_skb(mvm, skb, txq->sta); + iwl_mvm_tx_skb(mvm, skb, txq->sta); } } while (atomic_dec_return(&mvmtxq->tx_request)); rcu_read_unlock(); @@ -4771,6 +4776,125 @@ static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, return ret; } +static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) +{ + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + rinfo->bw = RATE_INFO_BW_20; + break; + case RATE_MCS_CHAN_WIDTH_40: + rinfo->bw = RATE_INFO_BW_40; + break; + case RATE_MCS_CHAN_WIDTH_80: + rinfo->bw = RATE_INFO_BW_80; + break; + case RATE_MCS_CHAN_WIDTH_160: + rinfo->bw = RATE_INFO_BW_160; + break; + } + + if (rate_n_flags & RATE_MCS_HT_MSK) { + rinfo->flags |= RATE_INFO_FLAGS_MCS; + rinfo->mcs = u32_get_bits(rate_n_flags, RATE_HT_MCS_INDEX_MSK); + rinfo->nss = u32_get_bits(rate_n_flags, + RATE_HT_MCS_NSS_MSK) + 1; + if (rate_n_flags & RATE_MCS_SGI_MSK) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; + rinfo->mcs = u32_get_bits(rate_n_flags, + RATE_VHT_MCS_RATE_CODE_MSK); + rinfo->nss = u32_get_bits(rate_n_flags, + RATE_VHT_MCS_NSS_MSK) + 1; + if (rate_n_flags & RATE_MCS_SGI_MSK) + rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; + } else if (rate_n_flags & RATE_MCS_HE_MSK) { + u32 gi_ltf = u32_get_bits(rate_n_flags, + RATE_MCS_HE_GI_LTF_MSK); + + rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; + rinfo->mcs = u32_get_bits(rate_n_flags, + RATE_VHT_MCS_RATE_CODE_MSK); + rinfo->nss = u32_get_bits(rate_n_flags, + RATE_VHT_MCS_NSS_MSK) + 1; + + if (rate_n_flags & RATE_MCS_HE_106T_MSK) { + rinfo->bw = RATE_INFO_BW_HE_RU; + rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106; + } + + switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) { + case RATE_MCS_HE_TYPE_SU: + case RATE_MCS_HE_TYPE_EXT_SU: + if (gi_ltf == 0 || 
gi_ltf == 1) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else if (gi_ltf == 2) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else if (rate_n_flags & RATE_MCS_SGI_MSK) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + case RATE_MCS_HE_TYPE_MU: + if (gi_ltf == 0 || gi_ltf == 1) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else if (gi_ltf == 2) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + case RATE_MCS_HE_TYPE_TRIG: + if (gi_ltf == 0 || gi_ltf == 1) + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + else + rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + } + + if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) + rinfo->he_dcm = 1; + } else { + switch (u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK)) { + case IWL_RATE_1M_PLCP: + rinfo->legacy = 10; + break; + case IWL_RATE_2M_PLCP: + rinfo->legacy = 20; + break; + case IWL_RATE_5M_PLCP: + rinfo->legacy = 55; + break; + case IWL_RATE_11M_PLCP: + rinfo->legacy = 110; + break; + case IWL_RATE_6M_PLCP: + rinfo->legacy = 60; + break; + case IWL_RATE_9M_PLCP: + rinfo->legacy = 90; + break; + case IWL_RATE_12M_PLCP: + rinfo->legacy = 120; + break; + case IWL_RATE_18M_PLCP: + rinfo->legacy = 180; + break; + case IWL_RATE_24M_PLCP: + rinfo->legacy = 240; + break; + case IWL_RATE_36M_PLCP: + rinfo->legacy = 360; + break; + case IWL_RATE_48M_PLCP: + rinfo->legacy = 480; + break; + case IWL_RATE_54M_PLCP: + rinfo->legacy = 540; + break; + } + } +} + static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -4785,6 +4909,13 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } + if (iwl_mvm_has_tlc_offload(mvm)) { + struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; + + iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + } + /* if beacon filtering isn't on mac80211 does it anyway */ if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 3ec8de00f3aa..67ab7e7e9c9d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -1298,9 +1298,6 @@ static inline bool iwl_mvm_is_lar_supported(struct iwl_mvm *mvm) bool tlv_lar = fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - if (iwlwifi_mod_params.lar_disable) - return false; - /* * Enable LAR only if it is supported by the FW (TLV) && * enabled in the NVM @@ -1508,8 +1505,8 @@ int __must_check iwl_mvm_send_cmd_status(struct iwl_mvm *mvm, int __must_check iwl_mvm_send_cmd_pdu_status(struct iwl_mvm *mvm, u32 id, u16 len, const void *data, u32 *status); -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_sta *sta); +int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta); int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb); void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, struct iwl_tx_cmd *tx_cmd, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index 945c1ea5cda8..46128a2a9c6e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -277,11 +277,10 @@ 
iwl_parse_nvm_sections(struct iwl_mvm *mvm) struct iwl_nvm_section *sections = mvm->nvm_sections; const __be16 *hw; const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku; - bool lar_enabled; int regulatory_type; /* Checking for required sections */ - if (mvm->trans->cfg->nvm_type != IWL_NVM_EXT) { + if (mvm->trans->cfg->nvm_type == IWL_NVM) { if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data || !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) { IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n"); @@ -327,14 +326,9 @@ iwl_parse_nvm_sections(struct iwl_mvm *mvm) (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data : (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data; - lar_enabled = !iwlwifi_mod_params.lar_disable && - fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_LAR_SUPPORT); - - return iwl_parse_nvm_data(mvm->trans, mvm->cfg, hw, sw, calib, + return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib, regulatory, mac_override, phy_sku, - mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant, - lar_enabled); + mvm->fw->valid_tx_ant, mvm->fw->valid_rx_ant); } /* Loads the NVM data stored in mvm->nvm_sections into the NIC */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index ef99c49247b7..c15f7dbc9516 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -514,14 +514,17 @@ static bool iwl_mvm_is_sn_less(u16 sn1, u16 sn2, u16 buffer_size) static void iwl_mvm_sync_nssn(struct iwl_mvm *mvm, u8 baid, u16 nssn) { - struct iwl_mvm_rss_sync_notif notif = { - .metadata.type = IWL_MVM_RXQ_NSSN_SYNC, - .metadata.sync = 0, - .nssn_sync.baid = baid, - .nssn_sync.nssn = nssn, - }; - - iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif)); + if (IWL_MVM_USE_NSSN_SYNC) { + struct iwl_mvm_rss_sync_notif notif = { + .metadata.type = IWL_MVM_RXQ_NSSN_SYNC, + .metadata.sync = 0, + .nssn_sync.baid = baid, + .nssn_sync.nssn = nssn, + }; + + iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, + sizeof(notif)); + } } #define RX_REORDER_BUF_TIMEOUT_MQ (HZ / 10) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index a046ac9fa852..a5af8f4128b1 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -1213,7 +1213,7 @@ static int iwl_mvm_legacy_config_scan(struct iwl_mvm *mvm) cmd_size = sizeof(struct iwl_scan_config_v2); else cmd_size = sizeof(struct iwl_scan_config_v1); - cmd_size += num_channels; + cmd_size += mvm->fw->ucode_capa.n_scan_channels; cfg = kzalloc(cmd_size, GFP_KERNEL); if (!cfg) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index dc5c02fbc65a..ddfc9a668036 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -490,13 +490,13 @@ static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm, /* * Allocates and sets the Tx cmd the driver data pointers in the skb */ -static struct iwl_device_cmd * +static struct iwl_device_tx_cmd * iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, struct ieee80211_tx_info *info, int hdrlen, struct ieee80211_sta *sta, u8 sta_id) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; struct iwl_tx_cmd *tx_cmd; dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans); @@ -504,11 +504,6 @@ 
iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, if (unlikely(!dev_cmd)) return NULL; - /* Make sure we zero enough of dev_cmd */ - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd)); - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd)); - - memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd)); dev_cmd->hdr.cmd = TX_CMD; if (iwl_mvm_has_new_tx_api(mvm)) { @@ -597,7 +592,7 @@ out: } static void iwl_mvm_skb_prepare_status(struct sk_buff *skb, - struct iwl_device_cmd *cmd) + struct iwl_device_tx_cmd *cmd) { struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); @@ -716,7 +711,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_info info; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); __le16 fc = hdr->frame_control; @@ -1078,7 +1073,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct iwl_mvm_sta *mvmsta; - struct iwl_device_cmd *dev_cmd; + struct iwl_device_tx_cmd *dev_cmd; __le16 fc; u16 seq_number = 0; u8 tid = IWL_MAX_TID_COUNT; @@ -1154,7 +1149,7 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) { iwl_trans_free_tx_cmd(mvm->trans, dev_cmd); spin_unlock(&mvmsta->lock); - return 0; + return -1; } if (!iwl_mvm_has_new_tx_api(mvm)) { @@ -1206,8 +1201,8 @@ drop: return -1; } -int iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, - struct ieee80211_sta *sta) +int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb, + struct ieee80211_sta *sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_tx_info info; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index d38cefbb779e..e249e3fd14c6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -57,6 +57,42 @@ #include "internal.h" #include "iwl-prph.h" +static void *_iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, + size_t size, + dma_addr_t *phys, + int depth) +{ + void *result; + + if (WARN(depth > 2, + "failed to allocate DMA memory not crossing 2^32 boundary")) + return NULL; + + result = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL); + + if (!result) + return NULL; + + if (unlikely(iwl_pcie_crosses_4g_boundary(*phys, size))) { + void *old = result; + dma_addr_t oldphys = *phys; + + result = _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, + phys, + depth + 1); + dma_free_coherent(trans->dev, size, old, oldphys); + } + + return result; +} + +static void *iwl_pcie_ctxt_info_dma_alloc_coherent(struct iwl_trans *trans, + size_t size, + dma_addr_t *phys) +{ + return _iwl_pcie_ctxt_info_dma_alloc_coherent(trans, size, phys, 0); +} + void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) { struct iwl_self_init_dram *dram = &trans->init_dram; @@ -161,14 +197,17 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, struct iwl_context_info *ctxt_info; struct iwl_context_info_rbd_cfg *rx_cfg; u32 control_flags = 0, rb_size; + dma_addr_t phys; int ret; - ctxt_info = dma_alloc_coherent(trans->dev, sizeof(*ctxt_info), - &trans_pcie->ctxt_info_dma_addr, - GFP_KERNEL); + ctxt_info = 
iwl_pcie_ctxt_info_dma_alloc_coherent(trans, + sizeof(*ctxt_info), + &phys); if (!ctxt_info) return -ENOMEM; + trans_pcie->ctxt_info_dma_addr = phys; + ctxt_info->version.version = 0; ctxt_info->version.mac_id = cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index a091690f6c79..f14bcef3495e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -305,7 +305,7 @@ struct iwl_cmd_meta { #define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64) struct iwl_pcie_txq_entry { - struct iwl_device_cmd *cmd; + void *cmd; struct sk_buff *skb; /* buffer to free after command completes */ const void *free_buf; @@ -672,6 +672,16 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans); /***************************************************** * TX / HCMD ******************************************************/ +/* + * We need this inline in case dma_addr_t is only 32-bits - since the + * hardware is always 64-bit, the issue can still occur in that case, + * so use u64 for 'phys' here to force the addition in 64-bit. + */ +static inline bool iwl_pcie_crosses_4g_boundary(u64 phys, u16 len) +{ + return upper_32_bits(phys) != upper_32_bits(phys + len); +} + int iwl_pcie_tx_init(struct iwl_trans *trans); int iwl_pcie_gen2_tx_init(struct iwl_trans *trans, int txq_id, int queue_size); @@ -688,7 +698,7 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id); + struct iwl_device_tx_cmd *dev_cmd, int txq_id); void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx); @@ -1082,7 +1092,8 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans); void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, struct sk_buff *skb); #ifdef CONFIG_INET -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len); +struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, + struct sk_buff *skb); #endif /* common functions that are used by gen3 transport */ @@ -1106,7 +1117,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, unsigned int timeout); void iwl_trans_pcie_dyn_txq_free(struct iwl_trans *trans, int queue); int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id); + struct iwl_device_tx_cmd *dev_cmd, int txq_id); int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 452da44a21e0..f0b8ff67a1bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1529,13 +1529,13 @@ out: napi = &rxq->napi; if (napi->poll) { + napi_gro_flush(napi, false); + if (napi->rx_count) { netif_receive_skb_list(&napi->rx_list); INIT_LIST_HEAD(&napi->rx_list); napi->rx_count = 0; } - - napi_gro_flush(napi, false); } iwl_pcie_rxq_restock(trans, rxq); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c 
b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index a0677131634d..f60d66f1e55b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -79,6 +79,7 @@ #include "iwl-agn-hw.h" #include "fw/error-dump.h" #include "fw/dbg.h" +#include "fw/api/tx.h" #include "internal.h" #include "iwl-fh.h" @@ -301,18 +302,13 @@ void iwl_pcie_apm_config(struct iwl_trans *trans) u16 cap; /* - * HW bug W/A for instability in PCIe bus L0S->L1 transition. - * Check if BIOS (or OS) enabled L1-ASPM on this device. - * If so (likely), disable L0S, so device moves directly L0->L1; - * costs negligible amount of power savings. - * If not (unlikely), enable L0S, so there is at least some - * power savings, even without L1. + * L0S states have been found to be unstable with our devices + * and in newer hardware they are not officially supported at + * all, so we must always set the L0S_DISABLED bit. */ + iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED); + pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); - if (lctl & PCI_EXP_LNKCTL_ASPM_L1) - iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); - else - iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap); @@ -3460,19 +3456,34 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, { struct iwl_trans_pcie *trans_pcie; struct iwl_trans *trans; - int ret, addr_size; + int ret, addr_size, txcmd_size, txcmd_align; + const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; + + if (!cfg_trans->gen2) { + ops = &trans_ops_pcie; + txcmd_size = sizeof(struct iwl_tx_cmd); + txcmd_align = sizeof(void *); + } else if (cfg_trans->device_family < IWL_DEVICE_FAMILY_AX210) { + txcmd_size = sizeof(struct iwl_tx_cmd_gen2); + txcmd_align = 64; + } else { + txcmd_size = sizeof(struct iwl_tx_cmd_gen3); + txcmd_align = 128; + } + + txcmd_size += sizeof(struct iwl_cmd_header); + txcmd_size += 36; /* biggest possible 802.11 header */ + + /* Ensure device TX cmd cannot reach/cross a page boundary in gen2 */ + if (WARN_ON(cfg_trans->gen2 && txcmd_size >= txcmd_align)) + return ERR_PTR(-EINVAL); ret = pcim_enable_device(pdev); if (ret) return ERR_PTR(ret); - if (cfg_trans->gen2) - trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), - &pdev->dev, &trans_ops_pcie_gen2); - else - trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), - &pdev->dev, &trans_ops_pcie); - + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, + txcmd_size, txcmd_align); if (!trans) return ERR_PTR(-ENOMEM); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 8ca0250de99e..bfb984b2e00c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -221,6 +221,17 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, int idx = iwl_pcie_gen2_get_num_tbs(trans, tfd); struct iwl_tfh_tb *tb; + /* + * Only WARN here so we know about the issue, but we mess up our + * unmap path because not every place currently checks for errors + * returned from this function - it can only return an error if + * there's no more space, and so when we know there is enough we + * don't always check ... 
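+ *
+ * Callers that can actually hit the 2^32 crossing go through
+ * iwl_pcie_gen2_set_tb_with_wa() below, which bounces the buffer
+ * before it reaches this point; the remaining direct callers are the
+ * ones annotated as safe by alignment or by the TSO page layout.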
+ */ + WARN(iwl_pcie_crosses_4g_boundary(addr, len), + "possible DMA problem with iova:0x%llx, len:%d\n", + (unsigned long long)addr, len); + if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) return -EINVAL; tb = &tfd->tbs[idx]; @@ -240,13 +251,114 @@ static int iwl_pcie_gen2_set_tb(struct iwl_trans *trans, return idx; } +static struct page *get_workaround_page(struct iwl_trans *trans, + struct sk_buff *skb) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct page **page_ptr; + struct page *ret; + + page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); + + ret = alloc_page(GFP_ATOMIC); + if (!ret) + return NULL; + + /* set the chaining pointer to the previous page if there */ + *(void **)(page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; + *page_ptr = ret; + + return ret; +} + +/* + * Add a TB and if needed apply the FH HW bug workaround; + * meta != NULL indicates that it's a page mapping and we + * need to dma_unmap_page() and set the meta->tbs bit in + * this case. + */ +static int iwl_pcie_gen2_set_tb_with_wa(struct iwl_trans *trans, + struct sk_buff *skb, + struct iwl_tfh_tfd *tfd, + dma_addr_t phys, void *virt, + u16 len, struct iwl_cmd_meta *meta) +{ + dma_addr_t oldphys = phys; + struct page *page; + int ret; + + if (unlikely(dma_mapping_error(trans->dev, phys))) + return -ENOMEM; + + if (likely(!iwl_pcie_crosses_4g_boundary(phys, len))) { + ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len); + + if (ret < 0) + goto unmap; + + if (meta) + meta->tbs |= BIT(ret); + + ret = 0; + goto trace; + } + + /* + * Work around a hardware bug. If (as expressed in the + * condition above) the TB ends on a 32-bit boundary, + * then the next TB may be accessed with the wrong + * address. + * To work around it, copy the data elsewhere and make + * a new mapping for it so the device will not fail. 
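+ *
+ * E.g. a 100-byte TB mapped at DMA address 0xffffffc0 would end at
+ * 0x100000024 and trip iwl_pcie_crosses_4g_boundary(); the bounce
+ * copy below lands in a freshly allocated page, and since len is
+ * capped at PAGE_SIZE - sizeof(void *) the new page-aligned mapping
+ * should not be able to cross such a boundary itself.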
+ */ + + if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { + ret = -ENOBUFS; + goto unmap; + } + + page = get_workaround_page(trans, skb); + if (!page) { + ret = -ENOMEM; + goto unmap; + } + + memcpy(page_address(page), virt, len); + + phys = dma_map_single(trans->dev, page_address(page), len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, phys))) + return -ENOMEM; + ret = iwl_pcie_gen2_set_tb(trans, tfd, phys, len); + if (ret < 0) { + /* unmap the new allocation as single */ + oldphys = phys; + meta = NULL; + goto unmap; + } + IWL_WARN(trans, + "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", + len, (unsigned long long)oldphys, (unsigned long long)phys); + + ret = 0; +unmap: + if (meta) + dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); +trace: + trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); + + return ret; +} + static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_tfh_tfd *tfd, int start_len, - u8 hdr_len, struct iwl_device_cmd *dev_cmd) + u8 hdr_len, + struct iwl_device_tx_cmd *dev_cmd) { #ifdef CONFIG_INET - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; @@ -254,7 +366,6 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, u16 length, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; - struct page **page_ptr; struct tso_t tso; trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), @@ -270,14 +381,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = get_page_hdr(trans, hdr_room); + hdr_page = get_page_hdr(trans, hdr_room, skb); if (!hdr_page) return -ENOMEM; - get_page(hdr_page->page); start_hdr = hdr_page->pos; - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); - *page_ptr = hdr_page->page; /* * Pull the ieee80211 header to be able to use TSO core, @@ -332,6 +440,11 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, dev_kfree_skb(csum_skb); goto out_err; } + /* + * No need for _with_wa, this is from the TSO page and + * we leave some space at the end of it so can't hit + * the buggy scenario. 
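+ * (get_page_hdr() reserves sizeof(void *) bytes at the tail of the
+ * page for the chaining pointer, so headers taken from it never
+ * extend to the very end of the page.)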
+ */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, tb_phys, tb_len); @@ -343,16 +456,18 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, /* put the payload */ while (data_left) { + int ret; + tb_len = min_t(unsigned int, tso.size, data_left); tb_phys = dma_map_single(trans->dev, tso.data, tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) { + ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, + tb_phys, tso.data, + tb_len, NULL); + if (ret) { dev_kfree_skb(csum_skb); goto out_err; } - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb_len); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, - tb_phys, tb_len); data_left -= tb_len; tso_build_data(skb, &tso, tb_len); @@ -372,7 +487,7 @@ out_err: static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, + struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, @@ -386,6 +501,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); + /* + * No need for _with_wa, the first TB allocation is aligned up + * to a 64-byte boundary and thus can't be at the end or cross + * a page boundary (much less a 2^32 boundary). + */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); /* @@ -404,6 +524,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; + /* + * No need for _with_wa(), we ensure (via alignment) that the data + * here can never cross or end at a page boundary. + */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len); if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd, @@ -430,24 +554,19 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans, for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; dma_addr_t tb_phys; - int tb_idx; + unsigned int fragsz = skb_frag_size(frag); + int ret; - if (!skb_frag_size(frag)) + if (!fragsz) continue; tb_phys = skb_frag_dma_map(trans->dev, frag, 0, - skb_frag_size(frag), DMA_TO_DEVICE); - - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) - return -ENOMEM; - tb_idx = iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, - skb_frag_size(frag)); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag), - tb_phys, skb_frag_size(frag)); - if (tb_idx < 0) - return tb_idx; - - out_meta->tbs |= BIT(tb_idx); + fragsz, DMA_TO_DEVICE); + ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + skb_frag_address(frag), + fragsz, out_meta); + if (ret) + return ret; } return 0; @@ -456,7 +575,7 @@ static int iwl_pcie_gen2_tx_add_frags(struct iwl_trans *trans, static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, + struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta, int hdr_len, @@ -475,6 +594,11 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, /* The first TB points to bi-directional DMA data */ memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); + /* + * No need for _with_wa, the first TB allocation is aligned up + * to a 64-byte boundary and thus can't be at the end or cross + * a page boundary (much less a 2^32 boundary). 
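+ * (first_tb_bufs entries are spaced IWL_FIRST_TB_SIZE_ALIGN apart,
+ * i.e. IWL_FIRST_TB_SIZE rounded up to 64 bytes, so each
+ * IWL_FIRST_TB_SIZE-byte buffer lives inside one aligned chunk.)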
+ */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); /* @@ -496,6 +620,10 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(trans->dev, tb_phys))) goto out_err; + /* + * No need for _with_wa(), we ensure (via alignment) that the data + * here can never cross or end at a page boundary. + */ iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); @@ -504,26 +632,30 @@ iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, tb2_len = skb_headlen(skb) - hdr_len; if (tb2_len > 0) { + int ret; + tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, tb2_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + skb->data + hdr_len, tb2_len, + NULL); + if (ret) goto out_err; - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb2_len); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len, - tb_phys, tb2_len); } if (iwl_pcie_gen2_tx_add_frags(trans, skb, tfd, out_meta)) goto out_err; skb_walk_frags(skb, frag) { + int ret; + tb_phys = dma_map_single(trans->dev, frag->data, skb_headlen(frag), DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + ret = iwl_pcie_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + frag->data, + skb_headlen(frag), NULL); + if (ret) goto out_err; - iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, skb_headlen(frag)); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, frag->data, - tb_phys, skb_headlen(frag)); if (iwl_pcie_gen2_tx_add_frags(trans, frag, tfd, out_meta)) goto out_err; } @@ -538,7 +670,7 @@ out_err: static struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, + struct iwl_device_tx_cmd *dev_cmd, struct sk_buff *skb, struct iwl_cmd_meta *out_meta) { @@ -578,7 +710,7 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, } int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id) + struct iwl_device_tx_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_cmd_meta *out_meta; @@ -603,7 +735,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, /* don't put the packet on the ring, if there is no room */ if (unlikely(iwl_queue_space(trans, txq) < 3)) { - struct iwl_device_cmd **dev_cmd_ptr; + struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index f21f16ab2a97..b0eb52b4951b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -213,8 +213,8 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans, u8 sec_ctl = 0; u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; __le16 bc_ent; - struct iwl_tx_cmd *tx_cmd = - (void *)txq->entries[txq->write_ptr].cmd->payload; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; u8 sta_id = tx_cmd->sta_id; scd_bc_tbl = trans_pcie->scd_bc_tbls.addr; @@ -257,8 +257,8 @@ static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans, int read_ptr = txq->read_ptr; u8 
sta_id = 0; __le16 bc_ent; - struct iwl_tx_cmd *tx_cmd = - (void *)txq->entries[read_ptr].cmd->payload; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); @@ -624,12 +624,18 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, struct sk_buff *skb) { struct page **page_ptr; + struct page *next; page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); + next = *page_ptr; + *page_ptr = NULL; - if (*page_ptr) { - __free_page(*page_ptr); - *page_ptr = NULL; + while (next) { + struct page *tmp = next; + + next = *(void **)(page_address(next) + PAGE_SIZE - + sizeof(void *)); + __free_page(tmp); } } @@ -1196,7 +1202,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, while (!skb_queue_empty(&overflow_skbs)) { struct sk_buff *skb = __skb_dequeue(&overflow_skbs); - struct iwl_device_cmd *dev_cmd_ptr; + struct iwl_device_tx_cmd *dev_cmd_ptr; dev_cmd_ptr = *(void **)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); @@ -2052,17 +2058,34 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, } #ifdef CONFIG_INET -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len) +struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, + struct sk_buff *skb) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page); + struct page **page_ptr; + + page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); + + if (WARN_ON(*page_ptr)) + return NULL; if (!p->page) goto alloc; - /* enough room on this page */ - if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE) - return p; + /* + * Check if there's enough room on this page + * + * Note that we put a page chaining pointer *last* in the + * page - we need it somewhere, and if it's there then we + * avoid DMA mapping the last bits of the page which may + * trigger the 32-bit boundary hardware bug. + * + * (see also get_workaround_page() in tx-gen2.c) + */ + if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - + sizeof(void *)) + goto out; /* We don't have enough room on this page, get a new one. 
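
   (Editor's aside, illustrative: with the chaining pointer reserved, each
   TSO header page is laid out as

	[ headers ... | free space | next-page pointer ]
	 ^page_address(p->page)      ^PAGE_SIZE - sizeof(void *)

   and the reworked iwl_pcie_free_tso_page() above releases a packet's
   pages by walking that chain.)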
*/ __free_page(p->page); @@ -2072,6 +2095,11 @@ alloc: if (!p->page) return NULL; p->pos = page_address(p->page); + /* set the chaining pointer to NULL */ + *(void **)(page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; +out: + *page_ptr = p->page; + get_page(p->page); return p; } @@ -2097,7 +2125,8 @@ static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph, static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, - struct iwl_device_cmd *dev_cmd, u16 tb1_len) + struct iwl_device_tx_cmd *dev_cmd, + u16 tb1_len) { struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; struct iwl_trans_pcie *trans_pcie = txq->trans_pcie; @@ -2107,7 +2136,6 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, u16 length, iv_len, amsdu_pad; u8 *start_hdr; struct iwl_tso_hdr_page *hdr_page; - struct page **page_ptr; struct tso_t tso; /* if the packet is protected, then it must be CCMP or GCMP */ @@ -2130,14 +2158,11 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = get_page_hdr(trans, hdr_room); + hdr_page = get_page_hdr(trans, hdr_room, skb); if (!hdr_page) return -ENOMEM; - get_page(hdr_page->page); start_hdr = hdr_page->pos; - page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs); - *page_ptr = hdr_page->page; memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); hdr_page->pos += iv_len; @@ -2279,7 +2304,8 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, - struct iwl_device_cmd *dev_cmd, u16 tb1_len) + struct iwl_device_tx_cmd *dev_cmd, + u16 tb1_len) { /* No A-MSDU without CONFIG_INET */ WARN_ON(1); @@ -2289,7 +2315,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, #endif /* CONFIG_INET */ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_cmd *dev_cmd, int txq_id) + struct iwl_device_tx_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr; @@ -2346,7 +2372,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* don't put the packet on the ring, if there is no room */ if (unlikely(iwl_queue_space(trans, txq) < 3)) { - struct iwl_device_cmd **dev_cmd_ptr; + struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + trans_pcie->dev_cmd_offs); diff --git a/drivers/net/wireless/marvell/libertas/cfg.c b/drivers/net/wireless/marvell/libertas/cfg.c index 57edfada0665..c9401c121a14 100644 --- a/drivers/net/wireless/marvell/libertas/cfg.c +++ b/drivers/net/wireless/marvell/libertas/cfg.c @@ -273,6 +273,10 @@ add_ie_rates(u8 *tlv, const u8 *ie, int *nrates) int hw, ap, ap_max = ie[1]; u8 hw_rate; + if (ap_max > MAX_RATES) { + lbs_deb_assoc("invalid rates\n"); + return tlv; + } /* Advance past IE header */ ie += 2; @@ -1717,6 +1721,9 @@ static int lbs_ibss_join_existing(struct lbs_private *priv, struct cmd_ds_802_11_ad_hoc_join cmd; u8 preamble = RADIO_PREAMBLE_SHORT; int ret = 0; + int hw, i; + u8 rates_max; + u8 *rates; /* TODO: set preamble based on scan result */ ret = lbs_set_radio(priv, preamble, 1); @@ -1775,9 +1782,12 @@ static int 
lbs_ibss_join_existing(struct lbs_private *priv, if (!rates_eid) { lbs_add_rates(cmd.bss.rates); } else { - int hw, i; - u8 rates_max = rates_eid[1]; - u8 *rates = cmd.bss.rates; + rates_max = rates_eid[1]; + if (rates_max > MAX_RATES) { + lbs_deb_join("invalid rates\n"); + goto out; + } + rates = cmd.bss.rates; for (hw = 0; hw < ARRAY_SIZE(lbs_rates); hw++) { u8 hw_rate = lbs_rates[hw].bitrate / 5; for (i = 0; i < rates_max; i++) { diff --git a/drivers/net/wireless/mediatek/mt76/airtime.c b/drivers/net/wireless/mediatek/mt76/airtime.c index 55116f395f9a..a4a785467748 100644 --- a/drivers/net/wireless/mediatek/mt76/airtime.c +++ b/drivers/net/wireless/mediatek/mt76/airtime.c @@ -242,7 +242,7 @@ u32 mt76_calc_rx_airtime(struct mt76_dev *dev, struct mt76_rx_status *status, return 0; sband = dev->hw->wiphy->bands[status->band]; - if (!sband || status->rate_idx > sband->n_bitrates) + if (!sband || status->rate_idx >= sband->n_bitrates) return 0; rate = &sband->bitrates[status->rate_idx]; diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index b9f2a401041a..96018fd65779 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -378,7 +378,8 @@ void mt76_unregister_device(struct mt76_dev *dev) { struct ieee80211_hw *hw = dev->hw; - mt76_led_cleanup(dev); + if (IS_ENABLED(CONFIG_MT76_LEDS)) + mt76_led_cleanup(dev); mt76_tx_status_check(dev, NULL, true); ieee80211_unregister_hw(hw); } diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 55083c67b2bb..95dca2cb5265 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -633,13 +633,17 @@ static int ddr_perf_probe(struct platform_device *pdev) if (ret < 0) { dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n"); - goto ddr_perf_err; + goto cpuhp_state_err; } pmu->cpuhp_state = ret; /* Register the pmu instance for cpu hotplug */ - cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); + ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node); + if (ret) { + dev_err(&pdev->dev, "Error %d registering hotplug\n", ret); + goto cpuhp_instance_err; + } /* Request irq */ irq = of_irq_get(np, 0); @@ -673,9 +677,10 @@ static int ddr_perf_probe(struct platform_device *pdev) return 0; ddr_perf_err: - if (pmu->cpuhp_state) - cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); - + cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); +cpuhp_instance_err: + cpuhp_remove_multi_state(pmu->cpuhp_state); +cpuhp_state_err: ida_simple_remove(&ddr_ida, pmu->id); dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret); return ret; @@ -686,6 +691,7 @@ static int ddr_perf_remove(struct platform_device *pdev) struct ddr_pmu *pmu = platform_get_drvdata(pdev); cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node); + cpuhp_remove_multi_state(pmu->cpuhp_state); irq_set_affinity_hint(pmu->irq, NULL); perf_pmu_unregister(&pmu->pmu); diff --git a/drivers/perf/hisilicon/hisi_uncore_pmu.c b/drivers/perf/hisilicon/hisi_uncore_pmu.c index 96183e31b96a..584de8f807cc 100644 --- a/drivers/perf/hisilicon/hisi_uncore_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_pmu.c @@ -337,38 +337,44 @@ void hisi_uncore_pmu_disable(struct pmu *pmu) hisi_pmu->ops->stop_counters(hisi_pmu); } + /* - * Read Super CPU cluster and CPU cluster ID from MPIDR_EL1.
- * If multi-threading is supported, On Huawei Kunpeng 920 SoC whose cpu - * core is tsv110, CCL_ID is the low 3-bits in MPIDR[Aff2] and SCCL_ID - * is the upper 5-bits of Aff2 field; while for other cpu types, SCCL_ID - * is in MPIDR[Aff3] and CCL_ID is in MPIDR[Aff2], if not, SCCL_ID - * is in MPIDR[Aff2] and CCL_ID is in MPIDR[Aff1]. + * The Super CPU Cluster (SCCL) and CPU Cluster (CCL) IDs can be + * determined from the MPIDR_EL1, but the encoding varies by CPU: + * + * - For MT variants of TSV110: + * SCCL is Aff2[7:3], CCL is Aff2[2:0] + * + * - For other MT parts: + * SCCL is Aff3[7:0], CCL is Aff2[7:0] + * + * - For non-MT parts: + * SCCL is Aff2[7:0], CCL is Aff1[7:0] */ -static void hisi_read_sccl_and_ccl_id(int *sccl_id, int *ccl_id) +static void hisi_read_sccl_and_ccl_id(int *scclp, int *cclp) { u64 mpidr = read_cpuid_mpidr(); - - if (mpidr & MPIDR_MT_BITMASK) { - if (read_cpuid_part_number() == HISI_CPU_PART_TSV110) { - int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); - - if (sccl_id) - *sccl_id = aff2 >> 3; - if (ccl_id) - *ccl_id = aff2 & 0x7; - } else { - if (sccl_id) - *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 3); - if (ccl_id) - *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); - } + int aff3 = MPIDR_AFFINITY_LEVEL(mpidr, 3); + int aff2 = MPIDR_AFFINITY_LEVEL(mpidr, 2); + int aff1 = MPIDR_AFFINITY_LEVEL(mpidr, 1); + bool mt = mpidr & MPIDR_MT_BITMASK; + int sccl, ccl; + + if (mt && read_cpuid_part_number() == HISI_CPU_PART_TSV110) { + sccl = aff2 >> 3; + ccl = aff2 & 0x7; + } else if (mt) { + sccl = aff3; + ccl = aff2; } else { - if (sccl_id) - *sccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); - if (ccl_id) - *ccl_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); + sccl = aff2; + ccl = aff1; } + + if (scclp) + *scclp = sccl; + if (cclp) + *cclp = ccl; } /* diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index 2bbd8ee93507..46600d9380ea 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1535,15 +1535,8 @@ int pinctrl_init_done(struct device *dev) return ret; } -#ifdef CONFIG_PM - -/** - * pinctrl_pm_select_state() - select pinctrl state for PM - * @dev: device to select default state for - * @state: state to set - */ -static int pinctrl_pm_select_state(struct device *dev, - struct pinctrl_state *state) +static int pinctrl_select_bound_state(struct device *dev, + struct pinctrl_state *state) { struct dev_pin_info *pins = dev->pins; int ret; @@ -1558,15 +1551,27 @@ static int pinctrl_pm_select_state(struct device *dev, } /** - * pinctrl_pm_select_default_state() - select default pinctrl state for PM + * pinctrl_select_default_state() - select default pinctrl state * @dev: device to select default state for */ -int pinctrl_pm_select_default_state(struct device *dev) +int pinctrl_select_default_state(struct device *dev) { if (!dev->pins) return 0; - return pinctrl_pm_select_state(dev, dev->pins->default_state); + return pinctrl_select_bound_state(dev, dev->pins->default_state); +} +EXPORT_SYMBOL_GPL(pinctrl_select_default_state); + +#ifdef CONFIG_PM + +/** + * pinctrl_pm_select_default_state() - select default pinctrl state for PM + * @dev: device to select default state for + */ +int pinctrl_pm_select_default_state(struct device *dev) +{ + return pinctrl_select_default_state(dev); } EXPORT_SYMBOL_GPL(pinctrl_pm_select_default_state); @@ -1579,7 +1584,7 @@ int pinctrl_pm_select_sleep_state(struct device *dev) if (!dev->pins) return 0; - return pinctrl_pm_select_state(dev, dev->pins->sleep_state); + return pinctrl_select_bound_state(dev, dev->pins->sleep_state); } 
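
/*
 * Editor's sketch (illustrative, not part of the patch): the point of
 * factoring out pinctrl_select_default_state() above is that non-PM code
 * can now (re)apply a device's default pin state, e.g. from a probe or
 * reset path:
 *
 *	err = pinctrl_select_default_state(dev);
 *	if (err)
 *		dev_warn(dev, "failed to select default pinctrl state\n");
 */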
EXPORT_SYMBOL_GPL(pinctrl_pm_select_sleep_state); @@ -1592,7 +1597,7 @@ int pinctrl_pm_select_idle_state(struct device *dev) if (!dev->pins) return 0; - return pinctrl_pm_select_state(dev, dev->pins->idle_state); + return pinctrl_select_bound_state(dev, dev->pins->idle_state); } EXPORT_SYMBOL_GPL(pinctrl_pm_select_idle_state); #endif diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c index 44d7f50bbc82..d936e7aa74c4 100644 --- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c +++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c @@ -49,6 +49,7 @@ .padown_offset = SPT_PAD_OWN, \ .padcfglock_offset = SPT_PADCFGLOCK, \ .hostown_offset = SPT_HOSTSW_OWN, \ + .is_offset = SPT_GPI_IS, \ .ie_offset = SPT_GPI_IE, \ .pin_base = (s), \ .npins = ((e) - (s) + 1), \ diff --git a/drivers/platform/mellanox/mlxreg-hotplug.c b/drivers/platform/mellanox/mlxreg-hotplug.c index 706207d192ae..77be37a1fbcf 100644 --- a/drivers/platform/mellanox/mlxreg-hotplug.c +++ b/drivers/platform/mellanox/mlxreg-hotplug.c @@ -504,6 +504,20 @@ static int mlxreg_hotplug_set_irq(struct mlxreg_hotplug_priv_data *priv) item = pdata->items; for (i = 0; i < pdata->counter; i++, item++) { + if (item->capability) { + /* + * Read group capability register to get actual number + * of interrupt capable components and set group mask + * accordingly. + */ + ret = regmap_read(priv->regmap, item->capability, + &regval); + if (ret) + goto out; + + item->mask = GENMASK((regval & item->mask) - 1, 0); + } + /* Clear group presence event. */ ret = regmap_write(priv->regmap, item->reg + MLXREG_HOTPLUG_EVENT_OFF, 0); diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig index 27d5b40fb717..587403c44598 100644 --- a/drivers/platform/x86/Kconfig +++ b/drivers/platform/x86/Kconfig @@ -997,7 +997,6 @@ config INTEL_SCU_IPC config INTEL_SCU_IPC_UTIL tristate "Intel SCU IPC utility driver" depends on INTEL_SCU_IPC - default y ---help--- The IPC Util driver provides an interface with the SCU enabling low level access for debug work and updating the firmware. Say @@ -1299,9 +1298,9 @@ config INTEL_ATOMISP2_PM depends on PCI && IOSF_MBI && PM help Power-management driver for Intel's Image Signal Processor found on - Bay and Cherry Trail devices. This dummy driver's sole purpose is to - turn the ISP off (put it in D3) to save power and to allow entering - of S0ix modes. + Bay Trail and Cherry Trail devices. This dummy driver's sole purpose + is to turn the ISP off (put it in D3) to save power and to allow + entering of S0ix modes. To compile this driver as a module, choose M here: the module will be called intel_atomisp2_pm. @@ -1337,6 +1336,17 @@ config PCENGINES_APU2 To compile this driver as a module, choose M here: the module will be called pcengines-apuv2. +config INTEL_UNCORE_FREQ_CONTROL + tristate "Intel Uncore frequency control driver" + depends on X86_64 + help + This driver allows control of uncore frequency limits on + supported server platforms. + Uncore frequency controls RING/LLC (last-level cache) clocks. + + To compile this driver as a module, choose M here: the module + will be called intel-uncore-frequency.
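
[Editor's note: illustrative summary, derived from the driver added below.
The control lives in MSR 0x620 (MSR_UNCORE_RATIO_LIMIT), decoded as

	*max = (cap & 0x7F) * 100000;		/* kHz, i.e. ratio x 100 MHz */
	*min = ((cap & GENMASK(14, 8)) >> 8) * 100000;

and the limits are exposed per die under
/sys/devices/system/cpu/intel_uncore_frequency/package_XX_die_YY/.]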
+ source "drivers/platform/x86/intel_speed_select_if/Kconfig" config SYSTEM76_ACPI diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile index 42d85a00be4e..3747b1f07cf1 100644 --- a/drivers/platform/x86/Makefile +++ b/drivers/platform/x86/Makefile @@ -105,3 +105,4 @@ obj-$(CONFIG_INTEL_ATOMISP2_PM) += intel_atomisp2_pm.o obj-$(CONFIG_PCENGINES_APU2) += pcengines-apuv2.o obj-$(CONFIG_INTEL_SPEED_SELECT_INTERFACE) += intel_speed_select_if/ obj-$(CONFIG_SYSTEM76_ACPI) += system76_acpi.o +obj-$(CONFIG_INTEL_UNCORE_FREQ_CONTROL) += intel-uncore-frequency.o diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index b361c73636a4..6f12747a359a 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -471,6 +471,7 @@ static const struct key_entry asus_nb_wmi_keymap[] = { { KE_KEY, 0x67, { KEY_SWITCHVIDEOMODE } }, /* SDSP LCD + CRT + TV */ { KE_KEY, 0x6B, { KEY_TOUCHPAD_TOGGLE } }, { KE_IGNORE, 0x6E, }, /* Low Battery notification */ + { KE_KEY, 0x71, { KEY_F13 } }, /* General-purpose button */ { KE_KEY, 0x7a, { KEY_ALS_TOGGLE } }, /* Ambient Light Sensor Toggle */ { KE_KEY, 0x7c, { KEY_MICMUTE } }, { KE_KEY, 0x7D, { KEY_BLUETOOTH } }, /* Bluetooth Enable */ diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 982f0cc8270c..43bb15e05529 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -61,6 +61,7 @@ MODULE_LICENSE("GPL"); #define NOTIFY_KBD_BRTDWN 0xc5 #define NOTIFY_KBD_BRTTOGGLE 0xc7 #define NOTIFY_KBD_FBM 0x99 +#define NOTIFY_KBD_TTP 0xae #define ASUS_WMI_FNLOCK_BIOS_DISABLED BIT(0) @@ -81,6 +82,10 @@ MODULE_LICENSE("GPL"); #define ASUS_FAN_BOOST_MODE_SILENT_MASK 0x02 #define ASUS_FAN_BOOST_MODES_MASK 0x03 +#define ASUS_THROTTLE_THERMAL_POLICY_DEFAULT 0 +#define ASUS_THROTTLE_THERMAL_POLICY_OVERBOOST 1 +#define ASUS_THROTTLE_THERMAL_POLICY_SILENT 2 + #define USB_INTEL_XUSB2PR 0xD0 #define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31 @@ -198,6 +203,9 @@ struct asus_wmi { u8 fan_boost_mode_mask; u8 fan_boost_mode; + bool throttle_thermal_policy_available; + u8 throttle_thermal_policy_mode; + // The RSOC controls the maximum charging percentage. 
bool battery_rsoc_available; @@ -1718,6 +1726,107 @@ static ssize_t fan_boost_mode_store(struct device *dev, // Fan boost mode: 0 - normal, 1 - overboost, 2 - silent static DEVICE_ATTR_RW(fan_boost_mode); +/* Throttle thermal policy ****************************************************/ + +static int throttle_thermal_policy_check_present(struct asus_wmi *asus) +{ + u32 result; + int err; + + asus->throttle_thermal_policy_available = false; + + err = asus_wmi_get_devstate(asus, + ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY, + &result); + if (err) { + if (err == -ENODEV) + return 0; + return err; + } + + if (result & ASUS_WMI_DSTS_PRESENCE_BIT) + asus->throttle_thermal_policy_available = true; + + return 0; +} + +static int throttle_thermal_policy_write(struct asus_wmi *asus) +{ + int err; + u8 value; + u32 retval; + + value = asus->throttle_thermal_policy_mode; + + err = asus_wmi_set_devstate(ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY, + value, &retval); + if (err) { + pr_warn("Failed to set throttle thermal policy: %d\n", err); + return err; + } + + if (retval != 1) { + pr_warn("Failed to set throttle thermal policy (retval): 0x%x\n", + retval); + return -EIO; + } + + return 0; +} + +static int throttle_thermal_policy_set_default(struct asus_wmi *asus) +{ + if (!asus->throttle_thermal_policy_available) + return 0; + + asus->throttle_thermal_policy_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT; + return throttle_thermal_policy_write(asus); +} + +static int throttle_thermal_policy_switch_next(struct asus_wmi *asus) +{ + u8 new_mode = asus->throttle_thermal_policy_mode + 1; + + if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT) + new_mode = ASUS_THROTTLE_THERMAL_POLICY_DEFAULT; + + asus->throttle_thermal_policy_mode = new_mode; + return throttle_thermal_policy_write(asus); +} + +static ssize_t throttle_thermal_policy_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct asus_wmi *asus = dev_get_drvdata(dev); + u8 mode = asus->throttle_thermal_policy_mode; + + return scnprintf(buf, PAGE_SIZE, "%d\n", mode); +} + +static ssize_t throttle_thermal_policy_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + int result; + u8 new_mode; + struct asus_wmi *asus = dev_get_drvdata(dev); + + result = kstrtou8(buf, 10, &new_mode); + if (result < 0) + return result; + + if (new_mode > ASUS_THROTTLE_THERMAL_POLICY_SILENT) + return -EINVAL; + + asus->throttle_thermal_policy_mode = new_mode; + result = throttle_thermal_policy_write(asus); + if (result) + return result; + + return count; +} + +// Throttle thermal policy: 0 - default, 1 - overboost, 2 - silent +static DEVICE_ATTR_RW(throttle_thermal_policy); + /* Backlight ******************************************************************/ static int read_backlight_power(struct asus_wmi *asus) @@ -1999,6 +2108,11 @@ static void asus_wmi_handle_event_code(int code, struct asus_wmi *asus) return; } + if (asus->throttle_thermal_policy_available && code == NOTIFY_KBD_TTP) { + throttle_thermal_policy_switch_next(asus); + return; + } + if (is_display_toggle(code) && asus->driver->quirks->no_display_toggle) return; @@ -2149,6 +2263,7 @@ static struct attribute *platform_attributes[] = { &dev_attr_lid_resume.attr, &dev_attr_als_enable.attr, &dev_attr_fan_boost_mode.attr, + &dev_attr_throttle_thermal_policy.attr, NULL }; @@ -2172,6 +2287,8 @@ static umode_t asus_sysfs_is_visible(struct kobject *kobj, devid = ASUS_WMI_DEVID_ALS_ENABLE; else if (attr == &dev_attr_fan_boost_mode.attr) ok = asus->fan_boost_mode_available; + else if (attr == 
&dev_attr_throttle_thermal_policy.attr) + ok = asus->throttle_thermal_policy_available; if (devid != -1) ok = !(asus_wmi_get_devstate_simple(asus, devid) < 0); @@ -2431,6 +2548,12 @@ static int asus_wmi_add(struct platform_device *pdev) if (err) goto fail_fan_boost_mode; + err = throttle_thermal_policy_check_present(asus); + if (err) + goto fail_throttle_thermal_policy; + else + throttle_thermal_policy_set_default(asus); + err = asus_wmi_sysfs_init(asus->platform_device); if (err) goto fail_sysfs; @@ -2515,6 +2638,7 @@ fail_hwmon: fail_input: asus_wmi_sysfs_exit(asus->platform_device); fail_sysfs: +fail_throttle_thermal_policy: fail_fan_boost_mode: fail_platform: kfree(asus); diff --git a/drivers/platform/x86/intel-uncore-frequency.c b/drivers/platform/x86/intel-uncore-frequency.c new file mode 100644 index 000000000000..2b1a0734c3f8 --- /dev/null +++ b/drivers/platform/x86/intel-uncore-frequency.c @@ -0,0 +1,437 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel Uncore Frequency Setting + * Copyright (c) 2019, Intel Corporation. + * All rights reserved. + * + * Provide an interface to set MSR 0x620 at per-die granularity. On CPU online, + * one control CPU is identified per die to read/write the limits. If that + * control CPU goes offline, another CPU in the die takes over; when the last + * CPU in a die goes offline, the sysfs object for that die is removed. + * The majority of the actual code is related to sysfs create and read/write + * attributes. + * + * Author: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com> + */ + +#include <linux/cpu.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/suspend.h> +#include <asm/cpu_device_id.h> +#include <asm/intel-family.h> + +#define MSR_UNCORE_RATIO_LIMIT 0x620 +#define UNCORE_FREQ_KHZ_MULTIPLIER 100000 + +/** + * struct uncore_data - Encapsulate all uncore data + * @kobj: Embedded kobject backing the per-die sysfs directory + * @stored_uncore_data: Last user changed MSR 620 value, which will be restored + * on system resume. + * @initial_min_freq_khz: Sampled minimum uncore frequency at driver init + * @initial_max_freq_khz: Sampled maximum uncore frequency at driver init + * @control_cpu: Designated CPU for a die to read/write + * @valid: Mark the data valid/invalid + * + * This structure is used to encapsulate all data related to uncore sysfs + * settings for a die/package.
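+ *
+ * (Editor's note, descriptive: one instance exists per logical die,
+ * indexed by topology_logical_die_id(), across all packages.)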
+ */ +struct uncore_data { + struct kobject kobj; + u64 stored_uncore_data; + u32 initial_min_freq_khz; + u32 initial_max_freq_khz; + int control_cpu; + bool valid; +}; + +#define to_uncore_data(a) container_of(a, struct uncore_data, kobj) + +/* Max instances for uncore data, one for each die */ +static int uncore_max_entries __read_mostly; +/* Storage for uncore data for all instances */ +static struct uncore_data *uncore_instances; +/* Root of all the uncore sysfs kobjs */ +static struct kobject uncore_root_kobj; +/* Stores the CPU mask of the target CPUs to use during uncore read/write */ +static cpumask_t uncore_cpu_mask; +/* CPU online callback register instance */ +static enum cpuhp_state uncore_hp_state __read_mostly; +/* Mutex to control all mutual exclusions */ +static DEFINE_MUTEX(uncore_lock); + +struct uncore_attr { + struct attribute attr; + ssize_t (*show)(struct kobject *kobj, + struct attribute *attr, char *buf); + ssize_t (*store)(struct kobject *kobj, + struct attribute *attr, const char *c, ssize_t count); +}; + +#define define_one_uncore_ro(_name) \ +static struct uncore_attr _name = \ +__ATTR(_name, 0444, show_##_name, NULL) + +#define define_one_uncore_rw(_name) \ +static struct uncore_attr _name = \ +__ATTR(_name, 0644, show_##_name, store_##_name) + +#define show_uncore_data(member_name) \ + static ssize_t show_##member_name(struct kobject *kobj, \ + struct attribute *attr, \ + char *buf) \ + { \ + struct uncore_data *data = to_uncore_data(kobj); \ + return scnprintf(buf, PAGE_SIZE, "%u\n", \ + data->member_name); \ + } \ + define_one_uncore_ro(member_name) + +show_uncore_data(initial_min_freq_khz); +show_uncore_data(initial_max_freq_khz); + +/* Common function to read MSR 0x620 and return the min/max frequencies */ +static int uncore_read_ratio(struct uncore_data *data, unsigned int *min, + unsigned int *max) +{ + u64 cap; + int ret; + + ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + if (ret) + return ret; + + *max = (cap & 0x7F) * UNCORE_FREQ_KHZ_MULTIPLIER; + *min = ((cap & GENMASK(14, 8)) >> 8) * UNCORE_FREQ_KHZ_MULTIPLIER; + + return 0; +} + +/* Common function to set min/max ratios to be used by sysfs callbacks */ +static int uncore_write_ratio(struct uncore_data *data, unsigned int input, + int set_max) +{ + int ret; + u64 cap; + + mutex_lock(&uncore_lock); + + input /= UNCORE_FREQ_KHZ_MULTIPLIER; + if (!input || input > 0x7F) { + ret = -EINVAL; + goto finish_write; + } + + ret = rdmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, &cap); + if (ret) + goto finish_write; + + if (set_max) { + cap &= ~0x7F; + cap |= input; + } else { + cap &= ~GENMASK(14, 8); + cap |= (input << 8); + } + + ret = wrmsrl_on_cpu(data->control_cpu, MSR_UNCORE_RATIO_LIMIT, cap); + if (ret) + goto finish_write; + + data->stored_uncore_data = cap; + +finish_write: + mutex_unlock(&uncore_lock); + + return ret; +} + +static ssize_t store_min_max_freq_khz(struct kobject *kobj, + struct attribute *attr, + const char *buf, ssize_t count, + int min_max) +{ + struct uncore_data *data = to_uncore_data(kobj); + unsigned int input; + int ret; + + if (kstrtouint(buf, 10, &input)) + return -EINVAL; + + ret = uncore_write_ratio(data, input, min_max); + if (ret) + return ret; + + return count; +} + +static ssize_t show_min_max_freq_khz(struct kobject *kobj, + struct attribute *attr, + char *buf, int min_max) +{ + struct uncore_data *data = to_uncore_data(kobj); + unsigned int min, max; + int ret; + + mutex_lock(&uncore_lock); + ret = uncore_read_ratio(data, &min, &max); + mutex_unlock(&uncore_lock); + if (ret) + return ret; + + if 
(min_max) + return sprintf(buf, "%u\n", max); + + return sprintf(buf, "%u\n", min); +} + +#define store_uncore_min_max(name, min_max) \ + static ssize_t store_##name(struct kobject *kobj, \ + struct attribute *attr, \ + const char *buf, ssize_t count) \ + { \ + \ + return store_min_max_freq_khz(kobj, attr, buf, count, \ + min_max); \ + } + +#define show_uncore_min_max(name, min_max) \ + static ssize_t show_##name(struct kobject *kobj, \ + struct attribute *attr, char *buf) \ + { \ + \ + return show_min_max_freq_khz(kobj, attr, buf, min_max); \ + } + +store_uncore_min_max(min_freq_khz, 0); +store_uncore_min_max(max_freq_khz, 1); + +show_uncore_min_max(min_freq_khz, 0); +show_uncore_min_max(max_freq_khz, 1); + +define_one_uncore_rw(min_freq_khz); +define_one_uncore_rw(max_freq_khz); + +static struct attribute *uncore_attrs[] = { + &initial_min_freq_khz.attr, + &initial_max_freq_khz.attr, + &max_freq_khz.attr, + &min_freq_khz.attr, + NULL +}; + +static struct kobj_type uncore_ktype = { + .sysfs_ops = &kobj_sysfs_ops, + .default_attrs = uncore_attrs, +}; + +static struct kobj_type uncore_root_ktype = { + .sysfs_ops = &kobj_sysfs_ops, +}; + +/* Caller provides protection */ +static struct uncore_data *uncore_get_instance(unsigned int cpu) +{ + int id = topology_logical_die_id(cpu); + + if (id >= 0 && id < uncore_max_entries) + return &uncore_instances[id]; + + return NULL; +} + +static void uncore_add_die_entry(int cpu) +{ + struct uncore_data *data; + + mutex_lock(&uncore_lock); + data = uncore_get_instance(cpu); + if (!data) { + mutex_unlock(&uncore_lock); + return; + } + + if (data->valid) { + /* control cpu changed */ + data->control_cpu = cpu; + } else { + char str[64]; + int ret; + + memset(data, 0, sizeof(*data)); + sprintf(str, "package_%02d_die_%02d", + topology_physical_package_id(cpu), + topology_die_id(cpu)); + + uncore_read_ratio(data, &data->initial_min_freq_khz, + &data->initial_max_freq_khz); + + ret = kobject_init_and_add(&data->kobj, &uncore_ktype, + &uncore_root_kobj, str); + if (!ret) { + data->control_cpu = cpu; + data->valid = true; + } + } + mutex_unlock(&uncore_lock); +} + +/* Last CPU in this die is offline, so remove sysfs entries */ +static void uncore_remove_die_entry(int cpu) +{ + struct uncore_data *data; + + mutex_lock(&uncore_lock); + data = uncore_get_instance(cpu); + if (data) { + kobject_put(&data->kobj); + data->control_cpu = -1; + data->valid = false; + } + mutex_unlock(&uncore_lock); +} + +static int uncore_event_cpu_online(unsigned int cpu) +{ + int target; + + /* Check if there is an online cpu in the package for uncore MSR */ + target = cpumask_any_and(&uncore_cpu_mask, topology_die_cpumask(cpu)); + if (target < nr_cpu_ids) + return 0; + + /* Use this CPU on this die as a control CPU */ + cpumask_set_cpu(cpu, &uncore_cpu_mask); + uncore_add_die_entry(cpu); + + return 0; +} + +static int uncore_event_cpu_offline(unsigned int cpu) +{ + int target; + + /* Check if existing cpu is used for uncore MSRs */ + if (!cpumask_test_and_clear_cpu(cpu, &uncore_cpu_mask)) + return 0; + + /* Find a new cpu to set uncore MSR */ + target = cpumask_any_but(topology_die_cpumask(cpu), cpu); + + if (target < nr_cpu_ids) { + cpumask_set_cpu(target, &uncore_cpu_mask); + uncore_add_die_entry(target); + } else { + uncore_remove_die_entry(cpu); + } + + return 0; +} + +static int uncore_pm_notify(struct notifier_block *nb, unsigned long mode, + void *_unused) +{ + int cpu; + + switch (mode) { + case PM_POST_HIBERNATION: + case PM_POST_RESTORE: + case PM_POST_SUSPEND: + 
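+		/*
+		 * Editor's note (descriptive comment, added for clarity):
+		 * rewrite the last user-programmed limits after any resume
+		 * path; stored_uncore_data stays 0 until a user first writes
+		 * a limit, so untouched dies are skipped by the check below.
+		 */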
for_each_cpu(cpu, &uncore_cpu_mask) { + struct uncore_data *data; + int ret; + + data = uncore_get_instance(cpu); + if (!data || !data->valid || !data->stored_uncore_data) + continue; + + ret = wrmsrl_on_cpu(cpu, MSR_UNCORE_RATIO_LIMIT, + data->stored_uncore_data); + if (ret) + return ret; + } + break; + default: + break; + } + return 0; +} + +static struct notifier_block uncore_pm_nb = { + .notifier_call = uncore_pm_notify, +}; + +#define ICPU(model) { X86_VENDOR_INTEL, 6, model, X86_FEATURE_ANY, } + +static const struct x86_cpu_id intel_uncore_cpu_ids[] = { + ICPU(INTEL_FAM6_BROADWELL_G), + ICPU(INTEL_FAM6_BROADWELL_X), + ICPU(INTEL_FAM6_BROADWELL_D), + ICPU(INTEL_FAM6_SKYLAKE_X), + ICPU(INTEL_FAM6_ICELAKE_X), + ICPU(INTEL_FAM6_ICELAKE_D), + {} +}; + +static int __init intel_uncore_init(void) +{ + const struct x86_cpu_id *id; + int ret; + + id = x86_match_cpu(intel_uncore_cpu_ids); + if (!id) + return -ENODEV; + + uncore_max_entries = topology_max_packages() * + topology_max_die_per_package(); + uncore_instances = kcalloc(uncore_max_entries, + sizeof(*uncore_instances), GFP_KERNEL); + if (!uncore_instances) + return -ENOMEM; + + ret = kobject_init_and_add(&uncore_root_kobj, &uncore_root_ktype, + &cpu_subsys.dev_root->kobj, + "intel_uncore_frequency"); + if (ret) + goto err_free; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "platform/x86/uncore-freq:online", + uncore_event_cpu_online, + uncore_event_cpu_offline); + if (ret < 0) + goto err_rem_kobj; + + uncore_hp_state = ret; + + ret = register_pm_notifier(&uncore_pm_nb); + if (ret) + goto err_rem_state; + + return 0; + +err_rem_state: + cpuhp_remove_state(uncore_hp_state); +err_rem_kobj: + kobject_put(&uncore_root_kobj); +err_free: + kfree(uncore_instances); + + return ret; +} +module_init(intel_uncore_init) + +static void __exit intel_uncore_exit(void) +{ + int i; + + unregister_pm_notifier(&uncore_pm_nb); + cpuhp_remove_state(uncore_hp_state); + for (i = 0; i < uncore_max_entries; ++i) { + if (uncore_instances[i].valid) + kobject_put(&uncore_instances[i].kobj); + } + kobject_put(&uncore_root_kobj); + kfree(uncore_instances); +} +module_exit(intel_uncore_exit) + +MODULE_LICENSE("GPL v2"); +MODULE_DESCRIPTION("Intel Uncore Frequency Limits Driver"); diff --git a/drivers/platform/x86/intel_atomisp2_pm.c b/drivers/platform/x86/intel_atomisp2_pm.c index b0f421fea2a5..805fc0d8515c 100644 --- a/drivers/platform/x86/intel_atomisp2_pm.c +++ b/drivers/platform/x86/intel_atomisp2_pm.c @@ -1,8 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* - * Dummy driver for Intel's Image Signal Processor found on Bay and Cherry - * Trail devices. The sole purpose of this driver is to allow the ISP to - * be put in D3. + * Dummy driver for Intel's Image Signal Processor found on Bay Trail + * and Cherry Trail devices. The sole purpose of this driver is to allow + * the ISP to be put in D3. * * Copyright (C) 2018 Hans de Goede <hdegoede@redhat.com> * @@ -36,8 +36,7 @@ static int isp_set_power(struct pci_dev *dev, bool enable) { unsigned long timeout; - u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : - ISPSSPM0_IUNIT_POWER_OFF; + u32 val = enable ? ISPSSPM0_IUNIT_POWER_ON : ISPSSPM0_IUNIT_POWER_OFF; /* Write to ISPSSPM0 bit[1:0] to power on/off the IUNIT */ iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, @@ -45,29 +44,25 @@ static int isp_set_power(struct pci_dev *dev, bool enable) /* * There should be no IUNIT access while power-down is - * in progress HW sighting: 4567865 + * in progress. HW sighting: 4567865. 
* Wait up to 50 ms for the IUNIT to shut down. * And we do the same for power on. */ timeout = jiffies + msecs_to_jiffies(50); - while (1) { + do { u32 tmp; /* Wait until ISPSSPM0 bit[25:24] shows the right value */ iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, ISPSSPM0, &tmp); tmp = (tmp & ISPSSPM0_ISPSSS_MASK) >> ISPSSPM0_ISPSSS_OFFSET; if (tmp == val) - break; + return 0; - if (time_after(jiffies, timeout)) { - dev_err(&dev->dev, "IUNIT power-%s timeout.\n", - enable ? "on" : "off"); - return -EBUSY; - } usleep_range(1000, 2000); - } + } while (time_before(jiffies, timeout)); - return 0; + dev_err(&dev->dev, "IUNIT power-%s timeout.\n", enable ? "on" : "off"); + return -EBUSY; } static int isp_probe(struct pci_dev *dev, const struct pci_device_id *id) diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c index 292bace83f1e..6f436836fe50 100644 --- a/drivers/platform/x86/intel_mid_powerbtn.c +++ b/drivers/platform/x86/intel_mid_powerbtn.c @@ -146,9 +146,10 @@ static int mid_pb_probe(struct platform_device *pdev) input_set_capability(input, EV_KEY, KEY_POWER); - ddata = (struct mid_pb_ddata *)id->driver_data; + ddata = devm_kmemdup(&pdev->dev, (void *)id->driver_data, + sizeof(*ddata), GFP_KERNEL); if (!ddata) - return -ENODATA; + return -ENOMEM; ddata->dev = &pdev->dev; ddata->irq = irq; diff --git a/drivers/platform/x86/intel_pmc_core.c b/drivers/platform/x86/intel_pmc_core.c index 571b4754477c..144faa8bad3d 100644 --- a/drivers/platform/x86/intel_pmc_core.c +++ b/drivers/platform/x86/intel_pmc_core.c @@ -49,7 +49,7 @@ static const struct pmc_bit_map spt_pll_map[] = { {"GEN2 USB2PCIE2 PLL", SPT_PMC_BIT_MPHY_CMN_LANE1}, {"DMIPCIE3 PLL", SPT_PMC_BIT_MPHY_CMN_LANE2}, {"SATA PLL", SPT_PMC_BIT_MPHY_CMN_LANE3}, - {}, + {} }; static const struct pmc_bit_map spt_mphy_map[] = { @@ -69,7 +69,7 @@ static const struct pmc_bit_map spt_mphy_map[] = { {"MPHY CORE LANE 13", SPT_PMC_BIT_MPHY_LANE13}, {"MPHY CORE LANE 14", SPT_PMC_BIT_MPHY_LANE14}, {"MPHY CORE LANE 15", SPT_PMC_BIT_MPHY_LANE15}, - {}, + {} }; static const struct pmc_bit_map spt_pfear_map[] = { @@ -113,7 +113,12 @@ static const struct pmc_bit_map spt_pfear_map[] = { {"CSME_SMS1", SPT_PMC_BIT_CSME_SMS1}, {"CSME_RTC", SPT_PMC_BIT_CSME_RTC}, {"CSME_PSF", SPT_PMC_BIT_CSME_PSF}, - {}, + {} +}; + +static const struct pmc_bit_map *ext_spt_pfear_map[] = { + spt_pfear_map, + NULL }; static const struct pmc_bit_map spt_ltr_show_map[] = { @@ -142,7 +147,7 @@ static const struct pmc_bit_map spt_ltr_show_map[] = { }; static const struct pmc_reg_map spt_reg_map = { - .pfear_sts = spt_pfear_map, + .pfear_sts = ext_spt_pfear_map, .mphy_sts = spt_mphy_map, .pll_sts = spt_pll_map, .ltr_show_sts = spt_ltr_show_map, @@ -186,7 +191,10 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {"SDX", BIT(4)}, {"SPE", BIT(5)}, {"Fuse", BIT(6)}, - /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */ + /* + * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake, + * Tiger Lake and Elkhart Lake. + */ {"SBR8", BIT(7)}, {"CSME_FSC", BIT(0)}, @@ -230,11 +238,22 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {"HDA_PGD4", BIT(2)}, {"HDA_PGD5", BIT(3)}, {"HDA_PGD6", BIT(4)}, - /* Reserved for Cannon Lake but valid for Ice Lake and Comet Lake */ + /* + * Reserved for Cannon Lake but valid for Ice Lake, Comet Lake, + * Tiger Lake and Elkhart Lake.
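+ *
+ * (Editor's note, descriptive: the NULL-terminated ext_*_pfear_map
+ * arrays introduced below chain these per-generation lists, so the
+ * Ice Lake and Tiger Lake maps reuse the Cannon Lake entries and only
+ * append their own bits.)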
+ */ {"PSF6", BIT(5)}, {"PSF7", BIT(6)}, {"PSF8", BIT(7)}, + {} +}; + +static const struct pmc_bit_map *ext_cnp_pfear_map[] = { + cnp_pfear_map, + NULL +}; +static const struct pmc_bit_map icl_pfear_map[] = { /* Ice Lake generation onwards only */ {"RES_65", BIT(0)}, {"RES_66", BIT(1)}, @@ -247,6 +266,30 @@ static const struct pmc_bit_map cnp_pfear_map[] = { {} }; +static const struct pmc_bit_map *ext_icl_pfear_map[] = { + cnp_pfear_map, + icl_pfear_map, + NULL +}; + +static const struct pmc_bit_map tgl_pfear_map[] = { + /* Tiger Lake and Elkhart Lake generation onwards only */ + {"PSF9", BIT(0)}, + {"RES_66", BIT(1)}, + {"RES_67", BIT(2)}, + {"RES_68", BIT(3)}, + {"RES_69", BIT(4)}, + {"RES_70", BIT(5)}, + {"TBTLSX", BIT(6)}, + {} +}; + +static const struct pmc_bit_map *ext_tgl_pfear_map[] = { + cnp_pfear_map, + tgl_pfear_map, + NULL +}; + static const struct pmc_bit_map cnp_slps0_dbg0_map[] = { {"AUDIO_D3", BIT(0)}, {"OTG_D3", BIT(1)}, @@ -300,7 +343,7 @@ static const struct pmc_bit_map *cnp_slps0_dbg_maps[] = { cnp_slps0_dbg0_map, cnp_slps0_dbg1_map, cnp_slps0_dbg2_map, - NULL, + NULL }; static const struct pmc_bit_map cnp_ltr_show_map[] = { @@ -334,7 +377,7 @@ static const struct pmc_bit_map cnp_ltr_show_map[] = { }; static const struct pmc_reg_map cnp_reg_map = { - .pfear_sts = cnp_pfear_map, + .pfear_sts = ext_cnp_pfear_map, .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, .slps0_dbg_maps = cnp_slps0_dbg_maps, .ltr_show_sts = cnp_ltr_show_map, @@ -350,7 +393,7 @@ static const struct pmc_reg_map cnp_reg_map = { }; static const struct pmc_reg_map icl_reg_map = { - .pfear_sts = cnp_pfear_map, + .pfear_sts = ext_icl_pfear_map, .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, .slps0_dbg_maps = cnp_slps0_dbg_maps, .ltr_show_sts = cnp_ltr_show_map, @@ -365,18 +408,29 @@ static const struct pmc_reg_map icl_reg_map = { .ltr_ignore_max = ICL_NUM_IP_IGN_ALLOWED, }; -static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) -{ - return readb(pmcdev->regbase + offset); -} +static const struct pmc_reg_map tgl_reg_map = { + .pfear_sts = ext_tgl_pfear_map, + .slp_s0_offset = CNP_PMC_SLP_S0_RES_COUNTER_OFFSET, + .slps0_dbg_maps = cnp_slps0_dbg_maps, + .ltr_show_sts = cnp_ltr_show_map, + .msr_sts = msr_map, + .slps0_dbg_offset = CNP_PMC_SLPS0_DBG_OFFSET, + .ltr_ignore_offset = CNP_PMC_LTR_IGNORE_OFFSET, + .regmap_length = CNP_PMC_MMIO_REG_LEN, + .ppfear0_offset = CNP_PMC_HOST_PPFEAR0A, + .ppfear_buckets = ICL_PPFEAR_NUM_ENTRIES, + .pm_cfg_offset = CNP_PMC_PM_CFG_OFFSET, + .pm_read_disable_bit = CNP_PMC_READ_DISABLE_BIT, + .ltr_ignore_max = TGL_NUM_IP_IGN_ALLOWED, +}; static inline u32 pmc_core_reg_read(struct pmc_dev *pmcdev, int reg_offset) { return readl(pmcdev->regbase + reg_offset); } -static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int - reg_offset, u32 val) +static inline void pmc_core_reg_write(struct pmc_dev *pmcdev, int reg_offset, + u32 val) { writel(val, pmcdev->regbase + reg_offset); } @@ -412,20 +466,25 @@ static int pmc_core_check_read_lock_bit(void) #if IS_ENABLED(CONFIG_DEBUG_FS) static bool slps0_dbg_latch; -static void pmc_core_display_map(struct seq_file *s, int index, - u8 pf_reg, const struct pmc_bit_map *pf_map) +static inline u8 pmc_core_reg_read_byte(struct pmc_dev *pmcdev, int offset) +{ + return readb(pmcdev->regbase + offset); +} + +static void pmc_core_display_map(struct seq_file *s, int index, int idx, int ip, + u8 pf_reg, const struct pmc_bit_map **pf_map) { seq_printf(s, "PCH IP: %-2d - %-32s\tState: %s\n", - index, 
pf_map[index].name, - pf_map[index].bit_mask & pf_reg ? "Off" : "On"); + ip, pf_map[idx][index].name, + pf_map[idx][index].bit_mask & pf_reg ? "Off" : "On"); } static int pmc_core_ppfear_show(struct seq_file *s, void *unused) { struct pmc_dev *pmcdev = s->private; - const struct pmc_bit_map *map = pmcdev->map->pfear_sts; + const struct pmc_bit_map **maps = pmcdev->map->pfear_sts; u8 pf_regs[PPFEAR_MAX_NUM_ENTRIES]; - int index, iter; + int index, iter, idx, ip = 0; iter = pmcdev->map->ppfear0_offset; @@ -433,9 +492,12 @@ static int pmc_core_ppfear_show(struct seq_file *s, void *unused) index < PPFEAR_MAX_NUM_ENTRIES; index++, iter++) pf_regs[index] = pmc_core_reg_read_byte(pmcdev, iter); - for (index = 0; map[index].name && - index < pmcdev->map->ppfear_buckets * 8; index++) - pmc_core_display_map(s, index, pf_regs[index / 8], map); + for (idx = 0; maps[idx]; idx++) { + for (index = 0; maps[idx][index].name && + index < pmcdev->map->ppfear_buckets * 8; ip++, index++) + pmc_core_display_map(s, index, idx, ip, + pf_regs[index / 8], maps); + } return 0; } @@ -561,21 +623,22 @@ out_unlock: } DEFINE_SHOW_ATTRIBUTE(pmc_core_pll); -static ssize_t pmc_core_ltr_ignore_write(struct file *file, const char __user -*userbuf, size_t count, loff_t *ppos) +static ssize_t pmc_core_ltr_ignore_write(struct file *file, + const char __user *userbuf, + size_t count, loff_t *ppos) { struct pmc_dev *pmcdev = &pmc; const struct pmc_reg_map *map = pmcdev->map; u32 val, buf_size, fd; - int err = 0; + int err; buf_size = count < 64 ? count : 64; - mutex_lock(&pmcdev->lock); - if (kstrtou32_from_user(userbuf, buf_size, 10, &val)) { - err = -EFAULT; - goto out_unlock; - } + err = kstrtou32_from_user(userbuf, buf_size, 10, &val); + if (err) + return err; + + mutex_lock(&pmcdev->lock); if (val > map->ltr_ignore_max) { err = -EINVAL; @@ -767,8 +830,9 @@ static void pmc_core_dbgfs_register(struct pmc_dev *pmcdev) debugfs_create_file("slp_s0_residency_usec", 0444, dir, pmcdev, &pmc_core_dev_state); - debugfs_create_file("pch_ip_power_gating_status", 0444, dir, pmcdev, - &pmc_core_ppfear_fops); + if (pmcdev->map->pfear_sts) + debugfs_create_file("pch_ip_power_gating_status", 0444, dir, + pmcdev, &pmc_core_ppfear_fops); debugfs_create_file("ltr_ignore", 0644, dir, pmcdev, &pmc_core_ltr_ignore_ops); @@ -816,19 +880,22 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { INTEL_CPU_FAM6(ICELAKE_NNPI, icl_reg_map), INTEL_CPU_FAM6(COMETLAKE, cnp_reg_map), INTEL_CPU_FAM6(COMETLAKE_L, cnp_reg_map), + INTEL_CPU_FAM6(TIGERLAKE_L, tgl_reg_map), + INTEL_CPU_FAM6(TIGERLAKE, tgl_reg_map), + INTEL_CPU_FAM6(ATOM_TREMONT, tgl_reg_map), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pmc_core_ids); static const struct pci_device_id pmc_pci_ids[] = { - { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID), 0}, - { 0, }, + { PCI_VDEVICE(INTEL, SPT_PMC_PCI_DEVICE_ID) }, + { } }; /* * This quirk can be used on those platforms where - * the platform BIOS enforces 24Mhx Crystal to shutdown + * the platform BIOS enforces 24Mhz crystal to shutdown * before PMC can assert SLP_S0#. 
*/ static int quirk_xtal_ignore(const struct dmi_system_id *id) diff --git a/drivers/platform/x86/intel_pmc_core.h b/drivers/platform/x86/intel_pmc_core.h index 8203ae38dc46..f1a0792b3f91 100644 --- a/drivers/platform/x86/intel_pmc_core.h +++ b/drivers/platform/x86/intel_pmc_core.h @@ -186,6 +186,8 @@ enum ppfear_regs { #define ICL_NUM_IP_IGN_ALLOWED 20 #define ICL_PMC_LTR_WIGIG 0x1BFC +#define TGL_NUM_IP_IGN_ALLOWED 22 + struct pmc_bit_map { const char *name; u32 bit_mask; @@ -213,7 +215,7 @@ struct pmc_bit_map { * captures them to have a common implementation. */ struct pmc_reg_map { - const struct pmc_bit_map *pfear_sts; + const struct pmc_bit_map **pfear_sts; const struct pmc_bit_map *mphy_sts; const struct pmc_bit_map *pll_sts; const struct pmc_bit_map **slps0_dbg_maps; diff --git a/drivers/platform/x86/intel_pmc_ipc.c b/drivers/platform/x86/intel_pmc_ipc.c index 5c1da2bb1435..2433bf73f1ed 100644 --- a/drivers/platform/x86/intel_pmc_ipc.c +++ b/drivers/platform/x86/intel_pmc_ipc.c @@ -12,23 +12,13 @@ */ #include <linux/acpi.h> -#include <linux/atomic.h> -#include <linux/bitops.h> #include <linux/delay.h> -#include <linux/device.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/io-64-nonatomic-lo-hi.h> -#include <linux/kernel.h> #include <linux/module.h> -#include <linux/notifier.h> #include <linux/pci.h> #include <linux/platform_device.h> -#include <linux/pm.h> -#include <linux/pm_qos.h> -#include <linux/sched.h> -#include <linux/spinlock.h> -#include <linux/suspend.h> #include <asm/intel_pmc_ipc.h> @@ -184,11 +174,6 @@ static inline void ipc_data_writel(u32 data, u32 offset) writel(data, ipcdev.ipc_base + IPC_WRITE_BUFFER + offset); } -static inline u8 __maybe_unused ipc_data_readb(u32 offset) -{ - return readb(ipcdev.ipc_base + IPC_READ_BUFFER + offset); -} - static inline u32 ipc_data_readl(u32 offset) { return readl(ipcdev.ipc_base + IPC_READ_BUFFER + offset); @@ -211,35 +196,6 @@ static inline int is_gcr_valid(u32 offset) } /** - * intel_pmc_gcr_read() - Read a 32-bit PMC GCR register - * @offset: offset of GCR register from GCR address base - * @data: data pointer for storing the register output - * - * Reads the 32-bit PMC GCR register at given offset. - * - * Return: negative value on error or 0 on success. - */ -int intel_pmc_gcr_read(u32 offset, u32 *data) -{ - int ret; - - spin_lock(&ipcdev.gcr_lock); - - ret = is_gcr_valid(offset); - if (ret < 0) { - spin_unlock(&ipcdev.gcr_lock); - return ret; - } - - *data = readl(ipcdev.gcr_mem_base + offset); - - spin_unlock(&ipcdev.gcr_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(intel_pmc_gcr_read); - -/** * intel_pmc_gcr_read64() - Read a 64-bit PMC GCR register * @offset: offset of GCR register from GCR address base * @data: data pointer for storing the register output @@ -269,36 +225,6 @@ int intel_pmc_gcr_read64(u32 offset, u64 *data) EXPORT_SYMBOL_GPL(intel_pmc_gcr_read64); /** - * intel_pmc_gcr_write() - Write PMC GCR register - * @offset: offset of GCR register from GCR address base - * @data: register update value - * - * Writes the PMC GCR register of given offset with given - * value. - * - * Return: negative value on error or 0 on success. 
- */ -int intel_pmc_gcr_write(u32 offset, u32 data) -{ - int ret; - - spin_lock(&ipcdev.gcr_lock); - - ret = is_gcr_valid(offset); - if (ret < 0) { - spin_unlock(&ipcdev.gcr_lock); - return ret; - } - - writel(data, ipcdev.gcr_mem_base + offset); - - spin_unlock(&ipcdev.gcr_lock); - - return 0; -} -EXPORT_SYMBOL_GPL(intel_pmc_gcr_write); - -/** * intel_pmc_gcr_update() - Update PMC GCR register bits * @offset: offset of GCR register from GCR address base * @mask: bit mask for update operation @@ -309,7 +235,7 @@ EXPORT_SYMBOL_GPL(intel_pmc_gcr_write); * * Return: negative value on error or 0 on success. */ -int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val) +static int intel_pmc_gcr_update(u32 offset, u32 mask, u32 val) { u32 new_val; int ret = 0; @@ -339,7 +265,6 @@ gcr_ipc_unlock: spin_unlock(&ipcdev.gcr_lock); return ret; } -EXPORT_SYMBOL_GPL(intel_pmc_gcr_update); static int update_no_reboot_bit(void *priv, bool set) { @@ -405,7 +330,7 @@ static int intel_pmc_ipc_check_status(void) * * Return: an IPC error code or 0 on success. */ -int intel_pmc_ipc_simple_command(int cmd, int sub) +static int intel_pmc_ipc_simple_command(int cmd, int sub) { int ret; @@ -420,7 +345,6 @@ int intel_pmc_ipc_simple_command(int cmd, int sub) return ret; } -EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command); /** * intel_pmc_ipc_raw_cmd() - IPC command with data and pointers @@ -437,8 +361,8 @@ EXPORT_SYMBOL_GPL(intel_pmc_ipc_simple_command); * * Return: an IPC error code or 0 on success. */ -int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, - u32 outlen, u32 dptr, u32 sptr) +static int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, + u32 outlen, u32 dptr, u32 sptr) { u32 wbuf[4] = { 0 }; int ret; @@ -470,7 +394,6 @@ int intel_pmc_ipc_raw_cmd(u32 cmd, u32 sub, u8 *in, u32 inlen, u32 *out, return ret; } -EXPORT_SYMBOL_GPL(intel_pmc_ipc_raw_cmd); /** * intel_pmc_ipc_command() - IPC command with input/output data @@ -579,6 +502,7 @@ static ssize_t intel_pmc_ipc_simple_cmd_store(struct device *dev, } return (ssize_t)count; } +static DEVICE_ATTR(simplecmd, 0200, NULL, intel_pmc_ipc_simple_cmd_store); static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, struct device_attribute *attr, @@ -588,8 +512,9 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, int subcmd; int ret; - if (kstrtoul(buf, 0, &val)) - return -EINVAL; + ret = kstrtoul(buf, 0, &val); + if (ret) + return ret; if (val) subcmd = 1; @@ -602,11 +527,7 @@ static ssize_t intel_pmc_ipc_northpeak_store(struct device *dev, } return (ssize_t)count; } - -static DEVICE_ATTR(simplecmd, S_IWUSR, - NULL, intel_pmc_ipc_simple_cmd_store); -static DEVICE_ATTR(northpeak, S_IWUSR, - NULL, intel_pmc_ipc_northpeak_store); +static DEVICE_ATTR(northpeak, 0200, NULL, intel_pmc_ipc_northpeak_store); static struct attribute *intel_ipc_attrs[] = { &dev_attr_northpeak.attr, @@ -618,6 +539,11 @@ static const struct attribute_group intel_ipc_group = { .attrs = intel_ipc_attrs, }; +static const struct attribute_group *intel_ipc_groups[] = { + &intel_ipc_group, + NULL +}; + static struct resource punit_res_array[] = { /* Punit BIOS */ { @@ -958,18 +884,10 @@ static int ipc_plat_probe(struct platform_device *pdev) goto err_irq; } - ret = sysfs_create_group(&pdev->dev.kobj, &intel_ipc_group); - if (ret) { - dev_err(&pdev->dev, "Failed to create sysfs group %d\n", - ret); - goto err_sys; - } - ipcdev.has_gcr_regs = true; return 0; -err_sys: - devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev); + err_irq: 
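	/*
	 * Editor's note (descriptive, added for clarity): the sysfs group
	 * is now registered by the driver core through the new .dev_groups
	 * pointer, so probe no longer creates it and this error path needs
	 * no sysfs_remove_group() step of its own.
	 */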
platform_device_unregister(ipcdev.tco_dev); platform_device_unregister(ipcdev.punit_dev); @@ -980,7 +898,6 @@ err_irq: static int ipc_plat_remove(struct platform_device *pdev) { - sysfs_remove_group(&pdev->dev.kobj, &intel_ipc_group); devm_free_irq(&pdev->dev, ipcdev.irq, &ipcdev); platform_device_unregister(ipcdev.tco_dev); platform_device_unregister(ipcdev.punit_dev); @@ -995,6 +912,7 @@ static struct platform_driver ipc_plat_driver = { .driver = { .name = "pmc-ipc-plat", .acpi_match_table = ACPI_PTR(ipc_acpi_ids), + .dev_groups = intel_ipc_groups, }, }; diff --git a/drivers/platform/x86/intel_scu_ipc.c b/drivers/platform/x86/intel_scu_ipc.c index cdab916fbf92..3d7da5266136 100644 --- a/drivers/platform/x86/intel_scu_ipc.c +++ b/drivers/platform/x86/intel_scu_ipc.c @@ -26,11 +26,7 @@ #include <asm/intel_scu_ipc.h> /* IPC defines the following message types */ -#define IPCMSG_WATCHDOG_TIMER 0xF8 /* Set Kernel Watchdog Threshold */ -#define IPCMSG_BATTERY 0xEF /* Coulomb Counter Accumulator */ -#define IPCMSG_FW_UPDATE 0xFE /* Firmware update */ -#define IPCMSG_PCNTRL 0xFF /* Power controller unit read/write */ -#define IPCMSG_FW_REVISION 0xF4 /* Get firmware revision */ +#define IPCMSG_PCNTRL 0xff /* Power controller unit read/write */ /* Command id associated with message IPCMSG_PCNTRL */ #define IPC_CMD_PCNTRL_W 0 /* Register write */ @@ -58,56 +54,29 @@ #define IPC_RWBUF_SIZE 20 /* IPC Read buffer Size */ #define IPC_IOC 0x100 /* IPC command register IOC bit */ -#define PCI_DEVICE_ID_LINCROFT 0x082a -#define PCI_DEVICE_ID_PENWELL 0x080e -#define PCI_DEVICE_ID_CLOVERVIEW 0x08ea -#define PCI_DEVICE_ID_TANGIER 0x11a0 - -/* intel scu ipc driver data */ -struct intel_scu_ipc_pdata_t { - u32 i2c_base; - u32 i2c_len; - u8 irq_mode; -}; - -static const struct intel_scu_ipc_pdata_t intel_scu_ipc_lincroft_pdata = { - .i2c_base = 0xff12b000, - .i2c_len = 0x10, - .irq_mode = 0, -}; - -/* Penwell and Cloverview */ -static const struct intel_scu_ipc_pdata_t intel_scu_ipc_penwell_pdata = { - .i2c_base = 0xff12b000, - .i2c_len = 0x10, - .irq_mode = 1, -}; - -static const struct intel_scu_ipc_pdata_t intel_scu_ipc_tangier_pdata = { - .i2c_base = 0xff00d000, - .i2c_len = 0x10, - .irq_mode = 0, -}; - struct intel_scu_ipc_dev { struct device *dev; void __iomem *ipc_base; - void __iomem *i2c_base; struct completion cmd_complete; u8 irq_mode; }; static struct intel_scu_ipc_dev ipcdev; /* Only one for now */ +#define IPC_STATUS 0x04 +#define IPC_STATUS_IRQ BIT(2) +#define IPC_STATUS_ERR BIT(1) +#define IPC_STATUS_BUSY BIT(0) + /* - * IPC Read Buffer (Read Only): - * 16 byte buffer for receiving data from SCU, if IPC command - * processing results in response data + * IPC Write/Read Buffers: + * 16 byte buffer for sending and receiving data to and from SCU. 
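+ * (Editor's note: the write window sits at offset 0x80 and the read
+ * window at 0x90, per the defines just below.)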
*/ +#define IPC_WRITE_BUFFER 0x80 #define IPC_READ_BUFFER 0x90 -#define IPC_I2C_CNTRL_ADDR 0 -#define I2C_DATA_ADDR 0x04 +/* Timeout in jiffies */ +#define IPC_TIMEOUT (3 * HZ) static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */ @@ -120,11 +89,8 @@ static DEFINE_MUTEX(ipclock); /* lock used to prevent multiple call to SCU */ */ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd) { - if (scu->irq_mode) { - reinit_completion(&scu->cmd_complete); - writel(cmd | IPC_IOC, scu->ipc_base); - } - writel(cmd, scu->ipc_base); + reinit_completion(&scu->cmd_complete); + writel(cmd | IPC_IOC, scu->ipc_base); } /* @@ -135,7 +101,7 @@ static inline void ipc_command(struct intel_scu_ipc_dev *scu, u32 cmd) */ static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 offset) { - writel(data, scu->ipc_base + 0x80 + offset); + writel(data, scu->ipc_base + IPC_WRITE_BUFFER + offset); } /* @@ -147,7 +113,7 @@ static inline void ipc_data_writel(struct intel_scu_ipc_dev *scu, u32 data, u32 */ static inline u8 ipc_read_status(struct intel_scu_ipc_dev *scu) { - return __raw_readl(scu->ipc_base + 0x04); + return __raw_readl(scu->ipc_base + IPC_STATUS); } /* Read ipc byte data */ @@ -165,24 +131,20 @@ static inline u32 ipc_data_readl(struct intel_scu_ipc_dev *scu, u32 offset) /* Wait till scu status is busy */ static inline int busy_loop(struct intel_scu_ipc_dev *scu) { - u32 status = ipc_read_status(scu); - u32 loop_count = 100000; + unsigned long end = jiffies + msecs_to_jiffies(IPC_TIMEOUT); - /* break if scu doesn't reset busy bit after huge retry */ - while ((status & BIT(0)) && --loop_count) { - udelay(1); /* scu processing time is in few u secods */ - status = ipc_read_status(scu); - } + do { + u32 status; - if (status & BIT(0)) { - dev_err(scu->dev, "IPC timed out"); - return -ETIMEDOUT; - } + status = ipc_read_status(scu); + if (!(status & IPC_STATUS_BUSY)) + return (status & IPC_STATUS_ERR) ? -EIO : 0; - if (status & BIT(1)) - return -EIO; + usleep_range(50, 100); + } while (time_before(jiffies, end)); - return 0; + dev_err(scu->dev, "IPC timed out"); + return -ETIMEDOUT; } /* Wait till ipc ioc interrupt is received or timeout in 3 HZ */ @@ -190,13 +152,13 @@ static inline int ipc_wait_for_interrupt(struct intel_scu_ipc_dev *scu) { int status; - if (!wait_for_completion_timeout(&scu->cmd_complete, 3 * HZ)) { + if (!wait_for_completion_timeout(&scu->cmd_complete, IPC_TIMEOUT)) { dev_err(scu->dev, "IPC timed out\n"); return -ETIMEDOUT; } status = ipc_read_status(scu); - if (status & BIT(1)) + if (status & IPC_STATUS_ERR) return -EIO; return 0; @@ -260,14 +222,14 @@ static int pwr_reg_rdwr(u16 *addr, u8 *data, u32 count, u32 op, u32 id) } /** - * intel_scu_ipc_ioread8 - read a word via the SCU - * @addr: register on SCU - * @data: return pointer for read byte + * intel_scu_ipc_ioread8 - read a word via the SCU + * @addr: Register on SCU + * @data: Return pointer for read byte * - * Read a single register. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * Read a single register. Returns %0 on success or an error code. All + * locking between SCU accesses is handled for the caller. * - * This function may sleep. + * This function may sleep. 
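+ *
+ * (Editor's illustration, not from the patch: a typical caller fetches a
+ * single PMIC register, e.g. "ret = intel_scu_ipc_ioread8(0x030, &val);"
+ * where 0x030 is a hypothetical register address.)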
*/ int intel_scu_ipc_ioread8(u16 addr, u8 *data) { @@ -276,48 +238,14 @@ int intel_scu_ipc_ioread8(u16 addr, u8 *data) EXPORT_SYMBOL(intel_scu_ipc_ioread8); /** - * intel_scu_ipc_ioread16 - read a word via the SCU - * @addr: register on SCU - * @data: return pointer for read word - * - * Read a register pair. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. - * - * This function may sleep. - */ -int intel_scu_ipc_ioread16(u16 addr, u16 *data) -{ - u16 x[2] = {addr, addr + 1}; - return pwr_reg_rdwr(x, (u8 *)data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); -} -EXPORT_SYMBOL(intel_scu_ipc_ioread16); - -/** - * intel_scu_ipc_ioread32 - read a dword via the SCU - * @addr: register on SCU - * @data: return pointer for read dword - * - * Read four registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. - * - * This function may sleep. - */ -int intel_scu_ipc_ioread32(u16 addr, u32 *data) -{ - u16 x[4] = {addr, addr + 1, addr + 2, addr + 3}; - return pwr_reg_rdwr(x, (u8 *)data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_R); -} -EXPORT_SYMBOL(intel_scu_ipc_ioread32); - -/** - * intel_scu_ipc_iowrite8 - write a byte via the SCU - * @addr: register on SCU - * @data: byte to write + * intel_scu_ipc_iowrite8 - write a byte via the SCU + * @addr: Register on SCU + * @data: Byte to write * - * Write a single register. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * Write a single register. Returns %0 on success or an error code. All + * locking between SCU accesses is handled for the caller. * - * This function may sleep. + * This function may sleep. */ int intel_scu_ipc_iowrite8(u16 addr, u8 data) { @@ -326,51 +254,17 @@ int intel_scu_ipc_iowrite8(u16 addr, u8 data) EXPORT_SYMBOL(intel_scu_ipc_iowrite8); /** - * intel_scu_ipc_iowrite16 - write a word via the SCU - * @addr: register on SCU - * @data: word to write - * - * Write two registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * intel_scu_ipc_readv - read a set of registers + * @addr: Register list + * @data: Bytes to return + * @len: Length of array * - * This function may sleep. - */ -int intel_scu_ipc_iowrite16(u16 addr, u16 data) -{ - u16 x[2] = {addr, addr + 1}; - return pwr_reg_rdwr(x, (u8 *)&data, 2, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); -} -EXPORT_SYMBOL(intel_scu_ipc_iowrite16); - -/** - * intel_scu_ipc_iowrite32 - write a dword via the SCU - * @addr: register on SCU - * @data: dword to write + * Read registers. Returns %0 on success or an error code. All locking + * between SCU accesses is handled for the caller. * - * Write four registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * The largest array length permitted by the hardware is 5 items. * - * This function may sleep. - */ -int intel_scu_ipc_iowrite32(u16 addr, u32 data) -{ - u16 x[4] = {addr, addr + 1, addr + 2, addr + 3}; - return pwr_reg_rdwr(x, (u8 *)&data, 4, IPCMSG_PCNTRL, IPC_CMD_PCNTRL_W); -} -EXPORT_SYMBOL(intel_scu_ipc_iowrite32); - -/** - * intel_scu_ipc_readvv - read a set of registers - * @addr: register list - * @data: bytes to return - * @len: length of array - * - * Read registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. - * - * The largest array length permitted by the hardware is 5 items. - * - * This function may sleep.
+ * This function may sleep. */ int intel_scu_ipc_readv(u16 *addr, u8 *data, int len) { @@ -379,18 +273,17 @@ int intel_scu_ipc_readv(u16 *addr, u8 *data, int len) EXPORT_SYMBOL(intel_scu_ipc_readv); /** - * intel_scu_ipc_writev - write a set of registers - * @addr: register list - * @data: bytes to write - * @len: length of array - * - * Write registers. Returns 0 on success or an error code. All - * locking between SCU accesses is handled for the caller. + * intel_scu_ipc_writev - write a set of registers + * @addr: Register list + * @data: Bytes to write + * @len: Length of array * - * The largest array length permitted by the hardware is 5 items. + * Write registers. Returns %0 on success or an error code. All locking + * between SCU accesses is handled for the caller. * - * This function may sleep. + * The largest array length permitted by the hardware is 5 items. * + * This function may sleep. */ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len) { @@ -399,19 +292,18 @@ int intel_scu_ipc_writev(u16 *addr, u8 *data, int len) EXPORT_SYMBOL(intel_scu_ipc_writev); /** - * intel_scu_ipc_update_register - r/m/w a register - * @addr: register address - * @bits: bits to update - * @mask: mask of bits to update - * - * Read-modify-write power control unit register. The first data argument - * must be register value and second is mask value - * mask is a bitmap that indicates which bits to update. - * 0 = masked. Don't modify this bit, 1 = modify this bit. - * returns 0 on success or an error code. - * - * This function may sleep. Locking between SCU accesses is handled - * for the caller. + * intel_scu_ipc_update_register - r/m/w a register + * @addr: Register address + * @bits: Bits to update + * @mask: Mask of bits to update + * + * Read-modify-write power control unit register. The first data argument + * must be the register value and the second is the mask value. The mask is + * a bitmap that indicates which bits to update: %0 = don't modify this bit, + * %1 = modify this bit. Returns %0 on success or an error code. + * + * This function may sleep. Locking between SCU accesses is handled + * for the caller. */ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask) { @@ -421,16 +313,16 @@ int intel_scu_ipc_update_register(u16 addr, u8 bits, u8 mask) EXPORT_SYMBOL(intel_scu_ipc_update_register); /** - * intel_scu_ipc_simple_command - send a simple command - * @cmd: command - * @sub: sub type + * intel_scu_ipc_simple_command - send a simple command + * @cmd: Command + * @sub: Sub type * - * Issue a simple command to the SCU. Do not use this interface if - * you must then access data as any data values may be overwritten - * by another SCU access by the time this function returns. + * Issue a simple command to the SCU. Do not use this interface if you must + * then access data as any data values may be overwritten by another SCU + * access by the time this function returns. * - * This function may sleep. Locking for SCU accesses is handled for - * the caller. + * This function may sleep. Locking for SCU accesses is handled for the + * caller. */ int intel_scu_ipc_simple_command(int cmd, int sub) { @@ -450,16 +342,16 @@ int intel_scu_ipc_simple_command(int cmd, int sub) EXPORT_SYMBOL(intel_scu_ipc_simple_command); /** - * intel_scu_ipc_command - command with data - * @cmd: command - * @sub: sub type - * @in: input data - * @inlen: input length in dwords - * @out: output data - * @outlein: output length in dwords - * - * Issue a command to the SCU which involves data transfers.
Do the - * data copies under the lock but leave it for the caller to interpret + * intel_scu_ipc_command - command with data + * @cmd: Command + * @sub: Sub type + * @in: Input data + * @inlen: Input length in dwords + * @out: Output data + * @outlen: Output length in dwords + * + * Issue a command to the SCU which involves data transfers. Do the + * data copies under the lock but leave it for the caller to interpret. */ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, u32 *out, int outlen) @@ -489,117 +381,6 @@ int intel_scu_ipc_command(int cmd, int sub, u32 *in, int inlen, } EXPORT_SYMBOL(intel_scu_ipc_command); -#define IPC_SPTR 0x08 -#define IPC_DPTR 0x0C - -/** - * intel_scu_ipc_raw_command() - IPC command with data and pointers - * @cmd: IPC command code. - * @sub: IPC command sub type. - * @in: input data of this IPC command. - * @inlen: input data length in dwords. - * @out: output data of this IPC command. - * @outlen: output data length in dwords. - * @sptr: data writing to SPTR register. - * @dptr: data writing to DPTR register. - * - * Send an IPC command to SCU with input/output data and source/dest pointers. - * - * Return: an IPC error code or 0 on success. - */ -int intel_scu_ipc_raw_command(int cmd, int sub, u8 *in, int inlen, - u32 *out, int outlen, u32 dptr, u32 sptr) -{ - struct intel_scu_ipc_dev *scu = &ipcdev; - int inbuflen = DIV_ROUND_UP(inlen, 4); - u32 inbuf[4]; - int i, err; - - /* Up to 16 bytes */ - if (inbuflen > 4) - return -EINVAL; - - mutex_lock(&ipclock); - if (scu->dev == NULL) { - mutex_unlock(&ipclock); - return -ENODEV; - } - - writel(dptr, scu->ipc_base + IPC_DPTR); - writel(sptr, scu->ipc_base + IPC_SPTR); - - /* - * SRAM controller doesn't support 8-bit writes, it only - * supports 32-bit writes, so we have to copy input data into - * the temporary buffer, and SCU FW will use the inlen to - * determine the actual input data length in the temporary - * buffer. - */ - memcpy(inbuf, in, inlen); - - for (i = 0; i < inbuflen; i++) - ipc_data_writel(scu, inbuf[i], 4 * i); - - ipc_command(scu, (inlen << 16) | (sub << 12) | cmd); - err = intel_scu_ipc_check_status(scu); - if (!err) { - for (i = 0; i < outlen; i++) - *out++ = ipc_data_readl(scu, 4 * i); - } - - mutex_unlock(&ipclock); - return err; -} -EXPORT_SYMBOL_GPL(intel_scu_ipc_raw_command); - -/* I2C commands */ -#define IPC_I2C_WRITE 1 /* I2C Write command */ -#define IPC_I2C_READ 2 /* I2C Read command */ - -/** - * intel_scu_ipc_i2c_cntrl - I2C read/write operations - * @addr: I2C address + command bits - * @data: data to read/write - * - * Perform an an I2C read/write operation via the SCU. All locking is - * handled for the caller. This function may sleep. - * - * Returns an error code or 0 on success. - * - * This has to be in the IPC driver for the locking. 
- */ -int intel_scu_ipc_i2c_cntrl(u32 addr, u32 *data) -{ - struct intel_scu_ipc_dev *scu = &ipcdev; - u32 cmd = 0; - - mutex_lock(&ipclock); - if (scu->dev == NULL) { - mutex_unlock(&ipclock); - return -ENODEV; - } - cmd = (addr >> 24) & 0xFF; - if (cmd == IPC_I2C_READ) { - writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR); - /* Write not getting updated without delay */ - usleep_range(1000, 2000); - *data = readl(scu->i2c_base + I2C_DATA_ADDR); - } else if (cmd == IPC_I2C_WRITE) { - writel(*data, scu->i2c_base + I2C_DATA_ADDR); - usleep_range(1000, 2000); - writel(addr, scu->i2c_base + IPC_I2C_CNTRL_ADDR); - } else { - dev_err(scu->dev, - "intel_scu_ipc: I2C INVALID_CMD = 0x%x\n", cmd); - - mutex_unlock(&ipclock); - return -EIO; - } - mutex_unlock(&ipclock); - return 0; -} -EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl); - /* * Interrupt handler gets called when ioc bit of IPC_COMMAND_REG set to 1 * When ioc bit is set to 1, caller api must wait for interrupt handler called @@ -610,9 +391,10 @@ EXPORT_SYMBOL(intel_scu_ipc_i2c_cntrl); static irqreturn_t ioc(int irq, void *dev_id) { struct intel_scu_ipc_dev *scu = dev_id; + int status = ipc_read_status(scu); - if (scu->irq_mode) - complete(&scu->cmd_complete); + writel(status | IPC_STATUS_IRQ, scu->ipc_base + IPC_STATUS); + complete(&scu->cmd_complete); return IRQ_HANDLED; } @@ -629,17 +411,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int err; struct intel_scu_ipc_dev *scu = &ipcdev; - struct intel_scu_ipc_pdata_t *pdata; if (scu->dev) /* We support only one SCU */ return -EBUSY; - pdata = (struct intel_scu_ipc_pdata_t *)id->driver_data; - if (!pdata) - return -ENODEV; - - scu->irq_mode = pdata->irq_mode; - err = pcim_enable_device(pdev); if (err) return err; @@ -652,10 +427,6 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id) scu->ipc_base = pcim_iomap_table(pdev)[0]; - scu->i2c_base = ioremap_nocache(pdata->i2c_base, pdata->i2c_len); - if (!scu->i2c_base) - return -ENOMEM; - err = devm_request_irq(&pdev->dev, pdev->irq, ioc, 0, "intel_scu_ipc", scu); if (err) @@ -670,13 +441,10 @@ static int ipc_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; } -#define SCU_DEVICE(id, pdata) {PCI_VDEVICE(INTEL, id), (kernel_ulong_t)&pdata} - static const struct pci_device_id pci_ids[] = { - SCU_DEVICE(PCI_DEVICE_ID_LINCROFT, intel_scu_ipc_lincroft_pdata), - SCU_DEVICE(PCI_DEVICE_ID_PENWELL, intel_scu_ipc_penwell_pdata), - SCU_DEVICE(PCI_DEVICE_ID_CLOVERVIEW, intel_scu_ipc_penwell_pdata), - SCU_DEVICE(PCI_DEVICE_ID_TANGIER, intel_scu_ipc_tangier_pdata), + { PCI_VDEVICE(INTEL, 0x080e) }, + { PCI_VDEVICE(INTEL, 0x08ea) }, + { PCI_VDEVICE(INTEL, 0x11a0) }, {} }; diff --git a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c index 3de5a3c66529..0c2aa22c7a12 100644 --- a/drivers/platform/x86/intel_speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel_speed_select_if/isst_if_common.c @@ -50,6 +50,8 @@ static const struct isst_valid_cmd_ranges isst_valid_cmds[] = { {0x7F, 0x00, 0x0B}, {0x7F, 0x10, 0x12}, {0x7F, 0x20, 0x23}, + {0x94, 0x03, 0x03}, + {0x95, 0x03, 0x03}, }; static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = { @@ -59,6 +61,7 @@ static const struct isst_cmd_set_req_type isst_cmd_set_reqs[] = { {0xD0, 0x03, 0x08}, {0x7F, 0x02, 0x00}, {0x7F, 0x08, 0x00}, + {0x95, 0x03, 0x03}, }; struct isst_cmd { diff --git a/drivers/platform/x86/intel_telemetry_debugfs.c 
b/drivers/platform/x86/intel_telemetry_debugfs.c index e84d3e983e0c..8e3fb55ac1ae 100644 --- a/drivers/platform/x86/intel_telemetry_debugfs.c +++ b/drivers/platform/x86/intel_telemetry_debugfs.c @@ -686,13 +686,14 @@ static ssize_t telem_pss_trc_verb_write(struct file *file, u32 verbosity; int err; - if (kstrtou32_from_user(userbuf, count, 0, &verbosity)) - return -EFAULT; + err = kstrtou32_from_user(userbuf, count, 0, &verbosity); + if (err) + return err; err = telemetry_set_trace_verbosity(TELEM_PSS, verbosity); if (err) { pr_err("Changing PSS Trace Verbosity Failed. Error %d\n", err); - count = err; + return err; } return count; @@ -733,13 +734,14 @@ static ssize_t telem_ioss_trc_verb_write(struct file *file, u32 verbosity; int err; - if (kstrtou32_from_user(userbuf, count, 0, &verbosity)) - return -EFAULT; + err = kstrtou32_from_user(userbuf, count, 0, &verbosity); + if (err) + return err; err = telemetry_set_trace_verbosity(TELEM_IOSS, verbosity); if (err) { pr_err("Changing IOSS Trace Verbosity Failed. Error %d\n", err); - count = err; + return err; } return count; diff --git a/drivers/platform/x86/intel_telemetry_pltdrv.c b/drivers/platform/x86/intel_telemetry_pltdrv.c index df8565bad595..c4c742bb23cf 100644 --- a/drivers/platform/x86/intel_telemetry_pltdrv.c +++ b/drivers/platform/x86/intel_telemetry_pltdrv.c @@ -1117,9 +1117,9 @@ static const struct telemetry_core_ops telm_pltops = { static int telemetry_pltdrv_probe(struct platform_device *pdev) { - struct resource *res0 = NULL, *res1 = NULL; const struct x86_cpu_id *id; - int size, ret = -ENOMEM; + void __iomem *mem; + int ret; id = x86_match_cpu(telemetry_cpu_ids); if (!id) @@ -1127,50 +1127,17 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev) telm_conf = (struct telemetry_plt_config *)id->driver_data; - res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res0) { - ret = -EINVAL; - goto out; - } - size = resource_size(res0); - if (!devm_request_mem_region(&pdev->dev, res0->start, size, - pdev->name)) { - ret = -EBUSY; - goto out; - } - telm_conf->pss_config.ssram_base_addr = res0->start; - telm_conf->pss_config.ssram_size = size; + mem = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(mem)) + return PTR_ERR(mem); - res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); - if (!res1) { - ret = -EINVAL; - goto out; - } - size = resource_size(res1); - if (!devm_request_mem_region(&pdev->dev, res1->start, size, - pdev->name)) { - ret = -EBUSY; - goto out; - } + telm_conf->pss_config.regmap = mem; - telm_conf->ioss_config.ssram_base_addr = res1->start; - telm_conf->ioss_config.ssram_size = size; + mem = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(mem)) + return PTR_ERR(mem); - telm_conf->pss_config.regmap = ioremap_nocache( - telm_conf->pss_config.ssram_base_addr, - telm_conf->pss_config.ssram_size); - if (!telm_conf->pss_config.regmap) { - ret = -ENOMEM; - goto out; - } - - telm_conf->ioss_config.regmap = ioremap_nocache( - telm_conf->ioss_config.ssram_base_addr, - telm_conf->ioss_config.ssram_size); - if (!telm_conf->ioss_config.regmap) { - ret = -ENOMEM; - goto out; - } + telm_conf->ioss_config.regmap = mem; mutex_init(&telm_conf->telem_lock); mutex_init(&telm_conf->telem_trace_lock); @@ -1188,14 +1155,6 @@ static int telemetry_pltdrv_probe(struct platform_device *pdev) return 0; out: - if (res0) - release_mem_region(res0->start, resource_size(res0)); - if (res1) - release_mem_region(res1->start, resource_size(res1)); - if (telm_conf->pss_config.regmap) - 
iounmap(telm_conf->pss_config.regmap); - if (telm_conf->ioss_config.regmap) - iounmap(telm_conf->ioss_config.regmap); dev_err(&pdev->dev, "TELEMETRY Setup Failed.\n"); return ret; @@ -1204,9 +1163,6 @@ out: static int telemetry_pltdrv_remove(struct platform_device *pdev) { telemetry_clear_pltdata(); - iounmap(telm_conf->pss_config.regmap); - iounmap(telm_conf->ioss_config.regmap); - return 0; } diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c index 8fe51e43f1bc..c27548fd386a 100644 --- a/drivers/platform/x86/mlx-platform.c +++ b/drivers/platform/x86/mlx-platform.c @@ -35,6 +35,8 @@ #define MLXPLAT_CPLD_LPC_REG_LED4_OFFSET 0x23 #define MLXPLAT_CPLD_LPC_REG_LED5_OFFSET 0x24 #define MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION 0x2a +#define MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET 0x2b +#define MLXPLAT_CPLD_LPC_REG_GP0_OFFSET 0x2e #define MLXPLAT_CPLD_LPC_REG_GP1_OFFSET 0x30 #define MLXPLAT_CPLD_LPC_REG_WP1_OFFSET 0x31 #define MLXPLAT_CPLD_LPC_REG_GP2_OFFSET 0x32 @@ -46,6 +48,8 @@ #define MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET 0x41 #define MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET 0x42 #define MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET 0x43 +#define MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET 0x44 +#define MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET 0x45 #define MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET 0x50 #define MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET 0x51 #define MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET 0x52 @@ -68,6 +72,7 @@ #define MLXPLAT_CPLD_LPC_REG_WD3_TMR_OFFSET 0xd1 #define MLXPLAT_CPLD_LPC_REG_WD3_TLEFT_OFFSET 0xd2 #define MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET 0xd3 +#define MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET 0xe2 #define MLXPLAT_CPLD_LPC_REG_PWM1_OFFSET 0xe3 #define MLXPLAT_CPLD_LPC_REG_TACHO1_OFFSET 0xe4 #define MLXPLAT_CPLD_LPC_REG_TACHO2_OFFSET 0xe5 @@ -85,9 +90,13 @@ #define MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET 0xf6 #define MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET 0xf7 #define MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET 0xf8 +#define MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET 0xf9 +#define MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET 0xfb +#define MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET 0xfc #define MLXPLAT_CPLD_LPC_IO_RANGE 0x100 #define MLXPLAT_CPLD_LPC_I2C_CH1_OFF 0xdb #define MLXPLAT_CPLD_LPC_I2C_CH2_OFF 0xda +#define MLXPLAT_CPLD_LPC_I2C_CH3_OFF 0xdc #define MLXPLAT_CPLD_LPC_PIO_OFFSET 0x10000UL #define MLXPLAT_CPLD_LPC_REG1 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \ @@ -96,6 +105,9 @@ #define MLXPLAT_CPLD_LPC_REG2 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \ MLXPLAT_CPLD_LPC_I2C_CH2_OFF) | \ MLXPLAT_CPLD_LPC_PIO_OFFSET) +#define MLXPLAT_CPLD_LPC_REG3 ((MLXPLAT_CPLD_LPC_REG_BASE_ADRR + \ + MLXPLAT_CPLD_LPC_I2C_CH3_OFF) | \ + MLXPLAT_CPLD_LPC_PIO_OFFSET) /* Masks for aggregation, psu, pwr and fan event in CPLD related registers. 
*/ #define MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF 0x04 @@ -112,17 +124,29 @@ #define MLXPLAT_CPLD_LOW_AGGR_MASK_I2C BIT(6) #define MLXPLAT_CPLD_PSU_MASK GENMASK(1, 0) #define MLXPLAT_CPLD_PWR_MASK GENMASK(1, 0) +#define MLXPLAT_CPLD_PSU_EXT_MASK GENMASK(3, 0) +#define MLXPLAT_CPLD_PWR_EXT_MASK GENMASK(3, 0) #define MLXPLAT_CPLD_FAN_MASK GENMASK(3, 0) #define MLXPLAT_CPLD_ASIC_MASK GENMASK(1, 0) #define MLXPLAT_CPLD_FAN_NG_MASK GENMASK(5, 0) #define MLXPLAT_CPLD_LED_LO_NIBBLE_MASK GENMASK(7, 4) #define MLXPLAT_CPLD_LED_HI_NIBBLE_MASK GENMASK(3, 0) +#define MLXPLAT_CPLD_VOLTREG_UPD_MASK GENMASK(5, 4) +#define MLXPLAT_CPLD_I2C_CAP_BIT 0x04 +#define MLXPLAT_CPLD_I2C_CAP_MASK GENMASK(5, MLXPLAT_CPLD_I2C_CAP_BIT) + +/* Masks for aggregation for comex carriers */ +#define MLXPLAT_CPLD_AGGR_MASK_CARRIER BIT(1) +#define MLXPLAT_CPLD_AGGR_MASK_CARR_DEF (MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF | \ + MLXPLAT_CPLD_AGGR_MASK_CARRIER) +#define MLXPLAT_CPLD_LOW_AGGRCX_MASK 0xc1 /* Default I2C parent bus number */ #define MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR 1 /* Maximum number of possible physical buses equipped on system */ #define MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM 16 +#define MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM 24 /* Number of channels in group */ #define MLXPLAT_CPLD_GRP_CHNL_NUM 8 @@ -130,14 +154,16 @@ /* Start channel numbers */ #define MLXPLAT_CPLD_CH1 2 #define MLXPLAT_CPLD_CH2 10 +#define MLXPLAT_CPLD_CH3 18 /* Number of LPC attached MUX platform devices */ -#define MLXPLAT_CPLD_LPC_MUX_DEVS 2 +#define MLXPLAT_CPLD_LPC_MUX_DEVS 3 /* Hotplug devices adapter numbers */ #define MLXPLAT_CPLD_NR_NONE -1 #define MLXPLAT_CPLD_PSU_DEFAULT_NR 10 #define MLXPLAT_CPLD_PSU_MSNXXXX_NR 4 +#define MLXPLAT_CPLD_PSU_MSNXXXX_NR2 3 #define MLXPLAT_CPLD_FAN1_DEFAULT_NR 11 #define MLXPLAT_CPLD_FAN2_DEFAULT_NR 12 #define MLXPLAT_CPLD_FAN3_DEFAULT_NR 13 @@ -187,8 +213,24 @@ static const struct resource mlxplat_lpc_resources[] = { IORESOURCE_IO), }; +/* Platform i2c next generation systems data */ +static struct mlxreg_core_data mlxplat_mlxcpld_i2c_ng_items_data[] = { + { + .reg = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET, + .mask = MLXPLAT_CPLD_I2C_CAP_MASK, + .bit = MLXPLAT_CPLD_I2C_CAP_BIT, + }, +}; + +static struct mlxreg_core_item mlxplat_mlxcpld_i2c_ng_items[] = { + { + .data = mlxplat_mlxcpld_i2c_ng_items_data, + }, +}; + /* Platform next generation systems i2c data */ static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_i2c_ng_data = { + .items = mlxplat_mlxcpld_i2c_ng_items, .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, .mask = MLXPLAT_CPLD_AGGR_MASK_COMEX, .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET, @@ -213,7 +255,7 @@ static const int mlxplat_default_channels[][MLXPLAT_CPLD_GRP_CHNL_NUM] = { static const int mlxplat_msn21xx_channels[] = { 1, 2, 3, 4, 5, 6, 7, 8 }; /* Platform mux data */ -static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = { +static struct i2c_mux_reg_platform_data mlxplat_default_mux_data[] = { { .parent = 1, .base_nr = MLXPLAT_CPLD_CH1, @@ -233,6 +275,40 @@ static struct i2c_mux_reg_platform_data mlxplat_mux_data[] = { }; +/* Platform mux configuration variables */ +static int mlxplat_max_adap_num; +static int mlxplat_mux_num; +static struct i2c_mux_reg_platform_data *mlxplat_mux_data; + +/* Platform extended mux data */ +static struct i2c_mux_reg_platform_data mlxplat_extended_mux_data[] = { + { + .parent = 1, + .base_nr = MLXPLAT_CPLD_CH1, + .write_only = 1, + .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG1, + .reg_size = 1, + .idle_in_use = 1, + }, + { + .parent = 1, + .base_nr = 
MLXPLAT_CPLD_CH2, + .write_only = 1, + .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG3, + .reg_size = 1, + .idle_in_use = 1, + }, + { + .parent = 1, + .base_nr = MLXPLAT_CPLD_CH3, + .write_only = 1, + .reg = (void __iomem *)MLXPLAT_CPLD_LPC_REG2, + .reg_size = 1, + .idle_in_use = 1, + }, + +}; + /* Platform hotplug devices */ static struct i2c_board_info mlxplat_mlxcpld_psu[] = { { @@ -276,6 +352,22 @@ static struct i2c_board_info mlxplat_mlxcpld_fan[] = { }, }; +/* Platform hotplug comex carrier system family data */ +static struct mlxreg_core_data mlxplat_mlxcpld_comex_psu_items_data[] = { + { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu2", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, +}; + /* Platform hotplug default data */ static struct mlxreg_core_data mlxplat_mlxcpld_default_psu_items_data[] = { { @@ -390,6 +482,45 @@ static struct mlxreg_core_item mlxplat_mlxcpld_default_items[] = { }, }; +static struct mlxreg_core_item mlxplat_mlxcpld_comex_items[] = { + { + .data = mlxplat_mlxcpld_comex_psu_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = MLXPLAT_CPLD_PSU_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_psu), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_pwr_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = MLXPLAT_CPLD_PWR_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_pwr), + .inversed = 0, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_fan_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_CARRIER, + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = MLXPLAT_CPLD_FAN_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_fan), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_asic_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_ASIC_MASK_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET, + .mask = MLXPLAT_CPLD_ASIC_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data), + .inversed = 0, + .health = true, + }, +}; + static struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = { .items = mlxplat_mlxcpld_default_items, @@ -400,6 +531,16 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_data = { .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, }; +static +struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_comex_data = { + .items = mlxplat_mlxcpld_comex_items, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_items), + .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, + .mask = MLXPLAT_CPLD_AGGR_MASK_CARR_DEF, + .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET, + .mask_low = MLXPLAT_CPLD_LOW_AGGRCX_MASK, +}; + static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_pwr_items_data[] = { { .label = "pwr1", @@ -723,6 +864,116 @@ struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_default_ng_data = { .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, }; +/* Platform hotplug extended system family data */ +static struct mlxreg_core_data mlxplat_mlxcpld_ext_psu_items_data[] = { + { + .label = "psu1", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(0), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu2", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(1), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu3", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(2), + .hpdev.nr = 
MLXPLAT_CPLD_NR_NONE, + }, + { + .label = "psu4", + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = BIT(3), + .hpdev.nr = MLXPLAT_CPLD_NR_NONE, + }, +}; + +static struct mlxreg_core_data mlxplat_mlxcpld_ext_pwr_items_data[] = { + { + .label = "pwr1", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(0), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, + { + .label = "pwr2", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(1), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR, + }, + { + .label = "pwr3", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(2), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[0], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2, + }, + { + .label = "pwr4", + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = BIT(3), + .hpdev.brdinfo = &mlxplat_mlxcpld_pwr[1], + .hpdev.nr = MLXPLAT_CPLD_PSU_MSNXXXX_NR2, + }, +}; + +static struct mlxreg_core_item mlxplat_mlxcpld_ext_items[] = { + { + .data = mlxplat_mlxcpld_ext_psu_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PSU_OFFSET, + .mask = MLXPLAT_CPLD_PSU_EXT_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET, + .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_psu_items_data), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_ext_pwr_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_PWR_OFFSET, + .mask = MLXPLAT_CPLD_PWR_EXT_MASK, + .capability = MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET, + .count = ARRAY_SIZE(mlxplat_mlxcpld_ext_pwr_items_data), + .inversed = 0, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_ng_fan_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_FAN_OFFSET, + .mask = MLXPLAT_CPLD_FAN_NG_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_fan_items_data), + .inversed = 1, + .health = false, + }, + { + .data = mlxplat_mlxcpld_default_asic_items_data, + .aggr_mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF, + .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET, + .mask = MLXPLAT_CPLD_ASIC_MASK, + .count = ARRAY_SIZE(mlxplat_mlxcpld_default_asic_items_data), + .inversed = 0, + .health = true, + }, +}; + +static +struct mlxreg_core_hotplug_platform_data mlxplat_mlxcpld_ext_data = { + .items = mlxplat_mlxcpld_ext_items, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_ext_items), + .cell = MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET, + .mask = MLXPLAT_CPLD_AGGR_MASK_NG_DEF | MLXPLAT_CPLD_AGGR_MASK_COMEX, + .cell_low = MLXPLAT_CPLD_LPC_REG_AGGRLO_OFFSET, + .mask_low = MLXPLAT_CPLD_LOW_AGGR_MASK_LOW, +}; + /* Platform led default data */ static struct mlxreg_core_data mlxplat_mlxcpld_default_led_data[] = { { @@ -964,6 +1215,80 @@ static struct mlxreg_core_platform_data mlxplat_default_ng_led_data = { .counter = ARRAY_SIZE(mlxplat_mlxcpld_default_ng_led_data), }; +/* Platform led for Comex based 100GbE systems */ +static struct mlxreg_core_data mlxplat_mlxcpld_comex_100G_led_data[] = { + { + .label = "status:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "status:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK + }, + { + .label = "psu:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "psu:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED1_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan1:green", + .reg = 
MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan1:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan2:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan2:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED2_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan3:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan3:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, + { + .label = "fan4:green", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "fan4:red", + .reg = MLXPLAT_CPLD_LPC_REG_LED3_OFFSET, + .mask = MLXPLAT_CPLD_LED_HI_NIBBLE_MASK, + }, + { + .label = "uid:blue", + .reg = MLXPLAT_CPLD_LPC_REG_LED5_OFFSET, + .mask = MLXPLAT_CPLD_LED_LO_NIBBLE_MASK, + }, +}; + +static struct mlxreg_core_platform_data mlxplat_comex_100G_led_data = { + .data = mlxplat_mlxcpld_comex_100G_led_data, + .counter = ARRAY_SIZE(mlxplat_mlxcpld_comex_100G_led_data), +}; + /* Platform register access default */ static struct mlxreg_core_data mlxplat_mlxcpld_default_regs_io_data[] = { { @@ -1157,6 +1482,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_msn21xx_regs_io_data[] = { .mode = 0200, }, { + .label = "select_iio", + .reg = MLXPLAT_CPLD_LPC_REG_GP2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(6), + .mode = 0644, + }, + { .label = "asic_health", .reg = MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET, .mask = MLXPLAT_CPLD_ASIC_MASK, @@ -1245,6 +1576,18 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .mode = 0444, }, { + .label = "reset_platform", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(4), + .mode = 0444, + }, + { + .label = "reset_soc", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(5), + .mode = 0444, + }, + { .label = "reset_comex_wd", .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE1_OFFSET, .mask = GENMASK(7, 0) & ~BIT(6), @@ -1263,6 +1606,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .mode = 0444, }, { + .label = "reset_sw_pwr_off", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(2), + .mode = 0444, + }, + { .label = "reset_comex_thermal", .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET, .mask = GENMASK(7, 0) & ~BIT(3), @@ -1275,6 +1624,12 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .mode = 0444, }, { + .label = "reset_ac_pwr_fail", + .reg = MLXPLAT_CPLD_LPC_REG_RST_CAUSE2_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(6), + .mode = 0444, + }, + { .label = "psu1_on", .reg = MLXPLAT_CPLD_LPC_REG_GP1_OFFSET, .mask = GENMASK(7, 0) & ~BIT(0), @@ -1317,6 +1672,43 @@ static struct mlxreg_core_data mlxplat_mlxcpld_default_ng_regs_io_data[] = { .bit = GENMASK(7, 0), .mode = 0444, }, + { + .label = "voltreg_update_status", + .reg = MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET, + .mask = MLXPLAT_CPLD_VOLTREG_UPD_MASK, + .bit = 5, + .mode = 0444, + }, + { + .label = "vpd_wp", + .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(3), + .mode = 0644, + }, + { + .label = "pcie_asic_reset_dis", + .reg = MLXPLAT_CPLD_LPC_REG_GP0_OFFSET, + .mask = GENMASK(7, 0) & ~BIT(4), + .mode = 0644, + }, + { + .label = "config1", + .reg = MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET, + .bit = 
GENMASK(7, 0), + .mode = 0444, + }, + { + .label = "config2", + .reg = MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET, + .bit = GENMASK(7, 0), + .mode = 0444, + }, + { + .label = "ufm_version", + .reg = MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET, + .bit = GENMASK(7, 0), + .mode = 0444, + }, }; static struct mlxreg_core_platform_data mlxplat_default_ng_regs_io_data = { @@ -1575,6 +1967,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED3_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: + case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: @@ -1582,6 +1975,7 @@ static bool mlxplat_mlxcpld_writeable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_AGGR_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_PSU_EVENT_OFFSET: @@ -1621,6 +2015,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION: + case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET: + case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_WP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: @@ -1631,6 +2027,8 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET: @@ -1671,6 +2069,10 @@ static bool mlxplat_mlxcpld_readable_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET: case MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET: + case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET: + case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET: return true; } return false; @@ -1692,6 +2094,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_LED4_OFFSET: case MLXPLAT_CPLD_LPC_REG_LED5_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DIRECTION: + case MLXPLAT_CPLD_LPC_REG_GP0_RO_OFFSET: + case MLXPLAT_CPLD_LPC_REG_GP0_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP1_OFFSET: case MLXPLAT_CPLD_LPC_REG_GP2_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGR_OFFSET: @@ -1700,6 +2104,8 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_AGGRLO_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_OFFSET: case MLXPLAT_CPLD_LPC_REG_AGGRCO_MASK_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_OFFSET: + case MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_HEALTH_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_EVENT_OFFSET: case MLXPLAT_CPLD_LPC_REG_ASIC_MASK_OFFSET: @@ -1734,6 +2140,10 @@ static bool mlxplat_mlxcpld_volatile_reg(struct device *dev, unsigned int reg) case MLXPLAT_CPLD_LPC_REG_FAN_CAP2_OFFSET: case MLXPLAT_CPLD_LPC_REG_FAN_DRW_CAP_OFFSET: case 
MLXPLAT_CPLD_LPC_REG_TACHO_SPEED_OFFSET: + case MLXPLAT_CPLD_LPC_REG_PSU_I2C_CAP_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG1_OFFSET: + case MLXPLAT_CPLD_LPC_REG_CONFIG2_OFFSET: + case MLXPLAT_CPLD_LPC_REG_UFM_VERSION_OFFSET: return true; } return false; @@ -1751,6 +2161,19 @@ static const struct reg_default mlxplat_mlxcpld_regmap_ng[] = { { MLXPLAT_CPLD_LPC_REG_WD_CLEAR_WP_OFFSET, 0x00 }, }; +static const struct reg_default mlxplat_mlxcpld_regmap_comex_default[] = { + { MLXPLAT_CPLD_LPC_REG_AGGRCX_MASK_OFFSET, + MLXPLAT_CPLD_LOW_AGGRCX_MASK }, + { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 }, +}; + +static const struct reg_default mlxplat_mlxcpld_regmap_ng400[] = { + { MLXPLAT_CPLD_LPC_REG_PWM_CONTROL_OFFSET, 0x00 }, + { MLXPLAT_CPLD_LPC_REG_WD1_ACT_OFFSET, 0x00 }, + { MLXPLAT_CPLD_LPC_REG_WD2_ACT_OFFSET, 0x00 }, + { MLXPLAT_CPLD_LPC_REG_WD3_ACT_OFFSET, 0x00 }, +}; + struct mlxplat_mlxcpld_regmap_context { void __iomem *base; }; @@ -1803,6 +2226,34 @@ static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng = { .reg_write = mlxplat_mlxcpld_reg_write, }; +static const struct regmap_config mlxplat_mlxcpld_regmap_config_comex = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 255, + .cache_type = REGCACHE_FLAT, + .writeable_reg = mlxplat_mlxcpld_writeable_reg, + .readable_reg = mlxplat_mlxcpld_readable_reg, + .volatile_reg = mlxplat_mlxcpld_volatile_reg, + .reg_defaults = mlxplat_mlxcpld_regmap_comex_default, + .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_comex_default), + .reg_read = mlxplat_mlxcpld_reg_read, + .reg_write = mlxplat_mlxcpld_reg_write, +}; + +static const struct regmap_config mlxplat_mlxcpld_regmap_config_ng400 = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 255, + .cache_type = REGCACHE_FLAT, + .writeable_reg = mlxplat_mlxcpld_writeable_reg, + .readable_reg = mlxplat_mlxcpld_readable_reg, + .volatile_reg = mlxplat_mlxcpld_volatile_reg, + .reg_defaults = mlxplat_mlxcpld_regmap_ng400, + .num_reg_defaults = ARRAY_SIZE(mlxplat_mlxcpld_regmap_ng400), + .reg_read = mlxplat_mlxcpld_reg_read, + .reg_write = mlxplat_mlxcpld_reg_write, +}; + static struct resource mlxplat_mlxcpld_resources[] = { [0] = DEFINE_RES_IRQ_NAMED(17, "mlxreg-hotplug"), }; @@ -1821,7 +2272,10 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_default_channels[i]; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_default_channels[i]); @@ -1834,13 +2288,16 @@ static int __init mlxplat_dmi_default_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1853,13 +2310,16 @@ static int __init mlxplat_dmi_msn21xx_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} 
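+
+/*
+ * Each of these DMI match callbacks follows the same preamble: pick the
+ * mux table, record its length in mlxplat_mux_num and point
+ * mlxplat_mux_data at it, then fill in the per-platform channel maps and
+ * the hotplug/LED/watchdog driver data.
+ */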
static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1872,13 +2332,16 @@ static int __init mlxplat_dmi_msn274x_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1891,13 +2354,16 @@ static int __init mlxplat_dmi_msn201x_matched(const struct dmi_system_id *dmi) mlxplat_wd_data[0] = &mlxplat_mlxcpld_wd_set_type1[0]; return 1; -}; +} static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi) { int i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; mlxplat_mux_data[i].n_values = ARRAY_SIZE(mlxplat_msn21xx_channels); @@ -1914,7 +2380,57 @@ static int __init mlxplat_dmi_qmb7xx_matched(const struct dmi_system_id *dmi) mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng; return 1; -}; +} + +static int __init mlxplat_dmi_comex_matched(const struct dmi_system_id *dmi) +{ + int i; + + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_extended_mux_data); + mlxplat_mux_data = mlxplat_extended_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { + mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; + mlxplat_mux_data[i].n_values = + ARRAY_SIZE(mlxplat_msn21xx_channels); + } + mlxplat_hotplug = &mlxplat_mlxcpld_comex_data; + mlxplat_hotplug->deferred_nr = MLXPLAT_CPLD_MAX_PHYS_EXT_ADAPTER_NUM; + mlxplat_led = &mlxplat_comex_100G_led_data; + mlxplat_regs_io = &mlxplat_default_ng_regs_io_data; + mlxplat_fan = &mlxplat_default_fan_data; + for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++) + mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i]; + mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_comex; + + return 1; +} + +static int __init mlxplat_dmi_ng400_matched(const struct dmi_system_id *dmi) +{ + int i; + + mlxplat_max_adap_num = MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; + mlxplat_mux_num = ARRAY_SIZE(mlxplat_default_mux_data); + mlxplat_mux_data = mlxplat_default_mux_data; + for (i = 0; i < mlxplat_mux_num; i++) { + mlxplat_mux_data[i].values = mlxplat_msn21xx_channels; + mlxplat_mux_data[i].n_values = + ARRAY_SIZE(mlxplat_msn21xx_channels); + } + mlxplat_hotplug = &mlxplat_mlxcpld_ext_data; + mlxplat_hotplug->deferred_nr = + mlxplat_msn21xx_channels[MLXPLAT_CPLD_GRP_CHNL_NUM - 1]; + mlxplat_led = &mlxplat_default_ng_led_data; + mlxplat_regs_io = &mlxplat_default_ng_regs_io_data; + mlxplat_fan = 
&mlxplat_default_fan_data; + for (i = 0; i < ARRAY_SIZE(mlxplat_mlxcpld_wd_set_type2); i++) + mlxplat_wd_data[i] = &mlxplat_mlxcpld_wd_set_type2[i]; + mlxplat_i2c = &mlxplat_mlxcpld_i2c_ng_data; + mlxplat_regmap_config = &mlxplat_mlxcpld_regmap_config_ng400; + + return 1; +} static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { { @@ -1954,6 +2470,18 @@ static const struct dmi_system_id mlxplat_dmi_table[] __initconst = { }, }, { + .callback = mlxplat_dmi_comex_matched, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "VMOD0009"), + }, + }, + { + .callback = mlxplat_dmi_ng400_matched, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "VMOD0010"), + }, + }, + { .callback = mlxplat_dmi_msn274x_matched, .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Mellanox Technologies"), @@ -2043,7 +2571,7 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr) /* Scan adapters from expected id to verify it is free. */ *nr = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; for (i = MLXPLAT_CPLD_PHYS_ADAPTER_DEF_NR; i < - MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM; i++) { + mlxplat_max_adap_num; i++) { search_adap = i2c_get_adapter(i); if (search_adap) { i2c_put_adapter(search_adap); @@ -2057,12 +2585,12 @@ static int mlxplat_mlxcpld_verify_bus_topology(int *nr) } /* Return with error if free id for adapter is not found. */ - if (i == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) + if (i == mlxplat_max_adap_num) return -ENODEV; /* Shift adapter ids, since expected parent adapter is not free. */ *nr = i; - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + for (i = 0; i < mlxplat_mux_num; i++) { shift = *nr - mlxplat_mux_data[i].parent; mlxplat_mux_data[i].parent = *nr; mlxplat_mux_data[i].base_nr += shift; @@ -2118,7 +2646,7 @@ static int __init mlxplat_init(void) if (nr < 0) goto fail_alloc; - nr = (nr == MLXPLAT_CPLD_MAX_PHYS_ADAPTER_NUM) ? -1 : nr; + nr = (nr == mlxplat_max_adap_num) ? 
-1 : nr; if (mlxplat_i2c) mlxplat_i2c->regmap = priv->regmap; priv->pdev_i2c = platform_device_register_resndata( @@ -2131,7 +2659,7 @@ static int __init mlxplat_init(void) goto fail_alloc; } - for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) { + for (i = 0; i < mlxplat_mux_num; i++) { priv->pdev_mux[i] = platform_device_register_resndata( &priv->pdev_i2c->dev, "i2c-mux-reg", i, NULL, @@ -2265,7 +2793,7 @@ static void __exit mlxplat_exit(void) platform_device_unregister(priv->pdev_led); platform_device_unregister(priv->pdev_hotplug); - for (i = ARRAY_SIZE(mlxplat_mux_data) - 1; i >= 0 ; i--) + for (i = mlxplat_mux_num - 1; i >= 0 ; i--) platform_device_unregister(priv->pdev_mux[i]); platform_device_unregister(priv->pdev_i2c); diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 72205771d03d..93177e6e5ecd 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -219,8 +219,7 @@ static const struct property_entry digma_citi_e200_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1980), PROPERTY_ENTRY_U32("touchscreen-size-y", 1500), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1686-digma_citi_e200.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-digma_citi_e200.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -236,8 +235,7 @@ static const struct property_entry gp_electronic_t701_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 640), PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-gp-electronic-t701.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-gp-electronic-t701.fw"), { } }; @@ -382,8 +380,7 @@ static const struct property_entry onda_v80_plus_v3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1698), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3676-onda-v80-plus-v3.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3676-onda-v80-plus-v3.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -398,8 +395,7 @@ static const struct property_entry onda_v820w_32g_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1665), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-onda-v820w-32g.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-onda-v820w-32g.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -415,8 +411,7 @@ static const struct property_entry onda_v891w_v1_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-y", 8), PROPERTY_ENTRY_U32("touchscreen-size-x", 1676), PROPERTY_ENTRY_U32("touchscreen-size-y", 1130), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3680-onda-v891w-v1.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-onda-v891w-v1.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -433,8 +428,7 @@ static const struct property_entry onda_v891w_v3_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1625), PROPERTY_ENTRY_U32("touchscreen-size-y", 1135), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3676-onda-v891w-v3.fw"), + PROPERTY_ENTRY_STRING("firmware-name", 
"gsl3676-onda-v891w-v3.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -450,8 +444,7 @@ static const struct property_entry pipo_w2s_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 880), PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-pipo-w2s.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w2s.fw"), { } }; @@ -460,14 +453,29 @@ static const struct ts_dmi_data pipo_w2s_data = { .properties = pipo_w2s_props, }; +static const struct property_entry pipo_w11_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 1), + PROPERTY_ENTRY_U32("touchscreen-min-y", 15), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1984), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1532), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-pipo-w11.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + PROPERTY_ENTRY_BOOL("silead,home-button"), + { } +}; + +static const struct ts_dmi_data pipo_w11_data = { + .acpi_name = "MSSL1680:00", + .properties = pipo_w11_props, +}; + static const struct property_entry pov_mobii_wintab_p800w_v20_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-x", 32), PROPERTY_ENTRY_U32("touchscreen-min-y", 16), PROPERTY_ENTRY_U32("touchscreen-size-x", 1692), PROPERTY_ENTRY_U32("touchscreen-size-y", 1146), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3680-pov-mobii-wintab-p800w-v20.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3680-pov-mobii-wintab-p800w-v20.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -484,8 +492,7 @@ static const struct property_entry pov_mobii_wintab_p800w_v21_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1794), PROPERTY_ENTRY_U32("touchscreen-size-y", 1148), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3692-pov-mobii-wintab-p800w.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p800w.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -502,8 +509,7 @@ static const struct property_entry pov_mobii_wintab_p1006w_v10_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1984), PROPERTY_ENTRY_U32("touchscreen-size-y", 1520), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3692-pov-mobii-wintab-p1006w-v10.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-pov-mobii-wintab-p1006w-v10.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -520,8 +526,7 @@ static const struct property_entry schneider_sct101ctm_props[] = { PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-schneider-sct101ctm.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-schneider-sct101ctm.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -551,8 +556,7 @@ static const struct property_entry teclast_x98plus2_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_BOOL("touchscreen-inverted-x"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1686-teclast_x98plus2.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-teclast_x98plus2.fw"), 
PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -566,8 +570,7 @@ static const struct property_entry trekstor_primebook_c11_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1970), PROPERTY_ENTRY_U32("touchscreen-size-y", 1530), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-trekstor-primebook-c11.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c11.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -581,8 +584,7 @@ static const struct ts_dmi_data trekstor_primebook_c11_data = { static const struct property_entry trekstor_primebook_c13_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2624), PROPERTY_ENTRY_U32("touchscreen-size-y", 1920), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-trekstor-primebook-c13.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primebook-c13.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -596,8 +598,7 @@ static const struct ts_dmi_data trekstor_primebook_c13_data = { static const struct property_entry trekstor_primetab_t13b_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 2500), PROPERTY_ENTRY_U32("touchscreen-size-y", 1900), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1680-trekstor-primetab-t13b.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-trekstor-primetab-t13b.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), @@ -613,8 +614,7 @@ static const struct property_entry trekstor_surftab_twin_10_1_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1900), PROPERTY_ENTRY_U32("touchscreen-size-y", 1280), PROPERTY_ENTRY_U32("touchscreen-inverted-y", 1), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl3670-surftab-twin-10-1-st10432-8.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl3670-surftab-twin-10-1-st10432-8.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), { } }; @@ -629,8 +629,7 @@ static const struct property_entry trekstor_surftab_wintron70_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-y", 8), PROPERTY_ENTRY_U32("touchscreen-size-x", 884), PROPERTY_ENTRY_U32("touchscreen-size-y", 632), - PROPERTY_ENTRY_STRING("firmware-name", - "gsl1686-surftab-wintron70-st70416-6.fw"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1686-surftab-wintron70-st70416-6.fw"), PROPERTY_ENTRY_U32("silead,max-fingers", 10), PROPERTY_ENTRY_BOOL("silead,home-button"), { } @@ -910,6 +909,16 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* Pipo W11 */ + .driver_data = (void *)&pipo_w11_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "PIPO"), + DMI_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."), + /* Above matches are too generic, add bios-ver match */ + DMI_MATCH(DMI_BIOS_VERSION, "JS-BI-10.6-SF133GR300-GA55B-024-F"), + }, + }, + { /* Ployer Momo7w (same hardware as the Trekstor ST70416-6) */ .driver_data = (void *)&trekstor_surftab_wintron70_data, .matches = { @@ -1032,8 +1041,7 @@ static const struct dmi_system_id touchscreen_dmi_table[] = { .driver_data = (void *)&trekstor_surftab_wintron70_data, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TrekStor"), - DMI_MATCH(DMI_PRODUCT_NAME, - "SurfTab wintron 7.0 ST70416-6"), + DMI_MATCH(DMI_PRODUCT_NAME, "SurfTab wintron 7.0 ST70416-6"), /* Exact match, different versions need different fw */ DMI_MATCH(DMI_BIOS_VERSION, "TREK.G.WI71C.JGBMRBA05"), }, @@ -1065,7 +1073,7 @@ static void 
ts_dmi_add_props(struct i2c_client *client) } static int ts_dmi_notifier_call(struct notifier_block *nb, - unsigned long action, void *data) + unsigned long action, void *data) { struct device *dev = data; struct i2c_client *client; diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 74eb5af7295f..97bfdd47954f 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -194,6 +194,18 @@ config REGULATOR_BD70528 This driver can also be built as a module. If so, the module will be called bd70528-regulator. +config REGULATOR_BD71828 + tristate "ROHM BD71828 Power Regulator" + depends on MFD_ROHM_BD71828 + select REGULATOR_ROHM + help + This driver supports voltage regulators on the ROHM BD71828 PMIC. + This will enable support for the software controllable buck + and LDO regulators. + + This driver can also be built as a module. If so, the module + will be called bd71828-regulator. + config REGULATOR_BD718XX tristate "ROHM BD71837 Power Regulator" depends on MFD_ROHM_BD718XX @@ -600,6 +612,27 @@ config REGULATOR_MCP16502 through the regulator interface. In addition it enables suspend-to-ram/standby transition. +config REGULATOR_MP8859 + tristate "MPS MP8859 regulator driver" + depends on I2C + select REGMAP_I2C + help + Say y here to support the MP8859 voltage regulator. This driver + supports basic operations (get/set voltage) through the regulator + interface. + Say M here if you want to include support for the regulator as a + module. The module will be named "mp8859". + +config REGULATOR_MPQ7920 + tristate "Monolithic MPQ7920 PMIC" + depends on I2C && OF + select REGMAP_I2C + help + Say y here to support the MPQ7920 PMIC. This will enable support + for the 4 software controllable buck and 5 LDO regulators. + This driver supports the control of different power rails of the + device through the regulator interface. + config REGULATOR_MT6311 tristate "MediaTek MT6311 PMIC" depends on I2C @@ -1077,6 +1110,13 @@ config REGULATOR_VEXPRESS This driver provides support for voltage regulators available on the ARM Ltd's Versatile Express platform. +config REGULATOR_VQMMC_IPQ4019 + tristate "IPQ4019 VQMMC SD LDO regulator support" + depends on ARCH_QCOM + help + This driver provides support for the VQMMC LDO I/O + voltage regulator of the IPQ4019 SD/EMMC controller.
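All three new Kconfig entries expose rails through the standard regulator framework, so consumers use the generic consumer API rather than anything driver-specific. As a rough sketch (not part of this patch; the "vdd" supply name and the 3.3V request are hypothetical, mapping to a vdd-supply phandle in the consumer's device tree node):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

/* Hypothetical consumer of one of the rails added above. */
static int example_claim_rail(struct device *dev)
{
	struct regulator *vdd;
	int ret;

	/* Resolved via the consumer's vdd-supply DT property. */
	vdd = devm_regulator_get(dev, "vdd");
	if (IS_ERR(vdd))
		return PTR_ERR(vdd);

	/* Request a window; the core maps it onto the driver's selectors. */
	ret = regulator_set_voltage(vdd, 3300000, 3300000);
	if (ret)
		return ret;

	/* Enables are reference counted; pair with regulator_disable(). */
	return regulator_enable(vdd);
}

The same calls work against any of the drivers in this series, since they all register through devm_regulator_register().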
+ config REGULATOR_WM831X tristate "Wolfson Microelectronics WM831x PMIC regulators" depends on MFD_WM831X diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 2210ba56f9bd..07bc977c52b0 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile @@ -28,6 +28,7 @@ obj-$(CONFIG_REGULATOR_AS3722) += as3722-regulator.o obj-$(CONFIG_REGULATOR_AXP20X) += axp20x-regulator.o obj-$(CONFIG_REGULATOR_BCM590XX) += bcm590xx-regulator.o obj-$(CONFIG_REGULATOR_BD70528) += bd70528-regulator.o +obj-$(CONFIG_REGULATOR_BD71828) += bd71828-regulator.o obj-$(CONFIG_REGULATOR_BD718XX) += bd718x7-regulator.o obj-$(CONFIG_REGULATOR_BD9571MWV) += bd9571mwv-regulator.o obj-$(CONFIG_REGULATOR_DA903X) += da903x.o @@ -77,6 +78,8 @@ obj-$(CONFIG_REGULATOR_MC13783) += mc13783-regulator.o obj-$(CONFIG_REGULATOR_MC13892) += mc13892-regulator.o obj-$(CONFIG_REGULATOR_MC13XXX_CORE) += mc13xxx-regulator-core.o obj-$(CONFIG_REGULATOR_MCP16502) += mcp16502.o +obj-$(CONFIG_REGULATOR_MP8859) += mp8859.o +obj-$(CONFIG_REGULATOR_MPQ7920) += mpq7920.o obj-$(CONFIG_REGULATOR_MT6311) += mt6311-regulator.o obj-$(CONFIG_REGULATOR_MT6323) += mt6323-regulator.o obj-$(CONFIG_REGULATOR_MT6358) += mt6358-regulator.o @@ -132,6 +135,7 @@ obj-$(CONFIG_REGULATOR_TWL4030) += twl-regulator.o twl6030-regulator.o obj-$(CONFIG_REGULATOR_UNIPHIER) += uniphier-regulator.o obj-$(CONFIG_REGULATOR_VCTRL) += vctrl-regulator.o obj-$(CONFIG_REGULATOR_VEXPRESS) += vexpress-regulator.o +obj-$(CONFIG_REGULATOR_VQMMC_IPQ4019) += vqmmc-ipq4019-regulator.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-dcdc.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-isink.o obj-$(CONFIG_REGULATOR_WM831X) += wm831x-ldo.o diff --git a/drivers/regulator/bd71828-regulator.c b/drivers/regulator/bd71828-regulator.c new file mode 100644 index 000000000000..b2fa17be4988 --- /dev/null +++ b/drivers/regulator/bd71828-regulator.c @@ -0,0 +1,807 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright (C) 2019 ROHM Semiconductors +// bd71828-regulator.c ROHM BD71828GW-DS1 regulator driver +// + +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/mfd/rohm-bd71828.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> + +struct reg_init { + unsigned int reg; + unsigned int mask; + unsigned int val; +}; +struct bd71828_regulator_data { + struct regulator_desc desc; + const struct rohm_dvs_config dvs; + const struct reg_init *reg_inits; + int reg_init_amnt; +}; + +static const struct reg_init buck1_inits[] = { + /* + * DVS Buck voltages can be changed by register values or via GPIO. + * Use register accesses by default. 
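+ * The reg_init entries below write BD71828_DVS_BUCK1_CTRL_I2C into + * BD71828_REG_PS_CTRL_1, so the DVS voltage selection follows the + * vsel registers rather than the state of the DVS GPIOs.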
+ */ + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK1_CTRL, + .val = BD71828_DVS_BUCK1_CTRL_I2C, + }, +}; + +static const struct reg_init buck2_inits[] = { + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK2_CTRL, + .val = BD71828_DVS_BUCK2_CTRL_I2C, + }, +}; + +static const struct reg_init buck6_inits[] = { + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK6_CTRL, + .val = BD71828_DVS_BUCK6_CTRL_I2C, + }, +}; + +static const struct reg_init buck7_inits[] = { + { + .reg = BD71828_REG_PS_CTRL_1, + .mask = BD71828_MASK_DVS_BUCK7_CTRL, + .val = BD71828_DVS_BUCK7_CTRL_I2C, + }, +}; + +static const struct regulator_linear_range bd71828_buck1267_volts[] = { + REGULATOR_LINEAR_RANGE(500000, 0x00, 0xef, 6250), + REGULATOR_LINEAR_RANGE(2000000, 0xf0, 0xff, 0), +}; + +static const struct regulator_linear_range bd71828_buck3_volts[] = { + REGULATOR_LINEAR_RANGE(1200000, 0x00, 0x0f, 50000), + REGULATOR_LINEAR_RANGE(2000000, 0x10, 0x1f, 0), +}; + +static const struct regulator_linear_range bd71828_buck4_volts[] = { + REGULATOR_LINEAR_RANGE(1000000, 0x00, 0x1f, 25000), + REGULATOR_LINEAR_RANGE(1800000, 0x20, 0x3f, 0), +}; + +static const struct regulator_linear_range bd71828_buck5_volts[] = { + REGULATOR_LINEAR_RANGE(2500000, 0x00, 0x0f, 50000), + REGULATOR_LINEAR_RANGE(3300000, 0x10, 0x1f, 0), +}; + +static const struct regulator_linear_range bd71828_ldo_volts[] = { + REGULATOR_LINEAR_RANGE(800000, 0x00, 0x31, 50000), + REGULATOR_LINEAR_RANGE(3300000, 0x32, 0x3f, 0), +}; + +static int bd71828_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) +{ + unsigned int val; + + switch (ramp_delay) { + case 1 ... 2500: + val = 0; + break; + case 2501 ... 5000: + val = 1; + break; + case 5001 ... 10000: + val = 2; + break; + case 10001 ... 
20000: + val = 3; + break; + default: + val = 3; + dev_err(&rdev->dev, + "ramp_delay: %d not supported, setting 20mV/us\n", + ramp_delay); + } + + /* + * On BD71828 the ramp delay level control reg is at offset +2 from the + * enable reg + */ + return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg + 2, + BD71828_MASK_RAMP_DELAY, + val << (ffs(BD71828_MASK_RAMP_DELAY) - 1)); +} + +static int buck_set_hw_dvs_levels(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + struct bd71828_regulator_data *data; + + data = container_of(desc, struct bd71828_regulator_data, desc); + + return rohm_regulator_set_dvs_levels(&data->dvs, np, desc, cfg->regmap); +} + +static int ldo6_parse_dt(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *cfg) +{ + int ret, i; + uint32_t uv = 0; + unsigned int en; + struct regmap *regmap = cfg->regmap; + static const char * const props[] = { "rohm,dvs-run-voltage", + "rohm,dvs-idle-voltage", + "rohm,dvs-suspend-voltage", + "rohm,dvs-lpsr-voltage" }; + unsigned int mask[] = { BD71828_MASK_RUN_EN, BD71828_MASK_IDLE_EN, + BD71828_MASK_SUSP_EN, BD71828_MASK_LPSR_EN }; + + for (i = 0; i < ARRAY_SIZE(props); i++) { + ret = of_property_read_u32(np, props[i], &uv); + if (ret) { + if (ret != -EINVAL) + return ret; + continue; + } + if (uv) + en = 0xffffffff; + else + en = 0; + + ret = regmap_update_bits(regmap, desc->enable_reg, mask[i], en); + if (ret) + return ret; + } + return 0; +} + +static const struct regulator_ops bd71828_buck_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, +}; + +static const struct regulator_ops bd71828_dvs_buck_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_time_sel = regulator_set_voltage_time_sel, + .set_ramp_delay = bd71828_set_ramp_delay, +}; + +static const struct regulator_ops bd71828_ldo_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear_range, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .get_voltage_sel = regulator_get_voltage_sel_regmap, +}; + +static const struct regulator_ops bd71828_ldo6_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, +}; + +static const struct bd71828_regulator_data bd71828_rdata[] = { + { + .desc = { + .name = "buck1", + .of_match = of_match_ptr("BUCK1"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK1, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK1_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK1_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb =
ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK1_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK1_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK1_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + /* + * LPSR voltage is same as SUSPEND voltage. Allow + * setting it so that regulator can be set enabled at + * LPSR state + */ + .lpsr_reg = BD71828_REG_BUCK1_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck1_inits, + .reg_init_amnt = ARRAY_SIZE(buck1_inits), + }, + { + .desc = { + .name = "buck2", + .of_match = of_match_ptr("BUCK2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK2, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK2_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK2_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK2_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK2_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK2_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + .lpsr_reg = BD71828_REG_BUCK2_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck2_inits, + .reg_init_amnt = ARRAY_SIZE(buck2_inits), + }, + { + .desc = { + .name = "buck3", + .of_match = of_match_ptr("BUCK3"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK3, + .ops = &bd71828_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck3_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck3_volts), + .n_voltages = BD71828_BUCK3_VOLTS, + .enable_reg = BD71828_REG_BUCK3_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK3_VOLT, + .vsel_mask = BD71828_MASK_BUCK3_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * BUCK3 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK3_VOLT, + .idle_reg = BD71828_REG_BUCK3_VOLT, + .suspend_reg = BD71828_REG_BUCK3_VOLT, + .lpsr_reg = BD71828_REG_BUCK3_VOLT, + .run_mask = BD71828_MASK_BUCK3_VOLT, + .idle_mask = BD71828_MASK_BUCK3_VOLT, + .suspend_mask = BD71828_MASK_BUCK3_VOLT, + .lpsr_mask = BD71828_MASK_BUCK3_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, + { + .desc = { + .name = "buck4", + .of_match = of_match_ptr("BUCK4"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK4, + .ops = &bd71828_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck4_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck4_volts), + .n_voltages = BD71828_BUCK4_VOLTS, + .enable_reg = BD71828_REG_BUCK4_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK4_VOLT, + .vsel_mask = BD71828_MASK_BUCK4_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * BUCK4 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK4_VOLT, + .idle_reg = BD71828_REG_BUCK4_VOLT, + .suspend_reg = BD71828_REG_BUCK4_VOLT, + .lpsr_reg = BD71828_REG_BUCK4_VOLT, + .run_mask = BD71828_MASK_BUCK4_VOLT, + .idle_mask = BD71828_MASK_BUCK4_VOLT, + .suspend_mask = BD71828_MASK_BUCK4_VOLT, + .lpsr_mask = BD71828_MASK_BUCK4_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, + { + .desc = { + .name = "buck5", + .of_match = of_match_ptr("BUCK5"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK5, + .ops = &bd71828_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck5_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck5_volts), + .n_voltages = BD71828_BUCK5_VOLTS, + .enable_reg = BD71828_REG_BUCK5_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK5_VOLT, + .vsel_mask = BD71828_MASK_BUCK5_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * BUCK5 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
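+ * The per-state voltages and enable bits come from the + * rohm,dvs-*-voltage DT properties (the same names handled in + * ldo6_parse_dt()), parsed via the buck_set_hw_dvs_levels() callback.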
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK5_VOLT, + .idle_reg = BD71828_REG_BUCK5_VOLT, + .suspend_reg = BD71828_REG_BUCK5_VOLT, + .lpsr_reg = BD71828_REG_BUCK5_VOLT, + .run_mask = BD71828_MASK_BUCK5_VOLT, + .idle_mask = BD71828_MASK_BUCK5_VOLT, + .suspend_mask = BD71828_MASK_BUCK5_VOLT, + .lpsr_mask = BD71828_MASK_BUCK5_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, + { + .desc = { + .name = "buck6", + .of_match = of_match_ptr("BUCK6"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK6, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK6_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK6_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK6_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK6_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK6_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + .lpsr_reg = BD71828_REG_BUCK6_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck6_inits, + .reg_init_amnt = ARRAY_SIZE(buck6_inits), + }, + { + .desc = { + .name = "buck7", + .of_match = of_match_ptr("BUCK7"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_BUCK7, + .ops = &bd71828_dvs_buck_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_buck1267_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_buck1267_volts), + .n_voltages = BD71828_BUCK1267_VOLTS, + .enable_reg = BD71828_REG_BUCK7_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_BUCK7_VOLT, + .vsel_mask = BD71828_MASK_BUCK1267_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_BUCK7_VOLT, + .run_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_reg = BD71828_REG_BUCK7_IDLE_VOLT, + .idle_mask = BD71828_MASK_BUCK1267_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_reg = BD71828_REG_BUCK7_SUSP_VOLT, + .suspend_mask = BD71828_MASK_BUCK1267_VOLT, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + .lpsr_reg = BD71828_REG_BUCK7_SUSP_VOLT, + .lpsr_mask = BD71828_MASK_BUCK1267_VOLT, + }, + .reg_inits = buck7_inits, + .reg_init_amnt = ARRAY_SIZE(buck7_inits), + }, + { + .desc = { + .name = "ldo1", + .of_match = of_match_ptr("LDO1"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO1, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO1_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO1_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = 
buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO1 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO1_VOLT, + .idle_reg = BD71828_REG_LDO1_VOLT, + .suspend_reg = BD71828_REG_LDO1_VOLT, + .lpsr_reg = BD71828_REG_LDO1_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, { + .desc = { + .name = "ldo2", + .of_match = of_match_ptr("LDO2"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO2, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO2_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO2_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO2 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO2_VOLT, + .idle_reg = BD71828_REG_LDO2_VOLT, + .suspend_reg = BD71828_REG_LDO2_VOLT, + .lpsr_reg = BD71828_REG_LDO2_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, { + .desc = { + .name = "ldo3", + .of_match = of_match_ptr("LDO3"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO3, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO3_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO3_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO3 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO3_VOLT, + .idle_reg = BD71828_REG_LDO3_VOLT, + .suspend_reg = BD71828_REG_LDO3_VOLT, + .lpsr_reg = BD71828_REG_LDO3_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + + }, { + .desc = { + .name = "ldo4", + .of_match = of_match_ptr("LDO4"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO4, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO4_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO4_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO4 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. + */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO4_VOLT, + .idle_reg = BD71828_REG_LDO4_VOLT, + .suspend_reg = BD71828_REG_LDO4_VOLT, + .lpsr_reg = BD71828_REG_LDO4_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + }, { + .desc = { + .name = "ldo5", + .of_match = of_match_ptr("LDO5"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO5, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO5_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO5_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .of_parse_cb = buck_set_hw_dvs_levels, + .owner = THIS_MODULE, + }, + /* + * LDO5 is special. Its vsel setting can come from either of two + * registers, selected by GPIO. + * + * This driver supports only the configuration where + * BD71828_REG_LDO5_VOLT_L is used.
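+ * Using the alternate vsel register would require driving the LDO5 + * selection GPIO, which this driver does not do.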
+ */ + .dvs = { + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO5_VOLT, + .idle_reg = BD71828_REG_LDO5_VOLT, + .suspend_reg = BD71828_REG_LDO5_VOLT, + .lpsr_reg = BD71828_REG_LDO5_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + + }, { + .desc = { + .name = "ldo6", + .of_match = of_match_ptr("LDO6"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO6, + .ops = &bd71828_ldo6_ops, + .type = REGULATOR_VOLTAGE, + .fixed_uV = BD71828_LDO_6_VOLTAGE, + .n_voltages = 1, + .enable_reg = BD71828_REG_LDO6_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .owner = THIS_MODULE, + /* + * LDO6 only supports enable/disable for all states. + * Voltage for LDO6 is fixed. + */ + .of_parse_cb = ldo6_parse_dt, + }, + }, { + .desc = { + /* SNVS LDO in data-sheet */ + .name = "ldo7", + .of_match = of_match_ptr("LDO7"), + .regulators_node = of_match_ptr("regulators"), + .id = BD71828_LDO_SNVS, + .ops = &bd71828_ldo_ops, + .type = REGULATOR_VOLTAGE, + .linear_ranges = bd71828_ldo_volts, + .n_linear_ranges = ARRAY_SIZE(bd71828_ldo_volts), + .n_voltages = BD71828_LDO_VOLTS, + .enable_reg = BD71828_REG_LDO7_EN, + .enable_mask = BD71828_MASK_RUN_EN, + .vsel_reg = BD71828_REG_LDO7_VOLT, + .vsel_mask = BD71828_MASK_LDO_VOLT, + .owner = THIS_MODULE, + .of_parse_cb = buck_set_hw_dvs_levels, + }, + .dvs = { + /* + * LDO7 only supports single voltage for all states. + * voltage can be individually enabled for each state + * though => allow setting all states to support + * enabling power rail on different states. 
+ */ + .level_map = ROHM_DVS_LEVEL_RUN | ROHM_DVS_LEVEL_IDLE | + ROHM_DVS_LEVEL_SUSPEND | + ROHM_DVS_LEVEL_LPSR, + .run_reg = BD71828_REG_LDO7_VOLT, + .idle_reg = BD71828_REG_LDO7_VOLT, + .suspend_reg = BD71828_REG_LDO7_VOLT, + .lpsr_reg = BD71828_REG_LDO7_VOLT, + .run_mask = BD71828_MASK_LDO_VOLT, + .idle_mask = BD71828_MASK_LDO_VOLT, + .suspend_mask = BD71828_MASK_LDO_VOLT, + .lpsr_mask = BD71828_MASK_LDO_VOLT, + .idle_on_mask = BD71828_MASK_IDLE_EN, + .suspend_on_mask = BD71828_MASK_SUSP_EN, + .lpsr_on_mask = BD71828_MASK_LPSR_EN, + }, + + }, +}; + +static int bd71828_probe(struct platform_device *pdev) +{ + struct rohm_regmap_dev *bd71828; + int i, j, ret; + struct regulator_config config = { + .dev = pdev->dev.parent, + }; + + bd71828 = dev_get_drvdata(pdev->dev.parent); + if (!bd71828) { + dev_err(&pdev->dev, "No MFD driver data\n"); + return -EINVAL; + } + + config.regmap = bd71828->regmap; + + for (i = 0; i < ARRAY_SIZE(bd71828_rdata); i++) { + struct regulator_dev *rdev; + const struct bd71828_regulator_data *rd; + + rd = &bd71828_rdata[i]; + rdev = devm_regulator_register(&pdev->dev, + &rd->desc, &config); + if (IS_ERR(rdev)) { + dev_err(&pdev->dev, + "failed to register %s regulator\n", + rd->desc.name); + return PTR_ERR(rdev); + } + for (j = 0; j < rd->reg_init_amnt; j++) { + ret = regmap_update_bits(bd71828->regmap, + rd->reg_inits[j].reg, + rd->reg_inits[j].mask, + rd->reg_inits[j].val); + if (ret) { + dev_err(&pdev->dev, + "regulator %s init failed\n", + rd->desc.name); + return ret; + } + } + } + return 0; +} + +static struct platform_driver bd71828_regulator = { + .driver = { + .name = "bd71828-pmic" + }, + .probe = bd71828_probe, +}; + +module_platform_driver(bd71828_regulator); + +MODULE_AUTHOR("Matti Vaittinen <matti.vaittinen@fi.rohmeurope.com>"); +MODULE_DESCRIPTION("BD71828 voltage regulator driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:bd71828-pmic"); diff --git a/drivers/regulator/bd718x7-regulator.c b/drivers/regulator/bd718x7-regulator.c index 13a43eee2e46..8f9b2d8eaf10 100644 --- a/drivers/regulator/bd718x7-regulator.c +++ b/drivers/regulator/bd718x7-regulator.c @@ -1142,28 +1142,14 @@ static const struct bd718xx_regulator_data bd71837_regulators[] = { }, }; -struct bd718xx_pmic_inits { - const struct bd718xx_regulator_data *r_datas; - unsigned int r_amount; -}; - static int bd718xx_probe(struct platform_device *pdev) { struct bd718xx *mfd; struct regulator_config config = { 0 }; - struct bd718xx_pmic_inits pmic_regulators[ROHM_CHIP_TYPE_AMOUNT] = { - [ROHM_CHIP_TYPE_BD71837] = { - .r_datas = bd71837_regulators, - .r_amount = ARRAY_SIZE(bd71837_regulators), - }, - [ROHM_CHIP_TYPE_BD71847] = { - .r_datas = bd71847_regulators, - .r_amount = ARRAY_SIZE(bd71847_regulators), - }, - }; - int i, j, err; bool use_snvs; + const struct bd718xx_regulator_data *reg_data; + unsigned int num_reg_data; mfd = dev_get_drvdata(pdev->dev.parent); if (!mfd) { @@ -1172,8 +1158,16 @@ static int bd718xx_probe(struct platform_device *pdev) goto err; } - if (mfd->chip.chip_type >= ROHM_CHIP_TYPE_AMOUNT || - !pmic_regulators[mfd->chip.chip_type].r_datas) { + switch (mfd->chip.chip_type) { + case ROHM_CHIP_TYPE_BD71837: + reg_data = bd71837_regulators; + num_reg_data = ARRAY_SIZE(bd71837_regulators); + break; + case ROHM_CHIP_TYPE_BD71847: + reg_data = bd71847_regulators; + num_reg_data = ARRAY_SIZE(bd71847_regulators); + break; + default: dev_err(&pdev->dev, "Unsupported chip type\n"); err = -EINVAL; goto err; @@ -1215,13 +1209,13 @@ static int bd718xx_probe(struct 
platform_device *pdev) } } - for (i = 0; i < pmic_regulators[mfd->chip.chip_type].r_amount; i++) { + for (i = 0; i < num_reg_data; i++) { const struct regulator_desc *desc; struct regulator_dev *rdev; const struct bd718xx_regulator_data *r; - r = &pmic_regulators[mfd->chip.chip_type].r_datas[i]; + r = ®_data[i]; desc = &r->desc; config.dev = pdev->dev.parent; diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 03d79fee2987..d015d99cb59d 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -3470,6 +3470,7 @@ int regulator_set_voltage_rdev(struct regulator_dev *rdev, int min_uV, out: return ret; } +EXPORT_SYMBOL_GPL(regulator_set_voltage_rdev); static int regulator_limit_voltage_step(struct regulator_dev *rdev, int *current_uV, int *min_uV) @@ -4034,6 +4035,7 @@ int regulator_get_voltage_rdev(struct regulator_dev *rdev) return ret; return ret - rdev->constraints->uV_offset; } +EXPORT_SYMBOL_GPL(regulator_get_voltage_rdev); /** * regulator_get_voltage - get regulator output voltage diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index f9448ed50e05..0cdeb6186529 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c @@ -131,8 +131,7 @@ static const struct of_device_id da9210_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, da9210_dt_ids); -static int da9210_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int da9210_i2c_probe(struct i2c_client *i2c) { struct da9210 *chip; struct device *dev = &i2c->dev; @@ -228,7 +227,7 @@ static struct i2c_driver da9210_regulator_driver = { .name = "da9210", .of_match_table = of_match_ptr(da9210_dt_ids), }, - .probe = da9210_i2c_probe, + .probe_new = da9210_i2c_probe, .id_table = da9210_i2c_id, }; diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c index 523dc1b95826..2ea4362ffa5c 100644 --- a/drivers/regulator/da9211-regulator.c +++ b/drivers/regulator/da9211-regulator.c @@ -416,8 +416,7 @@ static int da9211_regulator_init(struct da9211 *chip) /* * I2C driver interface functions */ -static int da9211_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int da9211_i2c_probe(struct i2c_client *i2c) { struct da9211 *chip; int error, ret; @@ -526,7 +525,7 @@ static struct i2c_driver da9211_regulator_driver = { .name = "da9211", .of_match_table = of_match_ptr(da9211_dt_ids), }, - .probe = da9211_i2c_probe, + .probe_new = da9211_i2c_probe, .id_table = da9211_i2c_id, }; diff --git a/drivers/regulator/helpers.c b/drivers/regulator/helpers.c index ca3dc3f3bb29..bb16c465426e 100644 --- a/drivers/regulator/helpers.c +++ b/drivers/regulator/helpers.c @@ -13,6 +13,8 @@ #include <linux/regulator/driver.h> #include <linux/module.h> +#include "internal.h" + /** * regulator_is_enabled_regmap - standard is_enabled() for regmap users * @@ -881,3 +883,15 @@ void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers, consumers[i].supply = supply_names[i]; } EXPORT_SYMBOL_GPL(regulator_bulk_set_supply_names); + +/** + * regulator_is_equal - test whether two regulators are the same + * + * @reg1: first regulator to operate on + * @reg2: second regulator to operate on + */ +bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2) +{ + return reg1->rdev == reg2->rdev; +} +EXPORT_SYMBOL_GPL(regulator_is_equal); diff --git a/drivers/regulator/isl9305.c b/drivers/regulator/isl9305.c index 978f5e903cae..cfb765986d0d 100644 --- a/drivers/regulator/isl9305.c +++ 
b/drivers/regulator/isl9305.c @@ -137,8 +137,7 @@ static const struct regmap_config isl9305_regmap = { .cache_type = REGCACHE_RBTREE, }; -static int isl9305_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int isl9305_i2c_probe(struct i2c_client *i2c) { struct regulator_config config = { }; struct isl9305_pdata *pdata = i2c->dev.platform_data; @@ -198,7 +197,7 @@ static struct i2c_driver isl9305_regulator_driver = { .name = "isl9305", .of_match_table = of_match_ptr(isl9305_dt_ids), }, - .probe = isl9305_i2c_probe, + .probe_new = isl9305_i2c_probe, .id_table = isl9305_i2c_id, }; diff --git a/drivers/regulator/lp3971.c b/drivers/regulator/lp3971.c index bc96e65ef7c0..8be252f81b09 100644 --- a/drivers/regulator/lp3971.c +++ b/drivers/regulator/lp3971.c @@ -400,8 +400,7 @@ static int setup_regulators(struct lp3971 *lp3971, return 0; } -static int lp3971_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int lp3971_i2c_probe(struct i2c_client *i2c) { struct lp3971 *lp3971; struct lp3971_platform_data *pdata = dev_get_platdata(&i2c->dev); @@ -449,7 +448,7 @@ static struct i2c_driver lp3971_i2c_driver = { .driver = { .name = "LP3971", }, - .probe = lp3971_i2c_probe, + .probe_new = lp3971_i2c_probe, .id_table = lp3971_i2c_id, }; diff --git a/drivers/regulator/ltc3676.c b/drivers/regulator/ltc3676.c index d934540eb8c4..e12e52c69e52 100644 --- a/drivers/regulator/ltc3676.c +++ b/drivers/regulator/ltc3676.c @@ -301,8 +301,7 @@ static irqreturn_t ltc3676_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static int ltc3676_regulator_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int ltc3676_regulator_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct regulator_init_data *init_data = dev_get_platdata(dev); @@ -380,7 +379,7 @@ static struct i2c_driver ltc3676_driver = { .name = DRIVER_NAME, .of_match_table = of_match_ptr(ltc3676_of_match), }, - .probe = ltc3676_regulator_probe, + .probe_new = ltc3676_regulator_probe, .id_table = ltc3676_i2c_id, }; module_i2c_driver(ltc3676_driver); diff --git a/drivers/regulator/mp8859.c b/drivers/regulator/mp8859.c new file mode 100644 index 000000000000..1d26b506ee5b --- /dev/null +++ b/drivers/regulator/mp8859.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +// +// Copyright (c) 2019 five technologies GmbH +// Author: Markus Reichl <m.reichl@fivetechno.de> + +#include <linux/module.h> +#include <linux/i2c.h> +#include <linux/of.h> +#include <linux/regulator/driver.h> +#include <linux/regmap.h> + + +#define VOL_MIN_IDX 0x00 +#define VOL_MAX_IDX 0x7ff + +/* Register definitions */ +#define MP8859_VOUT_L_REG 0 //3 lo Bits +#define MP8859_VOUT_H_REG 1 //8 hi Bits +#define MP8859_VOUT_GO_REG 2 +#define MP8859_IOUT_LIM_REG 3 +#define MP8859_CTL1_REG 4 +#define MP8859_CTL2_REG 5 +#define MP8859_RESERVED1_REG 6 +#define MP8859_RESERVED2_REG 7 +#define MP8859_RESERVED3_REG 8 +#define MP8859_STATUS_REG 9 +#define MP8859_INTERRUPT_REG 0x0A +#define MP8859_MASK_REG 0x0B +#define MP8859_ID1_REG 0x0C +#define MP8859_MFR_ID_REG 0x27 +#define MP8859_DEV_ID_REG 0x28 +#define MP8859_IC_REV_REG 0x29 + +#define MP8859_MAX_REG 0x29 + +#define MP8859_GO_BIT 0x01 + + +static int mp8859_set_voltage_sel(struct regulator_dev *rdev, unsigned int sel) +{ + int ret; + + ret = regmap_write(rdev->regmap, MP8859_VOUT_L_REG, sel & 0x7); + + if (ret) + return ret; + ret = regmap_write(rdev->regmap, MP8859_VOUT_H_REG, sel >> 3); + + if (ret) + return ret; + ret = 
regmap_update_bits(rdev->regmap, MP8859_VOUT_GO_REG, + MP8859_GO_BIT, 1); + return ret; +} + +static int mp8859_get_voltage_sel(struct regulator_dev *rdev) +{ + unsigned int val_tmp; + unsigned int val; + int ret; + + ret = regmap_read(rdev->regmap, MP8859_VOUT_H_REG, &val_tmp); + + if (ret) + return ret; + val = val_tmp << 3; + + ret = regmap_read(rdev->regmap, MP8859_VOUT_L_REG, &val_tmp); + + if (ret) + return ret; + val |= val_tmp & 0x07; + return val; +} + +static const struct regulator_linear_range mp8859_dcdc_ranges[] = { + REGULATOR_LINEAR_RANGE(0, VOL_MIN_IDX, VOL_MAX_IDX, 10000), +}; + +static const struct regmap_config mp8859_regmap = { + .reg_bits = 8, + .val_bits = 8, + .max_register = MP8859_MAX_REG, + .cache_type = REGCACHE_RBTREE, +}; + +static const struct regulator_ops mp8859_ops = { + .set_voltage_sel = mp8859_set_voltage_sel, + .get_voltage_sel = mp8859_get_voltage_sel, + .list_voltage = regulator_list_voltage_linear_range, +}; + +static const struct regulator_desc mp8859_regulators[] = { + { + .id = 0, + .type = REGULATOR_VOLTAGE, + .name = "mp8859_dcdc", + .of_match = of_match_ptr("mp8859_dcdc"), + .n_voltages = VOL_MAX_IDX + 1, + .linear_ranges = mp8859_dcdc_ranges, + .n_linear_ranges = 1, + .ops = &mp8859_ops, + .owner = THIS_MODULE, + }, +}; + +static int mp8859_i2c_probe(struct i2c_client *i2c) +{ + int ret; + struct regulator_config config = {.dev = &i2c->dev}; + struct regmap *regmap = devm_regmap_init_i2c(i2c, &mp8859_regmap); + struct regulator_dev *rdev; + + if (IS_ERR(regmap)) { + ret = PTR_ERR(regmap); + dev_err(&i2c->dev, "regmap init failed: %d\n", ret); + return ret; + } + rdev = devm_regulator_register(&i2c->dev, &mp8859_regulators[0], + &config); + + if (IS_ERR(rdev)) { + ret = PTR_ERR(rdev); + dev_err(&i2c->dev, "failed to register %s: %d\n", + mp8859_regulators[0].name, ret); + return ret; + } + return 0; +} + +static const struct of_device_id mp8859_dt_id[] = { + {.compatible = "mps,mp8859"}, + {}, +}; +MODULE_DEVICE_TABLE(of, mp8859_dt_id); + +static const struct i2c_device_id mp8859_i2c_id[] = { + { "mp8859", }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, mp8859_i2c_id); + +static struct i2c_driver mp8859_regulator_driver = { + .driver = { + .name = "mp8859", + .of_match_table = of_match_ptr(mp8859_dt_id), + }, + .probe_new = mp8859_i2c_probe, + .id_table = mp8859_i2c_id, +}; + +module_i2c_driver(mp8859_regulator_driver); + +MODULE_DESCRIPTION("Monolithic Power Systems MP8859 voltage regulator driver"); +MODULE_AUTHOR("Markus Reichl <m.reichl@fivetechno.de>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/regulator/mpq7920.c b/drivers/regulator/mpq7920.c new file mode 100644 index 000000000000..54c862edf571 --- /dev/null +++ b/drivers/regulator/mpq7920.c @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// mpq7920.c - regulator driver for mps mpq7920 +// +// Copyright 2019 Monolithic Power Systems, Inc +// +// Author: Saravanan Sekar <sravanhome@gmail.com> + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/of_regulator.h> +#include <linux/i2c.h> +#include <linux/regmap.h> +#include "mpq7920.h" + +#define MPQ7920_BUCK_VOLT_RANGE \ + ((MPQ7920_VOLT_MAX - MPQ7920_BUCK_VOLT_MIN)/MPQ7920_VOLT_STEP + 1) +#define MPQ7920_LDO_VOLT_RANGE \ + ((MPQ7920_VOLT_MAX - MPQ7920_LDO_VOLT_MIN)/MPQ7920_VOLT_STEP + 1) + +#define MPQ7920BUCK(_name, _id, _ilim) 
\ + [MPQ7920_BUCK ## _id] = { \ + .id = MPQ7920_BUCK ## _id, \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .of_parse_cb = mpq7920_parse_cb, \ + .ops = &mpq7920_buck_ops, \ + .min_uV = MPQ7920_BUCK_VOLT_MIN, \ + .uV_step = MPQ7920_VOLT_STEP, \ + .n_voltages = MPQ7920_BUCK_VOLT_RANGE, \ + .curr_table = _ilim, \ + .n_current_limits = ARRAY_SIZE(_ilim), \ + .csel_reg = MPQ7920_BUCK ##_id## _REG_C, \ + .csel_mask = MPQ7920_MASK_BUCK_ILIM, \ + .enable_reg = MPQ7920_REG_REGULATOR_EN, \ + .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \ + MPQ7920_BUCK ## _id), \ + .vsel_reg = MPQ7920_BUCK ##_id## _REG_A, \ + .vsel_mask = MPQ7920_MASK_VREF, \ + .active_discharge_on = MPQ7920_DISCHARGE_ON, \ + .active_discharge_reg = MPQ7920_BUCK ##_id## _REG_B, \ + .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \ + .soft_start_reg = MPQ7920_BUCK ##_id## _REG_C, \ + .soft_start_mask = MPQ7920_MASK_SOFTSTART, \ + .owner = THIS_MODULE, \ + } + +#define MPQ7920LDO(_name, _id, _ops, _ilim, _ilim_sz, _creg, _cmask) \ + [MPQ7920_LDO ## _id] = { \ + .id = MPQ7920_LDO ## _id, \ + .name = _name, \ + .of_match = _name, \ + .regulators_node = "regulators", \ + .ops = _ops, \ + .min_uV = MPQ7920_LDO_VOLT_MIN, \ + .uV_step = MPQ7920_VOLT_STEP, \ + .n_voltages = MPQ7920_LDO_VOLT_RANGE, \ + .vsel_reg = MPQ7920_LDO ##_id## _REG_A, \ + .vsel_mask = MPQ7920_MASK_VREF, \ + .curr_table = _ilim, \ + .n_current_limits = _ilim_sz, \ + .csel_reg = _creg, \ + .csel_mask = _cmask, \ + .enable_reg = (_id == 1) ? 0 : MPQ7920_REG_REGULATOR_EN,\ + .enable_mask = BIT(MPQ7920_REGULATOR_EN_OFFSET - \ + MPQ7920_LDO ##_id + 1), \ + .active_discharge_on = MPQ7920_DISCHARGE_ON, \ + .active_discharge_mask = MPQ7920_MASK_DISCHARGE, \ + .active_discharge_reg = MPQ7920_LDO ##_id## _REG_B, \ + .type = REGULATOR_VOLTAGE, \ + .owner = THIS_MODULE, \ + } + +enum mpq7920_regulators { + MPQ7920_BUCK1, + MPQ7920_BUCK2, + MPQ7920_BUCK3, + MPQ7920_BUCK4, + MPQ7920_LDO1, /* LDORTC */ + MPQ7920_LDO2, + MPQ7920_LDO3, + MPQ7920_LDO4, + MPQ7920_LDO5, + MPQ7920_MAX_REGULATORS, +}; + +struct mpq7920_regulator_info { + struct regmap *regmap; + struct regulator_desc *rdesc; +}; + +static const struct regmap_config mpq7920_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x25, +}; + +/* Current limits array (in uA) + * ILIM1 & ILIM3 + */ +static const unsigned int mpq7920_I_limits1[] = { + 4600000, 6600000, 7600000, 9300000 +}; + +/* ILIM2 & ILIM4 */ +static const unsigned int mpq7920_I_limits2[] = { + 2700000, 3900000, 5100000, 6100000 +}; + +/* LDO4 & LDO5 */ +static const unsigned int mpq7920_I_limits3[] = { + 300000, 700000 +}; + +static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay); +static int mpq7920_parse_cb(struct device_node *np, + const struct regulator_desc *rdesc, + struct regulator_config *config); + +/* RTCLDO not controllable, always ON */ +static const struct regulator_ops mpq7920_ldortc_ops = { + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, +}; + +static const struct regulator_ops mpq7920_ldo_wo_current_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = 
regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, +}; + +static const struct regulator_ops mpq7920_ldo_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .get_current_limit = regulator_get_current_limit_regmap, + .set_current_limit = regulator_set_current_limit_regmap, +}; + +static const struct regulator_ops mpq7920_buck_ops = { + .enable = regulator_enable_regmap, + .disable = regulator_disable_regmap, + .is_enabled = regulator_is_enabled_regmap, + .list_voltage = regulator_list_voltage_linear, + .map_voltage = regulator_map_voltage_linear, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, + .set_active_discharge = regulator_set_active_discharge_regmap, + .set_soft_start = regulator_set_soft_start_regmap, + .set_ramp_delay = mpq7920_set_ramp_delay, +}; + +static struct regulator_desc mpq7920_regulators_desc[MPQ7920_MAX_REGULATORS] = { + MPQ7920BUCK("buck1", 1, mpq7920_I_limits1), + MPQ7920BUCK("buck2", 2, mpq7920_I_limits2), + MPQ7920BUCK("buck3", 3, mpq7920_I_limits1), + MPQ7920BUCK("buck4", 4, mpq7920_I_limits2), + MPQ7920LDO("ldortc", 1, &mpq7920_ldortc_ops, NULL, 0, 0, 0), + MPQ7920LDO("ldo2", 2, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0), + MPQ7920LDO("ldo3", 3, &mpq7920_ldo_wo_current_ops, NULL, 0, 0, 0), + MPQ7920LDO("ldo4", 4, &mpq7920_ldo_ops, mpq7920_I_limits3, + ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO4_REG_B, + MPQ7920_MASK_LDO_ILIM), + MPQ7920LDO("ldo5", 5, &mpq7920_ldo_ops, mpq7920_I_limits3, + ARRAY_SIZE(mpq7920_I_limits3), MPQ7920_LDO5_REG_B, + MPQ7920_MASK_LDO_ILIM), +}; + +/* + * DVS ramp rate BUCK1 to BUCK4 + * 00-01: Reserved + * 10: 8mV/us + * 11: 4mV/us + */ +static int mpq7920_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) +{ + unsigned int ramp_val; + + if (ramp_delay > 8000 || ramp_delay < 0) + return -EINVAL; + + if (ramp_delay <= 4000) + ramp_val = 3; + else + ramp_val = 2; + + return regmap_update_bits(rdev->regmap, MPQ7920_REG_CTL0, + MPQ7920_MASK_DVS_SLEWRATE, ramp_val << 6); +} + +static int mpq7920_parse_cb(struct device_node *np, + const struct regulator_desc *desc, + struct regulator_config *config) +{ + uint8_t val; + int ret; + struct mpq7920_regulator_info *info = config->driver_data; + struct regulator_desc *rdesc = &info->rdesc[desc->id]; + + if (of_property_read_bool(np, "mps,buck-ovp-disable")) { + regmap_update_bits(config->regmap, + MPQ7920_BUCK1_REG_B + (rdesc->id * 4), + MPQ7920_MASK_OVP, MPQ7920_OVP_DISABLE); + } + + ret = of_property_read_u8(np, "mps,buck-phase-delay", &val); + if (!ret) { + regmap_update_bits(config->regmap, + MPQ7920_BUCK1_REG_C + (rdesc->id * 4), + MPQ7920_MASK_BUCK_PHASE_DEALY, + (val & 3) << 4); + } + + ret = of_property_read_u8(np, "mps,buck-softstart", &val); + if (!ret) + rdesc->soft_start_val_on = (val & 3) << 2; + + return 0; +} + +static void mpq7920_parse_dt(struct device *dev, + struct mpq7920_regulator_info *info) +{ + int ret; + struct device_node *np = dev->of_node; + uint8_t freq; + + np = of_get_child_by_name(np, "regulators"); + if (!np) { + dev_err(dev, "missing 'regulators' subnode in DT\n"); + return; + } + + ret = 
of_property_read_u8(np, "mps,switch-freq", &freq); + if (!ret) { + regmap_update_bits(info->regmap, MPQ7920_REG_CTL0, + MPQ7920_MASK_SWITCH_FREQ, + (freq & 3) << 4); + } + + of_node_put(np); +} + +static int mpq7920_i2c_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct mpq7920_regulator_info *info; + struct regulator_config config = { NULL, }; + struct regulator_dev *rdev; + struct regmap *regmap; + int i; + + info = devm_kzalloc(dev, sizeof(struct mpq7920_regulator_info), + GFP_KERNEL); + if (!info) + return -ENOMEM; + + info->rdesc = mpq7920_regulators_desc; + regmap = devm_regmap_init_i2c(client, &mpq7920_regmap_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to allocate regmap!\n"); + return PTR_ERR(regmap); + } + + i2c_set_clientdata(client, info); + info->regmap = regmap; + if (client->dev.of_node) + mpq7920_parse_dt(&client->dev, info); + + config.dev = dev; + config.regmap = regmap; + config.driver_data = info; + + for (i = 0; i < MPQ7920_MAX_REGULATORS; i++) { + rdev = devm_regulator_register(dev, + &mpq7920_regulators_desc[i], + &config); + if (IS_ERR(rdev)) { + dev_err(dev, "Failed to register regulator!\n"); + return PTR_ERR(rdev); + } + } + + return 0; +} + +static const struct of_device_id mpq7920_of_match[] = { + { .compatible = "mps,mpq7920"}, + {}, +}; +MODULE_DEVICE_TABLE(of, mpq7920_of_match); + +static const struct i2c_device_id mpq7920_id[] = { + { "mpq7920", }, + { }, +}; +MODULE_DEVICE_TABLE(i2c, mpq7920_id); + +static struct i2c_driver mpq7920_regulator_driver = { + .driver = { + .name = "mpq7920", + .of_match_table = of_match_ptr(mpq7920_of_match), + }, + .probe_new = mpq7920_i2c_probe, + .id_table = mpq7920_id, +}; +module_i2c_driver(mpq7920_regulator_driver); + +MODULE_AUTHOR("Saravanan Sekar <sravanhome@gmail.com>"); +MODULE_DESCRIPTION("MPQ7920 PMIC regulator driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/mpq7920.h b/drivers/regulator/mpq7920.h new file mode 100644 index 000000000000..489924655a96 --- /dev/null +++ b/drivers/regulator/mpq7920.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * mpq7920.h - Regulator definitions for mpq7920 + * + * Copyright 2019 Monolithic Power Systems, Inc + * + */ + +#ifndef __MPQ7920_H__ +#define __MPQ7920_H__ + +#define MPQ7920_REG_CTL0 0x00 +#define MPQ7920_REG_CTL1 0x01 +#define MPQ7920_REG_CTL2 0x02 +#define MPQ7920_BUCK1_REG_A 0x03 +#define MPQ7920_BUCK1_REG_B 0x04 +#define MPQ7920_BUCK1_REG_C 0x05 +#define MPQ7920_BUCK1_REG_D 0x06 +#define MPQ7920_BUCK2_REG_A 0x07 +#define MPQ7920_BUCK2_REG_B 0x08 +#define MPQ7920_BUCK2_REG_C 0x09 +#define MPQ7920_BUCK2_REG_D 0x0a +#define MPQ7920_BUCK3_REG_A 0x0b +#define MPQ7920_BUCK3_REG_B 0x0c +#define MPQ7920_BUCK3_REG_C 0x0d +#define MPQ7920_BUCK3_REG_D 0x0e +#define MPQ7920_BUCK4_REG_A 0x0f +#define MPQ7920_BUCK4_REG_B 0x10 +#define MPQ7920_BUCK4_REG_C 0x11 +#define MPQ7920_BUCK4_REG_D 0x12 +#define MPQ7920_LDO1_REG_A 0x13 +#define MPQ7920_LDO1_REG_B 0x0 +#define MPQ7920_LDO2_REG_A 0x14 +#define MPQ7920_LDO2_REG_B 0x15 +#define MPQ7920_LDO2_REG_C 0x16 +#define MPQ7920_LDO3_REG_A 0x17 +#define MPQ7920_LDO3_REG_B 0x18 +#define MPQ7920_LDO3_REG_C 0x19 +#define MPQ7920_LDO4_REG_A 0x1a +#define MPQ7920_LDO4_REG_B 0x1b +#define MPQ7920_LDO4_REG_C 0x1c +#define MPQ7920_LDO5_REG_A 0x1d +#define MPQ7920_LDO5_REG_B 0x1e +#define MPQ7920_LDO5_REG_C 0x1f +#define MPQ7920_REG_MODE 0x20 +#define MPQ7920_REG_REGULATOR_EN 0x22 + +#define MPQ7920_MASK_VREF 0x7f +#define MPQ7920_MASK_BUCK_ILIM 0xc0 +#define 
MPQ7920_MASK_LDO_ILIM BIT(6) +#define MPQ7920_MASK_DISCHARGE BIT(5) +#define MPQ7920_MASK_MODE 0xc0 +#define MPQ7920_MASK_SOFTSTART 0x0c +#define MPQ7920_MASK_SWITCH_FREQ 0x30 +#define MPQ7920_MASK_BUCK_PHASE_DEALY 0x30 +#define MPQ7920_MASK_DVS_SLEWRATE 0xc0 +#define MPQ7920_MASK_OVP 0x40 +#define MPQ7920_OVP_DISABLE ~(0x40) +#define MPQ7920_DISCHARGE_ON BIT(5) + +#define MPQ7920_REGULATOR_EN_OFFSET 7 + +/* values in uV */ +#define MPQ7920_BUCK_VOLT_MIN 400000 +#define MPQ7920_LDO_VOLT_MIN 650000 +#define MPQ7920_VOLT_MAX 3587500 +#define MPQ7920_VOLT_STEP 12500 + +#endif /* __MPQ7920_H__ */ diff --git a/drivers/regulator/mt6311-regulator.c b/drivers/regulator/mt6311-regulator.c index af95449d3590..69e6af3cd505 100644 --- a/drivers/regulator/mt6311-regulator.c +++ b/drivers/regulator/mt6311-regulator.c @@ -85,8 +85,7 @@ static const struct regulator_desc mt6311_regulators[] = { /* * I2C driver interface functions */ -static int mt6311_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int mt6311_i2c_probe(struct i2c_client *i2c) { struct regulator_config config = { }; struct regulator_dev *rdev; @@ -154,7 +153,7 @@ static struct i2c_driver mt6311_regulator_driver = { .name = "mt6311", .of_match_table = of_match_ptr(mt6311_dt_ids), }, - .probe = mt6311_i2c_probe, + .probe_new = mt6311_i2c_probe, .id_table = mt6311_i2c_id, }; diff --git a/drivers/regulator/pv88060-regulator.c b/drivers/regulator/pv88060-regulator.c index 3d3415839ba2..787ced918372 100644 --- a/drivers/regulator/pv88060-regulator.c +++ b/drivers/regulator/pv88060-regulator.c @@ -279,8 +279,7 @@ error_i2c: /* * I2C driver interface functions */ -static int pv88060_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int pv88060_i2c_probe(struct i2c_client *i2c) { struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev); struct pv88060 *chip; @@ -385,7 +384,7 @@ static struct i2c_driver pv88060_regulator_driver = { .name = "pv88060", .of_match_table = of_match_ptr(pv88060_dt_ids), }, - .probe = pv88060_i2c_probe, + .probe_new = pv88060_i2c_probe, .id_table = pv88060_i2c_id, }; diff --git a/drivers/regulator/pv88090-regulator.c b/drivers/regulator/pv88090-regulator.c index b1d0d97ae935..784729ec2182 100644 --- a/drivers/regulator/pv88090-regulator.c +++ b/drivers/regulator/pv88090-regulator.c @@ -272,8 +272,7 @@ error_i2c: /* * I2C driver interface functions */ -static int pv88090_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int pv88090_i2c_probe(struct i2c_client *i2c) { struct regulator_init_data *init_data = dev_get_platdata(&i2c->dev); struct pv88090 *chip; @@ -406,7 +405,7 @@ static struct i2c_driver pv88090_regulator_driver = { .name = "pv88090", .of_match_table = of_match_ptr(pv88090_dt_ids), }, - .probe = pv88090_i2c_probe, + .probe_new = pv88090_i2c_probe, .id_table = pv88090_i2c_id, }; diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 5b4003226484..31f79fda3238 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -1282,7 +1282,7 @@ static int rk808_regulator_dt_parse_pdata(struct device *dev, } if (!pdata->dvs_gpio[i]) { - dev_warn(dev, "there is no dvs%d gpio\n", i); + dev_info(dev, "there is no dvs%d gpio\n", i); continue; } diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c index 51f7e8b74d8c..115f59530852 100644 --- a/drivers/regulator/s2mpa01.c +++ b/drivers/regulator/s2mpa01.c @@ -390,5 +390,5 @@
module_platform_driver(s2mpa01_pmic_driver); /* Module information */ MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); MODULE_AUTHOR("Sachin Kamat <sachin.kamat@samsung.com>"); -MODULE_DESCRIPTION("SAMSUNG S2MPA01 Regulator Driver"); +MODULE_DESCRIPTION("Samsung S2MPA01 Regulator Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c index 4f2dc5ebffdc..23d288278957 100644 --- a/drivers/regulator/s2mps11.c +++ b/drivers/regulator/s2mps11.c @@ -1265,5 +1265,5 @@ module_platform_driver(s2mps11_pmic_driver); /* Module information */ MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); -MODULE_DESCRIPTION("SAMSUNG S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver"); +MODULE_DESCRIPTION("Samsung S2MPS11/S2MPS14/S2MPS15/S2MPU02 Regulator Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c index 12d6b8d2e97e..4abd3ed31f60 100644 --- a/drivers/regulator/s5m8767.c +++ b/drivers/regulator/s5m8767.c @@ -1015,5 +1015,5 @@ module_exit(s5m8767_pmic_exit); /* Module information */ MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>"); -MODULE_DESCRIPTION("SAMSUNG S5M8767 Regulator Driver"); +MODULE_DESCRIPTION("Samsung S5M8767 Regulator Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/slg51000-regulator.c b/drivers/regulator/slg51000-regulator.c index bf1a3508ebc4..44e4cecbf6de 100644 --- a/drivers/regulator/slg51000-regulator.c +++ b/drivers/regulator/slg51000-regulator.c @@ -439,8 +439,7 @@ static void slg51000_clear_fault_log(struct slg51000 *chip) dev_dbg(chip->dev, "Fault log: FLT_POR\n"); } -static int slg51000_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int slg51000_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct slg51000 *chip; @@ -509,7 +508,7 @@ static struct i2c_driver slg51000_regulator_driver = { .driver = { .name = "slg51000-regulator", }, - .probe = slg51000_i2c_probe, + .probe_new = slg51000_i2c_probe, .id_table = slg51000_i2c_id, }; diff --git a/drivers/regulator/sy8106a-regulator.c b/drivers/regulator/sy8106a-regulator.c index 42e03b2c10a0..2222e739e62b 100644 --- a/drivers/regulator/sy8106a-regulator.c +++ b/drivers/regulator/sy8106a-regulator.c @@ -61,8 +61,7 @@ static const struct regulator_desc sy8106a_reg = { /* * I2C driver interface functions */ -static int sy8106a_i2c_probe(struct i2c_client *i2c, - const struct i2c_device_id *id) +static int sy8106a_i2c_probe(struct i2c_client *i2c) { struct device *dev = &i2c->dev; struct regulator_dev *rdev; @@ -141,7 +140,7 @@ static struct i2c_driver sy8106a_regulator_driver = { .name = "sy8106a", .of_match_table = of_match_ptr(sy8106a_i2c_of_match), }, - .probe = sy8106a_i2c_probe, + .probe_new = sy8106a_i2c_probe, .id_table = sy8106a_i2c_id, }; diff --git a/drivers/regulator/sy8824x.c b/drivers/regulator/sy8824x.c index 92adb4f3ee19..62d243f3b904 100644 --- a/drivers/regulator/sy8824x.c +++ b/drivers/regulator/sy8824x.c @@ -112,8 +112,7 @@ static const struct regmap_config sy8824_regmap_config = { .val_bits = 8, }; -static int sy8824_i2c_probe(struct i2c_client *client, - const struct i2c_device_id *id) +static int sy8824_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct device_node *np = dev->of_node; @@ -222,7 +221,7 @@ static struct i2c_driver sy8824_regulator_driver = { .name = "sy8824-regulator", .of_match_table = of_match_ptr(sy8824_dt_ids), }, - .probe = sy8824_i2c_probe, + .probe_new = sy8824_i2c_probe, .id_table = 
sy8824_id, }; module_i2c_driver(sy8824_regulator_driver); diff --git a/drivers/regulator/tps65132-regulator.c b/drivers/regulator/tps65132-regulator.c index 7b0e38f8d627..0edc83089ba2 100644 --- a/drivers/regulator/tps65132-regulator.c +++ b/drivers/regulator/tps65132-regulator.c @@ -220,8 +220,7 @@ static const struct regmap_config tps65132_regmap_config = { .wr_table = &tps65132_no_reg_table, }; -static int tps65132_probe(struct i2c_client *client, - const struct i2c_device_id *client_id) +static int tps65132_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct tps65132_regulator *tps; @@ -272,7 +271,7 @@ static struct i2c_driver tps65132_i2c_driver = { .driver = { .name = "tps65132", }, - .probe = tps65132_probe, + .probe_new = tps65132_probe, .id_table = tps65132_id, }; diff --git a/drivers/regulator/vctrl-regulator.c b/drivers/regulator/vctrl-regulator.c index 9a9ee8188109..cbadb1c99679 100644 --- a/drivers/regulator/vctrl-regulator.c +++ b/drivers/regulator/vctrl-regulator.c @@ -11,10 +11,13 @@ #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/regulator/coupler.h> #include <linux/regulator/driver.h> #include <linux/regulator/of_regulator.h> #include <linux/sort.h> +#include "internal.h" + struct vctrl_voltage_range { int min_uV; int max_uV; @@ -79,7 +82,7 @@ static int vctrl_calc_output_voltage(struct vctrl_data *vctrl, int ctrl_uV) static int vctrl_get_voltage(struct regulator_dev *rdev) { struct vctrl_data *vctrl = rdev_get_drvdata(rdev); - int ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg); + int ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev); return vctrl_calc_output_voltage(vctrl, ctrl_uV); } @@ -90,16 +93,16 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, { struct vctrl_data *vctrl = rdev_get_drvdata(rdev); struct regulator *ctrl_reg = vctrl->ctrl_reg; - int orig_ctrl_uV = regulator_get_voltage(ctrl_reg); + int orig_ctrl_uV = regulator_get_voltage_rdev(ctrl_reg->rdev); int uV = vctrl_calc_output_voltage(vctrl, orig_ctrl_uV); int ret; if (req_min_uV >= uV || !vctrl->ovp_threshold) /* voltage rising or no OVP */ - return regulator_set_voltage( - ctrl_reg, + return regulator_set_voltage_rdev(ctrl_reg->rdev, vctrl_calc_ctrl_voltage(vctrl, req_min_uV), - vctrl_calc_ctrl_voltage(vctrl, req_max_uV)); + vctrl_calc_ctrl_voltage(vctrl, req_max_uV), + PM_SUSPEND_ON); while (uV > req_min_uV) { int max_drop_uV = (uV * vctrl->ovp_threshold) / 100; @@ -114,9 +117,10 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, next_uV = max_t(int, req_min_uV, uV - max_drop_uV); next_ctrl_uV = vctrl_calc_ctrl_voltage(vctrl, next_uV); - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, + next_ctrl_uV, next_ctrl_uV, - next_ctrl_uV); + PM_SUSPEND_ON); if (ret) goto err; @@ -130,7 +134,8 @@ static int vctrl_set_voltage(struct regulator_dev *rdev, err: /* Try to go back to original voltage */ - regulator_set_voltage(ctrl_reg, orig_ctrl_uV, orig_ctrl_uV); + regulator_set_voltage_rdev(ctrl_reg->rdev, orig_ctrl_uV, orig_ctrl_uV, + PM_SUSPEND_ON); return ret; } @@ -155,9 +160,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, if (selector >= vctrl->sel || !vctrl->ovp_threshold) { /* voltage rising or no OVP */ - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, + vctrl->vtable[selector].ctrl, vctrl->vtable[selector].ctrl, - vctrl->vtable[selector].ctrl); + PM_SUSPEND_ON); if (!ret) vctrl->sel = selector; @@ -173,9 
+179,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, else next_sel = vctrl->vtable[vctrl->sel].ovp_min_sel; - ret = regulator_set_voltage(ctrl_reg, + ret = regulator_set_voltage_rdev(ctrl_reg->rdev, vctrl->vtable[next_sel].ctrl, - vctrl->vtable[next_sel].ctrl); + vctrl->vtable[next_sel].ctrl, + PM_SUSPEND_ON); if (ret) { dev_err(&rdev->dev, "failed to set control voltage to %duV\n", @@ -195,9 +202,10 @@ static int vctrl_set_voltage_sel(struct regulator_dev *rdev, err: if (vctrl->sel != orig_sel) { /* Try to go back to original voltage */ - if (!regulator_set_voltage(ctrl_reg, + if (!regulator_set_voltage_rdev(ctrl_reg->rdev, + vctrl->vtable[orig_sel].ctrl, vctrl->vtable[orig_sel].ctrl, - vctrl->vtable[orig_sel].ctrl)) + PM_SUSPEND_ON)) vctrl->sel = orig_sel; else dev_warn(&rdev->dev, @@ -482,7 +490,7 @@ static int vctrl_probe(struct platform_device *pdev) if (ret) return ret; - ctrl_uV = regulator_get_voltage(vctrl->ctrl_reg); + ctrl_uV = regulator_get_voltage_rdev(vctrl->ctrl_reg->rdev); if (ctrl_uV < 0) { dev_err(&pdev->dev, "failed to get control voltage\n"); return ctrl_uV; diff --git a/drivers/regulator/vqmmc-ipq4019-regulator.c b/drivers/regulator/vqmmc-ipq4019-regulator.c new file mode 100644 index 000000000000..6d5ae25d08d1 --- /dev/null +++ b/drivers/regulator/vqmmc-ipq4019-regulator.c @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: GPL-2.0+ +// +// Copyright (c) 2019 Mantas Pucka <mantas@8devices.com> +// Copyright (c) 2019 Robert Marko <robert.marko@sartura.hr> +// +// Driver for IPQ4019 SD/MMC controller's I/O LDO voltage regulator + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/regulator/driver.h> +#include <linux/regulator/machine.h> +#include <linux/regulator/of_regulator.h> + +static const unsigned int ipq4019_vmmc_voltages[] = { + 1500000, 1800000, 2500000, 3000000, +}; + +static const struct regulator_ops ipq4019_regulator_voltage_ops = { + .list_voltage = regulator_list_voltage_table, + .map_voltage = regulator_map_voltage_ascend, + .get_voltage_sel = regulator_get_voltage_sel_regmap, + .set_voltage_sel = regulator_set_voltage_sel_regmap, +}; + +static const struct regulator_desc vmmc_regulator = { + .name = "vmmcq", + .ops = &ipq4019_regulator_voltage_ops, + .type = REGULATOR_VOLTAGE, + .owner = THIS_MODULE, + .volt_table = ipq4019_vmmc_voltages, + .n_voltages = ARRAY_SIZE(ipq4019_vmmc_voltages), + .vsel_reg = 0, + .vsel_mask = 0x3, +}; + +static const struct regmap_config ipq4019_vmmcq_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, +}; + +static int ipq4019_regulator_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct regulator_init_data *init_data; + struct regulator_config cfg = {}; + struct regulator_dev *rdev; + struct resource *res; + struct regmap *rmap; + void __iomem *base; + + init_data = of_get_regulator_init_data(dev, dev->of_node, + &vmmc_regulator); + if (!init_data) + return -EINVAL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + base = devm_ioremap_resource(dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); + + rmap = devm_regmap_init_mmio(dev, base, &ipq4019_vmmcq_regmap_config); + if (IS_ERR(rmap)) + return PTR_ERR(rmap); + + cfg.dev = dev; + cfg.init_data = init_data; + cfg.of_node = dev->of_node; + cfg.regmap = rmap; + + rdev = devm_regulator_register(dev, &vmmc_regulator, &cfg); + if (IS_ERR(rdev)) { + dev_err(dev, "Failed to register regulator: %ld\n", 
+ PTR_ERR(rdev)); + return PTR_ERR(rdev); + } + platform_set_drvdata(pdev, rdev); + + return 0; +} + +static const struct of_device_id regulator_ipq4019_of_match[] = { + { .compatible = "qcom,vqmmc-ipq4019-regulator", }, + {}, +}; + +static struct platform_driver ipq4019_regulator_driver = { + .probe = ipq4019_regulator_probe, + .driver = { + .name = "vqmmc-ipq4019-regulator", + .of_match_table = of_match_ptr(regulator_ipq4019_of_match), + }, +}; +module_platform_driver(ipq4019_regulator_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Mantas Pucka <mantas@8devices.com>"); +MODULE_DESCRIPTION("IPQ4019 VQMMC voltage regulator"); diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index 8ef150dfb6f7..b60795893994 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -439,6 +439,9 @@ static int fnic_queuecommand_lck(struct scsi_cmnd *sc, void (*done)(struct scsi_ if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_IO_BLOCKED))) return SCSI_MLQUEUE_HOST_BUSY; + if (unlikely(fnic_chk_state_flags_locked(fnic, FNIC_FLAGS_FWRESET))) + return SCSI_MLQUEUE_HOST_BUSY; + rport = starget_to_rport(scsi_target(sc->device)); if (!rport) { FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host, diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 65ce10c7989c..902b649fc8ef 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2958,15 +2958,16 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) q->limits.zoned = BLK_ZONED_HM; } else { sdkp->zoned = (buffer[8] >> 4) & 3; - if (sdkp->zoned == 1) + if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) { /* Host-aware */ q->limits.zoned = BLK_ZONED_HA; - else + } else { /* - * Treat drive-managed devices as - * regular block devices. + * Treat drive-managed devices and host-aware devices + * with partitions as regular block devices. */ q->limits.zoned = BLK_ZONED_NONE; + } } if (blk_queue_is_zoned(q) && sdkp->first_scan) sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", diff --git a/drivers/soc/ti/Kconfig b/drivers/soc/ti/Kconfig index cf545f428d03..4486e055794c 100644 --- a/drivers/soc/ti/Kconfig +++ b/drivers/soc/ti/Kconfig @@ -80,6 +80,17 @@ config TI_SCI_PM_DOMAINS called ti_sci_pm_domains. Note this is needed early in boot before rootfs may be available. +config TI_K3_RINGACC + bool "K3 Ring accelerator Sub System" + depends on ARCH_K3 || COMPILE_TEST + depends on TI_SCI_INTA_IRQCHIP + help + Say y here to support the K3 Ring accelerator module. + The Ring Accelerator (RINGACC or RA) provides hardware acceleration + to enable straightforward passing of work between a producer + and a consumer. There is one RINGACC module per NAVSS on TI AM65x SoCs. + If unsure, say N.
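The Kconfig text above describes the RINGACC producer/consumer model; the driver implementing it (k3-ringacc.c, added below) exports a small request/configure/push/pop API. The following is a minimal, hypothetical sketch of a client of that API: the "ti,ringacc" property name, the 8-byte payload, and the function name are illustrative assumptions, and error handling is pared down.

#include <linux/err.h>
#include <linux/of.h>
#include <linux/soc/ti/k3-ringacc.h>

static int my_ring_demo(struct device_node *np)
{
	struct k3_ring_cfg cfg = {
		.size = 128,				/* ring size in elements */
		.elm_size = K3_RINGACC_RING_ELSIZE_8,	/* 8-byte elements */
		.mode = K3_RINGACC_RING_MODE_RING,	/* software-visible ring mode */
	};
	u64 elem = 0;					/* producer payload (illustrative) */
	struct k3_ringacc *ra;
	struct k3_ring *ring;
	int ret;

	ra = of_k3_ringacc_get_by_phandle(np, "ti,ringacc"); /* assumed property name */
	if (IS_ERR(ra))
		return PTR_ERR(ra);

	ring = k3_ringacc_request_ring(ra, K3_RINGACC_RING_ID_ANY, 0);
	if (!ring)
		return -ENODEV;

	/* Allocates ring memory and programs the ring through TI-SCI */
	ret = k3_ringacc_ring_cfg(ring, &cfg);
	if (ret)
		goto out_free;

	ret = k3_ringacc_ring_push(ring, &elem);	/* producer: tail push, rings doorbell */
	if (!ret)
		ret = k3_ringacc_ring_pop(ring, &elem);	/* consumer: head pop */

out_free:
	k3_ringacc_ring_free(ring);
	return ret;
}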
+ endif # SOC_TI config TI_SCI_INTA_MSI_DOMAIN diff --git a/drivers/soc/ti/Makefile b/drivers/soc/ti/Makefile index 788b5cd1e180..bec827937a5f 100644 --- a/drivers/soc/ti/Makefile +++ b/drivers/soc/ti/Makefile @@ -10,3 +10,4 @@ obj-$(CONFIG_ARCH_OMAP2PLUS) += omap_prm.o obj-$(CONFIG_WKUP_M3_IPC) += wkup_m3_ipc.o obj-$(CONFIG_TI_SCI_PM_DOMAINS) += ti_sci_pm_domains.o obj-$(CONFIG_TI_SCI_INTA_MSI_DOMAIN) += ti_sci_inta_msi.o +obj-$(CONFIG_TI_K3_RINGACC) += k3-ringacc.o diff --git a/drivers/soc/ti/k3-ringacc.c b/drivers/soc/ti/k3-ringacc.c new file mode 100644 index 000000000000..5fb2ee2ac978 --- /dev/null +++ b/drivers/soc/ti/k3-ringacc.c @@ -0,0 +1,1157 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * TI K3 NAVSS Ring Accelerator subsystem driver + * + * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com + */ + +#include <linux/dma-mapping.h> +#include <linux/io.h> +#include <linux/init.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/soc/ti/k3-ringacc.h> +#include <linux/soc/ti/ti_sci_protocol.h> +#include <linux/soc/ti/ti_sci_inta_msi.h> +#include <linux/of_irq.h> +#include <linux/irqdomain.h> + +static LIST_HEAD(k3_ringacc_list); +static DEFINE_MUTEX(k3_ringacc_list_lock); + +#define K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK GENMASK(19, 0) + +/** + * struct k3_ring_rt_regs - The RA realtime Control/Status Registers region + * + * @resv_16: Reserved + * @db: Ring Doorbell Register + * @resv_4: Reserved + * @occ: Ring Occupancy Register + * @indx: Ring Current Index Register + * @hwocc: Ring Hardware Occupancy Register + * @hwindx: Ring Hardware Current Index Register + */ +struct k3_ring_rt_regs { + u32 resv_16[4]; + u32 db; + u32 resv_4[1]; + u32 occ; + u32 indx; + u32 hwocc; + u32 hwindx; +}; + +#define K3_RINGACC_RT_REGS_STEP 0x1000 + +/** + * struct k3_ring_fifo_regs - The Ring Accelerator Queues Registers region + * + * @head_data: Ring Head Entry Data Registers + * @tail_data: Ring Tail Entry Data Registers + * @peek_head_data: Ring Peek Head Entry Data Regs + * @peek_tail_data: Ring Peek Tail Entry Data Regs + */ +struct k3_ring_fifo_regs { + u32 head_data[128]; + u32 tail_data[128]; + u32 peek_head_data[128]; + u32 peek_tail_data[128]; +}; + +/** + * struct k3_ringacc_proxy_gcfg_regs - RA Proxy Global Config MMIO Region + * + * @revision: Revision Register + * @config: Config Register + */ +struct k3_ringacc_proxy_gcfg_regs { + u32 revision; + u32 config; +}; + +#define K3_RINGACC_PROXY_CFG_THREADS_MASK GENMASK(15, 0) + +/** + * struct k3_ringacc_proxy_target_regs - Proxy Datapath MMIO Region + * + * @control: Proxy Control Register + * @status: Proxy Status Register + * @resv_512: Reserved + * @data: Proxy Data Register + */ +struct k3_ringacc_proxy_target_regs { + u32 control; + u32 status; + u8 resv_512[504]; + u32 data[128]; +}; + +#define K3_RINGACC_PROXY_TARGET_STEP 0x1000 +#define K3_RINGACC_PROXY_NOT_USED (-1) + +enum k3_ringacc_proxy_access_mode { + PROXY_ACCESS_MODE_HEAD = 0, + PROXY_ACCESS_MODE_TAIL = 1, + PROXY_ACCESS_MODE_PEEK_HEAD = 2, + PROXY_ACCESS_MODE_PEEK_TAIL = 3, +}; + +#define K3_RINGACC_FIFO_WINDOW_SIZE_BYTES (512U) +#define K3_RINGACC_FIFO_REGS_STEP 0x1000 +#define K3_RINGACC_MAX_DB_RING_CNT (127U) + +struct k3_ring_ops { + int (*push_tail)(struct k3_ring *ring, void *elm); + int (*push_head)(struct k3_ring *ring, void *elm); + int (*pop_tail)(struct k3_ring *ring, void *elm); + int (*pop_head)(struct k3_ring *ring, void *elm); +}; + +/** + * struct k3_ring - RA Ring descriptor + * + * @rt: Ring control/status 
registers + * @fifos: Ring queues registers + * @proxy: Ring Proxy Datapath registers + * @ring_mem_dma: Ring buffer dma address + * @ring_mem_virt: Ring buffer virt address + * @ops: Ring operations + * @size: Ring size in elements + * @elm_size: Size of the ring element + * @mode: Ring mode + * @flags: flags + * @free: Number of free elements + * @occ: Ring occupancy + * @windex: Write index (only for @K3_RINGACC_RING_MODE_RING) + * @rindex: Read index (only for @K3_RINGACC_RING_MODE_RING) + * @ring_id: Ring Id + * @parent: Pointer on struct @k3_ringacc + * @use_count: Use count for shared rings + * @proxy_id: RA Ring Proxy Id (only if @K3_RINGACC_RING_USE_PROXY) + */ +struct k3_ring { + struct k3_ring_rt_regs __iomem *rt; + struct k3_ring_fifo_regs __iomem *fifos; + struct k3_ringacc_proxy_target_regs __iomem *proxy; + dma_addr_t ring_mem_dma; + void *ring_mem_virt; + struct k3_ring_ops *ops; + u32 size; + enum k3_ring_size elm_size; + enum k3_ring_mode mode; + u32 flags; +#define K3_RING_FLAG_BUSY BIT(1) +#define K3_RING_FLAG_SHARED BIT(2) + u32 free; + u32 occ; + u32 windex; + u32 rindex; + u32 ring_id; + struct k3_ringacc *parent; + u32 use_count; + int proxy_id; +}; + +/** + * struct k3_ringacc - Rings accelerator descriptor + * + * @dev: pointer on RA device + * @proxy_gcfg: RA proxy global config registers + * @proxy_target_base: RA proxy datapath region + * @num_rings: number of ring in RA + * @rings_inuse: bitfield for ring usage tracking + * @rm_gp_range: general purpose rings range from tisci + * @dma_ring_reset_quirk: DMA reset w/a enable + * @num_proxies: number of RA proxies + * @proxy_inuse: bitfield for proxy usage tracking + * @rings: array of rings descriptors (struct @k3_ring) + * @list: list of RAs in the system + * @req_lock: protect rings allocation + * @tisci: pointer ti-sci handle + * @tisci_ring_ops: ti-sci rings ops + * @tisci_dev_id: ti-sci device id + */ +struct k3_ringacc { + struct device *dev; + struct k3_ringacc_proxy_gcfg_regs __iomem *proxy_gcfg; + void __iomem *proxy_target_base; + u32 num_rings; /* number of rings in Ringacc module */ + unsigned long *rings_inuse; + struct ti_sci_resource *rm_gp_range; + + bool dma_ring_reset_quirk; + u32 num_proxies; + unsigned long *proxy_inuse; + + struct k3_ring *rings; + struct list_head list; + struct mutex req_lock; /* protect rings allocation */ + + const struct ti_sci_handle *tisci; + const struct ti_sci_rm_ringacc_ops *tisci_ring_ops; + u32 tisci_dev_id; +}; + +static long k3_ringacc_ring_get_fifo_pos(struct k3_ring *ring) +{ + return K3_RINGACC_FIFO_WINDOW_SIZE_BYTES - + (4 << ring->elm_size); +} + +static void *k3_ringacc_get_elm_addr(struct k3_ring *ring, u32 idx) +{ + return (ring->ring_mem_virt + idx * (4 << ring->elm_size)); +} + +static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem); + +static struct k3_ring_ops k3_ring_mode_ring_ops = { + .push_tail = k3_ringacc_ring_push_mem, + .pop_head = k3_ringacc_ring_pop_mem, +}; + +static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem); + +static struct k3_ring_ops k3_ring_mode_msg_ops = { + .push_tail = k3_ringacc_ring_push_io, + .push_head = k3_ringacc_ring_push_head_io, + .pop_tail = k3_ringacc_ring_pop_tail_io, + .pop_head = 
k3_ringacc_ring_pop_io, +}; + +static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem); +static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem); + +static struct k3_ring_ops k3_ring_mode_proxy_ops = { + .push_tail = k3_ringacc_ring_push_tail_proxy, + .push_head = k3_ringacc_ring_push_head_proxy, + .pop_tail = k3_ringacc_ring_pop_tail_proxy, + .pop_head = k3_ringacc_ring_pop_head_proxy, +}; + +static void k3_ringacc_ring_dump(struct k3_ring *ring) +{ + struct device *dev = ring->parent->dev; + + dev_dbg(dev, "dump ring: %d\n", ring->ring_id); + dev_dbg(dev, "dump mem virt %p, dma %pad\n", ring->ring_mem_virt, + &ring->ring_mem_dma); + dev_dbg(dev, "dump elmsize %d, size %d, mode %d, proxy_id %d\n", + ring->elm_size, ring->size, ring->mode, ring->proxy_id); + + dev_dbg(dev, "dump ring_rt_regs: db%08x\n", readl(&ring->rt->db)); + dev_dbg(dev, "dump occ%08x\n", readl(&ring->rt->occ)); + dev_dbg(dev, "dump indx%08x\n", readl(&ring->rt->indx)); + dev_dbg(dev, "dump hwocc%08x\n", readl(&ring->rt->hwocc)); + dev_dbg(dev, "dump hwindx%08x\n", readl(&ring->rt->hwindx)); + + if (ring->ring_mem_virt) + print_hex_dump_debug("dump ring_mem_virt ", DUMP_PREFIX_NONE, + 16, 1, ring->ring_mem_virt, 16 * 8, false); +} + +struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc, + int id, u32 flags) +{ + int proxy_id = K3_RINGACC_PROXY_NOT_USED; + + mutex_lock(&ringacc->req_lock); + + if (id == K3_RINGACC_RING_ID_ANY) { + /* Request for any general purpose ring */ + struct ti_sci_resource_desc *gp_rings = + &ringacc->rm_gp_range->desc[0]; + unsigned long size; + + size = gp_rings->start + gp_rings->num; + id = find_next_zero_bit(ringacc->rings_inuse, size, + gp_rings->start); + if (id == size) + goto error; + } else if (id < 0) { + goto error; + } + + if (test_bit(id, ringacc->rings_inuse) && + !(ringacc->rings[id].flags & K3_RING_FLAG_SHARED)) + goto error; + else if (ringacc->rings[id].flags & K3_RING_FLAG_SHARED) + goto out; + + if (flags & K3_RINGACC_RING_USE_PROXY) { + proxy_id = find_next_zero_bit(ringacc->proxy_inuse, + ringacc->num_proxies, 0); + if (proxy_id == ringacc->num_proxies) + goto error; + } + + if (proxy_id != K3_RINGACC_PROXY_NOT_USED) { + set_bit(proxy_id, ringacc->proxy_inuse); + ringacc->rings[id].proxy_id = proxy_id; + dev_dbg(ringacc->dev, "Giving ring#%d proxy#%d\n", id, + proxy_id); + } else { + dev_dbg(ringacc->dev, "Giving ring#%d\n", id); + } + + set_bit(id, ringacc->rings_inuse); +out: + ringacc->rings[id].use_count++; + mutex_unlock(&ringacc->req_lock); + return &ringacc->rings[id]; + +error: + mutex_unlock(&ringacc->req_lock); + return NULL; +} +EXPORT_SYMBOL_GPL(k3_ringacc_request_ring); + +static void k3_ringacc_ring_reset_sci(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_RING_COUNT_VALID, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + ring->size, + 0, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI reset ring fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +void k3_ringacc_ring_reset(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return; + + ring->occ = 0; + ring->free = 0; + ring->rindex = 0; + ring->windex = 0; + + k3_ringacc_ring_reset_sci(ring); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset); + +static 
void k3_ringacc_ring_reconfig_qmode_sci(struct k3_ring *ring, + enum k3_ring_mode mode) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_RING_MODE_VALID, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + 0, + mode, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI reconf qmode fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return; + + if (!ring->parent->dma_ring_reset_quirk) + goto reset; + + if (!occ) + occ = readl(&ring->rt->occ); + + if (occ) { + u32 db_ring_cnt, db_ring_cnt_cur; + + dev_dbg(ring->parent->dev, "%s %u occ: %u\n", __func__, + ring->ring_id, occ); + /* TI-SCI ring reset */ + k3_ringacc_ring_reset_sci(ring); + + /* + * Setup the ring in ring/doorbell mode (if not already in this + * mode) + */ + if (ring->mode != K3_RINGACC_RING_MODE_RING) + k3_ringacc_ring_reconfig_qmode_sci( + ring, K3_RINGACC_RING_MODE_RING); + /* + * Ring the doorbell 2**22 – ringOcc times. + * This will wrap the internal UDMAP ring state occupancy + * counter (which is 21-bits wide) to 0. + */ + db_ring_cnt = (1U << 22) - occ; + + while (db_ring_cnt != 0) { + /* + * Ring the doorbell with the maximum count each + * iteration if possible to minimize the total + * of writes + */ + if (db_ring_cnt > K3_RINGACC_MAX_DB_RING_CNT) + db_ring_cnt_cur = K3_RINGACC_MAX_DB_RING_CNT; + else + db_ring_cnt_cur = db_ring_cnt; + + writel(db_ring_cnt_cur, &ring->rt->db); + db_ring_cnt -= db_ring_cnt_cur; + } + + /* Restore the original ring mode (if not ring mode) */ + if (ring->mode != K3_RINGACC_RING_MODE_RING) + k3_ringacc_ring_reconfig_qmode_sci(ring, ring->mode); + } + +reset: + /* Reset the ring */ + k3_ringacc_ring_reset(ring); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_reset_dma); + +static void k3_ringacc_ring_free_sci(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret; + + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER, + ringacc->tisci_dev_id, + ring->ring_id, + 0, + 0, + 0, + 0, + 0, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI ring free fail (%d) ring_idx %d\n", + ret, ring->ring_id); +} + +int k3_ringacc_ring_free(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc; + + if (!ring) + return -EINVAL; + + ringacc = ring->parent; + + dev_dbg(ring->parent->dev, "flags: 0x%08x\n", ring->flags); + + if (!test_bit(ring->ring_id, ringacc->rings_inuse)) + return -EINVAL; + + mutex_lock(&ringacc->req_lock); + + if (--ring->use_count) + goto out; + + if (!(ring->flags & K3_RING_FLAG_BUSY)) + goto no_init; + + k3_ringacc_ring_free_sci(ring); + + dma_free_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + ring->ring_mem_virt, ring->ring_mem_dma); + ring->flags = 0; + ring->ops = NULL; + if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) { + clear_bit(ring->proxy_id, ringacc->proxy_inuse); + ring->proxy = NULL; + ring->proxy_id = K3_RINGACC_PROXY_NOT_USED; + } + +no_init: + clear_bit(ring->ring_id, ringacc->rings_inuse); + +out: + mutex_unlock(&ringacc->req_lock); + return 0; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_free); + +u32 k3_ringacc_get_ring_id(struct k3_ring *ring) +{ + if (!ring) + return -EINVAL; + + return ring->ring_id; +} +EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_id); + +u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring) +{ + if (!ring) + return -EINVAL; + + return ring->parent->tisci_dev_id; +} 
+EXPORT_SYMBOL_GPL(k3_ringacc_get_tisci_dev_id); + +int k3_ringacc_get_ring_irq_num(struct k3_ring *ring) +{ + int irq_num; + + if (!ring) + return -EINVAL; + + irq_num = ti_sci_inta_msi_get_virq(ring->parent->dev, ring->ring_id); + if (irq_num <= 0) + irq_num = -EINVAL; + return irq_num; +} +EXPORT_SYMBOL_GPL(k3_ringacc_get_ring_irq_num); + +static int k3_ringacc_ring_cfg_sci(struct k3_ring *ring) +{ + struct k3_ringacc *ringacc = ring->parent; + u32 ring_idx; + int ret; + + if (!ringacc->tisci) + return -EINVAL; + + ring_idx = ring->ring_id; + ret = ringacc->tisci_ring_ops->config( + ringacc->tisci, + TI_SCI_MSG_VALUE_RM_ALL_NO_ORDER, + ringacc->tisci_dev_id, + ring_idx, + lower_32_bits(ring->ring_mem_dma), + upper_32_bits(ring->ring_mem_dma), + ring->size, + ring->mode, + ring->elm_size, + 0); + if (ret) + dev_err(ringacc->dev, "TISCI config ring fail (%d) ring_idx %d\n", + ret, ring_idx); + + return ret; +} + +int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg) +{ + struct k3_ringacc *ringacc = ring->parent; + int ret = 0; + + if (!ring || !cfg) + return -EINVAL; + if (cfg->elm_size > K3_RINGACC_RING_ELSIZE_256 || + cfg->mode >= K3_RINGACC_RING_MODE_INVALID || + cfg->size & ~K3_RINGACC_CFG_RING_SIZE_ELCNT_MASK || + !test_bit(ring->ring_id, ringacc->rings_inuse)) + return -EINVAL; + + if (cfg->mode == K3_RINGACC_RING_MODE_MESSAGE && + ring->proxy_id == K3_RINGACC_PROXY_NOT_USED && + cfg->elm_size > K3_RINGACC_RING_ELSIZE_8) { + dev_err(ringacc->dev, + "Message mode must use proxy for %u element size\n", + 4 << ring->elm_size); + return -EINVAL; + } + + /* + * In case of shared ring only the first user (master user) can + * configure the ring. The sequence should be by the client: + * ring = k3_ringacc_request_ring(ringacc, ring_id, 0); # master user + * k3_ringacc_ring_cfg(ring, cfg); # master configuration + * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED); + * k3_ringacc_request_ring(ringacc, ring_id, K3_RING_FLAG_SHARED); + */ + if (ring->use_count != 1) + return 0; + + ring->size = cfg->size; + ring->elm_size = cfg->elm_size; + ring->mode = cfg->mode; + ring->occ = 0; + ring->free = 0; + ring->rindex = 0; + ring->windex = 0; + + if (ring->proxy_id != K3_RINGACC_PROXY_NOT_USED) + ring->proxy = ringacc->proxy_target_base + + ring->proxy_id * K3_RINGACC_PROXY_TARGET_STEP; + + switch (ring->mode) { + case K3_RINGACC_RING_MODE_RING: + ring->ops = &k3_ring_mode_ring_ops; + break; + case K3_RINGACC_RING_MODE_MESSAGE: + if (ring->proxy) + ring->ops = &k3_ring_mode_proxy_ops; + else + ring->ops = &k3_ring_mode_msg_ops; + break; + default: + ring->ops = NULL; + ret = -EINVAL; + goto err_free_proxy; + }; + + ring->ring_mem_virt = dma_alloc_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + &ring->ring_mem_dma, GFP_KERNEL); + if (!ring->ring_mem_virt) { + dev_err(ringacc->dev, "Failed to alloc ring mem\n"); + ret = -ENOMEM; + goto err_free_ops; + } + + ret = k3_ringacc_ring_cfg_sci(ring); + + if (ret) + goto err_free_mem; + + ring->flags |= K3_RING_FLAG_BUSY; + ring->flags |= (cfg->flags & K3_RINGACC_RING_SHARED) ? 
+ K3_RING_FLAG_SHARED : 0; + + k3_ringacc_ring_dump(ring); + + return 0; + +err_free_mem: + dma_free_coherent(ringacc->dev, + ring->size * (4 << ring->elm_size), + ring->ring_mem_virt, + ring->ring_mem_dma); +err_free_ops: + ring->ops = NULL; +err_free_proxy: + ring->proxy = NULL; + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_cfg); + +u32 k3_ringacc_ring_get_size(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + return ring->size; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_size); + +u32 k3_ringacc_ring_get_free(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->free) + ring->free = ring->size - readl(&ring->rt->occ); + + return ring->free; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_free); + +u32 k3_ringacc_ring_get_occ(struct k3_ring *ring) +{ + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + return readl(&ring->rt->occ); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_get_occ); + +u32 k3_ringacc_ring_is_full(struct k3_ring *ring) +{ + return !k3_ringacc_ring_get_free(ring); +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_is_full); + +enum k3_ringacc_access_mode { + K3_RINGACC_ACCESS_MODE_PUSH_HEAD, + K3_RINGACC_ACCESS_MODE_POP_HEAD, + K3_RINGACC_ACCESS_MODE_PUSH_TAIL, + K3_RINGACC_ACCESS_MODE_POP_TAIL, + K3_RINGACC_ACCESS_MODE_PEEK_HEAD, + K3_RINGACC_ACCESS_MODE_PEEK_TAIL, +}; + +#define K3_RINGACC_PROXY_MODE(x) (((x) & 0x3) << 16) +#define K3_RINGACC_PROXY_ELSIZE(x) (((x) & 0x7) << 24) +static int k3_ringacc_ring_cfg_proxy(struct k3_ring *ring, + enum k3_ringacc_proxy_access_mode mode) +{ + u32 val; + + val = ring->ring_id; + val |= K3_RINGACC_PROXY_MODE(mode); + val |= K3_RINGACC_PROXY_ELSIZE(ring->elm_size); + writel(val, &ring->proxy->control); + return 0; +} + +static int k3_ringacc_ring_access_proxy(struct k3_ring *ring, void *elem, + enum k3_ringacc_access_mode access_mode) +{ + void __iomem *ptr; + + ptr = (void __iomem *)&ring->proxy->data; + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_HEAD); + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + k3_ringacc_ring_cfg_proxy(ring, PROXY_ACCESS_MODE_TAIL); + break; + default: + return -EINVAL; + } + + ptr += k3_ringacc_ring_get_fifo_pos(ring); + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + dev_dbg(ring->parent->dev, + "proxy:memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_fromio(elem, ptr, (4 << ring->elm_size)); + ring->occ--; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + dev_dbg(ring->parent->dev, + "proxy:memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_toio(ptr, elem, (4 << ring->elm_size)); + ring->free--; + break; + default: + return -EINVAL; + } + + dev_dbg(ring->parent->dev, "proxy: free%d occ%d\n", ring->free, + ring->occ); + return 0; +} + +static int k3_ringacc_ring_push_head_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_HEAD); +} + +static int k3_ringacc_ring_push_tail_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_TAIL); +} + +static int k3_ringacc_ring_pop_head_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + 
K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_pop_tail_proxy(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_proxy(ring, elem, + K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_access_io(struct k3_ring *ring, void *elem, + enum k3_ringacc_access_mode access_mode) +{ + void __iomem *ptr; + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + ptr = (void __iomem *)&ring->fifos->head_data; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + ptr = (void __iomem *)&ring->fifos->tail_data; + break; + default: + return -EINVAL; + } + + ptr += k3_ringacc_ring_get_fifo_pos(ring); + + switch (access_mode) { + case K3_RINGACC_ACCESS_MODE_POP_HEAD: + case K3_RINGACC_ACCESS_MODE_POP_TAIL: + dev_dbg(ring->parent->dev, + "memcpy_fromio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_fromio(elem, ptr, (4 << ring->elm_size)); + ring->occ--; + break; + case K3_RINGACC_ACCESS_MODE_PUSH_TAIL: + case K3_RINGACC_ACCESS_MODE_PUSH_HEAD: + dev_dbg(ring->parent->dev, + "memcpy_toio(x): --> ptr(%p), mode:%d\n", ptr, + access_mode); + memcpy_toio(ptr, elem, (4 << ring->elm_size)); + ring->free--; + break; + default: + return -EINVAL; + } + + dev_dbg(ring->parent->dev, "free%d index%d occ%d index%d\n", ring->free, + ring->windex, ring->occ, ring->rindex); + return 0; +} + +static int k3_ringacc_ring_push_head_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_HEAD); +} + +static int k3_ringacc_ring_push_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_PUSH_TAIL); +} + +static int k3_ringacc_ring_pop_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_pop_tail_io(struct k3_ring *ring, void *elem) +{ + return k3_ringacc_ring_access_io(ring, elem, + K3_RINGACC_ACCESS_MODE_POP_HEAD); +} + +static int k3_ringacc_ring_push_mem(struct k3_ring *ring, void *elem) +{ + void *elem_ptr; + + elem_ptr = k3_ringacc_get_elm_addr(ring, ring->windex); + + memcpy(elem_ptr, elem, (4 << ring->elm_size)); + + ring->windex = (ring->windex + 1) % ring->size; + ring->free--; + writel(1, &ring->rt->db); + + dev_dbg(ring->parent->dev, "ring_push_mem: free%d index%d\n", + ring->free, ring->windex); + + return 0; +} + +static int k3_ringacc_ring_pop_mem(struct k3_ring *ring, void *elem) +{ + void *elem_ptr; + + elem_ptr = k3_ringacc_get_elm_addr(ring, ring->rindex); + + memcpy(elem, elem_ptr, (4 << ring->elm_size)); + + ring->rindex = (ring->rindex + 1) % ring->size; + ring->occ--; + writel(-1, &ring->rt->db); + + dev_dbg(ring->parent->dev, "ring_pop_mem: occ%d index%d pos_ptr%p\n", + ring->occ, ring->rindex, elem_ptr); + return 0; +} + +int k3_ringacc_ring_push(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + dev_dbg(ring->parent->dev, "ring_push: free%d index%d\n", ring->free, + ring->windex); + + if (k3_ringacc_ring_is_full(ring)) + return -ENOMEM; + + if (ring->ops && ring->ops->push_tail) + ret = ring->ops->push_tail(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_push); + +int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + 
dev_dbg(ring->parent->dev, "ring_push_head: free%d index%d\n", + ring->free, ring->windex); + + if (k3_ringacc_ring_is_full(ring)) + return -ENOMEM; + + if (ring->ops && ring->ops->push_head) + ret = ring->ops->push_head(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_push_head); + +int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->occ) + ring->occ = k3_ringacc_ring_get_occ(ring); + + dev_dbg(ring->parent->dev, "ring_pop: occ%d index%d\n", ring->occ, + ring->rindex); + + if (!ring->occ) + return -ENODATA; + + if (ring->ops && ring->ops->pop_head) + ret = ring->ops->pop_head(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop); + +int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem) +{ + int ret = -EOPNOTSUPP; + + if (!ring || !(ring->flags & K3_RING_FLAG_BUSY)) + return -EINVAL; + + if (!ring->occ) + ring->occ = k3_ringacc_ring_get_occ(ring); + + dev_dbg(ring->parent->dev, "ring_pop_tail: occ%d index%d\n", ring->occ, + ring->rindex); + + if (!ring->occ) + return -ENODATA; + + if (ring->ops && ring->ops->pop_tail) + ret = ring->ops->pop_tail(ring, elem); + + return ret; +} +EXPORT_SYMBOL_GPL(k3_ringacc_ring_pop_tail); + +struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np, + const char *property) +{ + struct device_node *ringacc_np; + struct k3_ringacc *ringacc = ERR_PTR(-EPROBE_DEFER); + struct k3_ringacc *entry; + + ringacc_np = of_parse_phandle(np, property, 0); + if (!ringacc_np) + return ERR_PTR(-ENODEV); + + mutex_lock(&k3_ringacc_list_lock); + list_for_each_entry(entry, &k3_ringacc_list, list) + if (entry->dev->of_node == ringacc_np) { + ringacc = entry; + break; + } + mutex_unlock(&k3_ringacc_list_lock); + of_node_put(ringacc_np); + + return ringacc; +} +EXPORT_SYMBOL_GPL(of_k3_ringacc_get_by_phandle); + +static int k3_ringacc_probe_dt(struct k3_ringacc *ringacc) +{ + struct device_node *node = ringacc->dev->of_node; + struct device *dev = ringacc->dev; + struct platform_device *pdev = to_platform_device(dev); + int ret; + + if (!node) { + dev_err(dev, "device tree info unavailable\n"); + return -ENODEV; + } + + ret = of_property_read_u32(node, "ti,num-rings", &ringacc->num_rings); + if (ret) { + dev_err(dev, "ti,num-rings read failure %d\n", ret); + return ret; + } + + ringacc->dma_ring_reset_quirk = + of_property_read_bool(node, "ti,dma-ring-reset-quirk"); + + ringacc->tisci = ti_sci_get_by_phandle(node, "ti,sci"); + if (IS_ERR(ringacc->tisci)) { + ret = PTR_ERR(ringacc->tisci); + if (ret != -EPROBE_DEFER) + dev_err(dev, "ti,sci read fail %d\n", ret); + ringacc->tisci = NULL; + return ret; + } + + ret = of_property_read_u32(node, "ti,sci-dev-id", + &ringacc->tisci_dev_id); + if (ret) { + dev_err(dev, "ti,sci-dev-id read fail %d\n", ret); + return ret; + } + + pdev->id = ringacc->tisci_dev_id; + + ringacc->rm_gp_range = devm_ti_sci_get_of_resource(ringacc->tisci, dev, + ringacc->tisci_dev_id, + "ti,sci-rm-range-gp-rings"); + if (IS_ERR(ringacc->rm_gp_range)) { + dev_err(dev, "Failed to allocate MSI interrupts\n"); + return PTR_ERR(ringacc->rm_gp_range); + } + + return ti_sci_inta_msi_domain_alloc_irqs(ringacc->dev, + ringacc->rm_gp_range); +} + +static int k3_ringacc_probe(struct platform_device *pdev) +{ + struct k3_ringacc *ringacc; + void __iomem *base_fifo, *base_rt; + struct device *dev = &pdev->dev; + struct resource *res; + int ret, i; + + ringacc = devm_kzalloc(dev, sizeof(*ringacc), 
GFP_KERNEL); + if (!ringacc) + return -ENOMEM; + + ringacc->dev = dev; + mutex_init(&ringacc->req_lock); + + dev->msi_domain = of_msi_get_domain(dev, dev->of_node, + DOMAIN_BUS_TI_SCI_INTA_MSI); + if (!dev->msi_domain) { + dev_err(dev, "Failed to get MSI domain\n"); + return -EPROBE_DEFER; + } + + ret = k3_ringacc_probe_dt(ringacc); + if (ret) + return ret; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rt"); + base_rt = devm_ioremap_resource(dev, res); + if (IS_ERR(base_rt)) + return PTR_ERR(base_rt); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "fifos"); + base_fifo = devm_ioremap_resource(dev, res); + if (IS_ERR(base_fifo)) + return PTR_ERR(base_fifo); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "proxy_gcfg"); + ringacc->proxy_gcfg = devm_ioremap_resource(dev, res); + if (IS_ERR(ringacc->proxy_gcfg)) + return PTR_ERR(ringacc->proxy_gcfg); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + "proxy_target"); + ringacc->proxy_target_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ringacc->proxy_target_base)) + return PTR_ERR(ringacc->proxy_target_base); + + ringacc->num_proxies = readl(&ringacc->proxy_gcfg->config) & + K3_RINGACC_PROXY_CFG_THREADS_MASK; + + ringacc->rings = devm_kzalloc(dev, + sizeof(*ringacc->rings) * + ringacc->num_rings, + GFP_KERNEL); + ringacc->rings_inuse = devm_kcalloc(dev, + BITS_TO_LONGS(ringacc->num_rings), + sizeof(unsigned long), GFP_KERNEL); + ringacc->proxy_inuse = devm_kcalloc(dev, + BITS_TO_LONGS(ringacc->num_proxies), + sizeof(unsigned long), GFP_KERNEL); + + if (!ringacc->rings || !ringacc->rings_inuse || !ringacc->proxy_inuse) + return -ENOMEM; + + for (i = 0; i < ringacc->num_rings; i++) { + ringacc->rings[i].rt = base_rt + + K3_RINGACC_RT_REGS_STEP * i; + ringacc->rings[i].fifos = base_fifo + + K3_RINGACC_FIFO_REGS_STEP * i; + ringacc->rings[i].parent = ringacc; + ringacc->rings[i].ring_id = i; + ringacc->rings[i].proxy_id = K3_RINGACC_PROXY_NOT_USED; + } + dev_set_drvdata(dev, ringacc); + + ringacc->tisci_ring_ops = &ringacc->tisci->ops.rm_ring_ops; + + mutex_lock(&k3_ringacc_list_lock); + list_add_tail(&ringacc->list, &k3_ringacc_list); + mutex_unlock(&k3_ringacc_list_lock); + + dev_info(dev, "Ring Accelerator probed rings:%u, gp-rings[%u,%u] sci-dev-id:%u\n", + ringacc->num_rings, + ringacc->rm_gp_range->desc[0].start, + ringacc->rm_gp_range->desc[0].num, + ringacc->tisci_dev_id); + dev_info(dev, "dma-ring-reset-quirk: %s\n", + ringacc->dma_ring_reset_quirk ? "enabled" : "disabled"); + dev_info(dev, "RA Proxy rev. %08x, num_proxies:%u\n", + readl(&ringacc->proxy_gcfg->revision), ringacc->num_proxies); + return 0; +} + +/* Match table for of_platform binding */ +static const struct of_device_id k3_ringacc_of_match[] = { + { .compatible = "ti,am654-navss-ringacc", }, + {}, +}; + +static struct platform_driver k3_ringacc_driver = { + .probe = k3_ringacc_probe, + .driver = { + .name = "k3-ringacc", + .of_match_table = k3_ringacc_of_match, + .suppress_bind_attrs = true, + }, +}; +builtin_platform_driver(k3_ringacc_driver); diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 870f7797b56b..d6ed0c355954 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -281,6 +281,15 @@ config SPI_FSL_QUADSPI This controller does not support generic SPI messages. It only supports the high-level SPI memory interface. 
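The ringacc driver that ends above also wires each ring to a TI-SCI INTA interrupt, retrievable with k3_ringacc_get_ring_irq_num(). A short sketch of the consumer-side hookup follows; the handler name, drain policy, and devname string are illustrative, and free_irq() on teardown is omitted.

#include <linux/interrupt.h>
#include <linux/soc/ti/k3-ringacc.h>

static irqreturn_t my_ring_irq(int irq, void *data)
{
	struct k3_ring *ring = data;
	u64 elem;

	/* Drain whatever the hardware queued; pop returns -ENODATA when empty */
	while (!k3_ringacc_ring_pop(ring, &elem))
		; /* process elem here */

	return IRQ_HANDLED;
}

static int my_ring_irq_setup(struct k3_ring *ring)
{
	int virq = k3_ringacc_get_ring_irq_num(ring);	/* TI-SCI INTA virq */

	if (virq < 0)
		return virq;

	return request_irq(virq, my_ring_irq, 0, "my-ring", ring);
}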
+config SPI_HISI_SFC_V3XX + tristate "HiSilicon SPI-NOR Flash Controller for Hi16XX chipsets" + depends on (ARM64 && ACPI) || COMPILE_TEST + depends on HAS_IOMEM + select MTD_SPI_NOR + help + This enables support for HiSilicon v3xx SPI-NOR flash controller + found in hi16xx chipsets. + config SPI_NXP_FLEXSPI tristate "NXP Flex SPI controller" depends on ARCH_LAYERSCAPE || HAS_IOMEM diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index bb49c9e6d0a0..9b65ec5afc5e 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -48,6 +48,7 @@ obj-$(CONFIG_SPI_FSL_LPSPI) += spi-fsl-lpspi.o obj-$(CONFIG_SPI_FSL_QUADSPI) += spi-fsl-qspi.o obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o obj-$(CONFIG_SPI_GPIO) += spi-gpio.o +obj-$(CONFIG_SPI_HISI_SFC_V3XX) += spi-hisi-sfc-v3xx.o obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o obj-$(CONFIG_SPI_IMX) += spi-imx.o obj-$(CONFIG_SPI_LANTIQ_SSC) += spi-lantiq-ssc.o diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 56f0ca361deb..013458cabe3c 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -514,26 +514,19 @@ static int atmel_spi_configure_dma(struct spi_master *master, master->dma_tx = dma_request_chan(dev, "tx"); if (IS_ERR(master->dma_tx)) { err = PTR_ERR(master->dma_tx); - if (err == -EPROBE_DEFER) { - dev_warn(dev, "no DMA channel available at the moment\n"); - goto error_clear; - } - dev_err(dev, - "DMA TX channel not available, SPI unable to use DMA\n"); - err = -EBUSY; + if (err != -EPROBE_DEFER) + dev_err(dev, "No TX DMA channel, DMA is disabled\n"); goto error_clear; } - /* - * No reason to check EPROBE_DEFER here since we have already requested - * tx channel. If it fails here, it's for another reason. - */ - master->dma_rx = dma_request_slave_channel(dev, "rx"); - - if (!master->dma_rx) { - dev_err(dev, - "DMA RX channel not available, SPI unable to use DMA\n"); - err = -EBUSY; + master->dma_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(master->dma_rx)) { + err = PTR_ERR(master->dma_rx); + /* + * No reason to check EPROBE_DEFER here since we have already + * requested tx channel.
+ */ + dev_err(dev, "No RX DMA channel, DMA is disabled\n"); goto error; } @@ -548,7 +541,7 @@ static int atmel_spi_configure_dma(struct spi_master *master, return 0; error: - if (master->dma_rx) + if (!IS_ERR(master->dma_rx)) dma_release_channel(master->dma_rx); if (!IS_ERR(master->dma_tx)) dma_release_channel(master->dma_tx); diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 85bad70f59e3..23d295f36c80 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -1293,7 +1293,7 @@ int bcm_qspi_probe(struct platform_device *pdev, name = qspi_irq_tab[val].irq_name; if (qspi_irq_tab[val].irq_source == SINGLE_L2) { /* get the l2 interrupts */ - irq = platform_get_irq_byname(pdev, name); + irq = platform_get_irq_byname_optional(pdev, name); } else if (!num_ints && soc_intc) { /* all mspi, bspi intrs muxed to one L1 intr */ irq = platform_get_irq(pdev, 0); diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index fb61a620effc..11c235879bb7 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -68,7 +68,7 @@ #define BCM2835_SPI_FIFO_SIZE 64 #define BCM2835_SPI_FIFO_SIZE_3_4 48 #define BCM2835_SPI_DMA_MIN_LENGTH 96 -#define BCM2835_SPI_NUM_CS 3 /* raise as necessary */ +#define BCM2835_SPI_NUM_CS 4 /* raise as necessary */ #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ | SPI_NO_CS | SPI_3WIRE) @@ -888,8 +888,8 @@ static void bcm2835_dma_release(struct spi_controller *ctlr, } } -static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, - struct bcm2835_spi *bs) +static int bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, + struct bcm2835_spi *bs) { struct dma_slave_config slave_config; const __be32 *addr; @@ -900,19 +900,24 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL); if (!addr) { dev_err(dev, "could not get DMA-register address - not using dma mode\n"); - goto err; + /* Fall back to interrupt mode */ + return 0; } dma_reg_base = be32_to_cpup(addr); /* get tx/rx dma */ - ctlr->dma_tx = dma_request_slave_channel(dev, "tx"); - if (!ctlr->dma_tx) { + ctlr->dma_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(ctlr->dma_tx)) { dev_err(dev, "no tx-dma configuration found - not using dma mode\n"); + ret = PTR_ERR(ctlr->dma_tx); + ctlr->dma_tx = NULL; goto err; } - ctlr->dma_rx = dma_request_slave_channel(dev, "rx"); - if (!ctlr->dma_rx) { + ctlr->dma_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(ctlr->dma_rx)) { dev_err(dev, "no rx-dma configuration found - not using dma mode\n"); + ret = PTR_ERR(ctlr->dma_rx); + ctlr->dma_rx = NULL; goto err_release; } @@ -997,7 +1002,7 @@ static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev, /* all went well, so set can_dma */ ctlr->can_dma = bcm2835_spi_can_dma; - return; + return 0; err_config: dev_err(dev, "issue configuring dma: %d - not using DMA mode\n", @@ -1005,7 +1010,14 @@ err_config: err_release: bcm2835_dma_release(ctlr, bs); err: - return; + /* + * Only report error for deferred probing, otherwise fall back to + * interrupt mode + */ + if (ret != -EPROBE_DEFER) + ret = 0; + + return ret; } static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr, @@ -1305,7 +1317,10 @@ static int bcm2835_spi_probe(struct platform_device *pdev) bs->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(bs->clk)) { err = PTR_ERR(bs->clk); - dev_err(&pdev->dev, "could not get clk: %d\n", err); + if (err == -EPROBE_DEFER) + 
dev_dbg(&pdev->dev, "could not get clk: %d\n", err); + else + dev_err(&pdev->dev, "could not get clk: %d\n", err); goto out_controller_put; } @@ -1317,7 +1332,9 @@ static int bcm2835_spi_probe(struct platform_device *pdev) clk_prepare_enable(bs->clk); - bcm2835_dma_init(ctlr, &pdev->dev, bs); + err = bcm2835_dma_init(ctlr, &pdev->dev, bs); + if (err) + goto out_clk_disable; /* initialise the hardware with the default polarities */ bcm2835_wr(bs, BCM2835_SPI_CS, @@ -1327,20 +1344,22 @@ static int bcm2835_spi_probe(struct platform_device *pdev) dev_name(&pdev->dev), ctlr); if (err) { dev_err(&pdev->dev, "could not request IRQ: %d\n", err); - goto out_clk_disable; + goto out_dma_release; } err = devm_spi_register_controller(&pdev->dev, ctlr); if (err) { dev_err(&pdev->dev, "could not register SPI controller: %d\n", err); - goto out_clk_disable; + goto out_dma_release; } bcm2835_debugfs_create(bs, dev_name(&pdev->dev)); return 0; +out_dma_release: + bcm2835_dma_release(ctlr, bs); out_clk_disable: clk_disable_unprepare(bs->clk); out_controller_put: diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c index d84e22dd6f9f..68491a8bf7b5 100644 --- a/drivers/spi/spi-bitbang.c +++ b/drivers/spi/spi-bitbang.c @@ -329,8 +329,20 @@ static void spi_bitbang_set_cs(struct spi_device *spi, bool enable) int spi_bitbang_init(struct spi_bitbang *bitbang) { struct spi_master *master = bitbang->master; + bool custom_cs; - if (!master || !bitbang->chipselect) + if (!master) + return -EINVAL; + /* + * We only need the chipselect callback if we are actually using it. + * If we just use GPIO descriptors, it is surplus. If the + * SPI_MASTER_GPIO_SS flag is set, we always need to call the + * driver-specific chipselect routine. + */ + custom_cs = (!master->use_gpio_descriptors || + (master->flags & SPI_MASTER_GPIO_SS)); + + if (custom_cs && !bitbang->chipselect) return -EINVAL; mutex_init(&bitbang->lock); @@ -344,7 +356,12 @@ int spi_bitbang_init(struct spi_bitbang *bitbang) master->prepare_transfer_hardware = spi_bitbang_prepare_hardware; master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware; master->transfer_one = spi_bitbang_transfer_one; - master->set_cs = spi_bitbang_set_cs; + /* + * When using GPIO descriptors, the ->set_cs() callback doesn't even + * get called unless SPI_MASTER_GPIO_SS is set. 
+ */ + if (custom_cs) + master->set_cs = spi_bitbang_set_cs; if (!bitbang->txrx_bufs) { bitbang->use_dma = 0; diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 5a25da377119..31e3f866d11a 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -297,6 +297,9 @@ static int dw_spi_transfer_one(struct spi_controller *master, dws->len = transfer->len; spin_unlock_irqrestore(&dws->buf_lock, flags); + /* Ensure dw->rx and dw->rx_end are visible */ + smp_mb(); + spi_enable_chip(dws, 0); /* Handle per transfer options for bpw and speed */ @@ -469,7 +472,8 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws) struct spi_controller *master; int ret; - BUG_ON(dws == NULL); + if (!dws) + return -EINVAL; master = spi_alloc_master(dev, 0); if (!master) diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 8428b69c858b..6ec2dcb8c57a 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -396,17 +396,17 @@ static int dspi_request_dma(struct fsl_dspi *dspi, phys_addr_t phy_addr) if (!dma) return -ENOMEM; - dma->chan_rx = dma_request_slave_channel(dev, "rx"); - if (!dma->chan_rx) { + dma->chan_rx = dma_request_chan(dev, "rx"); + if (IS_ERR(dma->chan_rx)) { dev_err(dev, "rx dma channel not available\n"); - ret = -ENODEV; + ret = PTR_ERR(dma->chan_rx); return ret; } - dma->chan_tx = dma_request_slave_channel(dev, "tx"); - if (!dma->chan_tx) { + dma->chan_tx = dma_request_chan(dev, "tx"); + if (IS_ERR(dma->chan_tx)) { dev_err(dev, "tx dma channel not available\n"); - ret = -ENODEV; + ret = PTR_ERR(dma->chan_tx); goto err_tx_channel; } diff --git a/drivers/spi/spi-fsl-lpspi.c b/drivers/spi/spi-fsl-lpspi.c index 2cc0ddb4a988..d0b8cc741a24 100644 --- a/drivers/spi/spi-fsl-lpspi.c +++ b/drivers/spi/spi-fsl-lpspi.c @@ -469,9 +469,9 @@ static int fsl_lpspi_setup_transfer(struct spi_controller *controller, fsl_lpspi->watermark = fsl_lpspi->txfifosize; if (fsl_lpspi_can_dma(controller, spi, t)) - fsl_lpspi->usedma = 1; + fsl_lpspi->usedma = true; else - fsl_lpspi->usedma = 0; + fsl_lpspi->usedma = false; return fsl_lpspi_config(fsl_lpspi); } @@ -862,6 +862,22 @@ static int fsl_lpspi_probe(struct platform_device *pdev) fsl_lpspi->dev = &pdev->dev; fsl_lpspi->is_slave = is_slave; + controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); + controller->transfer_one = fsl_lpspi_transfer_one; + controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; + controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware; + controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; + controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; + controller->dev.of_node = pdev->dev.of_node; + controller->bus_num = pdev->id; + controller->slave_abort = fsl_lpspi_slave_abort; + + ret = devm_spi_register_controller(&pdev->dev, controller); + if (ret < 0) { + dev_err(&pdev->dev, "spi_register_controller error.\n"); + goto out_controller_put; + } + if (!fsl_lpspi->is_slave) { for (i = 0; i < controller->num_chipselect; i++) { int cs_gpio = of_get_named_gpio(np, "cs-gpios", i); @@ -885,16 +901,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev) controller->prepare_message = fsl_lpspi_prepare_message; } - controller->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32); - controller->transfer_one = fsl_lpspi_transfer_one; - controller->prepare_transfer_hardware = lpspi_prepare_xfer_hardware; - controller->unprepare_transfer_hardware = lpspi_unprepare_xfer_hardware; - controller->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; - 
controller->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; - controller->dev.of_node = pdev->dev.of_node; - controller->bus_num = pdev->id; - controller->slave_abort = fsl_lpspi_slave_abort; - init_completion(&fsl_lpspi->xfer_done); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -952,12 +958,6 @@ static int fsl_lpspi_probe(struct platform_device *pdev) if (ret < 0) dev_err(&pdev->dev, "dma setup error %d, use pio\n", ret); - ret = devm_spi_register_controller(&pdev->dev, controller); - if (ret < 0) { - dev_err(&pdev->dev, "spi_register_controller error.\n"); - goto out_controller_put; - } - return 0; out_controller_put: diff --git a/drivers/spi/spi-fsl-qspi.c b/drivers/spi/spi-fsl-qspi.c index 79b1558b74b8..e8a499cd1f13 100644 --- a/drivers/spi/spi-fsl-qspi.c +++ b/drivers/spi/spi-fsl-qspi.c @@ -410,7 +410,7 @@ static bool fsl_qspi_supports_op(struct spi_mem *mem, op->data.nbytes > q->devtype_data->txfifo) return false; - return true; + return spi_mem_default_supports_op(mem, op); } static void fsl_qspi_prepare_lut(struct fsl_qspi *q, diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c index fb4159ad6bf6..3b81772fea0d 100644 --- a/drivers/spi/spi-fsl-spi.c +++ b/drivers/spi/spi-fsl-spi.c @@ -706,8 +706,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) struct device_node *np = ofdev->dev.of_node; struct spi_master *master; struct resource mem; - int irq = 0, type; - int ret = -ENOMEM; + int irq, type; + int ret; ret = of_mpc8xxx_spi_probe(ofdev); if (ret) @@ -722,10 +722,8 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) if (spisel_boot) { pinfo->immr_spi_cs = ioremap(get_immrbase() + IMMR_SPI_CS_OFFSET, 4); - if (!pinfo->immr_spi_cs) { - ret = -ENOMEM; - goto err; - } + if (!pinfo->immr_spi_cs) + return -ENOMEM; } #endif /* @@ -744,24 +742,15 @@ static int of_fsl_spi_probe(struct platform_device *ofdev) ret = of_address_to_resource(np, 0, &mem); if (ret) - goto err; + return ret; irq = platform_get_irq(ofdev, 0); - if (irq < 0) { - ret = irq; - goto err; - } + if (irq < 0) + return irq; master = fsl_spi_probe(dev, &mem, irq); - if (IS_ERR(master)) { - ret = PTR_ERR(master); - goto err; - } - - return 0; -err: - return ret; + return PTR_ERR_OR_ZERO(master); } static int of_fsl_spi_remove(struct platform_device *ofdev) diff --git a/drivers/spi/spi-hisi-sfc-v3xx.c b/drivers/spi/spi-hisi-sfc-v3xx.c new file mode 100644 index 000000000000..4cf8fc80a7b7 --- /dev/null +++ b/drivers/spi/spi-hisi-sfc-v3xx.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0-only +// +// HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets +// +// Copyright (c) 2019 HiSilicon Technologies Co., Ltd. 
+// Author: John Garry <john.garry@huawei.com> + +#include <linux/acpi.h> +#include <linux/bitops.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi-mem.h> + +#define HISI_SFC_V3XX_VERSION (0x1f8) + +#define HISI_SFC_V3XX_CMD_CFG (0x300) +#define HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF 9 +#define HISI_SFC_V3XX_CMD_CFG_RW_MSK BIT(8) +#define HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK BIT(7) +#define HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF 4 +#define HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK BIT(3) +#define HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF 1 +#define HISI_SFC_V3XX_CMD_CFG_START_MSK BIT(0) +#define HISI_SFC_V3XX_CMD_INS (0x308) +#define HISI_SFC_V3XX_CMD_ADDR (0x30c) +#define HISI_SFC_V3XX_CMD_DATABUF0 (0x400) + +struct hisi_sfc_v3xx_host { + struct device *dev; + void __iomem *regbase; + int max_cmd_dword; +}; + +#define HISI_SFC_V3XX_WAIT_TIMEOUT_US 1000000 +#define HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US 10 + +static int hisi_sfc_v3xx_wait_cmd_idle(struct hisi_sfc_v3xx_host *host) +{ + u32 reg; + + return readl_poll_timeout(host->regbase + HISI_SFC_V3XX_CMD_CFG, reg, + !(reg & HISI_SFC_V3XX_CMD_CFG_START_MSK), + HISI_SFC_V3XX_WAIT_POLL_INTERVAL_US, + HISI_SFC_V3XX_WAIT_TIMEOUT_US); +} + +static int hisi_sfc_v3xx_adjust_op_size(struct spi_mem *mem, + struct spi_mem_op *op) +{ + struct spi_device *spi = mem->spi; + struct hisi_sfc_v3xx_host *host; + uintptr_t addr = (uintptr_t)op->data.buf.in; + int max_byte_count; + + host = spi_controller_get_devdata(spi->master); + + max_byte_count = host->max_cmd_dword * 4; + + if (!IS_ALIGNED(addr, 4) && op->data.nbytes >= 4) + op->data.nbytes = 4 - (addr % 4); + else if (op->data.nbytes > max_byte_count) + op->data.nbytes = max_byte_count; + + return 0; +} + +/* + * memcpy_{to,from}io doesn't guarantee 32b accesses - which we require for the + * DATABUF registers - so use __io{read,write}32_copy when possible. For + * trailing bytes, copy them byte-by-byte from the DATABUF register, as we + * can't clobber outside the source/dest buffer. + * + * For efficient data read/write, we try to put any start 32b unaligned data + * into a separate transaction in hisi_sfc_v3xx_adjust_op_size().
+ */ +static void hisi_sfc_v3xx_read_databuf(struct hisi_sfc_v3xx_host *host, + u8 *to, unsigned int len) +{ + void __iomem *from; + int i; + + from = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0; + + if (IS_ALIGNED((uintptr_t)to, 4)) { + int words = len / 4; + + __ioread32_copy(to, from, words); + + len -= words * 4; + if (len) { + u32 val; + + to += words * 4; + from += words * 4; + + val = __raw_readl(from); + + for (i = 0; i < len; i++, val >>= 8, to++) + *to = (u8)val; + } + } else { + for (i = 0; i < DIV_ROUND_UP(len, 4); i++, from += 4) { + u32 val = __raw_readl(from); + int j; + + for (j = 0; j < 4 && (j + (i * 4) < len); + to++, val >>= 8, j++) + *to = (u8)val; + } + } +} + +static void hisi_sfc_v3xx_write_databuf(struct hisi_sfc_v3xx_host *host, + const u8 *from, unsigned int len) +{ + void __iomem *to; + int i; + + to = host->regbase + HISI_SFC_V3XX_CMD_DATABUF0; + + if (IS_ALIGNED((uintptr_t)from, 4)) { + int words = len / 4; + + __iowrite32_copy(to, from, words); + + len -= words * 4; + if (len) { + u32 val = 0; + + to += words * 4; + from += words * 4; + + for (i = 0; i < len; i++, from++) + val |= *from << i * 8; + __raw_writel(val, to); + } + + } else { + for (i = 0; i < DIV_ROUND_UP(len, 4); i++, to += 4) { + u32 val = 0; + int j; + + for (j = 0; j < 4 && (j + (i * 4) < len); + from++, j++) + val |= *from << j * 8; + __raw_writel(val, to); + } + } +} + +static int hisi_sfc_v3xx_generic_exec_op(struct hisi_sfc_v3xx_host *host, + const struct spi_mem_op *op, + u8 chip_select) +{ + int ret, len = op->data.nbytes; + u32 config = 0; + + if (op->addr.nbytes) + config |= HISI_SFC_V3XX_CMD_CFG_ADDR_EN_MSK; + + if (op->data.dir != SPI_MEM_NO_DATA) { + config |= (len - 1) << HISI_SFC_V3XX_CMD_CFG_DATA_CNT_OFF; + config |= HISI_SFC_V3XX_CMD_CFG_DATA_EN_MSK; + } + + if (op->data.dir == SPI_MEM_DATA_OUT) + hisi_sfc_v3xx_write_databuf(host, op->data.buf.out, len); + else if (op->data.dir == SPI_MEM_DATA_IN) + config |= HISI_SFC_V3XX_CMD_CFG_RW_MSK; + + config |= op->dummy.nbytes << HISI_SFC_V3XX_CMD_CFG_DUMMY_CNT_OFF | + chip_select << HISI_SFC_V3XX_CMD_CFG_CS_SEL_OFF | + HISI_SFC_V3XX_CMD_CFG_START_MSK; + + writel(op->addr.val, host->regbase + HISI_SFC_V3XX_CMD_ADDR); + writel(op->cmd.opcode, host->regbase + HISI_SFC_V3XX_CMD_INS); + + writel(config, host->regbase + HISI_SFC_V3XX_CMD_CFG); + + ret = hisi_sfc_v3xx_wait_cmd_idle(host); + if (ret) + return ret; + + if (op->data.dir == SPI_MEM_DATA_IN) + hisi_sfc_v3xx_read_databuf(host, op->data.buf.in, len); + + return 0; +} + +static int hisi_sfc_v3xx_exec_op(struct spi_mem *mem, + const struct spi_mem_op *op) +{ + struct hisi_sfc_v3xx_host *host; + struct spi_device *spi = mem->spi; + u8 chip_select = spi->chip_select; + + host = spi_controller_get_devdata(spi->master); + + return hisi_sfc_v3xx_generic_exec_op(host, op, chip_select); +} + +static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = { + .adjust_op_size = hisi_sfc_v3xx_adjust_op_size, + .exec_op = hisi_sfc_v3xx_exec_op, +}; + +static int hisi_sfc_v3xx_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct hisi_sfc_v3xx_host *host; + struct spi_controller *ctlr; + u32 version; + int ret; + + ctlr = spi_alloc_master(&pdev->dev, sizeof(*host)); + if (!ctlr) + return -ENOMEM; + + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | + SPI_TX_DUAL | SPI_TX_QUAD; + + host = spi_controller_get_devdata(ctlr); + host->dev = dev; + + platform_set_drvdata(pdev, host); + + host->regbase = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(host->regbase)) { 
+ ret = PTR_ERR(host->regbase); + goto err_put_master; + } + + ctlr->bus_num = -1; + ctlr->num_chipselect = 1; + ctlr->mem_ops = &hisi_sfc_v3xx_mem_ops; + + version = readl(host->regbase + HISI_SFC_V3XX_VERSION); + + switch (version) { + case 0x351: + host->max_cmd_dword = 64; + break; + default: + host->max_cmd_dword = 16; + break; + } + + ret = devm_spi_register_controller(dev, ctlr); + if (ret) + goto err_put_master; + + dev_info(&pdev->dev, "hw version 0x%x\n", version); + + return 0; + +err_put_master: + spi_master_put(ctlr); + return ret; +} + +#if IS_ENABLED(CONFIG_ACPI) +static const struct acpi_device_id hisi_sfc_v3xx_acpi_ids[] = { + {"HISI0341", 0}, + {} +}; +MODULE_DEVICE_TABLE(acpi, hisi_sfc_v3xx_acpi_ids); +#endif + +static struct platform_driver hisi_sfc_v3xx_spi_driver = { + .driver = { + .name = "hisi-sfc-v3xx", + .acpi_match_table = ACPI_PTR(hisi_sfc_v3xx_acpi_ids), + }, + .probe = hisi_sfc_v3xx_probe, +}; + +module_platform_driver(hisi_sfc_v3xx_spi_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("John Garry <john.garry@huawei.com>"); +MODULE_DESCRIPTION("HiSilicon SPI NOR V3XX Flash Controller Driver for hi16xx chipsets"); diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index f4a8f470aecc..8543f5ed1099 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -666,8 +666,22 @@ static int img_spfi_probe(struct platform_device *pdev) master->unprepare_message = img_spfi_unprepare; master->handle_err = img_spfi_handle_err; - spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx"); - spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx"); + spfi->tx_ch = dma_request_chan(spfi->dev, "tx"); + if (IS_ERR(spfi->tx_ch)) { + ret = PTR_ERR(spfi->tx_ch); + spfi->tx_ch = NULL; + if (ret == -EPROBE_DEFER) + goto disable_pm; + } + + spfi->rx_ch = dma_request_chan(spfi->dev, "rx"); + if (IS_ERR(spfi->rx_ch)) { + ret = PTR_ERR(spfi->rx_ch); + spfi->rx_ch = NULL; + if (ret == -EPROBE_DEFER) + goto disable_pm; + } + if (!spfi->tx_ch || !spfi->rx_ch) { if (spfi->tx_ch) dma_release_channel(spfi->tx_ch); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index 49f0099db0cb..f4f28a400a96 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -1230,9 +1230,9 @@ static int spi_imx_setupxfer(struct spi_device *spi, } if (spi_imx_can_dma(spi_imx->bitbang.master, spi, t)) - spi_imx->usedma = 1; + spi_imx->usedma = true; else - spi_imx->usedma = 0; + spi_imx->usedma = false; if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) { spi_imx->rx = mx53_ecspi_rx_slave; diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index f3f10443f9e2..7f5680fe2568 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -19,7 +19,6 @@ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/reset.h> -#include <linux/gpio.h> /* * The Meson SPICC controller could support DMA based transfers, but is not @@ -467,35 +466,14 @@ static int meson_spicc_unprepare_transfer(struct spi_master *master) static int meson_spicc_setup(struct spi_device *spi) { - int ret = 0; - if (!spi->controller_state) spi->controller_state = spi_master_get_devdata(spi->master); - else if (gpio_is_valid(spi->cs_gpio)) - goto out_gpio; - else if (spi->cs_gpio == -ENOENT) - return 0; - - if (gpio_is_valid(spi->cs_gpio)) { - ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev)); - if (ret) { - dev_err(&spi->dev, "failed to request cs gpio\n"); - return ret; - } - } - -out_gpio: - ret = gpio_direction_output(spi->cs_gpio, - 
!(spi->mode & SPI_CS_HIGH)); - return ret; + return 0; } static void meson_spicc_cleanup(struct spi_device *spi) { - if (gpio_is_valid(spi->cs_gpio)) - gpio_free(spi->cs_gpio); - spi->controller_state = NULL; } @@ -564,6 +542,7 @@ static int meson_spicc_probe(struct platform_device *pdev) master->prepare_message = meson_spicc_prepare_message; master->unprepare_transfer_hardware = meson_spicc_unprepare_transfer; master->transfer_one = meson_spicc_transfer_one; + master->use_gpio_descriptors = true; /* Setup max rate according to the Meson GX datasheet */ if ((rate >> 2) > SPICC_MAX_FREQ) diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 996c1c8a9c71..dce85ee07cd0 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -590,10 +590,10 @@ static int mxs_spi_probe(struct platform_device *pdev) if (ret) goto out_master_free; - ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx"); - if (!ssp->dmach) { + ssp->dmach = dma_request_chan(&pdev->dev, "rx-tx"); + if (IS_ERR(ssp->dmach)) { dev_err(ssp->dev, "Failed to request DMA\n"); - ret = -ENODEV; + ret = PTR_ERR(ssp->dmach); goto out_master_free; } diff --git a/drivers/spi/spi-npcm-pspi.c b/drivers/spi/spi-npcm-pspi.c index fe624731c74c..87cd0233c60b 100644 --- a/drivers/spi/spi-npcm-pspi.c +++ b/drivers/spi/spi-npcm-pspi.c @@ -12,6 +12,7 @@ #include <linux/spi/spi.h> #include <linux/gpio.h> #include <linux/of_gpio.h> +#include <linux/reset.h> #include <asm/unaligned.h> @@ -20,7 +21,7 @@ struct npcm_pspi { struct completion xfer_done; - struct regmap *rst_regmap; + struct reset_control *reset; struct spi_master *master; unsigned int tx_bytes; unsigned int rx_bytes; @@ -59,12 +60,6 @@ struct npcm_pspi { #define NPCM_PSPI_MIN_CLK_DIVIDER 4 #define NPCM_PSPI_DEFAULT_CLK 25000000 -/* reset register */ -#define NPCM7XX_IPSRST2_OFFSET 0x24 - -#define NPCM7XX_PSPI1_RESET BIT(22) -#define NPCM7XX_PSPI2_RESET BIT(23) - static inline unsigned int bytes_per_word(unsigned int bits) { return bits <= 8 ? 1 : 2; } @@ -178,6 +173,13 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi, priv->mode = spi->mode; } + /* + * If the transfer has an even length and uses 8 bits per word, + * run it as a 16 bits-per-word transfer instead. 
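+ *
+ * For example, a 6-byte transfer at 8 bits per word is clocked out
+ * as three 16-bit words instead of six 8-bit words; the packing in
+ * npcm_pspi_send()/npcm_pspi_recv() keeps the on-wire byte order.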
+ */ + if (priv->bits_per_word == 8 && !(t->len & 0x1)) + t->bits_per_word = 16; + if (!priv->is_save_param || priv->bits_per_word != t->bits_per_word) { npcm_pspi_set_transfer_size(priv, t->bits_per_word); priv->bits_per_word = t->bits_per_word; @@ -195,6 +197,7 @@ static void npcm_pspi_setup_transfer(struct spi_device *spi, static void npcm_pspi_send(struct npcm_pspi *priv) { int wsize; + u16 val; wsize = min(bytes_per_word(priv->bits_per_word), priv->tx_bytes); priv->tx_bytes -= wsize; @@ -204,17 +207,18 @@ static void npcm_pspi_send(struct npcm_pspi *priv) switch (wsize) { case 1: - iowrite8(*priv->tx_buf, NPCM_PSPI_DATA + priv->base); + val = *priv->tx_buf++; + iowrite8(val, NPCM_PSPI_DATA + priv->base); break; case 2: - iowrite16(*priv->tx_buf, NPCM_PSPI_DATA + priv->base); + val = *priv->tx_buf++; + val = *priv->tx_buf++ | (val << 8); + iowrite16(val, NPCM_PSPI_DATA + priv->base); break; default: WARN_ON_ONCE(1); return; } - - priv->tx_buf += wsize; } static void npcm_pspi_recv(struct npcm_pspi *priv) @@ -230,18 +234,17 @@ static void npcm_pspi_recv(struct npcm_pspi *priv) switch (rsize) { case 1: - val = ioread8(priv->base + NPCM_PSPI_DATA); + *priv->rx_buf++ = ioread8(priv->base + NPCM_PSPI_DATA); break; case 2: val = ioread16(priv->base + NPCM_PSPI_DATA); + *priv->rx_buf++ = (val >> 8); + *priv->rx_buf++ = val & 0xff; break; default: WARN_ON_ONCE(1); return; } - - *priv->rx_buf = val; - priv->rx_buf += rsize; } static int npcm_pspi_transfer_one(struct spi_master *master, @@ -285,9 +288,9 @@ static int npcm_pspi_unprepare_transfer_hardware(struct spi_master *master) static void npcm_pspi_reset_hw(struct npcm_pspi *priv) { - regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, - NPCM7XX_PSPI1_RESET << priv->id); - regmap_write(priv->rst_regmap, NPCM7XX_IPSRST2_OFFSET, 0x0); + reset_control_assert(priv->reset); + udelay(5); + reset_control_deassert(priv->reset); } static irqreturn_t npcm_pspi_handler(int irq, void *dev_id) @@ -351,10 +354,6 @@ static int npcm_pspi_probe(struct platform_device *pdev) if (num_cs < 0) return num_cs; - pdev->id = of_alias_get_id(np, "spi"); - if (pdev->id < 0) - pdev->id = 0; - master = spi_alloc_master(&pdev->dev, sizeof(*priv)); if (!master) return -ENOMEM; @@ -364,7 +363,6 @@ static int npcm_pspi_probe(struct platform_device *pdev) priv = spi_master_get_devdata(master); priv->master = master; priv->is_save_param = false; - priv->id = pdev->id; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { @@ -389,11 +387,10 @@ static int npcm_pspi_probe(struct platform_device *pdev) goto out_disable_clk; } - priv->rst_regmap = - syscon_regmap_lookup_by_compatible("nuvoton,npcm750-rst"); - if (IS_ERR(priv->rst_regmap)) { - dev_err(&pdev->dev, "failed to find nuvoton,npcm750-rst\n"); - return PTR_ERR(priv->rst_regmap); + priv->reset = devm_reset_control_get(&pdev->dev, NULL); + if (IS_ERR(priv->reset)) { + ret = PTR_ERR(priv->reset); + goto out_disable_clk; } /* reset SPI-HW block */ @@ -414,7 +411,7 @@ static int npcm_pspi_probe(struct platform_device *pdev) master->min_speed_hz = DIV_ROUND_UP(clk_hz, NPCM_PSPI_MAX_CLK_DIVIDER); master->mode_bits = SPI_CPHA | SPI_CPOL; master->dev.of_node = pdev->dev.of_node; - master->bus_num = pdev->id; + master->bus_num = -1; master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16); master->transfer_one = npcm_pspi_transfer_one; master->prepare_transfer_hardware = @@ -447,7 +444,7 @@ static int npcm_pspi_probe(struct platform_device *pdev) if (ret) goto out_disable_clk; - pr_info("NPCM 
Peripheral SPI %d probed\n", pdev->id); + pr_info("NPCM Peripheral SPI %d probed\n", master->bus_num); return 0; diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c index e2331eb7b47a..9df7c5979c29 100644 --- a/drivers/spi/spi-oc-tiny.c +++ b/drivers/spi/spi-oc-tiny.c @@ -20,7 +20,6 @@ #include <linux/spi/spi_bitbang.h> #include <linux/spi/spi_oc_tiny.h> #include <linux/io.h> -#include <linux/gpio.h> #include <linux/of.h> #define DRV_NAME "spi_oc_tiny" @@ -50,8 +49,6 @@ struct tiny_spi { unsigned int txc, rxc; const u8 *txp; u8 *rxp; - int gpio_cs_count; - int *gpio_cs; }; static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev) @@ -66,16 +63,6 @@ static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz) return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1; } -static void tiny_spi_chipselect(struct spi_device *spi, int is_active) -{ - struct tiny_spi *hw = tiny_spi_to_hw(spi); - - if (hw->gpio_cs_count > 0) { - gpio_set_value(hw->gpio_cs[spi->chip_select], - (spi->mode & SPI_CS_HIGH) ? is_active : !is_active); - } -} - static int tiny_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { @@ -203,24 +190,10 @@ static int tiny_spi_of_probe(struct platform_device *pdev) { struct tiny_spi *hw = platform_get_drvdata(pdev); struct device_node *np = pdev->dev.of_node; - unsigned int i; u32 val; if (!np) return 0; - hw->gpio_cs_count = of_gpio_count(np); - if (hw->gpio_cs_count > 0) { - hw->gpio_cs = devm_kcalloc(&pdev->dev, - hw->gpio_cs_count, sizeof(unsigned int), - GFP_KERNEL); - if (!hw->gpio_cs) - return -ENOMEM; - } - for (i = 0; i < hw->gpio_cs_count; i++) { - hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL); - if (hw->gpio_cs[i] < 0) - return -ENODEV; - } hw->bitbang.master->dev.of_node = pdev->dev.of_node; if (!of_property_read_u32(np, "clock-frequency", &val)) hw->freq = val; @@ -240,7 +213,6 @@ static int tiny_spi_probe(struct platform_device *pdev) struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev); struct tiny_spi *hw; struct spi_master *master; - unsigned int i; int err = -ENODEV; master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi)); @@ -249,9 +221,9 @@ static int tiny_spi_probe(struct platform_device *pdev) /* setup the master state. 
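+ * Chip selects are handled by the SPI core through GPIO descriptors
+ * (use_gpio_descriptors below), so the driver keeps no gpio_cs state.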
*/ master->bus_num = pdev->id; - master->num_chipselect = 255; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->setup = tiny_spi_setup; + master->use_gpio_descriptors = true; hw = spi_master_get_devdata(master); platform_set_drvdata(pdev, hw); @@ -259,7 +231,6 @@ static int tiny_spi_probe(struct platform_device *pdev) /* setup the state for the bitbang driver */ hw->bitbang.master = master; hw->bitbang.setup_transfer = tiny_spi_setup_transfer; - hw->bitbang.chipselect = tiny_spi_chipselect; hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs; /* find and map our resources */ @@ -279,12 +250,6 @@ static int tiny_spi_probe(struct platform_device *pdev) } /* find platform data */ if (platp) { - hw->gpio_cs_count = platp->gpio_cs_count; - hw->gpio_cs = platp->gpio_cs; - if (platp->gpio_cs_count && !platp->gpio_cs) { - err = -EBUSY; - goto exit; - } hw->freq = platp->freq; hw->baudwidth = platp->baudwidth; } else { @@ -292,13 +257,6 @@ static int tiny_spi_probe(struct platform_device *pdev) if (err) goto exit; } - for (i = 0; i < hw->gpio_cs_count; i++) { - err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev)); - if (err) - goto exit_gpio; - gpio_direction_output(hw->gpio_cs[i], 1); - } - hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count); /* register our spi controller */ err = spi_bitbang_start(&hw->bitbang); @@ -308,9 +266,6 @@ static int tiny_spi_probe(struct platform_device *pdev) return 0; -exit_gpio: - while (i-- > 0) - gpio_free(hw->gpio_cs[i]); exit: spi_master_put(master); return err; @@ -320,11 +275,8 @@ static int tiny_spi_remove(struct platform_device *pdev) { struct tiny_spi *hw = platform_get_drvdata(pdev); struct spi_master *master = hw->bitbang.master; - unsigned int i; spi_bitbang_stop(&hw->bitbang); - for (i = 0; i < hw->gpio_cs_count; i++) - gpio_free(hw->gpio_cs[i]); spi_master_put(master); return 0; } diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c index 9071333ebdd8..4c7a71f0fb3e 100644 --- a/drivers/spi/spi-pxa2xx.c +++ b/drivers/spi/spi-pxa2xx.c @@ -461,6 +461,16 @@ int pxa2xx_spi_flush(struct driver_data *drv_data) return limit; } +static void pxa2xx_spi_off(struct driver_data *drv_data) +{ + /* On MMP, disabling SSE seems to corrupt the rx fifo */ + if (drv_data->ssp_type == MMP2_SSP) + return; + + pxa2xx_spi_write(drv_data, SSCR0, + pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); +} + static int null_writer(struct driver_data *drv_data) { u8 n_bytes = drv_data->n_bytes; @@ -587,8 +597,7 @@ static void int_error_stop(struct driver_data *drv_data, const char* msg) if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, 0); pxa2xx_spi_flush(drv_data); - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); dev_err(&drv_data->pdev->dev, "%s\n", msg); @@ -686,8 +695,7 @@ static irqreturn_t interrupt_transfer(struct driver_data *drv_data) static void handle_bad_msg(struct driver_data *drv_data) { - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); pxa2xx_spi_write(drv_data, SSCR1, pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1); if (!pxa25x_ssp_comp(drv_data)) @@ -1062,7 +1070,8 @@ static int pxa2xx_spi_transfer_one(struct spi_controller *controller, || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask) != (cr1 & change_mask)) { /* stop the SSP, and update the other bits */ - pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE); + if (drv_data->ssp_type != MMP2_SSP) + pxa2xx_spi_write(drv_data, SSCR0, cr0 
& ~SSCR0_SSE); if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, chip->timeout); /* first set CR1 without interrupt and service enables */ @@ -1118,8 +1127,7 @@ static int pxa2xx_spi_slave_abort(struct spi_controller *controller) if (!pxa25x_ssp_comp(drv_data)) pxa2xx_spi_write(drv_data, SSTO, 0); pxa2xx_spi_flush(drv_data); - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); dev_dbg(&drv_data->pdev->dev, "transfer aborted\n"); @@ -1135,8 +1143,7 @@ static void pxa2xx_spi_handle_err(struct spi_controller *controller, struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP */ - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); /* Clear and disable interrupts and service requests */ write_SSSR_CS(drv_data, drv_data->clear_sr); pxa2xx_spi_write(drv_data, SSCR1, @@ -1161,8 +1168,7 @@ static int pxa2xx_spi_unprepare_transfer(struct spi_controller *controller) struct driver_data *drv_data = spi_controller_get_devdata(controller); /* Disable the SSP now */ - pxa2xx_spi_write(drv_data, SSCR0, - pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE); + pxa2xx_spi_off(drv_data); return 0; } @@ -1423,6 +1429,9 @@ static const struct pci_device_id pxa2xx_spi_pci_compound_match[] = { /* KBL-H */ { PCI_VDEVICE(INTEL, 0xa2a9), LPSS_SPT_SSP }, { PCI_VDEVICE(INTEL, 0xa2aa), LPSS_SPT_SSP }, + /* CML-V */ + { PCI_VDEVICE(INTEL, 0xa3a9), LPSS_SPT_SSP }, + { PCI_VDEVICE(INTEL, 0xa3aa), LPSS_SPT_SSP }, /* BXT A-Step */ { PCI_VDEVICE(INTEL, 0x0ac2), LPSS_BXT_SSP }, { PCI_VDEVICE(INTEL, 0x0ac4), LPSS_BXT_SSP }, diff --git a/drivers/spi/spi-qcom-qspi.c b/drivers/spi/spi-qcom-qspi.c index 250fd60e1678..3c4f83bf7084 100644 --- a/drivers/spi/spi-qcom-qspi.c +++ b/drivers/spi/spi-qcom-qspi.c @@ -137,7 +137,7 @@ enum qspi_clocks { struct qcom_qspi { void __iomem *base; struct device *dev; - struct clk_bulk_data clks[QSPI_NUM_CLKS]; + struct clk_bulk_data *clks; struct qspi_xfer xfer; /* Lock to protect xfer and IRQ accessed registers */ spinlock_t lock; @@ -445,6 +445,13 @@ static int qcom_qspi_probe(struct platform_device *pdev) goto exit_probe_master_put; } + ctrl->clks = devm_kcalloc(dev, QSPI_NUM_CLKS, + sizeof(*ctrl->clks), GFP_KERNEL); + if (!ctrl->clks) { + ret = -ENOMEM; + goto exit_probe_master_put; + } + ctrl->clks[QSPI_CLK_CORE].id = "core"; ctrl->clks[QSPI_CLK_IFACE].id = "iface"; ret = devm_clk_bulk_get(dev, QSPI_NUM_CLKS, ctrl->clks); diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 7222c7689c3c..85575d45901c 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -159,7 +159,7 @@ #define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0 #define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1 #define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */ -#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */ +#define SPCMD_SSLA(i) ((i) << 4) /* SSL Assert Signal Setting */ #define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */ #define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */ #define SPCMD_CPHA 0x0001 /* Clock Phase Setting */ @@ -242,6 +242,7 @@ struct spi_ops { u16 mode_bits; u16 flags; u16 fifo_size; + u8 num_hw_ss; }; /* @@ -426,8 +427,6 @@ static int qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len) return n; } -#define set_config_register(spi, n) spi->ops->set_config_register(spi, n) - static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable) { rspi_write8(rspi, rspi_read8(rspi, 
RSPI_SPCR) | enable, RSPI_SPCR); @@ -620,9 +619,8 @@ no_dma_tx: dmaengine_terminate_all(rspi->ctlr->dma_rx); no_dma_rx: if (ret == -EAGAIN) { - pr_warn_once("%s %s: DMA not available, falling back to PIO\n", - dev_driver_string(&rspi->ctlr->dev), - dev_name(&rspi->ctlr->dev)); + dev_warn_once(&rspi->ctlr->dev, + "DMA not available, falling back to PIO\n"); } return ret; } @@ -936,12 +934,16 @@ static int rspi_prepare_message(struct spi_controller *ctlr, if (spi->mode & SPI_CPHA) rspi->spcmd |= SPCMD_CPHA; + /* Configure slave signal to assert */ + rspi->spcmd |= SPCMD_SSLA(spi->cs_gpiod ? rspi->ctlr->unused_native_cs + : spi->chip_select); + /* CMOS output mode and MOSI signal from previous transfer */ rspi->sppcr = 0; if (spi->mode & SPI_LOOP) rspi->sppcr |= SPPCR_SPLP; - set_config_register(rspi, 8); + rspi->ops->set_config_register(rspi, 8); if (msg->spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) { @@ -1123,6 +1125,7 @@ static const struct spi_ops rspi_ops = { .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, .flags = SPI_CONTROLLER_MUST_TX, .fifo_size = 8, + .num_hw_ss = 2, }; static const struct spi_ops rspi_rz_ops = { @@ -1131,6 +1134,7 @@ static const struct spi_ops rspi_rz_ops = { .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP, .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 8, /* 8 for TX, 32 for RX */ + .num_hw_ss = 1, }; static const struct spi_ops qspi_ops = { @@ -1141,6 +1145,7 @@ static const struct spi_ops qspi_ops = { SPI_RX_DUAL | SPI_RX_QUAD, .flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX, .fifo_size = 32, + .num_hw_ss = 1, }; #ifdef CONFIG_OF @@ -1256,6 +1261,8 @@ static int rspi_probe(struct platform_device *pdev) ctlr->mode_bits = ops->mode_bits; ctlr->flags = ops->flags; ctlr->dev.of_node = pdev->dev.of_node; + ctlr->use_gpio_descriptors = true; + ctlr->max_native_cs = rspi->ops->num_hw_ss; ret = platform_get_irq_byname_optional(pdev, "rx"); if (ret < 0) { @@ -1314,8 +1321,6 @@ error1: static const struct platform_device_id spi_driver_ids[] = { { "rspi", (kernel_ulong_t)&rspi_ops }, - { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops }, - { "qspi", (kernel_ulong_t)&qspi_ops }, {}, }; diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 8f134735291f..1c11a00a2c36 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -14,8 +14,6 @@ #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/err.h> -#include <linux/gpio.h> -#include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> @@ -55,7 +53,6 @@ struct sh_msiof_spi_priv { void *rx_dma_page; dma_addr_t tx_dma_addr; dma_addr_t rx_dma_addr; - unsigned short unused_ss; bool native_cs_inited; bool native_cs_high; bool slave_aborted; @@ -63,140 +60,140 @@ struct sh_msiof_spi_priv { #define MAX_SS 3 /* Maximum number of native chip selects */ -#define TMDR1 0x00 /* Transmit Mode Register 1 */ -#define TMDR2 0x04 /* Transmit Mode Register 2 */ -#define TMDR3 0x08 /* Transmit Mode Register 3 */ -#define RMDR1 0x10 /* Receive Mode Register 1 */ -#define RMDR2 0x14 /* Receive Mode Register 2 */ -#define RMDR3 0x18 /* Receive Mode Register 3 */ -#define TSCR 0x20 /* Transmit Clock Select Register */ -#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */ -#define CTR 0x28 /* Control Register */ -#define FCTR 0x30 /* FIFO Control Register */ -#define STR 0x40 /* Status Register */ -#define IER 0x44 /* Interrupt Enable Register */ -#define TDR1 0x48 /* Transmit Control 
Data Register 1 (SH, A1) */ -#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */ -#define TFDR 0x50 /* Transmit FIFO Data Register */ -#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */ -#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */ -#define RFDR 0x60 /* Receive FIFO Data Register */ - -/* TMDR1 and RMDR1 */ -#define MDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */ -#define MDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */ -#define MDR1_SYNCMD_SPI (2 << 28)/* Level mode/SPI */ -#define MDR1_SYNCMD_LR (3 << 28)/* L/R mode */ -#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ -#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ -#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */ -#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */ -#define MDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */ -#define MDR1_FLD_SHIFT 2 -#define MDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */ -/* TMDR1 */ -#define TMDR1_PCON BIT(30) /* Transfer Signal Connection */ -#define TMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */ -#define TMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */ - -/* TMDR2 and RMDR2 */ -#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */ -#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */ -#define MDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */ - -/* TSCR and RSCR */ -#define SCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */ -#define SCR_BRPS(i) (((i) - 1) << 8) -#define SCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */ -#define SCR_BRDV_DIV_2 0 -#define SCR_BRDV_DIV_4 1 -#define SCR_BRDV_DIV_8 2 -#define SCR_BRDV_DIV_16 3 -#define SCR_BRDV_DIV_32 4 -#define SCR_BRDV_DIV_1 7 - -/* CTR */ -#define CTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */ -#define CTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */ -#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */ -#define CTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */ -#define CTR_RSCKIZ_SCK BIT(29) /* Must match CTR_TSCKIZ_SCK */ -#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */ -#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */ -#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */ -#define CTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */ -#define CTR_TXDIZ_LOW (0 << 22) /* 0 */ -#define CTR_TXDIZ_HIGH (1 << 22) /* 1 */ -#define CTR_TXDIZ_HIZ (2 << 22) /* High-impedance */ -#define CTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */ -#define CTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */ -#define CTR_TXE BIT(9) /* Transmit Enable */ -#define CTR_RXE BIT(8) /* Receive Enable */ -#define CTR_TXRST BIT(1) /* Transmit Reset */ -#define CTR_RXRST BIT(0) /* Receive Reset */ - -/* FCTR */ -#define FCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */ -#define FCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */ -#define FCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */ -#define FCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */ -#define FCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */ -#define FCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */ -#define FCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */ -#define FCTR_TFWM_4 (6 << 29) /* Transfer Request when 
4 empty stages */ -#define FCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */ -#define FCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */ -#define FCTR_TFUA_SHIFT 20 -#define FCTR_TFUA(i) ((i) << FCTR_TFUA_SHIFT) -#define FCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */ -#define FCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stages */ -#define FCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */ -#define FCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */ -#define FCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */ -#define FCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */ -#define FCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */ -#define FCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */ -#define FCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */ -#define FCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */ -#define FCTR_RFUA_SHIFT 4 -#define FCTR_RFUA(i) ((i) << FCTR_RFUA_SHIFT) - -/* STR */ -#define STR_TFEMP BIT(29) /* Transmit FIFO Empty */ -#define STR_TDREQ BIT(28) /* Transmit Data Transfer Request */ -#define STR_TEOF BIT(23) /* Frame Transmission End */ -#define STR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */ -#define STR_TFOVF BIT(20) /* Transmit FIFO Overflow */ -#define STR_TFUDF BIT(19) /* Transmit FIFO Underflow */ -#define STR_RFFUL BIT(13) /* Receive FIFO Full */ -#define STR_RDREQ BIT(12) /* Receive Data Transfer Request */ -#define STR_REOF BIT(7) /* Frame Reception End */ -#define STR_RFSERR BIT(5) /* Receive Frame Synchronization Error */ -#define STR_RFUDF BIT(4) /* Receive FIFO Underflow */ -#define STR_RFOVF BIT(3) /* Receive FIFO Overflow */ - -/* IER */ -#define IER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. Enable */ -#define IER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */ -#define IER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */ -#define IER_TEOFE BIT(23) /* Frame Transmission End Enable */ -#define IER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */ -#define IER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */ -#define IER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */ -#define IER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. 
Enable */ -#define IER_RFFULE BIT(13) /* Receive FIFO Full Enable */ -#define IER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */ -#define IER_REOFE BIT(7) /* Frame Reception End Enable */ -#define IER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */ -#define IER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */ -#define IER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */ +#define SITMDR1 0x00 /* Transmit Mode Register 1 */ +#define SITMDR2 0x04 /* Transmit Mode Register 2 */ +#define SITMDR3 0x08 /* Transmit Mode Register 3 */ +#define SIRMDR1 0x10 /* Receive Mode Register 1 */ +#define SIRMDR2 0x14 /* Receive Mode Register 2 */ +#define SIRMDR3 0x18 /* Receive Mode Register 3 */ +#define SITSCR 0x20 /* Transmit Clock Select Register */ +#define SIRSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */ +#define SICTR 0x28 /* Control Register */ +#define SIFCTR 0x30 /* FIFO Control Register */ +#define SISTR 0x40 /* Status Register */ +#define SIIER 0x44 /* Interrupt Enable Register */ +#define SITDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */ +#define SITDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */ +#define SITFDR 0x50 /* Transmit FIFO Data Register */ +#define SIRDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */ +#define SIRDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */ +#define SIRFDR 0x60 /* Receive FIFO Data Register */ + +/* SITMDR1 and SIRMDR1 */ +#define SIMDR1_TRMD BIT(31) /* Transfer Mode (1 = Master mode) */ +#define SIMDR1_SYNCMD_MASK GENMASK(29, 28) /* SYNC Mode */ +#define SIMDR1_SYNCMD_SPI (2 << 28) /* Level mode/SPI */ +#define SIMDR1_SYNCMD_LR (3 << 28) /* L/R mode */ +#define SIMDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */ +#define SIMDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */ +#define SIMDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */ +#define SIMDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */ +#define SIMDR1_FLD_MASK GENMASK(3, 2) /* Frame Sync Signal Interval (0-3) */ +#define SIMDR1_FLD_SHIFT 2 +#define SIMDR1_XXSTP BIT(0) /* Transmission/Reception Stop on FIFO */ +/* SITMDR1 */ +#define SITMDR1_PCON BIT(30) /* Transfer Signal Connection */ +#define SITMDR1_SYNCCH_MASK GENMASK(27, 26) /* Sync Signal Channel Select */ +#define SITMDR1_SYNCCH_SHIFT 26 /* 0=MSIOF_SYNC, 1=MSIOF_SS1, 2=MSIOF_SS2 */ + +/* SITMDR2 and SIRMDR2 */ +#define SIMDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */ +#define SIMDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1)) */ +#define SIMDR2_GRPMASK1 BIT(0) /* Group Output Mask 1 (SH, A1) */ + +/* SITSCR and SIRSCR */ +#define SISCR_BRPS_MASK GENMASK(12, 8) /* Prescaler Setting (1-32) */ +#define SISCR_BRPS(i) (((i) - 1) << 8) +#define SISCR_BRDV_MASK GENMASK(2, 0) /* Baud Rate Generator's Division Ratio */ +#define SISCR_BRDV_DIV_2 0 +#define SISCR_BRDV_DIV_4 1 +#define SISCR_BRDV_DIV_8 2 +#define SISCR_BRDV_DIV_16 3 +#define SISCR_BRDV_DIV_32 4 +#define SISCR_BRDV_DIV_1 7 + +/* SICTR */ +#define SICTR_TSCKIZ_MASK GENMASK(31, 30) /* Transmit Clock I/O Polarity Select */ +#define SICTR_TSCKIZ_SCK BIT(31) /* Disable SCK when TX disabled */ +#define SICTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */ +#define SICTR_RSCKIZ_MASK GENMASK(29, 28) /* Receive Clock Polarity Select */ +#define SICTR_RSCKIZ_SCK BIT(29) /* Must match SICTR_TSCKIZ_SCK */ +#define SICTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */ +#define SICTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */ +#define SICTR_REDG_SHIFT 26 /* Receive 
Timing (1 = falling edge) */ +#define SICTR_TXDIZ_MASK GENMASK(23, 22) /* Pin Output When TX is Disabled */ +#define SICTR_TXDIZ_LOW (0 << 22) /* 0 */ +#define SICTR_TXDIZ_HIGH (1 << 22) /* 1 */ +#define SICTR_TXDIZ_HIZ (2 << 22) /* High-impedance */ +#define SICTR_TSCKE BIT(15) /* Transmit Serial Clock Output Enable */ +#define SICTR_TFSE BIT(14) /* Transmit Frame Sync Signal Output Enable */ +#define SICTR_TXE BIT(9) /* Transmit Enable */ +#define SICTR_RXE BIT(8) /* Receive Enable */ +#define SICTR_TXRST BIT(1) /* Transmit Reset */ +#define SICTR_RXRST BIT(0) /* Receive Reset */ + +/* SIFCTR */ +#define SIFCTR_TFWM_MASK GENMASK(31, 29) /* Transmit FIFO Watermark */ +#define SIFCTR_TFWM_64 (0 << 29) /* Transfer Request when 64 empty stages */ +#define SIFCTR_TFWM_32 (1 << 29) /* Transfer Request when 32 empty stages */ +#define SIFCTR_TFWM_24 (2 << 29) /* Transfer Request when 24 empty stages */ +#define SIFCTR_TFWM_16 (3 << 29) /* Transfer Request when 16 empty stages */ +#define SIFCTR_TFWM_12 (4 << 29) /* Transfer Request when 12 empty stages */ +#define SIFCTR_TFWM_8 (5 << 29) /* Transfer Request when 8 empty stages */ +#define SIFCTR_TFWM_4 (6 << 29) /* Transfer Request when 4 empty stages */ +#define SIFCTR_TFWM_1 (7 << 29) /* Transfer Request when 1 empty stage */ +#define SIFCTR_TFUA_MASK GENMASK(26, 20) /* Transmit FIFO Usable Area */ +#define SIFCTR_TFUA_SHIFT 20 +#define SIFCTR_TFUA(i) ((i) << SIFCTR_TFUA_SHIFT) +#define SIFCTR_RFWM_MASK GENMASK(15, 13) /* Receive FIFO Watermark */ +#define SIFCTR_RFWM_1 (0 << 13) /* Transfer Request when 1 valid stage */ +#define SIFCTR_RFWM_4 (1 << 13) /* Transfer Request when 4 valid stages */ +#define SIFCTR_RFWM_8 (2 << 13) /* Transfer Request when 8 valid stages */ +#define SIFCTR_RFWM_16 (3 << 13) /* Transfer Request when 16 valid stages */ +#define SIFCTR_RFWM_32 (4 << 13) /* Transfer Request when 32 valid stages */ +#define SIFCTR_RFWM_64 (5 << 13) /* Transfer Request when 64 valid stages */ +#define SIFCTR_RFWM_128 (6 << 13) /* Transfer Request when 128 valid stages */ +#define SIFCTR_RFWM_256 (7 << 13) /* Transfer Request when 256 valid stages */ +#define SIFCTR_RFUA_MASK GENMASK(12, 4) /* Receive FIFO Usable Area (0x40 = full) */ +#define SIFCTR_RFUA_SHIFT 4 +#define SIFCTR_RFUA(i) ((i) << SIFCTR_RFUA_SHIFT) + +/* SISTR */ +#define SISTR_TFEMP BIT(29) /* Transmit FIFO Empty */ +#define SISTR_TDREQ BIT(28) /* Transmit Data Transfer Request */ +#define SISTR_TEOF BIT(23) /* Frame Transmission End */ +#define SISTR_TFSERR BIT(21) /* Transmit Frame Synchronization Error */ +#define SISTR_TFOVF BIT(20) /* Transmit FIFO Overflow */ +#define SISTR_TFUDF BIT(19) /* Transmit FIFO Underflow */ +#define SISTR_RFFUL BIT(13) /* Receive FIFO Full */ +#define SISTR_RDREQ BIT(12) /* Receive Data Transfer Request */ +#define SISTR_REOF BIT(7) /* Frame Reception End */ +#define SISTR_RFSERR BIT(5) /* Receive Frame Synchronization Error */ +#define SISTR_RFUDF BIT(4) /* Receive FIFO Underflow */ +#define SISTR_RFOVF BIT(3) /* Receive FIFO Overflow */ + +/* SIIER */ +#define SIIER_TDMAE BIT(31) /* Transmit Data DMA Transfer Req. 
Enable */ +#define SIIER_TFEMPE BIT(29) /* Transmit FIFO Empty Enable */ +#define SIIER_TDREQE BIT(28) /* Transmit Data Transfer Request Enable */ +#define SIIER_TEOFE BIT(23) /* Frame Transmission End Enable */ +#define SIIER_TFSERRE BIT(21) /* Transmit Frame Sync Error Enable */ +#define SIIER_TFOVFE BIT(20) /* Transmit FIFO Overflow Enable */ +#define SIIER_TFUDFE BIT(19) /* Transmit FIFO Underflow Enable */ +#define SIIER_RDMAE BIT(15) /* Receive Data DMA Transfer Req. Enable */ +#define SIIER_RFFULE BIT(13) /* Receive FIFO Full Enable */ +#define SIIER_RDREQE BIT(12) /* Receive Data Transfer Request Enable */ +#define SIIER_REOFE BIT(7) /* Frame Reception End Enable */ +#define SIIER_RFSERRE BIT(5) /* Receive Frame Sync Error Enable */ +#define SIIER_RFUDFE BIT(4) /* Receive FIFO Underflow Enable */ +#define SIIER_RFOVFE BIT(3) /* Receive FIFO Overflow Enable */ static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs) { switch (reg_offs) { - case TSCR: - case RSCR: + case SITSCR: + case SIRSCR: return ioread16(p->mapbase + reg_offs); default: return ioread32(p->mapbase + reg_offs); @@ -207,8 +204,8 @@ static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs, u32 value) { switch (reg_offs) { - case TSCR: - case RSCR: + case SITSCR: + case SIRSCR: iowrite16(value, p->mapbase + reg_offs); break; default: @@ -223,12 +220,12 @@ static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p, u32 mask = clr | set; u32 data; - data = sh_msiof_read(p, CTR); + data = sh_msiof_read(p, SICTR); data &= ~clr; data |= set; - sh_msiof_write(p, CTR, data); + sh_msiof_write(p, SICTR, data); - return readl_poll_timeout_atomic(p->mapbase + CTR, data, + return readl_poll_timeout_atomic(p->mapbase + SICTR, data, (data & mask) == set, 1, 100); } @@ -237,7 +234,7 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data) struct sh_msiof_spi_priv *p = data; /* just disable the interrupt and wake up */ - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); complete(&p->done); return IRQ_HANDLED; @@ -245,20 +242,20 @@ static irqreturn_t sh_msiof_spi_irq(int irq, void *data) static void sh_msiof_spi_reset_regs(struct sh_msiof_spi_priv *p) { - u32 mask = CTR_TXRST | CTR_RXRST; + u32 mask = SICTR_TXRST | SICTR_RXRST; u32 data; - data = sh_msiof_read(p, CTR); + data = sh_msiof_read(p, SICTR); data |= mask; - sh_msiof_write(p, CTR, data); + sh_msiof_write(p, SICTR, data); - readl_poll_timeout_atomic(p->mapbase + CTR, data, !(data & mask), 1, + readl_poll_timeout_atomic(p->mapbase + SICTR, data, !(data & mask), 1, 100); } static const u32 sh_msiof_spi_div_array[] = { - SCR_BRDV_DIV_1, SCR_BRDV_DIV_2, SCR_BRDV_DIV_4, - SCR_BRDV_DIV_8, SCR_BRDV_DIV_16, SCR_BRDV_DIV_32, + SISCR_BRDV_DIV_1, SISCR_BRDV_DIV_2, SISCR_BRDV_DIV_4, + SISCR_BRDV_DIV_8, SISCR_BRDV_DIV_16, SISCR_BRDV_DIV_32, }; static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, @@ -276,7 +273,7 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, div = DIV_ROUND_UP(parent_rate, spi_hz); if (div <= 1024) { - /* SCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */ + /* SISCR_BRDV_DIV_1 is valid only if BRPS is x 1/1 or x 1/2 */ if (!div_pow && div <= 32 && div > 2) div_pow = 1; @@ -295,10 +292,10 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p, brps = 32; } - scr = sh_msiof_spi_div_array[div_pow] | SCR_BRPS(brps); - sh_msiof_write(p, TSCR, scr); + scr = sh_msiof_spi_div_array[div_pow] | SISCR_BRPS(brps); + sh_msiof_write(p, SITSCR, scr); if (!(p->ctlr->flags & 
SPI_CONTROLLER_MUST_TX)) - sh_msiof_write(p, RSCR, scr); + sh_msiof_write(p, SIRSCR, scr); } static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl) @@ -337,8 +334,8 @@ static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p) return 0; } - val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT; - val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT; + val = sh_msiof_get_delay_bit(p->info->dtdl) << SIMDR1_DTDL_SHIFT; + val |= sh_msiof_get_delay_bit(p->info->syncdl) << SIMDR1_SYNCDL_SHIFT; return val; } @@ -357,54 +354,54 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p, u32 ss, * 1 0 11 11 0 0 * 1 1 11 11 1 1 */ - tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP; - tmp |= !cs_high << MDR1_SYNCAC_SHIFT; - tmp |= lsb_first << MDR1_BITLSB_SHIFT; + tmp = SIMDR1_SYNCMD_SPI | 1 << SIMDR1_FLD_SHIFT | SIMDR1_XXSTP; + tmp |= !cs_high << SIMDR1_SYNCAC_SHIFT; + tmp |= lsb_first << SIMDR1_BITLSB_SHIFT; tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p); if (spi_controller_is_slave(p->ctlr)) { - sh_msiof_write(p, TMDR1, tmp | TMDR1_PCON); + sh_msiof_write(p, SITMDR1, tmp | SITMDR1_PCON); } else { - sh_msiof_write(p, TMDR1, - tmp | MDR1_TRMD | TMDR1_PCON | - (ss < MAX_SS ? ss : 0) << TMDR1_SYNCCH_SHIFT); + sh_msiof_write(p, SITMDR1, + tmp | SIMDR1_TRMD | SITMDR1_PCON | + (ss < MAX_SS ? ss : 0) << SITMDR1_SYNCCH_SHIFT); } if (p->ctlr->flags & SPI_CONTROLLER_MUST_TX) { /* These bits are reserved if RX needs TX */ tmp &= ~0x0000ffff; } - sh_msiof_write(p, RMDR1, tmp); + sh_msiof_write(p, SIRMDR1, tmp); tmp = 0; - tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT; - tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT; + tmp |= SICTR_TSCKIZ_SCK | cpol << SICTR_TSCKIZ_POL_SHIFT; + tmp |= SICTR_RSCKIZ_SCK | cpol << SICTR_RSCKIZ_POL_SHIFT; edge = cpol ^ !cpha; - tmp |= edge << CTR_TEDG_SHIFT; - tmp |= edge << CTR_REDG_SHIFT; - tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW; - sh_msiof_write(p, CTR, tmp); + tmp |= edge << SICTR_TEDG_SHIFT; + tmp |= edge << SICTR_REDG_SHIFT; + tmp |= tx_hi_z ? 
SICTR_TXDIZ_HIZ : SICTR_TXDIZ_LOW; + sh_msiof_write(p, SICTR, tmp); } static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p, const void *tx_buf, void *rx_buf, u32 bits, u32 words) { - u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words); + u32 dr2 = SIMDR2_BITLEN1(bits) | SIMDR2_WDLEN1(words); if (tx_buf || (p->ctlr->flags & SPI_CONTROLLER_MUST_TX)) - sh_msiof_write(p, TMDR2, dr2); + sh_msiof_write(p, SITMDR2, dr2); else - sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1); + sh_msiof_write(p, SITMDR2, dr2 | SIMDR2_GRPMASK1); if (rx_buf) - sh_msiof_write(p, RMDR2, dr2); + sh_msiof_write(p, SIRMDR2, dr2); } static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p) { - sh_msiof_write(p, STR, - sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ)); + sh_msiof_write(p, SISTR, + sh_msiof_read(p, SISTR) & ~(SISTR_TDREQ | SISTR_RDREQ)); } static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, @@ -414,7 +411,7 @@ static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_8[k] << fs); + sh_msiof_write(p, SITFDR, buf_8[k] << fs); } static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, @@ -424,7 +421,7 @@ static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_16[k] << fs); + sh_msiof_write(p, SITFDR, buf_16[k] << fs); } static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, @@ -434,7 +431,7 @@ static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs); + sh_msiof_write(p, SITFDR, get_unaligned(&buf_16[k]) << fs); } static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, @@ -444,7 +441,7 @@ static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, buf_32[k] << fs); + sh_msiof_write(p, SITFDR, buf_32[k] << fs); } static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, @@ -454,7 +451,7 @@ static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs); + sh_msiof_write(p, SITFDR, get_unaligned(&buf_32[k]) << fs); } static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p, @@ -464,7 +461,7 @@ static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs)); + sh_msiof_write(p, SITFDR, swab32(buf_32[k] << fs)); } static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p, @@ -474,7 +471,7 @@ static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs)); + sh_msiof_write(p, SITFDR, swab32(get_unaligned(&buf_32[k]) << fs)); } static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, @@ -484,7 +481,7 @@ static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_8[k] = sh_msiof_read(p, RFDR) >> fs; + buf_8[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, @@ -494,7 +491,7 @@ static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_16[k] = sh_msiof_read(p, RFDR) >> fs; + buf_16[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void 
sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, @@ -504,7 +501,7 @@ static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]); + put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_16[k]); } static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, @@ -514,7 +511,7 @@ static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_32[k] = sh_msiof_read(p, RFDR) >> fs; + buf_32[k] = sh_msiof_read(p, SIRFDR) >> fs; } static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, @@ -524,7 +521,7 @@ static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]); + put_unaligned(sh_msiof_read(p, SIRFDR) >> fs, &buf_32[k]); } static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p, @@ -534,7 +531,7 @@ static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs); + buf_32[k] = swab32(sh_msiof_read(p, SIRFDR) >> fs); } static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, @@ -544,7 +541,7 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p, int k; for (k = 0; k < words; k++) - put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]); + put_unaligned(swab32(sh_msiof_read(p, SIRFDR) >> fs), &buf_32[k]); } static int sh_msiof_spi_setup(struct spi_device *spi) @@ -561,17 +558,17 @@ static int sh_msiof_spi_setup(struct spi_device *spi) return 0; /* Configure native chip select mode/polarity early */ - clr = MDR1_SYNCMD_MASK; - set = MDR1_SYNCMD_SPI; + clr = SIMDR1_SYNCMD_MASK; + set = SIMDR1_SYNCMD_SPI; if (spi->mode & SPI_CS_HIGH) - clr |= BIT(MDR1_SYNCAC_SHIFT); + clr |= BIT(SIMDR1_SYNCAC_SHIFT); else - set |= BIT(MDR1_SYNCAC_SHIFT); + set |= BIT(SIMDR1_SYNCAC_SHIFT); pm_runtime_get_sync(&p->pdev->dev); - tmp = sh_msiof_read(p, TMDR1) & ~clr; - sh_msiof_write(p, TMDR1, tmp | set | MDR1_TRMD | TMDR1_PCON); - tmp = sh_msiof_read(p, RMDR1) & ~clr; - sh_msiof_write(p, RMDR1, tmp | set); + tmp = sh_msiof_read(p, SITMDR1) & ~clr; + sh_msiof_write(p, SITMDR1, tmp | set | SIMDR1_TRMD | SITMDR1_PCON); + tmp = sh_msiof_read(p, SIRMDR1) & ~clr; + sh_msiof_write(p, SIRMDR1, tmp | set); pm_runtime_put(&p->pdev->dev); p->native_cs_high = spi->mode & SPI_CS_HIGH; p->native_cs_inited = true; @@ -587,7 +584,7 @@ static int sh_msiof_prepare_message(struct spi_controller *ctlr, /* Configure pins before asserting CS */ if (spi->cs_gpiod) { - ss = p->unused_ss; + ss = ctlr->unused_native_cs; cs_high = p->native_cs_high; } else { ss = spi->chip_select; @@ -607,15 +604,15 @@ static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf) /* setup clock and rx/tx signals */ if (!slave) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TSCKE); if (rx_buf && !ret) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_RXE); if (!ret) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TXE); /* start by setting frame bit */ if (!ret && !slave) - ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE); + ret = sh_msiof_modify_ctr_wait(p, 0, SICTR_TFSE); return ret; } @@ -627,13 +624,13 @@ static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf) /* shut down frame, rx/tx and clock 
signals */ if (!slave) - ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_TFSE, 0); if (!ret) - ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_TXE, 0); if (rx_buf && !ret) - ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_RXE, 0); if (!ret && !slave) - ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0); + ret = sh_msiof_modify_ctr_wait(p, SICTR_TSCKE, 0); return ret; } @@ -688,11 +685,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p, fifo_shift = 32 - bits; /* default FIFO watermarks for PIO */ - sh_msiof_write(p, FCTR, 0); + sh_msiof_write(p, SIFCTR, 0); /* setup msiof transfer mode registers */ sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words); - sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE); + sh_msiof_write(p, SIIER, SIIER_TEOFE | SIIER_REOFE); /* write tx fifo */ if (tx_buf) @@ -731,7 +728,7 @@ stop_reset: sh_msiof_reset_str(p); sh_msiof_spi_stop(p, rx_buf); stop_ier: - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); return ret; } @@ -750,7 +747,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, /* First prepare and submit the DMA request(s), as this may fail */ if (rx) { - ier_bits |= IER_RDREQE | IER_RDMAE; + ier_bits |= SIIER_RDREQE | SIIER_RDMAE; desc_rx = dmaengine_prep_slave_single(p->ctlr->dma_rx, p->rx_dma_addr, len, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); @@ -765,7 +762,7 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } if (tx) { - ier_bits |= IER_TDREQE | IER_TDMAE; + ier_bits |= SIIER_TDREQE | SIIER_TDMAE; dma_sync_single_for_device(p->ctlr->dma_tx->device->dev, p->tx_dma_addr, len, DMA_TO_DEVICE); desc_tx = dmaengine_prep_slave_single(p->ctlr->dma_tx, @@ -786,12 +783,12 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, } /* 1 stage FIFO watermarks for DMA */ - sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1); + sh_msiof_write(p, SIFCTR, SIFCTR_TFWM_1 | SIFCTR_RFWM_1); /* setup msiof transfer mode registers (32-bit words) */ sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4); - sh_msiof_write(p, IER, ier_bits); + sh_msiof_write(p, SIIER, ier_bits); reinit_completion(&p->done); if (tx) @@ -823,10 +820,10 @@ static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx, if (ret) goto stop_reset; - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); } else { /* wait for tx fifo to be emptied */ - sh_msiof_write(p, IER, IER_TEOFE); + sh_msiof_write(p, SIIER, SIIER_TEOFE); ret = sh_msiof_wait_for_completion(p, &p->done); if (ret) goto stop_reset; @@ -856,7 +853,7 @@ stop_dma: no_dma_tx: if (rx) dmaengine_terminate_all(p->ctlr->dma_rx); - sh_msiof_write(p, IER, 0); + sh_msiof_write(p, SIIER, 0); return ret; } @@ -1124,46 +1121,6 @@ static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev) } #endif -static int sh_msiof_get_cs_gpios(struct sh_msiof_spi_priv *p) -{ - struct device *dev = &p->pdev->dev; - unsigned int used_ss_mask = 0; - unsigned int cs_gpios = 0; - unsigned int num_cs, i; - int ret; - - ret = gpiod_count(dev, "cs"); - if (ret <= 0) - return 0; - - num_cs = max_t(unsigned int, ret, p->ctlr->num_chipselect); - for (i = 0; i < num_cs; i++) { - struct gpio_desc *gpiod; - - gpiod = devm_gpiod_get_index(dev, "cs", i, GPIOD_ASIS); - if (!IS_ERR(gpiod)) { - devm_gpiod_put(dev, gpiod); - cs_gpios++; - continue; - } - - if (PTR_ERR(gpiod) != -ENOENT) - return PTR_ERR(gpiod); - - if (i >= 
MAX_SS) { - dev_err(dev, "Invalid native chip select %d\n", i); - return -EINVAL; - } - used_ss_mask |= BIT(i); - } - p->unused_ss = ffz(used_ss_mask); - if (cs_gpios && p->unused_ss >= MAX_SS) { - dev_err(dev, "No unused native chip select available\n"); - return -EINVAL; - } - return 0; -} - static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev, enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr) { @@ -1232,12 +1189,12 @@ static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p) ctlr = p->ctlr; ctlr->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV, - dma_tx_id, res->start + TFDR); + dma_tx_id, res->start + SITFDR); if (!ctlr->dma_tx) return -ENODEV; ctlr->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM, - dma_rx_id, res->start + RFDR); + dma_rx_id, res->start + SIRFDR); if (!ctlr->dma_rx) goto free_tx_chan; @@ -1373,17 +1330,12 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) if (p->info->rx_fifo_override) p->rx_fifo_size = p->info->rx_fifo_override; - /* Setup GPIO chip selects */ - ctlr->num_chipselect = p->info->num_chipselect; - ret = sh_msiof_get_cs_gpios(p); - if (ret) - goto err1; - /* init controller code */ ctlr->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; ctlr->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE; ctlr->flags = chipdata->ctlr_flags; ctlr->bus_num = pdev->id; + ctlr->num_chipselect = p->info->num_chipselect; ctlr->dev.of_node = pdev->dev.of_node; ctlr->setup = sh_msiof_spi_setup; ctlr->prepare_message = sh_msiof_prepare_message; @@ -1392,6 +1344,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) ctlr->auto_runtime_pm = true; ctlr->transfer_one = sh_msiof_transfer_one; ctlr->use_gpio_descriptors = true; + ctlr->max_native_cs = MAX_SS; ret = sh_msiof_request_dma(p); if (ret < 0) diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c index e1e639191557..8419e6722e17 100644 --- a/drivers/spi/spi-sirf.c +++ b/drivers/spi/spi-sirf.c @@ -1126,16 +1126,16 @@ static int spi_sirfsoc_probe(struct platform_device *pdev) sspi->bitbang.master->dev.of_node = pdev->dev.of_node; /* request DMA channels */ - sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx"); - if (!sspi->rx_chan) { + sspi->rx_chan = dma_request_chan(&pdev->dev, "rx"); + if (IS_ERR(sspi->rx_chan)) { dev_err(&pdev->dev, "can not allocate rx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(sspi->rx_chan); goto free_master; } - sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx"); - if (!sspi->tx_chan) { + sspi->tx_chan = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR(sspi->tx_chan)) { dev_err(&pdev->dev, "can not allocate tx dma channel\n"); - ret = -ENODEV; + ret = PTR_ERR(sspi->tx_chan); goto free_rx_dma; } diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 4e726929bb4f..4ef569b47aa6 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -470,10 +470,11 @@ static int stm32_qspi_setup(struct spi_device *spi) return 0; } -static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) +static int stm32_qspi_dma_setup(struct stm32_qspi *qspi) { struct dma_slave_config dma_cfg; struct device *dev = qspi->dev; + int ret = 0; memset(&dma_cfg, 0, sizeof(dma_cfg)); @@ -484,8 +485,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) dma_cfg.src_maxburst = 4; dma_cfg.dst_maxburst = 4; - qspi->dma_chrx = dma_request_slave_channel(dev, "rx"); - if (qspi->dma_chrx) { + qspi->dma_chrx = dma_request_chan(dev, "rx"); + if (IS_ERR(qspi->dma_chrx)) { + ret = 
PTR_ERR(qspi->dma_chrx); + qspi->dma_chrx = NULL; + if (ret == -EPROBE_DEFER) + goto out; + } else { if (dmaengine_slave_config(qspi->dma_chrx, &dma_cfg)) { dev_err(dev, "dma rx config failed\n"); dma_release_channel(qspi->dma_chrx); @@ -493,8 +499,11 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) } } - qspi->dma_chtx = dma_request_slave_channel(dev, "tx"); - if (qspi->dma_chtx) { + qspi->dma_chtx = dma_request_chan(dev, "tx"); + if (IS_ERR(qspi->dma_chtx)) { + ret = PTR_ERR(qspi->dma_chtx); + qspi->dma_chtx = NULL; + } else { if (dmaengine_slave_config(qspi->dma_chtx, &dma_cfg)) { dev_err(dev, "dma tx config failed\n"); dma_release_channel(qspi->dma_chtx); @@ -502,7 +511,13 @@ static void stm32_qspi_dma_setup(struct stm32_qspi *qspi) } } +out: init_completion(&qspi->dma_completion); + + if (ret != -EPROBE_DEFER) + ret = 0; + + return ret; } static void stm32_qspi_dma_free(struct stm32_qspi *qspi) @@ -608,7 +623,10 @@ static int stm32_qspi_probe(struct platform_device *pdev) qspi->dev = dev; platform_set_drvdata(pdev, qspi); - stm32_qspi_dma_setup(qspi); + ret = stm32_qspi_dma_setup(qspi); + if (ret) + goto err; + mutex_init(&qspi->lock); ctrl->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index b222ce8d083e..e041f9c4ec47 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -9,7 +9,6 @@ #include <linux/clk.h> #include <linux/delay.h> #include <linux/dmaengine.h> -#include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/module.h> @@ -974,29 +973,6 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) } /** - * stm32_spi_setup - setup device chip select - */ -static int stm32_spi_setup(struct spi_device *spi_dev) -{ - int ret = 0; - - if (!gpio_is_valid(spi_dev->cs_gpio)) { - dev_err(&spi_dev->dev, "%d is not a valid gpio\n", - spi_dev->cs_gpio); - return -EINVAL; - } - - dev_dbg(&spi_dev->dev, "%s: set gpio%d output %s\n", __func__, - spi_dev->cs_gpio, - (spi_dev->mode & SPI_CS_HIGH) ? 
"low" : "high"); - - ret = gpio_direction_output(spi_dev->cs_gpio, - !(spi_dev->mode & SPI_CS_HIGH)); - - return ret; -} - -/** * stm32_spi_prepare_msg - set up the controller to transfer a single message */ static int stm32_spi_prepare_msg(struct spi_master *master, @@ -1810,7 +1786,7 @@ static int stm32_spi_probe(struct platform_device *pdev) struct spi_master *master; struct stm32_spi *spi; struct resource *res; - int i, ret; + int ret; master = spi_alloc_master(&pdev->dev, sizeof(struct stm32_spi)); if (!master) { @@ -1898,22 +1874,34 @@ static int stm32_spi_probe(struct platform_device *pdev) master->bits_per_word_mask = spi->cfg->get_bpw_mask(spi); master->max_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_min; master->min_speed_hz = spi->clk_rate / spi->cfg->baud_rate_div_max; - master->setup = stm32_spi_setup; + master->use_gpio_descriptors = true; master->prepare_message = stm32_spi_prepare_msg; master->transfer_one = stm32_spi_transfer_one; master->unprepare_message = stm32_spi_unprepare_msg; - spi->dma_tx = dma_request_slave_channel(spi->dev, "tx"); - if (!spi->dma_tx) + spi->dma_tx = dma_request_chan(spi->dev, "tx"); + if (IS_ERR(spi->dma_tx)) { + ret = PTR_ERR(spi->dma_tx); + spi->dma_tx = NULL; + if (ret == -EPROBE_DEFER) + goto err_clk_disable; + dev_warn(&pdev->dev, "failed to request tx dma channel\n"); - else + } else { master->dma_tx = spi->dma_tx; + } + + spi->dma_rx = dma_request_chan(spi->dev, "rx"); + if (IS_ERR(spi->dma_rx)) { + ret = PTR_ERR(spi->dma_rx); + spi->dma_rx = NULL; + if (ret == -EPROBE_DEFER) + goto err_dma_release; - spi->dma_rx = dma_request_slave_channel(spi->dev, "rx"); - if (!spi->dma_rx) dev_warn(&pdev->dev, "failed to request rx dma channel\n"); - else + } else { master->dma_rx = spi->dma_rx; + } if (spi->dma_tx || spi->dma_rx) master->can_dma = stm32_spi_can_dma; @@ -1925,43 +1913,26 @@ static int stm32_spi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "spi master registration failed: %d\n", ret); - goto err_dma_release; + goto err_pm_disable; } - if (!master->cs_gpios) { + if (!master->cs_gpiods) { dev_err(&pdev->dev, "no CS gpios available\n"); ret = -EINVAL; - goto err_dma_release; - } - - for (i = 0; i < master->num_chipselect; i++) { - if (!gpio_is_valid(master->cs_gpios[i])) { - dev_err(&pdev->dev, "%i is not a valid gpio\n", - master->cs_gpios[i]); - ret = -EINVAL; - goto err_dma_release; - } - - ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i], - DRIVER_NAME); - if (ret) { - dev_err(&pdev->dev, "can't get CS gpio %i\n", - master->cs_gpios[i]); - goto err_dma_release; - } + goto err_pm_disable; } dev_info(&pdev->dev, "driver initialized\n"); return 0; +err_pm_disable: + pm_runtime_disable(&pdev->dev); err_dma_release: if (spi->dma_tx) dma_release_channel(spi->dma_tx); if (spi->dma_rx) dma_release_channel(spi->dma_rx); - - pm_runtime_disable(&pdev->dev); err_clk_disable: clk_disable_unprepare(spi->clk); err_master_put: diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index fc40ab146c86..83edabdb41ad 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -269,10 +269,10 @@ static unsigned tegra_spi_calculate_curr_xfer_param( if ((bits_per_word == 8 || bits_per_word == 16 || bits_per_word == 32) && t->len > 3) { - tspi->is_packed = 1; + tspi->is_packed = true; tspi->words_per_32bit = 32/bits_per_word; } else { - tspi->is_packed = 0; + tspi->is_packed = false; tspi->words_per_32bit = 1; } diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 
66dcb6128539..366a3e5cca6b 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -80,8 +80,6 @@ struct ti_qspi { #define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000) -#define QSPI_FCLK 192000000 - /* Clock Control */ #define QSPI_CLK_EN (1 << 31) #define QSPI_CLK_DIV_MAX 0xffff @@ -316,6 +314,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t, { int wlen; unsigned int cmd; + u32 rx; + u8 rxlen, rx_wlen; u8 *rxbuf; rxbuf = t->rx_buf; @@ -332,20 +332,67 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t, break; } wlen = t->bits_per_word >> 3; /* in bytes */ + rx_wlen = wlen; while (count) { dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); if (qspi_is_busy(qspi)) return -EBUSY; + switch (wlen) { + case 1: + /* + * Optimize the 8-bit words transfers, as used by + * the SPI flash devices. + */ + if (count >= QSPI_WLEN_MAX_BYTES) { + rxlen = QSPI_WLEN_MAX_BYTES; + } else { + rxlen = min(count, 4); + } + rx_wlen = rxlen << 3; + cmd &= ~QSPI_WLEN_MASK; + cmd |= QSPI_WLEN(rx_wlen); + break; + default: + rxlen = wlen; + break; + } + ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); if (ti_qspi_poll_wc(qspi)) { dev_err(qspi->dev, "read timed out\n"); return -ETIMEDOUT; } + switch (wlen) { case 1: - *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG); + /* + * Optimize the 8-bit words transfers, as used by + * the SPI flash devices. + */ + if (count >= QSPI_WLEN_MAX_BYTES) { + u32 *rxp = (u32 *) rxbuf; + rx = readl(qspi->base + QSPI_SPI_DATA_REG_3); + *rxp++ = be32_to_cpu(rx); + rx = readl(qspi->base + QSPI_SPI_DATA_REG_2); + *rxp++ = be32_to_cpu(rx); + rx = readl(qspi->base + QSPI_SPI_DATA_REG_1); + *rxp++ = be32_to_cpu(rx); + rx = readl(qspi->base + QSPI_SPI_DATA_REG); + *rxp++ = be32_to_cpu(rx); + } else { + u8 *rxp = rxbuf; + rx = readl(qspi->base + QSPI_SPI_DATA_REG); + if (rx_wlen >= 8) + *rxp++ = rx >> (rx_wlen - 8); + if (rx_wlen >= 16) + *rxp++ = rx >> (rx_wlen - 16); + if (rx_wlen >= 24) + *rxp++ = rx >> (rx_wlen - 24); + if (rx_wlen >= 32) + *rxp++ = rx; + } break; case 2: *((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG); @@ -354,8 +401,8 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t, *((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG); break; } - rxbuf += wlen; - count -= wlen; + rxbuf += rxlen; + count -= rxlen; } return 0; @@ -527,6 +574,35 @@ static void ti_qspi_setup_mmap_read(struct spi_device *spi, u8 opcode, QSPI_SPI_SETUP_REG(spi->chip_select)); } +static int ti_qspi_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) +{ + struct ti_qspi *qspi = spi_controller_get_devdata(mem->spi->master); + size_t max_len; + + if (op->data.dir == SPI_MEM_DATA_IN) { + if (op->addr.val < qspi->mmap_size) { + /* Limit MMIO to the mmaped region */ + if (op->addr.val + op->data.nbytes > qspi->mmap_size) { + max_len = qspi->mmap_size - op->addr.val; + op->data.nbytes = min((size_t) op->data.nbytes, + max_len); + } + } else { + /* + * Use fallback mode (SW generated transfers) above the + * mmaped region. + * Adjust size to comply with the QSPI max frame length. 
+ */ + max_len = QSPI_FRAME; + max_len -= 1 + op->addr.nbytes + op->dummy.nbytes; + op->data.nbytes = min((size_t) op->data.nbytes, + max_len); + } + } + + return 0; +} + static int ti_qspi_exec_mem_op(struct spi_mem *mem, const struct spi_mem_op *op) { @@ -577,6 +653,7 @@ static int ti_qspi_exec_mem_op(struct spi_mem *mem, static const struct spi_controller_mem_ops ti_qspi_mem_ops = { .exec_op = ti_qspi_exec_mem_op, + .adjust_op_size = ti_qspi_adjust_op_size, }; static int ti_qspi_start_transfer_one(struct spi_master *master, diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 223353fa2d8a..d7ea6af74743 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -863,7 +863,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) /* Set Tx DMA */ param = &dma->param_tx; param->dma_dev = &dma_dev->dev; - param->chan_id = data->ch * 2; /* Tx = 0, 2 */; + param->chan_id = data->ch * 2; /* Tx = 0, 2 */ param->tx_reg = data->io_base_addr + PCH_SPDWR; param->width = width; chan = dma_request_channel(mask, pch_spi_filter, param); @@ -878,7 +878,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) /* Set Rx DMA */ param = &dma->param_rx; param->dma_dev = &dma_dev->dev; - param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */; + param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */ param->rx_reg = data->io_base_addr + PCH_SPDRR; param->width = width; chan = dma_request_channel(mask, pch_spi_filter, param); diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index ce9b30112e26..0fa50979644d 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -8,6 +8,7 @@ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dmaengine.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> @@ -23,6 +24,7 @@ struct uniphier_spi_priv { void __iomem *base; + dma_addr_t base_dma_addr; struct clk *clk; struct spi_master *master; struct completion xfer_done; @@ -32,6 +34,7 @@ struct uniphier_spi_priv { unsigned int rx_bytes; const u8 *tx_buf; u8 *rx_buf; + atomic_t dma_busy; bool is_save_param; u8 bits_per_word; @@ -61,11 +64,16 @@ struct uniphier_spi_priv { #define SSI_FPS_FSTRT BIT(14) #define SSI_SR 0x14 +#define SSI_SR_BUSY BIT(7) #define SSI_SR_RNE BIT(0) #define SSI_IE 0x18 +#define SSI_IE_TCIE BIT(4) #define SSI_IE_RCIE BIT(3) +#define SSI_IE_TXRE BIT(2) +#define SSI_IE_RXRE BIT(1) #define SSI_IE_RORIE BIT(0) +#define SSI_IE_ALL_MASK GENMASK(4, 0) #define SSI_IS 0x1c #define SSI_IS_RXRS BIT(9) @@ -87,15 +95,19 @@ struct uniphier_spi_priv { #define SSI_RXDR 0x24 #define SSI_FIFO_DEPTH 8U +#define SSI_FIFO_BURST_NUM 1 + +#define SSI_DMA_RX_BUSY BIT(1) +#define SSI_DMA_TX_BUSY BIT(0) static inline unsigned int bytes_per_word(unsigned int bits) { return bits <= 8 ? 1 : (bits <= 16 ? 
2 : 4); } -static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask) +static inline void uniphier_spi_irq_enable(struct uniphier_spi_priv *priv, + u32 mask) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master); u32 val; val = readl(priv->base + SSI_IE); @@ -103,9 +115,9 @@ static inline void uniphier_spi_irq_enable(struct spi_device *spi, u32 mask) writel(val, priv->base + SSI_IE); } -static inline void uniphier_spi_irq_disable(struct spi_device *spi, u32 mask) +static inline void uniphier_spi_irq_disable(struct uniphier_spi_priv *priv, + u32 mask) { - struct uniphier_spi_priv *priv = spi_master_get_devdata(spi->master); u32 val; val = readl(priv->base + SSI_IE); @@ -334,6 +346,128 @@ static void uniphier_spi_set_cs(struct spi_device *spi, bool enable) writel(val, priv->base + SSI_FPS); } +static bool uniphier_spi_can_dma(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *t) +{ + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + unsigned int bpw = bytes_per_word(priv->bits_per_word); + + if ((!master->dma_tx && !master->dma_rx) + || (!master->dma_tx && t->tx_buf) + || (!master->dma_rx && t->rx_buf)) + return false; + + return DIV_ROUND_UP(t->len, bpw) > SSI_FIFO_DEPTH; +} + +static void uniphier_spi_dma_rxcb(void *data) +{ + struct spi_master *master = data; + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + int state = atomic_fetch_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy); + + uniphier_spi_irq_disable(priv, SSI_IE_RXRE); + + if (!(state & SSI_DMA_TX_BUSY)) + spi_finalize_current_transfer(master); +} + +static void uniphier_spi_dma_txcb(void *data) +{ + struct spi_master *master = data; + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + int state = atomic_fetch_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy); + + uniphier_spi_irq_disable(priv, SSI_IE_TXRE); + + if (!(state & SSI_DMA_RX_BUSY)) + spi_finalize_current_transfer(master); +} + +static int uniphier_spi_transfer_one_dma(struct spi_master *master, + struct spi_device *spi, + struct spi_transfer *t) +{ + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL; + int buswidth; + + atomic_set(&priv->dma_busy, 0); + + uniphier_spi_set_fifo_threshold(priv, SSI_FIFO_BURST_NUM); + + if (priv->bits_per_word <= 8) + buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE; + else if (priv->bits_per_word <= 16) + buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES; + else + buswidth = DMA_SLAVE_BUSWIDTH_4_BYTES; + + if (priv->rx_buf) { + struct dma_slave_config rxconf = { + .direction = DMA_DEV_TO_MEM, + .src_addr = priv->base_dma_addr + SSI_RXDR, + .src_addr_width = buswidth, + .src_maxburst = SSI_FIFO_BURST_NUM, + }; + + dmaengine_slave_config(master->dma_rx, &rxconf); + + rxdesc = dmaengine_prep_slave_sg( + master->dma_rx, + t->rx_sg.sgl, t->rx_sg.nents, + DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxdesc) + goto out_err_prep; + + rxdesc->callback = uniphier_spi_dma_rxcb; + rxdesc->callback_param = master; + + uniphier_spi_irq_enable(priv, SSI_IE_RXRE); + atomic_or(SSI_DMA_RX_BUSY, &priv->dma_busy); + + dmaengine_submit(rxdesc); + dma_async_issue_pending(master->dma_rx); + } + + if (priv->tx_buf) { + struct dma_slave_config txconf = { + .direction = DMA_MEM_TO_DEV, + .dst_addr = priv->base_dma_addr + SSI_TXDR, + .dst_addr_width = buswidth, + .dst_maxburst = SSI_FIFO_BURST_NUM, + }; + + dmaengine_slave_config(master->dma_tx, &txconf); + + txdesc = dmaengine_prep_slave_sg( + 
master->dma_tx, + t->tx_sg.sgl, t->tx_sg.nents, + DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!txdesc) + goto out_err_prep; + + txdesc->callback = uniphier_spi_dma_txcb; + txdesc->callback_param = master; + + uniphier_spi_irq_enable(priv, SSI_IE_TXRE); + atomic_or(SSI_DMA_TX_BUSY, &priv->dma_busy); + + dmaengine_submit(txdesc); + dma_async_issue_pending(master->dma_tx); + } + + /* signal that we need to wait for completion */ + return (priv->tx_buf || priv->rx_buf); + +out_err_prep: + if (rxdesc) + dmaengine_terminate_sync(master->dma_rx); + + return -EINVAL; +} + static int uniphier_spi_transfer_one_irq(struct spi_master *master, struct spi_device *spi, struct spi_transfer *t) @@ -346,12 +480,12 @@ static int uniphier_spi_transfer_one_irq(struct spi_master *master, uniphier_spi_fill_tx_fifo(priv); - uniphier_spi_irq_enable(spi, SSI_IE_RCIE | SSI_IE_RORIE); + uniphier_spi_irq_enable(priv, SSI_IE_RCIE | SSI_IE_RORIE); time_left = wait_for_completion_timeout(&priv->xfer_done, msecs_to_jiffies(SSI_TIMEOUT_MS)); - uniphier_spi_irq_disable(spi, SSI_IE_RCIE | SSI_IE_RORIE); + uniphier_spi_irq_disable(priv, SSI_IE_RCIE | SSI_IE_RORIE); if (!time_left) { dev_err(dev, "transfer timeout.\n"); @@ -395,6 +529,7 @@ static int uniphier_spi_transfer_one(struct spi_master *master, { struct uniphier_spi_priv *priv = spi_master_get_devdata(master); unsigned long threshold; + bool use_dma; /* Terminate and return success for 0 byte length transfer */ if (!t->len) @@ -402,6 +537,10 @@ static int uniphier_spi_transfer_one(struct spi_master *master, uniphier_spi_setup_transfer(spi, t); + use_dma = master->can_dma ? master->can_dma(master, spi, t) : false; + if (use_dma) + return uniphier_spi_transfer_one_dma(master, spi, t); + /* * If the transfer operation will take longer than * SSI_POLL_TIMEOUT_US, it should use irq. 
@@ -432,6 +571,32 @@ static int uniphier_spi_unprepare_transfer_hardware(struct spi_master *master) return 0; } +static void uniphier_spi_handle_err(struct spi_master *master, + struct spi_message *msg) +{ + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); + u32 val; + + /* stop running spi transfer */ + writel(0, priv->base + SSI_CTL); + + /* reset FIFOs */ + val = SSI_FC_TXFFL | SSI_FC_RXFFL; + writel(val, priv->base + SSI_FC); + + uniphier_spi_irq_disable(priv, SSI_IE_ALL_MASK); + + if (atomic_read(&priv->dma_busy) & SSI_DMA_TX_BUSY) { + dmaengine_terminate_async(master->dma_tx); + atomic_andnot(SSI_DMA_TX_BUSY, &priv->dma_busy); + } + + if (atomic_read(&priv->dma_busy) & SSI_DMA_RX_BUSY) { + dmaengine_terminate_async(master->dma_rx); + atomic_andnot(SSI_DMA_RX_BUSY, &priv->dma_busy); + } +} + static irqreturn_t uniphier_spi_handler(int irq, void *dev_id) { struct uniphier_spi_priv *priv = dev_id; @@ -477,6 +642,9 @@ static int uniphier_spi_probe(struct platform_device *pdev) { struct uniphier_spi_priv *priv; struct spi_master *master; + struct resource *res; + struct dma_slave_caps caps; + u32 dma_tx_burst = 0, dma_rx_burst = 0; unsigned long clk_rate; int irq; int ret; @@ -491,11 +659,13 @@ static int uniphier_spi_probe(struct platform_device *pdev) priv->master = master; priv->is_save_param = false; - priv->base = devm_platform_ioremap_resource(pdev, 0); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + priv->base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(priv->base)) { ret = PTR_ERR(priv->base); goto out_master_put; } + priv->base_dma_addr = res->start; priv->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(priv->clk)) { @@ -538,7 +708,45 @@ static int uniphier_spi_probe(struct platform_device *pdev) = uniphier_spi_prepare_transfer_hardware; master->unprepare_transfer_hardware = uniphier_spi_unprepare_transfer_hardware; + master->handle_err = uniphier_spi_handle_err; + master->can_dma = uniphier_spi_can_dma; + master->num_chipselect = 1; + master->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; + + master->dma_tx = dma_request_chan(&pdev->dev, "tx"); + if (IS_ERR_OR_NULL(master->dma_tx)) { + if (PTR_ERR(master->dma_tx) == -EPROBE_DEFER) + goto out_disable_clk; + master->dma_tx = NULL; + dma_tx_burst = INT_MAX; + } else { + ret = dma_get_slave_caps(master->dma_tx, &caps); + if (ret) { + dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n", + ret); + goto out_disable_clk; + } + dma_tx_burst = caps.max_burst; + } + + master->dma_rx = dma_request_chan(&pdev->dev, "rx"); + if (IS_ERR_OR_NULL(master->dma_rx)) { + if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) + goto out_disable_clk; + master->dma_rx = NULL; + dma_rx_burst = INT_MAX; + } else { + ret = dma_get_slave_caps(master->dma_rx, &caps); + if (ret) { + dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n", + ret); + goto out_disable_clk; + } + dma_rx_burst = caps.max_burst; + } + + master->max_dma_len = min(dma_tx_burst, dma_rx_burst); ret = devm_spi_register_master(&pdev->dev, master); if (ret) @@ -558,6 +766,11 @@ static int uniphier_spi_remove(struct platform_device *pdev) { struct uniphier_spi_priv *priv = platform_get_drvdata(pdev); + if (priv->master->dma_tx) + dma_release_channel(priv->master->dma_tx); + if (priv->master->dma_rx) + dma_release_channel(priv->master->dma_rx); + clk_disable_unprepare(priv->clk); return 0; diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 8994545367a2..38b4c78df506 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -1674,6 
+1674,13 @@ void spi_finalize_current_message(struct spi_controller *ctlr) } } + if (unlikely(ctlr->ptp_sts_supported)) { + list_for_each_entry(xfer, &mesg->transfers, transfer_list) { + WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_pre); + WARN_ON_ONCE(xfer->ptp_sts && !xfer->timestamped_post); + } + } + spi_unmap_msg(ctlr, mesg); if (ctlr->cur_msg_prepared && ctlr->unprepare_message) { @@ -2451,6 +2458,8 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr) int nb, i; struct gpio_desc **cs; struct device *dev = &ctlr->dev; + unsigned long native_cs_mask = 0; + unsigned int num_cs_gpios = 0; nb = gpiod_count(dev, "cs"); ctlr->num_chipselect = max_t(int, nb, ctlr->num_chipselect); @@ -2492,7 +2501,22 @@ static int spi_get_gpio_descs(struct spi_controller *ctlr) if (!gpioname) return -ENOMEM; gpiod_set_consumer_name(cs[i], gpioname); + num_cs_gpios++; + continue; } + + if (ctlr->max_native_cs && i >= ctlr->max_native_cs) { + dev_err(dev, "Invalid native chip select %d\n", i); + return -EINVAL; + } + native_cs_mask |= BIT(i); + } + + ctlr->unused_native_cs = ffz(native_cs_mask); + if (num_cs_gpios && ctlr->max_native_cs && + ctlr->unused_native_cs >= ctlr->max_native_cs) { + dev_err(dev, "No unused native chip select available\n"); + return -EINVAL; } return 0; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 7251a87bb576..b94ed4e30770 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -4149,9 +4149,6 @@ int iscsit_close_connection( iscsit_stop_nopin_response_timer(conn); iscsit_stop_nopin_timer(conn); - if (conn->conn_transport->iscsit_wait_conn) - conn->conn_transport->iscsit_wait_conn(conn); - /* * During Connection recovery drop unacknowledged out of order * commands for this connection, and prepare the other commands @@ -4237,6 +4234,9 @@ int iscsit_close_connection( target_sess_cmd_list_set_waiting(sess->se_sess); target_wait_for_sess_cmds(sess->se_sess); + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); + ahash_request_free(conn->conn_tx_hash); if (conn->conn_rx_hash) { struct crypto_ahash *tfm; diff --git a/drivers/tee/optee/Kconfig b/drivers/tee/optee/Kconfig index d1ad512e1708..3ca71e3812ed 100644 --- a/drivers/tee/optee/Kconfig +++ b/drivers/tee/optee/Kconfig @@ -3,6 +3,7 @@ config OPTEE tristate "OP-TEE" depends on HAVE_ARM_SMCCC + depends on MMU help This implements the OP-TEE Trusted Execution Environment (TEE) driver.
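
The dma_request_slave_channel() to dma_request_chan() conversions above (spi-sirf, spi-stm32-qspi, spi-stm32, spi-uniphier) all chase the same defect: the old helper returns NULL on every failure, so a driver cannot tell -EPROBE_DEFER from a board with no DMA, and either breaks probe ordering or silently loses DMA. dma_request_chan() returns an ERR_PTR() instead. A minimal sketch of the resulting pattern, not tied to any one of the drivers above (struct foo and its members are hypothetical):

#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical driver state; only the optional TX channel matters here. */
struct foo {
	struct device *dev;
	struct dma_chan *dma_tx;	/* NULL means "fall back to PIO" */
};

static int foo_request_dma(struct foo *p)
{
	p->dma_tx = dma_request_chan(p->dev, "tx");
	if (IS_ERR(p->dma_tx)) {
		int ret = PTR_ERR(p->dma_tx);

		p->dma_tx = NULL;
		/*
		 * Propagate deferral so probe is retried once the DMA
		 * controller has bound; any other error just means
		 * "run without DMA".
		 */
		if (ret == -EPROBE_DEFER)
			return ret;
		dev_warn(p->dev, "no tx DMA channel, using PIO\n");
	}
	return 0;
}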
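
In the ti-qspi qspi_read_msg() changes, 8-bit transfers are batched: a full burst of QSPI_WLEN_MAX_BYTES drains QSPI_SPI_DATA_REG_3 down to QSPI_SPI_DATA_REG with be32_to_cpu(), and a 1 to 4 byte tail is reprogrammed as a single word of rx_wlen bits and peeled off most significant byte first. A standalone sketch of that tail unpacking (helper name hypothetical):

#include <linux/types.h>

/*
 * Unpack one big-endian word of rx_wlen bits (8, 16, 24 or 32) into
 * bytes, most significant byte first, one byte per 8 bits.
 */
static void foo_unpack_word(u8 *dst, u32 rx, unsigned int rx_wlen)
{
	if (rx_wlen >= 8)
		*dst++ = rx >> (rx_wlen - 8);
	if (rx_wlen >= 16)
		*dst++ = rx >> (rx_wlen - 16);
	if (rx_wlen >= 24)
		*dst++ = rx >> (rx_wlen - 24);
	if (rx_wlen >= 32)
		*dst++ = rx;
}

For rx_wlen == 24 and rx == 0x00aabbcc this stores 0xaa, 0xbb, 0xcc, matching the order the bits were shifted in on the wire.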
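
The new ti_qspi_adjust_op_size() plugs into the spi-mem contract: a controller may shrink op->data.nbytes before exec_op() runs, and spi-mem users are expected to loop, re-adjusting and reissuing the op for the remaining bytes, so the driver never splits a read by hand. A reduced sketch of such a hook, assuming a single hypothetical 4 KiB read window instead of ti-qspi's mmap_size and QSPI_FRAME limits:

#include <linux/sizes.h>
#include <linux/spi/spi-mem.h>

#define FOO_WINDOW_SIZE	SZ_4K	/* hypothetical MMIO read window */

static int foo_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	/*
	 * Clamp reads that start inside the window so they never run
	 * past its end; the caller retries with the remainder.
	 */
	if (op->data.dir == SPI_MEM_DATA_IN &&
	    op->addr.val < FOO_WINDOW_SIZE &&
	    op->addr.val + op->data.nbytes > FOO_WINDOW_SIZE)
		op->data.nbytes = FOO_WINDOW_SIZE - op->addr.val;

	return 0;
}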
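
The two spi-uniphier DMA completion callbacks coordinate through one atomic bitmask: each clears its own busy bit with atomic_fetch_andnot(), which returns the value the mask held before the clear, so exactly one callback, the last to run, observes the other bit already zero and finalizes the transfer. A condensed sketch of the handshake (names hypothetical; the real callbacks also mask their IRQ source):

#include <linux/atomic.h>
#include <linux/bits.h>

#define FOO_BUSY_TX	BIT(0)
#define FOO_BUSY_RX	BIT(1)

static atomic_t foo_dma_busy;

static void foo_finalize(void)
{
	/* spi_finalize_current_transfer(master) would go here */
}

static void foo_rx_done(void)	/* RX DMA completion callback */
{
	/*
	 * Single read-modify-write: returns the mask *before* BUSY_RX
	 * was cleared, so there is no window in which both callbacks
	 * decide to finalize.
	 */
	int prev = atomic_fetch_andnot(FOO_BUSY_RX, &foo_dma_busy);

	if (!(prev & FOO_BUSY_TX))
		foo_finalize();
}

The TX callback is the mirror image with the two bits swapped; for a unidirectional transfer only one bit is ever set, so that direction's callback finalizes immediately.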
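
The spi.c hunk generalizes what sh-msiof had open-coded (and deletes above as sh_msiof_get_cs_gpios()): while walking the cs-gpios property, the core counts GPIO-backed chip selects, range-checks native ones against ctlr->max_native_cs, and picks ctlr->unused_native_cs with ffz(), i.e. the lowest native line no device sits on, which a controller can park its native CS field on while a GPIO CS is active. A worked example of the mask arithmetic, with made-up assignments:

#include <linux/bitops.h>

/*
 * Say CS 0 and CS 2 are native chip-select pins and CS 1 is a GPIO:
 * only the native ones set a bit in the mask.
 */
unsigned long native_cs_mask = BIT(0) | BIT(2);	/* 0b101 */
unsigned long unused = ffz(native_cs_mask);	/* lowest zero bit: 1 */

If every native line up to max_native_cs were taken, ffz() would land at or past max_native_cs, and spi_get_gpio_descs() now fails registration instead of toggling a chip select that a real device is wired to.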