Diffstat (limited to 'drivers/misc')
-rw-r--r--   drivers/misc/Kconfig                 |    9
-rw-r--r--   drivers/misc/Makefile                |    1
-rw-r--r--   drivers/misc/cardreader/alcor_pci.c  |    6
-rw-r--r--   drivers/misc/eeprom/ee1004.c         |    6
-rw-r--r--   drivers/misc/eeprom/max6875.c        |    6
-rw-r--r--   drivers/misc/fastrpc.c               |    5
-rw-r--r--   drivers/misc/lkdtm/Makefile          |    1
-rw-r--r--   drivers/misc/lkdtm/bugs.c            |    7
-rw-r--r--   drivers/misc/lkdtm/cfi.c             |   42
-rw-r--r--   drivers/misc/lkdtm/core.c            |    2
-rw-r--r--   drivers/misc/lkdtm/lkdtm.h           |    4
-rw-r--r--   drivers/misc/mei/pci-me.c            |   19
-rw-r--r--   drivers/misc/mei/pci-txe.c           |   19
-rw-r--r--   drivers/misc/mic/card/mic_x100.c     |   28
-rw-r--r--   drivers/misc/sgi-xp/xpc_uv.c         |    4
-rw-r--r--   drivers/misc/spear13xx_pcie_gadget.c |  797
-rw-r--r--   drivers/misc/xilinx_sdfec.c          | 1214
17 files changed, 1286 insertions(+), 884 deletions(-)
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 16900357afc2..9042ba7baed6 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -375,15 +375,6 @@ config DS1682 This driver can also be built as a module. If so, the module will be called ds1682. -config SPEAR13XX_PCIE_GADGET - bool "PCIe gadget support for SPEAr13XX platform" - depends on ARCH_SPEAR13XX && BROKEN - help - This option enables gadget support for PCIe controller. If - board file defines any controller as PCIe endpoint then a sysfs - entry will be created for that controller. User can use these - sysfs node to configure PCIe EP as per his requirements. - config VMWARE_BALLOON tristate "VMware Balloon Driver" depends on VMWARE_VMCI && X86 && HYPERVISOR_GUEST diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index abd8ae249746..9e1eaf523d6e 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -37,7 +37,6 @@ obj-$(CONFIG_C2PORT) += c2port/ obj-$(CONFIG_HMC6352) += hmc6352.o obj-y += eeprom/ obj-y += cb710/ -obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o obj-$(CONFIG_PCH_PHUB) += pch_phub.o obj-y += ti-st/ diff --git a/drivers/misc/cardreader/alcor_pci.c b/drivers/misc/cardreader/alcor_pci.c index bcb10fa4bc3a..259fe1dfec03 100644 --- a/drivers/misc/cardreader/alcor_pci.c +++ b/drivers/misc/cardreader/alcor_pci.c @@ -334,8 +334,7 @@ static void alcor_pci_remove(struct pci_dev *pdev) #ifdef CONFIG_PM_SLEEP static int alcor_suspend(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct alcor_pci_priv *priv = pci_get_drvdata(pdev); + struct alcor_pci_priv *priv = dev_get_drvdata(dev); alcor_pci_aspm_ctrl(priv, 1); return 0; @@ -344,8 +343,7 @@ static int alcor_suspend(struct device *dev) static int alcor_resume(struct device *dev) { - struct pci_dev *pdev = to_pci_dev(dev); - struct alcor_pci_priv *priv = pci_get_drvdata(pdev); + struct alcor_pci_priv *priv = dev_get_drvdata(dev); alcor_pci_aspm_ctrl(priv, 0); return 0; diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c index 6f00c33cfe22..b081c67416d7 100644 --- a/drivers/misc/eeprom/ee1004.c +++ b/drivers/misc/eeprom/ee1004.c @@ -195,13 +195,13 @@ static int ee1004_probe(struct i2c_client *client, mutex_lock(&ee1004_bus_lock); if (++ee1004_dev_count == 1) { for (cnr = 0; cnr < 2; cnr++) { - ee1004_set_page[cnr] = i2c_new_dummy(client->adapter, + ee1004_set_page[cnr] = i2c_new_dummy_device(client->adapter, EE1004_ADDR_SET_PAGE + cnr); - if (!ee1004_set_page[cnr]) { + if (IS_ERR(ee1004_set_page[cnr])) { dev_err(&client->dev, "address 0x%02x unavailable\n", EE1004_ADDR_SET_PAGE + cnr); - err = -EADDRINUSE; + err = PTR_ERR(ee1004_set_page[cnr]); goto err_clients; } } diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c index 4d0cb90f4aeb..9da81f6d4a1c 100644 --- a/drivers/misc/eeprom/max6875.c +++ b/drivers/misc/eeprom/max6875.c @@ -150,9 +150,9 @@ static int max6875_probe(struct i2c_client *client, return -ENOMEM; /* A fake client is created on the odd address */ - data->fake_client = i2c_new_dummy(client->adapter, client->addr + 1); - if (!data->fake_client) { - err = -ENOMEM; + data->fake_client = i2c_new_dummy_device(client->adapter, client->addr + 1); + if (IS_ERR(data->fake_client)) { + err = PTR_ERR(data->fake_client); goto exit_kfree; } diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c index 98603e235cf0..c790585da14c 100644 --- a/drivers/misc/fastrpc.c +++ b/drivers/misc/fastrpc.c @@ -279,8 +279,11 
@@ static int fastrpc_buf_alloc(struct fastrpc_user *fl, struct device *dev, buf->virt = dma_alloc_coherent(dev, buf->size, (dma_addr_t *)&buf->phys, GFP_KERNEL); - if (!buf->virt) + if (!buf->virt) { + mutex_destroy(&buf->lock); + kfree(buf); return -ENOMEM; + } if (fl->sctx && fl->sctx->sid) buf->phys += ((u64)fl->sctx->sid << 32); diff --git a/drivers/misc/lkdtm/Makefile b/drivers/misc/lkdtm/Makefile index fb10eafe9bde..c70b3822013f 100644 --- a/drivers/misc/lkdtm/Makefile +++ b/drivers/misc/lkdtm/Makefile @@ -9,6 +9,7 @@ lkdtm-$(CONFIG_LKDTM) += refcount.o lkdtm-$(CONFIG_LKDTM) += rodata_objcopy.o lkdtm-$(CONFIG_LKDTM) += usercopy.o lkdtm-$(CONFIG_LKDTM) += stackleak.o +lkdtm-$(CONFIG_LKDTM) += cfi.o KASAN_SANITIZE_stackleak.o := n KCOV_INSTRUMENT_rodata.o := n diff --git a/drivers/misc/lkdtm/bugs.c b/drivers/misc/lkdtm/bugs.c index 24245ccdba72..7284a22b1a09 100644 --- a/drivers/misc/lkdtm/bugs.c +++ b/drivers/misc/lkdtm/bugs.c @@ -75,7 +75,12 @@ static int warn_counter; void lkdtm_WARNING(void) { - WARN(1, "Warning message trigger count: %d\n", warn_counter++); + WARN_ON(++warn_counter); +} + +void lkdtm_WARNING_MESSAGE(void) +{ + WARN(1, "Warning message trigger count: %d\n", ++warn_counter); } void lkdtm_EXCEPTION(void) diff --git a/drivers/misc/lkdtm/cfi.c b/drivers/misc/lkdtm/cfi.c new file mode 100644 index 000000000000..e73ebdbfa806 --- /dev/null +++ b/drivers/misc/lkdtm/cfi.c @@ -0,0 +1,42 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * This is for all the tests relating directly to Control Flow Integrity. + */ +#include "lkdtm.h" + +static int called_count; + +/* Function taking one argument, without a return value. */ +static noinline void lkdtm_increment_void(int *counter) +{ + (*counter)++; +} + +/* Function taking one argument, returning int. */ +static noinline int lkdtm_increment_int(int *counter) +{ + (*counter)++; + + return *counter; +} +/* + * This tries to call an indirect function with a mismatched prototype. + */ +void lkdtm_CFI_FORWARD_PROTO(void) +{ + /* + * Matches lkdtm_increment_void()'s prototype, but not + * lkdtm_increment_int()'s prototype. 
+ */ + void (*func)(int *); + + pr_info("Calling matched prototype ...\n"); + func = lkdtm_increment_void; + func(&called_count); + + pr_info("Calling mismatched prototype ...\n"); + func = (void *)lkdtm_increment_int; + func(&called_count); + + pr_info("Fail: survived mismatched prototype function call!\n"); +} diff --git a/drivers/misc/lkdtm/core.c b/drivers/misc/lkdtm/core.c index 66ae6b2a6950..cbc4c9045a99 100644 --- a/drivers/misc/lkdtm/core.c +++ b/drivers/misc/lkdtm/core.c @@ -104,6 +104,7 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(PANIC), CRASHTYPE(BUG), CRASHTYPE(WARNING), + CRASHTYPE(WARNING_MESSAGE), CRASHTYPE(EXCEPTION), CRASHTYPE(LOOP), CRASHTYPE(EXHAUST_STACK), @@ -169,6 +170,7 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(USERCOPY_KERNEL), CRASHTYPE(USERCOPY_KERNEL_DS), CRASHTYPE(STACKLEAK_ERASING), + CRASHTYPE(CFI_FORWARD_PROTO), }; diff --git a/drivers/misc/lkdtm/lkdtm.h b/drivers/misc/lkdtm/lkdtm.h index 6a284a87a037..ab446e0bde97 100644 --- a/drivers/misc/lkdtm/lkdtm.h +++ b/drivers/misc/lkdtm/lkdtm.h @@ -11,6 +11,7 @@ void __init lkdtm_bugs_init(int *recur_param); void lkdtm_PANIC(void); void lkdtm_BUG(void); void lkdtm_WARNING(void); +void lkdtm_WARNING_MESSAGE(void); void lkdtm_EXCEPTION(void); void lkdtm_LOOP(void); void lkdtm_EXHAUST_STACK(void); @@ -95,4 +96,7 @@ void lkdtm_USERCOPY_KERNEL_DS(void); /* lkdtm_stackleak.c */ void lkdtm_STACKLEAK_ERASING(void); +/* cfi.c */ +void lkdtm_CFI_FORWARD_PROTO(void); + #endif diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c index 541538eff8b1..d5a92c6eadb3 100644 --- a/drivers/misc/mei/pci-me.c +++ b/drivers/misc/mei/pci-me.c @@ -383,12 +383,11 @@ static int mei_me_pci_resume(struct device *device) #ifdef CONFIG_PM static int mei_me_pm_runtime_idle(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); struct mei_device *dev; - dev_dbg(&pdev->dev, "rpm: me: runtime_idle\n"); + dev_dbg(device, "rpm: me: runtime_idle\n"); - dev = pci_get_drvdata(pdev); + dev = dev_get_drvdata(device); if (!dev) return -ENODEV; if (mei_write_is_idle(dev)) @@ -399,13 +398,12 @@ static int mei_me_pm_runtime_idle(struct device *device) static int mei_me_pm_runtime_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); struct mei_device *dev; int ret; - dev_dbg(&pdev->dev, "rpm: me: runtime suspend\n"); + dev_dbg(device, "rpm: me: runtime suspend\n"); - dev = pci_get_drvdata(pdev); + dev = dev_get_drvdata(device); if (!dev) return -ENODEV; @@ -418,7 +416,7 @@ static int mei_me_pm_runtime_suspend(struct device *device) mutex_unlock(&dev->device_lock); - dev_dbg(&pdev->dev, "rpm: me: runtime suspend ret=%d\n", ret); + dev_dbg(device, "rpm: me: runtime suspend ret=%d\n", ret); if (ret && ret != -EAGAIN) schedule_work(&dev->reset_work); @@ -428,13 +426,12 @@ static int mei_me_pm_runtime_suspend(struct device *device) static int mei_me_pm_runtime_resume(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); struct mei_device *dev; int ret; - dev_dbg(&pdev->dev, "rpm: me: runtime resume\n"); + dev_dbg(device, "rpm: me: runtime resume\n"); - dev = pci_get_drvdata(pdev); + dev = dev_get_drvdata(device); if (!dev) return -ENODEV; @@ -444,7 +441,7 @@ static int mei_me_pm_runtime_resume(struct device *device) mutex_unlock(&dev->device_lock); - dev_dbg(&pdev->dev, "rpm: me: runtime resume ret = %d\n", ret); + dev_dbg(device, "rpm: me: runtime resume ret = %d\n", ret); if (ret) schedule_work(&dev->reset_work); diff --git a/drivers/misc/mei/pci-txe.c 
b/drivers/misc/mei/pci-txe.c index 2e37fc2e0fa8..f1c16a587495 100644 --- a/drivers/misc/mei/pci-txe.c +++ b/drivers/misc/mei/pci-txe.c @@ -276,12 +276,11 @@ static int mei_txe_pci_resume(struct device *device) #ifdef CONFIG_PM static int mei_txe_pm_runtime_idle(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); struct mei_device *dev; - dev_dbg(&pdev->dev, "rpm: txe: runtime_idle\n"); + dev_dbg(device, "rpm: txe: runtime_idle\n"); - dev = pci_get_drvdata(pdev); + dev = dev_get_drvdata(device); if (!dev) return -ENODEV; if (mei_write_is_idle(dev)) @@ -291,13 +290,12 @@ static int mei_txe_pm_runtime_idle(struct device *device) } static int mei_txe_pm_runtime_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); struct mei_device *dev; int ret; - dev_dbg(&pdev->dev, "rpm: txe: runtime suspend\n"); + dev_dbg(device, "rpm: txe: runtime suspend\n"); - dev = pci_get_drvdata(pdev); + dev = dev_get_drvdata(device); if (!dev) return -ENODEV; @@ -310,7 +308,7 @@ static int mei_txe_pm_runtime_suspend(struct device *device) /* keep irq on we are staying in D0 */ - dev_dbg(&pdev->dev, "rpm: txe: runtime suspend ret=%d\n", ret); + dev_dbg(device, "rpm: txe: runtime suspend ret=%d\n", ret); mutex_unlock(&dev->device_lock); @@ -322,13 +320,12 @@ static int mei_txe_pm_runtime_suspend(struct device *device) static int mei_txe_pm_runtime_resume(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); struct mei_device *dev; int ret; - dev_dbg(&pdev->dev, "rpm: txe: runtime resume\n"); + dev_dbg(device, "rpm: txe: runtime resume\n"); - dev = pci_get_drvdata(pdev); + dev = dev_get_drvdata(device); if (!dev) return -ENODEV; @@ -340,7 +337,7 @@ static int mei_txe_pm_runtime_resume(struct device *device) mutex_unlock(&dev->device_lock); - dev_dbg(&pdev->dev, "rpm: txe: runtime resume ret = %d\n", ret); + dev_dbg(device, "rpm: txe: runtime resume ret = %d\n", ret); if (ret) schedule_work(&dev->reset_work); diff --git a/drivers/misc/mic/card/mic_x100.c b/drivers/misc/mic/card/mic_x100.c index 266ffb6f6c44..c8bff2916d3d 100644 --- a/drivers/misc/mic/card/mic_x100.c +++ b/drivers/misc/mic/card/mic_x100.c @@ -237,6 +237,9 @@ static int __init mic_probe(struct platform_device *pdev) mdrv->dev = &pdev->dev; snprintf(mdrv->name, sizeof(mic_driver_name), mic_driver_name); + /* FIXME: use dma_set_mask_and_coherent() and check result */ + dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + mdev->mmio.pa = MIC_X100_MMIO_BASE; mdev->mmio.len = MIC_X100_MMIO_LEN; mdev->mmio.va = devm_ioremap(&pdev->dev, MIC_X100_MMIO_BASE, @@ -282,18 +285,6 @@ static void mic_platform_shutdown(struct platform_device *pdev) mic_remove(pdev); } -static u64 mic_dma_mask = DMA_BIT_MASK(64); - -static struct platform_device mic_platform_dev = { - .name = mic_driver_name, - .id = 0, - .num_resources = 0, - .dev = { - .dma_mask = &mic_dma_mask, - .coherent_dma_mask = DMA_BIT_MASK(64), - }, -}; - static struct platform_driver __refdata mic_platform_driver = { .probe = mic_probe, .remove = mic_remove, @@ -303,6 +294,8 @@ static struct platform_driver __refdata mic_platform_driver = { }, }; +static struct platform_device *mic_platform_dev; + static int __init mic_init(void) { int ret; @@ -316,9 +309,12 @@ static int __init mic_init(void) request_module("mic_x100_dma"); mic_init_card_debugfs(); - ret = platform_device_register(&mic_platform_dev); + + mic_platform_dev = platform_device_register_simple(mic_driver_name, + 0, NULL, 0); + ret = PTR_ERR_OR_ZERO(mic_platform_dev); if (ret) { - 
pr_err("platform_device_register ret %d\n", ret); + pr_err("platform_device_register_full ret %d\n", ret); goto cleanup_debugfs; } ret = platform_driver_register(&mic_platform_driver); @@ -329,7 +325,7 @@ static int __init mic_init(void) return ret; device_unregister: - platform_device_unregister(&mic_platform_dev); + platform_device_unregister(mic_platform_dev); cleanup_debugfs: mic_exit_card_debugfs(); done: @@ -339,7 +335,7 @@ done: static void __exit mic_exit(void) { platform_driver_unregister(&mic_platform_driver); - platform_device_unregister(&mic_platform_dev); + platform_device_unregister(mic_platform_dev); mic_exit_card_debugfs(); } diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index 0c6de97dd347..7f34b97fd02e 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -694,7 +694,7 @@ again: if (gru_mq_desc == NULL) { gru_mq_desc = kmalloc(sizeof(struct gru_message_queue_desc), - GFP_KERNEL); + GFP_ATOMIC); if (gru_mq_desc == NULL) { ret = xpNoMemory; goto done; @@ -1678,7 +1678,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload) XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret); } -static struct xpc_arch_operations xpc_arch_ops_uv = { +static const struct xpc_arch_operations xpc_arch_ops_uv = { .setup_partitions = xpc_setup_partitions_uv, .teardown_partitions = xpc_teardown_partitions_uv, .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv, diff --git a/drivers/misc/spear13xx_pcie_gadget.c b/drivers/misc/spear13xx_pcie_gadget.c deleted file mode 100644 index ee120dcbb3e6..000000000000 --- a/drivers/misc/spear13xx_pcie_gadget.c +++ /dev/null @@ -1,797 +0,0 @@ -/* - * drivers/misc/spear13xx_pcie_gadget.c - * - * Copyright (C) 2010 ST Microelectronics - * Pratyush Anand<pratyush.anand@gmail.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#include <linux/device.h> -#include <linux/clk.h> -#include <linux/slab.h> -#include <linux/delay.h> -#include <linux/io.h> -#include <linux/interrupt.h> -#include <linux/irq.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/pci_regs.h> -#include <linux/configfs.h> -#include <mach/pcie.h> -#include <mach/misc_regs.h> - -#define IN0_MEM_SIZE (200 * 1024 * 1024 - 1) -/* In current implementation address translation is done using IN0 only. 
- * So IN1 start address and IN0 end address has been kept same -*/ -#define IN1_MEM_SIZE (0 * 1024 * 1024 - 1) -#define IN_IO_SIZE (20 * 1024 * 1024 - 1) -#define IN_CFG0_SIZE (12 * 1024 * 1024 - 1) -#define IN_CFG1_SIZE (12 * 1024 * 1024 - 1) -#define IN_MSG_SIZE (12 * 1024 * 1024 - 1) -/* Keep default BAR size as 4K*/ -/* AORAM would be mapped by default*/ -#define INBOUND_ADDR_MASK (SPEAR13XX_SYSRAM1_SIZE - 1) - -#define INT_TYPE_NO_INT 0 -#define INT_TYPE_INTX 1 -#define INT_TYPE_MSI 2 -struct spear_pcie_gadget_config { - void __iomem *base; - void __iomem *va_app_base; - void __iomem *va_dbi_base; - char int_type[10]; - ulong requested_msi; - ulong configured_msi; - ulong bar0_size; - ulong bar0_rw_offset; - void __iomem *va_bar0_address; -}; - -struct pcie_gadget_target { - struct configfs_subsystem subsys; - struct spear_pcie_gadget_config config; -}; - -struct pcie_gadget_target_attr { - struct configfs_attribute attr; - ssize_t (*show)(struct spear_pcie_gadget_config *config, - char *buf); - ssize_t (*store)(struct spear_pcie_gadget_config *config, - const char *buf, - size_t count); -}; - -static void enable_dbi_access(struct pcie_app_reg __iomem *app_reg) -{ - /* Enable DBI access */ - writel(readl(&app_reg->slv_armisc) | (1 << AXI_OP_DBI_ACCESS_ID), - &app_reg->slv_armisc); - writel(readl(&app_reg->slv_awmisc) | (1 << AXI_OP_DBI_ACCESS_ID), - &app_reg->slv_awmisc); - -} - -static void disable_dbi_access(struct pcie_app_reg __iomem *app_reg) -{ - /* disable DBI access */ - writel(readl(&app_reg->slv_armisc) & ~(1 << AXI_OP_DBI_ACCESS_ID), - &app_reg->slv_armisc); - writel(readl(&app_reg->slv_awmisc) & ~(1 << AXI_OP_DBI_ACCESS_ID), - &app_reg->slv_awmisc); - -} - -static void spear_dbi_read_reg(struct spear_pcie_gadget_config *config, - int where, int size, u32 *val) -{ - struct pcie_app_reg __iomem *app_reg = config->va_app_base; - ulong va_address; - - /* Enable DBI access */ - enable_dbi_access(app_reg); - - va_address = (ulong)config->va_dbi_base + (where & ~0x3); - - *val = readl(va_address); - - if (size == 1) - *val = (*val >> (8 * (where & 3))) & 0xff; - else if (size == 2) - *val = (*val >> (8 * (where & 3))) & 0xffff; - - /* Disable DBI access */ - disable_dbi_access(app_reg); -} - -static void spear_dbi_write_reg(struct spear_pcie_gadget_config *config, - int where, int size, u32 val) -{ - struct pcie_app_reg __iomem *app_reg = config->va_app_base; - ulong va_address; - - /* Enable DBI access */ - enable_dbi_access(app_reg); - - va_address = (ulong)config->va_dbi_base + (where & ~0x3); - - if (size == 4) - writel(val, va_address); - else if (size == 2) - writew(val, va_address + (where & 2)); - else if (size == 1) - writeb(val, va_address + (where & 3)); - - /* Disable DBI access */ - disable_dbi_access(app_reg); -} - -#define PCI_FIND_CAP_TTL 48 - -static int pci_find_own_next_cap_ttl(struct spear_pcie_gadget_config *config, - u32 pos, int cap, int *ttl) -{ - u32 id; - - while ((*ttl)--) { - spear_dbi_read_reg(config, pos, 1, &pos); - if (pos < 0x40) - break; - pos &= ~3; - spear_dbi_read_reg(config, pos + PCI_CAP_LIST_ID, 1, &id); - if (id == 0xff) - break; - if (id == cap) - return pos; - pos += PCI_CAP_LIST_NEXT; - } - return 0; -} - -static int pci_find_own_next_cap(struct spear_pcie_gadget_config *config, - u32 pos, int cap) -{ - int ttl = PCI_FIND_CAP_TTL; - - return pci_find_own_next_cap_ttl(config, pos, cap, &ttl); -} - -static int pci_find_own_cap_start(struct spear_pcie_gadget_config *config, - u8 hdr_type) -{ - u32 status; - - spear_dbi_read_reg(config, 
PCI_STATUS, 2, &status); - if (!(status & PCI_STATUS_CAP_LIST)) - return 0; - - switch (hdr_type) { - case PCI_HEADER_TYPE_NORMAL: - case PCI_HEADER_TYPE_BRIDGE: - return PCI_CAPABILITY_LIST; - case PCI_HEADER_TYPE_CARDBUS: - return PCI_CB_CAPABILITY_LIST; - default: - return 0; - } - - return 0; -} - -/* - * Tell if a device supports a given PCI capability. - * Returns the address of the requested capability structure within the - * device's PCI configuration space or 0 in case the device does not - * support it. Possible values for @cap: - * - * %PCI_CAP_ID_PM Power Management - * %PCI_CAP_ID_AGP Accelerated Graphics Port - * %PCI_CAP_ID_VPD Vital Product Data - * %PCI_CAP_ID_SLOTID Slot Identification - * %PCI_CAP_ID_MSI Message Signalled Interrupts - * %PCI_CAP_ID_CHSWP CompactPCI HotSwap - * %PCI_CAP_ID_PCIX PCI-X - * %PCI_CAP_ID_EXP PCI Express - */ -static int pci_find_own_capability(struct spear_pcie_gadget_config *config, - int cap) -{ - u32 pos; - u32 hdr_type; - - spear_dbi_read_reg(config, PCI_HEADER_TYPE, 1, &hdr_type); - - pos = pci_find_own_cap_start(config, hdr_type); - if (pos) - pos = pci_find_own_next_cap(config, pos, cap); - - return pos; -} - -static irqreturn_t spear_pcie_gadget_irq(int irq, void *dev_id) -{ - return 0; -} - -/* - * configfs interfaces show/store functions - */ - -static struct pcie_gadget_target *to_target(struct config_item *item) -{ - return item ? - container_of(to_configfs_subsystem(to_config_group(item)), - struct pcie_gadget_target, subsys) : NULL; -} - -static ssize_t pcie_gadget_link_show(struct config_item *item, char *buf) -{ - struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base; - - if (readl(&app_reg->app_status_1) & ((u32)1 << XMLH_LINK_UP_ID)) - return sprintf(buf, "UP"); - else - return sprintf(buf, "DOWN"); -} - -static ssize_t pcie_gadget_link_store(struct config_item *item, - const char *buf, size_t count) -{ - struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base; - - if (sysfs_streq(buf, "UP")) - writel(readl(&app_reg->app_ctrl_0) | (1 << APP_LTSSM_ENABLE_ID), - &app_reg->app_ctrl_0); - else if (sysfs_streq(buf, "DOWN")) - writel(readl(&app_reg->app_ctrl_0) - & ~(1 << APP_LTSSM_ENABLE_ID), - &app_reg->app_ctrl_0); - else - return -EINVAL; - return count; -} - -static ssize_t pcie_gadget_int_type_show(struct config_item *item, char *buf) -{ - return sprintf(buf, "%s", to_target(item)->int_type); -} - -static ssize_t pcie_gadget_int_type_store(struct config_item *item, - const char *buf, size_t count) -{ - struct spear_pcie_gadget_config *config = to_target(item) - u32 cap, vec, flags; - ulong vector; - - if (sysfs_streq(buf, "INTA")) - spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1); - - else if (sysfs_streq(buf, "MSI")) { - vector = config->requested_msi; - vec = 0; - while (vector > 1) { - vector /= 2; - vec++; - } - spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 0); - cap = pci_find_own_capability(config, PCI_CAP_ID_MSI); - spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags); - flags &= ~PCI_MSI_FLAGS_QMASK; - flags |= vec << 1; - spear_dbi_write_reg(config, cap + PCI_MSI_FLAGS, 1, flags); - } else - return -EINVAL; - - strcpy(config->int_type, buf); - - return count; -} - -static ssize_t pcie_gadget_no_of_msi_show(struct config_item *item, char *buf) -{ - struct spear_pcie_gadget_config *config = to_target(item) - struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base; - u32 cap, vec, flags; - ulong vector; - - if ((readl(&app_reg->msg_status) & (1 << CFG_MSI_EN_ID)) - 
!= (1 << CFG_MSI_EN_ID)) - vector = 0; - else { - cap = pci_find_own_capability(config, PCI_CAP_ID_MSI); - spear_dbi_read_reg(config, cap + PCI_MSI_FLAGS, 1, &flags); - flags &= ~PCI_MSI_FLAGS_QSIZE; - vec = flags >> 4; - vector = 1; - while (vec--) - vector *= 2; - } - config->configured_msi = vector; - - return sprintf(buf, "%lu", vector); -} - -static ssize_t pcie_gadget_no_of_msi_store(struct config_item *item, - const char *buf, size_t count) -{ - int ret; - - ret = kstrtoul(buf, 0, &to_target(item)->requested_msi); - if (ret) - return ret; - - if (config->requested_msi > 32) - config->requested_msi = 32; - - return count; -} - -static ssize_t pcie_gadget_inta_store(struct config_item *item, - const char *buf, size_t count) -{ - struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base; - ulong en; - int ret; - - ret = kstrtoul(buf, 0, &en); - if (ret) - return ret; - - if (en) - writel(readl(&app_reg->app_ctrl_0) | (1 << SYS_INT_ID), - &app_reg->app_ctrl_0); - else - writel(readl(&app_reg->app_ctrl_0) & ~(1 << SYS_INT_ID), - &app_reg->app_ctrl_0); - - return count; -} - -static ssize_t pcie_gadget_send_msi_store(struct config_item *item, - const char *buf, size_t count) -{ - struct spear_pcie_gadget_config *config = to_target(item) - struct pcie_app_reg __iomem *app_reg = config->va_app_base; - ulong vector; - u32 ven_msi; - int ret; - - ret = kstrtoul(buf, 0, &vector); - if (ret) - return ret; - - if (!config->configured_msi) - return -EINVAL; - - if (vector >= config->configured_msi) - return -EINVAL; - - ven_msi = readl(&app_reg->ven_msi_1); - ven_msi &= ~VEN_MSI_FUN_NUM_MASK; - ven_msi |= 0 << VEN_MSI_FUN_NUM_ID; - ven_msi &= ~VEN_MSI_TC_MASK; - ven_msi |= 0 << VEN_MSI_TC_ID; - ven_msi &= ~VEN_MSI_VECTOR_MASK; - ven_msi |= vector << VEN_MSI_VECTOR_ID; - - /* generating interrupt for msi vector */ - ven_msi |= VEN_MSI_REQ_EN; - writel(ven_msi, &app_reg->ven_msi_1); - udelay(1); - ven_msi &= ~VEN_MSI_REQ_EN; - writel(ven_msi, &app_reg->ven_msi_1); - - return count; -} - -static ssize_t pcie_gadget_vendor_id_show(struct config_item *item, char *buf) -{ - u32 id; - - spear_dbi_read_reg(to_target(item), PCI_VENDOR_ID, 2, &id); - - return sprintf(buf, "%x", id); -} - -static ssize_t pcie_gadget_vendor_id_store(struct config_item *item, - const char *buf, size_t count) -{ - ulong id; - int ret; - - ret = kstrtoul(buf, 0, &id); - if (ret) - return ret; - - spear_dbi_write_reg(to_target(item), PCI_VENDOR_ID, 2, id); - - return count; -} - -static ssize_t pcie_gadget_device_id_show(struct config_item *item, char *buf) -{ - u32 id; - - spear_dbi_read_reg(to_target(item), PCI_DEVICE_ID, 2, &id); - - return sprintf(buf, "%x", id); -} - -static ssize_t pcie_gadget_device_id_store(struct config_item *item, - const char *buf, size_t count) -{ - ulong id; - int ret; - - ret = kstrtoul(buf, 0, &id); - if (ret) - return ret; - - spear_dbi_write_reg(to_target(item), PCI_DEVICE_ID, 2, id); - - return count; -} - -static ssize_t pcie_gadget_bar0_size_show(struct config_item *item, char *buf) -{ - return sprintf(buf, "%lx", to_target(item)->bar0_size); -} - -static ssize_t pcie_gadget_bar0_size_store(struct config_item *item, - const char *buf, size_t count) -{ - struct spear_pcie_gadget_config *config = to_target(item) - ulong size; - u32 pos, pos1; - u32 no_of_bit = 0; - int ret; - - ret = kstrtoul(buf, 0, &size); - if (ret) - return ret; - - /* min bar size is 256 */ - if (size <= 0x100) - size = 0x100; - /* max bar size is 1MB*/ - else if (size >= 0x100000) - size = 0x100000; - else { - 
pos = 0; - pos1 = 0; - while (pos < 21) { - pos = find_next_bit((ulong *)&size, 21, pos); - if (pos != 21) - pos1 = pos + 1; - pos++; - no_of_bit++; - } - if (no_of_bit == 2) - pos1--; - - size = 1 << pos1; - } - config->bar0_size = size; - spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, size - 1); - - return count; -} - -static ssize_t pcie_gadget_bar0_address_show(struct config_item *item, - char *buf) -{ - struct pcie_app_reg __iomem *app_reg = to_target(item)->va_app_base; - - u32 address = readl(&app_reg->pim0_mem_addr_start); - - return sprintf(buf, "%x", address); -} - -static ssize_t pcie_gadget_bar0_address_store(struct config_item *item, - const char *buf, size_t count) -{ - struct spear_pcie_gadget_config *config = to_target(item) - struct pcie_app_reg __iomem *app_reg = config->va_app_base; - ulong address; - int ret; - - ret = kstrtoul(buf, 0, &address); - if (ret) - return ret; - - address &= ~(config->bar0_size - 1); - if (config->va_bar0_address) - iounmap(config->va_bar0_address); - config->va_bar0_address = ioremap(address, config->bar0_size); - if (!config->va_bar0_address) - return -ENOMEM; - - writel(address, &app_reg->pim0_mem_addr_start); - - return count; -} - -static ssize_t pcie_gadget_bar0_rw_offset_show(struct config_item *item, - char *buf) -{ - return sprintf(buf, "%lx", to_target(item)->bar0_rw_offset); -} - -static ssize_t pcie_gadget_bar0_rw_offset_store(struct config_item *item, - const char *buf, size_t count) -{ - ulong offset; - int ret; - - ret = kstrtoul(buf, 0, &offset); - if (ret) - return ret; - - if (offset % 4) - return -EINVAL; - - to_target(item)->bar0_rw_offset = offset; - - return count; -} - -static ssize_t pcie_gadget_bar0_data_show(struct config_item *item, char *buf) -{ - struct spear_pcie_gadget_config *config = to_target(item) - ulong data; - - if (!config->va_bar0_address) - return -ENOMEM; - - data = readl((ulong)config->va_bar0_address + config->bar0_rw_offset); - - return sprintf(buf, "%lx", data); -} - -static ssize_t pcie_gadget_bar0_data_store(struct config_item *item, - const char *buf, size_t count) -{ - struct spear_pcie_gadget_config *config = to_target(item) - ulong data; - int ret; - - ret = kstrtoul(buf, 0, &data); - if (ret) - return ret; - - if (!config->va_bar0_address) - return -ENOMEM; - - writel(data, (ulong)config->va_bar0_address + config->bar0_rw_offset); - - return count; -} - -CONFIGFS_ATTR(pcie_gadget_, link); -CONFIGFS_ATTR(pcie_gadget_, int_type); -CONFIGFS_ATTR(pcie_gadget_, no_of_msi); -CONFIGFS_ATTR_WO(pcie_gadget_, inta); -CONFIGFS_ATTR_WO(pcie_gadget_, send_msi); -CONFIGFS_ATTR(pcie_gadget_, vendor_id); -CONFIGFS_ATTR(pcie_gadget_, device_id); -CONFIGFS_ATTR(pcie_gadget_, bar0_size); -CONFIGFS_ATTR(pcie_gadget_, bar0_address); -CONFIGFS_ATTR(pcie_gadget_, bar0_rw_offset); -CONFIGFS_ATTR(pcie_gadget_, bar0_data); - -static struct configfs_attribute *pcie_gadget_target_attrs[] = { - &pcie_gadget_attr_link, - &pcie_gadget_attr_int_type, - &pcie_gadget_attr_no_of_msi, - &pcie_gadget_attr_inta, - &pcie_gadget_attr_send_msi, - &pcie_gadget_attr_vendor_id, - &pcie_gadget_attr_device_id, - &pcie_gadget_attr_bar0_size, - &pcie_gadget_attr_bar0_address, - &pcie_gadget_attr_bar0_rw_offset, - &pcie_gadget_attr_bar0_data, - NULL, -}; - -static struct config_item_type pcie_gadget_target_type = { - .ct_attrs = pcie_gadget_target_attrs, - .ct_owner = THIS_MODULE, -}; - -static void spear13xx_pcie_device_init(struct spear_pcie_gadget_config *config) -{ - struct pcie_app_reg __iomem *app_reg = config->va_app_base; - - 
/*setup registers for outbound translation */ - - writel(config->base, &app_reg->in0_mem_addr_start); - writel(app_reg->in0_mem_addr_start + IN0_MEM_SIZE, - &app_reg->in0_mem_addr_limit); - writel(app_reg->in0_mem_addr_limit + 1, &app_reg->in1_mem_addr_start); - writel(app_reg->in1_mem_addr_start + IN1_MEM_SIZE, - &app_reg->in1_mem_addr_limit); - writel(app_reg->in1_mem_addr_limit + 1, &app_reg->in_io_addr_start); - writel(app_reg->in_io_addr_start + IN_IO_SIZE, - &app_reg->in_io_addr_limit); - writel(app_reg->in_io_addr_limit + 1, &app_reg->in_cfg0_addr_start); - writel(app_reg->in_cfg0_addr_start + IN_CFG0_SIZE, - &app_reg->in_cfg0_addr_limit); - writel(app_reg->in_cfg0_addr_limit + 1, &app_reg->in_cfg1_addr_start); - writel(app_reg->in_cfg1_addr_start + IN_CFG1_SIZE, - &app_reg->in_cfg1_addr_limit); - writel(app_reg->in_cfg1_addr_limit + 1, &app_reg->in_msg_addr_start); - writel(app_reg->in_msg_addr_start + IN_MSG_SIZE, - &app_reg->in_msg_addr_limit); - - writel(app_reg->in0_mem_addr_start, &app_reg->pom0_mem_addr_start); - writel(app_reg->in1_mem_addr_start, &app_reg->pom1_mem_addr_start); - writel(app_reg->in_io_addr_start, &app_reg->pom_io_addr_start); - - /*setup registers for inbound translation */ - - /* Keep AORAM mapped at BAR0 as default */ - config->bar0_size = INBOUND_ADDR_MASK + 1; - spear_dbi_write_reg(config, PCIE_BAR0_MASK_REG, 4, INBOUND_ADDR_MASK); - spear_dbi_write_reg(config, PCI_BASE_ADDRESS_0, 4, 0xC); - config->va_bar0_address = ioremap(SPEAR13XX_SYSRAM1_BASE, - config->bar0_size); - - writel(SPEAR13XX_SYSRAM1_BASE, &app_reg->pim0_mem_addr_start); - writel(0, &app_reg->pim1_mem_addr_start); - writel(INBOUND_ADDR_MASK + 1, &app_reg->mem0_addr_offset_limit); - - writel(0x0, &app_reg->pim_io_addr_start); - writel(0x0, &app_reg->pim_io_addr_start); - writel(0x0, &app_reg->pim_rom_addr_start); - - writel(DEVICE_TYPE_EP | (1 << MISCTRL_EN_ID) - | ((u32)1 << REG_TRANSLATION_ENABLE), - &app_reg->app_ctrl_0); - /* disable all rx interrupts */ - writel(0, &app_reg->int_mask); - - /* Select INTA as default*/ - spear_dbi_write_reg(config, PCI_INTERRUPT_LINE, 1, 1); -} - -static int spear_pcie_gadget_probe(struct platform_device *pdev) -{ - struct resource *res0, *res1; - unsigned int status = 0; - int irq; - struct clk *clk; - static struct pcie_gadget_target *target; - struct spear_pcie_gadget_config *config; - struct config_item *cg_item; - struct configfs_subsystem *subsys; - - target = devm_kzalloc(&pdev->dev, sizeof(*target), GFP_KERNEL); - if (!target) { - dev_err(&pdev->dev, "out of memory\n"); - return -ENOMEM; - } - - cg_item = &target->subsys.su_group.cg_item; - sprintf(cg_item->ci_namebuf, "pcie_gadget.%d", pdev->id); - cg_item->ci_type = &pcie_gadget_target_type; - config = &target->config; - - /* get resource for application registers*/ - res0 = platform_get_resource(pdev, IORESOURCE_MEM, 0); - config->va_app_base = devm_ioremap_resource(&pdev->dev, res0); - if (IS_ERR(config->va_app_base)) { - dev_err(&pdev->dev, "ioremap fail\n"); - return PTR_ERR(config->va_app_base); - } - - /* get resource for dbi registers*/ - res1 = platform_get_resource(pdev, IORESOURCE_MEM, 1); - config->base = (void __iomem *)res1->start; - - config->va_dbi_base = devm_ioremap_resource(&pdev->dev, res1); - if (IS_ERR(config->va_dbi_base)) { - dev_err(&pdev->dev, "ioremap fail\n"); - return PTR_ERR(config->va_dbi_base); - } - - platform_set_drvdata(pdev, target); - - irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "no update irq?\n"); - return irq; - } - - status = 
devm_request_irq(&pdev->dev, irq, spear_pcie_gadget_irq, - 0, pdev->name, NULL); - if (status) { - dev_err(&pdev->dev, - "pcie gadget interrupt IRQ%d already claimed\n", irq); - return status; - } - - /* Register configfs hooks */ - subsys = &target->subsys; - config_group_init(&subsys->su_group); - mutex_init(&subsys->su_mutex); - status = configfs_register_subsystem(subsys); - if (status) - return status; - - /* - * init basic pcie application registers - * do not enable clock if it is PCIE0.Ideally , all controller should - * have been independent from others with respect to clock. But PCIE1 - * and 2 depends on PCIE0.So PCIE0 clk is provided during board init. - */ - if (pdev->id == 1) { - /* - * Ideally CFG Clock should have been also enabled here. But - * it is done currently during board init routne - */ - clk = clk_get_sys("pcie1", NULL); - if (IS_ERR(clk)) { - pr_err("%s:couldn't get clk for pcie1\n", __func__); - return PTR_ERR(clk); - } - status = clk_enable(clk); - if (status) { - pr_err("%s:couldn't enable clk for pcie1\n", __func__); - return status; - } - } else if (pdev->id == 2) { - /* - * Ideally CFG Clock should have been also enabled here. But - * it is done currently during board init routne - */ - clk = clk_get_sys("pcie2", NULL); - if (IS_ERR(clk)) { - pr_err("%s:couldn't get clk for pcie2\n", __func__); - return PTR_ERR(clk); - } - status = clk_enable(clk); - if (status) { - pr_err("%s:couldn't enable clk for pcie2\n", __func__); - return status; - } - } - spear13xx_pcie_device_init(config); - - return 0; -} - -static int spear_pcie_gadget_remove(struct platform_device *pdev) -{ - static struct pcie_gadget_target *target; - - target = platform_get_drvdata(pdev); - - configfs_unregister_subsystem(&target->subsys); - - return 0; -} - -static void spear_pcie_gadget_shutdown(struct platform_device *pdev) -{ -} - -static struct platform_driver spear_pcie_gadget_driver = { - .probe = spear_pcie_gadget_probe, - .remove = spear_pcie_gadget_remove, - .shutdown = spear_pcie_gadget_shutdown, - .driver = { - .name = "pcie-gadget-spear", - .bus = &platform_bus_type - }, -}; - -module_platform_driver(spear_pcie_gadget_driver); - -MODULE_ALIAS("platform:pcie-gadget-spear"); -MODULE_AUTHOR("Pratyush Anand"); -MODULE_LICENSE("GPL"); diff --git a/drivers/misc/xilinx_sdfec.c b/drivers/misc/xilinx_sdfec.c index f257d3812110..11835969e982 100644 --- a/drivers/misc/xilinx_sdfec.c +++ b/drivers/misc/xilinx_sdfec.c @@ -19,11 +19,150 @@ #include <linux/poll.h> #include <linux/slab.h> #include <linux/clk.h> +#include <linux/compat.h> +#include <linux/highmem.h> + +#include <uapi/misc/xilinx_sdfec.h> #define DEV_NAME_LEN 12 -static struct idr dev_idr; -static struct mutex dev_idr_lock; +static DEFINE_IDA(dev_nrs); + +/* Xilinx SDFEC Register Map */ +/* CODE_WRI_PROTECT Register */ +#define XSDFEC_CODE_WR_PROTECT_ADDR (0x4) + +/* ACTIVE Register */ +#define XSDFEC_ACTIVE_ADDR (0x8) +#define XSDFEC_IS_ACTIVITY_SET (0x1) + +/* AXIS_WIDTH Register */ +#define XSDFEC_AXIS_WIDTH_ADDR (0xC) +#define XSDFEC_AXIS_DOUT_WORDS_LSB (5) +#define XSDFEC_AXIS_DOUT_WIDTH_LSB (3) +#define XSDFEC_AXIS_DIN_WORDS_LSB (2) +#define XSDFEC_AXIS_DIN_WIDTH_LSB (0) + +/* AXIS_ENABLE Register */ +#define XSDFEC_AXIS_ENABLE_ADDR (0x10) +#define XSDFEC_AXIS_OUT_ENABLE_MASK (0x38) +#define XSDFEC_AXIS_IN_ENABLE_MASK (0x7) +#define XSDFEC_AXIS_ENABLE_MASK \ + (XSDFEC_AXIS_OUT_ENABLE_MASK | XSDFEC_AXIS_IN_ENABLE_MASK) + +/* FEC_CODE Register */ +#define XSDFEC_FEC_CODE_ADDR (0x14) + +/* ORDER Register Map */ +#define 
XSDFEC_ORDER_ADDR (0x18) + +/* Interrupt Status Register */ +#define XSDFEC_ISR_ADDR (0x1C) +/* Interrupt Status Register Bit Mask */ +#define XSDFEC_ISR_MASK (0x3F) + +/* Write Only - Interrupt Enable Register */ +#define XSDFEC_IER_ADDR (0x20) +/* Write Only - Interrupt Disable Register */ +#define XSDFEC_IDR_ADDR (0x24) +/* Read Only - Interrupt Mask Register */ +#define XSDFEC_IMR_ADDR (0x28) + +/* ECC Interrupt Status Register */ +#define XSDFEC_ECC_ISR_ADDR (0x2C) +/* Single Bit Errors */ +#define XSDFEC_ECC_ISR_SBE_MASK (0x7FF) +/* PL Initialize Single Bit Errors */ +#define XSDFEC_PL_INIT_ECC_ISR_SBE_MASK (0x3C00000) +/* Multi Bit Errors */ +#define XSDFEC_ECC_ISR_MBE_MASK (0x3FF800) +/* PL Initialize Multi Bit Errors */ +#define XSDFEC_PL_INIT_ECC_ISR_MBE_MASK (0x3C000000) +/* Multi Bit Error to Event Shift */ +#define XSDFEC_ECC_ISR_MBE_TO_EVENT_SHIFT (11) +/* PL Initialize Multi Bit Error to Event Shift */ +#define XSDFEC_PL_INIT_ECC_ISR_MBE_TO_EVENT_SHIFT (4) +/* ECC Interrupt Status Bit Mask */ +#define XSDFEC_ECC_ISR_MASK (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_ECC_ISR_MBE_MASK) +/* ECC Interrupt Status PL Initialize Bit Mask */ +#define XSDFEC_PL_INIT_ECC_ISR_MASK \ + (XSDFEC_PL_INIT_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK) +/* ECC Interrupt Status All Bit Mask */ +#define XSDFEC_ALL_ECC_ISR_MASK \ + (XSDFEC_ECC_ISR_MASK | XSDFEC_PL_INIT_ECC_ISR_MASK) +/* ECC Interrupt Status Single Bit Errors Mask */ +#define XSDFEC_ALL_ECC_ISR_SBE_MASK \ + (XSDFEC_ECC_ISR_SBE_MASK | XSDFEC_PL_INIT_ECC_ISR_SBE_MASK) +/* ECC Interrupt Status Multi Bit Errors Mask */ +#define XSDFEC_ALL_ECC_ISR_MBE_MASK \ + (XSDFEC_ECC_ISR_MBE_MASK | XSDFEC_PL_INIT_ECC_ISR_MBE_MASK) + +/* Write Only - ECC Interrupt Enable Register */ +#define XSDFEC_ECC_IER_ADDR (0x30) +/* Write Only - ECC Interrupt Disable Register */ +#define XSDFEC_ECC_IDR_ADDR (0x34) +/* Read Only - ECC Interrupt Mask Register */ +#define XSDFEC_ECC_IMR_ADDR (0x38) + +/* BYPASS Register */ +#define XSDFEC_BYPASS_ADDR (0x3C) + +/* Turbo Code Register */ +#define XSDFEC_TURBO_ADDR (0x100) +#define XSDFEC_TURBO_SCALE_MASK (0xFFF) +#define XSDFEC_TURBO_SCALE_BIT_POS (8) +#define XSDFEC_TURBO_SCALE_MAX (15) + +/* REG0 Register */ +#define XSDFEC_LDPC_CODE_REG0_ADDR_BASE (0x2000) +#define XSDFEC_LDPC_CODE_REG0_ADDR_HIGH (0x27F0) +#define XSDFEC_REG0_N_MIN (4) +#define XSDFEC_REG0_N_MAX (32768) +#define XSDFEC_REG0_N_MUL_P (256) +#define XSDFEC_REG0_N_LSB (0) +#define XSDFEC_REG0_K_MIN (2) +#define XSDFEC_REG0_K_MAX (32766) +#define XSDFEC_REG0_K_MUL_P (256) +#define XSDFEC_REG0_K_LSB (16) + +/* REG1 Register */ +#define XSDFEC_LDPC_CODE_REG1_ADDR_BASE (0x2004) +#define XSDFEC_LDPC_CODE_REG1_ADDR_HIGH (0x27f4) +#define XSDFEC_REG1_PSIZE_MIN (2) +#define XSDFEC_REG1_PSIZE_MAX (512) +#define XSDFEC_REG1_NO_PACKING_MASK (0x400) +#define XSDFEC_REG1_NO_PACKING_LSB (10) +#define XSDFEC_REG1_NM_MASK (0xFF800) +#define XSDFEC_REG1_NM_LSB (11) +#define XSDFEC_REG1_BYPASS_MASK (0x100000) + +/* REG2 Register */ +#define XSDFEC_LDPC_CODE_REG2_ADDR_BASE (0x2008) +#define XSDFEC_LDPC_CODE_REG2_ADDR_HIGH (0x27f8) +#define XSDFEC_REG2_NLAYERS_MIN (1) +#define XSDFEC_REG2_NLAYERS_MAX (256) +#define XSDFEC_REG2_NNMQC_MASK (0xFFE00) +#define XSDFEC_REG2_NMQC_LSB (9) +#define XSDFEC_REG2_NORM_TYPE_MASK (0x100000) +#define XSDFEC_REG2_NORM_TYPE_LSB (20) +#define XSDFEC_REG2_SPECIAL_QC_MASK (0x200000) +#define XSDFEC_REG2_SPEICAL_QC_LSB (21) +#define XSDFEC_REG2_NO_FINAL_PARITY_MASK (0x400000) +#define XSDFEC_REG2_NO_FINAL_PARITY_LSB (22) +#define 
XSDFEC_REG2_MAX_SCHEDULE_MASK (0x1800000) +#define XSDFEC_REG2_MAX_SCHEDULE_LSB (23) + +/* REG3 Register */ +#define XSDFEC_LDPC_CODE_REG3_ADDR_BASE (0x200C) +#define XSDFEC_LDPC_CODE_REG3_ADDR_HIGH (0x27FC) +#define XSDFEC_REG3_LA_OFF_LSB (8) +#define XSDFEC_REG3_QC_OFF_LSB (16) + +#define XSDFEC_LDPC_REG_JUMP (0x10) +#define XSDFEC_REG_WIDTH_JUMP (4) + +/* The maximum number of pinned pages */ +#define MAX_NUM_PAGES ((XSDFEC_QC_TABLE_DEPTH / PAGE_SIZE) + 1) /** * struct xsdfec_clks - For managing SD-FEC clocks @@ -49,31 +188,1043 @@ struct xsdfec_clks { /** * struct xsdfec_dev - Driver data for SDFEC - * @regs: device physical base address - * @dev: pointer to device struct * @miscdev: Misc device handle - * @error_data_lock: Error counter and states spinlock * @clks: Clocks managed by the SDFEC driver + * @waitq: Driver wait queue + * @config: Configuration of the SDFEC device * @dev_name: Device name + * @flags: spinlock flags + * @regs: device physical base address + * @dev: pointer to device struct + * @state: State of the SDFEC device + * @error_data_lock: Error counter and states spinlock * @dev_id: Device ID + * @isr_err_count: Count of ISR errors + * @cecc_count: Count of Correctable ECC errors (SBE) + * @uecc_count: Count of Uncorrectable ECC errors (MBE) + * @irq: IRQ number + * @state_updated: indicates State updated by interrupt handler + * @stats_updated: indicates Stats updated by interrupt handler + * @intr_enabled: indicates IRQ enabled * * This structure contains necessary state for SDFEC driver to operate */ struct xsdfec_dev { + struct miscdevice miscdev; + struct xsdfec_clks clks; + wait_queue_head_t waitq; + struct xsdfec_config config; + char dev_name[DEV_NAME_LEN]; + unsigned long flags; void __iomem *regs; struct device *dev; - struct miscdevice miscdev; + enum xsdfec_state state; /* Spinlock to protect state_updated and stats_updated */ spinlock_t error_data_lock; - struct xsdfec_clks clks; - char dev_name[DEV_NAME_LEN]; int dev_id; + u32 isr_err_count; + u32 cecc_count; + u32 uecc_count; + int irq; + bool state_updated; + bool stats_updated; + bool intr_enabled; }; +static inline void xsdfec_regwrite(struct xsdfec_dev *xsdfec, u32 addr, + u32 value) +{ + dev_dbg(xsdfec->dev, "Writing 0x%x to offset 0x%x", value, addr); + iowrite32(value, xsdfec->regs + addr); +} + +static inline u32 xsdfec_regread(struct xsdfec_dev *xsdfec, u32 addr) +{ + u32 rval; + + rval = ioread32(xsdfec->regs + addr); + dev_dbg(xsdfec->dev, "Read value = 0x%x from offset 0x%x", rval, addr); + return rval; +} + +static void update_bool_config_from_reg(struct xsdfec_dev *xsdfec, + u32 reg_offset, u32 bit_num, + char *config_value) +{ + u32 reg_val; + u32 bit_mask = 1 << bit_num; + + reg_val = xsdfec_regread(xsdfec, reg_offset); + *config_value = (reg_val & bit_mask) > 0; +} + +static void update_config_from_hw(struct xsdfec_dev *xsdfec) +{ + u32 reg_value; + bool sdfec_started; + + /* Update the Order */ + reg_value = xsdfec_regread(xsdfec, XSDFEC_ORDER_ADDR); + xsdfec->config.order = reg_value; + + update_bool_config_from_reg(xsdfec, XSDFEC_BYPASS_ADDR, + 0, /* Bit Number, maybe change to mask */ + &xsdfec->config.bypass); + + update_bool_config_from_reg(xsdfec, XSDFEC_CODE_WR_PROTECT_ADDR, + 0, /* Bit Number */ + &xsdfec->config.code_wr_protect); + + reg_value = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); + xsdfec->config.irq.enable_isr = (reg_value & XSDFEC_ISR_MASK) > 0; + + reg_value = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); + xsdfec->config.irq.enable_ecc_isr = + (reg_value & 
XSDFEC_ECC_ISR_MASK) > 0; + + reg_value = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR); + sdfec_started = (reg_value & XSDFEC_AXIS_IN_ENABLE_MASK) > 0; + if (sdfec_started) + xsdfec->state = XSDFEC_STARTED; + else + xsdfec->state = XSDFEC_STOPPED; +} + +static int xsdfec_get_status(struct xsdfec_dev *xsdfec, void __user *arg) +{ + struct xsdfec_status status; + int err; + + memset(&status, 0, sizeof(status)); + spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags); + status.state = xsdfec->state; + xsdfec->state_updated = false; + spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags); + status.activity = (xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR) & + XSDFEC_IS_ACTIVITY_SET); + + err = copy_to_user(arg, &status, sizeof(status)); + if (err) + err = -EFAULT; + + return err; +} + +static int xsdfec_get_config(struct xsdfec_dev *xsdfec, void __user *arg) +{ + int err; + + err = copy_to_user(arg, &xsdfec->config, sizeof(xsdfec->config)); + if (err) + err = -EFAULT; + + return err; +} + +static int xsdfec_isr_enable(struct xsdfec_dev *xsdfec, bool enable) +{ + u32 mask_read; + + if (enable) { + /* Enable */ + xsdfec_regwrite(xsdfec, XSDFEC_IER_ADDR, XSDFEC_ISR_MASK); + mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); + if (mask_read & XSDFEC_ISR_MASK) { + dev_dbg(xsdfec->dev, + "SDFEC enabling irq with IER failed"); + return -EIO; + } + } else { + /* Disable */ + xsdfec_regwrite(xsdfec, XSDFEC_IDR_ADDR, XSDFEC_ISR_MASK); + mask_read = xsdfec_regread(xsdfec, XSDFEC_IMR_ADDR); + if ((mask_read & XSDFEC_ISR_MASK) != XSDFEC_ISR_MASK) { + dev_dbg(xsdfec->dev, + "SDFEC disabling irq with IDR failed"); + return -EIO; + } + } + return 0; +} + +static int xsdfec_ecc_isr_enable(struct xsdfec_dev *xsdfec, bool enable) +{ + u32 mask_read; + + if (enable) { + /* Enable */ + xsdfec_regwrite(xsdfec, XSDFEC_ECC_IER_ADDR, + XSDFEC_ALL_ECC_ISR_MASK); + mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); + if (mask_read & XSDFEC_ALL_ECC_ISR_MASK) { + dev_dbg(xsdfec->dev, + "SDFEC enabling ECC irq with ECC IER failed"); + return -EIO; + } + } else { + /* Disable */ + xsdfec_regwrite(xsdfec, XSDFEC_ECC_IDR_ADDR, + XSDFEC_ALL_ECC_ISR_MASK); + mask_read = xsdfec_regread(xsdfec, XSDFEC_ECC_IMR_ADDR); + if (!(((mask_read & XSDFEC_ALL_ECC_ISR_MASK) == + XSDFEC_ECC_ISR_MASK) || + ((mask_read & XSDFEC_ALL_ECC_ISR_MASK) == + XSDFEC_PL_INIT_ECC_ISR_MASK))) { + dev_dbg(xsdfec->dev, + "SDFEC disable ECC irq with ECC IDR failed"); + return -EIO; + } + } + return 0; +} + +static int xsdfec_set_irq(struct xsdfec_dev *xsdfec, void __user *arg) +{ + struct xsdfec_irq irq; + int err; + int isr_err; + int ecc_err; + + err = copy_from_user(&irq, arg, sizeof(irq)); + if (err) + return -EFAULT; + + /* Setup tlast related IRQ */ + isr_err = xsdfec_isr_enable(xsdfec, irq.enable_isr); + if (!isr_err) + xsdfec->config.irq.enable_isr = irq.enable_isr; + + /* Setup ECC related IRQ */ + ecc_err = xsdfec_ecc_isr_enable(xsdfec, irq.enable_ecc_isr); + if (!ecc_err) + xsdfec->config.irq.enable_ecc_isr = irq.enable_ecc_isr; + + if (isr_err < 0 || ecc_err < 0) + err = -EIO; + + return err; +} + +static int xsdfec_set_turbo(struct xsdfec_dev *xsdfec, void __user *arg) +{ + struct xsdfec_turbo turbo; + int err; + u32 turbo_write; + + err = copy_from_user(&turbo, arg, sizeof(turbo)); + if (err) + return -EFAULT; + + if (turbo.alg >= XSDFEC_TURBO_ALG_MAX) + return -EINVAL; + + if (turbo.scale > XSDFEC_TURBO_SCALE_MAX) + return -EINVAL; + + /* Check to see what device tree says about the FEC codes */ + if (xsdfec->config.code 
== XSDFEC_LDPC_CODE) + return -EIO; + + turbo_write = ((turbo.scale & XSDFEC_TURBO_SCALE_MASK) + << XSDFEC_TURBO_SCALE_BIT_POS) | + turbo.alg; + xsdfec_regwrite(xsdfec, XSDFEC_TURBO_ADDR, turbo_write); + return err; +} + +static int xsdfec_get_turbo(struct xsdfec_dev *xsdfec, void __user *arg) +{ + u32 reg_value; + struct xsdfec_turbo turbo_params; + int err; + + if (xsdfec->config.code == XSDFEC_LDPC_CODE) + return -EIO; + + memset(&turbo_params, 0, sizeof(turbo_params)); + reg_value = xsdfec_regread(xsdfec, XSDFEC_TURBO_ADDR); + + turbo_params.scale = (reg_value & XSDFEC_TURBO_SCALE_MASK) >> + XSDFEC_TURBO_SCALE_BIT_POS; + turbo_params.alg = reg_value & 0x1; + + err = copy_to_user(arg, &turbo_params, sizeof(turbo_params)); + if (err) + err = -EFAULT; + + return err; +} + +static int xsdfec_reg0_write(struct xsdfec_dev *xsdfec, u32 n, u32 k, u32 psize, + u32 offset) +{ + u32 wdata; + + if (n < XSDFEC_REG0_N_MIN || n > XSDFEC_REG0_N_MAX || psize == 0 || + (n > XSDFEC_REG0_N_MUL_P * psize) || n <= k || ((n % psize) != 0)) { + dev_dbg(xsdfec->dev, "N value is not in range"); + return -EINVAL; + } + n <<= XSDFEC_REG0_N_LSB; + + if (k < XSDFEC_REG0_K_MIN || k > XSDFEC_REG0_K_MAX || + (k > XSDFEC_REG0_K_MUL_P * psize) || ((k % psize) != 0)) { + dev_dbg(xsdfec->dev, "K value is not in range"); + return -EINVAL; + } + k = k << XSDFEC_REG0_K_LSB; + wdata = k | n; + + if (XSDFEC_LDPC_CODE_REG0_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) > + XSDFEC_LDPC_CODE_REG0_ADDR_HIGH) { + dev_dbg(xsdfec->dev, "Writing outside of LDPC reg0 space 0x%x", + XSDFEC_LDPC_CODE_REG0_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP)); + return -EINVAL; + } + xsdfec_regwrite(xsdfec, + XSDFEC_LDPC_CODE_REG0_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP), + wdata); + return 0; +} + +static int xsdfec_reg1_write(struct xsdfec_dev *xsdfec, u32 psize, + u32 no_packing, u32 nm, u32 offset) +{ + u32 wdata; + + if (psize < XSDFEC_REG1_PSIZE_MIN || psize > XSDFEC_REG1_PSIZE_MAX) { + dev_dbg(xsdfec->dev, "Psize is not in range"); + return -EINVAL; + } + + if (no_packing != 0 && no_packing != 1) + dev_dbg(xsdfec->dev, "No-packing bit register invalid"); + no_packing = ((no_packing << XSDFEC_REG1_NO_PACKING_LSB) & + XSDFEC_REG1_NO_PACKING_MASK); + + if (nm & ~(XSDFEC_REG1_NM_MASK >> XSDFEC_REG1_NM_LSB)) + dev_dbg(xsdfec->dev, "NM is beyond 10 bits"); + nm = (nm << XSDFEC_REG1_NM_LSB) & XSDFEC_REG1_NM_MASK; + + wdata = nm | no_packing | psize; + if (XSDFEC_LDPC_CODE_REG1_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) > + XSDFEC_LDPC_CODE_REG1_ADDR_HIGH) { + dev_dbg(xsdfec->dev, "Writing outside of LDPC reg1 space 0x%x", + XSDFEC_LDPC_CODE_REG1_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP)); + return -EINVAL; + } + xsdfec_regwrite(xsdfec, + XSDFEC_LDPC_CODE_REG1_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP), + wdata); + return 0; +} + +static int xsdfec_reg2_write(struct xsdfec_dev *xsdfec, u32 nlayers, u32 nmqc, + u32 norm_type, u32 special_qc, u32 no_final_parity, + u32 max_schedule, u32 offset) +{ + u32 wdata; + + if (nlayers < XSDFEC_REG2_NLAYERS_MIN || + nlayers > XSDFEC_REG2_NLAYERS_MAX) { + dev_dbg(xsdfec->dev, "Nlayers is not in range"); + return -EINVAL; + } + + if (nmqc & ~(XSDFEC_REG2_NNMQC_MASK >> XSDFEC_REG2_NMQC_LSB)) + dev_dbg(xsdfec->dev, "NMQC exceeds 11 bits"); + nmqc = (nmqc << XSDFEC_REG2_NMQC_LSB) & XSDFEC_REG2_NNMQC_MASK; + + if (norm_type > 1) + dev_dbg(xsdfec->dev, "Norm type is invalid"); + norm_type = ((norm_type << XSDFEC_REG2_NORM_TYPE_LSB) & + XSDFEC_REG2_NORM_TYPE_MASK); + if (special_qc > 1) + 
dev_dbg(xsdfec->dev, "Special QC in invalid"); + special_qc = ((special_qc << XSDFEC_REG2_SPEICAL_QC_LSB) & + XSDFEC_REG2_SPECIAL_QC_MASK); + + if (no_final_parity > 1) + dev_dbg(xsdfec->dev, "No final parity check invalid"); + no_final_parity = + ((no_final_parity << XSDFEC_REG2_NO_FINAL_PARITY_LSB) & + XSDFEC_REG2_NO_FINAL_PARITY_MASK); + if (max_schedule & + ~(XSDFEC_REG2_MAX_SCHEDULE_MASK >> XSDFEC_REG2_MAX_SCHEDULE_LSB)) + dev_dbg(xsdfec->dev, "Max Schedule exceeds 2 bits"); + max_schedule = ((max_schedule << XSDFEC_REG2_MAX_SCHEDULE_LSB) & + XSDFEC_REG2_MAX_SCHEDULE_MASK); + + wdata = (max_schedule | no_final_parity | special_qc | norm_type | + nmqc | nlayers); + + if (XSDFEC_LDPC_CODE_REG2_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) > + XSDFEC_LDPC_CODE_REG2_ADDR_HIGH) { + dev_dbg(xsdfec->dev, "Writing outside of LDPC reg2 space 0x%x", + XSDFEC_LDPC_CODE_REG2_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP)); + return -EINVAL; + } + xsdfec_regwrite(xsdfec, + XSDFEC_LDPC_CODE_REG2_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP), + wdata); + return 0; +} + +static int xsdfec_reg3_write(struct xsdfec_dev *xsdfec, u8 sc_off, u8 la_off, + u16 qc_off, u32 offset) +{ + u32 wdata; + + wdata = ((qc_off << XSDFEC_REG3_QC_OFF_LSB) | + (la_off << XSDFEC_REG3_LA_OFF_LSB) | sc_off); + if (XSDFEC_LDPC_CODE_REG3_ADDR_BASE + (offset * XSDFEC_LDPC_REG_JUMP) > + XSDFEC_LDPC_CODE_REG3_ADDR_HIGH) { + dev_dbg(xsdfec->dev, "Writing outside of LDPC reg3 space 0x%x", + XSDFEC_LDPC_CODE_REG3_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP)); + return -EINVAL; + } + xsdfec_regwrite(xsdfec, + XSDFEC_LDPC_CODE_REG3_ADDR_BASE + + (offset * XSDFEC_LDPC_REG_JUMP), + wdata); + return 0; +} + +static int xsdfec_table_write(struct xsdfec_dev *xsdfec, u32 offset, + u32 *src_ptr, u32 len, const u32 base_addr, + const u32 depth) +{ + u32 reg = 0; + u32 res; + u32 n, i; + u32 *addr = NULL; + struct page *page[MAX_NUM_PAGES]; + + /* + * Writes that go beyond the length of + * Shared Scale(SC) table should fail + */ + if (offset > depth / XSDFEC_REG_WIDTH_JUMP || + len > depth / XSDFEC_REG_WIDTH_JUMP || + offset + len > depth / XSDFEC_REG_WIDTH_JUMP) { + dev_dbg(xsdfec->dev, "Write exceeds SC table length"); + return -EINVAL; + } + + n = (len * XSDFEC_REG_WIDTH_JUMP) / PAGE_SIZE; + if ((len * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE) + n += 1; + + res = get_user_pages_fast((unsigned long)src_ptr, n, 0, page); + if (res < n) { + for (i = 0; i < res; i++) + put_page(page[i]); + return -EINVAL; + } + + for (i = 0; i < n; i++) { + addr = kmap(page[i]); + do { + xsdfec_regwrite(xsdfec, + base_addr + ((offset + reg) * + XSDFEC_REG_WIDTH_JUMP), + addr[reg]); + reg++; + } while ((reg < len) && + ((reg * XSDFEC_REG_WIDTH_JUMP) % PAGE_SIZE)); + put_page(page[i]); + } + return reg; +} + +static int xsdfec_add_ldpc(struct xsdfec_dev *xsdfec, void __user *arg) +{ + struct xsdfec_ldpc_params *ldpc; + int ret, n; + + ldpc = kzalloc(sizeof(*ldpc), GFP_KERNEL); + if (!ldpc) + return -ENOMEM; + + if (copy_from_user(ldpc, arg, sizeof(*ldpc))) { + ret = -EFAULT; + goto err_out; + } + + if (xsdfec->config.code == XSDFEC_TURBO_CODE) { + ret = -EIO; + goto err_out; + } + + /* Verify Device has not started */ + if (xsdfec->state == XSDFEC_STARTED) { + ret = -EIO; + goto err_out; + } + + if (xsdfec->config.code_wr_protect) { + ret = -EIO; + goto err_out; + } + + /* Write Reg 0 */ + ret = xsdfec_reg0_write(xsdfec, ldpc->n, ldpc->k, ldpc->psize, + ldpc->code_id); + if (ret) + goto err_out; + + /* Write Reg 1 */ + ret = xsdfec_reg1_write(xsdfec, ldpc->psize, 
ldpc->no_packing, ldpc->nm, + ldpc->code_id); + if (ret) + goto err_out; + + /* Write Reg 2 */ + ret = xsdfec_reg2_write(xsdfec, ldpc->nlayers, ldpc->nmqc, + ldpc->norm_type, ldpc->special_qc, + ldpc->no_final_parity, ldpc->max_schedule, + ldpc->code_id); + if (ret) + goto err_out; + + /* Write Reg 3 */ + ret = xsdfec_reg3_write(xsdfec, ldpc->sc_off, ldpc->la_off, + ldpc->qc_off, ldpc->code_id); + if (ret) + goto err_out; + + /* Write Shared Codes */ + n = ldpc->nlayers / 4; + if (ldpc->nlayers % 4) + n++; + + ret = xsdfec_table_write(xsdfec, ldpc->sc_off, ldpc->sc_table, n, + XSDFEC_LDPC_SC_TABLE_ADDR_BASE, + XSDFEC_SC_TABLE_DEPTH); + if (ret < 0) + goto err_out; + + ret = xsdfec_table_write(xsdfec, 4 * ldpc->la_off, ldpc->la_table, + ldpc->nlayers, XSDFEC_LDPC_LA_TABLE_ADDR_BASE, + XSDFEC_LA_TABLE_DEPTH); + if (ret < 0) + goto err_out; + + ret = xsdfec_table_write(xsdfec, 4 * ldpc->qc_off, ldpc->qc_table, + ldpc->nqc, XSDFEC_LDPC_QC_TABLE_ADDR_BASE, + XSDFEC_QC_TABLE_DEPTH); + if (ret > 0) + ret = 0; +err_out: + kfree(ldpc); + return ret; +} + +static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg) +{ + bool order_invalid; + enum xsdfec_order order; + int err; + + err = get_user(order, (enum xsdfec_order *)arg); + if (err) + return -EFAULT; + + order_invalid = (order != XSDFEC_MAINTAIN_ORDER) && + (order != XSDFEC_OUT_OF_ORDER); + if (order_invalid) + return -EINVAL; + + /* Verify Device has not started */ + if (xsdfec->state == XSDFEC_STARTED) + return -EIO; + + xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order); + + xsdfec->config.order = order; + + return 0; +} + +static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg) +{ + bool bypass; + int err; + + err = get_user(bypass, arg); + if (err) + return -EFAULT; + + /* Verify Device has not started */ + if (xsdfec->state == XSDFEC_STARTED) + return -EIO; + + if (bypass) + xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1); + else + xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0); + + xsdfec->config.bypass = bypass; + + return 0; +} + +static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg) +{ + u32 reg_value; + bool is_active; + int err; + + reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR); + /* using a double ! 
+
+static int xsdfec_set_order(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+	bool order_invalid;
+	enum xsdfec_order order;
+	int err;
+
+	err = get_user(order, (enum xsdfec_order *)arg);
+	if (err)
+		return -EFAULT;
+
+	order_invalid = (order != XSDFEC_MAINTAIN_ORDER) &&
+			(order != XSDFEC_OUT_OF_ORDER);
+	if (order_invalid)
+		return -EINVAL;
+
+	/* Verify Device has not started */
+	if (xsdfec->state == XSDFEC_STARTED)
+		return -EIO;
+
+	xsdfec_regwrite(xsdfec, XSDFEC_ORDER_ADDR, order);
+
+	xsdfec->config.order = order;
+
+	return 0;
+}
+
+static int xsdfec_set_bypass(struct xsdfec_dev *xsdfec, bool __user *arg)
+{
+	bool bypass;
+	int err;
+
+	err = get_user(bypass, arg);
+	if (err)
+		return -EFAULT;
+
+	/* Verify Device has not started */
+	if (xsdfec->state == XSDFEC_STARTED)
+		return -EIO;
+
+	if (bypass)
+		xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 1);
+	else
+		xsdfec_regwrite(xsdfec, XSDFEC_BYPASS_ADDR, 0);
+
+	xsdfec->config.bypass = bypass;
+
+	return 0;
+}
+
+static int xsdfec_is_active(struct xsdfec_dev *xsdfec, bool __user *arg)
+{
+	u32 reg_value;
+	bool is_active;
+	int err;
+
+	reg_value = xsdfec_regread(xsdfec, XSDFEC_ACTIVE_ADDR);
+	/* using a double ! operator instead of casting */
+	is_active = !!(reg_value & XSDFEC_IS_ACTIVITY_SET);
+	err = put_user(is_active, arg);
+	if (err)
+		return -EFAULT;
+
+	return err;
+}
+
+static u32
+xsdfec_translate_axis_width_cfg_val(enum xsdfec_axis_width axis_width_cfg)
+{
+	u32 axis_width_field = 0;
+
+	switch (axis_width_cfg) {
+	case XSDFEC_1x128b:
+		axis_width_field = 0;
+		break;
+	case XSDFEC_2x128b:
+		axis_width_field = 1;
+		break;
+	case XSDFEC_4x128b:
+		axis_width_field = 2;
+		break;
+	}
+
+	return axis_width_field;
+}
+
+static u32 xsdfec_translate_axis_words_cfg_val(enum xsdfec_axis_word_include
+					       axis_word_inc_cfg)
+{
+	u32 axis_words_field = 0;
+
+	if (axis_word_inc_cfg == XSDFEC_FIXED_VALUE ||
+	    axis_word_inc_cfg == XSDFEC_IN_BLOCK)
+		axis_words_field = 0;
+	else if (axis_word_inc_cfg == XSDFEC_PER_AXI_TRANSACTION)
+		axis_words_field = 1;
+
+	return axis_words_field;
+}
+
+static int xsdfec_cfg_axi_streams(struct xsdfec_dev *xsdfec)
+{
+	u32 reg_value;
+	u32 dout_words_field;
+	u32 dout_width_field;
+	u32 din_words_field;
+	u32 din_width_field;
+	struct xsdfec_config *config = &xsdfec->config;
+
+	/* translate config info to register values */
+	dout_words_field =
+		xsdfec_translate_axis_words_cfg_val(config->dout_word_include);
+	dout_width_field =
+		xsdfec_translate_axis_width_cfg_val(config->dout_width);
+	din_words_field =
+		xsdfec_translate_axis_words_cfg_val(config->din_word_include);
+	din_width_field =
+		xsdfec_translate_axis_width_cfg_val(config->din_width);
+
+	reg_value = dout_words_field << XSDFEC_AXIS_DOUT_WORDS_LSB;
+	reg_value |= dout_width_field << XSDFEC_AXIS_DOUT_WIDTH_LSB;
+	reg_value |= din_words_field << XSDFEC_AXIS_DIN_WORDS_LSB;
+	reg_value |= din_width_field << XSDFEC_AXIS_DIN_WIDTH_LSB;
+
+	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_WIDTH_ADDR, reg_value);
+
+	return 0;
+}
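+
+/*
+ * xsdfec_cfg_axi_streams() packs the four translated fields into a single
+ * write to XSDFEC_AXIS_WIDTH_ADDR: the width helper maps the 1x/2x/4x 128-bit
+ * lane options to 0/1/2 and the word-include helper maps fixed/in-block vs
+ * per-AXI-transaction word counts to 0/1, each shifted to its *_LSB position.
+ */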
+
+static int xsdfec_dev_open(struct inode *iptr, struct file *fptr)
+{
+	return 0;
+}
+
+static int xsdfec_dev_release(struct inode *iptr, struct file *fptr)
+{
+	return 0;
+}
+
+static int xsdfec_start(struct xsdfec_dev *xsdfec)
+{
+	u32 regread;
+
+	regread = xsdfec_regread(xsdfec, XSDFEC_FEC_CODE_ADDR);
+	regread &= 0x1;
+	if (regread != xsdfec->config.code) {
+		dev_dbg(xsdfec->dev,
+			"%s SDFEC HW code does not match driver code, reg %d, code %d",
+			__func__, regread, xsdfec->config.code);
+		return -EINVAL;
+	}
+
+	/* Set AXIS enable */
+	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR,
+			XSDFEC_AXIS_ENABLE_MASK);
+	/* Done */
+	xsdfec->state = XSDFEC_STARTED;
+	return 0;
+}
+
+static int xsdfec_stop(struct xsdfec_dev *xsdfec)
+{
+	u32 regread;
+
+	if (xsdfec->state != XSDFEC_STARTED)
+		dev_dbg(xsdfec->dev, "Device not started correctly");
+	/* Disable AXIS_ENABLE Input interfaces only */
+	regread = xsdfec_regread(xsdfec, XSDFEC_AXIS_ENABLE_ADDR);
+	regread &= (~XSDFEC_AXIS_IN_ENABLE_MASK);
+	xsdfec_regwrite(xsdfec, XSDFEC_AXIS_ENABLE_ADDR, regread);
+	/* Stop */
+	xsdfec->state = XSDFEC_STOPPED;
+	return 0;
+}
+
+static int xsdfec_clear_stats(struct xsdfec_dev *xsdfec)
+{
+	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+	xsdfec->isr_err_count = 0;
+	xsdfec->uecc_count = 0;
+	xsdfec->cecc_count = 0;
+	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+
+	return 0;
+}
+
+static int xsdfec_get_stats(struct xsdfec_dev *xsdfec, void __user *arg)
+{
+	int err;
+	struct xsdfec_stats user_stats;
+
+	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+	user_stats.isr_err_count = xsdfec->isr_err_count;
+	user_stats.cecc_count = xsdfec->cecc_count;
+	user_stats.uecc_count = xsdfec->uecc_count;
+	xsdfec->stats_updated = false;
+	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+
+	err = copy_to_user(arg, &user_stats, sizeof(user_stats));
+	if (err)
+		err = -EFAULT;
+
+	return err;
+}
+
+static int xsdfec_set_default_config(struct xsdfec_dev *xsdfec)
+{
+	/* Ensure registers are aligned with core configuration */
+	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
+	xsdfec_cfg_axi_streams(xsdfec);
+	update_config_from_hw(xsdfec);
+
+	return 0;
+}
+
+static long xsdfec_dev_ioctl(struct file *fptr, unsigned int cmd,
+			     unsigned long data)
+{
+	struct xsdfec_dev *xsdfec;
+	void __user *arg = NULL;
+	int rval = -EINVAL;
+
+	xsdfec = container_of(fptr->private_data, struct xsdfec_dev, miscdev);
+
+	/* In failed state allow only reset and get status IOCTLs */
+	if (xsdfec->state == XSDFEC_NEEDS_RESET &&
+	    (cmd != XSDFEC_SET_DEFAULT_CONFIG && cmd != XSDFEC_GET_STATUS &&
+	     cmd != XSDFEC_GET_STATS && cmd != XSDFEC_CLEAR_STATS)) {
+		return -EPERM;
+	}
+
+	if (_IOC_TYPE(cmd) != XSDFEC_MAGIC)
+		return -ENOTTY;
+
+	/* check if ioctl argument is present and valid */
+	if (_IOC_DIR(cmd) != _IOC_NONE) {
+		arg = (void __user *)data;
+		if (!arg)
+			return rval;
+	}
+
+	switch (cmd) {
+	case XSDFEC_START_DEV:
+		rval = xsdfec_start(xsdfec);
+		break;
+	case XSDFEC_STOP_DEV:
+		rval = xsdfec_stop(xsdfec);
+		break;
+	case XSDFEC_CLEAR_STATS:
+		rval = xsdfec_clear_stats(xsdfec);
+		break;
+	case XSDFEC_GET_STATS:
+		rval = xsdfec_get_stats(xsdfec, arg);
+		break;
+	case XSDFEC_GET_STATUS:
+		rval = xsdfec_get_status(xsdfec, arg);
+		break;
+	case XSDFEC_GET_CONFIG:
+		rval = xsdfec_get_config(xsdfec, arg);
+		break;
+	case XSDFEC_SET_DEFAULT_CONFIG:
+		rval = xsdfec_set_default_config(xsdfec);
+		break;
+	case XSDFEC_SET_IRQ:
+		rval = xsdfec_set_irq(xsdfec, arg);
+		break;
+	case XSDFEC_SET_TURBO:
+		rval = xsdfec_set_turbo(xsdfec, arg);
+		break;
+	case XSDFEC_GET_TURBO:
+		rval = xsdfec_get_turbo(xsdfec, arg);
+		break;
+	case XSDFEC_ADD_LDPC_CODE_PARAMS:
+		rval = xsdfec_add_ldpc(xsdfec, arg);
+		break;
+	case XSDFEC_SET_ORDER:
+		rval = xsdfec_set_order(xsdfec, arg);
+		break;
+	case XSDFEC_SET_BYPASS:
+		rval = xsdfec_set_bypass(xsdfec, arg);
+		break;
+	case XSDFEC_IS_ACTIVE:
+		rval = xsdfec_is_active(xsdfec, (bool __user *)arg);
+		break;
+	default:
+		/* Should not get here */
+		break;
+	}
+	return rval;
+}
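+
+/*
+ * Dispatch summary: once the core has entered XSDFEC_NEEDS_RESET only the
+ * default-config, status and stats ioctls are accepted (-EPERM otherwise),
+ * and any command whose _IOC_DIR is not _IOC_NONE must supply a non-NULL
+ * user pointer. A typical user-space sequence would be something like
+ * XSDFEC_ADD_LDPC_CODE_PARAMS followed by ioctl(fd, XSDFEC_START_DEV) once
+ * configuration is complete (illustrative only).
+ */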
+
+#ifdef CONFIG_COMPAT
+static long xsdfec_dev_compat_ioctl(struct file *file, unsigned int cmd,
+				    unsigned long data)
+{
+	return xsdfec_dev_ioctl(file, cmd, (unsigned long)compat_ptr(data));
+}
+#endif
+
+static unsigned int xsdfec_poll(struct file *file, poll_table *wait)
+{
+	unsigned int mask = 0;
+	struct xsdfec_dev *xsdfec;
+
+	xsdfec = container_of(file->private_data, struct xsdfec_dev, miscdev);
+
+	if (!xsdfec)
+		return POLLNVAL | POLLHUP;
+
+	poll_wait(file, &xsdfec->waitq, wait);
+
+	/* XSDFEC ISR detected an error */
+	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+	if (xsdfec->state_updated)
+		mask |= POLLIN | POLLPRI;
+
+	if (xsdfec->stats_updated)
+		mask |= POLLIN | POLLRDNORM;
+	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+
+	return mask;
+}
+
 static const struct file_operations xsdfec_fops = {
 	.owner = THIS_MODULE,
+	.open = xsdfec_dev_open,
+	.release = xsdfec_dev_release,
+	.unlocked_ioctl = xsdfec_dev_ioctl,
+	.poll = xsdfec_poll,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = xsdfec_dev_compat_ioctl,
+#endif
 };
 
+static int xsdfec_parse_of(struct xsdfec_dev *xsdfec)
+{
+	struct device *dev = xsdfec->dev;
+	struct device_node *node = dev->of_node;
+	int rval;
+	const char *fec_code;
+	u32 din_width;
+	u32 din_word_include;
+	u32 dout_width;
+	u32 dout_word_include;
+
+	rval = of_property_read_string(node, "xlnx,sdfec-code", &fec_code);
+	if (rval < 0)
+		return rval;
+
+	if (!strcasecmp(fec_code, "ldpc"))
+		xsdfec->config.code = XSDFEC_LDPC_CODE;
+	else if (!strcasecmp(fec_code, "turbo"))
+		xsdfec->config.code = XSDFEC_TURBO_CODE;
+	else
+		return -EINVAL;
+
+	rval = of_property_read_u32(node, "xlnx,sdfec-din-words",
+				    &din_word_include);
+	if (rval < 0)
+		return rval;
+
+	if (din_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
+		xsdfec->config.din_word_include = din_word_include;
+	else
+		return -EINVAL;
+
+	rval = of_property_read_u32(node, "xlnx,sdfec-din-width", &din_width);
+	if (rval < 0)
+		return rval;
+
+	switch (din_width) {
+	/* Fall through and set for valid values */
+	case XSDFEC_1x128b:
+	case XSDFEC_2x128b:
+	case XSDFEC_4x128b:
+		xsdfec->config.din_width = din_width;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	rval = of_property_read_u32(node, "xlnx,sdfec-dout-words",
+				    &dout_word_include);
+	if (rval < 0)
+		return rval;
+
+	if (dout_word_include < XSDFEC_AXIS_WORDS_INCLUDE_MAX)
+		xsdfec->config.dout_word_include = dout_word_include;
+	else
+		return -EINVAL;
+
+	rval = of_property_read_u32(node, "xlnx,sdfec-dout-width", &dout_width);
+	if (rval < 0)
+		return rval;
+
+	switch (dout_width) {
+	/* Fall through and set for valid values */
+	case XSDFEC_1x128b:
+	case XSDFEC_2x128b:
+	case XSDFEC_4x128b:
+		xsdfec->config.dout_width = dout_width;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Write LDPC to CODE Register */
+	xsdfec_regwrite(xsdfec, XSDFEC_FEC_CODE_ADDR, xsdfec->config.code);
+
+	xsdfec_cfg_axi_streams(xsdfec);
+
+	return 0;
+}
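+
+/*
+ * Illustrative device-tree fragment for the properties consumed above (the
+ * exact cell values depend on the XSDFEC_*x128b enum encoding and on
+ * XSDFEC_AXIS_WORDS_INCLUDE_MAX; the values below are examples only):
+ *
+ *	xlnx,sdfec-code = "ldpc";
+ *	xlnx,sdfec-din-words = <0>;
+ *	xlnx,sdfec-din-width = <2>;
+ *	xlnx,sdfec-dout-words = <0>;
+ *	xlnx,sdfec-dout-width = <2>;
+ */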
+
+static irqreturn_t xsdfec_irq_thread(int irq, void *dev_id)
+{
+	struct xsdfec_dev *xsdfec = dev_id;
+	irqreturn_t ret = IRQ_HANDLED;
+	u32 ecc_err;
+	u32 isr_err;
+	u32 uecc_count;
+	u32 cecc_count;
+	u32 isr_err_count;
+	u32 aecc_count;
+	u32 tmp;
+
+	WARN_ON(xsdfec->irq != irq);
+
+	/* Mask Interrupts */
+	xsdfec_isr_enable(xsdfec, false);
+	xsdfec_ecc_isr_enable(xsdfec, false);
+	/* Read ISR */
+	ecc_err = xsdfec_regread(xsdfec, XSDFEC_ECC_ISR_ADDR);
+	isr_err = xsdfec_regread(xsdfec, XSDFEC_ISR_ADDR);
+	/* Clear the interrupts */
+	xsdfec_regwrite(xsdfec, XSDFEC_ECC_ISR_ADDR, ecc_err);
+	xsdfec_regwrite(xsdfec, XSDFEC_ISR_ADDR, isr_err);
+
+	tmp = ecc_err & XSDFEC_ALL_ECC_ISR_MBE_MASK;
+	/* Count uncorrectable 2-bit errors */
+	uecc_count = hweight32(tmp);
+	/* Count all ECC errors */
+	aecc_count = hweight32(ecc_err);
+	/* Number of correctable 1-bit ECC error */
+	cecc_count = aecc_count - 2 * uecc_count;
+	/* Count ISR errors */
+	isr_err_count = hweight32(isr_err);
+	dev_dbg(xsdfec->dev, "tmp=%x, uecc=%x, aecc=%x, cecc=%x, isr=%x", tmp,
+		uecc_count, aecc_count, cecc_count, isr_err_count);
+	dev_dbg(xsdfec->dev, "uecc=%x, cecc=%x, isr=%x", xsdfec->uecc_count,
+		xsdfec->cecc_count, xsdfec->isr_err_count);
+
+	spin_lock_irqsave(&xsdfec->error_data_lock, xsdfec->flags);
+	/* Add new errors to a 2-bits counter */
+	if (uecc_count)
+		xsdfec->uecc_count += uecc_count;
+	/* Add new errors to a 1-bits counter */
+	if (cecc_count)
+		xsdfec->cecc_count += cecc_count;
+	/* Add new errors to a ISR counter */
+	if (isr_err_count)
+		xsdfec->isr_err_count += isr_err_count;
+
+	/* Update state/stats flag */
+	if (uecc_count) {
+		if (ecc_err & XSDFEC_ECC_ISR_MBE_MASK)
+			xsdfec->state = XSDFEC_NEEDS_RESET;
+		else if (ecc_err & XSDFEC_PL_INIT_ECC_ISR_MBE_MASK)
+			xsdfec->state = XSDFEC_PL_RECONFIGURE;
+		xsdfec->stats_updated = true;
+		xsdfec->state_updated = true;
+	}
+
+	if (cecc_count)
+		xsdfec->stats_updated = true;
+
+	if (isr_err_count) {
+		xsdfec->state = XSDFEC_NEEDS_RESET;
+		xsdfec->stats_updated = true;
+		xsdfec->state_updated = true;
+	}
+
+	spin_unlock_irqrestore(&xsdfec->error_data_lock, xsdfec->flags);
+	dev_dbg(xsdfec->dev, "state=%x, stats=%x", xsdfec->state_updated,
+		xsdfec->stats_updated);
+
+	/* Enable another polling */
+	if (xsdfec->state_updated || xsdfec->stats_updated)
+		wake_up_interruptible(&xsdfec->waitq);
+	else
+		ret = IRQ_NONE;
+
+	/* Unmask Interrupts */
+	xsdfec_isr_enable(xsdfec, true);
+	xsdfec_ecc_isr_enable(xsdfec, true);
+
+	return ret;
+}
+
 static int xsdfec_clk_init(struct platform_device *pdev,
 			   struct xsdfec_clks *clks)
 {
@@ -227,19 +1378,13 @@ static void xsdfec_disable_all_clks(struct xsdfec_clks *clks)
 	clk_disable_unprepare(clks->axi_clk);
 }
 
-static void xsdfec_idr_remove(struct xsdfec_dev *xsdfec)
-{
-	mutex_lock(&dev_idr_lock);
-	idr_remove(&dev_idr, xsdfec->dev_id);
-	mutex_unlock(&dev_idr_lock);
-}
-
 static int xsdfec_probe(struct platform_device *pdev)
 {
 	struct xsdfec_dev *xsdfec;
 	struct device *dev;
 	struct resource *res;
 	int err;
+	bool irq_enabled = true;
 
 	xsdfec = devm_kzalloc(&pdev->dev, sizeof(*xsdfec), GFP_KERNEL);
 	if (!xsdfec)
@@ -260,12 +1405,34 @@ static int xsdfec_probe(struct platform_device *pdev)
 		goto err_xsdfec_dev;
 	}
 
+	xsdfec->irq = platform_get_irq(pdev, 0);
+	if (xsdfec->irq < 0) {
+		dev_dbg(dev, "platform_get_irq failed");
+		irq_enabled = false;
+	}
+
+	err = xsdfec_parse_of(xsdfec);
+	if (err < 0)
+		goto err_xsdfec_dev;
+
+	update_config_from_hw(xsdfec);
+
 	/* Save driver private data */
 	platform_set_drvdata(pdev, xsdfec);
 
-	mutex_lock(&dev_idr_lock);
-	err = idr_alloc(&dev_idr, xsdfec->dev_name, 0, 0, GFP_KERNEL);
-	mutex_unlock(&dev_idr_lock);
+	if (irq_enabled) {
+		init_waitqueue_head(&xsdfec->waitq);
+		/* Register IRQ thread */
+		err = devm_request_threaded_irq(dev, xsdfec->irq, NULL,
+						xsdfec_irq_thread, IRQF_ONESHOT,
+						"xilinx-sdfec16", xsdfec);
+		if (err < 0) {
+			dev_err(dev, "unable to request IRQ%d", xsdfec->irq);
+			goto err_xsdfec_dev;
+		}
+	}
+
+	err = ida_alloc(&dev_nrs, GFP_KERNEL);
 	if (err < 0)
 		goto err_xsdfec_dev;
 	xsdfec->dev_id = err;
@@ -278,12 +1445,12 @@ static int xsdfec_probe(struct platform_device *pdev)
 	err = misc_register(&xsdfec->miscdev);
 	if (err) {
 		dev_err(dev, "error:%d. Unable to register device", err);
-		goto err_xsdfec_idr;
+		goto err_xsdfec_ida;
 	}
 	return 0;
 
-err_xsdfec_idr:
-	xsdfec_idr_remove(xsdfec);
+err_xsdfec_ida:
+	ida_free(&dev_nrs, xsdfec->dev_id);
 err_xsdfec_dev:
 	xsdfec_disable_all_clks(&xsdfec->clks);
 	return err;
@@ -295,7 +1462,7 @@ static int xsdfec_remove(struct platform_device *pdev)
 	xsdfec = platform_get_drvdata(pdev);
 	misc_deregister(&xsdfec->miscdev);
-	xsdfec_idr_remove(xsdfec);
+	ida_free(&dev_nrs, xsdfec->dev_id);
 	xsdfec_disable_all_clks(&xsdfec->clks);
 	return 0;
 }
@@ -321,8 +1488,6 @@ static int __init xsdfec_init(void)
 {
 	int err;
 
-	mutex_init(&dev_idr_lock);
-	idr_init(&dev_idr);
 	err = platform_driver_register(&xsdfec_driver);
 	if (err < 0) {
 		pr_err("%s Unabled to register SDFEC driver", __func__);
@@ -334,7 +1499,6 @@ static int __init xsdfec_init(void)
 static void __exit xsdfec_exit(void)
 {
 	platform_driver_unregister(&xsdfec_driver);
-	idr_destroy(&dev_idr);
 }
 
 module_init(xsdfec_init);