From 7bd494491be0a330df74bb7f79427f5604060585 Mon Sep 17 00:00:00 2001 From: Heikki Krogerus Date: Tue, 1 Apr 2014 17:02:52 +0300 Subject: ARM: tegra: remove obsolete gpio entries After moving to the description-based GPIO interface in rfkill-gpio, the GPIO numbers are no longer used. Signed-off-by: Heikki Krogerus Reviewed-by: Marc Dietrich Acked-by: Stephen Warren Acked-by: Alexandre Courbot Signed-off-by: Johannes Berg --- arch/arm/mach-tegra/board-paz00.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c index e4dec9fcb084..9c6029ba526f 100644 --- a/arch/arm/mach-tegra/board-paz00.c +++ b/arch/arm/mach-tegra/board-paz00.c @@ -23,9 +23,7 @@ #include "board.h" static struct rfkill_gpio_platform_data wifi_rfkill_platform_data = { - .name = "wifi_rfkill", - .reset_gpio = 25, /* PD1 */ - .shutdown_gpio = 85, /* PK5 */ + .name = "wifi_rfkill", .type = RFKILL_TYPE_WLAN, }; -- cgit v1.2.3 From e179f6914152eca9b338e7d8445684062f560c55 Mon Sep 17 00:00:00 2001 From: "K. Y. Srinivasan" Date: Mon, 14 Apr 2014 11:43:49 -0700 Subject: x86, irq, pic: Probe for legacy PIC and set legacy_pic appropriately The legacy PIC may or may not be available, and we need a mechanism for detecting its existence that is applicable to all hardware (both physical and virtual) currently supported by Linux. On Hyper-V, the legacy firmware presented to guests emulates the legacy PIC, while the EFI-based firmware does not. To support the Hyper-V EFI firmware, we had to set legacy_pic to null_legacy_pic, since we had to bypass PIC-based calibration in the early boot code. While on the EFI firmware we know we don't emulate the legacy PIC, we need a generic mechanism for detecting its presence that is not based on boot-time state - this became apparent when we tried to get kexec to work on the Hyper-V EFI firmware. This patch implements the proposal put forth by H. Peter Anvin : Write a known value to the PIC data port and read it back. If the value read back is the value written, we do have the PIC; if not, there is no PIC and we can safely set legacy_pic to null_legacy_pic. Since a read from an unconnected I/O port returns 0xff, we will use ~(1 << PIC_CASCADE_IR) (0xfb: mask all lines except the cascade line) to probe for the existence of the PIC. In version V1 of the patch, I had cleaned up the code based on comments from Peter. In version V2 of the patch, I have addressed additional comments from Peter. In version V3 of the patch, I have addressed Jan's comments (JBeulich@suse.com). In version V4 of the patch, I have addressed additional comments from Peter. Signed-off-by: K. Y. Srinivasan Link: http://lkml.kernel.org/r/1397501029-29286-1-git-send-email-kys@microsoft.com Cc: Thomas Gleixner Signed-off-by: H.
Peter Anvin --- arch/x86/kernel/cpu/mshyperv.c | 9 --------- arch/x86/kernel/i8259.c | 20 +++++++++++++++++++- 2 files changed, 19 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c index 76f98fe5b35c..a450373e8e91 100644 --- a/arch/x86/kernel/cpu/mshyperv.c +++ b/arch/x86/kernel/cpu/mshyperv.c @@ -132,15 +132,6 @@ static void __init ms_hyperv_init_platform(void) lapic_timer_frequency = hv_lapic_frequency; printk(KERN_INFO "HyperV: LAPIC Timer Frequency: %#x\n", lapic_timer_frequency); - - /* - * On Hyper-V, when we are booting off an EFI firmware stack, - * we do not have many legacy devices including PIC, PIT etc. - */ - if (efi_enabled(EFI_BOOT)) { - printk(KERN_INFO "HyperV: Using null_legacy_pic\n"); - legacy_pic = &null_legacy_pic; - } } #endif diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c index 2e977b5d61dd..8af817105e29 100644 --- a/arch/x86/kernel/i8259.c +++ b/arch/x86/kernel/i8259.c @@ -299,13 +299,31 @@ static void unmask_8259A(void) static void init_8259A(int auto_eoi) { unsigned long flags; + unsigned char probe_val = ~(1 << PIC_CASCADE_IR); + unsigned char new_val; i8259A_auto_eoi = auto_eoi; raw_spin_lock_irqsave(&i8259A_lock, flags); - outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ + /* + * Check to see if we have a PIC. + * Mask all except the cascade and read + * back the value we just wrote. If we don't + * have a PIC, we will read 0xff as opposed to the + * value we wrote. + */ outb(0xff, PIC_SLAVE_IMR); /* mask all of 8259A-2 */ + outb(probe_val, PIC_MASTER_IMR); + new_val = inb(PIC_MASTER_IMR); + if (new_val != probe_val) { + printk(KERN_INFO "Using NULL legacy PIC\n"); + legacy_pic = &null_legacy_pic; + raw_spin_unlock_irqrestore(&i8259A_lock, flags); + return; + } + + outb(0xff, PIC_MASTER_IMR); /* mask all of 8259A-1 */ /* * outb_pic - this has to work on a wide range of PC hardware. -- cgit v1.2.3 From fb24da805729ee4a83efa34015948f7d64da4b28 Mon Sep 17 00:00:00 2001 From: Prarit Bhargava Date: Wed, 2 Apr 2014 08:11:13 -0400 Subject: x86/irq: Fix fixup_irqs() error handling Several patches to fix cpu hotplug and the down'd cpu's irq relocations have been submitted in the past month or so. The patches should resolve the problems with cpu hotplug and irq relocation; however, there is always a possibility that a bug still exists. The big problem with debugging these irq reassignments is that the cpu down completes and then we get random stack traces from drivers for which irqs have not been properly assigned to a new cpu. The stack traces are a mix of storage, network, and other kernel subsystem (I once saw the serial port stop working ...) warnings and failures. The problem with these failures is that they are difficult to diagnose. There is no warning in the cpu hotplug down path to indicate that an IRQ has failed to be assigned to a new cpu, and all we are left with is a stack trace from a driver, or a non-functional device. If we had some information on the console, debugging these situations would be much easier; after all we can map an IRQ to a device by simply using lspci or /proc/interrupts. The current code, fixup_irqs(), which migrates IRQs from the down'd cpu and is called close to the end of the cpu down path, calls chip->set_irq_affinity which eventually calls __assign_irq_vector(). Errors are not propagated back from this function call and this results in silent irq relocation failures.
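[ Illustration - a minimal user-space sketch of the pattern involved, not the kernel code itself; assign_vector() and the set_affinity_*() wrappers are invented stand-ins for __assign_irq_vector() and its callers: ]

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical low-level helper standing in for __assign_irq_vector(). */
    static int assign_vector(int irq)
    {
            return irq > 7 ? -ENOSPC : 0;    /* pretend we ran out of vectors */
    }

    /* Old style: the reason for the failure is collapsed into -1. */
    static int set_affinity_old(int irq)
    {
            return assign_vector(irq) ? -1 : 0;
    }

    /* New style: the error code travels up the call stack intact, so the
     * hotplug path can warn about -ENOSPC specifically. */
    static int set_affinity_new(int irq)
    {
            int ret = assign_vector(irq);

            if (ret == -ENOSPC)
                    fprintf(stderr, "IRQ %d set affinity failed: no vectors\n", irq);
            return ret;
    }

    int main(void)
    {
            printf("old: %d, new: %d\n", set_affinity_old(9), set_affinity_new(9));
            return 0;
    }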
This patch fixes this issue by returning the error codes up the call stack and prints out a warning if there is a relocation failure. Signed-off-by: Prarit Bhargava Acked-by: Thomas Gleixner Cc: Rui Wang Cc: Liu Ping Fan Cc: Bjorn Helgaas Cc: Yoshihiro YUNOMAE Cc: Lv Zheng Cc: Seiji Aguchi Cc: Yang Zhang Cc: Andi Kleen Cc: Steven Rostedt (Red Hat) Cc: Li Fei Cc: gong.chen@linux.intel.com Link: http://lkml.kernel.org/r/1396440673-18286-1-git-send-email-prarit@redhat.com [ Made small cleanliness tweaks. ] Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/io_apic.c | 28 ++++++++++++++++++---------- arch/x86/kernel/irq.c | 13 +++++++++---- 2 files changed, 27 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 6ad4658de705..b4b21db9f4ad 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2312,7 +2312,7 @@ int __ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask, int err; if (!config_enabled(CONFIG_SMP)) - return -1; + return -EPERM; if (!cpumask_intersects(mask, cpu_online_mask)) return -EINVAL; @@ -2343,7 +2343,7 @@ int native_ioapic_set_affinity(struct irq_data *data, int ret; if (!config_enabled(CONFIG_SMP)) - return -1; + return -EPERM; raw_spin_lock_irqsave(&ioapic_lock, flags); ret = __ioapic_set_affinity(data, mask, &dest); @@ -3075,9 +3075,11 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) struct irq_cfg *cfg = data->chip_data; struct msi_msg msg; unsigned int dest; + int ret; - if (__ioapic_set_affinity(data, mask, &dest)) - return -1; + ret = __ioapic_set_affinity(data, mask, &dest); + if (ret) + return ret; __get_cached_msi_msg(data->msi_desc, &msg); @@ -3177,9 +3179,11 @@ dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask, struct irq_cfg *cfg = data->chip_data; unsigned int dest, irq = data->irq; struct msi_msg msg; + int ret; - if (__ioapic_set_affinity(data, mask, &dest)) - return -1; + ret = __ioapic_set_affinity(data, mask, &dest); + if (ret) + return ret; dmar_msi_read(irq, &msg); @@ -3226,9 +3230,11 @@ static int hpet_msi_set_affinity(struct irq_data *data, struct irq_cfg *cfg = data->chip_data; struct msi_msg msg; unsigned int dest; + int ret; - if (__ioapic_set_affinity(data, mask, &dest)) - return -1; + ret = __ioapic_set_affinity(data, mask, &dest); + if (ret) + return ret; hpet_msi_read(data->handler_data, &msg); @@ -3295,9 +3301,11 @@ ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { struct irq_cfg *cfg = data->chip_data; unsigned int dest; + int ret; - if (__ioapic_set_affinity(data, mask, &dest)) - return -1; + ret = __ioapic_set_affinity(data, mask, &dest); + if (ret) + return ret; target_ht_irq(data->irq, dest, cfg->vector); return IRQ_SET_MASK_OK_NOCOPY; diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 283a76a9cc40..49bbb57da7f5 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -357,6 +357,7 @@ void fixup_irqs(void) struct irq_desc *desc; struct irq_data *data; struct irq_chip *chip; + int ret; for_each_irq_desc(irq, desc) { int break_affinity = 0; @@ -395,10 +396,14 @@ void fixup_irqs(void) if (!irqd_can_move_in_process_context(data) && chip->irq_mask) chip->irq_mask(data); - if (chip->irq_set_affinity) - chip->irq_set_affinity(data, affinity, true); - else if (!(warned++)) - set_affinity = 0; + if (chip->irq_set_affinity) { + ret = chip->irq_set_affinity(data, affinity, true); + if (ret == -ENOSPC) + pr_crit("IRQ 
%d set affinity failed because there are no available vectors. The device assigned to this IRQ is unstable.\n", irq); + } else { + if (!(warned++)) + set_affinity = 0; + } /* * We unmask if the irq was not marked masked by the -- cgit v1.2.3 From 2d283862dc62daead9db0dc89cd0d0351e91f765 Mon Sep 17 00:00:00 2001 From: Alexey Charkov Date: Tue, 22 Apr 2014 19:28:09 +0400 Subject: net: via-rhine: add OF bus binding This should make the driver usable with VIA/WonderMedia ARM-based Systems-on-Chip integrated Rhine III adapters. Note that these are always in MMIO mode, and don't have any known EEPROM. Signed-off-by: Alexey Charkov Acked-by: Rob Herring Signed-off-by: David S. Miller --- .../devicetree/bindings/net/via-rhine.txt | 17 ++ arch/arm/boot/dts/vt8500.dtsi | 6 + arch/arm/boot/dts/wm8650.dtsi | 6 + arch/arm/boot/dts/wm8850.dtsi | 6 + drivers/net/ethernet/via/Kconfig | 2 +- drivers/net/ethernet/via/via-rhine.c | 307 +++++++++++++-------- 6 files changed, 229 insertions(+), 115 deletions(-) create mode 100644 Documentation/devicetree/bindings/net/via-rhine.txt (limited to 'arch') diff --git a/Documentation/devicetree/bindings/net/via-rhine.txt b/Documentation/devicetree/bindings/net/via-rhine.txt new file mode 100644 index 000000000000..334eca2bf937 --- /dev/null +++ b/Documentation/devicetree/bindings/net/via-rhine.txt @@ -0,0 +1,17 @@ +* VIA Rhine 10/100 Network Controller + +Required properties: +- compatible : Should be "via,vt8500-rhine" for integrated + Rhine controllers found in VIA VT8500, WonderMedia WM8950 + and similar. These are listed as 1106:3106 rev. 0x84 on the + virtual PCI bus under vendor-provided kernels +- reg : Address and length of the io space +- interrupts : Should contain the controller interrupt line + +Examples: + +ethernet@d8004000 { + compatible = "via,vt8500-rhine"; + reg = <0xd8004000 0x100>; + interrupts = <10>; +}; diff --git a/arch/arm/boot/dts/vt8500.dtsi b/arch/arm/boot/dts/vt8500.dtsi index 51d0e912c8f5..1929ad390d88 100644 --- a/arch/arm/boot/dts/vt8500.dtsi +++ b/arch/arm/boot/dts/vt8500.dtsi @@ -165,5 +165,11 @@ reg = <0xd8100000 0x10000>; interrupts = <48>; }; + + ethernet@d8004000 { + compatible = "via,vt8500-rhine"; + reg = <0xd8004000 0x100>; + interrupts = <10>; + }; }; }; diff --git a/arch/arm/boot/dts/wm8650.dtsi b/arch/arm/boot/dts/wm8650.dtsi index 7525982262ac..b1c59a766a13 100644 --- a/arch/arm/boot/dts/wm8650.dtsi +++ b/arch/arm/boot/dts/wm8650.dtsi @@ -218,5 +218,11 @@ reg = <0xd8100000 0x10000>; interrupts = <48>; }; + + ethernet@d8004000 { + compatible = "via,vt8500-rhine"; + reg = <0xd8004000 0x100>; + interrupts = <10>; + }; }; }; diff --git a/arch/arm/boot/dts/wm8850.dtsi b/arch/arm/boot/dts/wm8850.dtsi index d98386dd2882..8fbccfbe75f3 100644 --- a/arch/arm/boot/dts/wm8850.dtsi +++ b/arch/arm/boot/dts/wm8850.dtsi @@ -298,5 +298,11 @@ bus-width = <4>; sdon-inverted; }; + + ethernet@d8004000 { + compatible = "via,vt8500-rhine"; + reg = <0xd8004000 0x100>; + interrupts = <10>; + }; }; }; diff --git a/drivers/net/ethernet/via/Kconfig b/drivers/net/ethernet/via/Kconfig index 8a049a2b4474..f66ddaee0c87 100644 --- a/drivers/net/ethernet/via/Kconfig +++ b/drivers/net/ethernet/via/Kconfig @@ -19,7 +19,7 @@ if NET_VENDOR_VIA config VIA_RHINE tristate "VIA Rhine support" - depends on PCI + depends on (PCI || USE_OF) select CRC32 select MII ---help--- diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 20b83f11004a..4fa92012ceac 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ 
b/drivers/net/ethernet/via/via-rhine.c @@ -94,6 +94,10 @@ static const int multicast_filter_limit = 32; #include #include #include +#include +#include +#include +#include #include #include #include @@ -279,6 +283,15 @@ static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = { }; MODULE_DEVICE_TABLE(pci, rhine_pci_tbl); +/* OpenFirmware identifiers for platform-bus devices + * The .data field is currently only used to store chip revision + * (for quirks etc.) + */ +static struct of_device_id rhine_of_tbl[] = { + { .compatible = "via,vt8500-rhine", .data = (void *)0x84 }, + { } /* terminate list */ +}; +MODULE_DEVICE_TABLE(of, rhine_of_tbl); /* Offsets to the device registers. */ enum register_offsets { @@ -847,7 +860,8 @@ static void rhine_hw_init(struct net_device *dev, long pioaddr) msleep(5); /* Reload EEPROM controlled bytes cleared by soft reset */ - rhine_reload_eeprom(pioaddr, dev); + if (dev_is_pci(dev->dev.parent)) + rhine_reload_eeprom(pioaddr, dev); } static const struct net_device_ops rhine_netdev_ops = { @@ -868,125 +882,55 @@ static const struct net_device_ops rhine_netdev_ops = { #endif }; -static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) +static int rhine_init_one_common(struct device *hwdev, int revision, + long pioaddr, void __iomem *ioaddr, int irq) { struct net_device *dev; struct rhine_private *rp; - struct device *hwdev = &pdev->dev; - int revision = pdev->revision; - int i, rc; - u32 quirks; - long pioaddr; - long memaddr; - void __iomem *ioaddr; - int io_size, phy_id; + int i, rc, phy_id; const char *name; -#ifdef USE_MMIO - int bar = 1; -#else - int bar = 0; -#endif - -/* when built into the kernel, we only print version if device is found */ -#ifndef MODULE - pr_info_once("%s\n", version); -#endif - - io_size = 256; - phy_id = 0; - quirks = 0; - name = "Rhine"; - if (revision < VTunknown0) { - quirks = rqRhineI; - io_size = 128; - } else if (revision >= VT6102) { - quirks = rqWOL | rqForceReset; - if (revision < VT6105) { - name = "Rhine II"; - quirks |= rqStatusWBRace; /* Rhine-II exclusive */ - } else { - phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */ - if (revision >= VT6105_B0) - quirks |= rq6patterns; - if (revision < VT6105M) - name = "Rhine III"; - else - name = "Rhine III (Management Adapter)"; - } - } - - rc = pci_enable_device(pdev); - if (rc) - goto err_out; /* this should always be supported */ rc = dma_set_mask(hwdev, DMA_BIT_MASK(32)); if (rc) { dev_err(hwdev, "32-bit DMA addresses not supported by the card!?\n"); - goto err_out_pci_disable; - } - - /* sanity check */ - if ((pci_resource_len(pdev, 0) < io_size) || - (pci_resource_len(pdev, 1) < io_size)) { - rc = -EIO; - dev_err(hwdev, "Insufficient PCI resources, aborting\n"); - goto err_out_pci_disable; + goto err_out; } - pioaddr = pci_resource_start(pdev, 0); - memaddr = pci_resource_start(pdev, 1); - - pci_set_master(pdev); - dev = alloc_etherdev(sizeof(struct rhine_private)); if (!dev) { rc = -ENOMEM; - goto err_out_pci_disable; + goto err_out; } SET_NETDEV_DEV(dev, hwdev); rp = netdev_priv(dev); rp->dev = dev; rp->revision = revision; - rp->quirks = quirks; rp->pioaddr = pioaddr; + rp->base = ioaddr; + rp->irq = irq; rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT); - rc = pci_request_regions(pdev, DRV_NAME); - if (rc) - goto err_out_free_netdev; - - ioaddr = pci_iomap(pdev, bar, io_size); - if (!ioaddr) { - rc = -EIO; - dev_err(hwdev, - "ioremap failed for device %s, region 0x%X @ 0x%lX\n", - dev_name(hwdev), io_size, memaddr); - goto err_out_free_res; - } 
- -#ifdef USE_MMIO - enable_mmio(pioaddr, quirks); - - /* Check that selected MMIO registers match the PIO ones */ - i = 0; - while (mmio_verify_registers[i]) { - int reg = mmio_verify_registers[i++]; - unsigned char a = inb(pioaddr+reg); - unsigned char b = readb(ioaddr+reg); - if (a != b) { - rc = -EIO; - dev_err(hwdev, - "MMIO do not match PIO [%02x] (%02x != %02x)\n", - reg, a, b); - goto err_out_unmap; + phy_id = 0; + name = "Rhine"; + if (revision < VTunknown0) { + rp->quirks = rqRhineI; + } else if (revision >= VT6102) { + rp->quirks = rqWOL | rqForceReset; + if (revision < VT6105) { + name = "Rhine II"; + rp->quirks |= rqStatusWBRace; /* Rhine-II exclusive */ + } else { + phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */ + if (revision >= VT6105_B0) + rp->quirks |= rq6patterns; + if (revision < VT6105M) + name = "Rhine III"; + else + name = "Rhine III (Management Adapter)"; } } -#endif /* USE_MMIO */ - - rp->base = ioaddr; - rp->irq = pdev->irq; u64_stats_init(&rp->tx_stats.syncp); u64_stats_init(&rp->rx_stats.syncp); @@ -1039,16 +983,10 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) /* dev->name not defined before register_netdev()! */ rc = register_netdev(dev); if (rc) - goto err_out_unmap; + goto err_out_free_netdev; netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n", - name, -#ifdef USE_MMIO - memaddr, -#else - (long)ioaddr, -#endif - dev->dev_addr, rp->irq); + name, (long)ioaddr, dev->dev_addr, rp->irq); dev_set_drvdata(hwdev, dev); @@ -1079,18 +1017,126 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; +err_out_free_netdev: + free_netdev(dev); +err_out: + return rc; +} + +static int rhine_init_one_pci(struct pci_dev *pdev, + const struct pci_device_id *ent) +{ + struct device *hwdev = &pdev->dev; + int i, rc; + long pioaddr, memaddr; + void __iomem *ioaddr; + int io_size = pdev->revision < VTunknown0 ? 128 : 256; + u32 quirks = pdev->revision < VTunknown0 ? 
rqRhineI : 0; +#ifdef USE_MMIO + int bar = 1; +#else + int bar = 0; +#endif + +/* when built into the kernel, we only print version if device is found */ +#ifndef MODULE + pr_info_once("%s\n", version); +#endif + + rc = pci_enable_device(pdev); + if (rc) + goto err_out; + + /* sanity check */ + if ((pci_resource_len(pdev, 0) < io_size) || + (pci_resource_len(pdev, 1) < io_size)) { + rc = -EIO; + dev_err(hwdev, "Insufficient PCI resources, aborting\n"); + goto err_out_pci_disable; + } + + pioaddr = pci_resource_start(pdev, 0); + memaddr = pci_resource_start(pdev, 1); + + pci_set_master(pdev); + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out_pci_disable; + + ioaddr = pci_iomap(pdev, bar, io_size); + if (!ioaddr) { + rc = -EIO; + dev_err(hwdev, + "ioremap failed for device %s, region 0x%X @ 0x%lX\n", + dev_name(hwdev), io_size, memaddr); + goto err_out_free_res; + } + +#ifdef USE_MMIO + enable_mmio(pioaddr, quirks); + + /* Check that selected MMIO registers match the PIO ones */ + i = 0; + while (mmio_verify_registers[i]) { + int reg = mmio_verify_registers[i++]; + unsigned char a = inb(pioaddr+reg); + unsigned char b = readb(ioaddr+reg); + + if (a != b) { + rc = -EIO; + dev_err(hwdev, + "MMIO do not match PIO [%02x] (%02x != %02x)\n", + reg, a, b); + goto err_out_unmap; + } + } +#endif /* USE_MMIO */ + + rc = rhine_init_one_common(&pdev->dev, pdev->revision, + pioaddr, ioaddr, pdev->irq); + if (!rc) + return 0; + err_out_unmap: pci_iounmap(pdev, ioaddr); err_out_free_res: pci_release_regions(pdev); -err_out_free_netdev: - free_netdev(dev); err_out_pci_disable: pci_disable_device(pdev); err_out: return rc; } +static int rhine_init_one_platform(struct platform_device *pdev) +{ + const struct of_device_id *match; + u32 revision; + int irq; + struct resource *res; + void __iomem *ioaddr; + + match = of_match_device(rhine_of_tbl, &pdev->dev); + if (!match) + return -EINVAL; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ioaddr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(ioaddr)) + return PTR_ERR(ioaddr); + + irq = irq_of_parse_and_map(pdev->dev.of_node, 0); + if (!irq) + return -EINVAL; + + revision = (u32)match->data; + if (!revision) + return -EINVAL; + + return rhine_init_one_common(&pdev->dev, revision, + (long)ioaddr, ioaddr, irq); +} + static int alloc_ring(struct net_device* dev) { struct rhine_private *rp = netdev_priv(dev); @@ -2297,7 +2343,7 @@ static int rhine_close(struct net_device *dev) } -static void rhine_remove_one(struct pci_dev *pdev) +static void rhine_remove_one_pci(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct rhine_private *rp = netdev_priv(dev); @@ -2311,7 +2357,21 @@ static void rhine_remove_one(struct pci_dev *pdev) pci_disable_device(pdev); } -static void rhine_shutdown (struct pci_dev *pdev) +static int rhine_remove_one_platform(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct rhine_private *rp = netdev_priv(dev); + + unregister_netdev(dev); + + iounmap(rp->base); + + free_netdev(dev); + + return 0; +} + +static void rhine_shutdown_pci(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct rhine_private *rp = netdev_priv(dev); @@ -2378,7 +2438,7 @@ static int rhine_suspend(struct device *device) netif_device_detach(dev); if (dev_is_pci(device)) - rhine_shutdown(to_pci_dev(device)); + rhine_shutdown_pci(to_pci_dev(device)); return 0; } @@ -2418,15 +2478,26 @@ static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, 
rhine_resume); #endif /* !CONFIG_PM_SLEEP */ -static struct pci_driver rhine_driver = { +static struct pci_driver rhine_driver_pci = { .name = DRV_NAME, .id_table = rhine_pci_tbl, - .probe = rhine_init_one, - .remove = rhine_remove_one, - .shutdown = rhine_shutdown, + .probe = rhine_init_one_pci, + .remove = rhine_remove_one_pci, + .shutdown = rhine_shutdown_pci, .driver.pm = RHINE_PM_OPS, }; +static struct platform_driver rhine_driver_platform = { + .probe = rhine_init_one_platform, + .remove = rhine_remove_one_platform, + .driver = { + .name = DRV_NAME, + .owner = THIS_MODULE, + .of_match_table = rhine_of_tbl, + .pm = RHINE_PM_OPS, + } +}; + static struct dmi_system_id rhine_dmi_table[] __initdata = { { .ident = "EPIA-M", @@ -2447,6 +2518,8 @@ static struct dmi_system_id rhine_dmi_table[] __initdata = { static int __init rhine_init(void) { + int ret_pci, ret_platform; + /* when a module, this is printed whether or not devices are found in probe */ #ifdef MODULE pr_info("%s\n", version); #endif @@ -2459,13 +2532,19 @@ static int __init rhine_init(void) else if (avoid_D3) pr_info("avoid_D3 set\n"); - return pci_register_driver(&rhine_driver); + ret_pci = pci_register_driver(&rhine_driver_pci); + ret_platform = platform_driver_register(&rhine_driver_platform); + if ((ret_pci < 0) && (ret_platform < 0)) + return ret_pci; + + return 0; } static void __exit rhine_cleanup(void) { - pci_unregister_driver(&rhine_driver); + platform_driver_unregister(&rhine_driver_platform); + pci_unregister_driver(&rhine_driver_pci); } -- cgit v1.2.3 From 6a5022a56ac37da7bffece043331a101ed0040b1 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:16:51 +0900 Subject: kprobes/x86: Allow to handle reentered kprobe on single-stepping Since NMI handlers (e.g. perf) can interrupt single-stepping (or its preparation, do_debug etc.), we must consider the case where a kprobe is hit in an NMI handler. Even in that case, the kprobe is allowed to be reentered, just like kprobes hit in kprobe handlers (KPROBE_HIT_ACTIVE or KPROBE_HIT_SSDONE). The real issue happens when a kprobe is hit while another reentered kprobe is being processed (KPROBE_REENTER), because we have already consumed the save area for the previous kprobe. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Jiri Kosina Cc: Jonathan Lebon Link: http://lkml.kernel.org/r/20140417081651.26341.10593.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes/core.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 61b17dc2c277..da7bdaa3ce15 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -531,10 +531,11 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb switch (kcb->kprobe_status) { case KPROBE_HIT_SSDONE: case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SS: kprobes_inc_nmissed_count(p); setup_singlestep(p, regs, kcb, 1); break; - case KPROBE_HIT_SS: + case KPROBE_REENTER: /* A probe has been hit in the codepath leading up to, or just * after, single-stepping of a probed instruction. This entire * codepath should strictly reside in .kprobes.text section.
-- cgit v1.2.3 From be8f274323c26ddc7e6fd6c44254b7abcdbe6389 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:16:58 +0900 Subject: kprobes: Prohibit probing on .entry.text code .entry.text is a code area which is used for interrupt/syscall entries and includes much sensitive code. Thus, it is better to prohibit probing on all of this code instead of only part of it. Since some symbols are already registered on the kprobe blacklist, this also removes them from the blacklist. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Ananth N Mavinakayanahalli Cc: Anil S Keshavamurthy Cc: Borislav Petkov Cc: David S. Miller Cc: Frederic Weisbecker Cc: Jan Kiszka Cc: Jiri Kosina Cc: Jonathan Lebon Cc: Seiji Aguchi Link: http://lkml.kernel.org/r/20140417081658.26341.57354.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/entry_32.S | 33 --------------------------------- arch/x86/kernel/entry_64.S | 20 -------------------- arch/x86/kernel/kprobes/core.c | 8 ++++++++ include/linux/kprobes.h | 1 + kernel/kprobes.c | 13 ++++++++----- 5 files changed, 17 insertions(+), 58 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index a2a4f4697889..0ca5bf1697bb 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -314,10 +314,6 @@ ENTRY(ret_from_kernel_thread) CFI_ENDPROC ENDPROC(ret_from_kernel_thread) -/* - * Interrupt exit functions should be protected against kprobes - */ - .pushsection .kprobes.text, "ax" /* * Return to user mode is not as complex as all this looks, * but we want the default path for a system call return to @@ -372,10 +368,6 @@ need_resched: END(resume_kernel) #endif CFI_ENDPROC -/* - * End of kprobes section - */ - .popsection /* SYSENTER_RETURN points to after the "sysenter" instruction in the vsyscall page. See vsyscall-sysentry.S, which defines the symbol.
*/ @@ -495,10 +487,6 @@ sysexit_audit: PTGS_TO_GS_EX ENDPROC(ia32_sysenter_target) -/* - * syscall stub including irq exit should be protected against kprobes - */ - .pushsection .kprobes.text, "ax" # system call handler stub ENTRY(system_call) RING0_INT_FRAME # can't unwind into user space anyway @@ -691,10 +679,6 @@ syscall_badsys: jmp resume_userspace END(syscall_badsys) CFI_ENDPROC -/* - * End of kprobes section - */ - .popsection .macro FIXUP_ESPFIX_STACK /* @@ -781,10 +765,6 @@ common_interrupt: ENDPROC(common_interrupt) CFI_ENDPROC -/* - * Irq entries should be protected against kprobes - */ - .pushsection .kprobes.text, "ax" #define BUILD_INTERRUPT3(name, nr, fn) \ ENTRY(name) \ RING0_INT_FRAME; \ @@ -961,10 +941,6 @@ ENTRY(spurious_interrupt_bug) jmp error_code CFI_ENDPROC END(spurious_interrupt_bug) -/* - * End of kprobes section - */ - .popsection #ifdef CONFIG_XEN /* Xen doesn't set %esp to be precisely what the normal sysenter @@ -1239,11 +1215,6 @@ return_to_handler: jmp *%ecx #endif -/* - * Some functions should be protected against kprobes - */ - .pushsection .kprobes.text, "ax" - #ifdef CONFIG_TRACING ENTRY(trace_page_fault) RING0_EC_FRAME @@ -1453,7 +1424,3 @@ ENTRY(async_page_fault) END(async_page_fault) #endif -/* - * End of kprobes section - */ - .popsection diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 1e96c3628bf2..43bb38951660 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -487,8 +487,6 @@ ENDPROC(native_usergs_sysret64) TRACE_IRQS_OFF .endm -/* save complete stack frame */ - .pushsection .kprobes.text, "ax" ENTRY(save_paranoid) XCPT_FRAME 1 RDI+8 cld @@ -517,7 +515,6 @@ ENTRY(save_paranoid) 1: ret CFI_ENDPROC END(save_paranoid) - .popsection /* * A newly forked process directly context switches into this address. @@ -975,10 +972,6 @@ END(interrupt) call \func .endm -/* - * Interrupt entry/exit should be protected against kprobes - */ - .pushsection .kprobes.text, "ax" /* * The interrupt stubs push (~vector+0x80) onto the stack and * then jump to common_interrupt. @@ -1113,10 +1106,6 @@ ENTRY(retint_kernel) CFI_ENDPROC END(common_interrupt) -/* - * End of kprobes section - */ - .popsection /* * APIC interrupts. 
@@ -1477,11 +1466,6 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \ hyperv_callback_vector hyperv_vector_handler #endif /* CONFIG_HYPERV */ -/* - * Some functions should be protected against kprobes - */ - .pushsection .kprobes.text, "ax" - paranoidzeroentry_ist debug do_debug DEBUG_STACK paranoidzeroentry_ist int3 do_int3 DEBUG_STACK paranoiderrorentry stack_segment do_stack_segment @@ -1898,7 +1882,3 @@ ENTRY(ignore_sysret) CFI_ENDPROC END(ignore_sysret) -/* - * End of kprobes section - */ - .popsection diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index da7bdaa3ce15..7751b3dee53a 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -1065,6 +1065,14 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) return 0; } +bool arch_within_kprobe_blacklist(unsigned long addr) +{ + return (addr >= (unsigned long)__kprobes_text_start && + addr < (unsigned long)__kprobes_text_end) || + (addr >= (unsigned long)__entry_text_start && + addr < (unsigned long)__entry_text_end); +} + int __init arch_init_kprobes(void) { return 0; diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 925eaf28fca9..cdf9251f8249 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -265,6 +265,7 @@ extern void arch_disarm_kprobe(struct kprobe *p); extern int arch_init_kprobes(void); extern void show_registers(struct pt_regs *regs); extern void kprobes_inc_nmissed_count(struct kprobe *p); +extern bool arch_within_kprobe_blacklist(unsigned long addr); struct kprobe_insn_cache { struct mutex mutex; diff --git a/kernel/kprobes.c b/kernel/kprobes.c index ceeadfcabb76..5b5ac76671e7 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -96,9 +96,6 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) static struct kprobe_blackpoint kprobe_blacklist[] = { {"preempt_schedule",}, {"native_get_debugreg",}, - {"irq_entries_start",}, - {"common_interrupt",}, - {"mcount",}, /* mcount can be called from everywhere */ {NULL} /* Terminator */ }; @@ -1324,12 +1321,18 @@ out: return ret; } +bool __weak arch_within_kprobe_blacklist(unsigned long addr) +{ + /* The __kprobes marked functions and entry code must not be probed */ + return addr >= (unsigned long)__kprobes_text_start && + addr < (unsigned long)__kprobes_text_end; +} + static int __kprobes in_kprobes_functions(unsigned long addr) { struct kprobe_blackpoint *kb; - if (addr >= (unsigned long)__kprobes_text_start && - addr < (unsigned long)__kprobes_text_end) + if (arch_within_kprobe_blacklist(addr)) return -EINVAL; /* * If there exists a kprobe_blacklist, verify and -- cgit v1.2.3 From 376e242429bf8539ef39a080ac113c8799840b13 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:05 +0900 Subject: kprobes: Introduce NOKPROBE_SYMBOL() macro to maintain kprobes blacklist MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Introduce NOKPROBE_SYMBOL() macro which builds a kprobes blacklist at kernel build time. The usage of this macro is similar to EXPORT_SYMBOL(), placed after the function definition: NOKPROBE_SYMBOL(function); Since this macro will inhibit inlining of static/inline functions, this patch also introduces a nokprobe_inline macro for static/inline functions. In this case, we must use NOKPROBE_SYMBOL() for the inline function caller. When CONFIG_KPROBES=y, the macro stores the given function address in the "_kprobe_blacklist" section. 
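[ Usage sketch - the functions here are invented; only NOKPROBE_SYMBOL() and nokprobe_inline come from this patch: ]

    #include <linux/kprobes.h>

    /* An inlined helper has no symbol entry of its own, so it uses
     * nokprobe_inline and relies on its callers being blacklisted. */
    static nokprobe_inline void low_level_helper(void)
    {
            /* ... code that must never hit a breakpoint ... */
    }

    void sensitive_entry_path(void)
    {
            low_level_helper();
    }
    NOKPROBE_SYMBOL(sensitive_entry_path);    /* placed after the definition */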
Since the data structures are not fully initialized by the macro (because there is no "size" information), those are re-initialized at boot time by using kallsyms. Signed-off-by: Masami Hiramatsu Link: http://lkml.kernel.org/r/20140417081705.26341.96719.stgit@ltc230.yrl.intra.hitachi.co.jp Cc: Alok Kataria Cc: Ananth N Mavinakayanahalli Cc: Andrew Morton Cc: Anil S Keshavamurthy Cc: Arnd Bergmann Cc: Christopher Li Cc: Chris Wright Cc: David S. Miller Cc: Jan-Simon Möller Cc: Jeremy Fitzhardinge Cc: Linus Torvalds Cc: Randy Dunlap Cc: Rusty Russell Cc: linux-arch@vger.kernel.org Cc: linux-doc@vger.kernel.org Cc: linux-sparse@vger.kernel.org Cc: virtualization@lists.linux-foundation.org Signed-off-by: Ingo Molnar --- Documentation/kprobes.txt | 16 +++++- arch/x86/include/asm/asm.h | 7 +++ arch/x86/kernel/paravirt.c | 4 ++ include/asm-generic/vmlinux.lds.h | 9 ++++ include/linux/compiler.h | 2 + include/linux/kprobes.h | 20 ++++++-- kernel/kprobes.c | 100 ++++++++++++++++++++------------------ kernel/sched/core.c | 1 + 8 files changed, 107 insertions(+), 52 deletions(-) (limited to 'arch') diff --git a/Documentation/kprobes.txt b/Documentation/kprobes.txt index 0cfb00fd86ff..4bbeca8483ed 100644 --- a/Documentation/kprobes.txt +++ b/Documentation/kprobes.txt @@ -22,8 +22,9 @@ Appendix B: The kprobes sysctl interface Kprobes enables you to dynamically break into any kernel routine and collect debugging and performance information non-disruptively. You -can trap at almost any kernel code address, specifying a handler +can trap at almost any kernel code address(*), specifying a handler routine to be invoked when the breakpoint is hit. +(*: some parts of the kernel code can not be trapped, see 1.5 Blacklist) There are currently three types of probes: kprobes, jprobes, and kretprobes (also called return probes). A kprobe can be inserted @@ -273,6 +274,19 @@ using one of the following techniques: or - Execute 'sysctl -w debug.kprobes_optimization=n' +1.5 Blacklist + +Kprobes can probe most of the kernel except itself. This means +that there are some functions where kprobes cannot probe. Probing +(trapping) such functions can cause a recursive trap (e.g. double +fault) or the nested probe handler may never be called. +Kprobes manages such functions as a blacklist. +If you want to add a function into the blacklist, you just need +to (1) include linux/kprobes.h and (2) use NOKPROBE_SYMBOL() macro +to specify a blacklisted function. +Kprobes checks the given probe address against the blacklist and +rejects registering it, if the given address is in the blacklist. + 2. Architectures Supported Kprobes, jprobes, and return probes are implemented on the following diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 4582e8e1cd1a..7730c1c5c83a 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -57,6 +57,12 @@ .long (from) - . ; \ .long (to) - . + 0x7ffffff0 ; \ .popsection + +# define _ASM_NOKPROBE(entry) \ + .pushsection "_kprobe_blacklist","aw" ; \ + _ASM_ALIGN ; \ + _ASM_PTR (entry); \ + .popsection #else # define _ASM_EXTABLE(from,to) \ " .pushsection \"__ex_table\",\"a\"\n" \ @@ -71,6 +77,7 @@ " .long (" #from ") - .\n" \ " .long (" #to ") - . 
+ 0x7ffffff0\n" \ " .popsection\n" +/* For C file, we already have NOKPROBE_SYMBOL macro */ #endif #endif /* _ASM_X86_ASM_H */ diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 1b10af835c31..e136869ae42e 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -389,6 +390,9 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .end_context_switch = paravirt_nop, }; +/* At this point, native_get_debugreg has a real function entry */ +NOKPROBE_SYMBOL(native_get_debugreg); + struct pv_apic_ops pv_apic_ops = { #ifdef CONFIG_X86_LOCAL_APIC .startup_ipi_hook = paravirt_nop, diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h index 146e4fffd710..40ceb3ceba79 100644 --- a/include/asm-generic/vmlinux.lds.h +++ b/include/asm-generic/vmlinux.lds.h @@ -109,6 +109,14 @@ #define BRANCH_PROFILE() #endif +#ifdef CONFIG_KPROBES +#define KPROBE_BLACKLIST() VMLINUX_SYMBOL(__start_kprobe_blacklist) = .; \ + *(_kprobe_blacklist) \ + VMLINUX_SYMBOL(__stop_kprobe_blacklist) = .; +#else +#define KPROBE_BLACKLIST() +#endif + #ifdef CONFIG_EVENT_TRACING #define FTRACE_EVENTS() . = ALIGN(8); \ VMLINUX_SYMBOL(__start_ftrace_events) = .; \ @@ -507,6 +515,7 @@ *(.init.rodata) \ FTRACE_EVENTS() \ TRACE_SYSCALLS() \ + KPROBE_BLACKLIST() \ MEM_DISCARD(init.rodata) \ CLK_OF_TABLES() \ RESERVEDMEM_OF_TABLES() \ diff --git a/include/linux/compiler.h b/include/linux/compiler.h index ee7239ea1583..0300c0f5c88b 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -374,7 +374,9 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */ #ifdef CONFIG_KPROBES # define __kprobes __attribute__((__section__(".kprobes.text"))) +# define nokprobe_inline __always_inline #else # define __kprobes +# define nokprobe_inline inline #endif #endif /* __LINUX_COMPILER_H */ diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index cdf9251f8249..e059507c465d 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -205,10 +205,10 @@ struct kretprobe_blackpoint { void *addr; }; -struct kprobe_blackpoint { - const char *name; +struct kprobe_blacklist_entry { + struct list_head list; unsigned long start_addr; - unsigned long range; + unsigned long end_addr; }; #ifdef CONFIG_KPROBES @@ -477,4 +477,18 @@ static inline int enable_jprobe(struct jprobe *jp) return enable_kprobe(&jp->kp); } +#ifdef CONFIG_KPROBES +/* + * Blacklist ganerating macro. Specify functions which is not probed + * by using this macro. + */ +#define __NOKPROBE_SYMBOL(fname) \ +static unsigned long __used \ + __attribute__((section("_kprobe_blacklist"))) \ + _kbl_addr_##fname = (unsigned long)fname; +#define NOKPROBE_SYMBOL(fname) __NOKPROBE_SYMBOL(fname) +#else +#define NOKPROBE_SYMBOL(fname) +#endif + #endif /* _LINUX_KPROBES_H */ diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 5b5ac76671e7..5ffc6875d2a7 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -86,18 +86,8 @@ static raw_spinlock_t *kretprobe_table_lock_ptr(unsigned long hash) return &(kretprobe_table_locks[hash].lock); } -/* - * Normally, functions that we'd want to prohibit kprobes in, are marked - * __kprobes. 
But, there are cases where such functions already belong to - * a different section (__sched for preempt_schedule) - * - * For such cases, we now have a blacklist - */ -static struct kprobe_blackpoint kprobe_blacklist[] = { - {"preempt_schedule",}, - {"native_get_debugreg",}, - {NULL} /* Terminator */ -}; +/* Blacklist -- list of struct kprobe_blacklist_entry */ +static LIST_HEAD(kprobe_blacklist); #ifdef __ARCH_WANT_KPROBES_INSN_SLOT /* @@ -1328,24 +1318,22 @@ bool __weak arch_within_kprobe_blacklist(unsigned long addr) addr < (unsigned long)__kprobes_text_end; } -static int __kprobes in_kprobes_functions(unsigned long addr) +static bool __kprobes within_kprobe_blacklist(unsigned long addr) { - struct kprobe_blackpoint *kb; + struct kprobe_blacklist_entry *ent; if (arch_within_kprobe_blacklist(addr)) - return -EINVAL; + return true; /* * If there exists a kprobe_blacklist, verify and * fail any probe registration in the prohibited area */ - for (kb = kprobe_blacklist; kb->name != NULL; kb++) { - if (kb->start_addr) { - if (addr >= kb->start_addr && - addr < (kb->start_addr + kb->range)) - return -EINVAL; - } + list_for_each_entry(ent, &kprobe_blacklist, list) { + if (addr >= ent->start_addr && addr < ent->end_addr) + return true; } - return 0; + + return false; } /* @@ -1436,7 +1424,7 @@ static __kprobes int check_kprobe_address_safe(struct kprobe *p, /* Ensure it is not in reserved area nor out of text */ if (!kernel_text_address((unsigned long) p->addr) || - in_kprobes_functions((unsigned long) p->addr) || + within_kprobe_blacklist((unsigned long) p->addr) || jump_label_text_reserved(p->addr, p->addr)) { ret = -EINVAL; goto out; @@ -2022,6 +2010,38 @@ void __kprobes dump_kprobe(struct kprobe *kp) kp->symbol_name, kp->addr, kp->offset); } +/* + * Lookup and populate the kprobe_blacklist. + * + * Unlike the kretprobe blacklist, we'll need to determine + * the range of addresses that belong to the said functions, + * since a kprobe need not necessarily be at the beginning + * of a function. + */ +static int __init populate_kprobe_blacklist(unsigned long *start, + unsigned long *end) +{ + unsigned long *iter; + struct kprobe_blacklist_entry *ent; + unsigned long offset = 0, size = 0; + + for (iter = start; iter < end; iter++) { + if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) { + pr_err("Failed to find blacklist %p\n", (void *)*iter); + continue; + } + + ent = kmalloc(sizeof(*ent), GFP_KERNEL); + if (!ent) + return -ENOMEM; + ent->start_addr = *iter; + ent->end_addr = *iter + size; + INIT_LIST_HEAD(&ent->list); + list_add_tail(&ent->list, &kprobe_blacklist); + } + return 0; +} + /* Module notifier call back, checking kprobes on the module */ static int __kprobes kprobes_module_callback(struct notifier_block *nb, unsigned long val, void *data) @@ -2065,14 +2085,13 @@ static struct notifier_block kprobe_module_nb = { .priority = 0 }; +/* Markers of _kprobe_blacklist section */ +extern unsigned long __start_kprobe_blacklist[]; +extern unsigned long __stop_kprobe_blacklist[]; + static int __init init_kprobes(void) { int i, err = 0; - unsigned long offset = 0, size = 0; - char *modname, namebuf[KSYM_NAME_LEN]; - const char *symbol_name; - void *addr; - struct kprobe_blackpoint *kb; /* FIXME allocate the probe table, currently defined statically */ /* initialize all list heads */ @@ -2082,26 +2101,11 @@ static int __init init_kprobes(void) raw_spin_lock_init(&(kretprobe_table_locks[i].lock)); } - /* - * Lookup and populate the kprobe_blacklist. 
- * - * Unlike the kretprobe blacklist, we'll need to determine - * the range of addresses that belong to the said functions, - * since a kprobe need not necessarily be at the beginning - * of a function. - */ - for (kb = kprobe_blacklist; kb->name != NULL; kb++) { - kprobe_lookup_name(kb->name, addr); - if (!addr) - continue; - - kb->start_addr = (unsigned long)addr; - symbol_name = kallsyms_lookup(kb->start_addr, - &size, &offset, &modname, namebuf); - if (!symbol_name) - kb->range = 0; - else - kb->range = size; + err = populate_kprobe_blacklist(__start_kprobe_blacklist, + __stop_kprobe_blacklist); + if (err) { + pr_err("kprobes: failed to populate blacklist: %d\n", err); + pr_err("Please take care of using kprobes.\n"); } if (kretprobe_blacklist_size) { diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 268a45ea238c..6863631e8cd0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2804,6 +2804,7 @@ asmlinkage void __sched notrace preempt_schedule(void) barrier(); } while (need_resched()); } +NOKPROBE_SYMBOL(preempt_schedule); EXPORT_SYMBOL(preempt_schedule); #endif /* CONFIG_PREEMPT */ -- cgit v1.2.3 From 0f46efeb44e360b78f54a968b4d92e6877c35891 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:12 +0900 Subject: kprobes, x86: Prohibit probing on debug_stack_*() Prohibit probing on debug_stack_reset and debug_stack_set_zero. Since both functions are called from the TRACE_IRQS_ON/OFF_DEBUG macros, which run in the int3 IST entry, probing them may cause a soft lockup. This happens when the kernel is built with CONFIG_DYNAMIC_FTRACE=y and CONFIG_TRACE_IRQFLAGS=y. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Borislav Petkov Cc: Jan Beulich Cc: Paul Gortmaker Cc: Seiji Aguchi Link: http://lkml.kernel.org/r/20140417081712.26341.32994.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/common.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index a135239badb7..5af696dddd1d 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include @@ -1160,6 +1161,7 @@ int is_debug_stack(unsigned long addr) (addr <= __get_cpu_var(debug_stack_addr) && addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ)); } +NOKPROBE_SYMBOL(is_debug_stack); DEFINE_PER_CPU(u32, debug_idt_ctr); @@ -1168,6 +1170,7 @@ void debug_stack_set_zero(void) this_cpu_inc(debug_idt_ctr); load_current_idt(); } +NOKPROBE_SYMBOL(debug_stack_set_zero); void debug_stack_reset(void) { @@ -1176,6 +1179,7 @@ void debug_stack_reset(void) if (this_cpu_dec_return(debug_idt_ctr) == 0) load_current_idt(); } +NOKPROBE_SYMBOL(debug_stack_reset); #else /* CONFIG_X86_64 */ -- cgit v1.2.3 From 8027197220e02d5cebbbfdff36c2827661fbc692 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:19 +0900 Subject: kprobes, x86: Prohibit probing on native_set_debugreg()/load_idt() Since kprobes uses do_debug for single-stepping, functions called from do_debug() before notify_die() must not be probed. Also, native_load_idt() is called from paranoid_exit when returning from int3, so it must not be probed either.
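[ Condensed sketch of the hazard - names invented; the actual patch only adds the NOKPROBE_SYMBOL() annotations shown in the diff below: ]

    #include <linux/kprobes.h>

    struct dbg_ops {
            void (*set_dr)(int reg, unsigned long val);
    };

    /* Reached through an ops table (standing in for pv_cpu_ops) on the
     * do_debug() single-step path; a breakpoint planted here would
     * recursively raise the very exception being handled. */
    static void sample_set_dr(int reg, unsigned long val)
    {
            /* write the hardware debug register ... */
    }
    /* A function behind an ops table is still a real symbol, so it can
     * be blacklisted at its definition site. */
    NOKPROBE_SYMBOL(sample_set_dr);

    static struct dbg_ops sample_ops = { .set_dr = sample_set_dr };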
Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Alok Kataria Cc: Chris Wright Cc: Jeremy Fitzhardinge Cc: Rusty Russell Cc: virtualization@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20140417081719.26341.65542.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/paravirt.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index e136869ae42e..548d25f00c90 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -390,8 +390,10 @@ __visible struct pv_cpu_ops pv_cpu_ops = { .end_context_switch = paravirt_nop, }; -/* At this point, native_get_debugreg has a real function entry */ +/* At this point, native_get/set_debugreg has real function entries */ NOKPROBE_SYMBOL(native_get_debugreg); +NOKPROBE_SYMBOL(native_set_debugreg); +NOKPROBE_SYMBOL(native_load_idt); struct pv_apic_ops pv_apic_ops = { #ifdef CONFIG_X86_LOCAL_APIC -- cgit v1.2.3 From 98def1dedd00f42ded8423c418c971751f46aad2 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:26 +0900 Subject: kprobes, x86: Prohibit probing on thunk functions and restore The thunk/restore functions are also used for tracing irqoff etc., and they are involved in kprobes' exception handling. Prohibit probing on them to avoid a kernel crash. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Link: http://lkml.kernel.org/r/20140417081726.26341.3872.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/lib/thunk_32.S | 3 ++- arch/x86/lib/thunk_64.S | 3 +++ 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S index 2930ae05d773..28f85c916712 100644 --- a/arch/x86/lib/thunk_32.S +++ b/arch/x86/lib/thunk_32.S @@ -4,8 +4,8 @@ * (inspired by Andi Kleen's thunk_64.S) * Subject to the GNU public license, v.2. No warranty of any kind. */ - #include + #include #ifdef CONFIG_TRACE_IRQFLAGS /* put return address in eax (arg1) */ @@ -22,6 +22,7 @@ popl %ecx popl %eax ret + _ASM_NOKPROBE(\name) .endm thunk_ra trace_hardirqs_on_thunk,trace_hardirqs_on_caller diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S index a63efd6bb6a5..92d9feaff42b 100644 --- a/arch/x86/lib/thunk_64.S +++ b/arch/x86/lib/thunk_64.S @@ -8,6 +8,7 @@ #include #include #include +#include /* rdi: arg1 ... normal C conventions. rax is saved/restored. */ .macro THUNK name, func, put_ret_addr_in_rdi=0 @@ -25,6 +26,7 @@ call \func jmp restore CFI_ENDPROC + _ASM_NOKPROBE(\name) .endm #ifdef CONFIG_TRACE_IRQFLAGS @@ -43,3 +45,4 @@ restore: RESTORE_ARGS ret CFI_ENDPROC + _ASM_NOKPROBE(restore) -- cgit v1.2.3 From 6f6343f53d133bae516caf3d254bce37d8774625 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:33 +0900 Subject: kprobes/x86: Call exception handlers directly from do_int3/do_debug To avoid a kernel crash from probing lockdep code, call kprobe_int3_handler() and kprobe_debug_handler() (which was formerly called post_kprobe_handler()) directly from do_int3 and do_debug. Currently kprobes uses notify_die() to hook the int3/debug exceptions. Since there is locking code in notify_die(), the lockdep code can be invoked. And because lockdep involves printk()-related code, we would theoretically need to prohibit probing on all of that as well, which would mean a much longer blacklist. Instead, hooking int3/debug for kprobes before notify_die() avoids this problem.
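[ Condensed from the traps.c diff further down - the dispatch order this patch establishes in do_int3(); kgdb hook and error handling elided: ]

    dotraplinkage void do_int3(struct pt_regs *regs, long error_code)
    {
            if (poke_int3_handler(regs))          /* text_poke fixups first */
                    return;
    #ifdef CONFIG_KPROBES
            if (kprobe_int3_handler(regs))        /* kprobes: no notifier locks taken */
                    return;
    #endif
            /* only now the notifier chain, which takes locks and can
             * drag in lockdep/printk */
            notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP);
    }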
Anyway, most of the int3 handlers in the kernel are already called from do_int3 directly, e.g. ftrace_int3_handler, poke_int3_handler, kgdb_ll_trap. Actually only kprobe_exceptions_notify is on the notifier_call_chain. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Andrew Morton Cc: Borislav Petkov Cc: Jiri Kosina Cc: Jonathan Lebon Cc: Kees Cook Cc: Rusty Russell Cc: Seiji Aguchi Link: http://lkml.kernel.org/r/20140417081733.26341.24423.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/include/asm/kprobes.h | 2 ++ arch/x86/kernel/kprobes/core.c | 24 +++--------------------- arch/x86/kernel/traps.c | 10 ++++++++++ 3 files changed, 15 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h index 9454c167629f..53cdfb2857ab 100644 --- a/arch/x86/include/asm/kprobes.h +++ b/arch/x86/include/asm/kprobes.h @@ -116,4 +116,6 @@ struct kprobe_ctlblk { extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr); extern int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); +extern int kprobe_int3_handler(struct pt_regs *regs); +extern int kprobe_debug_handler(struct pt_regs *regs); #endif /* _ASM_X86_KPROBES_H */ diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 7751b3dee53a..9b80aec1ea1a 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -559,7 +559,7 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb * Interrupts are disabled on entry as trap3 is an interrupt gate and they * remain disabled throughout this function. */ -static int __kprobes kprobe_handler(struct pt_regs *regs) +int __kprobes kprobe_int3_handler(struct pt_regs *regs) { kprobe_opcode_t *addr; struct kprobe *p; @@ -857,7 +857,7 @@ no_change: * Interrupts are disabled on entry as trap1 is an interrupt gate and they * remain disabled throughout this function. 
*/ -static int __kprobes post_kprobe_handler(struct pt_regs *regs) +int __kprobes kprobe_debug_handler(struct pt_regs *regs) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); @@ -963,22 +963,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d if (args->regs && user_mode_vm(args->regs)) return ret; - switch (val) { - case DIE_INT3: - if (kprobe_handler(args->regs)) - ret = NOTIFY_STOP; - break; - case DIE_DEBUG: - if (post_kprobe_handler(args->regs)) { - /* - * Reset the BS bit in dr6 (pointed by args->err) to - * denote completion of processing - */ - (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; - ret = NOTIFY_STOP; - } - break; - case DIE_GPF: + if (val == DIE_GPF) { /* * To be potentially processing a kprobe fault and to * trust the result from kprobe_running(), we have @@ -987,9 +972,6 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d if (!preemptible() && kprobe_running() && kprobe_fault_handler(args->regs, args->trapnr)) ret = NOTIFY_STOP; - break; - default: - break; } return ret; } diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 57409f6b8c62..e5d4a70814d7 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -334,6 +334,11 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co goto exit; #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ +#ifdef CONFIG_KPROBES + if (kprobe_int3_handler(regs)) + return; +#endif + if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) goto exit; @@ -440,6 +445,11 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) /* Store the virtualized DR6 value */ tsk->thread.debugreg6 = dr6; +#ifdef CONFIG_KPROBES + if (kprobe_debug_handler(regs)) + goto exit; +#endif + if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code, SIGTRAP) == NOTIFY_STOP) goto exit; -- cgit v1.2.3 From ecd50f714c421c759354632dd00f70c718c95b10 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:40 +0900 Subject: kprobes, x86: Call exception_enter after kprobes handled Move exception_enter() call after kprobes handler is done. Since the exception_enter() involves many other functions (like printk), it can cause recursive int3/break loop when kprobes probe such functions. 
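[ The same reordering in do_debug(), roughly - control flow simplified from the diff below; dr6 handling and notifier details elided: ]

    dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
    {
            enum ctx_state prev_state;

            /* ... read and filter dr6; no context tracking yet ... */
    #ifdef CONFIG_KPROBES
            if (kprobe_debug_handler(regs))
                    return;                       /* handled before exception_enter() */
    #endif
            prev_state = exception_enter();       /* moved to after the kprobes hook */
            /* ... notify_die(), signal delivery ... */
            exception_exit(prev_state);
    }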
Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Andrew Morton Cc: Borislav Petkov Cc: Jiri Kosina Cc: Kees Cook Cc: Rusty Russell Cc: Seiji Aguchi Link: http://lkml.kernel.org/r/20140417081740.26341.10894.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/traps.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index e5d4a70814d7..ba9abe9cbce3 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -327,7 +327,6 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co if (poke_int3_handler(regs)) return; - prev_state = exception_enter(); #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) @@ -338,6 +337,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co if (kprobe_int3_handler(regs)) return; #endif + prev_state = exception_enter(); if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP, SIGTRAP) == NOTIFY_STOP) @@ -415,8 +415,6 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) unsigned long dr6; int si_code; - prev_state = exception_enter(); - get_debugreg(dr6, 6); /* Filter out all the reserved bits which are preset to 1 */ @@ -449,6 +447,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) if (kprobe_debug_handler(regs)) goto exit; #endif + prev_state = exception_enter(); if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code, SIGTRAP) == NOTIFY_STOP) -- cgit v1.2.3 From 7ec8a97a990da8e3ba87175a757731e17f74072e Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:17:47 +0900 Subject: kprobes/x86: Allow probe on some kprobe preparation functions There is no need to prohibit probing on the functions used in preparation phase. Those are safely probed because those are not invoked from breakpoint/fault/debug handlers, there is no chance to cause recursive exceptions. Following functions are now removed from the kprobes blacklist: can_boost can_probe can_optimize is_IF_modifier __copy_instruction copy_optimized_instructions arch_copy_kprobe arch_prepare_kprobe arch_arm_kprobe arch_disarm_kprobe arch_remove_kprobe arch_trampoline_kprobe arch_prepare_kprobe_ftrace arch_prepare_optimized_kprobe arch_check_optimized_kprobe arch_within_optimized_kprobe __arch_remove_optimized_kprobe arch_remove_optimized_kprobe arch_optimize_kprobes arch_unoptimize_kprobe I tested those functions by putting kprobes on all instructions in the functions with the bash script I sent to LKML. See: https://lkml.org/lkml/2014/3/27/33 Signed-off-by: Masami Hiramatsu Cc: Jiri Kosina Cc: Jonathan Lebon Link: http://lkml.kernel.org/r/20140417081747.26341.36065.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes/core.c | 20 ++++++++++---------- arch/x86/kernel/kprobes/ftrace.c | 2 +- arch/x86/kernel/kprobes/opt.c | 24 ++++++++++++------------ 3 files changed, 23 insertions(+), 23 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 9b80aec1ea1a..bd717137ae77 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -159,7 +159,7 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn) * Returns non-zero if opcode is boostable. 
* RIP relative instructions are adjusted at copying time in 64 bits mode */ -int __kprobes can_boost(kprobe_opcode_t *opcodes) +int can_boost(kprobe_opcode_t *opcodes) { kprobe_opcode_t opcode; kprobe_opcode_t *orig_opcodes = opcodes; @@ -260,7 +260,7 @@ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long add } /* Check if paddr is at an instruction boundary */ -static int __kprobes can_probe(unsigned long paddr) +static int can_probe(unsigned long paddr) { unsigned long addr, __addr, offset = 0; struct insn insn; @@ -299,7 +299,7 @@ static int __kprobes can_probe(unsigned long paddr) /* * Returns non-zero if opcode modifies the interrupt flag. */ -static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) +static int is_IF_modifier(kprobe_opcode_t *insn) { /* Skip prefixes */ insn = skip_prefixes(insn); @@ -322,7 +322,7 @@ static int __kprobes is_IF_modifier(kprobe_opcode_t *insn) * If not, return null. * Only applicable to 64-bit x86. */ -int __kprobes __copy_instruction(u8 *dest, u8 *src) +int __copy_instruction(u8 *dest, u8 *src) { struct insn insn; kprobe_opcode_t buf[MAX_INSN_SIZE]; @@ -365,7 +365,7 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src) return insn.length; } -static int __kprobes arch_copy_kprobe(struct kprobe *p) +static int arch_copy_kprobe(struct kprobe *p) { int ret; @@ -392,7 +392,7 @@ static int __kprobes arch_copy_kprobe(struct kprobe *p) return 0; } -int __kprobes arch_prepare_kprobe(struct kprobe *p) +int arch_prepare_kprobe(struct kprobe *p) { if (alternatives_text_reserved(p->addr, p->addr)) return -EINVAL; @@ -407,17 +407,17 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return arch_copy_kprobe(p); } -void __kprobes arch_arm_kprobe(struct kprobe *p) +void arch_arm_kprobe(struct kprobe *p) { text_poke(p->addr, ((unsigned char []){BREAKPOINT_INSTRUCTION}), 1); } -void __kprobes arch_disarm_kprobe(struct kprobe *p) +void arch_disarm_kprobe(struct kprobe *p) { text_poke(p->addr, &p->opcode, 1); } -void __kprobes arch_remove_kprobe(struct kprobe *p) +void arch_remove_kprobe(struct kprobe *p) { if (p->ainsn.insn) { free_insn_slot(p->ainsn.insn, (p->ainsn.boostable == 1)); @@ -1060,7 +1060,7 @@ int __init arch_init_kprobes(void) return 0; } -int __kprobes arch_trampoline_kprobe(struct kprobe *p) +int arch_trampoline_kprobe(struct kprobe *p) { return 0; } diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index 23ef5c556f06..dcaa1310ccfd 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -85,7 +85,7 @@ end: local_irq_restore(flags); } -int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p) +int arch_prepare_kprobe_ftrace(struct kprobe *p) { p->ainsn.insn = NULL; p->ainsn.boostable = -1; diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 898160b42e43..fba7fb075e8a 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -77,7 +77,7 @@ found: } /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). 
*/ -static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) +static void synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val) { #ifdef CONFIG_X86_64 *addr++ = 0x48; @@ -169,7 +169,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_ local_irq_restore(flags); } -static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) +static int copy_optimized_instructions(u8 *dest, u8 *src) { int len = 0, ret; @@ -189,7 +189,7 @@ static int __kprobes copy_optimized_instructions(u8 *dest, u8 *src) } /* Check whether insn is indirect jump */ -static int __kprobes insn_is_indirect_jump(struct insn *insn) +static int insn_is_indirect_jump(struct insn *insn) { return ((insn->opcode.bytes[0] == 0xff && (X86_MODRM_REG(insn->modrm.value) & 6) == 4) || /* Jump */ @@ -224,7 +224,7 @@ static int insn_jump_into_range(struct insn *insn, unsigned long start, int len) } /* Decode whole function to ensure any instructions don't jump into target */ -static int __kprobes can_optimize(unsigned long paddr) +static int can_optimize(unsigned long paddr) { unsigned long addr, size = 0, offset = 0; struct insn insn; @@ -275,7 +275,7 @@ static int __kprobes can_optimize(unsigned long paddr) } /* Check optimized_kprobe can actually be optimized. */ -int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) +int arch_check_optimized_kprobe(struct optimized_kprobe *op) { int i; struct kprobe *p; @@ -290,15 +290,15 @@ int __kprobes arch_check_optimized_kprobe(struct optimized_kprobe *op) } /* Check the addr is within the optimized instructions. */ -int __kprobes -arch_within_optimized_kprobe(struct optimized_kprobe *op, unsigned long addr) +int arch_within_optimized_kprobe(struct optimized_kprobe *op, + unsigned long addr) { return ((unsigned long)op->kp.addr <= addr && (unsigned long)op->kp.addr + op->optinsn.size > addr); } /* Free optimized instruction slot */ -static __kprobes +static void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) { if (op->optinsn.insn) { @@ -308,7 +308,7 @@ void __arch_remove_optimized_kprobe(struct optimized_kprobe *op, int dirty) } } -void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) +void arch_remove_optimized_kprobe(struct optimized_kprobe *op) { __arch_remove_optimized_kprobe(op, 1); } @@ -318,7 +318,7 @@ void __kprobes arch_remove_optimized_kprobe(struct optimized_kprobe *op) * Target instructions MUST be relocatable (checked inside) * This is called when new aggr(opt)probe is allocated or reused. */ -int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) +int arch_prepare_optimized_kprobe(struct optimized_kprobe *op) { u8 *buf; int ret; @@ -372,7 +372,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op) * Replace breakpoints (int3) with relative jumps. * Caller must call with locking kprobe_mutex and text_mutex. */ -void __kprobes arch_optimize_kprobes(struct list_head *oplist) +void arch_optimize_kprobes(struct list_head *oplist) { struct optimized_kprobe *op, *tmp; u8 insn_buf[RELATIVEJUMP_SIZE]; @@ -398,7 +398,7 @@ void __kprobes arch_optimize_kprobes(struct list_head *oplist) } /* Replace a relative jump with a breakpoint (int3). 
*/ -void __kprobes arch_unoptimize_kprobe(struct optimized_kprobe *op) +void arch_unoptimize_kprobe(struct optimized_kprobe *op) { u8 insn_buf[RELATIVEJUMP_SIZE]; -- cgit v1.2.3 From 9c54b6164eeb292a0eac86c6913bd8daaff35e62 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:18:07 +0900 Subject: kprobes, x86: Allow kprobes on text_poke/hw_breakpoint Allow kprobes on text_poke/hw_breakpoint because those are not related to the critical int3-debug recursive path of kprobes at this moment. Signed-off-by: Masami Hiramatsu Reviewed-by: Steven Rostedt Cc: Andrew Morton Cc: Borislav Petkov Cc: Fengguang Wu Cc: Jiri Kosina Cc: Oleg Nesterov Cc: Paul Gortmaker Link: http://lkml.kernel.org/r/20140417081807.26341.73219.stgit@ltc230.yrl.intra.hitachi.co.jp Signed-off-by: Ingo Molnar --- arch/x86/kernel/alternative.c | 3 +-- arch/x86/kernel/hw_breakpoint.c | 5 ++--- 2 files changed, 3 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index df94598ad05a..703130f469ec 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -5,7 +5,6 @@ #include #include #include -#include #include #include #include @@ -551,7 +550,7 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode, * * Note: Must be called under text_mutex. */ -void *__kprobes text_poke(void *addr, const void *opcode, size_t len) +void *text_poke(void *addr, const void *opcode, size_t len) { unsigned long flags; char *vaddr; diff --git a/arch/x86/kernel/hw_breakpoint.c b/arch/x86/kernel/hw_breakpoint.c index a67b47c31314..5f9cf20cdb68 100644 --- a/arch/x86/kernel/hw_breakpoint.c +++ b/arch/x86/kernel/hw_breakpoint.c @@ -32,7 +32,6 @@ #include #include #include -#include #include #include #include @@ -424,7 +423,7 @@ EXPORT_SYMBOL_GPL(hw_breakpoint_restore); * NOTIFY_STOP returned for all other cases * */ -static int __kprobes hw_breakpoint_handler(struct die_args *args) +static int hw_breakpoint_handler(struct die_args *args) { int i, cpu, rc = NOTIFY_STOP; struct perf_event *bp; @@ -511,7 +510,7 @@ static int __kprobes hw_breakpoint_handler(struct die_args *args) /* * Handle debug exception notifications. */ -int __kprobes hw_breakpoint_exceptions_notify( +int hw_breakpoint_exceptions_notify( struct notifier_block *unused, unsigned long val, void *data) { if (val != DIE_DEBUG) -- cgit v1.2.3 From 9326638cbee2d36b051ed2a69f3e4e107e5f86bd Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Thu, 17 Apr 2014 17:18:14 +0900 Subject: kprobes, x86: Use NOKPROBE_SYMBOL() instead of __kprobes annotation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use NOKPROBE_SYMBOL macro for protecting functions from kprobes instead of __kprobes annotation under arch/x86. This applies nokprobe_inline annotation for some cases, because NOKPROBE_SYMBOL() will inhibit inlining by referring the symbol address. This just folds a bunch of previous NOKPROBE_SYMBOL() cleanup patches for x86 to one patch. Signed-off-by: Masami Hiramatsu Link: http://lkml.kernel.org/r/20140417081814.26341.51656.stgit@ltc230.yrl.intra.hitachi.co.jp Cc: Andrew Morton Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: Dave Hansen Cc: Fernando Luis Vázquez Cao Cc: Gleb Natapov Cc: Jason Wang Cc: Jesper Nilsson Cc: Jiri Kosina Cc: Jiri Olsa Cc: Jiri Slaby Cc: Johannes Weiner Cc: Jonathan Lebon Cc: Kees Cook Cc: Matt Fleming Cc: Michel Lespinasse Cc: Paolo Bonzini Cc: Paul E. 
McKenney Cc: Paul Gortmaker Cc: Paul Mackerras Cc: Raghavendra K T Cc: Rusty Russell Cc: Seiji Aguchi Cc: Srivatsa Vaddagiri Cc: Tejun Heo Cc: Vineet Gupta Signed-off-by: Ingo Molnar --- arch/x86/include/asm/traps.h | 2 +- arch/x86/kernel/apic/hw_nmi.c | 3 +- arch/x86/kernel/cpu/perf_event.c | 3 +- arch/x86/kernel/cpu/perf_event_amd_ibs.c | 3 +- arch/x86/kernel/dumpstack.c | 9 ++-- arch/x86/kernel/kprobes/core.c | 77 ++++++++++++++++++++------------ arch/x86/kernel/kprobes/ftrace.c | 15 ++++--- arch/x86/kernel/kprobes/opt.c | 8 ++-- arch/x86/kernel/kvm.c | 4 +- arch/x86/kernel/nmi.c | 18 +++++--- arch/x86/kernel/traps.c | 20 ++++++--- arch/x86/mm/fault.c | 29 +++++++----- 12 files changed, 122 insertions(+), 69 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 58d66fe06b61..ca32508c58c5 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -68,7 +68,7 @@ dotraplinkage void do_segment_not_present(struct pt_regs *, long); dotraplinkage void do_stack_segment(struct pt_regs *, long); #ifdef CONFIG_X86_64 dotraplinkage void do_double_fault(struct pt_regs *, long); -asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *); +asmlinkage struct pt_regs *sync_regs(struct pt_regs *); #endif dotraplinkage void do_general_protection(struct pt_regs *, long); dotraplinkage void do_page_fault(struct pt_regs *, unsigned long); diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c index a698d7165c96..73eb5b336f63 100644 --- a/arch/x86/kernel/apic/hw_nmi.c +++ b/arch/x86/kernel/apic/hw_nmi.c @@ -60,7 +60,7 @@ void arch_trigger_all_cpu_backtrace(void) smp_mb__after_clear_bit(); } -static int __kprobes +static int arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs) { int cpu; @@ -80,6 +80,7 @@ arch_trigger_all_cpu_backtrace_handler(unsigned int cmd, struct pt_regs *regs) return NMI_DONE; } +NOKPROBE_SYMBOL(arch_trigger_all_cpu_backtrace_handler); static int __init register_trigger_all_cpu_backtrace(void) { diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index ae407f7226c8..5fc8771979ba 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -1292,7 +1292,7 @@ void perf_events_lapic_init(void) apic_write(APIC_LVTPC, APIC_DM_NMI); } -static int __kprobes +static int perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) { u64 start_clock; @@ -1310,6 +1310,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) return ret; } +NOKPROBE_SYMBOL(perf_event_nmi_handler); struct event_constraint emptyconstraint; struct event_constraint unconstrained; diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index 4c36bbe3173a..cbb1be3ed9e4 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c @@ -593,7 +593,7 @@ out: return 1; } -static int __kprobes +static int perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs) { int handled = 0; @@ -606,6 +606,7 @@ perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs) return handled; } +NOKPROBE_SYMBOL(perf_ibs_nmi_handler); static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name) { diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index d9c12d3022a7..b74ebc7c4402 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -200,7 +200,7 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; 
static int die_owner = -1; static unsigned int die_nest_count; -unsigned __kprobes long oops_begin(void) +unsigned long oops_begin(void) { int cpu; unsigned long flags; @@ -223,8 +223,9 @@ unsigned __kprobes long oops_begin(void) return flags; } EXPORT_SYMBOL_GPL(oops_begin); +NOKPROBE_SYMBOL(oops_begin); -void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) +void oops_end(unsigned long flags, struct pt_regs *regs, int signr) { if (regs && kexec_should_crash(current)) crash_kexec(regs); @@ -247,8 +248,9 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) panic("Fatal exception"); do_exit(signr); } +NOKPROBE_SYMBOL(oops_end); -int __kprobes __die(const char *str, struct pt_regs *regs, long err) +int __die(const char *str, struct pt_regs *regs, long err) { #ifdef CONFIG_X86_32 unsigned short ss; @@ -291,6 +293,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err) #endif return 0; } +NOKPROBE_SYMBOL(__die); /* * This is gone through when something in the kernel has done something bad diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index bd717137ae77..7596df664901 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -112,7 +112,8 @@ struct kretprobe_blackpoint kretprobe_blacklist[] = { const int kretprobe_blacklist_size = ARRAY_SIZE(kretprobe_blacklist); -static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) +static nokprobe_inline void +__synthesize_relative_insn(void *from, void *to, u8 op) { struct __arch_relative_insn { u8 op; @@ -125,21 +126,23 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op) } /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ -void __kprobes synthesize_reljump(void *from, void *to) +void synthesize_reljump(void *from, void *to) { __synthesize_relative_insn(from, to, RELATIVEJUMP_OPCODE); } +NOKPROBE_SYMBOL(synthesize_reljump); /* Insert a call instruction at address 'from', which calls address 'to'.*/ -void __kprobes synthesize_relcall(void *from, void *to) +void synthesize_relcall(void *from, void *to) { __synthesize_relative_insn(from, to, RELATIVECALL_OPCODE); } +NOKPROBE_SYMBOL(synthesize_relcall); /* * Skip the prefixes of the instruction. */ -static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn) +static kprobe_opcode_t *skip_prefixes(kprobe_opcode_t *insn) { insn_attr_t attr; @@ -154,6 +157,7 @@ static kprobe_opcode_t *__kprobes skip_prefixes(kprobe_opcode_t *insn) #endif return insn; } +NOKPROBE_SYMBOL(skip_prefixes); /* * Returns non-zero if opcode is boostable. 
@@ -425,7 +429,8 @@ void arch_remove_kprobe(struct kprobe *p) } } -static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) +static nokprobe_inline void +save_previous_kprobe(struct kprobe_ctlblk *kcb) { kcb->prev_kprobe.kp = kprobe_running(); kcb->prev_kprobe.status = kcb->kprobe_status; @@ -433,7 +438,8 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb) kcb->prev_kprobe.saved_flags = kcb->kprobe_saved_flags; } -static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) +static nokprobe_inline void +restore_previous_kprobe(struct kprobe_ctlblk *kcb) { __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); kcb->kprobe_status = kcb->prev_kprobe.status; @@ -441,8 +447,9 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) kcb->kprobe_saved_flags = kcb->prev_kprobe.saved_flags; } -static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) +static nokprobe_inline void +set_current_kprobe(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) { __this_cpu_write(current_kprobe, p); kcb->kprobe_saved_flags = kcb->kprobe_old_flags @@ -451,7 +458,7 @@ static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, kcb->kprobe_saved_flags &= ~X86_EFLAGS_IF; } -static void __kprobes clear_btf(void) +static nokprobe_inline void clear_btf(void) { if (test_thread_flag(TIF_BLOCKSTEP)) { unsigned long debugctl = get_debugctlmsr(); @@ -461,7 +468,7 @@ static void __kprobes clear_btf(void) } } -static void __kprobes restore_btf(void) +static nokprobe_inline void restore_btf(void) { if (test_thread_flag(TIF_BLOCKSTEP)) { unsigned long debugctl = get_debugctlmsr(); @@ -471,8 +478,7 @@ static void __kprobes restore_btf(void) } } -void __kprobes -arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) +void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) { unsigned long *sara = stack_addr(regs); @@ -481,9 +487,10 @@ arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs) /* Replace the return addr with trampoline addr */ *sara = (unsigned long) &kretprobe_trampoline; } +NOKPROBE_SYMBOL(arch_prepare_kretprobe); -static void __kprobes -setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb, int reenter) +static void setup_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb, int reenter) { if (setup_detour_execution(p, regs, reenter)) return; @@ -519,14 +526,15 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k else regs->ip = (unsigned long)p->ainsn.insn; } +NOKPROBE_SYMBOL(setup_singlestep); /* * We have reentered the kprobe_handler(), since another probe was hit while * within the handler. We save the original kprobes variables and just single * step on the instruction of the new probe without calling any user handlers. */ -static int __kprobes -reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) +static int reenter_kprobe(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) { switch (kcb->kprobe_status) { case KPROBE_HIT_SSDONE: @@ -554,12 +562,13 @@ reenter_kprobe(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb return 1; } +NOKPROBE_SYMBOL(reenter_kprobe); /* * Interrupts are disabled on entry as trap3 is an interrupt gate and they * remain disabled throughout this function. 
*/ -int __kprobes kprobe_int3_handler(struct pt_regs *regs) +int kprobe_int3_handler(struct pt_regs *regs) { kprobe_opcode_t *addr; struct kprobe *p; @@ -622,12 +631,13 @@ int __kprobes kprobe_int3_handler(struct pt_regs *regs) preempt_enable_no_resched(); return 0; } +NOKPROBE_SYMBOL(kprobe_int3_handler); /* * When a retprobed function returns, this code saves registers and * calls trampoline_handler() runs, which calls the kretprobe's handler. */ -static void __used __kprobes kretprobe_trampoline_holder(void) +static void __used kretprobe_trampoline_holder(void) { asm volatile ( ".global kretprobe_trampoline\n" @@ -658,11 +668,13 @@ static void __used __kprobes kretprobe_trampoline_holder(void) #endif " ret\n"); } +NOKPROBE_SYMBOL(kretprobe_trampoline_holder); +NOKPROBE_SYMBOL(kretprobe_trampoline); /* * Called from kretprobe_trampoline */ -__visible __used __kprobes void *trampoline_handler(struct pt_regs *regs) +__visible __used void *trampoline_handler(struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; @@ -748,6 +760,7 @@ __visible __used __kprobes void *trampoline_handler(struct pt_regs *regs) } return (void *)orig_ret_address; } +NOKPROBE_SYMBOL(trampoline_handler); /* * Called after single-stepping. p->addr is the address of the @@ -776,8 +789,8 @@ __visible __used __kprobes void *trampoline_handler(struct pt_regs *regs) * jump instruction after the copied instruction, that jumps to the next * instruction after the probepoint. */ -static void __kprobes -resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) +static void resume_execution(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) { unsigned long *tos = stack_addr(regs); unsigned long copy_ip = (unsigned long)p->ainsn.insn; @@ -852,12 +865,13 @@ resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k no_change: restore_btf(); } +NOKPROBE_SYMBOL(resume_execution); /* * Interrupts are disabled on entry as trap1 is an interrupt gate and they * remain disabled throughout this function. */ -int __kprobes kprobe_debug_handler(struct pt_regs *regs) +int kprobe_debug_handler(struct pt_regs *regs) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); @@ -892,8 +906,9 @@ out: return 1; } +NOKPROBE_SYMBOL(kprobe_debug_handler); -int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) +int kprobe_fault_handler(struct pt_regs *regs, int trapnr) { struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); @@ -950,12 +965,13 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) return 0; } +NOKPROBE_SYMBOL(kprobe_fault_handler); /* * Wrapper routine for handling exceptions. 
*/ -int __kprobes -kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data) +int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, + void *data) { struct die_args *args = data; int ret = NOTIFY_DONE; @@ -975,8 +991,9 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d } return ret; } +NOKPROBE_SYMBOL(kprobe_exceptions_notify); -int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) +int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) { struct jprobe *jp = container_of(p, struct jprobe, kp); unsigned long addr; @@ -1000,8 +1017,9 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) regs->ip = (unsigned long)(jp->entry); return 1; } +NOKPROBE_SYMBOL(setjmp_pre_handler); -void __kprobes jprobe_return(void) +void jprobe_return(void) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); @@ -1017,8 +1035,10 @@ void __kprobes jprobe_return(void) " nop \n"::"b" (kcb->jprobe_saved_sp):"memory"); } +NOKPROBE_SYMBOL(jprobe_return); +NOKPROBE_SYMBOL(jprobe_return_end); -int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) +int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); u8 *addr = (u8 *) (regs->ip - 1); @@ -1046,6 +1066,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) } return 0; } +NOKPROBE_SYMBOL(longjmp_break_handler); bool arch_within_kprobe_blacklist(unsigned long addr) { diff --git a/arch/x86/kernel/kprobes/ftrace.c b/arch/x86/kernel/kprobes/ftrace.c index dcaa1310ccfd..717b02a22e67 100644 --- a/arch/x86/kernel/kprobes/ftrace.c +++ b/arch/x86/kernel/kprobes/ftrace.c @@ -25,8 +25,9 @@ #include "common.h" -static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) +static nokprobe_inline +int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) { /* * Emulate singlestep (and also recover regs->ip) @@ -41,18 +42,19 @@ static int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, return 1; } -int __kprobes skip_singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) +int skip_singlestep(struct kprobe *p, struct pt_regs *regs, + struct kprobe_ctlblk *kcb) { if (kprobe_ftrace(p)) return __skip_singlestep(p, regs, kcb); else return 0; } +NOKPROBE_SYMBOL(skip_singlestep); /* Ftrace callback handler for kprobes */ -void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, - struct ftrace_ops *ops, struct pt_regs *regs) +void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *ops, struct pt_regs *regs) { struct kprobe *p; struct kprobe_ctlblk *kcb; @@ -84,6 +86,7 @@ void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, end: local_irq_restore(flags); } +NOKPROBE_SYMBOL(kprobe_ftrace_handler); int arch_prepare_kprobe_ftrace(struct kprobe *p) { diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index fba7fb075e8a..f304773285ae 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -138,7 +138,8 @@ asm ( #define INT3_SIZE sizeof(kprobe_opcode_t) /* Optimized kprobe call back function: called from optinsn */ -static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) +static void +optimized_callback(struct optimized_kprobe *op, struct pt_regs *regs) { struct kprobe_ctlblk *kcb = 
get_kprobe_ctlblk(); unsigned long flags; @@ -168,6 +169,7 @@ static void __kprobes optimized_callback(struct optimized_kprobe *op, struct pt_ } local_irq_restore(flags); } +NOKPROBE_SYMBOL(optimized_callback); static int copy_optimized_instructions(u8 *dest, u8 *src) { @@ -424,8 +426,7 @@ extern void arch_unoptimize_kprobes(struct list_head *oplist, } } -int __kprobes -setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) +int setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) { struct optimized_kprobe *op; @@ -441,3 +442,4 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter) } return 0; } +NOKPROBE_SYMBOL(setup_detour_execution); diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 0331cb389d68..d81abcbfe501 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -251,8 +251,9 @@ u32 kvm_read_and_reset_pf_reason(void) return reason; } EXPORT_SYMBOL_GPL(kvm_read_and_reset_pf_reason); +NOKPROBE_SYMBOL(kvm_read_and_reset_pf_reason); -dotraplinkage void __kprobes +dotraplinkage void do_async_page_fault(struct pt_regs *regs, unsigned long error_code) { enum ctx_state prev_state; @@ -276,6 +277,7 @@ do_async_page_fault(struct pt_regs *regs, unsigned long error_code) break; } } +NOKPROBE_SYMBOL(do_async_page_fault); static void __init paravirt_ops_setup(void) { diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index b4872b999a71..c3e985d1751c 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -110,7 +110,7 @@ static void nmi_max_handler(struct irq_work *w) a->handler, whole_msecs, decimal_msecs); } -static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) +static int nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) { struct nmi_desc *desc = nmi_to_desc(type); struct nmiaction *a; @@ -146,6 +146,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2 /* return total number of NMI events handled */ return handled; } +NOKPROBE_SYMBOL(nmi_handle); int __register_nmi_handler(unsigned int type, struct nmiaction *action) { @@ -208,7 +209,7 @@ void unregister_nmi_handler(unsigned int type, const char *name) } EXPORT_SYMBOL_GPL(unregister_nmi_handler); -static __kprobes void +static void pci_serr_error(unsigned char reason, struct pt_regs *regs) { /* check to see if anyone registered against these types of errors */ @@ -238,8 +239,9 @@ pci_serr_error(unsigned char reason, struct pt_regs *regs) reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR; outb(reason, NMI_REASON_PORT); } +NOKPROBE_SYMBOL(pci_serr_error); -static __kprobes void +static void io_check_error(unsigned char reason, struct pt_regs *regs) { unsigned long i; @@ -269,8 +271,9 @@ io_check_error(unsigned char reason, struct pt_regs *regs) reason &= ~NMI_REASON_CLEAR_IOCHK; outb(reason, NMI_REASON_PORT); } +NOKPROBE_SYMBOL(io_check_error); -static __kprobes void +static void unknown_nmi_error(unsigned char reason, struct pt_regs *regs) { int handled; @@ -298,11 +301,12 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs) pr_emerg("Dazed and confused, but trying to continue\n"); } +NOKPROBE_SYMBOL(unknown_nmi_error); static DEFINE_PER_CPU(bool, swallow_nmi); static DEFINE_PER_CPU(unsigned long, last_nmi_rip); -static __kprobes void default_do_nmi(struct pt_regs *regs) +static void default_do_nmi(struct pt_regs *regs) { unsigned char reason = 0; int handled; @@ -401,6 +405,7 @@ static __kprobes void default_do_nmi(struct pt_regs 
*regs) else unknown_nmi_error(reason, regs); } +NOKPROBE_SYMBOL(default_do_nmi); /* * NMIs can hit breakpoints which will cause it to lose its @@ -520,7 +525,7 @@ static inline void nmi_nesting_postprocess(void) } #endif -dotraplinkage notrace __kprobes void +dotraplinkage notrace void do_nmi(struct pt_regs *regs, long error_code) { nmi_nesting_preprocess(regs); @@ -537,6 +542,7 @@ do_nmi(struct pt_regs *regs, long error_code) /* On i386, may loop back to preprocess */ nmi_nesting_postprocess(); } +NOKPROBE_SYMBOL(do_nmi); void stop_nmi(void) { diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ba9abe9cbce3..3c8ae7d83820 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -106,7 +106,7 @@ static inline void preempt_conditional_cli(struct pt_regs *regs) preempt_count_dec(); } -static int __kprobes +static nokprobe_inline int do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, struct pt_regs *regs, long error_code) { @@ -136,7 +136,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, return -1; } -static void __kprobes +static void do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, long error_code, siginfo_t *info) { @@ -173,6 +173,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, else force_sig(signr, tsk); } +NOKPROBE_SYMBOL(do_trap); #define DO_ERROR(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ @@ -263,7 +264,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) } #endif -dotraplinkage void __kprobes +dotraplinkage void do_general_protection(struct pt_regs *regs, long error_code) { struct task_struct *tsk; @@ -309,9 +310,10 @@ do_general_protection(struct pt_regs *regs, long error_code) exit: exception_exit(prev_state); } +NOKPROBE_SYMBOL(do_general_protection); /* May run on IST stack. */ -dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code) +dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code) { enum ctx_state prev_state; @@ -355,6 +357,7 @@ dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_co exit: exception_exit(prev_state); } +NOKPROBE_SYMBOL(do_int3); #ifdef CONFIG_X86_64 /* @@ -362,7 +365,7 @@ exit: * for scheduling or signal handling. The actual stack switch is done in * entry.S */ -asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) +asmlinkage struct pt_regs *sync_regs(struct pt_regs *eregs) { struct pt_regs *regs = eregs; /* Did already sync */ @@ -381,6 +384,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) *regs = *eregs; return regs; } +NOKPROBE_SYMBOL(sync_regs); #endif /* @@ -407,7 +411,7 @@ asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs) * * May run on IST stack. 
*/ -dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) +dotraplinkage void do_debug(struct pt_regs *regs, long error_code) { struct task_struct *tsk = current; enum ctx_state prev_state; @@ -491,6 +495,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code) exit: exception_exit(prev_state); } +NOKPROBE_SYMBOL(do_debug); /* * Note that we play around with the 'TS' bit in an attempt to get @@ -662,7 +667,7 @@ void math_state_restore(void) } EXPORT_SYMBOL_GPL(math_state_restore); -dotraplinkage void __kprobes +dotraplinkage void do_device_not_available(struct pt_regs *regs, long error_code) { enum ctx_state prev_state; @@ -688,6 +693,7 @@ do_device_not_available(struct pt_regs *regs, long error_code) #endif exception_exit(prev_state); } +NOKPROBE_SYMBOL(do_device_not_available); #ifdef CONFIG_X86_32 dotraplinkage void do_iret_error(struct pt_regs *regs, long error_code) diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 8e5722992677..f83bd0de5eef 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -8,7 +8,7 @@ #include /* oops_begin/end, ... */ #include /* search_exception_table */ #include /* max_low_pfn */ -#include /* __kprobes, ... */ +#include /* NOKPROBE_SYMBOL, ... */ #include /* kmmio_handler, ... */ #include /* perf_sw_event */ #include /* hstate_index_to_shift */ @@ -45,7 +45,7 @@ enum x86_pf_error_code { * Returns 0 if mmiotrace is disabled, or if the fault is not * handled by mmiotrace: */ -static inline int __kprobes +static nokprobe_inline int kmmio_fault(struct pt_regs *regs, unsigned long addr) { if (unlikely(is_kmmio_active())) @@ -54,7 +54,7 @@ kmmio_fault(struct pt_regs *regs, unsigned long addr) return 0; } -static inline int __kprobes kprobes_fault(struct pt_regs *regs) +static nokprobe_inline int kprobes_fault(struct pt_regs *regs) { int ret = 0; @@ -261,7 +261,7 @@ void vmalloc_sync_all(void) * * Handle a fault on the vmalloc or module mapping area */ -static noinline __kprobes int vmalloc_fault(unsigned long address) +static noinline int vmalloc_fault(unsigned long address) { unsigned long pgd_paddr; pmd_t *pmd_k; @@ -291,6 +291,7 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) return 0; } +NOKPROBE_SYMBOL(vmalloc_fault); /* * Did it hit the DOS screen memory VA from vm86 mode? @@ -358,7 +359,7 @@ void vmalloc_sync_all(void) * * This assumes no large pages in there. */ -static noinline __kprobes int vmalloc_fault(unsigned long address) +static noinline int vmalloc_fault(unsigned long address) { pgd_t *pgd, *pgd_ref; pud_t *pud, *pud_ref; @@ -425,6 +426,7 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) return 0; } +NOKPROBE_SYMBOL(vmalloc_fault); #ifdef CONFIG_CPU_SUP_AMD static const char errata93_warning[] = @@ -927,7 +929,7 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte) * There are no security implications to leaving a stale TLB when * increasing the permissions on a page. */ -static noinline __kprobes int +static noinline int spurious_fault(unsigned long error_code, unsigned long address) { pgd_t *pgd; @@ -975,6 +977,7 @@ spurious_fault(unsigned long error_code, unsigned long address) return ret; } +NOKPROBE_SYMBOL(spurious_fault); int show_unhandled_signals = 1; @@ -1030,7 +1033,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs) * {,trace_}do_page_fault() have notrace on. Having this an actual function * guarantees there's a function trace entry. 
*/ -static void __kprobes noinline +static noinline void __do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address) { @@ -1253,8 +1256,9 @@ good_area: up_read(&mm->mmap_sem); } +NOKPROBE_SYMBOL(__do_page_fault); -dotraplinkage void __kprobes notrace +dotraplinkage void notrace do_page_fault(struct pt_regs *regs, unsigned long error_code) { unsigned long address = read_cr2(); /* Get the faulting address */ @@ -1272,10 +1276,12 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) __do_page_fault(regs, error_code, address); exception_exit(prev_state); } +NOKPROBE_SYMBOL(do_page_fault); #ifdef CONFIG_TRACING -static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs, - unsigned long error_code) +static nokprobe_inline void +trace_page_fault_entries(unsigned long address, struct pt_regs *regs, + unsigned long error_code) { if (user_mode(regs)) trace_page_fault_user(address, regs, error_code); @@ -1283,7 +1289,7 @@ static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs trace_page_fault_kernel(address, regs, error_code); } -dotraplinkage void __kprobes notrace +dotraplinkage void notrace trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) { /* @@ -1300,4 +1306,5 @@ trace_do_page_fault(struct pt_regs *regs, unsigned long error_code) __do_page_fault(regs, error_code, address); exception_exit(prev_state); } +NOKPROBE_SYMBOL(trace_do_page_fault); #endif /* CONFIG_TRACING */ -- cgit v1.2.3 From 250bbd12c2fe1221ec96d8087d63e982d4f2180a Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Thu, 24 Apr 2014 19:08:24 +0200 Subject: uprobes/x86: Refuse to attach uprobe to "word-sized" branch insns All branch insns on x86 can be prefixed with the operand-size override prefix, 0x66. It was only ever useful for performing jumps to 32-bit offsets in 16-bit code segments. In 32-bit code, such instructions are useless since they cause IP truncation to 16 bits, and in case of call insns, they save only 16 bits of return address and misalign the stack pointer as a "bonus". In 64-bit code, such instructions are treated differently by Intel and AMD CPUs: Intel ignores the prefix altogether, AMD treats them the same as in 32-bit mode. Before this patch, the emulation code would execute the instructions as if they have no 0x66 prefix. With this patch, we refuse to attach uprobes to such insns. Signed-off-by: Denys Vlasenko Acked-by: Jim Keniston Acked-by: Masami Hiramatsu Signed-off-by: Oleg Nesterov --- arch/x86/kernel/uprobes.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index ace22916ade3..3cf24a218196 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -583,6 +583,7 @@ static struct uprobe_xol_ops branch_xol_ops = { static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) { u8 opc1 = OPCODE1(insn); + int i; /* has the side-effect of processing the entire instruction */ insn_get_length(insn); @@ -612,6 +613,16 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) return -ENOSYS; } + /* + * 16-bit overrides such as CALLW (66 e8 nn nn) are not supported. + * Intel and AMD behavior differ in 64-bit mode: Intel ignores 66 prefix. + * No one uses these insns, reject any branch insns with such prefix. 
+ */ + for (i = 0; i < insn->prefixes.nbytes; i++) { + if (insn->prefixes.bytes[i] == 0x66) + return -ENOTSUPP; + } + auprobe->branch.opc1 = opc1; auprobe->branch.ilen = insn->length; auprobe->branch.offs = insn->immediate.value; -- cgit v1.2.3 From 73175d0d19657ec132cc24e8cf0e341e73c54868 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 19 Apr 2014 12:34:02 +0200 Subject: uprobes/x86: Add uprobe_init_insn(), kill validate_insn_{32,64}bits() validate_insn_32bits() and validate_insn_64bits() are very similar, turn them into the single uprobe_init_insn() which has the additional "bool x86_64" argument which can be passed to insn_init() and used to choose between good_insns_64/good_insns_32. Also kill UPROBE_FIX_NONE, it has no users. Note: the current code doesn't use ifdef's consistently, good_insns_64 depends on CONFIG_X86_64 but good_insns_32 is unconditional. This patch removes ifdef around good_insns_64, we will add it back later along with the similar one for good_insns_32. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 45 +++++++++++++-------------------------------- 1 file changed, 13 insertions(+), 32 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 3cf24a218196..b4aff6a70f4d 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -32,9 +32,6 @@ /* Post-execution fixups. */ -/* No fixup needed */ -#define UPROBE_FIX_NONE 0x0 - /* Adjust IP back to vicinity of actual insn */ #define UPROBE_FIX_IP 0x1 @@ -114,7 +111,6 @@ static volatile u32 good_2byte_insns[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; -#ifdef CONFIG_X86_64 /* Good-instruction tables for 64-bit apps */ static volatile u32 good_insns_64[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ @@ -138,7 +134,6 @@ static volatile u32 good_insns_64[256 / 32] = { /* ---------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; -#endif #undef W /* @@ -209,16 +204,22 @@ static bool is_prefix_bad(struct insn *insn) return false; } -static int validate_insn_32bits(struct arch_uprobe *auprobe, struct insn *insn) +static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool x86_64) { - insn_init(insn, auprobe->insn, false); + u32 volatile *good_insns; + + insn_init(insn, auprobe->insn, x86_64); - /* Skip good instruction prefixes; reject "bad" ones. */ insn_get_opcode(insn); if (is_prefix_bad(insn)) return -ENOTSUPP; - if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_32)) + if (x86_64) + good_insns = good_insns_64; + else + good_insns = good_insns_32; + + if (test_bit(OPCODE1(insn), (unsigned long *)good_insns)) return 0; if (insn->opcode.nbytes == 2) { @@ -355,30 +356,10 @@ handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long * } } -static int validate_insn_64bits(struct arch_uprobe *auprobe, struct insn *insn) -{ - insn_init(insn, auprobe->insn, true); - - /* Skip good instruction prefixes; reject "bad" ones. 
*/ - insn_get_opcode(insn); - if (is_prefix_bad(insn)) - return -ENOTSUPP; - - if (test_bit(OPCODE1(insn), (unsigned long *)good_insns_64)) - return 0; - - if (insn->opcode.nbytes == 2) { - if (test_bit(OPCODE2(insn), (unsigned long *)good_2byte_insns)) - return 0; - } - return -ENOTSUPP; -} - static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn) { - if (mm->context.ia32_compat) - return validate_insn_32bits(auprobe, insn); - return validate_insn_64bits(auprobe, insn); + bool x86_64 = !mm->context.ia32_compat; + return uprobe_init_insn(auprobe, insn, x86_64); } #else /* 32-bit: */ /* @@ -398,7 +379,7 @@ static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs * static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn) { - return validate_insn_32bits(auprobe, insn); + return uprobe_init_insn(auprobe, insn, false); } #endif /* CONFIG_X86_64 */ -- cgit v1.2.3 From 2ae1f49ae1978fedb6ad607e1f8b084aa9752f95 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 19 Apr 2014 14:03:05 +0200 Subject: uprobes/x86: Add is_64bit_mm(), kill validate_insn_bits() 1. Extract the ->ia32_compat check from the 64-bit validate_insn_bits() into the new helper, is_64bit_mm(); it will have more users. TODO: this check is actually wrong if the mm owner is an X32 task; we need another fix which changes set_personality_ia32(). TODO: even worse, the whole 64-or-32-bit logic is very broken and the fix is not simple; we need nontrivial changes in the core uprobes code. 2. Kill validate_insn_bits() and change its single caller to use uprobe_init_insn(..., is_64bit_mm(mm)). Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index b4aff6a70f4d..b3b25ddc04fb 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -231,6 +231,11 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool } #ifdef CONFIG_X86_64 +static inline bool is_64bit_mm(struct mm_struct *mm) +{ + return !config_enabled(CONFIG_IA32_EMULATION) || + !mm->context.ia32_compat; +} /* * If arch_uprobe->insn doesn't use rip-relative addressing, return
Otherwise, rewrite the instruction so that it accesses @@ -355,13 +360,11 @@ handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long * *correction += 4; } } - -static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn) +#else /* 32-bit: */ +static inline bool is_64bit_mm(struct mm_struct *mm) { - bool x86_64 = !mm->context.ia32_compat; - return uprobe_init_insn(auprobe, insn, x86_64); + return false; } -#else /* 32-bit: */ /* * No RIP-relative addressing on 32-bit */ @@ -376,11 +379,6 @@ static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs * long *correction) { } - -static int validate_insn_bits(struct arch_uprobe *auprobe, struct mm_struct *mm, struct insn *insn) -{ - return uprobe_init_insn(auprobe, insn, false); -} #endif /* CONFIG_X86_64 */ struct uprobe_xol_ops { @@ -625,7 +623,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, bool fix_ip = true, fix_call = false; int ret; - ret = validate_insn_bits(auprobe, mm, &insn); + ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm)); if (ret) return ret; -- cgit v1.2.3 From ff261964cfcfe49d73690ca29b0ba2853d9497e3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 19 Apr 2014 14:15:27 +0200 Subject: uprobes/x86: Shift "insn_complete" from branch_setup_xol_ops() to uprobe_init_insn() Change uprobe_init_insn() to make insn_complete() == T, this makes other insn_get_*() calls unnecessary. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index b3b25ddc04fb..98d7db50f425 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -209,8 +209,11 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool u32 volatile *good_insns; insn_init(insn, auprobe->insn, x86_64); + /* has the side-effect of processing the entire instruction */ + insn_get_length(insn); + if (WARN_ON_ONCE(!insn_complete(insn))) + return -ENOEXEC; - insn_get_opcode(insn); if (is_prefix_bad(insn)) return -ENOTSUPP; @@ -283,8 +286,6 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn) * is the immediate operand. */ cursor = auprobe->insn + insn_offset_modrm(insn); - insn_get_length(insn); - /* * Convert from rip-relative addressing to indirect addressing * via a scratch register. Change the r/m field from 0x5 (%rip) @@ -564,11 +565,6 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) u8 opc1 = OPCODE1(insn); int i; - /* has the side-effect of processing the entire instruction */ - insn_get_length(insn); - if (WARN_ON_ONCE(!insn_complete(insn))) - return -ENOEXEC; - switch (opc1) { case 0xeb: /* jmp 8 */ case 0xe9: /* jmp 32 */ @@ -654,7 +650,6 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, fix_ip = false; break; case 0xff: - insn_get_modrm(&insn); switch (MODRM_REG(&insn)) { case 2: case 3: /* call or lcall, indirect */ fix_call = true; -- cgit v1.2.3 From 8dbacad93a2a12adebcc717e6055b1bcc1739ab8 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 19 Apr 2014 16:07:15 +0200 Subject: uprobes/x86: Make good_insns_* depend on CONFIG_X86_* Add the suitable ifdef's around good_insns_* arrays. 
We do not want to add the ugly ifdef's into their only user, uprobe_init_insn(), so the "#else" branch simply defines them as NULL. This doesn't generate the extra code, gcc is smart enough, although the code is fine even if it could not detect that (without CONFIG_IA32_EMULATION) is_64bit_mm() is __builtin_constant_p(). The patch looks more complicated because it also moves good_insns_64 up close to good_insns_32. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 56 +++++++++++++++++++++++++++-------------------- 1 file changed, 32 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 98d7db50f425..892975b3c99c 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -64,6 +64,7 @@ * to keep gcc from statically optimizing it out, as variable_test_bit makes * some versions of gcc to think only *(unsigned long*) is used. */ +#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION) static volatile u32 good_insns_32[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ @@ -86,32 +87,12 @@ static volatile u32 good_insns_32[256 / 32] = { /* ---------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; - -/* Using this for both 64-bit and 32-bit apps */ -static volatile u32 good_2byte_insns[256 / 32] = { - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ - /* ---------------------------------------------- */ - W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */ - W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */ - W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */ - W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ - W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ - W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */ - W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */ - W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */ - W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ - W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ - W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */ - W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ - W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */ - W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ - W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */ - W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* f0 */ - /* ---------------------------------------------- */ - /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ -}; +#else +#define good_insns_32 NULL +#endif /* Good-instruction tables for 64-bit apps */ +#if defined(CONFIG_X86_64) static volatile u32 good_insns_64[256 / 32] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ---------------------------------------------- */ @@ -134,6 +115,33 @@ static volatile u32 good_insns_64[256 / 32] = { /* ---------------------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; +#else +#define good_insns_64 NULL +#endif + +/* Using this for both 64-bit and 32-bit apps */ +static volatile u32 good_2byte_insns[256 / 32] = { + /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ + /* ---------------------------------------------- */ + W(0x00, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1) | /* 00 */ + W(0x10, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* 10 */ + 
W(0x20, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1) | /* 20 */ + W(0x30, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) , /* 30 */ + W(0x40, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 40 */ + W(0x50, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 50 */ + W(0x60, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 60 */ + W(0x70, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1) , /* 70 */ + W(0x80, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* 80 */ + W(0x90, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* 90 */ + W(0xa0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1) | /* a0 */ + W(0xb0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1) , /* b0 */ + W(0xc0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* c0 */ + W(0xd0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) , /* d0 */ + W(0xe0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) | /* e0 */ + W(0xf0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0) /* f0 */ + /* ---------------------------------------------- */ + /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ +}; #undef W /* -- cgit v1.2.3 From b24dc8dace74708fd849312722090169c5da97d3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sat, 19 Apr 2014 18:10:09 +0200 Subject: uprobes/x86: Fix is_64bit_mm() with CONFIG_X86_X32 is_64bit_mm() assumes that mm->context.ia32_compat means the 32-bit instruction set, this is not true if the task is TIF_X32. Change set_personality_ia32() to initialize mm->context.ia32_compat by TIF_X32 or TIF_IA32 instead of 1. This allows to fix is_64bit_mm() without affecting other users, they all treat ia32_compat as "bool". TIF_ in ->ia32_compat looks a bit strange, but this is grep-friendly and avoids the new define's. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston Acked-by: Srikar Dronamraju --- arch/x86/kernel/process_64.c | 7 ++++--- arch/x86/kernel/uprobes.c | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c index 9c0280f93d05..9b53940981b7 100644 --- a/arch/x86/kernel/process_64.c +++ b/arch/x86/kernel/process_64.c @@ -413,12 +413,11 @@ void set_personality_ia32(bool x32) set_thread_flag(TIF_ADDR32); /* Mark the associated mm as containing 32-bit tasks. 
*/ - if (current->mm) - current->mm->context.ia32_compat = 1; - if (x32) { clear_thread_flag(TIF_IA32); set_thread_flag(TIF_X32); + if (current->mm) + current->mm->context.ia32_compat = TIF_X32; current->personality &= ~READ_IMPLIES_EXEC; /* is_compat_task() uses the presence of the x32 syscall bit flag to determine compat status */ @@ -426,6 +425,8 @@ void set_personality_ia32(bool x32) } else { set_thread_flag(TIF_IA32); clear_thread_flag(TIF_X32); + if (current->mm) + current->mm->context.ia32_compat = TIF_IA32; current->personality |= force_personality32; /* Prepare the first "return" to user space */ current_thread_info()->status |= TS_COMPAT; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 892975b3c99c..ecbffd16d090 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -245,7 +245,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool static inline bool is_64bit_mm(struct mm_struct *mm) { return !config_enabled(CONFIG_IA32_EMULATION) || - !mm->context.ia32_compat; + !(mm->context.ia32_compat == TIF_IA32); } /* * If arch_uprobe->insn doesn't use rip-relative addressing, return -- cgit v1.2.3 From dd91016dfc9ba9236cb0149984da3f0434278b49 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 22 Apr 2014 15:20:07 +0200 Subject: uprobes/x86: Don't change the task's state if ->pre_xol() fails Currently this doesn't matter, the only ->pre_xol() hook can't fail, but we need to fix arch_uprobe_pre_xol() anyway. If ->pre_xol() fails we should not change regs->ip/flags, we should just return the error to make restart actually possible. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston --- arch/x86/kernel/uprobes.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index ecbffd16d090..f4464b1b9435 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -687,6 +687,12 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; + if (auprobe->ops->pre_xol) { + int err = auprobe->ops->pre_xol(auprobe, regs); + if (err) + return err; + } + regs->ip = utask->xol_vaddr; utask->autask.saved_trap_nr = current->thread.trap_nr; current->thread.trap_nr = UPROBE_TRAP_NR; @@ -696,8 +702,6 @@ int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) if (test_tsk_thread_flag(current, TIF_BLOCKSTEP)) set_task_blockstep(current, false); - if (auprobe->ops->pre_xol) - return auprobe->ops->pre_xol(auprobe, regs); return 0; } -- cgit v1.2.3 From 588fbd613c3d8fa73e96720761d49f1d40d34d4c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 21 Apr 2014 16:58:17 +0200 Subject: uprobes/x86: Introduce uprobe_xol_ops->abort() and default_abort_op() arch_uprobe_abort_xol() calls handle_riprel_post_xol() even if auprobe->ops != default_xol_ops. This is fine correctness wise, only default_pre_xol_op() can set UPROBE_FIX_RIP_AX|UPROBE_FIX_RIP_CX and otherwise handle_riprel_post_xol() is nop. But this doesn't look clean and this doesn't allow us to move ->fixups into the union in arch_uprobe. Move this handle_riprel_post_xol() call into the new default_abort_op() hook and change arch_uprobe_abort_xol() accordingly. 
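In outline, the abort path becomes an optional per-ops callback instead of an unconditional fixup; a condensed sketch of the resulting shape (the exact hunks follow below — only default_xol_ops installs the hook, branch_xol_ops leaves it NULL):

	static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs)
	{
		/* Only single-stepped insns may need the rip-relative fixup. */
		handle_riprel_post_xol(auprobe, regs, NULL);
	}

	void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
	{
		struct uprobe_task *utask = current->utask;

		if (auprobe->ops->abort)	/* NULL for branch_xol_ops */
			auprobe->ops->abort(auprobe, regs);

		current->thread.trap_nr = utask->autask.saved_trap_nr;
		regs->ip = utask->vaddr;
		/* TF restore is unchanged, see the diff below */
	}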
Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston --- arch/x86/kernel/uprobes.c | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index f4464b1b9435..b3c2a92cce6c 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -394,6 +394,7 @@ struct uprobe_xol_ops { bool (*emulate)(struct arch_uprobe *, struct pt_regs *); int (*pre_xol)(struct arch_uprobe *, struct pt_regs *); int (*post_xol)(struct arch_uprobe *, struct pt_regs *); + void (*abort)(struct arch_uprobe *, struct pt_regs *); }; static inline int sizeof_long(void) @@ -444,9 +445,15 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs return 0; } +static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + handle_riprel_post_xol(auprobe, regs, NULL); +} + static struct uprobe_xol_ops default_xol_ops = { .pre_xol = default_pre_xol_op, .post_xol = default_post_xol_op, + .abort = default_abort_op, }; static bool branch_is_call(struct arch_uprobe *auprobe) @@ -820,10 +827,11 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; - current->thread.trap_nr = utask->autask.saved_trap_nr; - handle_riprel_post_xol(auprobe, regs, NULL); - instruction_pointer_set(regs, utask->vaddr); + if (auprobe->ops->abort) + auprobe->ops->abort(auprobe, regs); + current->thread.trap_nr = utask->autask.saved_trap_nr; + regs->ip = utask->vaddr; /* clear TF if it was set by us in arch_uprobe_pre_xol() */ if (!utask->autask.saved_tf) regs->flags &= ~X86_EFLAGS_TF; -- cgit v1.2.3 From 6ded5f3848bfd3227ee208aa38f8bf8d7209d4e3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 21 Apr 2014 18:28:02 +0200 Subject: uprobes/x86: Don't use arch_uprobe_abort_xol() in arch_uprobe_post_xol() 014940bad8e4 "uprobes/x86: Send SIGILL if arch_uprobe_post_xol() fails" changed arch_uprobe_post_xol() to use arch_uprobe_abort_xol() if ->post_xol fails. This was correct and helped to avoid the additional complications, we need to clear X86_EFLAGS_TF in this case. However, now that we have uprobe_xol_ops->abort() hook it would be better to avoid arch_uprobe_abort_xol() here. ->post_xol() should likely do what ->abort() does anyway, we should not do the same work twice. Currently only handle_riprel_post_xol() can be called twice, this is unnecessary but safe. Still this is not clean and can lead to the problems in future. Change arch_uprobe_post_xol() to clear X86_EFLAGS_TF and restore ->ip by hand and avoid arch_uprobe_abort_xol(). This temporary uglifies the usage of autask.saved_tf, we will cleanup this later. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston --- arch/x86/kernel/uprobes.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index b3c2a92cce6c..2efb93f96030 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -759,22 +759,24 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) struct uprobe_task *utask = current->utask; WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); + current->thread.trap_nr = utask->autask.saved_trap_nr; if (auprobe->ops->post_xol) { int err = auprobe->ops->post_xol(auprobe, regs); if (err) { - arch_uprobe_abort_xol(auprobe, regs); + if (!utask->autask.saved_tf) + regs->flags &= ~X86_EFLAGS_TF; /* - * Restart the probed insn. 
->post_xol() must ensure - * this is really possible if it returns -ERESTART. + * Restore ->ip for restart or post mortem analysis. + * ->post_xol() must not return -ERESTART unless this + * is really possible. */ + regs->ip = utask->vaddr; if (err == -ERESTART) return 0; return err; } } - - current->thread.trap_nr = utask->autask.saved_trap_nr; /* * arch_uprobe_pre_xol() doesn't save the state of TIF_BLOCKSTEP * so we can get an extra SIGTRAP if we do not clear TF. We need @@ -819,9 +821,8 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, /* * This function gets called when XOL instruction either gets trapped or - * the thread has a fatal signal, or if arch_uprobe_post_xol() failed. - * Reset the instruction pointer to its probed address for the potential - * restart or for post mortem analysis. + * the thread has a fatal signal. Reset the instruction pointer to its + * probed address for the potential restart or for post mortem analysis. */ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { -- cgit v1.2.3 From 220ef8dc9a7a63fe202aacd3fc61e5104f6dd98c Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 21 Apr 2014 20:39:56 +0200 Subject: uprobes/x86: Move UPROBE_FIX_SETF logic from arch_uprobe_post_xol() to default_post_xol_op() UPROBE_FIX_SETF is only needed to handle "popf" correctly but it is processed by the generic arch_uprobe_post_xol() code. This doesn't allows us to make ->fixups private for default_xol_ops. 1 Change default_post_xol_op(UPROBE_FIX_SETF) to set ->saved_tf = T. "popf" always reads the flags from stack, it doesn't matter if TF was set or not before single-step. Ignoring the naming, this is even more logical, "saved_tf" means "owned by application" and we do not own this flag after "popf". 2. Change arch_uprobe_post_xol() to save ->saved_tf into the local "bool send_sigtrap" before ->post_xol(). 3. Change arch_uprobe_post_xol() to ignore UPROBE_FIX_SETF and just check ->saved_tf after ->post_xol(). With this patch ->fixups and ->rip_rela_target_address are only used by default_xol_ops hooks, we are ready to remove them from the common part of arch_uprobe. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston --- arch/x86/kernel/uprobes.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 2efb93f96030..b2bca293fc57 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -441,6 +441,9 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs return -ERESTART; } } + /* popf; tell the caller to not touch TF */ + if (auprobe->fixups & UPROBE_FIX_SETF) + utask->autask.saved_tf = true; return 0; } @@ -757,15 +760,15 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *t) int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; + bool send_sigtrap = utask->autask.saved_tf; + int err = 0; WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR); current->thread.trap_nr = utask->autask.saved_trap_nr; if (auprobe->ops->post_xol) { - int err = auprobe->ops->post_xol(auprobe, regs); + err = auprobe->ops->post_xol(auprobe, regs); if (err) { - if (!utask->autask.saved_tf) - regs->flags &= ~X86_EFLAGS_TF; /* * Restore ->ip for restart or post mortem analysis. 
* ->post_xol() must not return -ERESTART unless this @@ -773,8 +776,8 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) */ regs->ip = utask->vaddr; if (err == -ERESTART) - return 0; - return err; + err = 0; + send_sigtrap = false; } } /* @@ -782,12 +785,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) * so we can get an extra SIGTRAP if we do not clear TF. We need * to examine the opcode to make it right. */ - if (utask->autask.saved_tf) + if (send_sigtrap) send_sig(SIGTRAP, current, 0); - else if (!(auprobe->fixups & UPROBE_FIX_SETF)) + + if (!utask->autask.saved_tf) regs->flags &= ~X86_EFLAGS_TF; - return 0; + return err; } /* callback routine for handling exceptions. */ -- cgit v1.2.3 From 97aa5cddbe9e01521137f337624469374e3cbde5 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Tue, 22 Apr 2014 16:20:55 +0200 Subject: uprobes/x86: Move default_xol_ops's data into arch_uprobe->def Finally we can move arch_uprobe->fixups/rip_rela_target_address into the new "def" struct and place this struct in the union, they are only used by default_xol_ops paths. The patch also renames rip_rela_target_address to riprel_target just to make this name shorter. Signed-off-by: Oleg Nesterov Reviewed-by: Jim Keniston --- arch/x86/include/asm/uprobes.h | 12 +++++++----- arch/x86/kernel/uprobes.c | 43 +++++++++++++++++++++--------------------- 2 files changed, 28 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index 93bee7b93854..72caff7afbde 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h @@ -41,18 +41,20 @@ struct arch_uprobe { u8 ixol[MAX_UINSN_BYTES]; }; - u16 fixups; const struct uprobe_xol_ops *ops; union { -#ifdef CONFIG_X86_64 - unsigned long rip_rela_target_address; -#endif struct { s32 offs; u8 ilen; u8 opc1; - } branch; + } branch; + struct { +#ifdef CONFIG_X86_64 + long riprel_target; +#endif + u16 fixups; + } def; }; }; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index b2bca293fc57..7824ce248f8f 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -251,10 +251,9 @@ static inline bool is_64bit_mm(struct mm_struct *mm) * If arch_uprobe->insn doesn't use rip-relative addressing, return * immediately. Otherwise, rewrite the instruction so that it accesses * its memory operand indirectly through a scratch register. Set - * arch_uprobe->fixups and arch_uprobe->rip_rela_target_address - * accordingly. (The contents of the scratch register will be saved - * before we single-step the modified instruction, and restored - * afterward.) + * def->fixups and def->riprel_target accordingly. (The contents of the + * scratch register will be saved before we single-step the modified + * instruction, and restored afterward). * * We do this because a rip-relative instruction can access only a * relatively small area (+/- 2 GB from the instruction), and the XOL @@ -308,18 +307,18 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn) * is NOT the register operand, so we use %rcx (register * #1) for the scratch register. */ - auprobe->fixups = UPROBE_FIX_RIP_CX; + auprobe->def.fixups = UPROBE_FIX_RIP_CX; /* Change modrm from 00 000 101 to 00 000 001. */ *cursor = 0x1; } else { /* Use %rax (register #0) for the scratch register. 
*/ - auprobe->fixups = UPROBE_FIX_RIP_AX; + auprobe->def.fixups = UPROBE_FIX_RIP_AX; /* Change modrm from 00 xxx 101 to 00 xxx 000 */ *cursor = (reg << 3); } /* Target address = address of next instruction + (signed) offset */ - auprobe->rip_rela_target_address = (long)insn->length + insn->displacement.value; + auprobe->def.riprel_target = (long)insn->length + insn->displacement.value; /* Displacement field is gone; slide immediate field (if any) over. */ if (insn->immediate.nbytes) { @@ -336,25 +335,25 @@ static void pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs, struct arch_uprobe_task *autask) { - if (auprobe->fixups & UPROBE_FIX_RIP_AX) { + if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) { autask->saved_scratch_register = regs->ax; regs->ax = current->utask->vaddr; - regs->ax += auprobe->rip_rela_target_address; - } else if (auprobe->fixups & UPROBE_FIX_RIP_CX) { + regs->ax += auprobe->def.riprel_target; + } else if (auprobe->def.fixups & UPROBE_FIX_RIP_CX) { autask->saved_scratch_register = regs->cx; regs->cx = current->utask->vaddr; - regs->cx += auprobe->rip_rela_target_address; + regs->cx += auprobe->def.riprel_target; } } static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction) { - if (auprobe->fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { + if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { struct arch_uprobe_task *autask; autask = ¤t->utask->autask; - if (auprobe->fixups & UPROBE_FIX_RIP_AX) + if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) regs->ax = autask->saved_scratch_register; else regs->cx = autask->saved_scratch_register; @@ -432,17 +431,17 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs long correction = (long)(utask->vaddr - utask->xol_vaddr); handle_riprel_post_xol(auprobe, regs, &correction); - if (auprobe->fixups & UPROBE_FIX_IP) + if (auprobe->def.fixups & UPROBE_FIX_IP) regs->ip += correction; - if (auprobe->fixups & UPROBE_FIX_CALL) { + if (auprobe->def.fixups & UPROBE_FIX_CALL) { if (adjust_ret_addr(regs->sp, correction)) { regs->sp += sizeof_long(); return -ERESTART; } } /* popf; tell the caller to not touch TF */ - if (auprobe->fixups & UPROBE_FIX_SETF) + if (auprobe->def.fixups & UPROBE_FIX_SETF) utask->autask.saved_tf = true; return 0; @@ -646,13 +645,13 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, return ret; /* - * Figure out which fixups arch_uprobe_post_xol() will need to perform, - * and annotate arch_uprobe->fixups accordingly. To start with, ->fixups - * is either zero or it reflects rip-related fixups. + * Figure out which fixups default_post_xol_op() will need to perform, + * and annotate def->fixups accordingly. To start with, ->fixups is + * either zero or it reflects rip-related fixups. 
*/ switch (OPCODE1(&insn)) { case 0x9d: /* popf */ - auprobe->fixups |= UPROBE_FIX_SETF; + auprobe->def.fixups |= UPROBE_FIX_SETF; break; case 0xc3: /* ret or lret -- ip is correct */ case 0xcb: @@ -680,9 +679,9 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, } if (fix_ip) - auprobe->fixups |= UPROBE_FIX_IP; + auprobe->def.fixups |= UPROBE_FIX_IP; if (fix_call) - auprobe->fixups |= UPROBE_FIX_CALL; + auprobe->def.fixups |= UPROBE_FIX_CALL; auprobe->ops = &default_xol_ops; return 0; -- cgit v1.2.3 From 78d9af4cd375880a574327210eb9dab572618364 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 24 Apr 2014 18:52:37 +0200 Subject: uprobes/x86: Cleanup the usage of arch_uprobe->def.fixups, make it u8 handle_riprel_insn() assumes that nobody else could modify ->fixups before. This is correct but fragile, change it to use "|=". Also make ->fixups u8, we are going to add the new members into the union. It is not clear why UPROBE_FIX_RIP_.X lived in the upper byte, redefine them so that they can fit into u8. Signed-off-by: Oleg Nesterov --- arch/x86/include/asm/uprobes.h | 2 +- arch/x86/kernel/uprobes.c | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index 72caff7afbde..9ce25ce04fee 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h @@ -53,7 +53,7 @@ struct arch_uprobe { #ifdef CONFIG_X86_64 long riprel_target; #endif - u16 fixups; + u8 fixups; } def; }; }; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 7824ce248f8f..a8e1d7e47001 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -33,16 +33,16 @@ /* Post-execution fixups. */ /* Adjust IP back to vicinity of actual insn */ -#define UPROBE_FIX_IP 0x1 +#define UPROBE_FIX_IP 0x01 /* Adjust the return address of a call insn */ -#define UPROBE_FIX_CALL 0x2 +#define UPROBE_FIX_CALL 0x02 /* Instruction will modify TF, don't change it */ -#define UPROBE_FIX_SETF 0x4 +#define UPROBE_FIX_SETF 0x04 -#define UPROBE_FIX_RIP_AX 0x8000 -#define UPROBE_FIX_RIP_CX 0x4000 +#define UPROBE_FIX_RIP_AX 0x08 +#define UPROBE_FIX_RIP_CX 0x10 #define UPROBE_TRAP_NR UINT_MAX @@ -307,12 +307,12 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn) * is NOT the register operand, so we use %rcx (register * #1) for the scratch register. */ - auprobe->def.fixups = UPROBE_FIX_RIP_CX; + auprobe->def.fixups |= UPROBE_FIX_RIP_CX; /* Change modrm from 00 000 101 to 00 000 001. */ *cursor = 0x1; } else { /* Use %rax (register #0) for the scratch register. */ - auprobe->def.fixups = UPROBE_FIX_RIP_AX; + auprobe->def.fixups |= UPROBE_FIX_RIP_AX; /* Change modrm from 00 xxx 101 to 00 xxx 000 */ *cursor = (reg << 3); } -- cgit v1.2.3 From 2b82cadffc4154a25c25d88a63c7fb3397cda9d6 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 24 Apr 2014 19:21:38 +0200 Subject: uprobes/x86: Introduce push_ret_address() Extract the "push return address" code from branch_emulate_op() into the new simple helper, push_ret_address(). It will have more users. 
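The helper's contract -- compute the new stack pointer, attempt the store, and commit regs->sp only on success -- can be sketched as a self-contained userspace analogue. The bounds check below stands in for copy_to_user() failing; apart from the push_ret_address() name, nothing here is kernel code.

#include <stdio.h>
#include <string.h>

/* Simulated user stack: grows downward inside this buffer. */
static unsigned char stack_mem[64];

struct regs { unsigned long sp; };

/* Write ip at sp - sizeof(long); update regs->sp only if the write
 * succeeded, so a fault leaves the register state untouched. */
static int push_ret_address(struct regs *regs, unsigned long ip)
{
	unsigned long new_sp = regs->sp - sizeof(long);

	if (new_sp < (unsigned long)stack_mem)	/* stand-in for -EFAULT */
		return -1;

	memcpy((void *)new_sp, &ip, sizeof(long));
	regs->sp = new_sp;
	return 0;
}

int main(void)
{
	struct regs regs = { .sp = (unsigned long)(stack_mem + sizeof(stack_mem)) };
	unsigned long ret;

	if (push_ret_address(&regs, 0x400123) == 0) {
		memcpy(&ret, (void *)regs.sp, sizeof(long));
		printf("pushed return address %#lx\n", ret);
	}
	return 0;
}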
Signed-off-by: Oleg Nesterov --- arch/x86/kernel/uprobes.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index a8e1d7e47001..df75913acfc0 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -407,6 +407,17 @@ static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) return 0; } +static int push_ret_address(struct pt_regs *regs, unsigned long ip) +{ + unsigned long new_sp = regs->sp - sizeof_long(); + + if (copy_to_user((void __user *)new_sp, &ip, sizeof_long())) + return -EFAULT; + + regs->sp = new_sp; + return 0; +} + /* * Adjust the return address pushed by a call insn executed out of line. */ @@ -517,7 +528,6 @@ static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs) unsigned long offs = (long)auprobe->branch.offs; if (branch_is_call(auprobe)) { - unsigned long new_sp = regs->sp - sizeof_long(); /* * If it fails we execute this (mangled, see the comment in * branch_clear_offset) insn out-of-line. In the likely case @@ -527,9 +537,8 @@ static bool branch_emulate_op(struct arch_uprobe *auprobe, struct pt_regs *regs) * * But there is corner case, see the comment in ->post_xol(). */ - if (copy_to_user((void __user *)new_sp, &new_ip, sizeof_long())) + if (push_ret_address(regs, new_ip)) return false; - regs->sp = new_sp; } else if (!check_jmp_cond(auprobe, regs)) { offs = 0; } -- cgit v1.2.3 From 1dc76e6eacef271230d9ff6fd0f91824bda03f44 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 25 Apr 2014 18:06:19 +0200 Subject: uprobes/x86: Kill adjust_ret_addr(), simplify UPROBE_FIX_CALL logic The only insn which could have both UPROBE_FIX_IP and UPROBE_FIX_CALL was 0xe8 "call relative", and now it is handled by branch_xol_ops. So we can change default_post_xol_op(UPROBE_FIX_CALL) to simply push the address of next insn == utask->vaddr + insn.length, just we need to record insn.length into the new auprobe->def.ilen member. Note: if/when we teach branch_xol_ops to support jcxz/loopz we can remove the "correction" logic, UPROBE_FIX_IP can use the same address. Signed-off-by: Oleg Nesterov --- arch/x86/include/asm/uprobes.h | 1 + arch/x86/kernel/uprobes.c | 24 +++--------------------- 2 files changed, 4 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index 9ce25ce04fee..a040d493a4f9 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h @@ -54,6 +54,7 @@ struct arch_uprobe { long riprel_target; #endif u8 fixups; + u8 ilen; } def; }; }; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index df75913acfc0..5bcce852628a 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -418,24 +418,6 @@ static int push_ret_address(struct pt_regs *regs, unsigned long ip) return 0; } -/* - * Adjust the return address pushed by a call insn executed out of line. 
- */ -static int adjust_ret_addr(unsigned long sp, long correction) -{ - int rasize = sizeof_long(); - long ra; - - if (copy_from_user(&ra, (void __user *)sp, rasize)) - return -EFAULT; - - ra += correction; - if (copy_to_user((void __user *)sp, &ra, rasize)) - return -EFAULT; - - return 0; -} - static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; @@ -446,10 +428,9 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs regs->ip += correction; if (auprobe->def.fixups & UPROBE_FIX_CALL) { - if (adjust_ret_addr(regs->sp, correction)) { - regs->sp += sizeof_long(); + regs->sp += sizeof_long(); + if (push_ret_address(regs, utask->vaddr + auprobe->def.ilen)) return -ERESTART; - } } /* popf; tell the caller to not touch TF */ if (auprobe->def.fixups & UPROBE_FIX_SETF) @@ -687,6 +668,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, handle_riprel_insn(auprobe, &insn); } + auprobe->def.ilen = insn.length; if (fix_ip) auprobe->def.fixups |= UPROBE_FIX_IP; if (fix_call) -- cgit v1.2.3 From 83cd591485e558ab70aed45ce7261ce3f5ee8746 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Fri, 25 Apr 2014 18:53:32 +0200 Subject: uprobes/x86: Cleanup the usage of UPROBE_FIX_IP/UPROBE_FIX_CALL Now that UPROBE_FIX_IP/UPROBE_FIX_CALL are mutually exclusive we can use a single "fix_ip_or_call" enum instead of 2 fix_* booleans. This way the logic looks more understandable and clean to me. While at it, join "case 0xea" with other "ip is correct" ret/lret cases. Also change default_post_xol_op() to use "else if" for the same reason. Signed-off-by: Oleg Nesterov --- arch/x86/kernel/uprobes.c | 27 +++++++++++---------------- 1 file changed, 11 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 5bcce852628a..d2792e884d54 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -424,10 +424,9 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs long correction = (long)(utask->vaddr - utask->xol_vaddr); handle_riprel_post_xol(auprobe, regs, &correction); - if (auprobe->def.fixups & UPROBE_FIX_IP) + if (auprobe->def.fixups & UPROBE_FIX_IP) { regs->ip += correction; - - if (auprobe->def.fixups & UPROBE_FIX_CALL) { + } else if (auprobe->def.fixups & UPROBE_FIX_CALL) { regs->sp += sizeof_long(); if (push_ret_address(regs, utask->vaddr + auprobe->def.ilen)) return -ERESTART; @@ -623,7 +622,7 @@ static int branch_setup_xol_ops(struct arch_uprobe *auprobe, struct insn *insn) int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long addr) { struct insn insn; - bool fix_ip = true, fix_call = false; + u8 fix_ip_or_call = UPROBE_FIX_IP; int ret; ret = uprobe_init_insn(auprobe, &insn, is_64bit_mm(mm)); @@ -647,21 +646,20 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, case 0xcb: case 0xc2: case 0xca: - fix_ip = false; + case 0xea: /* jmp absolute -- ip is correct */ + fix_ip_or_call = 0; break; case 0x9a: /* call absolute - Fix return addr, not ip */ - fix_call = true; - fix_ip = false; - break; - case 0xea: /* jmp absolute -- ip is correct */ - fix_ip = false; + fix_ip_or_call = UPROBE_FIX_CALL; break; case 0xff: switch (MODRM_REG(&insn)) { case 2: case 3: /* call or lcall, indirect */ - fix_call = true; + fix_ip_or_call = UPROBE_FIX_CALL; + break; case 4: case 5: /* jmp or ljmp, indirect */ - fix_ip = 
false; + fix_ip_or_call = 0; + break; } /* fall through */ default: @@ -669,10 +667,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, } auprobe->def.ilen = insn.length; - if (fix_ip) - auprobe->def.fixups |= UPROBE_FIX_IP; - if (fix_call) - auprobe->def.fixups |= UPROBE_FIX_CALL; + auprobe->def.fixups |= fix_ip_or_call; auprobe->ops = &default_xol_ops; return 0; -- cgit v1.2.3 From 1475ee7fadafc6d0c194f2f4cbdae10ed04b9580 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 27 Apr 2014 16:31:59 +0200 Subject: uprobes/x86: Rename *riprel* helpers to make the naming consistent handle_riprel_insn(), pre_xol_rip_insn() and handle_riprel_post_xol() look confusing and inconsistent. Rename them into riprel_analyze(), riprel_pre_xol(), and riprel_post_xol() respectively. No changes in compiled code. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 24 +++++++++++------------- 1 file changed, 11 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index d2792e884d54..187be0e15e1d 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -268,8 +268,7 @@ static inline bool is_64bit_mm(struct mm_struct *mm) * - There's never a SIB byte. * - The displacement is always 4 bytes. */ -static void -handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn) +static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) { u8 *cursor; u8 reg; @@ -331,8 +330,7 @@ handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn) * If we're emulating a rip-relative instruction, save the contents * of the scratch register and store the target address in that register. */ -static void -pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs, +static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, struct arch_uprobe_task *autask) { if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) { @@ -346,8 +344,8 @@ pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs, } } -static void -handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction) +static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, + long *correction) { if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { struct arch_uprobe_task *autask; @@ -376,14 +374,14 @@ static inline bool is_64bit_mm(struct mm_struct *mm) /* * No RIP-relative addressing on 32-bit */ -static void handle_riprel_insn(struct arch_uprobe *auprobe, struct insn *insn) +static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) { } -static void pre_xol_rip_insn(struct arch_uprobe *auprobe, struct pt_regs *regs, +static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, struct arch_uprobe_task *autask) { } -static void handle_riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, +static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction) { } @@ -403,7 +401,7 @@ static inline int sizeof_long(void) static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { - pre_xol_rip_insn(auprobe, regs, ¤t->utask->autask); + riprel_pre_xol(auprobe, regs, ¤t->utask->autask); return 0; } @@ -423,7 +421,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs struct uprobe_task *utask = current->utask; long correction = (long)(utask->vaddr - utask->xol_vaddr); - 
handle_riprel_post_xol(auprobe, regs, &correction); + riprel_post_xol(auprobe, regs, &correction); if (auprobe->def.fixups & UPROBE_FIX_IP) { regs->ip += correction; } else if (auprobe->def.fixups & UPROBE_FIX_CALL) { @@ -440,7 +438,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { - handle_riprel_post_xol(auprobe, regs, NULL); + riprel_post_xol(auprobe, regs, NULL); } static struct uprobe_xol_ops default_xol_ops = { @@ -663,7 +661,7 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, } /* fall through */ default: - handle_riprel_insn(auprobe, &insn); + riprel_analyze(auprobe, &insn); } auprobe->def.ilen = insn.length; -- cgit v1.2.3 From 7f55e82bacaaa2c41b8e14d6bc78129b096b67b8 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 27 Apr 2014 17:00:46 +0200 Subject: uprobes/x86: Kill the "autask" arg of riprel_pre_xol() default_pre_xol_op() passes ¤t->utask->autask to riprel_pre_xol() and this is just ugly because it still needs to load current->utask to read ->vaddr. Remove this argument, change riprel_pre_xol() to use current->utask. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 187be0e15e1d..5df1bca7c2bc 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -330,16 +330,17 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) * If we're emulating a rip-relative instruction, save the contents * of the scratch register and store the target address in that register. */ -static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, - struct arch_uprobe_task *autask) +static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { + struct uprobe_task *utask = current->utask; + if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) { - autask->saved_scratch_register = regs->ax; - regs->ax = current->utask->vaddr; + utask->autask.saved_scratch_register = regs->ax; + regs->ax = utask->vaddr; regs->ax += auprobe->def.riprel_target; } else if (auprobe->def.fixups & UPROBE_FIX_RIP_CX) { - autask->saved_scratch_register = regs->cx; - regs->cx = current->utask->vaddr; + utask->autask.saved_scratch_register = regs->cx; + regs->cx = utask->vaddr; regs->cx += auprobe->def.riprel_target; } } @@ -377,8 +378,7 @@ static inline bool is_64bit_mm(struct mm_struct *mm) static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) { } -static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, - struct arch_uprobe_task *autask) +static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { } static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, @@ -401,7 +401,7 @@ static inline int sizeof_long(void) static int default_pre_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { - riprel_pre_xol(auprobe, regs, ¤t->utask->autask); + riprel_pre_xol(auprobe, regs); return 0; } -- cgit v1.2.3 From c90a6950120a7e45f31a22653fe6543507ae64d0 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 27 Apr 2014 18:13:31 +0200 Subject: uprobes/x86: Simplify riprel_{pre,post}_xol() and make them similar Ignoring the "correction" logic riprel_pre_xol() and riprel_post_xol() are very similar but look quite differently. 1. 
Add the "UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX" check at the start of riprel_pre_xol(), like the same check in riprel_post_xol(). 2. Add the trivial scratch_reg() helper which returns the address of scratch register pre_xol/post_xol need to change. 3. Change these functions to use the new helper and avoid copy-and-paste under if/else branches. Signed-off-by: Oleg Nesterov Acked-by: Srikar Dronamraju --- arch/x86/kernel/uprobes.c | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 5df1bca7c2bc..2ebadb252093 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -326,22 +326,24 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) } } +static inline unsigned long * +scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs) +{ + return (auprobe->def.fixups & UPROBE_FIX_RIP_AX) ? ®s->ax : ®s->cx; +} + /* * If we're emulating a rip-relative instruction, save the contents * of the scratch register and store the target address in that register. */ static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { - struct uprobe_task *utask = current->utask; + if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { + struct uprobe_task *utask = current->utask; + unsigned long *sr = scratch_reg(auprobe, regs); - if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) { - utask->autask.saved_scratch_register = regs->ax; - regs->ax = utask->vaddr; - regs->ax += auprobe->def.riprel_target; - } else if (auprobe->def.fixups & UPROBE_FIX_RIP_CX) { - utask->autask.saved_scratch_register = regs->cx; - regs->cx = utask->vaddr; - regs->cx += auprobe->def.riprel_target; + utask->autask.saved_scratch_register = *sr; + *sr = utask->vaddr + auprobe->def.riprel_target; } } @@ -349,14 +351,10 @@ static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, long *correction) { if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { - struct arch_uprobe_task *autask; - - autask = ¤t->utask->autask; - if (auprobe->def.fixups & UPROBE_FIX_RIP_AX) - regs->ax = autask->saved_scratch_register; - else - regs->cx = autask->saved_scratch_register; + struct uprobe_task *utask = current->utask; + unsigned long *sr = scratch_reg(auprobe, regs); + *sr = utask->autask.saved_scratch_register; /* * The original instruction includes a displacement, and so * is 4 bytes longer than what we've just single-stepped. -- cgit v1.2.3 From 3af1ea5a2e8ff7ac7ffea3cff1f93914f3cc211d Mon Sep 17 00:00:00 2001 From: Wang Sheng-Hui Date: Tue, 29 Apr 2014 15:39:45 +0800 Subject: tile: use BOOTMEM_DEFAULT instead of magic number 0 for reserve_bootmem flags Use macro flag BOOTMEM_DEFAULT instead of magic number 0 for reserve_bootmem. Signed-off-by: Wang Sheng-Hui Signed-off-by: Chris Metcalf --- arch/tile/kernel/setup.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 74c91729a62a..00732474fb55 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -691,7 +691,7 @@ static void __init setup_bootmem_allocator(void) /* Reserve any memory excluded by "memmap" arguments. 
*/ for (i = 0; i < memmap_nr; ++i) { struct memmap_entry *m = &memmap_map[i]; - reserve_bootmem(m->addr, m->size, 0); + reserve_bootmem(m->addr, m->size, BOOTMEM_DEFAULT); } #ifdef CONFIG_BLK_DEV_INITRD @@ -715,7 +715,8 @@ static void __init setup_bootmem_allocator(void) #ifdef CONFIG_KEXEC if (crashk_res.start != crashk_res.end) - reserve_bootmem(crashk_res.start, resource_size(&crashk_res), 0); + reserve_bootmem(crashk_res.start, resource_size(&crashk_res), + BOOTMEM_DEFAULT); #endif } -- cgit v1.2.3 From 0987a6ef94238097a380aa5e8a69a74393baa5be Mon Sep 17 00:00:00 2001 From: George Cherian Date: Fri, 2 May 2014 12:01:59 +0530 Subject: ARM: dts: am33xx: Add clock names for cpsw and cpts Add CPSW fck and CPTS clock and clock names Signed-off-by: George Cherian Signed-off-by: David S. Miller --- arch/arm/boot/dts/am33xx.dtsi | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi index 9770e35f2536..d1e2b363f927 100644 --- a/arch/arm/boot/dts/am33xx.dtsi +++ b/arch/arm/boot/dts/am33xx.dtsi @@ -665,6 +665,8 @@ mac: ethernet@4a100000 { compatible = "ti,cpsw"; ti,hwmods = "cpgmac0"; + clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>; + clock-names = "fck", "cpts"; cpdma_channels = <8>; ale_entries = <1024>; bd_ram_size = <0x2000>; -- cgit v1.2.3 From de21b26e51d1ab35696b99a29afc30582cfda031 Mon Sep 17 00:00:00 2001 From: George Cherian Date: Fri, 2 May 2014 12:02:04 +0530 Subject: ARM: dts: am4372: Add clock names for cpsw and cpts Add CPSW fck and CPTS clock and clock names for AM4372 Signed-off-by: George Cherian Signed-off-by: David S. Miller --- arch/arm/boot/dts/am4372.dtsi | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index 36d523a26831..c2779f653020 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -489,6 +489,8 @@ #address-cells = <1>; #size-cells = <1>; ti,hwmods = "cpgmac0"; + clocks = <&cpsw_125mhz_gclk>, <&cpsw_cpts_rft_clk>; + clock-names = "fck", "cpts"; status = "disabled"; cpdma_channels = <8>; ale_entries = <1024>; -- cgit v1.2.3 From a278534406ab13e4ecbca8941eb4c104a5971cd7 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 2 May 2014 16:28:15 -0700 Subject: x86_64: csum_add for x86_64 Add csum_add function for x86_64. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- arch/x86/include/asm/checksum_64.h | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h index e6fd8a026c7b..3581761bd86c 100644 --- a/arch/x86/include/asm/checksum_64.h +++ b/arch/x86/include/asm/checksum_64.h @@ -188,4 +188,11 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b) return a; } +#define HAVE_ARCH_CSUM_ADD +static inline __wsum csum_add(__wsum csum, __wsum addend) +{ + return (__force __wsum)add32_with_carry((__force unsigned)csum, + (__force unsigned)addend); +} + #endif /* _ASM_X86_CHECKSUM_64_H */ -- cgit v1.2.3 From 4405b4d635aa2c5c7eb8873696b54811531e0d08 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 2 May 2014 16:28:40 -0700 Subject: net: Change x86_64 add32_with_carry to allow memory operand Note add32_with_carry(a, b) is suboptimal, as it forces a and b in registers. b could be a memory or a register operand. Signed-off-by: Tom Herbert Signed-off-by: David S. 
Miller --- arch/x86/include/asm/checksum_64.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h index 3581761bd86c..cd00e1774491 100644 --- a/arch/x86/include/asm/checksum_64.h +++ b/arch/x86/include/asm/checksum_64.h @@ -184,7 +184,7 @@ static inline unsigned add32_with_carry(unsigned a, unsigned b) asm("addl %2,%0\n\t" "adcl $0,%0" : "=r" (a) - : "0" (a), "r" (b)); + : "0" (a), "rm" (b)); return a; } -- cgit v1.2.3 From 20fce54fa74a55ad1e821b61612393d7159b6af8 Mon Sep 17 00:00:00 2001 From: Tom Herbert Date: Fri, 2 May 2014 16:28:54 -0700 Subject: sparc: csum_add for Sparc versions. Signed-off-by: Tom Herbert Signed-off-by: David S. Miller --- arch/sparc/include/asm/checksum_32.h | 12 ++++++++++++ arch/sparc/include/asm/checksum_64.h | 12 ++++++++++++ 2 files changed, 24 insertions(+) (limited to 'arch') diff --git a/arch/sparc/include/asm/checksum_32.h b/arch/sparc/include/asm/checksum_32.h index bdbda1453aa9..04471dc64847 100644 --- a/arch/sparc/include/asm/checksum_32.h +++ b/arch/sparc/include/asm/checksum_32.h @@ -238,4 +238,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len) return csum_fold(csum_partial(buff, len, 0)); } +#define HAVE_ARCH_CSUM_ADD +static inline __wsum csum_add(__wsum csum, __wsum addend) +{ + __asm__ __volatile__( + "addcc %0, %1, %0\n" + "addx %0, %%g0, %0" + : "=r" (csum) + : "r" (addend), "0" (csum)); + + return csum; +} + #endif /* !(__SPARC_CHECKSUM_H) */ diff --git a/arch/sparc/include/asm/checksum_64.h b/arch/sparc/include/asm/checksum_64.h index 019b9615e43c..2ff81ae8f3af 100644 --- a/arch/sparc/include/asm/checksum_64.h +++ b/arch/sparc/include/asm/checksum_64.h @@ -164,4 +164,16 @@ static inline __sum16 ip_compute_csum(const void *buff, int len) return csum_fold(csum_partial(buff, len, 0)); } +#define HAVE_ARCH_CSUM_ADD +static inline __wsum csum_add(__wsum csum, __wsum addend) +{ + __asm__ __volatile__( + "addcc %0, %1, %0\n" + "addx %0, %%g0, %0" + : "=r" (csum) + : "r" (addend), "0" (csum)); + + return csum; +} + #endif /* !(__SPARC64_CHECKSUM_H) */ -- cgit v1.2.3 From a4406f165faa222a0d2c5e660473bb6822850a55 Mon Sep 17 00:00:00 2001 From: Alexandre Courbot Date: Thu, 27 Feb 2014 14:53:33 +0900 Subject: ARM: SAMSUNG: remove GPIO flags in dev-backlight The pwm-backlight driver is moving to use the gpiod interface, which has its own mapping mechanism for platform data GPIOs. These mappings carry GPIO properties like active low so they don't have to be explicitly handled by GPIO consumers. Because of this change, the enable_gpio_flags member of platform_pwm_backlight_data is going away. dev-backlight was passing this member, but had no user making use of it, so it can safely be removed. Further GPIOs used by pwm-backlight are expected to be defined using the mechanisms provided by the gpiod API. 
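For context, with the gpiod interface a board file expresses the same information through a lookup table, where the polarity travels with the mapping instead of through enable_gpio_flags. A sketch under assumed names -- the "gpiochip0" label, hardware offset 5 and the dev_id below are placeholders, not taken from any real board:

#include <linux/gpio/machine.h>

/* Map the backlight "enable" GPIO; its active-low polarity is carried
 * by the lookup entry itself, so the pwm-backlight consumer never sees
 * a raw GPIO flag. */
static struct gpiod_lookup_table bl_gpios_table = {
	.dev_id = "pwm-backlight",
	.table = {
		GPIO_LOOKUP("gpiochip0", 5, "enable", GPIO_ACTIVE_LOW),
		{ },
	},
};

static int __init board_backlight_init(void)
{
	gpiod_add_lookup_table(&bl_gpios_table);
	return 0;
}

The consumer then requests the descriptor by its "enable" function name; gpiod_set_value(desc, 1) means "enable" regardless of how the line is wired.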
Signed-off-by: Alexandre Courbot Reviewed-by: Jingoo Han Acked-by: Kukjin Kim Signed-off-by: Thierry Reding --- arch/arm/plat-samsung/dev-backlight.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/plat-samsung/dev-backlight.c b/arch/arm/plat-samsung/dev-backlight.c index be4ad0b21c08..2157c5b539e6 100644 --- a/arch/arm/plat-samsung/dev-backlight.c +++ b/arch/arm/plat-samsung/dev-backlight.c @@ -124,8 +124,6 @@ void __init samsung_bl_set(struct samsung_bl_gpio_info *gpio_info, samsung_bl_data->pwm_period_ns = bl_data->pwm_period_ns; if (bl_data->enable_gpio >= 0) samsung_bl_data->enable_gpio = bl_data->enable_gpio; - if (bl_data->enable_gpio_flags) - samsung_bl_data->enable_gpio_flags = bl_data->enable_gpio_flags; if (bl_data->init) samsung_bl_data->init = bl_data->init; if (bl_data->notify) -- cgit v1.2.3 From 3e3b2c39081c7f22f300da89b6e922a4dd2899d3 Mon Sep 17 00:00:00 2001 From: Damien Lespiau Date: Thu, 8 May 2014 22:19:41 +0300 Subject: x86/gpu: Implement stolen memory size early quirk for CHV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CHV uses the same bits as SNB/VLV to code the Graphics Mode Select field (GFX stolen memory size) with the addition of finer granularity modes: 4MB increments from 0x11 (8MB) to 0x1d. Values strictly above 0x1d are either reserved or not supported. v2: 4MB increments, not 8MB. 32MB has been omitted from the list of new values (Ville Syrjälä) v3: Also correctly interpret GGMS (GTT Graphics Memory Size) (Ville Syrjälä) v4: Don't assign a value that needs 20bits or more to a u16 (Rafael Barbalho) [vsyrjala: v5: Split from i915 changes and add chv_stolen_funcs] Cc: Ingo Molnar Cc: H. Peter Anvin Reviewed-by: Jani Nikula Reviewed-by: Ville Syrjälä Reviewed-by: Rafael Barbalho Tested-by: Rafael Barbalho Signed-off-by: Damien Lespiau Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- arch/x86/kernel/early-quirks.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 6e2537c32190..832357541787 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -428,6 +428,26 @@ static size_t gen8_stolen_size(int num, int slot, int func) return gmch_ctrl << 25; /* 32 MB units */ } +static size_t __init chv_stolen_size(int num, int slot, int func) +{ + u16 gmch_ctrl; + + gmch_ctrl = read_pci_config_16(num, slot, func, SNB_GMCH_CTRL); + gmch_ctrl >>= SNB_GMCH_GMS_SHIFT; + gmch_ctrl &= SNB_GMCH_GMS_MASK; + + /* + * 0x0 to 0x10: 32MB increments starting at 0MB + * 0x11 to 0x16: 4MB increments starting at 8MB + * 0x17 to 0x1d: 4MB increments start at 36MB + */ + if (gmch_ctrl < 0x11) + return gmch_ctrl << 25; + else if (gmch_ctrl < 0x17) + return (gmch_ctrl - 0x11 + 2) << 22; + else + return (gmch_ctrl - 0x17 + 9) << 22; +} struct intel_stolen_funcs { size_t (*size)(int num, int slot, int func); @@ -469,6 +489,11 @@ static const struct intel_stolen_funcs gen8_stolen_funcs = { .size = gen8_stolen_size, }; +static const struct intel_stolen_funcs chv_stolen_funcs = { + .base = intel_stolen_base, + .size = chv_stolen_size, +}; + static struct pci_device_id intel_stolen_ids[] __initdata = { INTEL_I830_IDS(&i830_stolen_funcs), INTEL_I845G_IDS(&i845_stolen_funcs), @@ -495,7 +520,8 @@ static struct pci_device_id intel_stolen_ids[] __initdata = { INTEL_HSW_D_IDS(&gen6_stolen_funcs), INTEL_HSW_M_IDS(&gen6_stolen_funcs), 
INTEL_BDW_M_IDS(&gen8_stolen_funcs), - INTEL_BDW_D_IDS(&gen8_stolen_funcs) + INTEL_BDW_D_IDS(&gen8_stolen_funcs), + INTEL_CHV_IDS(&chv_stolen_funcs), }; static void __init intel_graphics_stolen(int num, int slot, int func) -- cgit v1.2.3 From 36dfcea47a862421154e9943ada5f8147bcbd43c Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 8 May 2014 22:19:42 +0300 Subject: x86/gpu: Sprinkle const, __init and __initconst to stolen memory quirks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit gen8_stolen_size() is missing __init, so add it. Also all the intel_stolen_funcs structures can be marked __initconst. intel_stolen_ids[] can also be made const if we replace the __initdata with __initconst. Cc: Ingo Molnar Cc: H. Peter Anvin Signed-off-by: Ville Syrjälä Signed-off-by: Daniel Vetter --- arch/x86/kernel/early-quirks.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index 832357541787..f96098fda1a2 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -418,7 +418,7 @@ static size_t __init gen6_stolen_size(int num, int slot, int func) return gmch_ctrl << 25; /* 32 MB units */ } -static size_t gen8_stolen_size(int num, int slot, int func) +static size_t __init gen8_stolen_size(int num, int slot, int func) { u16 gmch_ctrl; @@ -454,47 +454,47 @@ struct intel_stolen_funcs { u32 (*base)(int num, int slot, int func, size_t size); }; -static const struct intel_stolen_funcs i830_stolen_funcs = { +static const struct intel_stolen_funcs i830_stolen_funcs __initconst = { .base = i830_stolen_base, .size = i830_stolen_size, }; -static const struct intel_stolen_funcs i845_stolen_funcs = { +static const struct intel_stolen_funcs i845_stolen_funcs __initconst = { .base = i845_stolen_base, .size = i830_stolen_size, }; -static const struct intel_stolen_funcs i85x_stolen_funcs = { +static const struct intel_stolen_funcs i85x_stolen_funcs __initconst = { .base = i85x_stolen_base, .size = gen3_stolen_size, }; -static const struct intel_stolen_funcs i865_stolen_funcs = { +static const struct intel_stolen_funcs i865_stolen_funcs __initconst = { .base = i865_stolen_base, .size = gen3_stolen_size, }; -static const struct intel_stolen_funcs gen3_stolen_funcs = { +static const struct intel_stolen_funcs gen3_stolen_funcs __initconst = { .base = intel_stolen_base, .size = gen3_stolen_size, }; -static const struct intel_stolen_funcs gen6_stolen_funcs = { +static const struct intel_stolen_funcs gen6_stolen_funcs __initconst = { .base = intel_stolen_base, .size = gen6_stolen_size, }; -static const struct intel_stolen_funcs gen8_stolen_funcs = { +static const struct intel_stolen_funcs gen8_stolen_funcs __initconst = { .base = intel_stolen_base, .size = gen8_stolen_size, }; -static const struct intel_stolen_funcs chv_stolen_funcs = { +static const struct intel_stolen_funcs chv_stolen_funcs __initconst = { .base = intel_stolen_base, .size = chv_stolen_size, }; -static struct pci_device_id intel_stolen_ids[] __initdata = { +static const struct pci_device_id intel_stolen_ids[] __initconst = { INTEL_I830_IDS(&i830_stolen_funcs), INTEL_I845G_IDS(&i845_stolen_funcs), INTEL_I85X_IDS(&i85x_stolen_funcs), -- cgit v1.2.3 From e540e835f89c3cfa0eca5d909047224c78c874a8 Mon Sep 17 00:00:00 2001 From: Wang Sheng-Hui Date: Wed, 7 May 2014 15:28:12 +0800 Subject: tile: cleanup the comment in init_pgprot In tile vmlinux, the rodata area start after 
the _sdata. The rodata area is included between [_sdata, __end_rodata), and is handled at an earlier point. The page walk starts at __end_rodata, not _sdata. Signed-off-by: Wang Sheng-Hui Signed-off-by: Chris Metcalf --- arch/tile/mm/init.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c index 0fa1acfac79a..7ba1dc3d41fa 100644 --- a/arch/tile/mm/init.c +++ b/arch/tile/mm/init.c @@ -273,9 +273,9 @@ static pgprot_t __init init_pgprot(ulong address) /* * Otherwise we just hand out consecutive cpus. To avoid * requiring this function to hold state, we just walk forward from - * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach - * the requested address, while walking cpu home around kdata_mask. - * This is typically no more than a dozen or so iterations. + * __end_rodata by PAGE_SIZE, skipping the readonly and init data, to + * reach the requested address, while walking cpu home around + * kdata_mask. This is typically no more than a dozen or so iterations. */ page = (((ulong)__end_rodata) + PAGE_SIZE - 1) & PAGE_MASK; BUG_ON(address < page || address >= (ulong)_end); -- cgit v1.2.3 From 50204c6f6dd01b5bce1b53e0b003d01849455512 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Thu, 1 May 2014 16:52:46 +0200 Subject: uprobes/x86: Simplify rip-relative handling It is possible to replace rip-relative addressing mode with addressing mode of the same length: (reg+disp32). This eliminates the need to fix up immediate and correct for changing instruction length. And we can kill arch_uprobe->def.riprel_target. Signed-off-by: Denys Vlasenko Reviewed-by: Jim Keniston Signed-off-by: Oleg Nesterov --- arch/x86/include/asm/uprobes.h | 3 -- arch/x86/kernel/uprobes.c | 71 ++++++++++++++++++------------------------ 2 files changed, 30 insertions(+), 44 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index a040d493a4f9..7be3c079e389 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h @@ -50,9 +50,6 @@ struct arch_uprobe { u8 opc1; } branch; struct { -#ifdef CONFIG_X86_64 - long riprel_target; -#endif u8 fixups; u8 ilen; } def; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 2ebadb252093..31dcb4d5ea46 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -251,9 +251,9 @@ static inline bool is_64bit_mm(struct mm_struct *mm) * If arch_uprobe->insn doesn't use rip-relative addressing, return * immediately. Otherwise, rewrite the instruction so that it accesses * its memory operand indirectly through a scratch register. Set - * def->fixups and def->riprel_target accordingly. (The contents of the - * scratch register will be saved before we single-step the modified - * instruction, and restored afterward). + * def->fixups accordingly. (The contents of the scratch register + * will be saved before we single-step the modified instruction, + * and restored afterward). * * We do this because a rip-relative instruction can access only a * relatively small area (+/- 2 GB from the instruction), and the XOL @@ -264,9 +264,12 @@ static inline bool is_64bit_mm(struct mm_struct *mm) * * Some useful facts about rip-relative instructions: * - * - There's always a modrm byte. + * - There's always a modrm byte with bit layout "00 reg 101". * - There's never a SIB byte. * - The displacement is always 4 bytes. 
+ * - REX.B=1 bit in REX prefix, which normally extends r/m field, + * has no effect on rip-relative mode. It doesn't make modrm byte + * with r/m=101 refer to register 1101 = R13. */ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) { @@ -293,9 +296,8 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) */ cursor = auprobe->insn + insn_offset_modrm(insn); /* - * Convert from rip-relative addressing to indirect addressing - * via a scratch register. Change the r/m field from 0x5 (%rip) - * to 0x0 (%rax) or 0x1 (%rcx), and squeeze out the offset field. + * Convert from rip-relative addressing + * to register-relative addressing via a scratch register. */ reg = MODRM_REG(insn); if (reg == 0) { @@ -307,22 +309,21 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) * #1) for the scratch register. */ auprobe->def.fixups |= UPROBE_FIX_RIP_CX; - /* Change modrm from 00 000 101 to 00 000 001. */ - *cursor = 0x1; + /* + * Change modrm from "00 000 101" to "10 000 001". Example: + * 89 05 disp32 mov %eax,disp32(%rip) becomes + * 89 81 disp32 mov %eax,disp32(%rcx) + */ + *cursor = 0x81; } else { /* Use %rax (register #0) for the scratch register. */ auprobe->def.fixups |= UPROBE_FIX_RIP_AX; - /* Change modrm from 00 xxx 101 to 00 xxx 000 */ - *cursor = (reg << 3); - } - - /* Target address = address of next instruction + (signed) offset */ - auprobe->def.riprel_target = (long)insn->length + insn->displacement.value; - - /* Displacement field is gone; slide immediate field (if any) over. */ - if (insn->immediate.nbytes) { - cursor++; - memmove(cursor, cursor + insn->displacement.nbytes, insn->immediate.nbytes); + /* + * Change modrm from "00 reg 101" to "10 reg 000". Example: + * 89 1d disp32 mov %edx,disp32(%rip) becomes + * 89 98 disp32 mov %edx,disp32(%rax) + */ + *cursor = (reg << 3) | 0x80; } } @@ -343,26 +344,17 @@ static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) unsigned long *sr = scratch_reg(auprobe, regs); utask->autask.saved_scratch_register = *sr; - *sr = utask->vaddr + auprobe->def.riprel_target; + *sr = utask->vaddr + auprobe->def.ilen; } } -static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, - long *correction) +static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { struct uprobe_task *utask = current->utask; unsigned long *sr = scratch_reg(auprobe, regs); *sr = utask->autask.saved_scratch_register; - /* - * The original instruction includes a displacement, and so - * is 4 bytes longer than what we've just single-stepped. - * Caller may need to apply other fixups to handle stuff - * like "jmpq *...(%rip)" and "callq *...(%rip)". 
- */ - if (correction) - *correction += 4; } } #else /* 32-bit: */ @@ -379,8 +371,7 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { } -static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs, - long *correction) +static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { } #endif /* CONFIG_X86_64 */ @@ -417,10 +408,10 @@ static int push_ret_address(struct pt_regs *regs, unsigned long ip) static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; - long correction = (long)(utask->vaddr - utask->xol_vaddr); - riprel_post_xol(auprobe, regs, &correction); + riprel_post_xol(auprobe, regs); if (auprobe->def.fixups & UPROBE_FIX_IP) { + long correction = utask->vaddr - utask->xol_vaddr; regs->ip += correction; } else if (auprobe->def.fixups & UPROBE_FIX_CALL) { regs->sp += sizeof_long(); @@ -436,7 +427,7 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs static void default_abort_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { - riprel_post_xol(auprobe, regs, NULL); + riprel_post_xol(auprobe, regs); } static struct uprobe_xol_ops default_xol_ops = { @@ -732,11 +723,9 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *t) * * If the original instruction was a rip-relative instruction such as * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent - * instruction using a scratch register -- e.g., "movl %edx,(%rax)". - * We need to restore the contents of the scratch register and adjust - * the ip, keeping in mind that the instruction we executed is 4 bytes - * shorter than the original instruction (since we squeezed out the offset - * field). (FIX_RIP_AX or FIX_RIP_CX) + * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rax)". + * We need to restore the contents of the scratch register + * (FIX_RIP_AX or FIX_RIP_CX). */ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { -- cgit v1.2.3 From 1ea30fb64598bd3a6ba43d874bb53c55878eaef5 Mon Sep 17 00:00:00 2001 From: Denys Vlasenko Date: Fri, 2 May 2014 17:04:00 +0200 Subject: uprobes/x86: Fix scratch register selection for rip-relative fixups Before this patch, instructions such as div, mul, shifts with count in CL, cmpxchg are mishandled. This patch adds vex prefix handling. In particular, it avoids colliding with register operand encoded in vex.vvvv field. Since we need to avoid two possible register operands, the selection of scratch register needs to be from at least three registers. After looking through a lot of CPU docs, it looks like the safest choice is SI,DI,BX. Selecting BX needs care to not collide with implicit use of BX by cmpxchg8b. Test-case: #include static const char *const pass[] = { "FAIL", "pass" }; long two = 2; void test1(void) { long ax = 0, dx = 0; asm volatile("\n" " xor %%edx,%%edx\n" " lea 2(%%edx),%%eax\n" // We divide 2 by 2. Result (in eax) should be 1: " probe1: .globl probe1\n" " divl two(%%rip)\n" // If we have a bug (eax mangled on entry) the result will be 2, // because eax gets restored by probe machinery. 
: "=a" (ax), "=d" (dx) /*out*/ : "0" (ax), "1" (dx) /*in*/ : "memory" /*clobber*/ ); dprintf(2, "%s: %s\n", __func__, pass[ax == 1] ); } long val2 = 0; void test2(void) { long old_val = val2; long ax = 0, dx = 0; asm volatile("\n" " mov val2,%%eax\n" // eax := val2 " lea 1(%%eax),%%edx\n" // edx := eax+1 // eax is equal to val2. cmpxchg should store edx to val2: " probe2: .globl probe2\n" " cmpxchg %%edx,val2(%%rip)\n" // If we have a bug (eax mangled on entry), val2 will stay unchanged : "=a" (ax), "=d" (dx) /*out*/ : "0" (ax), "1" (dx) /*in*/ : "memory" /*clobber*/ ); dprintf(2, "%s: %s\n", __func__, pass[val2 == old_val + 1] ); } long val3[2] = {0,0}; void test3(void) { long old_val = val3[0]; long ax = 0, dx = 0; asm volatile("\n" " mov val3,%%eax\n" // edx:eax := val3 " mov val3+4,%%edx\n" " mov %%eax,%%ebx\n" // ecx:ebx := edx:eax + 1 " mov %%edx,%%ecx\n" " add $1,%%ebx\n" " adc $0,%%ecx\n" // edx:eax is equal to val3. cmpxchg8b should store ecx:ebx to val3: " probe3: .globl probe3\n" " cmpxchg8b val3(%%rip)\n" // If we have a bug (edx:eax mangled on entry), val3 will stay unchanged. // If ecx:edx in mangled, val3 will get wrong value. : "=a" (ax), "=d" (dx) /*out*/ : "0" (ax), "1" (dx) /*in*/ : "cx", "bx", "memory" /*clobber*/ ); dprintf(2, "%s: %s\n", __func__, pass[val3[0] == old_val + 1 && val3[1] == 0] ); } int main(int argc, char **argv) { test1(); test2(); test3(); return 0; } Before this change all tests fail if probe{1,2,3} are probed. Signed-off-by: Denys Vlasenko Reviewed-by: Jim Keniston Signed-off-by: Oleg Nesterov --- arch/x86/kernel/uprobes.c | 176 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 125 insertions(+), 51 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 31dcb4d5ea46..159ca520ef5b 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -41,8 +41,11 @@ /* Instruction will modify TF, don't change it */ #define UPROBE_FIX_SETF 0x04 -#define UPROBE_FIX_RIP_AX 0x08 -#define UPROBE_FIX_RIP_CX 0x10 +#define UPROBE_FIX_RIP_SI 0x08 +#define UPROBE_FIX_RIP_DI 0x10 +#define UPROBE_FIX_RIP_BX 0x20 +#define UPROBE_FIX_RIP_MASK \ + (UPROBE_FIX_RIP_SI | UPROBE_FIX_RIP_DI | UPROBE_FIX_RIP_BX) #define UPROBE_TRAP_NR UINT_MAX @@ -275,20 +278,109 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) { u8 *cursor; u8 reg; + u8 reg2; if (!insn_rip_relative(insn)) return; /* - * insn_rip_relative() would have decoded rex_prefix, modrm. + * insn_rip_relative() would have decoded rex_prefix, vex_prefix, modrm. * Clear REX.b bit (extension of MODRM.rm field): - * we want to encode rax/rcx, not r8/r9. + * we want to encode low numbered reg, not r8+. */ if (insn->rex_prefix.nbytes) { cursor = auprobe->insn + insn_offset_rex_prefix(insn); - *cursor &= 0xfe; /* Clearing REX.B bit */ + /* REX byte has 0100wrxb layout, clearing REX.b bit */ + *cursor &= 0xfe; } + /* + * Similar treatment for VEX3 prefix. 
+ * TODO: add XOP/EVEX treatment when insn decoder supports them + */ + if (insn->vex_prefix.nbytes == 3) { + /* + * vex2: c5 rvvvvLpp (has no b bit) + * vex3/xop: c4/8f rxbmmmmm wvvvvLpp + * evex: 62 rxbR00mm wvvvv1pp zllBVaaa + * (evex will need setting of both b and x since + * in non-sib encoding evex.x is 4th bit of MODRM.rm) + * Setting VEX3.b (setting because it has inverted meaning): + */ + cursor = auprobe->insn + insn_offset_vex_prefix(insn) + 1; + *cursor |= 0x20; + } + + /* + * Convert from rip-relative addressing to register-relative addressing + * via a scratch register. + * + * This is tricky since there are insns with modrm byte + * which also use registers not encoded in modrm byte: + * [i]div/[i]mul: implicitly use dx:ax + * shift ops: implicitly use cx + * cmpxchg: implicitly uses ax + * cmpxchg8/16b: implicitly uses dx:ax and bx:cx + * Encoding: 0f c7/1 modrm + * The code below thinks that reg=1 (cx), chooses si as scratch. + * mulx: implicitly uses dx: mulx r/m,r1,r2 does r1:r2 = dx * r/m. + * First appeared in Haswell (BMI2 insn). It is vex-encoded. + * Example where none of bx,cx,dx can be used as scratch reg: + * c4 e2 63 f6 0d disp32 mulx disp32(%rip),%ebx,%ecx + * [v]pcmpistri: implicitly uses cx, xmm0 + * [v]pcmpistrm: implicitly uses xmm0 + * [v]pcmpestri: implicitly uses ax, dx, cx, xmm0 + * [v]pcmpestrm: implicitly uses ax, dx, xmm0 + * Evil SSE4.2 string comparison ops from hell. + * maskmovq/[v]maskmovdqu: implicitly uses (ds:rdi) as destination. + * Encoding: 0f f7 modrm, 66 0f f7 modrm, vex-encoded: c5 f9 f7 modrm. + * Store op1, byte-masked by op2 msb's in each byte, to (ds:rdi). + * AMD says it has no 3-operand form (vex.vvvv must be 1111) + * and that it can have only register operands, not mem + * (its modrm byte must have mode=11). + * If these restrictions will ever be lifted, + * we'll need code to prevent selection of di as scratch reg! + * + * Summary: I don't know any insns with modrm byte which + * use SI register implicitly. DI register is used only + * by one insn (maskmovq) and BX register is used + * only by one too (cmpxchg8b). + * BP is stack-segment based (may be a problem?). + * AX, DX, CX are off-limits (many implicit users). + * SP is unusable (it's stack pointer - think about "pop mem"; + * also, rsp+disp32 needs sib encoding -> insn length change). + */ + reg = MODRM_REG(insn); /* Fetch modrm.reg */ + reg2 = 0xff; /* Fetch vex.vvvv */ + if (insn->vex_prefix.nbytes == 2) + reg2 = insn->vex_prefix.bytes[1]; + else if (insn->vex_prefix.nbytes == 3) + reg2 = insn->vex_prefix.bytes[2]; + /* + * TODO: add XOP, EXEV vvvv reading. + * + * vex.vvvv field is in bits 6-3, bits are inverted. + * But in 32-bit mode, high-order bit may be ignored. + * Therefore, let's consider only 3 low-order bits. + */ + reg2 = ((reg2 >> 3) & 0x7) ^ 0x7; + /* + * Register numbering is ax,cx,dx,bx, sp,bp,si,di, r8..r15. + * + * Choose scratch reg. Order is important: must not select bx + * if we can use si (cmpxchg8b case!) + */ + if (reg != 6 && reg2 != 6) { + reg2 = 6; + auprobe->def.fixups |= UPROBE_FIX_RIP_SI; + } else if (reg != 7 && reg2 != 7) { + reg2 = 7; + auprobe->def.fixups |= UPROBE_FIX_RIP_DI; + /* TODO (paranoia): force maskmovq to not use di */ + } else { + reg2 = 3; + auprobe->def.fixups |= UPROBE_FIX_RIP_BX; + } /* * Point cursor at the modrm byte. The next 4 bytes are the * displacement. 
Beyond the displacement, for some instructions, @@ -296,41 +388,21 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) */ cursor = auprobe->insn + insn_offset_modrm(insn); /* - * Convert from rip-relative addressing - * to register-relative addressing via a scratch register. + * Change modrm from "00 reg 101" to "10 reg reg2". Example: + * 89 05 disp32 mov %eax,disp32(%rip) becomes + * 89 86 disp32 mov %eax,disp32(%rsi) */ - reg = MODRM_REG(insn); - if (reg == 0) { - /* - * The register operand (if any) is either the A register - * (%rax, %eax, etc.) or (if the 0x4 bit is set in the - * REX prefix) %r8. In any case, we know the C register - * is NOT the register operand, so we use %rcx (register - * #1) for the scratch register. - */ - auprobe->def.fixups |= UPROBE_FIX_RIP_CX; - /* - * Change modrm from "00 000 101" to "10 000 001". Example: - * 89 05 disp32 mov %eax,disp32(%rip) becomes - * 89 81 disp32 mov %eax,disp32(%rcx) - */ - *cursor = 0x81; - } else { - /* Use %rax (register #0) for the scratch register. */ - auprobe->def.fixups |= UPROBE_FIX_RIP_AX; - /* - * Change modrm from "00 reg 101" to "10 reg 000". Example: - * 89 1d disp32 mov %edx,disp32(%rip) becomes - * 89 98 disp32 mov %edx,disp32(%rax) - */ - *cursor = (reg << 3) | 0x80; - } + *cursor = 0x80 | (reg << 3) | reg2; } static inline unsigned long * scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs) { - return (auprobe->def.fixups & UPROBE_FIX_RIP_AX) ? ®s->ax : ®s->cx; + if (auprobe->def.fixups & UPROBE_FIX_RIP_SI) + return ®s->si; + if (auprobe->def.fixups & UPROBE_FIX_RIP_DI) + return ®s->di; + return ®s->bx; } /* @@ -339,7 +411,7 @@ scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs) */ static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { - if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { + if (auprobe->def.fixups & UPROBE_FIX_RIP_MASK) { struct uprobe_task *utask = current->utask; unsigned long *sr = scratch_reg(auprobe, regs); @@ -350,7 +422,7 @@ static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { - if (auprobe->def.fixups & (UPROBE_FIX_RIP_AX | UPROBE_FIX_RIP_CX)) { + if (auprobe->def.fixups & UPROBE_FIX_RIP_MASK) { struct uprobe_task *utask = current->utask; unsigned long *sr = scratch_reg(auprobe, regs); @@ -405,6 +477,23 @@ static int push_ret_address(struct pt_regs *regs, unsigned long ip) return 0; } +/* + * We have to fix things up as follows: + * + * Typically, the new ip is relative to the copied instruction. We need + * to make it relative to the original instruction (FIX_IP). Exceptions + * are return instructions and absolute or indirect jump or call instructions. + * + * If the single-stepped instruction was a call, the return address that + * is atop the stack is the address following the copied instruction. We + * need to make it the address following the original instruction (FIX_CALL). + * + * If the original instruction was a rip-relative instruction such as + * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent + * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rsi)". + * We need to restore the contents of the scratch register + * (FIX_RIP_reg). 
+ */ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs) { struct uprobe_task *utask = current->utask; @@ -711,21 +800,6 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *t) * single-step, we single-stepped a copy of the instruction. * * This function prepares to resume execution after the single-step. - * We have to fix things up as follows: - * - * Typically, the new ip is relative to the copied instruction. We need - * to make it relative to the original instruction (FIX_IP). Exceptions - * are return instructions and absolute or indirect jump or call instructions. - * - * If the single-stepped instruction was a call, the return address that - * is atop the stack is the address following the copied instruction. We - * need to make it the address following the original instruction (FIX_CALL). - * - * If the original instruction was a rip-relative instruction such as - * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent - * instruction using a scratch register -- e.g., "movl %edx,0xnnnn(%rax)". - * We need to restore the contents of the scratch register - * (FIX_RIP_AX or FIX_RIP_CX). */ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { -- cgit v1.2.3 From 5e1b05beeca8139204324581a5b1ffb53d057f96 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 8 May 2014 20:34:00 +0200 Subject: x86/traps: Make math_error() static Trivial, make math_error() static. Signed-off-by: Oleg Nesterov --- arch/x86/include/asm/traps.h | 1 - arch/x86/kernel/traps.c | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h index 58d66fe06b61..a7b212db9e04 100644 --- a/arch/x86/include/asm/traps.h +++ b/arch/x86/include/asm/traps.h @@ -98,7 +98,6 @@ static inline int get_si_code(unsigned long condition) extern int panic_on_unrecovered_nmi; -void math_error(struct pt_regs *, int, int); void math_emulate(struct math_emu_info *); #ifndef CONFIG_X86_32 asmlinkage void smp_thermal_interrupt(void); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 57409f6b8c62..8eddd628326e 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -488,7 +488,7 @@ exit: * the correct behaviour even in the presence of the asynchronous * IRQ13 behaviour */ -void math_error(struct pt_regs *regs, int error_code, int trapnr) +static void math_error(struct pt_regs *regs, int error_code, int trapnr) { struct task_struct *task = current; siginfo_t info; -- cgit v1.2.3 From 38cad57be9800e46c52a3612fb9d963eee4fd9c3 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 7 May 2014 16:47:09 +0200 Subject: x86/traps: Use SEND_SIG_PRIV instead of force_sig() force_sig() is just force_sig_info(SEND_SIG_PRIV). Imho it should die, we have too many ugly "send signal" helpers. And do_trap() looks just ugly because it uses force_sig_info() or force_sig() depending on info != NULL. 
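For reference, the equivalence relied on here can be seen from a sketch of the 3.15-era definitions (include/linux/sched.h and kernel/signal.c, reproduced here for illustration):

/* siginfo cookie meaning "signal sent by the kernel on its own behalf" */
#define SEND_SIG_PRIV	((struct siginfo *) 1)

void force_sig(int sig, struct task_struct *p)
{
	force_sig_info(sig, SEND_SIG_PRIV, p);
}

The "info ?: SEND_SIG_PRIV" expression in the hunk below is the GNU a ?: b shorthand: it evaluates to info when info is non-NULL and to SEND_SIG_PRIV otherwise, which is exactly the old if/else.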
Signed-off-by: Oleg Nesterov --- arch/x86/kernel/traps.c | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 8eddd628326e..2cd429117a41 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -168,10 +168,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, } #endif - if (info) - force_sig_info(signr, info, tsk); - else - force_sig(signr, tsk); + force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk); } #define DO_ERROR(trapnr, signr, str, name) \ @@ -305,7 +302,7 @@ do_general_protection(struct pt_regs *regs, long error_code) pr_cont("\n"); } - force_sig(SIGSEGV, tsk); + force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk); exit: exception_exit(prev_state); } @@ -645,7 +642,7 @@ void math_state_restore(void) */ if (unlikely(restore_fpu_checking(tsk))) { drop_init_fpu(tsk); - force_sig(SIGSEGV, tsk); + force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk); return; } -- cgit v1.2.3 From dff0796e53c29147c9bd1f5567a261dcf0e528bc Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Wed, 7 May 2014 17:21:34 +0200 Subject: x86/traps: Introduce do_error_trap() Move the common code from DO_ERROR() and DO_ERROR_INFO() into the new helper, do_error_trap(). This simplifies define's and shaves 527 bytes from traps.o. Signed-off-by: Oleg Nesterov --- arch/x86/kernel/traps.c | 38 +++++++++++++++++--------------------- 1 file changed, 17 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 2cd429117a41..ab8dad719b9e 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -171,41 +171,37 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, force_sig_info(signr, info ?: SEND_SIG_PRIV, tsk); } +static void do_error_trap(struct pt_regs *regs, long error_code, char *str, + unsigned long trapnr, int signr, siginfo_t *info) +{ + enum ctx_state prev_state = exception_enter(); + + if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != + NOTIFY_STOP) { + conditional_sti(regs); + do_trap(trapnr, signr, str, regs, error_code, info); + } + + exception_exit(prev_state); +} + #define DO_ERROR(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ - enum ctx_state prev_state; \ - \ - prev_state = exception_enter(); \ - if (notify_die(DIE_TRAP, str, regs, error_code, \ - trapnr, signr) == NOTIFY_STOP) { \ - exception_exit(prev_state); \ - return; \ - } \ - conditional_sti(regs); \ - do_trap(trapnr, signr, str, regs, error_code, NULL); \ - exception_exit(prev_state); \ + do_error_trap(regs, error_code, str, trapnr, signr, NULL); \ } #define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ siginfo_t info; \ - enum ctx_state prev_state; \ \ info.si_signo = signr; \ info.si_errno = 0; \ info.si_code = sicode; \ info.si_addr = (void __user *)siaddr; \ - prev_state = exception_enter(); \ - if (notify_die(DIE_TRAP, str, regs, error_code, \ - trapnr, signr) == NOTIFY_STOP) { \ - exception_exit(prev_state); \ - return; \ - } \ - conditional_sti(regs); \ - do_trap(trapnr, signr, str, regs, error_code, &info); \ - exception_exit(prev_state); \ + \ + do_error_trap(regs, error_code, str, trapnr, signr, &info); \ } DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip ) -- cgit v1.2.3 From 958d3d729802f7d741cbe8400e69b89baae580ee Mon Sep 17 00:00:00 2001 From: Oleg 
Nesterov Date: Wed, 7 May 2014 17:59:39 +0200 Subject: x86/traps: Introduce fill_trap_info(), simplify DO_ERROR_INFO() Extract the fill-siginfo code from DO_ERROR_INFO() into the new helper, fill_trap_info(). It can calculate si_code and si_addr looking at trapnr, so we can remove these arguments from DO_ERROR_INFO() and simplify the source code. The generated code is the same, __builtin_constant_p(trapnr) == T. Signed-off-by: Oleg Nesterov --- arch/x86/kernel/traps.c | 53 +++++++++++++++++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ab8dad719b9e..1cf8a4c409b6 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -136,6 +136,33 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, return -1; } +static void fill_trap_info(struct pt_regs *regs, int signr, int trapnr, + siginfo_t *info) +{ + unsigned long siaddr; + int sicode; + + switch (trapnr) { + case X86_TRAP_DE: + sicode = FPE_INTDIV; + siaddr = regs->ip; + break; + case X86_TRAP_UD: + sicode = ILL_ILLOPN; + siaddr = regs->ip; + break; + case X86_TRAP_AC: + sicode = BUS_ADRALN; + siaddr = 0; + break; + } + + info->si_signo = signr; + info->si_errno = 0; + info->si_code = sicode; + info->si_addr = (void __user *)siaddr; +} + static void __kprobes do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, long error_code, siginfo_t *info) @@ -191,30 +218,26 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ do_error_trap(regs, error_code, str, trapnr, signr, NULL); \ } -#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \ +#define DO_ERROR_INFO(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ siginfo_t info; \ \ - info.si_signo = signr; \ - info.si_errno = 0; \ - info.si_code = sicode; \ - info.si_addr = (void __user *)siaddr; \ - \ + fill_trap_info(regs, signr, trapnr, &info); \ do_error_trap(regs, error_code, str, trapnr, signr, &info); \ } -DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip ) -DO_ERROR (X86_TRAP_OF, SIGSEGV, "overflow", overflow ) -DO_ERROR (X86_TRAP_BR, SIGSEGV, "bounds", bounds ) -DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip ) -DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun ) -DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS ) -DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present ) +DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error) +DO_ERROR (X86_TRAP_OF, SIGSEGV, "overflow", overflow) +DO_ERROR (X86_TRAP_BR, SIGSEGV, "bounds", bounds) +DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op) +DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) +DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) +DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) #ifdef CONFIG_X86_32 -DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment ) +DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) #endif -DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0 ) +DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) #ifdef CONFIG_X86_64 /* Runs on IST stack */ -- cgit v1.2.3 From 1c326c4dfe182a1c4c1e39f2c00f04c380d11692 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 8 May 
2014 20:04:11 +0200 Subject: x86/traps: Shift fill_trap_info() from DO_ERROR_INFO() to do_error_trap() Move the callsite of fill_trap_info() into do_error_trap() and remove the "siginfo_t *info" argument. This obviously breaks DO_ERROR() which passed info == NULL, we simply change fill_trap_info() to return "siginfo_t *" and add the "default" case which returns SEND_SIG_PRIV. Signed-off-by: Oleg Nesterov --- arch/x86/kernel/traps.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 1cf8a4c409b6..c9dd74124038 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -136,13 +136,16 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str, return -1; } -static void fill_trap_info(struct pt_regs *regs, int signr, int trapnr, - siginfo_t *info) +static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr, + siginfo_t *info) { unsigned long siaddr; int sicode; switch (trapnr) { + default: + return SEND_SIG_PRIV; + case X86_TRAP_DE: sicode = FPE_INTDIV; siaddr = regs->ip; @@ -161,6 +164,7 @@ static void fill_trap_info(struct pt_regs *regs, int signr, int trapnr, info->si_errno = 0; info->si_code = sicode; info->si_addr = (void __user *)siaddr; + return info; } static void __kprobes @@ -199,14 +203,16 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, } static void do_error_trap(struct pt_regs *regs, long error_code, char *str, - unsigned long trapnr, int signr, siginfo_t *info) + unsigned long trapnr, int signr) { enum ctx_state prev_state = exception_enter(); + siginfo_t info; if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) != NOTIFY_STOP) { conditional_sti(regs); - do_trap(trapnr, signr, str, regs, error_code, info); + do_trap(trapnr, signr, str, regs, error_code, + fill_trap_info(regs, signr, trapnr, &info)); } exception_exit(prev_state); @@ -215,16 +221,13 @@ static void do_error_trap(struct pt_regs *regs, long error_code, char *str, #define DO_ERROR(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ - do_error_trap(regs, error_code, str, trapnr, signr, NULL); \ + do_error_trap(regs, error_code, str, trapnr, signr); \ } #define DO_ERROR_INFO(trapnr, signr, str, name) \ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ { \ - siginfo_t info; \ - \ - fill_trap_info(regs, signr, trapnr, &info); \ - do_error_trap(regs, error_code, str, trapnr, signr, &info); \ + do_error_trap(regs, error_code, str, trapnr, signr); \ } DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error) -- cgit v1.2.3 From 0eb14833d5b1ea1accfeffb71be5de5929f85da9 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Thu, 8 May 2014 20:12:24 +0200 Subject: x86/traps: Kill DO_ERROR_INFO() Now that DO_ERROR_INFO() doesn't differ from DO_ERROR() we can remove it and use DO_ERROR() instead. 
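To illustrate the end state: every handler generated by DO_ERROR() is now the same one-line trampoline. Expanding DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error) by hand gives roughly:

dotraplinkage void do_divide_error(struct pt_regs *regs, long error_code)
{
	do_error_trap(regs, error_code, "divide error", X86_TRAP_DE, SIGFPE);
}

The SIGFPE-specific siginfo is no longer baked into the macro; it is built at run time by fill_trap_info() inside do_error_trap().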
Signed-off-by: Oleg Nesterov --- arch/x86/kernel/traps.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index c9dd74124038..73b3ea32245a 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -224,23 +224,17 @@ dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ do_error_trap(regs, error_code, str, trapnr, signr); \ } -#define DO_ERROR_INFO(trapnr, signr, str, name) \ -dotraplinkage void do_##name(struct pt_regs *regs, long error_code) \ -{ \ - do_error_trap(regs, error_code, str, trapnr, signr); \ -} - -DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error) -DO_ERROR (X86_TRAP_OF, SIGSEGV, "overflow", overflow) -DO_ERROR (X86_TRAP_BR, SIGSEGV, "bounds", bounds) -DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op) -DO_ERROR (X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun) -DO_ERROR (X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) -DO_ERROR (X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) +DO_ERROR(X86_TRAP_DE, SIGFPE, "divide error", divide_error) +DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow) +DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds) +DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op) +DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun) +DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) +DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) #ifdef CONFIG_X86_32 -DO_ERROR (X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) +DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) #endif -DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) +DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) #ifdef CONFIG_X86_64 /* Runs on IST stack */ -- cgit v1.2.3 From b02ef20a9fba08948e643d3eec0efadf1da01a44 Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Mon, 12 May 2014 18:24:45 +0200 Subject: uprobes/x86: Fix the wrong ->si_addr when xol triggers a trap If the probed insn triggers a trap, ->si_addr = regs->ip is technically correct, but this is not what the signal handler wants; we need to pass the address of the probed insn, not the address of the xol slot. Add the new arch-agnostic helper, uprobe_get_trap_addr(), and change fill_trap_info() and math_error() to use it. The !CONFIG_UPROBES case in uprobes.h uses a macro to avoid include hell and ensure that it can be compiled even if an architecture doesn't define instruction_pointer(). Test-case: #include <signal.h> #include <stdio.h> #include <unistd.h> extern void probe_div(void); void sigh(int sig, siginfo_t *info, void *c) { int passed = (info->si_addr == probe_div); printf(passed ? "PASS\n" : "FAIL\n"); _exit(!passed); } int main(void) { struct sigaction sa = { .sa_sigaction = sigh, .sa_flags = SA_SIGINFO, }; sigaction(SIGFPE, &sa, NULL); asm ( "xor %ecx,%ecx\n" ".globl probe_div; probe_div:\n" "idiv %ecx\n" ); return 0; } It fails if probe_div() is probed. Note: show_unhandled_signals users should probably use this helper too, but we need to clean them up first.
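To make the failure mode concrete (the addresses below are made up for illustration): while the copied insn is single-stepped out of line, regs->ip points into the xol area, not at the probed insn:

	probe_div (original insn)   0x0000000000400560   == utask->vaddr
	xol slot (copied insn)      0x00007ffff7ff9000   == regs->ip when the #DE is raised

Before this fix, fill_trap_info() copied the xol address into ->si_addr, so the handler's "info->si_addr == probe_div" check failed; with uprobe_get_trap_addr() the handler sees utask->vaddr, i.e. the address of probe_div itself.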
Signed-off-by: Oleg Nesterov Reviewed-by: Masami Hiramatsu --- arch/x86/kernel/traps.c | 7 ++++--- include/linux/uprobes.h | 4 ++++ kernel/events/uprobes.c | 10 ++++++++++ 3 files changed, 18 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 73b3ea32245a..3fdb20548c4b 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -148,11 +149,11 @@ static siginfo_t *fill_trap_info(struct pt_regs *regs, int signr, int trapnr, case X86_TRAP_DE: sicode = FPE_INTDIV; - siaddr = regs->ip; + siaddr = uprobe_get_trap_addr(regs); break; case X86_TRAP_UD: sicode = ILL_ILLOPN; - siaddr = regs->ip; + siaddr = uprobe_get_trap_addr(regs); break; case X86_TRAP_AC: sicode = BUS_ADRALN; @@ -531,7 +532,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr) task->thread.error_code = error_code; info.si_signo = SIGFPE; info.si_errno = 0; - info.si_addr = (void __user *)regs->ip; + info.si_addr = (void __user *)uprobe_get_trap_addr(regs); if (trapnr == X86_TRAP_MF) { unsigned short cwd, swd; /* diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index edff2b97b864..88c3b7e8b384 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -102,6 +102,7 @@ extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, u extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); extern bool __weak is_trap_insn(uprobe_opcode_t *insn); extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); +extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs); extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); @@ -130,6 +131,9 @@ extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *r #else /* !CONFIG_UPROBES */ struct uprobes_state { }; + +#define uprobe_get_trap_addr(regs) instruction_pointer(regs) + static inline int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) { diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c index a13251e8bfa4..3b02c72938a8 100644 --- a/kernel/events/uprobes.c +++ b/kernel/events/uprobes.c @@ -1351,6 +1351,16 @@ unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs) return instruction_pointer(regs) - UPROBE_SWBP_INSN_SIZE; } +unsigned long uprobe_get_trap_addr(struct pt_regs *regs) +{ + struct uprobe_task *utask = current->utask; + + if (unlikely(utask && utask->active_uprobe)) + return utask->vaddr; + + return instruction_pointer(regs); +} + /* * Called with no locks held. * Called in context of a exiting or a exec-ing thread. -- cgit v1.2.3 From f3c2af7ba17a83809806880062c9ad541744fb95 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 13 May 2014 19:50:45 -0700 Subject: net: filter: x86: split bpf_jit_compile() Split bpf_jit_compile() into two functions to improve readability of for(pass++) loop. The change follows similar style of JIT compilers for arm, powerpc, s390 The body of new do_jit() was not reformatted to reduce noise in this patch, since the following patch replaces most of it. Tested with BPF testsuite. Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- arch/x86/net/bpf_jit_comp.c | 157 ++++++++++++++++++++++++++------------------ 1 file changed, 92 insertions(+), 65 deletions(-) (limited to 'arch') diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index dc017735bb91..c5fa7c9cb665 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -178,41 +178,26 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen, return header; } -void bpf_jit_compile(struct sk_filter *fp) +struct jit_context { + unsigned int cleanup_addr; /* epilogue code offset */ + int pc_ret0; /* bpf index of first RET #0 instruction (if any) */ + u8 seen; +}; + +static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, + int oldproglen, struct jit_context *ctx) { + const struct sock_filter *filter = bpf_prog->insns; + int flen = bpf_prog->len; u8 temp[64]; u8 *prog; - unsigned int proglen, oldproglen = 0; - int ilen, i; + int ilen, i, proglen; int t_offset, f_offset; - u8 t_op, f_op, seen = 0, pass; - u8 *image = NULL; - struct bpf_binary_header *header = NULL; + u8 t_op, f_op, seen = 0; u8 *func; - int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */ - unsigned int cleanup_addr; /* epilogue code offset */ - unsigned int *addrs; - const struct sock_filter *filter = fp->insns; - int flen = fp->len; - - if (!bpf_jit_enable) - return; - - addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL); - if (addrs == NULL) - return; - - /* Before first pass, make a rough estimation of addrs[] - * each bpf instruction is translated to less than 64 bytes - */ - for (proglen = 0, i = 0; i < flen; i++) { - proglen += 64; - addrs[i] = proglen; - } - cleanup_addr = proglen; /* epilogue address */ + unsigned int cleanup_addr = ctx->cleanup_addr; + u8 seen_or_pass0 = ctx->seen; - for (pass = 0; pass < 10; pass++) { - u8 seen_or_pass0 = (pass == 0) ? 
(SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen; /* no prologue/epilogue for trivial filters (RET something) */ proglen = 0; prog = temp; @@ -325,12 +310,12 @@ void bpf_jit_compile(struct sk_filter *fp) case BPF_S_ALU_DIV_X: /* A /= X; */ seen |= SEEN_XREG; EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ - if (pc_ret0 > 0) { + if (ctx->pc_ret0 > 0) { /* addrs[pc_ret0 - 1] is start address of target * (addrs[i] - 4) is the address following this jmp * ("xor %edx,%edx; div %ebx" being 4 bytes long) */ - EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] - + EMIT_COND_JMP(X86_JE, addrs[ctx->pc_ret0 - 1] - (addrs[i] - 4)); } else { EMIT_COND_JMP(X86_JNE, 2 + 5); @@ -342,12 +327,12 @@ void bpf_jit_compile(struct sk_filter *fp) case BPF_S_ALU_MOD_X: /* A %= X; */ seen |= SEEN_XREG; EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ - if (pc_ret0 > 0) { + if (ctx->pc_ret0 > 0) { /* addrs[pc_ret0 - 1] is start address of target * (addrs[i] - 6) is the address following this jmp * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long) */ - EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] - + EMIT_COND_JMP(X86_JE, addrs[ctx->pc_ret0 - 1] - (addrs[i] - 6)); } else { EMIT_COND_JMP(X86_JNE, 2 + 5); @@ -441,8 +426,8 @@ void bpf_jit_compile(struct sk_filter *fp) break; case BPF_S_RET_K: if (!K) { - if (pc_ret0 == -1) - pc_ret0 = i; + if (ctx->pc_ret0 == -1) + ctx->pc_ret0 = i; CLEAR_A(); } else { EMIT1_off32(0xb8, K); /* mov $imm32,%eax */ @@ -603,7 +588,7 @@ void bpf_jit_compile(struct sk_filter *fp) int off = pkt_type_offset(); if (off < 0) - goto out; + return -EINVAL; if (is_imm8(off)) { /* movzbl off8(%rdi),%eax */ EMIT4(0x0f, 0xb6, 0x47, off); @@ -725,36 +710,79 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; } EMIT_COND_JMP(f_op, f_offset); break; - default: - /* hmm, too complex filter, give up with jit compiler */ - goto out; - } - ilen = prog - temp; - if (image) { - if (unlikely(proglen + ilen > oldproglen)) { - pr_err("bpb_jit_compile fatal error\n"); - kfree(addrs); - module_free(NULL, header); - return; - } - memcpy(image + proglen, temp, ilen); + default: + /* hmm, too complex filter, give up with jit compiler */ + return -EINVAL; + } + ilen = prog - temp; + if (image) { + if (unlikely(proglen + ilen > oldproglen)) { + pr_err("bpb_jit_compile fatal error\n"); + return -EFAULT; } - proglen += ilen; - addrs[i] = proglen; - prog = temp; + memcpy(image + proglen, temp, ilen); } - /* last bpf instruction is always a RET : - * use it to give the cleanup instruction(s) addr - */ - cleanup_addr = proglen - 1; /* ret */ - if (seen_or_pass0) - cleanup_addr -= 1; /* leaveq */ - if (seen_or_pass0 & SEEN_XREG) - cleanup_addr -= 4; /* mov -8(%rbp),%rbx */ + proglen += ilen; + addrs[i] = proglen; + prog = temp; + } + /* last bpf instruction is always a RET : + * use it to give the cleanup instruction(s) addr + */ + ctx->cleanup_addr = proglen - 1; /* ret */ + if (seen_or_pass0) + ctx->cleanup_addr -= 1; /* leaveq */ + if (seen_or_pass0 & SEEN_XREG) + ctx->cleanup_addr -= 4; /* mov -8(%rbp),%rbx */ + + ctx->seen = seen; + + return proglen; +} + +void bpf_jit_compile(struct sk_filter *prog) +{ + struct bpf_binary_header *header = NULL; + int proglen, oldproglen = 0; + struct jit_context ctx = {}; + u8 *image = NULL; + int *addrs; + int pass; + int i; + + if (!bpf_jit_enable) + return; + if (!prog || !prog->len) + return; + + addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL); + if (!addrs) + return; + + /* Before first pass, make a rough estimation of addrs[] + * each bpf instruction is translated to less than 64 bytes 
+ */ + for (proglen = 0, i = 0; i < prog->len; i++) { + proglen += 64; + addrs[i] = proglen; + } + ctx.cleanup_addr = proglen; + ctx.seen = SEEN_XREG | SEEN_DATAREF | SEEN_MEM; + ctx.pc_ret0 = -1; + + for (pass = 0; pass < 10; pass++) { + proglen = do_jit(prog, addrs, image, oldproglen, &ctx); + if (proglen <= 0) { + image = NULL; + if (header) + module_free(NULL, header); + goto out; + } if (image) { if (proglen != oldproglen) - pr_err("bpb_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen); + pr_err("bpf_jit: proglen=%d != oldproglen=%d\n", + proglen, oldproglen); break; } if (proglen == oldproglen) { @@ -766,17 +794,16 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; } if (bpf_jit_enable > 1) - bpf_jit_dump(flen, proglen, pass, image); + bpf_jit_dump(prog->len, proglen, 0, image); if (image) { bpf_flush_icache(header, image + proglen); set_memory_ro((unsigned long)header, header->pages); - fp->bpf_func = (void *)image; - fp->jited = 1; + prog->bpf_func = (void *)image; + prog->jited = 1; } out: kfree(addrs); - return; } static void bpf_jit_free_deferred(struct work_struct *work) -- cgit v1.2.3 From 622582786c9e041d0bd52bde201787adeab249f8 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Tue, 13 May 2014 19:50:46 -0700 Subject: net: filter: x86: internal BPF JIT Maps all internal BPF instructions into x86_64 instructions. This patch replaces original BPF x64 JIT with internal BPF x64 JIT. sysctl net.core.bpf_jit_enable is reused as on/off switch. Performance: 1. old BPF JIT and internal BPF JIT generate equivalent x86_64 code. No performance difference is observed for filters that were JIT-able before Example assembler code for BPF filter "tcpdump port 22" original BPF -> old JIT: original BPF -> internal BPF -> new JIT: 0: push %rbp 0: push %rbp 1: mov %rsp,%rbp 1: mov %rsp,%rbp 4: sub $0x60,%rsp 4: sub $0x228,%rsp 8: mov %rbx,-0x8(%rbp) b: mov %rbx,-0x228(%rbp) // prologue 12: mov %r13,-0x220(%rbp) 19: mov %r14,-0x218(%rbp) 20: mov %r15,-0x210(%rbp) 27: xor %eax,%eax // clear A c: xor %ebx,%ebx 29: xor %r13,%r13 // clear X e: mov 0x68(%rdi),%r9d 2c: mov 0x68(%rdi),%r9d 12: sub 0x6c(%rdi),%r9d 30: sub 0x6c(%rdi),%r9d 16: mov 0xd8(%rdi),%r8 34: mov 0xd8(%rdi),%r10 3b: mov %rdi,%rbx 1d: mov $0xc,%esi 3e: mov $0xc,%esi 22: callq 0xffffffffe1021e15 43: callq 0xffffffffe102bd75 27: cmp $0x86dd,%eax 48: cmp $0x86dd,%rax 2c: jne 0x0000000000000069 4f: jne 0x000000000000009a 2e: mov $0x14,%esi 51: mov $0x14,%esi 33: callq 0xffffffffe1021e31 56: callq 0xffffffffe102bd91 38: cmp $0x84,%eax 5b: cmp $0x84,%rax 3d: je 0x0000000000000049 62: je 0x0000000000000074 3f: cmp $0x6,%eax 64: cmp $0x6,%rax 42: je 0x0000000000000049 68: je 0x0000000000000074 44: cmp $0x11,%eax 6a: cmp $0x11,%rax 47: jne 0x00000000000000c6 6e: jne 0x0000000000000117 49: mov $0x36,%esi 74: mov $0x36,%esi 4e: callq 0xffffffffe1021e15 79: callq 0xffffffffe102bd75 53: cmp $0x16,%eax 7e: cmp $0x16,%rax 56: je 0x00000000000000bf 82: je 0x0000000000000110 58: mov $0x38,%esi 88: mov $0x38,%esi 5d: callq 0xffffffffe1021e15 8d: callq 0xffffffffe102bd75 62: cmp $0x16,%eax 92: cmp $0x16,%rax 65: je 0x00000000000000bf 96: je 0x0000000000000110 67: jmp 0x00000000000000c6 98: jmp 0x0000000000000117 69: cmp $0x800,%eax 9a: cmp $0x800,%rax 6e: jne 0x00000000000000c6 a1: jne 0x0000000000000117 70: mov $0x17,%esi a3: mov $0x17,%esi 75: callq 0xffffffffe1021e31 a8: callq 0xffffffffe102bd91 7a: cmp $0x84,%eax ad: cmp $0x84,%rax 7f: je 0x000000000000008b b4: je 0x00000000000000c2 81: cmp $0x6,%eax b6: cmp 
$0x6,%rax 84: je 0x000000000000008b ba: je 0x00000000000000c2 86: cmp $0x11,%eax bc: cmp $0x11,%rax 89: jne 0x00000000000000c6 c0: jne 0x0000000000000117 8b: mov $0x14,%esi c2: mov $0x14,%esi 90: callq 0xffffffffe1021e15 c7: callq 0xffffffffe102bd75 95: test $0x1fff,%ax cc: test $0x1fff,%rax 99: jne 0x00000000000000c6 d3: jne 0x0000000000000117 d5: mov %rax,%r14 9b: mov $0xe,%esi d8: mov $0xe,%esi a0: callq 0xffffffffe1021e44 dd: callq 0xffffffffe102bd91 // MSH e2: and $0xf,%eax e5: shl $0x2,%eax e8: mov %rax,%r13 eb: mov %r14,%rax ee: mov %r13,%rsi a5: lea 0xe(%rbx),%esi f1: add $0xe,%esi a8: callq 0xffffffffe1021e0d f4: callq 0xffffffffe102bd6d ad: cmp $0x16,%eax f9: cmp $0x16,%rax b0: je 0x00000000000000bf fd: je 0x0000000000000110 ff: mov %r13,%rsi b2: lea 0x10(%rbx),%esi 102: add $0x10,%esi b5: callq 0xffffffffe1021e0d 105: callq 0xffffffffe102bd6d ba: cmp $0x16,%eax 10a: cmp $0x16,%rax bd: jne 0x00000000000000c6 10e: jne 0x0000000000000117 bf: mov $0xffff,%eax 110: mov $0xffff,%eax c4: jmp 0x00000000000000c8 115: jmp 0x000000000000011c c6: xor %eax,%eax 117: mov $0x0,%eax c8: mov -0x8(%rbp),%rbx 11c: mov -0x228(%rbp),%rbx // epilogue cc: leaveq 123: mov -0x220(%rbp),%r13 cd: retq 12a: mov -0x218(%rbp),%r14 131: mov -0x210(%rbp),%r15 138: leaveq 139: retq On fully cached SKBs both JITed functions take 12 nsec to execute. BPF interpreter executes the program in 30 nsec. The difference in generated assembler is due to the following: Old BPF imlements LDX_MSH instruction via sk_load_byte_msh() helper function inside bpf_jit.S. New JIT removes the helper and does it explicitly, so ldx_msh cost is the same for both JITs, but generated code looks longer. New JIT has 4 registers to save, so prologue/epilogue are larger, but the cost is within noise on x64. Old JIT checks whether first insn clears A and if not emits 'xor %eax,%eax'. New JIT clears %rax unconditionally. 2. old BPF JIT doesn't support ANC_NLATTR, ANC_PAY_OFFSET, ANC_RANDOM extensions. New JIT supports all BPF extensions. Performance of such filters improves 2-4 times depending on a filter. The longer the filter the higher performance gain. Synthetic benchmarks with many ancillary loads see 20x speedup which seems to be the maximum gain from JIT Notes: . net.core.bpf_jit_enable=2 + tools/net/bpf_jit_disasm is still functional and can be used to see generated assembler . there are two jit_compile() functions and code flow for classic filters is: sk_attach_filter() - load classic BPF bpf_jit_compile() - try to JIT from classic BPF sk_convert_filter() - convert classic to internal bpf_int_jit_compile() - JIT from internal BPF seccomp and tracing filters will just call bpf_int_jit_compile() Signed-off-by: Alexei Starovoitov Signed-off-by: David S. 
Miller --- arch/x86/net/bpf_jit.S | 77 +-- arch/x86/net/bpf_jit_comp.c | 1314 +++++++++++++++++++++++-------------------- include/linux/filter.h | 3 + net/core/filter.c | 9 +- 4 files changed, 748 insertions(+), 655 deletions(-) (limited to 'arch') diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S index 01495755701b..6440221ced0d 100644 --- a/arch/x86/net/bpf_jit.S +++ b/arch/x86/net/bpf_jit.S @@ -12,13 +12,16 @@ /* * Calling convention : - * rdi : skb pointer + * rbx : skb pointer (callee saved) * esi : offset of byte(s) to fetch in skb (can be scratched) - * r8 : copy of skb->data + * r10 : copy of skb->data * r9d : hlen = skb->len - skb->data_len */ -#define SKBDATA %r8 +#define SKBDATA %r10 #define SKF_MAX_NEG_OFF $(-0x200000) /* SKF_LL_OFF from filter.h */ +#define MAX_BPF_STACK (512 /* from filter.h */ + \ + 32 /* space for rbx,r13,r14,r15 */ + \ + 8 /* space for skb_copy_bits */) sk_load_word: .globl sk_load_word @@ -68,53 +71,31 @@ sk_load_byte_positive_offset: movzbl (SKBDATA,%rsi),%eax ret -/** - * sk_load_byte_msh - BPF_S_LDX_B_MSH helper - * - * Implements BPF_S_LDX_B_MSH : ldxb 4*([offset]&0xf) - * Must preserve A accumulator (%eax) - * Inputs : %esi is the offset value - */ -sk_load_byte_msh: - .globl sk_load_byte_msh - test %esi,%esi - js bpf_slow_path_byte_msh_neg - -sk_load_byte_msh_positive_offset: - .globl sk_load_byte_msh_positive_offset - cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte_msh */ - jle bpf_slow_path_byte_msh - movzbl (SKBDATA,%rsi),%ebx - and $15,%bl - shl $2,%bl - ret - /* rsi contains offset and can be scratched */ #define bpf_slow_path_common(LEN) \ - push %rdi; /* save skb */ \ + mov %rbx, %rdi; /* arg1 == skb */ \ push %r9; \ push SKBDATA; \ /* rsi already has offset */ \ mov $LEN,%ecx; /* len */ \ - lea -12(%rbp),%rdx; \ + lea - MAX_BPF_STACK + 32(%rbp),%rdx; \ call skb_copy_bits; \ test %eax,%eax; \ pop SKBDATA; \ - pop %r9; \ - pop %rdi + pop %r9; bpf_slow_path_word: bpf_slow_path_common(4) js bpf_error - mov -12(%rbp),%eax + mov - MAX_BPF_STACK + 32(%rbp),%eax bswap %eax ret bpf_slow_path_half: bpf_slow_path_common(2) js bpf_error - mov -12(%rbp),%ax + mov - MAX_BPF_STACK + 32(%rbp),%ax rol $8,%ax movzwl %ax,%eax ret @@ -122,21 +103,11 @@ bpf_slow_path_half: bpf_slow_path_byte: bpf_slow_path_common(1) js bpf_error - movzbl -12(%rbp),%eax - ret - -bpf_slow_path_byte_msh: - xchg %eax,%ebx /* dont lose A , X is about to be scratched */ - bpf_slow_path_common(1) - js bpf_error - movzbl -12(%rbp),%eax - and $15,%al - shl $2,%al - xchg %eax,%ebx + movzbl - MAX_BPF_STACK + 32(%rbp),%eax ret #define sk_negative_common(SIZE) \ - push %rdi; /* save skb */ \ + mov %rbx, %rdi; /* arg1 == skb */ \ push %r9; \ push SKBDATA; \ /* rsi already has offset */ \ @@ -145,10 +116,8 @@ bpf_slow_path_byte_msh: test %rax,%rax; \ pop SKBDATA; \ pop %r9; \ - pop %rdi; \ jz bpf_error - bpf_slow_path_word_neg: cmp SKF_MAX_NEG_OFF, %esi /* test range */ jl bpf_error /* offset lower -> error */ @@ -179,22 +148,12 @@ sk_load_byte_negative_offset: movzbl (%rax), %eax ret -bpf_slow_path_byte_msh_neg: - cmp SKF_MAX_NEG_OFF, %esi - jl bpf_error -sk_load_byte_msh_negative_offset: - .globl sk_load_byte_msh_negative_offset - xchg %eax,%ebx /* dont lose A , X is about to be scratched */ - sk_negative_common(1) - movzbl (%rax),%eax - and $15,%al - shl $2,%al - xchg %eax,%ebx - ret - bpf_error: # force a return 0 from jit handler - xor %eax,%eax - mov -8(%rbp),%rbx + xor %eax,%eax + mov - MAX_BPF_STACK(%rbp),%rbx + mov - MAX_BPF_STACK + 8(%rbp),%r13 + mov - 
MAX_BPF_STACK + 16(%rbp),%r14 + mov - MAX_BPF_STACK + 24(%rbp),%r15 leaveq ret diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index c5fa7c9cb665..92aef8fdac2f 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1,6 +1,7 @@ /* bpf_jit_comp.c : BPF JIT compiler * * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com) + * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License @@ -14,28 +15,16 @@ #include #include -/* - * Conventions : - * EAX : BPF A accumulator - * EBX : BPF X accumulator - * RDI : pointer to skb (first argument given to JIT function) - * RBP : frame pointer (even if CONFIG_FRAME_POINTER=n) - * ECX,EDX,ESI : scratch registers - * r9d : skb->len - skb->data_len (headlen) - * r8 : skb->data - * -8(RBP) : saved RBX value - * -16(RBP)..-80(RBP) : BPF_MEMWORDS values - */ int bpf_jit_enable __read_mostly; /* * assembly code in arch/x86/net/bpf_jit.S */ -extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[]; +extern u8 sk_load_word[], sk_load_half[], sk_load_byte[]; extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[]; -extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[]; +extern u8 sk_load_byte_positive_offset[]; extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[]; -extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[]; +extern u8 sk_load_byte_negative_offset[]; static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) { @@ -56,30 +45,44 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len) #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2) #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3) #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4) -#define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0) - -#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */ -#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */ +#define EMIT1_off32(b1, off) \ + do {EMIT1(b1); EMIT(off, 4); } while (0) +#define EMIT2_off32(b1, b2, off) \ + do {EMIT2(b1, b2); EMIT(off, 4); } while (0) +#define EMIT3_off32(b1, b2, b3, off) \ + do {EMIT3(b1, b2, b3); EMIT(off, 4); } while (0) +#define EMIT4_off32(b1, b2, b3, b4, off) \ + do {EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0) static inline bool is_imm8(int value) { return value <= 127 && value >= -128; } -static inline bool is_near(int offset) +static inline bool is_simm32(s64 value) { - return offset <= 127 && offset >= -128; + return value == (s64) (s32) value; } -#define EMIT_JMP(offset) \ -do { \ - if (offset) { \ - if (is_near(offset)) \ - EMIT2(0xeb, offset); /* jmp .+off8 */ \ - else \ - EMIT1_off32(0xe9, offset); /* jmp .+off32 */ \ - } \ -} while (0) +/* mov A, X */ +#define EMIT_mov(A, X) \ + do {if (A != X) \ + EMIT3(add_2mod(0x48, A, X), 0x89, add_2reg(0xC0, A, X)); \ + } while (0) + +static int bpf_size_to_x86_bytes(int bpf_size) +{ + if (bpf_size == BPF_W) + return 4; + else if (bpf_size == BPF_H) + return 2; + else if (bpf_size == BPF_B) + return 1; + else if (bpf_size == BPF_DW) + return 4; /* imm32 */ + else + return 0; +} /* list of x86 cond jumps opcodes (. + s8) * Add 0x10 (and an extra 0x0f) to generate far jumps (. 
+ s32) @@ -90,27 +93,8 @@ do { \ #define X86_JNE 0x75 #define X86_JBE 0x76 #define X86_JA 0x77 - -#define EMIT_COND_JMP(op, offset) \ -do { \ - if (is_near(offset)) \ - EMIT2(op, offset); /* jxx .+off8 */ \ - else { \ - EMIT2(0x0f, op + 0x10); \ - EMIT(offset, 4); /* jxx .+off32 */ \ - } \ -} while (0) - -#define COND_SEL(CODE, TOP, FOP) \ - case CODE: \ - t_op = TOP; \ - f_op = FOP; \ - goto cond_branch - - -#define SEEN_DATAREF 1 /* might call external helpers */ -#define SEEN_XREG 2 /* ebx is used */ -#define SEEN_MEM 4 /* use mem[] for temporary storage */ +#define X86_JGE 0x7D +#define X86_JG 0x7F static inline void bpf_flush_icache(void *start, void *end) { @@ -125,26 +109,6 @@ static inline void bpf_flush_icache(void *start, void *end) #define CHOOSE_LOAD_FUNC(K, func) \ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset) -/* Helper to find the offset of pkt_type in sk_buff - * We want to make sure its still a 3bit field starting at a byte boundary. - */ -#define PKT_TYPE_MAX 7 -static int pkt_type_offset(void) -{ - struct sk_buff skb_probe = { - .pkt_type = ~0, - }; - char *ct = (char *)&skb_probe; - unsigned int off; - - for (off = 0; off < sizeof(struct sk_buff); off++) { - if (ct[off] == PKT_TYPE_MAX) - return off; - } - pr_err_once("Please fix pkt_type_offset(), as pkt_type couldn't be found\n"); - return -1; -} - struct bpf_binary_header { unsigned int pages; /* Note : for security reasons, bpf code will follow a randomly @@ -178,546 +142,715 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int proglen, return header; } +/* pick a register outside of BPF range for JIT internal work */ +#define AUX_REG (MAX_BPF_REG + 1) + +/* the following table maps BPF registers to x64 registers. + * x64 register r12 is unused, since if used as base address register + * in load/store instructions, it always needs an extra byte of encoding + */ +static const int reg2hex[] = { + [BPF_REG_0] = 0, /* rax */ + [BPF_REG_1] = 7, /* rdi */ + [BPF_REG_2] = 6, /* rsi */ + [BPF_REG_3] = 2, /* rdx */ + [BPF_REG_4] = 1, /* rcx */ + [BPF_REG_5] = 0, /* r8 */ + [BPF_REG_6] = 3, /* rbx callee saved */ + [BPF_REG_7] = 5, /* r13 callee saved */ + [BPF_REG_8] = 6, /* r14 callee saved */ + [BPF_REG_9] = 7, /* r15 callee saved */ + [BPF_REG_FP] = 5, /* rbp readonly */ + [AUX_REG] = 3, /* r11 temp register */ +}; + +/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15 + * which need extra byte of encoding. 
+ * rax,rcx,...,rbp have simpler encoding + */ +static inline bool is_ereg(u32 reg) +{ + if (reg == BPF_REG_5 || reg == AUX_REG || + (reg >= BPF_REG_7 && reg <= BPF_REG_9)) + return true; + else + return false; +} + +/* add modifiers if 'reg' maps to x64 registers r8..r15 */ +static inline u8 add_1mod(u8 byte, u32 reg) +{ + if (is_ereg(reg)) + byte |= 1; + return byte; +} + +static inline u8 add_2mod(u8 byte, u32 r1, u32 r2) +{ + if (is_ereg(r1)) + byte |= 1; + if (is_ereg(r2)) + byte |= 4; + return byte; +} + +/* encode dest register 'a_reg' into x64 opcode 'byte' */ +static inline u8 add_1reg(u8 byte, u32 a_reg) +{ + return byte + reg2hex[a_reg]; +} + +/* encode dest 'a_reg' and src 'x_reg' registers into x64 opcode 'byte' */ +static inline u8 add_2reg(u8 byte, u32 a_reg, u32 x_reg) +{ + return byte + reg2hex[a_reg] + (reg2hex[x_reg] << 3); +} + struct jit_context { unsigned int cleanup_addr; /* epilogue code offset */ - int pc_ret0; /* bpf index of first RET #0 instruction (if any) */ - u8 seen; + bool seen_ld_abs; }; static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, int oldproglen, struct jit_context *ctx) { - const struct sock_filter *filter = bpf_prog->insns; - int flen = bpf_prog->len; + struct sock_filter_int *insn = bpf_prog->insnsi; + int insn_cnt = bpf_prog->len; u8 temp[64]; - u8 *prog; - int ilen, i, proglen; - int t_offset, f_offset; - u8 t_op, f_op, seen = 0; - u8 *func; - unsigned int cleanup_addr = ctx->cleanup_addr; - u8 seen_or_pass0 = ctx->seen; - - /* no prologue/epilogue for trivial filters (RET something) */ - proglen = 0; - prog = temp; + int i; + int proglen = 0; + u8 *prog = temp; + int stacksize = MAX_BPF_STACK + + 32 /* space for rbx, r13, r14, r15 */ + + 8 /* space for skb_copy_bits() buffer */; - if (seen_or_pass0) { - EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */ - EMIT4(0x48, 0x83, 0xec, 96); /* subq $96,%rsp */ - /* note : must save %rbx in case bpf_error is hit */ - if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF)) - EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */ - if (seen_or_pass0 & SEEN_XREG) - CLEAR_X(); /* make sure we dont leek kernel memory */ - - /* - * If this filter needs to access skb data, - * loads r9 and r8 with : - * r9 = skb->len - skb->data_len - * r8 = skb->data - */ - if (seen_or_pass0 & SEEN_DATAREF) { - if (offsetof(struct sk_buff, len) <= 127) - /* mov off8(%rdi),%r9d */ - EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len)); - else { - /* mov off32(%rdi),%r9d */ - EMIT3(0x44, 0x8b, 0x8f); - EMIT(offsetof(struct sk_buff, len), 4); - } - if (is_imm8(offsetof(struct sk_buff, data_len))) - /* sub off8(%rdi),%r9d */ - EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len)); - else { - EMIT3(0x44, 0x2b, 0x8f); - EMIT(offsetof(struct sk_buff, data_len), 4); - } + EMIT1(0x55); /* push rbp */ + EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */ - if (is_imm8(offsetof(struct sk_buff, data))) - /* mov off8(%rdi),%r8 */ - EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data)); - else { - /* mov off32(%rdi),%r8 */ - EMIT3(0x4c, 0x8b, 0x87); - EMIT(offsetof(struct sk_buff, data), 4); - } + /* sub rsp, stacksize */ + EMIT3_off32(0x48, 0x81, 0xEC, stacksize); + + /* all classic BPF filters use R6(rbx) save it */ + + /* mov qword ptr [rbp-X],rbx */ + EMIT3_off32(0x48, 0x89, 0x9D, -stacksize); + + /* sk_convert_filter() maps classic BPF register X to R7 and uses R8 + * as temporary, so all tcpdump filters need to spill/fill R7(r13) and + * R8(r14). 
R9(r15) spill could be made conditional, but there is only + * one 'bpf_error' return path out of helper functions inside bpf_jit.S + * The overhead of extra spill is negligible for any filter other + * than synthetic ones. Therefore not worth adding complexity. + */ + + /* mov qword ptr [rbp-X],r13 */ + EMIT3_off32(0x4C, 0x89, 0xAD, -stacksize + 8); + /* mov qword ptr [rbp-X],r14 */ + EMIT3_off32(0x4C, 0x89, 0xB5, -stacksize + 16); + /* mov qword ptr [rbp-X],r15 */ + EMIT3_off32(0x4C, 0x89, 0xBD, -stacksize + 24); + + /* clear A and X registers */ + EMIT2(0x31, 0xc0); /* xor eax, eax */ + EMIT3(0x4D, 0x31, 0xED); /* xor r13, r13 */ + + if (ctx->seen_ld_abs) { + /* r9d : skb->len - skb->data_len (headlen) + * r10 : skb->data + */ + if (is_imm8(offsetof(struct sk_buff, len))) + /* mov %r9d, off8(%rdi) */ + EMIT4(0x44, 0x8b, 0x4f, + offsetof(struct sk_buff, len)); + else + /* mov %r9d, off32(%rdi) */ + EMIT3_off32(0x44, 0x8b, 0x8f, + offsetof(struct sk_buff, len)); + + if (is_imm8(offsetof(struct sk_buff, data_len))) + /* sub %r9d, off8(%rdi) */ + EMIT4(0x44, 0x2b, 0x4f, + offsetof(struct sk_buff, data_len)); + else + EMIT3_off32(0x44, 0x2b, 0x8f, + offsetof(struct sk_buff, data_len)); + + if (is_imm8(offsetof(struct sk_buff, data))) + /* mov %r10, off8(%rdi) */ + EMIT4(0x4c, 0x8b, 0x57, + offsetof(struct sk_buff, data)); + else + /* mov %r10, off32(%rdi) */ + EMIT3_off32(0x4c, 0x8b, 0x97, + offsetof(struct sk_buff, data)); + } + + for (i = 0; i < insn_cnt; i++, insn++) { + const s32 K = insn->imm; + u32 a_reg = insn->a_reg; + u32 x_reg = insn->x_reg; + u8 b1 = 0, b2 = 0, b3 = 0; + s64 jmp_offset; + u8 jmp_cond; + int ilen; + u8 *func; + + switch (insn->code) { + /* ALU */ + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU64 | BPF_ADD | BPF_X: + case BPF_ALU64 | BPF_SUB | BPF_X: + case BPF_ALU64 | BPF_AND | BPF_X: + case BPF_ALU64 | BPF_OR | BPF_X: + case BPF_ALU64 | BPF_XOR | BPF_X: + switch (BPF_OP(insn->code)) { + case BPF_ADD: b2 = 0x01; break; + case BPF_SUB: b2 = 0x29; break; + case BPF_AND: b2 = 0x21; break; + case BPF_OR: b2 = 0x09; break; + case BPF_XOR: b2 = 0x31; break; } - } + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_2mod(0x48, a_reg, x_reg)); + else if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT1(add_2mod(0x40, a_reg, x_reg)); + EMIT2(b2, add_2reg(0xC0, a_reg, x_reg)); + break; - switch (filter[0].code) { - case BPF_S_RET_K: - case BPF_S_LD_W_LEN: - case BPF_S_ANC_PROTOCOL: - case BPF_S_ANC_IFINDEX: - case BPF_S_ANC_MARK: - case BPF_S_ANC_RXHASH: - case BPF_S_ANC_CPU: - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - case BPF_S_ANC_QUEUE: - case BPF_S_ANC_PKTTYPE: - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: - /* first instruction sets A register (or is RET 'constant') */ + /* mov A, X */ + case BPF_ALU64 | BPF_MOV | BPF_X: + EMIT_mov(a_reg, x_reg); break; - default: - /* make sure we dont leak kernel information to user */ - CLEAR_A(); /* A = 0 */ - } - for (i = 0; i < flen; i++) { - unsigned int K = filter[i].k; + /* mov32 A, X */ + case BPF_ALU | BPF_MOV | BPF_X: + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT1(add_2mod(0x40, a_reg, x_reg)); + EMIT2(0x89, add_2reg(0xC0, a_reg, x_reg)); + break; - switch (filter[i].code) { - case BPF_S_ALU_ADD_X: /* A += X; */ - seen |= SEEN_XREG; - EMIT2(0x01, 0xd8); /* add %ebx,%eax */ - break; - case BPF_S_ALU_ADD_K: /* A += K; */ - if (!K) - break; - if 
(is_imm8(K)) - EMIT3(0x83, 0xc0, K); /* add imm8,%eax */ - else - EMIT1_off32(0x05, K); /* add imm32,%eax */ - break; - case BPF_S_ALU_SUB_X: /* A -= X; */ - seen |= SEEN_XREG; - EMIT2(0x29, 0xd8); /* sub %ebx,%eax */ - break; - case BPF_S_ALU_SUB_K: /* A -= K */ - if (!K) - break; - if (is_imm8(K)) - EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */ - else - EMIT1_off32(0x2d, K); /* sub imm32,%eax */ - break; - case BPF_S_ALU_MUL_X: /* A *= X; */ - seen |= SEEN_XREG; - EMIT3(0x0f, 0xaf, 0xc3); /* imul %ebx,%eax */ - break; - case BPF_S_ALU_MUL_K: /* A *= K */ - if (is_imm8(K)) - EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */ - else { - EMIT2(0x69, 0xc0); /* imul imm32,%eax */ - EMIT(K, 4); - } - break; - case BPF_S_ALU_DIV_X: /* A /= X; */ - seen |= SEEN_XREG; - EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ - if (ctx->pc_ret0 > 0) { - /* addrs[pc_ret0 - 1] is start address of target - * (addrs[i] - 4) is the address following this jmp - * ("xor %edx,%edx; div %ebx" being 4 bytes long) - */ - EMIT_COND_JMP(X86_JE, addrs[ctx->pc_ret0 - 1] - - (addrs[i] - 4)); - } else { - EMIT_COND_JMP(X86_JNE, 2 + 5); - CLEAR_A(); - EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */ - } - EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */ - break; - case BPF_S_ALU_MOD_X: /* A %= X; */ - seen |= SEEN_XREG; - EMIT2(0x85, 0xdb); /* test %ebx,%ebx */ - if (ctx->pc_ret0 > 0) { - /* addrs[pc_ret0 - 1] is start address of target - * (addrs[i] - 6) is the address following this jmp - * ("xor %edx,%edx; div %ebx;mov %edx,%eax" being 6 bytes long) - */ - EMIT_COND_JMP(X86_JE, addrs[ctx->pc_ret0 - 1] - - (addrs[i] - 6)); - } else { - EMIT_COND_JMP(X86_JNE, 2 + 5); - CLEAR_A(); - EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 6)); /* jmp .+off32 */ - } - EMIT2(0x31, 0xd2); /* xor %edx,%edx */ - EMIT2(0xf7, 0xf3); /* div %ebx */ - EMIT2(0x89, 0xd0); /* mov %edx,%eax */ - break; - case BPF_S_ALU_MOD_K: /* A %= K; */ - if (K == 1) { - CLEAR_A(); - break; - } - EMIT2(0x31, 0xd2); /* xor %edx,%edx */ - EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ - EMIT2(0xf7, 0xf1); /* div %ecx */ - EMIT2(0x89, 0xd0); /* mov %edx,%eax */ - break; - case BPF_S_ALU_DIV_K: /* A /= K */ - if (K == 1) - break; - EMIT2(0x31, 0xd2); /* xor %edx,%edx */ - EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */ - EMIT2(0xf7, 0xf1); /* div %ecx */ - break; - case BPF_S_ALU_AND_X: - seen |= SEEN_XREG; - EMIT2(0x21, 0xd8); /* and %ebx,%eax */ - break; - case BPF_S_ALU_AND_K: - if (K >= 0xFFFFFF00) { - EMIT2(0x24, K & 0xFF); /* and imm8,%al */ - } else if (K >= 0xFFFF0000) { - EMIT2(0x66, 0x25); /* and imm16,%ax */ - EMIT(K, 2); - } else { - EMIT1_off32(0x25, K); /* and imm32,%eax */ - } - break; - case BPF_S_ALU_OR_X: - seen |= SEEN_XREG; - EMIT2(0x09, 0xd8); /* or %ebx,%eax */ - break; - case BPF_S_ALU_OR_K: - if (is_imm8(K)) - EMIT3(0x83, 0xc8, K); /* or imm8,%eax */ - else - EMIT1_off32(0x0d, K); /* or imm32,%eax */ - break; - case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ - case BPF_S_ALU_XOR_X: - seen |= SEEN_XREG; - EMIT2(0x31, 0xd8); /* xor %ebx,%eax */ - break; - case BPF_S_ALU_XOR_K: /* A ^= K; */ - if (K == 0) - break; - if (is_imm8(K)) - EMIT3(0x83, 0xf0, K); /* xor imm8,%eax */ - else - EMIT1_off32(0x35, K); /* xor imm32,%eax */ - break; - case BPF_S_ALU_LSH_X: /* A <<= X; */ - seen |= SEEN_XREG; - EMIT4(0x89, 0xd9, 0xd3, 0xe0); /* mov %ebx,%ecx; shl %cl,%eax */ - break; - case BPF_S_ALU_LSH_K: - if (K == 0) - break; - else if (K == 1) - EMIT2(0xd1, 0xe0); /* shl %eax */ - else - EMIT3(0xc1, 0xe0, K); - break; - case BPF_S_ALU_RSH_X: /* A >>= 
X; */ - seen |= SEEN_XREG; - EMIT4(0x89, 0xd9, 0xd3, 0xe8); /* mov %ebx,%ecx; shr %cl,%eax */ - break; - case BPF_S_ALU_RSH_K: /* A >>= K; */ - if (K == 0) - break; - else if (K == 1) - EMIT2(0xd1, 0xe8); /* shr %eax */ - else - EMIT3(0xc1, 0xe8, K); - break; - case BPF_S_ALU_NEG: - EMIT2(0xf7, 0xd8); /* neg %eax */ - break; - case BPF_S_RET_K: - if (!K) { - if (ctx->pc_ret0 == -1) - ctx->pc_ret0 = i; - CLEAR_A(); - } else { - EMIT1_off32(0xb8, K); /* mov $imm32,%eax */ - } - /* fallinto */ - case BPF_S_RET_A: - if (seen_or_pass0) { - if (i != flen - 1) { - EMIT_JMP(cleanup_addr - addrs[i]); - break; - } - if (seen_or_pass0 & SEEN_XREG) - EMIT4(0x48, 0x8b, 0x5d, 0xf8); /* mov -8(%rbp),%rbx */ - EMIT1(0xc9); /* leaveq */ - } - EMIT1(0xc3); /* ret */ - break; - case BPF_S_MISC_TAX: /* X = A */ - seen |= SEEN_XREG; - EMIT2(0x89, 0xc3); /* mov %eax,%ebx */ - break; - case BPF_S_MISC_TXA: /* A = X */ - seen |= SEEN_XREG; - EMIT2(0x89, 0xd8); /* mov %ebx,%eax */ - break; - case BPF_S_LD_IMM: /* A = K */ - if (!K) - CLEAR_A(); - else - EMIT1_off32(0xb8, K); /* mov $imm32,%eax */ + /* neg A */ + case BPF_ALU | BPF_NEG: + case BPF_ALU64 | BPF_NEG: + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, a_reg)); + else if (is_ereg(a_reg)) + EMIT1(add_1mod(0x40, a_reg)); + EMIT2(0xF7, add_1reg(0xD8, a_reg)); + break; + + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU64 | BPF_ADD | BPF_K: + case BPF_ALU64 | BPF_SUB | BPF_K: + case BPF_ALU64 | BPF_AND | BPF_K: + case BPF_ALU64 | BPF_OR | BPF_K: + case BPF_ALU64 | BPF_XOR | BPF_K: + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, a_reg)); + else if (is_ereg(a_reg)) + EMIT1(add_1mod(0x40, a_reg)); + + switch (BPF_OP(insn->code)) { + case BPF_ADD: b3 = 0xC0; break; + case BPF_SUB: b3 = 0xE8; break; + case BPF_AND: b3 = 0xE0; break; + case BPF_OR: b3 = 0xC8; break; + case BPF_XOR: b3 = 0xF0; break; + } + + if (is_imm8(K)) + EMIT3(0x83, add_1reg(b3, a_reg), K); + else + EMIT2_off32(0x81, add_1reg(b3, a_reg), K); + break; + + case BPF_ALU64 | BPF_MOV | BPF_K: + /* optimization: if imm32 is positive, + * use 'mov eax, imm32' (which zero-extends imm32) + * to save 2 bytes + */ + if (K < 0) { + /* 'mov rax, imm32' sign extends imm32 */ + b1 = add_1mod(0x48, a_reg); + b2 = 0xC7; + b3 = 0xC0; + EMIT3_off32(b1, b2, add_1reg(b3, a_reg), K); break; - case BPF_S_LDX_IMM: /* X = K */ - seen |= SEEN_XREG; - if (!K) - CLEAR_X(); + } + + case BPF_ALU | BPF_MOV | BPF_K: + /* mov %eax, imm32 */ + if (is_ereg(a_reg)) + EMIT1(add_1mod(0x40, a_reg)); + EMIT1_off32(add_1reg(0xB8, a_reg), K); + break; + + /* A %= X, A /= X, A %= K, A /= K */ + case BPF_ALU | BPF_MOD | BPF_X: + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU | BPF_MOD | BPF_K: + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU64 | BPF_MOD | BPF_X: + case BPF_ALU64 | BPF_DIV | BPF_X: + case BPF_ALU64 | BPF_MOD | BPF_K: + case BPF_ALU64 | BPF_DIV | BPF_K: + EMIT1(0x50); /* push rax */ + EMIT1(0x52); /* push rdx */ + + if (BPF_SRC(insn->code) == BPF_X) + /* mov r11, X */ + EMIT_mov(AUX_REG, x_reg); + else + /* mov r11, K */ + EMIT3_off32(0x49, 0xC7, 0xC3, K); + + /* mov rax, A */ + EMIT_mov(BPF_REG_0, a_reg); + + /* xor edx, edx + * equivalent to 'xor rdx, rdx', but one byte less + */ + EMIT2(0x31, 0xd2); + + if (BPF_SRC(insn->code) == BPF_X) { + /* if (X == 0) return 0 */ + + /* cmp r11, 0 */ + EMIT4(0x49, 0x83, 0xFB, 0x00); + + /* jne .+9 (skip over pop, pop, 
xor and jmp) */ + EMIT2(X86_JNE, 1 + 1 + 2 + 5); + EMIT1(0x5A); /* pop rdx */ + EMIT1(0x58); /* pop rax */ + EMIT2(0x31, 0xc0); /* xor eax, eax */ + + /* jmp cleanup_addr + * addrs[i] - 11, because there are 11 bytes + * after this insn: div, mov, pop, pop, mov + */ + jmp_offset = ctx->cleanup_addr - (addrs[i] - 11); + EMIT1_off32(0xE9, jmp_offset); + } + + if (BPF_CLASS(insn->code) == BPF_ALU64) + /* div r11 */ + EMIT3(0x49, 0xF7, 0xF3); + else + /* div r11d */ + EMIT3(0x41, 0xF7, 0xF3); + + if (BPF_OP(insn->code) == BPF_MOD) + /* mov r11, rdx */ + EMIT3(0x49, 0x89, 0xD3); + else + /* mov r11, rax */ + EMIT3(0x49, 0x89, 0xC3); + + EMIT1(0x5A); /* pop rdx */ + EMIT1(0x58); /* pop rax */ + + /* mov A, r11 */ + EMIT_mov(a_reg, AUX_REG); + break; + + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU64 | BPF_MUL | BPF_K: + case BPF_ALU64 | BPF_MUL | BPF_X: + EMIT1(0x50); /* push rax */ + EMIT1(0x52); /* push rdx */ + + /* mov r11, A */ + EMIT_mov(AUX_REG, a_reg); + + if (BPF_SRC(insn->code) == BPF_X) + /* mov rax, X */ + EMIT_mov(BPF_REG_0, x_reg); + else + /* mov rax, K */ + EMIT3_off32(0x48, 0xC7, 0xC0, K); + + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, AUX_REG)); + else if (is_ereg(AUX_REG)) + EMIT1(add_1mod(0x40, AUX_REG)); + /* mul(q) r11 */ + EMIT2(0xF7, add_1reg(0xE0, AUX_REG)); + + /* mov r11, rax */ + EMIT_mov(AUX_REG, BPF_REG_0); + + EMIT1(0x5A); /* pop rdx */ + EMIT1(0x58); /* pop rax */ + + /* mov A, r11 */ + EMIT_mov(a_reg, AUX_REG); + break; + + /* shifts */ + case BPF_ALU | BPF_LSH | BPF_K: + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU | BPF_ARSH | BPF_K: + case BPF_ALU64 | BPF_LSH | BPF_K: + case BPF_ALU64 | BPF_RSH | BPF_K: + case BPF_ALU64 | BPF_ARSH | BPF_K: + if (BPF_CLASS(insn->code) == BPF_ALU64) + EMIT1(add_1mod(0x48, a_reg)); + else if (is_ereg(a_reg)) + EMIT1(add_1mod(0x40, a_reg)); + + switch (BPF_OP(insn->code)) { + case BPF_LSH: b3 = 0xE0; break; + case BPF_RSH: b3 = 0xE8; break; + case BPF_ARSH: b3 = 0xF8; break; + } + EMIT3(0xC1, add_1reg(b3, a_reg), K); + break; + + case BPF_ALU | BPF_END | BPF_FROM_BE: + switch (K) { + case 16: + /* emit 'ror %ax, 8' to swap lower 2 bytes */ + EMIT1(0x66); + if (is_ereg(a_reg)) + EMIT1(0x41); + EMIT3(0xC1, add_1reg(0xC8, a_reg), 8); + break; + case 32: + /* emit 'bswap eax' to swap lower 4 bytes */ + if (is_ereg(a_reg)) + EMIT2(0x41, 0x0F); else - EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */ - break; - case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */ - seen |= SEEN_MEM; - EMIT3(0x8b, 0x45, 0xf0 - K*4); + EMIT1(0x0F); + EMIT1(add_1reg(0xC8, a_reg)); break; - case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */ - seen |= SEEN_XREG | SEEN_MEM; - EMIT3(0x8b, 0x5d, 0xf0 - K*4); - break; - case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */ - seen |= SEEN_MEM; - EMIT3(0x89, 0x45, 0xf0 - K*4); - break; - case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */ - seen |= SEEN_XREG | SEEN_MEM; - EMIT3(0x89, 0x5d, 0xf0 - K*4); - break; - case BPF_S_LD_W_LEN: /* A = skb->len; */ - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); - if (is_imm8(offsetof(struct sk_buff, len))) - /* mov off8(%rdi),%eax */ - EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len)); - else { - EMIT2(0x8b, 0x87); - EMIT(offsetof(struct sk_buff, len), 4); - } - break; - case BPF_S_LDX_W_LEN: /* X = skb->len; */ - seen |= SEEN_XREG; - if (is_imm8(offsetof(struct sk_buff, len))) - /* mov off8(%rdi),%ebx */ - EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len)); - else { - EMIT2(0x8b, 0x9f); - 
EMIT(offsetof(struct sk_buff, len), 4); - } - break; - case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); - if (is_imm8(offsetof(struct sk_buff, protocol))) { - /* movzwl off8(%rdi),%eax */ - EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol)); - } else { - EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ - EMIT(offsetof(struct sk_buff, protocol), 4); - } - EMIT2(0x86, 0xc4); /* ntohs() : xchg %al,%ah */ - break; - case BPF_S_ANC_IFINDEX: - if (is_imm8(offsetof(struct sk_buff, dev))) { - /* movq off8(%rdi),%rax */ - EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev)); - } else { - EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */ - EMIT(offsetof(struct sk_buff, dev), 4); - } - EMIT3(0x48, 0x85, 0xc0); /* test %rax,%rax */ - EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6)); - BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); - EMIT2(0x8b, 0x80); /* mov off32(%rax),%eax */ - EMIT(offsetof(struct net_device, ifindex), 4); - break; - case BPF_S_ANC_MARK: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); - if (is_imm8(offsetof(struct sk_buff, mark))) { - /* mov off8(%rdi),%eax */ - EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark)); - } else { - EMIT2(0x8b, 0x87); - EMIT(offsetof(struct sk_buff, mark), 4); - } - break; - case BPF_S_ANC_RXHASH: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); - if (is_imm8(offsetof(struct sk_buff, hash))) { - /* mov off8(%rdi),%eax */ - EMIT3(0x8b, 0x47, offsetof(struct sk_buff, hash)); - } else { - EMIT2(0x8b, 0x87); - EMIT(offsetof(struct sk_buff, hash), 4); - } - break; - case BPF_S_ANC_QUEUE: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); - if (is_imm8(offsetof(struct sk_buff, queue_mapping))) { - /* movzwl off8(%rdi),%eax */ - EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping)); - } else { - EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ - EMIT(offsetof(struct sk_buff, queue_mapping), 4); - } - break; - case BPF_S_ANC_CPU: -#ifdef CONFIG_SMP - EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */ - EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */ -#else - CLEAR_A(); -#endif - break; - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); - if (is_imm8(offsetof(struct sk_buff, vlan_tci))) { - /* movzwl off8(%rdi),%eax */ - EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, vlan_tci)); - } else { - EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */ - EMIT(offsetof(struct sk_buff, vlan_tci), 4); - } - BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); - if (filter[i].code == BPF_S_ANC_VLAN_TAG) { - EMIT3(0x80, 0xe4, 0xef); /* and $0xef,%ah */ - } else { - EMIT3(0xc1, 0xe8, 0x0c); /* shr $0xc,%eax */ - EMIT3(0x83, 0xe0, 0x01); /* and $0x1,%eax */ - } - break; - case BPF_S_ANC_PKTTYPE: - { - int off = pkt_type_offset(); - - if (off < 0) - return -EINVAL; - if (is_imm8(off)) { - /* movzbl off8(%rdi),%eax */ - EMIT4(0x0f, 0xb6, 0x47, off); - } else { - /* movbl off32(%rdi),%eax */ - EMIT3(0x0f, 0xb6, 0x87); - EMIT(off, 4); - } - EMIT3(0x83, 0xe0, PKT_TYPE_MAX); /* and $0x7,%eax */ + case 64: + /* emit 'bswap rax' to swap 8 bytes */ + EMIT3(add_1mod(0x48, a_reg), 0x0F, + add_1reg(0xC8, a_reg)); break; } - case BPF_S_LD_W_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_word); -common_load: seen |= SEEN_DATAREF; - t_offset = func - (image + addrs[i]); - EMIT1_off32(0xbe, K); /* mov imm32,%esi */ - EMIT1_off32(0xe8, t_offset); /* call */ 
- break; - case BPF_S_LD_H_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_half); - goto common_load; - case BPF_S_LD_B_ABS: - func = CHOOSE_LOAD_FUNC(K, sk_load_byte); - goto common_load; - case BPF_S_LDX_B_MSH: - func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); - seen |= SEEN_DATAREF | SEEN_XREG; - t_offset = func - (image + addrs[i]); - EMIT1_off32(0xbe, K); /* mov imm32,%esi */ - EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */ - break; - case BPF_S_LD_W_IND: - func = sk_load_word; -common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; - t_offset = func - (image + addrs[i]); - if (K) { - if (is_imm8(K)) { - EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */ - } else { - EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */ - EMIT(K, 4); - } - } else { - EMIT2(0x89,0xde); /* mov %ebx,%esi */ - } - EMIT1_off32(0xe8, t_offset); /* call sk_load_xxx_ind */ - break; - case BPF_S_LD_H_IND: - func = sk_load_half; - goto common_load_ind; - case BPF_S_LD_B_IND: - func = sk_load_byte; - goto common_load_ind; - case BPF_S_JMP_JA: - t_offset = addrs[i + K] - addrs[i]; - EMIT_JMP(t_offset); - break; - COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE); - COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB); - COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE); - COND_SEL(BPF_S_JMP_JSET_K,X86_JNE, X86_JE); - COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE); - COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB); - COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE); - COND_SEL(BPF_S_JMP_JSET_X,X86_JNE, X86_JE); - -cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; - t_offset = addrs[i + filter[i].jt] - addrs[i]; - - /* same targets, can avoid doing the test :) */ - if (filter[i].jt == filter[i].jf) { - EMIT_JMP(t_offset); - break; - } + break; + + case BPF_ALU | BPF_END | BPF_FROM_LE: + break; + + /* ST: *(u8*)(a_reg + off) = imm */ + case BPF_ST | BPF_MEM | BPF_B: + if (is_ereg(a_reg)) + EMIT2(0x41, 0xC6); + else + EMIT1(0xC6); + goto st; + case BPF_ST | BPF_MEM | BPF_H: + if (is_ereg(a_reg)) + EMIT3(0x66, 0x41, 0xC7); + else + EMIT2(0x66, 0xC7); + goto st; + case BPF_ST | BPF_MEM | BPF_W: + if (is_ereg(a_reg)) + EMIT2(0x41, 0xC7); + else + EMIT1(0xC7); + goto st; + case BPF_ST | BPF_MEM | BPF_DW: + EMIT2(add_1mod(0x48, a_reg), 0xC7); + +st: if (is_imm8(insn->off)) + EMIT2(add_1reg(0x40, a_reg), insn->off); + else + EMIT1_off32(add_1reg(0x80, a_reg), insn->off); + + EMIT(K, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); + break; + + /* STX: *(u8*)(a_reg + off) = x_reg */ + case BPF_STX | BPF_MEM | BPF_B: + /* emit 'mov byte ptr [rax + off], al' */ + if (is_ereg(a_reg) || is_ereg(x_reg) || + /* have to add extra byte for x86 SIL, DIL regs */ + x_reg == BPF_REG_1 || x_reg == BPF_REG_2) + EMIT2(add_2mod(0x40, a_reg, x_reg), 0x88); + else + EMIT1(0x88); + goto stx; + case BPF_STX | BPF_MEM | BPF_H: + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT3(0x66, add_2mod(0x40, a_reg, x_reg), 0x89); + else + EMIT2(0x66, 0x89); + goto stx; + case BPF_STX | BPF_MEM | BPF_W: + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT2(add_2mod(0x40, a_reg, x_reg), 0x89); + else + EMIT1(0x89); + goto stx; + case BPF_STX | BPF_MEM | BPF_DW: + EMIT2(add_2mod(0x48, a_reg, x_reg), 0x89); +stx: if (is_imm8(insn->off)) + EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off); + else + EMIT1_off32(add_2reg(0x80, a_reg, x_reg), + insn->off); + break; + + /* LDX: a_reg = *(u8*)(x_reg + off) */ + case BPF_LDX | BPF_MEM | BPF_B: + /* emit 'movzx rax, byte ptr [rax + off]' */ + EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB6); + goto ldx; + case BPF_LDX | BPF_MEM | BPF_H: + /* emit 'movzx rax, word ptr [rax 
+ off]' */ + EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB7); + goto ldx; + case BPF_LDX | BPF_MEM | BPF_W: + /* emit 'mov eax, dword ptr [rax+0x14]' */ + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT2(add_2mod(0x40, x_reg, a_reg), 0x8B); + else + EMIT1(0x8B); + goto ldx; + case BPF_LDX | BPF_MEM | BPF_DW: + /* emit 'mov rax, qword ptr [rax+0x14]' */ + EMIT2(add_2mod(0x48, x_reg, a_reg), 0x8B); +ldx: /* if insn->off == 0 we can save one extra byte, but + * special case of x86 r13 which always needs an offset + * is not worth the hassle + */ + if (is_imm8(insn->off)) + EMIT2(add_2reg(0x40, x_reg, a_reg), insn->off); + else + EMIT1_off32(add_2reg(0x80, x_reg, a_reg), + insn->off); + break; + + /* STX XADD: lock *(u32*)(a_reg + off) += x_reg */ + case BPF_STX | BPF_XADD | BPF_W: + /* emit 'lock add dword ptr [rax + off], eax' */ + if (is_ereg(a_reg) || is_ereg(x_reg)) + EMIT3(0xF0, add_2mod(0x40, a_reg, x_reg), 0x01); + else + EMIT2(0xF0, 0x01); + goto xadd; + case BPF_STX | BPF_XADD | BPF_DW: + EMIT3(0xF0, add_2mod(0x48, a_reg, x_reg), 0x01); +xadd: if (is_imm8(insn->off)) + EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off); + else + EMIT1_off32(add_2reg(0x80, a_reg, x_reg), + insn->off); + break; + + /* call */ + case BPF_JMP | BPF_CALL: + func = (u8 *) __bpf_call_base + K; + jmp_offset = func - (image + addrs[i]); + if (ctx->seen_ld_abs) { + EMIT2(0x41, 0x52); /* push %r10 */ + EMIT2(0x41, 0x51); /* push %r9 */ + /* need to adjust jmp offset, since + * pop %r9, pop %r10 take 4 bytes after call insn + */ + jmp_offset += 4; + } + if (!K || !is_simm32(jmp_offset)) { + pr_err("unsupported bpf func %d addr %p image %p\n", + K, func, image); + return -EINVAL; + } + EMIT1_off32(0xE8, jmp_offset); + if (ctx->seen_ld_abs) { + EMIT2(0x41, 0x59); /* pop %r9 */ + EMIT2(0x41, 0x5A); /* pop %r10 */ + } + break; + + /* cond jump */ + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JNE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JSGT | BPF_X: + case BPF_JMP | BPF_JSGE | BPF_X: + /* cmp a_reg, x_reg */ + EMIT3(add_2mod(0x48, a_reg, x_reg), 0x39, + add_2reg(0xC0, a_reg, x_reg)); + goto emit_cond_jmp; + + case BPF_JMP | BPF_JSET | BPF_X: + /* test a_reg, x_reg */ + EMIT3(add_2mod(0x48, a_reg, x_reg), 0x85, + add_2reg(0xC0, a_reg, x_reg)); + goto emit_cond_jmp; + + case BPF_JMP | BPF_JSET | BPF_K: + /* test a_reg, imm32 */ + EMIT1(add_1mod(0x48, a_reg)); + EMIT2_off32(0xF7, add_1reg(0xC0, a_reg), K); + goto emit_cond_jmp; + + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JNE | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JSGT | BPF_K: + case BPF_JMP | BPF_JSGE | BPF_K: + /* cmp a_reg, imm8/32 */ + EMIT1(add_1mod(0x48, a_reg)); + + if (is_imm8(K)) + EMIT3(0x83, add_1reg(0xF8, a_reg), K); + else + EMIT2_off32(0x81, add_1reg(0xF8, a_reg), K); + +emit_cond_jmp: /* convert BPF opcode to x86 */ + switch (BPF_OP(insn->code)) { + case BPF_JEQ: + jmp_cond = X86_JE; + break; + case BPF_JSET: + case BPF_JNE: + jmp_cond = X86_JNE; + break; + case BPF_JGT: + /* GT is unsigned '>', JA in x86 */ + jmp_cond = X86_JA; + break; + case BPF_JGE: + /* GE is unsigned '>=', JAE in x86 */ + jmp_cond = X86_JAE; + break; + case BPF_JSGT: + /* signed '>', GT in x86 */ + jmp_cond = X86_JG; + break; + case BPF_JSGE: + /* signed '>=', GE in x86 */ + jmp_cond = X86_JGE; + break; + default: /* to silence gcc warning */ + return -EFAULT; + } + jmp_offset = addrs[i + insn->off] - addrs[i]; + if (is_imm8(jmp_offset)) { + 
EMIT2(jmp_cond, jmp_offset); + } else if (is_simm32(jmp_offset)) { + EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset); + } else { + pr_err("cond_jmp gen bug %llx\n", jmp_offset); + return -EFAULT; + } + + break; - switch (filter[i].code) { - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JEQ_X: - seen |= SEEN_XREG; - EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */ - break; - case BPF_S_JMP_JSET_X: - seen |= SEEN_XREG; - EMIT2(0x85, 0xd8); /* test %ebx,%eax */ - break; - case BPF_S_JMP_JEQ_K: - if (K == 0) { - EMIT2(0x85, 0xc0); /* test %eax,%eax */ - break; - } - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGE_K: - if (K <= 127) - EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */ + case BPF_JMP | BPF_JA: + jmp_offset = addrs[i + insn->off] - addrs[i]; + if (!jmp_offset) + /* optimize out nop jumps */ + break; +emit_jmp: + if (is_imm8(jmp_offset)) { + EMIT2(0xEB, jmp_offset); + } else if (is_simm32(jmp_offset)) { + EMIT1_off32(0xE9, jmp_offset); + } else { + pr_err("jmp gen bug %llx\n", jmp_offset); + return -EFAULT; + } + break; + + case BPF_LD | BPF_IND | BPF_W: + func = sk_load_word; + goto common_load; + case BPF_LD | BPF_ABS | BPF_W: + func = CHOOSE_LOAD_FUNC(K, sk_load_word); +common_load: ctx->seen_ld_abs = true; + jmp_offset = func - (image + addrs[i]); + if (!func || !is_simm32(jmp_offset)) { + pr_err("unsupported bpf func %d addr %p image %p\n", + K, func, image); + return -EINVAL; + } + if (BPF_MODE(insn->code) == BPF_ABS) { + /* mov %esi, imm32 */ + EMIT1_off32(0xBE, K); + } else { + /* mov %rsi, x_reg */ + EMIT_mov(BPF_REG_2, x_reg); + if (K) { + if (is_imm8(K)) + /* add %esi, imm8 */ + EMIT3(0x83, 0xC6, K); else - EMIT1_off32(0x3d, K); /* cmp imm32,%eax */ - break; - case BPF_S_JMP_JSET_K: - if (K <= 0xFF) - EMIT2(0xa8, K); /* test imm8,%al */ - else if (!(K & 0xFFFF00FF)) - EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */ - else if (K <= 0xFFFF) { - EMIT2(0x66, 0xa9); /* test imm16,%ax */ - EMIT(K, 2); - } else { - EMIT1_off32(0xa9, K); /* test imm32,%eax */ - } - break; + /* add %esi, imm32 */ + EMIT2_off32(0x81, 0xC6, K); } - if (filter[i].jt != 0) { - if (filter[i].jf && f_offset) - t_offset += is_near(f_offset) ? 2 : 5; - EMIT_COND_JMP(t_op, t_offset); - if (filter[i].jf) - EMIT_JMP(f_offset); - break; - } - EMIT_COND_JMP(f_op, f_offset); - break; + } + /* skb pointer is in R6 (%rbx), it will be copied into + * %rdi if skb_copy_bits() call is necessary. + * sk_load_* helpers also use %r10 and %r9d. 
+ * See bpf_jit.S + */ + EMIT1_off32(0xE8, jmp_offset); /* call */ + break; + + case BPF_LD | BPF_IND | BPF_H: + func = sk_load_half; + goto common_load; + case BPF_LD | BPF_ABS | BPF_H: + func = CHOOSE_LOAD_FUNC(K, sk_load_half); + goto common_load; + case BPF_LD | BPF_IND | BPF_B: + func = sk_load_byte; + goto common_load; + case BPF_LD | BPF_ABS | BPF_B: + func = CHOOSE_LOAD_FUNC(K, sk_load_byte); + goto common_load; + + case BPF_JMP | BPF_EXIT: + if (i != insn_cnt - 1) { + jmp_offset = ctx->cleanup_addr - addrs[i]; + goto emit_jmp; + } + /* update cleanup_addr */ + ctx->cleanup_addr = proglen; + /* mov rbx, qword ptr [rbp-X] */ + EMIT3_off32(0x48, 0x8B, 0x9D, -stacksize); + /* mov r13, qword ptr [rbp-X] */ + EMIT3_off32(0x4C, 0x8B, 0xAD, -stacksize + 8); + /* mov r14, qword ptr [rbp-X] */ + EMIT3_off32(0x4C, 0x8B, 0xB5, -stacksize + 16); + /* mov r15, qword ptr [rbp-X] */ + EMIT3_off32(0x4C, 0x8B, 0xBD, -stacksize + 24); + + EMIT1(0xC9); /* leave */ + EMIT1(0xC3); /* ret */ + break; + default: - /* hmm, too complex filter, give up with jit compiler */ + /* By design x64 JIT should support all BPF instructions + * This error will be seen if new instruction was added + * to interpreter, but not to JIT + * or if there is junk in sk_filter + */ + pr_err("bpf_jit: unknown opcode %02x\n", insn->code); return -EINVAL; } + ilen = prog - temp; if (image) { if (unlikely(proglen + ilen > oldproglen)) { - pr_err("bpb_jit_compile fatal error\n"); + pr_err("bpf_jit_compile fatal error\n"); return -EFAULT; } memcpy(image + proglen, temp, ilen); @@ -726,21 +859,14 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i]; addrs[i] = proglen; prog = temp; } - /* last bpf instruction is always a RET : - * use it to give the cleanup instruction(s) addr - */ - ctx->cleanup_addr = proglen - 1; /* ret */ - if (seen_or_pass0) - ctx->cleanup_addr -= 1; /* leaveq */ - if (seen_or_pass0 & SEEN_XREG) - ctx->cleanup_addr -= 4; /* mov -8(%rbp),%rbx */ - - ctx->seen = seen; - return proglen; } void bpf_jit_compile(struct sk_filter *prog) +{ +} + +void bpf_int_jit_compile(struct sk_filter *prog) { struct bpf_binary_header *header = NULL; int proglen, oldproglen = 0; @@ -768,8 +894,6 @@ void bpf_jit_compile(struct sk_filter *prog) addrs[i] = proglen; } ctx.cleanup_addr = proglen; - ctx.seen = SEEN_XREG | SEEN_DATAREF | SEEN_MEM; - ctx.pc_ret0 = -1; for (pass = 0; pass < 10; pass++) { proglen = do_jit(prog, addrs, image, oldproglen, &ctx); diff --git a/include/linux/filter.h b/include/linux/filter.h index 4457b383961c..9d5ae0a2c954 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -207,6 +207,9 @@ void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to); void sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); +u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); +void bpf_int_jit_compile(struct sk_filter *fp); + #ifdef CONFIG_BPF_JIT #include #include diff --git a/net/core/filter.c b/net/core/filter.c index c442a0d7d0f7..32c5b44c537e 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1524,6 +1524,10 @@ out_err: return ERR_PTR(err); } +void __weak bpf_int_jit_compile(struct sk_filter *prog) +{ +} + static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp, struct sock *sk) { @@ -1544,9 +1548,12 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp, /* JIT compiler couldn't process this filter, so do the * internal BPF translation for the optimized interpreter. 
*/ - if (!fp->jited) + if (!fp->jited) { fp = __sk_migrate_filter(fp, sk); + /* Probe if internal BPF can be jit-ed */ + bpf_int_jit_compile(fp); + } return fp; } -- cgit v1.2.3 From 84f6e11f21d40025ae9b0ebbf1c9b3be91ea8e1a Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Fri, 16 May 2014 16:14:07 +0200 Subject: ARM: mvebu: use the fixed-link PHY DT binding for the Armada XP Matrix board The Armada XP Matrix board has an Ethernet PHY that isn't configurable through the MDIO bus, so we use the newly introduced fixed-link PHY DT binding to represent the PHY of this platform and get the network working. Signed-off-by: Thomas Petazzoni Signed-off-by: David S. Miller --- arch/arm/boot/dts/armada-xp-matrix.dts | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/arm/boot/dts/armada-xp-matrix.dts b/arch/arm/boot/dts/armada-xp-matrix.dts index c2242745b9b8..3bb8c008b14c 100644 --- a/arch/arm/boot/dts/armada-xp-matrix.dts +++ b/arch/arm/boot/dts/armada-xp-matrix.dts @@ -61,6 +61,10 @@ ethernet@30000 { status = "okay"; phy-mode = "sgmii"; + fixed-link { + speed = <1000>; + full-duplex; + }; }; pcie-controller { -- cgit v1.2.3 From 4f4aa2ec24dc45881849833a439558d3a378028c Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Sun, 18 May 2014 00:22:38 +0200 Subject: ssb: sprom: add dev_id field for value overriding standard ID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some devices may have different features despite sharing the same ID (e.g. PCI ID). For example 14e4:4331 is usually a dual-band device, but this can be "limited". A device with "pci/x/y/devid=0x4332" supports 2.4 GHz only. Similarly, 0x4333 means support for 5 GHz only. Add an entry in the SPROM so the info described above can be extracted and stored. Signed-off-by: Rafał Miłecki Acked-by: Hauke Mehrtens Signed-off-by: John W. Linville --- arch/mips/bcm47xx/sprom.c | 1 + include/linux/ssb/ssb.h | 1 + 2 files changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c index a8b5408dd349..da4cdb16844e 100644 --- a/arch/mips/bcm47xx/sprom.c +++ b/arch/mips/bcm47xx/sprom.c @@ -168,6 +168,7 @@ static void nvram_read_alpha2(const char *prefix, const char *name, static void bcm47xx_fill_sprom_r1234589(struct ssb_sprom *sprom, const char *prefix, bool fallback) { + nvram_read_u16(prefix, NULL, "devid", &sprom->dev_id, 0, fallback); nvram_read_u8(prefix, NULL, "ledbh0", &sprom->gpio0, 0xff, fallback); nvram_read_u8(prefix, NULL, "ledbh1", &sprom->gpio1, 0xff, fallback); nvram_read_u8(prefix, NULL, "ledbh2", &sprom->gpio2, 0xff, fallback); diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h index 07ef9b82b66d..4568a5cc9ab8 100644 --- a/include/linux/ssb/ssb.h +++ b/include/linux/ssb/ssb.h @@ -33,6 +33,7 @@ struct ssb_sprom { u8 et1phyaddr; /* MII address for enet1 */ u8 et0mdcport; /* MDIO for enet0 */ u8 et1mdcport; /* MDIO for enet1 */ + u16 dev_id; /* Device ID overriding e.g. PCI ID */ u16 board_rev; /* Board revision number from SPROM. */ u16 board_num; /* Board number from SPROM. */ u16 board_type; /* Board type from SPROM. */ -- cgit v1.2.3 From b16001d745fbc900cc96c8ca2cd2cd08e738c421 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Mon, 19 May 2014 22:42:33 +0200 Subject: ARM: shmobile: armadillo: initialize all struct pwm_lookup members Initializing all the struct pwm_lookup members makes it possible to get rid of the struct tpu_pwm_platform_data, as the polarity initialization will be taken care of by the PWM core.
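To illustrate what the core now does with a fully populated entry, here is a minimal consumer-side sketch (not part of this patch; the probe function name is hypothetical, error handling is trimmed, and it assumes the lookup table below has been registered with pwm_add_table()):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/pwm.h>

static int example_probe(struct platform_device *pdev)
{
	struct pwm_device *pwm;

	/* pwm_get() matches dev_id/con_id against the registered
	 * pwm_lookup entries and applies the period and polarity
	 * recorded there, so the consumer no longer sets them up. */
	pwm = devm_pwm_get(&pdev->dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	dev_info(&pdev->dev, "period: %u ns\n", pwm_get_period(pwm));
	return 0;
}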
Signed-off-by: Alexandre Belloni Acked-by: Laurent Pinchart Acked-by: Simon Horman Signed-off-by: Thierry Reding --- arch/arm/mach-shmobile/board-armadillo800eva.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 2858f380beae..1bf61dad9a35 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -31,7 +31,7 @@ #include #include #include -#include +#include #include #include #include @@ -399,24 +399,22 @@ static struct resource pwm_resources[] = { }, }; -static struct tpu_pwm_platform_data pwm_device_data = { - .channels[2] = { - .polarity = PWM_POLARITY_INVERSED, - } -}; - static struct platform_device pwm_device = { .name = "renesas-tpu-pwm", .id = -1, - .dev = { - .platform_data = &pwm_device_data, - }, .num_resources = ARRAY_SIZE(pwm_resources), .resource = pwm_resources, }; static struct pwm_lookup pwm_lookup[] = { - PWM_LOOKUP("renesas-tpu-pwm", 2, "pwm-backlight.0", NULL), + { + .provider = "renesas-tpu-pwm", + .index = 2, + .dev_id = "pwm-backlight.0", + .con_id = NULL, + .period = 33333, + .polarity = PWM_POLARITY_INVERSED, + }, }; /* LCDC and backlight */ -- cgit v1.2.3 From e02a84a5ad2f46d4a262884df5ad9ab9e833a7b5 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Mon, 19 May 2014 22:42:35 +0200 Subject: ARM: OMAP3: Beagle: initialize all the struct pwm_lookup members The PWM core can retrieve the period from the PWM lookup table, so the struct led_pwm.pwm_period_ns member can be removed. Signed-off-by: Alexandre Belloni Acked-by: Tony Lindgren Signed-off-by: Thierry Reding --- arch/arm/mach-omap2/board-omap3beagle.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index d6ed819ff15c..f27e1ec90b5e 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c @@ -61,7 +61,14 @@ static struct pwm_lookup pwm_lookup[] = { /* LEDB -> PMU_STAT */ - PWM_LOOKUP("twl-pwmled", 1, "leds_pwm", "beagleboard::pmu_stat"), + { + .provider = "twl-pwmled", + .index = 1, + .dev_id = "leds_pwm", + .con_id = "beagleboard::pmu_stat", + .period = 7812500, + .polarity = PWM_POLARITY_NORMAL, + }, }; static struct led_pwm pwm_leds[] = { -- cgit v1.2.3 From fcb355063ffb12a834b3ca1383c9beec9285d568 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Mon, 19 May 2014 22:42:36 +0200 Subject: ARM: pxa: hx4700: initialize all the struct pwm_lookup members Instead of relying on the .pwm_period_ns member of the pwm-backlight driver's platform data, the PWM period can be retrieved from the PWM lookup table. 
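Such a table only takes effect once it is handed to the PWM core; a board file typically registers it during machine init, roughly as sketched here (hypothetical init function name, assuming the hx4700_pwm_lookup table from the patch below):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pwm.h>

static void __init example_board_init(void)
{
	/* After this call, pwm_get() from the "pwm-backlight" device
	 * resolves to pxa27x-pwm.1 channel 0, carrying the period and
	 * polarity recorded in the table. */
	pwm_add_table(hx4700_pwm_lookup, ARRAY_SIZE(hx4700_pwm_lookup));
}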
Signed-off-by: Alexandre Belloni Acked-by: Philipp Zabel Signed-off-by: Thierry Reding --- arch/arm/mach-pxa/hx4700.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c index a7c30eb0c8db..0788a1f171fe 100644 --- a/arch/arm/mach-pxa/hx4700.c +++ b/arch/arm/mach-pxa/hx4700.c @@ -574,7 +574,14 @@ static struct platform_device backlight = { }; static struct pwm_lookup hx4700_pwm_lookup[] = { - PWM_LOOKUP("pxa27x-pwm.1", 0, "pwm-backlight", NULL), + { + .provider = "pxa27x-pwm.1", + .index = 0, + .dev_id = "pwm-backlight", + .con_id = NULL, + .period = 30923, + .polarity = PWM_POLARITY_NORMAL, + }, }; /* -- cgit v1.2.3 From dee401e1fd5eddc8e3d6ae0e8b5c4bd64aa2a369 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Mon, 19 May 2014 22:42:38 +0200 Subject: ARM: OMAP3: Beagle: use PWM_LOOKUP to initialize struct pwm_lookup Use the new variant of the PWM_LOOKUP macro to initialize the PWM lookup table. Signed-off-by: Alexandre Belloni Acked-by: Tony Lindgren Signed-off-by: Thierry Reding --- arch/arm/mach-omap2/board-omap3beagle.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index f27e1ec90b5e..54c135a5b4f7 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c @@ -61,14 +61,8 @@ static struct pwm_lookup pwm_lookup[] = { /* LEDB -> PMU_STAT */ - { - .provider = "twl-pwmled", - .index = 1, - .dev_id = "leds_pwm", - .con_id = "beagleboard::pmu_stat", - .period = 7812500, - .polarity = PWM_POLARITY_NORMAL, - }, + PWM_LOOKUP("twl-pwmled", 1, "leds_pwm", "beagleboard::pmu_stat", + 7812500, PWM_POLARITY_NORMAL), }; static struct led_pwm pwm_leds[] = { -- cgit v1.2.3 From 48d6f146dced79d1df0cb91b30f1cdb749ecdf9f Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Mon, 19 May 2014 22:42:39 +0200 Subject: ARM: shmobile: armadillo: use PWM_LOOKUP to initialize struct pwm_lookup Use the new variant of the PWM_LOOKUP macro to initialize the PWM lookup table. Signed-off-by: Alexandre Belloni Acked-by: Laurent Pinchart Acked-by: Simon Horman Signed-off-by: Thierry Reding --- arch/arm/mach-shmobile/board-armadillo800eva.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c index 1bf61dad9a35..ca82b1e2ebab 100644 --- a/arch/arm/mach-shmobile/board-armadillo800eva.c +++ b/arch/arm/mach-shmobile/board-armadillo800eva.c @@ -407,14 +407,8 @@ static struct platform_device pwm_device = { }; static struct pwm_lookup pwm_lookup[] = { - { - .provider = "renesas-tpu-pwm", - .index = 2, - .dev_id = "pwm-backlight.0", - .con_id = NULL, - .period = 33333, - .polarity = PWM_POLARITY_INVERSED, - }, + PWM_LOOKUP("renesas-tpu-pwm", 2, "pwm-backlight.0", NULL, + 33333, PWM_POLARITY_INVERSED), }; /* LCDC and backlight */ -- cgit v1.2.3 From 9becf5001130bcd24f57584e48467050e85eee03 Mon Sep 17 00:00:00 2001 From: Alexandre Belloni Date: Mon, 19 May 2014 22:42:40 +0200 Subject: ARM: pxa: hx4700: use PWM_LOOKUP to initialize struct pwm_lookup Use the new variant of the PWM_LOOKUP macro to initialize the PWM lookup table. 
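For reference, the new PWM_LOOKUP variant is essentially a designated-initializer wrapper; paraphrased from include/linux/pwm.h of this era (verify the argument order against your tree before relying on it):

#define PWM_LOOKUP(_provider, _index, _dev_id, _con_id, _period, _polarity) \
	{							\
		.provider = _provider,				\
		.index = _index,				\
		.dev_id = _dev_id,				\
		.con_id = _con_id,				\
		.period = _period,				\
		.polarity = _polarity				\
	}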
Signed-off-by: Alexandre Belloni Acked-by: Philipp Zabel Signed-off-by: Thierry Reding --- arch/arm/mach-pxa/hx4700.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-pxa/hx4700.c b/arch/arm/mach-pxa/hx4700.c index 0788a1f171fe..c66ad4edc5e3 100644 --- a/arch/arm/mach-pxa/hx4700.c +++ b/arch/arm/mach-pxa/hx4700.c @@ -574,14 +574,8 @@ static struct platform_device backlight = { }; static struct pwm_lookup hx4700_pwm_lookup[] = { - { - .provider = "pxa27x-pwm.1", - .index = 0, - .dev_id = "pwm-backlight", - .con_id = NULL, - .period = 30923, - .polarity = PWM_POLARITY_NORMAL, - }, + PWM_LOOKUP("pxa27x-pwm.1", 0, "pwm-backlight", NULL, + 30923, PWM_POLARITY_NORMAL), }; /* -- cgit v1.2.3 From 1f8c486fac2cab3615c2872c4ba4bffc1c2ea93c Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 22 May 2014 09:47:51 -0700 Subject: powerpc/fsl: fsl_soc: remove 'fixed-link' parsing code Parsing and registration of fixed PHY devices was needed with the use of of_phy_connect_fixed_link() because this function was using the designated PHY address identifier (first cell of the property) as the address to bind the PHY on the emulated bus. Since commit 3be2a49e5c08d268f8af0dd4fe89a24ea8cdc339 ("of: provide a binding for fixed link PHYs") a new pair of functions has been introduced which allows for dynamic address allocation of these fixed PHY devices, but also parses the old 'fixed-link' 5-digit property. Registration of fixed PHY early in platform code was needed because we could not issue a fixed MDIO bus re-scan within network drivers. The fixed PHYs had to be registered before the network drivers would call of_phy_connect_fixed_link(). All of these caveats are solved now, such that we can safely remove of_add_fixed_phys() now. Signed-off-by: Florian Fainelli Signed-off-by: David S. Miller --- arch/powerpc/sysdev/fsl_soc.c | 32 -------------------------------- 1 file changed, 32 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index 228cf91b91c1..ffd1169ebaab 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include #include @@ -178,37 +177,6 @@ u32 get_baudrate(void) EXPORT_SYMBOL(get_baudrate); #endif /* CONFIG_CPM2 */ -#ifdef CONFIG_FIXED_PHY -static int __init of_add_fixed_phys(void) -{ - int ret; - struct device_node *np; - u32 *fixed_link; - struct fixed_phy_status status = {}; - - for_each_node_by_name(np, "ethernet") { - fixed_link = (u32 *)of_get_property(np, "fixed-link", NULL); - if (!fixed_link) - continue; - - status.link = 1; - status.duplex = fixed_link[1]; - status.speed = fixed_link[2]; - status.pause = fixed_link[3]; - status.asym_pause = fixed_link[4]; - - ret = fixed_phy_add(PHY_POLL, fixed_link[0], &status); - if (ret) { - of_node_put(np); - return ret; - } - } - - return 0; -} -arch_initcall(of_add_fixed_phys); -#endif /* CONFIG_FIXED_PHY */ - #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) static __be32 __iomem *rstcr; -- cgit v1.2.3 From ba159fd387a4d47bb41ddc3b06bfc28f33e8d936 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 23 May 2014 10:54:25 +0200 Subject: tile: Update comments for generic idle conversion As of commit 0dc8153cfebac68c9523b8852b14f10b31209f08 ("tile: Use generic idle loop"), this applies to arch_cpu_idle() instead of cpu_idle(). 
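One likely reason the macro form is preferred over open-coded initializers (an inference, not spelled out in the patch): any member omitted from a designated initializer is silently zero-initialized, which for struct pwm_lookup means a 0 ns period. A sketch of the failure mode being closed off (hypothetical table):

static struct pwm_lookup incomplete_lookup[] = {
	{
		/* .period and .polarity omitted: both default to 0, so
		 * a consumer calling pwm_get() would see period == 0
		 * and have to guess. PWM_LOOKUP() makes both of them
		 * mandatory arguments. */
		.provider = "pxa27x-pwm.1",
		.index = 0,
		.dev_id = "pwm-backlight",
		.con_id = NULL,
	},
};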
Signed-off-by: Geert Uytterhoeven Signed-off-by: Chris Metcalf --- arch/tile/include/asm/thread_info.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h index 729aa107f64e..80ba7d40ecc0 100644 --- a/arch/tile/include/asm/thread_info.h +++ b/arch/tile/include/asm/thread_info.h @@ -94,7 +94,7 @@ register unsigned long stack_pointer __asm__("sp"); /* Sit on a nap instruction until interrupted. */ extern void smp_nap(void); -/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */ +/* Enable interrupts racelessly and nap forever: helper for arch_cpu_idle(). */ extern void _cpu_idle(void); #else /* __ASSEMBLY__ */ -- cgit v1.2.3 From aa76fcf473f6bfa839f37f77b6fdb71f0fb88d8f Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Fri, 21 Feb 2014 17:36:21 +0200 Subject: CLK: TI: DPLL: add support for omap2 core dpll OMAP2 has slightly different DPLL compared to later OMAP generations. This patch adds support for the ti,omap2-dpll-core-clock and also adds the bindings documentation. Signed-off-by: Tero Kristo --- .../devicetree/bindings/clock/ti/dpll.txt | 9 +++ arch/arm/mach-omap2/clock.h | 1 - arch/arm/mach-omap2/clock2xxx.h | 4 -- drivers/clk/ti/dpll.c | 78 +++++++++++++++++++--- include/linux/clk/ti.h | 6 ++ 5 files changed, 82 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/Documentation/devicetree/bindings/clock/ti/dpll.txt b/Documentation/devicetree/bindings/clock/ti/dpll.txt index 30bfdb7c9f18..50a1a427608f 100644 --- a/Documentation/devicetree/bindings/clock/ti/dpll.txt +++ b/Documentation/devicetree/bindings/clock/ti/dpll.txt @@ -30,6 +30,7 @@ Required properties: "ti,am3-dpll-clock", "ti,am3-dpll-core-clock", "ti,am3-dpll-x2-clock", + "ti,omap2-dpll-core-clock", - #clock-cells : from common clock binding; shall be set to 0. 
- clocks : link phandles of parent clocks, first entry lists reference clock @@ -41,6 +42,7 @@ Required properties: "mult-div1" - contains the multiplier / divider register base address "autoidle" - contains the autoidle register base address (optional) ti,am3-* dpll types do not have autoidle register + ti,omap2-* dpll type does not support idlest / autoidle registers Optional properties: - DPLL mode setting - defining any one or more of the following overrides @@ -73,3 +75,10 @@ Examples: clocks = <&sys_clkin_ck>, <&sys_clkin_ck>; reg = <0x90>, <0x5c>, <0x68>; }; + + dpll_ck: dpll_ck { + #clock-cells = <0>; + compatible = "ti,omap2-dpll-core-clock"; + clocks = <&sys_ck>, <&sys_ck>; + reg = <0x0500>, <0x0540>; + }; diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h index bda767a9dea8..f6e9904d7a75 100644 --- a/arch/arm/mach-omap2/clock.h +++ b/arch/arm/mach-omap2/clock.h @@ -279,7 +279,6 @@ extern const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait; extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait; extern const struct clk_hw_omap_ops clkhwops_apll54; extern const struct clk_hw_omap_ops clkhwops_apll96; -extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll; extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait; /* clksel_rate blocks shared between OMAP44xx and AM33xx */ diff --git a/arch/arm/mach-omap2/clock2xxx.h b/arch/arm/mach-omap2/clock2xxx.h index 539dc08afbba..45f41a411603 100644 --- a/arch/arm/mach-omap2/clock2xxx.h +++ b/arch/arm/mach-omap2/clock2xxx.h @@ -21,10 +21,6 @@ unsigned long omap2xxx_sys_clk_recalc(struct clk_hw *clk, unsigned long parent_rate); unsigned long omap2_osc_clk_recalc(struct clk_hw *clk, unsigned long parent_rate); -unsigned long omap2_dpllcore_recalc(struct clk_hw *hw, - unsigned long parent_rate); -int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate, - unsigned long parent_rate); void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw); unsigned long omap2_clk_apll54_recalc(struct clk_hw *hw, unsigned long parent_rate); diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c index dda262db42ea..34e233990212 100644 --- a/drivers/clk/ti/dpll.c +++ b/drivers/clk/ti/dpll.c @@ -35,21 +35,18 @@ static const struct clk_ops dpll_m4xen_ck_ops = { .set_rate = &omap3_noncore_dpll_set_rate, .get_parent = &omap2_init_dpll_parent, }; +#else +static const struct clk_ops dpll_m4xen_ck_ops = {}; #endif +#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \ + defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) || \ + defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX) static const struct clk_ops dpll_core_ck_ops = { .recalc_rate = &omap3_dpll_recalc, .get_parent = &omap2_init_dpll_parent, }; -#ifdef CONFIG_ARCH_OMAP3 -static const struct clk_ops omap3_dpll_core_ck_ops = { - .get_parent = &omap2_init_dpll_parent, - .recalc_rate = &omap3_dpll_recalc, - .round_rate = &omap2_dpll_round_rate, -}; -#endif - static const struct clk_ops dpll_ck_ops = { .enable = &omap3_noncore_dpll_enable, .disable = &omap3_noncore_dpll_disable, @@ -65,6 +62,33 @@ static const struct clk_ops dpll_no_gate_ck_ops = { .round_rate = &omap2_dpll_round_rate, .set_rate = &omap3_noncore_dpll_set_rate, }; +#else +static const struct clk_ops dpll_core_ck_ops = {}; +static const struct clk_ops dpll_ck_ops = {}; +static const struct clk_ops dpll_no_gate_ck_ops = {}; +const struct clk_hw_omap_ops clkhwops_omap3_dpll = {}; +#endif + +#ifdef CONFIG_ARCH_OMAP2 +static const struct clk_ops omap2_dpll_core_ck_ops = 
{ + .get_parent = &omap2_init_dpll_parent, + .recalc_rate = &omap2_dpllcore_recalc, + .round_rate = &omap2_dpll_round_rate, + .set_rate = &omap2_reprogram_dpllcore, +}; +#else +static const struct clk_ops omap2_dpll_core_ck_ops = {}; +#endif + +#ifdef CONFIG_ARCH_OMAP3 +static const struct clk_ops omap3_dpll_core_ck_ops = { + .get_parent = &omap2_init_dpll_parent, + .recalc_rate = &omap3_dpll_recalc, + .round_rate = &omap2_dpll_round_rate, +}; +#else +static const struct clk_ops omap3_dpll_core_ck_ops = {}; +#endif #ifdef CONFIG_ARCH_OMAP3 static const struct clk_ops omap3_dpll_ck_ops = { @@ -237,10 +261,27 @@ static void __init of_ti_dpll_setup(struct device_node *node, init->parent_names = parent_names; dd->control_reg = ti_clk_get_reg_addr(node, 0); - dd->idlest_reg = ti_clk_get_reg_addr(node, 1); - dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2); - if (!dd->control_reg || !dd->idlest_reg || !dd->mult_div1_reg) + /* + * Special case for OMAP2 DPLL, register order is different due to + * missing idlest_reg, also clkhwops is different. Detected from + * missing idlest_mask. + */ + if (!dd->idlest_mask) { + dd->mult_div1_reg = ti_clk_get_reg_addr(node, 1); +#ifdef CONFIG_ARCH_OMAP2 + clk_hw->ops = &clkhwops_omap2xxx_dpll; + omap2xxx_clkt_dpllcore_init(&clk_hw->hw); +#endif + } else { + dd->idlest_reg = ti_clk_get_reg_addr(node, 1); + if (!dd->idlest_reg) + goto cleanup; + + dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2); + } + + if (!dd->control_reg || !dd->mult_div1_reg) goto cleanup; if (dd->autoidle_mask) { @@ -547,3 +588,18 @@ static void __init of_ti_am3_core_dpll_setup(struct device_node *node) } CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock", of_ti_am3_core_dpll_setup); + +static void __init of_ti_omap2_core_dpll_setup(struct device_node *node) +{ + const struct dpll_data dd = { + .enable_mask = 0x3, + .mult_mask = 0x3ff << 12, + .div1_mask = 0xf << 8, + .max_divider = 16, + .min_divider = 1, + }; + + of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd); +} +CLK_OF_DECLARE(ti_omap2_core_dpll_clock, "ti,omap2-dpll-core-clock", + of_ti_omap2_core_dpll_setup); diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 4a21a872dbbd..753878c6fa52 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -259,6 +259,11 @@ int omap2_dflt_clk_enable(struct clk_hw *hw); void omap2_dflt_clk_disable(struct clk_hw *hw); int omap2_dflt_clk_is_enabled(struct clk_hw *hw); void omap3_clk_lock_dpll5(void); +unsigned long omap2_dpllcore_recalc(struct clk_hw *hw, + unsigned long parent_rate); +int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate, + unsigned long parent_rate); +void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw); void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index); void ti_dt_clocks_register(struct ti_dt_clk *oclks); @@ -287,6 +292,7 @@ static inline void of_ti_clk_allow_autoidle_all(void) { } static inline void of_ti_clk_deny_autoidle_all(void) { } #endif +extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll; extern const struct clk_hw_omap_ops clkhwops_omap3_dpll; extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx; extern const struct clk_hw_omap_ops clkhwops_wait; -- cgit v1.2.3 From 4d008589e271e28eae728eef7f5fb1f658f12b9f Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 24 Feb 2014 16:06:34 +0200 Subject: CLK: TI: APLL: add support for omap2 aplls This patch adds support for omap2 type aplls, which have gating and autoidle functionality. 
Signed-off-by: Tero Kristo --- .../devicetree/bindings/clock/ti/apll.txt | 24 ++- arch/arm/mach-omap2/clock.h | 11 -- drivers/clk/ti/apll.c | 181 +++++++++++++++++++++ include/linux/clk/ti.h | 21 ++- 4 files changed, 220 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/Documentation/devicetree/bindings/clock/ti/apll.txt b/Documentation/devicetree/bindings/clock/ti/apll.txt index 7faf5a68b3be..ade4dd4c30f0 100644 --- a/Documentation/devicetree/bindings/clock/ti/apll.txt +++ b/Documentation/devicetree/bindings/clock/ti/apll.txt @@ -14,18 +14,32 @@ a subtype of a DPLL [2], although a simplified one at that. [2] Documentation/devicetree/bindings/clock/ti/dpll.txt Required properties: -- compatible : shall be "ti,dra7-apll-clock" +- compatible : shall be "ti,dra7-apll-clock" or "ti,omap2-apll-clock" - #clock-cells : from common clock binding; shall be set to 0. - clocks : link phandles of parent clocks (clk-ref and clk-bypass) - reg : address and length of the register set for controlling the APLL. It contains the information of registers in the following order: - "control" - contains the control register base address - "idlest" - contains the idlest register base address + "control" - contains the control register offset + "idlest" - contains the idlest register offset + "autoidle" - contains the autoidle register offset (OMAP2 only) +- ti,clock-frequency : static clock frequency for the clock (OMAP2 only) +- ti,idlest-shift : bit-shift for the idlest field (OMAP2 only) +- ti,bit-shift : bit-shift for enable and autoidle fields (OMAP2 only) Examples: - apll_pcie_ck: apll_pcie_ck@4a008200 { + apll_pcie_ck: apll_pcie_ck { #clock-cells = <0>; clocks = <&apll_pcie_in_clk_mux>, <&dpll_pcie_ref_ck>; - reg = <0x4a00821c 0x4>, <0x4a008220 0x4>; + reg = <0x021c>, <0x0220>; compatible = "ti,dra7-apll-clock"; }; + + apll96_ck: apll96_ck { + #clock-cells = <0>; + compatible = "ti,omap2-apll-clock"; + clocks = <&sys_ck>; + ti,bit-shift = <2>; + ti,idlest-shift = <8>; + ti,clock-frequency = <96000000>; + reg = <0x0500>, <0x0530>, <0x0520>; + }; diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h index f6e9904d7a75..eb441d137843 100644 --- a/arch/arm/mach-omap2/clock.h +++ b/arch/arm/mach-omap2/clock.h @@ -178,17 +178,6 @@ struct clksel { const struct clksel_rate *rates; }; -struct clk_hw_omap_ops { - void (*find_idlest)(struct clk_hw_omap *oclk, - void __iomem **idlest_reg, - u8 *idlest_bit, u8 *idlest_val); - void (*find_companion)(struct clk_hw_omap *oclk, - void __iomem **other_reg, - u8 *other_bit); - void (*allow_idle)(struct clk_hw_omap *oclk); - void (*deny_idle)(struct clk_hw_omap *oclk); -}; - unsigned long omap_fixed_divisor_recalc(struct clk_hw *hw, unsigned long parent_rate); diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c index b986f61f5a77..5428c9c547cd 100644 --- a/drivers/clk/ti/apll.c +++ b/drivers/clk/ti/apll.c @@ -221,3 +221,184 @@ cleanup: kfree(init); } CLK_OF_DECLARE(dra7_apll_clock, "ti,dra7-apll-clock", of_dra7_apll_setup); + +#define OMAP2_EN_APLL_LOCKED 0x3 +#define OMAP2_EN_APLL_STOPPED 0x0 + +static int omap2_apll_is_enabled(struct clk_hw *hw) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + struct dpll_data *ad = clk->dpll_data; + u32 v; + + v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v &= ad->enable_mask; + + v >>= __ffs(ad->enable_mask); + + return v == OMAP2_EN_APLL_LOCKED ? 
1 : 0; +} + +static unsigned long omap2_apll_recalc(struct clk_hw *hw, + unsigned long parent_rate) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + + if (omap2_apll_is_enabled(hw)) + return clk->fixed_rate; + + return 0; +} + +static int omap2_apll_enable(struct clk_hw *hw) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + struct dpll_data *ad = clk->dpll_data; + u32 v; + int i = 0; + + v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v &= ~ad->enable_mask; + v |= OMAP2_EN_APLL_LOCKED << __ffs(ad->enable_mask); + ti_clk_ll_ops->clk_writel(v, ad->control_reg); + + while (1) { + v = ti_clk_ll_ops->clk_readl(ad->idlest_reg); + if (v & ad->idlest_mask) + break; + if (i > MAX_APLL_WAIT_TRIES) + break; + i++; + udelay(1); + } + + if (i == MAX_APLL_WAIT_TRIES) { + pr_warn("%s failed to transition to locked\n", + __clk_get_name(clk->hw.clk)); + return -EBUSY; + } + + return 0; +} + +static void omap2_apll_disable(struct clk_hw *hw) +{ + struct clk_hw_omap *clk = to_clk_hw_omap(hw); + struct dpll_data *ad = clk->dpll_data; + u32 v; + + v = ti_clk_ll_ops->clk_readl(ad->control_reg); + v &= ~ad->enable_mask; + v |= OMAP2_EN_APLL_STOPPED << __ffs(ad->enable_mask); + ti_clk_ll_ops->clk_writel(v, ad->control_reg); +} + +static struct clk_ops omap2_apll_ops = { + .enable = &omap2_apll_enable, + .disable = &omap2_apll_disable, + .is_enabled = &omap2_apll_is_enabled, + .recalc_rate = &omap2_apll_recalc, +}; + +static void omap2_apll_set_autoidle(struct clk_hw_omap *clk, u32 val) +{ + struct dpll_data *ad = clk->dpll_data; + u32 v; + + v = ti_clk_ll_ops->clk_readl(ad->autoidle_reg); + v &= ~ad->autoidle_mask; + v |= val << __ffs(ad->autoidle_mask); + ti_clk_ll_ops->clk_writel(v, ad->control_reg); +} + +#define OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP 0x3 +#define OMAP2_APLL_AUTOIDLE_DISABLE 0x0 + +static void omap2_apll_allow_idle(struct clk_hw_omap *clk) +{ + omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_LOW_POWER_STOP); +} + +static void omap2_apll_deny_idle(struct clk_hw_omap *clk) +{ + omap2_apll_set_autoidle(clk, OMAP2_APLL_AUTOIDLE_DISABLE); +} + +static struct clk_hw_omap_ops omap2_apll_hwops = { + .allow_idle = &omap2_apll_allow_idle, + .deny_idle = &omap2_apll_deny_idle, +}; + +static void __init of_omap2_apll_setup(struct device_node *node) +{ + struct dpll_data *ad = NULL; + struct clk_hw_omap *clk_hw = NULL; + struct clk_init_data *init = NULL; + struct clk *clk; + const char *parent_name; + u32 val; + + ad = kzalloc(sizeof(*clk_hw), GFP_KERNEL); + clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); + init = kzalloc(sizeof(*init), GFP_KERNEL); + + if (!ad || !clk_hw || !init) + goto cleanup; + + clk_hw->dpll_data = ad; + clk_hw->hw.init = init; + init->ops = &omap2_apll_ops; + init->name = node->name; + clk_hw->ops = &omap2_apll_hwops; + + init->num_parents = of_clk_get_parent_count(node); + if (init->num_parents != 1) { + pr_err("%s must have one parent\n", node->name); + goto cleanup; + } + + parent_name = of_clk_get_parent_name(node, 0); + init->parent_names = &parent_name; + + if (of_property_read_u32(node, "ti,clock-frequency", &val)) { + pr_err("%s missing clock-frequency\n", node->name); + goto cleanup; + } + clk_hw->fixed_rate = val; + + if (of_property_read_u32(node, "ti,bit-shift", &val)) { + pr_err("%s missing bit-shift\n", node->name); + goto cleanup; + } + + clk_hw->enable_bit = val; + ad->enable_mask = 0x3 << val; + ad->autoidle_mask = 0x3 << val; + + if (of_property_read_u32(node, "ti,idlest-shift", &val)) { + pr_err("%s missing idlest-shift\n", node->name); + goto cleanup; + } + 
+ ad->idlest_mask = 1 << val; + + ad->control_reg = ti_clk_get_reg_addr(node, 0); + ad->autoidle_reg = ti_clk_get_reg_addr(node, 1); + ad->idlest_reg = ti_clk_get_reg_addr(node, 2); + + if (!ad->control_reg || !ad->autoidle_reg || !ad->idlest_reg) + goto cleanup; + + clk = clk_register(NULL, &clk_hw->hw); + if (!IS_ERR(clk)) { + of_clk_add_provider(node, of_clk_src_simple_get, clk); + kfree(init); + return; + } +cleanup: + kfree(ad); + kfree(clk_hw); + kfree(init); +} +CLK_OF_DECLARE(omap2_apll_clock, "ti,omap2-apll-clock", + of_omap2_apll_setup); diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 753878c6fa52..44bf84002a34 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -94,7 +94,26 @@ struct dpll_data { u8 flags; }; -struct clk_hw_omap_ops; +struct clk_hw_omap; + +/** + * struct clk_hw_omap_ops - OMAP clk ops + * @find_idlest: find idlest register information for a clock + * @find_companion: find companion clock register information for a clock, + * basically converts CM_ICLKEN* <-> CM_FCLKEN* + * @allow_idle: enables autoidle hardware functionality for a clock + * @deny_idle: prevent autoidle hardware functionality for a clock + */ +struct clk_hw_omap_ops { + void (*find_idlest)(struct clk_hw_omap *oclk, + void __iomem **idlest_reg, + u8 *idlest_bit, u8 *idlest_val); + void (*find_companion)(struct clk_hw_omap *oclk, + void __iomem **other_reg, + u8 *other_bit); + void (*allow_idle)(struct clk_hw_omap *oclk); + void (*deny_idle)(struct clk_hw_omap *oclk); +}; /** * struct clk_hw_omap - OMAP struct clk -- cgit v1.2.3 From de742570745e12b53c70130ace958f2a60044000 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Tue, 25 Feb 2014 19:16:07 +0200 Subject: CLK: TI: interface: add support for omap2430 specific interface clock OMAP2430 I2CHS modules require specific hardware ops to be used, so a new compatible string is added for this.
Signed-off-by: Tero Kristo --- Documentation/devicetree/bindings/clock/ti/interface.txt | 2 ++ arch/arm/mach-omap2/clock.h | 1 - drivers/clk/ti/interface.c | 11 +++++++++++ include/linux/clk/ti.h | 1 + 4 files changed, 14 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/Documentation/devicetree/bindings/clock/ti/interface.txt b/Documentation/devicetree/bindings/clock/ti/interface.txt index 064e8caccac3..3111a409fea6 100644 --- a/Documentation/devicetree/bindings/clock/ti/interface.txt +++ b/Documentation/devicetree/bindings/clock/ti/interface.txt @@ -21,6 +21,8 @@ Required properties: "ti,omap3-dss-interface-clock" - interface clock with DSS specific HW handling "ti,omap3-ssi-interface-clock" - interface clock with SSI specific HW handling "ti,am35xx-interface-clock" - interface clock with AM35xx specific HW handling + "ti,omap2430-interface-clock" - interface clock with OMAP2430 specific HW + handling - #clock-cells : from common clock binding; shall be set to 0 - clocks : link to phandle of parent clock - reg : base address for the control register diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h index eb441d137843..12f54d428d7c 100644 --- a/arch/arm/mach-omap2/clock.h +++ b/arch/arm/mach-omap2/clock.h @@ -268,7 +268,6 @@ extern const struct clk_hw_omap_ops clkhwops_omap3430es2_hsotgusb_wait; extern const struct clk_hw_omap_ops clkhwops_am35xx_ipss_module_wait; extern const struct clk_hw_omap_ops clkhwops_apll54; extern const struct clk_hw_omap_ops clkhwops_apll96; -extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait; /* clksel_rate blocks shared between OMAP44xx and AM33xx */ extern const struct clksel_rate div_1_0_rates[]; diff --git a/drivers/clk/ti/interface.c b/drivers/clk/ti/interface.c index 320a2b168bb2..9c3e8c4aaa40 100644 --- a/drivers/clk/ti/interface.c +++ b/drivers/clk/ti/interface.c @@ -94,6 +94,7 @@ static void __init of_ti_no_wait_interface_clk_setup(struct device_node *node) CLK_OF_DECLARE(ti_no_wait_interface_clk, "ti,omap3-no-wait-interface-clock", of_ti_no_wait_interface_clk_setup); +#ifdef CONFIG_ARCH_OMAP3 static void __init of_ti_hsotgusb_interface_clk_setup(struct device_node *node) { _of_ti_interface_clk_setup(node, @@ -123,3 +124,13 @@ static void __init of_ti_am35xx_interface_clk_setup(struct device_node *node) } CLK_OF_DECLARE(ti_am35xx_interface_clk, "ti,am35xx-interface-clock", of_ti_am35xx_interface_clk_setup); +#endif + +#ifdef CONFIG_SOC_OMAP2430 +static void __init of_ti_omap2430_interface_clk_setup(struct device_node *node) +{ + _of_ti_interface_clk_setup(node, &clkhwops_omap2430_i2chs_wait); +} +CLK_OF_DECLARE(ti_omap2430_interface_clk, "ti,omap2430-interface-clock", + of_ti_omap2430_interface_clk_setup); +#endif diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 44bf84002a34..a8390d478528 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -312,6 +312,7 @@ static inline void of_ti_clk_deny_autoidle_all(void) { } #endif extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll; +extern const struct clk_hw_omap_ops clkhwops_omap2430_i2chs_wait; extern const struct clk_hw_omap_ops clkhwops_omap3_dpll; extern const struct clk_hw_omap_ops clkhwops_omap4_dpllmx; extern const struct clk_hw_omap_ops clkhwops_wait; -- cgit v1.2.3 From 61f25ca76ccc7b63371a7a6b0b8b9a8a46745b79 Mon Sep 17 00:00:00 2001 From: Tero Kristo Date: Mon, 24 Feb 2014 18:49:35 +0200 Subject: ARM: OMAP2: clock: add DT boot support for cpufreq_ck The clock and clkdev for this are added manually. 
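The manual clkdev entry is what keeps the consumer side unchanged: with a NULL dev_id and the "cpufreq_ck" con_id registered below, cpufreq code can keep looking the clock up by connection id alone, roughly as in this sketch (hypothetical function name, error handling trimmed):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/init.h>

static struct clk *mpu_clk;

static int __init example_cpufreq_init(void)
{
	/* Matches the lookup registered by omap2xxx_clkt_vps_init()
	 * below: dev_id == NULL, con_id == "cpufreq_ck". */
	mpu_clk = clk_get(NULL, "cpufreq_ck");
	if (IS_ERR(mpu_clk))
		return PTR_ERR(mpu_clk);

	return 0;
}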
Signed-off-by: Tero Kristo
---
 arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c | 53 ++++++++++++++++++++++++++++
 drivers/clk/ti/clk-2xxx.c                    |  2 ++
 include/linux/clk/ti.h                       |  1 +
 3 files changed, 56 insertions(+)
(limited to 'arch')

diff --git a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
index b935ed2922d8..85e0b0c06718 100644
--- a/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
+++ b/arch/arm/mach-omap2/clkt2xxx_virt_prcm_set.c
@@ -208,3 +208,56 @@ void omap2xxx_clkt_vps_late_init(void)
 		clk_put(c);
 	}
 }
+
+#ifdef CONFIG_OF
+#include <linux/clk-provider.h>
+#include <linux/clkdev.h>
+
+static const struct clk_ops virt_prcm_set_ops = {
+	.recalc_rate	= &omap2_table_mpu_recalc,
+	.set_rate	= &omap2_select_table_rate,
+	.round_rate	= &omap2_round_to_table_rate,
+};
+
+/**
+ * omap2xxx_clkt_vps_init - initialize virt_prcm_set clock
+ *
+ * Does a manual init for the virtual prcm DVFS clock for OMAP2. This
+ * function is called only from omap2 DT clock init, as the virtual
+ * node is not modelled in the DT clock data.
+ */
+void omap2xxx_clkt_vps_init(void)
+{
+	struct clk_init_data init = { NULL };
+	struct clk_hw_omap *hw = NULL;
+	struct clk *clk;
+	const char *parent_name = "mpu_ck";
+	struct clk_lookup *lookup = NULL;
+
+	omap2xxx_clkt_vps_late_init();
+	omap2xxx_clkt_vps_check_bootloader_rates();
+
+	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
+	lookup = kzalloc(sizeof(*lookup), GFP_KERNEL);
+	if (!hw || !lookup)
+		goto cleanup;
+	init.name = "virt_prcm_set";
+	init.ops = &virt_prcm_set_ops;
+	init.parent_names = &parent_name;
+	init.num_parents = 1;
+
+	hw->hw.init = &init;
+
+	clk = clk_register(NULL, &hw->hw);
+
+	lookup->dev_id = NULL;
+	lookup->con_id = "cpufreq_ck";
+	lookup->clk = clk;
+
+	clkdev_add(lookup);
+	return;
+cleanup:
+	kfree(hw);
+	kfree(lookup);
+}
+#endif
diff --git a/drivers/clk/ti/clk-2xxx.c b/drivers/clk/ti/clk-2xxx.c
index f6400fb5ee3e..c808ab3d2bb2 100644
--- a/drivers/clk/ti/clk-2xxx.c
+++ b/drivers/clk/ti/clk-2xxx.c
@@ -229,6 +229,8 @@ static int __init omap2xxx_dt_clk_init(int soc_type)
 	else
 		ti_dt_clocks_register(omap2430_clks);
 
+	omap2xxx_clkt_vps_init();
+
 	omap2_clk_disable_autoidle_all();
 
 	omap2_clk_enable_init_clocks(enable_init_clks,
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 188f0cbb26c2..4231c41bed51 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -283,6 +283,7 @@ unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
 int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
			     unsigned long parent_rate);
 void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
+void omap2xxx_clkt_vps_init(void);
 void __iomem *ti_clk_get_reg_addr(struct device_node *node, int index);
 void ti_dt_clocks_register(struct ti_dt_clk *oclks);
-- cgit v1.2.3

From 0cccd9190009aa6c037aa88a85f2a1b72a6a7963 Mon Sep 17 00:00:00 2001
From: Peter Ujfalusi
Date: Wed, 7 May 2014 13:20:45 +0300
Subject: ARM: dts: dra7xx-clocks: Correct name for atl clkin3 clock

To align the name with the other atl clock names:
atlclkin3_ck -> atl_clkin3_ck

Signed-off-by: Peter Ujfalusi
Signed-off-by: Tero Kristo
---
 arch/arm/boot/dts/dra7xx-clocks.dtsi | 22 +++++++++++-----------
 drivers/clk/ti/clk-7xx.c             |  2 +-
 2 files changed, 12 insertions(+), 12 deletions(-)
(limited to 'arch')

diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index cfb8fc753f50..30160348934c 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -26,7 +26,7 @@
 		clock-frequency = <0>;
 	};
 
-	atlclkin3_ck:
atlclkin3_ck { + atl_clkin3_ck: atl_clkin3_ck { #clock-cells = <0>; compatible = "fixed-clock"; clock-frequency = <0>; @@ -730,7 +730,7 @@ mcasp1_ahclkr_mux: mcasp1_ahclkr_mux { #clock-cells = <0>; compatible = "ti,mux-clock"; - clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; + clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; ti,bit-shift = <28>; reg = <0x0550>; }; @@ -738,7 +738,7 @@ mcasp1_ahclkx_mux: mcasp1_ahclkx_mux { #clock-cells = <0>; compatible = "ti,mux-clock"; - clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; + clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; ti,bit-shift = <24>; reg = <0x0550>; }; @@ -1631,7 +1631,7 @@ mcasp2_ahclkr_mux: mcasp2_ahclkr_mux { #clock-cells = <0>; compatible = "ti,mux-clock"; - clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; + clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; ti,bit-shift = <28>; reg = <0x1860>; }; @@ -1639,7 +1639,7 @@ mcasp2_ahclkx_mux: mcasp2_ahclkx_mux { #clock-cells = <0>; compatible = "ti,mux-clock"; - clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; + clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; ti,bit-shift = <24>; reg = <0x1860>; }; @@ -1655,7 +1655,7 @@ mcasp3_ahclkx_mux: mcasp3_ahclkx_mux { #clock-cells = <0>; compatible = "ti,mux-clock"; - clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; + clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; ti,bit-shift = <24>; reg = <0x1868>; }; @@ -1671,7 +1671,7 @@ mcasp4_ahclkx_mux: mcasp4_ahclkx_mux { #clock-cells = <0>; compatible = "ti,mux-clock"; - clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, 
<&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; + clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; ti,bit-shift = <24>; reg = <0x1898>; }; @@ -1687,7 +1687,7 @@ mcasp5_ahclkx_mux: mcasp5_ahclkx_mux { #clock-cells = <0>; compatible = "ti,mux-clock"; - clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; + clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; ti,bit-shift = <24>; reg = <0x1878>; }; @@ -1703,7 +1703,7 @@ mcasp6_ahclkx_mux: mcasp6_ahclkx_mux { #clock-cells = <0>; compatible = "ti,mux-clock"; - clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; + clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; ti,bit-shift = <24>; reg = <0x1904>; }; @@ -1719,7 +1719,7 @@ mcasp7_ahclkx_mux: mcasp7_ahclkx_mux { #clock-cells = <0>; compatible = "ti,mux-clock"; - clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; + clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; ti,bit-shift = <24>; reg = <0x1908>; }; @@ -1735,7 +1735,7 @@ mcasp8_ahclk_mux: mcasp8_ahclk_mux { #clock-cells = <0>; compatible = "ti,mux-clock"; - clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atlclkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; + clocks = <&abe_24m_fclk>, <&abe_sys_clk_div>, <&func_24m_clk>, <&atl_clkin3_ck>, <&atl_clkin2_ck>, <&atl_clkin1_ck>, <&atl_clkin0_ck>, <&sys_clkin2>, <&ref_clkin0_ck>, <&ref_clkin1_ck>, <&ref_clkin2_ck>, <&ref_clkin3_ck>, <&mlb_clk>, <&mlbp_clk>; ti,bit-shift = <22>; reg = <0x1890>; }; diff --git a/drivers/clk/ti/clk-7xx.c b/drivers/clk/ti/clk-7xx.c index f7e40734c819..e1581335937d 100644 --- a/drivers/clk/ti/clk-7xx.c +++ b/drivers/clk/ti/clk-7xx.c @@ -24,7 +24,7 @@ static struct ti_dt_clk dra7xx_clks[] = { DT_CLK(NULL, "atl_clkin0_ck", "atl_clkin0_ck"), DT_CLK(NULL, "atl_clkin1_ck", "atl_clkin1_ck"), DT_CLK(NULL, "atl_clkin2_ck", "atl_clkin2_ck"), - DT_CLK(NULL, "atlclkin3_ck", "atlclkin3_ck"), + DT_CLK(NULL, "atl_clkin3_ck", "atl_clkin3_ck"), DT_CLK(NULL, "hdmi_clkin_ck", 
"hdmi_clkin_ck"), DT_CLK(NULL, "mlb_clkin_ck", "mlb_clkin_ck"), DT_CLK(NULL, "mlbp_clkin_ck", "mlbp_clkin_ck"), -- cgit v1.2.3 From b2dfa048bae3b4afe1944237c1ee9525df00bb6f Mon Sep 17 00:00:00 2001 From: Daniel Walter Date: Mon, 26 May 2014 22:59:32 +0100 Subject: replace strict_strto* call with kstrto* remove obsolete calls to strict_strto* and replace them with kstrto* calls accordingly. Signed-off-by: Daniel Walter Signed-off-by: Chris Metcalf --- arch/tile/kernel/setup.c | 7 ++----- arch/tile/kernel/signal.c | 7 +++---- arch/tile/kernel/traps.c | 5 ++--- arch/tile/mm/init.c | 2 +- 4 files changed, 8 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c index 00732474fb55..112ababa9e55 100644 --- a/arch/tile/kernel/setup.c +++ b/arch/tile/kernel/setup.c @@ -228,13 +228,10 @@ early_param("isolnodes", setup_isolnodes); #if defined(CONFIG_PCI) && !defined(__tilegx__) static int __init setup_pci_reserve(char* str) { - unsigned long mb; - - if (str == NULL || strict_strtoul(str, 0, &mb) != 0 || - mb > 3 * 1024) + if (str == NULL || kstrtouint(str, 0, &pci_reserve_mb) != 0 || + pci_reserve_mb > 3 * 1024) return -EINVAL; - pci_reserve_mb = mb; pr_info("Reserving %dMB for PCIE root complex mappings\n", pci_reserve_mb); return 0; diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c index 2d1dbf38a9ab..d1d026f01267 100644 --- a/arch/tile/kernel/signal.c +++ b/arch/tile/kernel/signal.c @@ -321,14 +321,13 @@ int show_unhandled_signals = 1; static int __init crashinfo(char *str) { - unsigned long val; const char *word; if (*str == '\0') - val = 2; - else if (*str != '=' || strict_strtoul(++str, 0, &val) != 0) + show_unhandled_signals = 2; + else if (*str != '=' || kstrtoint(++str, 0, &show_unhandled_signals) != 0) return 0; - show_unhandled_signals = val; + switch (show_unhandled_signals) { case 0: word = "No"; diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c index 6b603d556ca6..f3ceb6308e42 100644 --- a/arch/tile/kernel/traps.c +++ b/arch/tile/kernel/traps.c @@ -42,10 +42,9 @@ static int __init setup_unaligned_fixup(char *str) * will still parse the instruction, then fire a SIGBUS with * the correct address from inside the single_step code. */ - long val; - if (strict_strtol(str, 0, &val) != 0) + if (kstrtoint(str, 0, &unaligned_fixup) != 0) return 0; - unaligned_fixup = val; + pr_info("Fixups for unaligned data accesses are %s\n", unaligned_fixup >= 0 ? (unaligned_fixup ? "enabled" : "disabled") : diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c index 7ba1dc3d41fa..bfb3127b4df9 100644 --- a/arch/tile/mm/init.c +++ b/arch/tile/mm/init.c @@ -912,7 +912,7 @@ static long __write_once initfree = 1; static int __init set_initfree(char *str) { long val; - if (strict_strtol(str, 0, &val) == 0) { + if (kstrtol(str, 0, &val) == 0) { initfree = val; pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't"); -- cgit v1.2.3 From 3480593131e0b781287dae0139bf7ccee7cba7ff Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Thu, 29 May 2014 10:22:50 +0200 Subject: net: filter: get rid of BPF_S_* enum This patch finally allows us to get rid of the BPF_S_* enum. Currently, the code performs unnecessary encode and decode workarounds in seccomp and filter migration itself when a filter is being attached in order to overcome BPF_S_* encoding which is not used anymore by the new interpreter resp. JIT compilers. 
Keeping it around would mean that we would also need to extend and
maintain this enum and the related encoders/decoders in the future. We
can get rid of all that and save ourselves these operations during
filter attaching.

Naturally, the JIT compilers also need to be updated by this change.

Before JIT conversion is done, each compiler checks whether A is loaded
at startup, to determine whether it needs to emit instructions to clear
A first. Since BPF extensions are a subset of BPF_LD | BPF_{W,H,B} |
BPF_ABS variants, case statements for extensions can be removed at that
point. To ease and minimize code changes in the classic JITs, we have
introduced bpf_anc_helper().

Tested with test_bpf on x86_64 (JIT, int), s390x (JIT, int), arm (JIT,
int), i386 (int), ppc64 (JIT, int); for sparc we unfortunately didn't
have access, but changes are analogous to the rest.

Joint work with Alexei Starovoitov.

Signed-off-by: Daniel Borkmann
Signed-off-by: Alexei Starovoitov
Cc: Benjamin Herrenschmidt
Cc: Martin Schwidefsky
Cc: Mircea Gherzan
Cc: Kees Cook
Acked-by: Chema Gonzalez
Signed-off-by: David S. Miller
---
 arch/arm/net/bpf_jit_32.c       | 139 ++++++++--------
 arch/powerpc/net/bpf_jit_64.S   |   2 +-
 arch/powerpc/net/bpf_jit_comp.c | 157 +++++++++---------
 arch/s390/net/bpf_jit_comp.c    | 163 +++++++++----------
 arch/sparc/net/bpf_jit_comp.c   | 154 +++++++++---------
 include/linux/filter.h          | 108 +++++--------
 kernel/seccomp.c                |  83 +++++-----
 net/core/filter.c               | 341 +++++++++++++++-------------------------
 8 files changed, 498 insertions(+), 649 deletions(-)
(limited to 'arch')

diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6f879c319a9d..fb5503ce016f 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -136,7 +136,7 @@ static u16 saved_regs(struct jit_ctx *ctx)
 	u16 ret = 0;
 
 	if ((ctx->skf->len > 1) ||
-	    (ctx->skf->insns[0].code == BPF_S_RET_A))
+	    (ctx->skf->insns[0].code == (BPF_RET | BPF_A)))
 		ret |= 1 << r_A;
 
 #ifdef CONFIG_FRAME_POINTER
@@ -164,18 +164,10 @@ static inline int mem_words_used(struct jit_ctx *ctx)
 static inline bool is_load_to_a(u16 inst)
 {
 	switch (inst) {
-	case BPF_S_LD_W_LEN:
-	case BPF_S_LD_W_ABS:
-	case BPF_S_LD_H_ABS:
-	case BPF_S_LD_B_ABS:
-	case BPF_S_ANC_CPU:
-	case BPF_S_ANC_IFINDEX:
-	case BPF_S_ANC_MARK:
-	case BPF_S_ANC_PROTOCOL:
-	case BPF_S_ANC_RXHASH:
-	case BPF_S_ANC_VLAN_TAG:
-	case BPF_S_ANC_VLAN_TAG_PRESENT:
-	case BPF_S_ANC_QUEUE:
+	case BPF_LD | BPF_W | BPF_LEN:
+	case BPF_LD | BPF_W | BPF_ABS:
+	case BPF_LD | BPF_H | BPF_ABS:
+	case BPF_LD | BPF_B | BPF_ABS:
 		return true;
 	default:
 		return false;
@@ -215,7 +207,7 @@ static void build_prologue(struct jit_ctx *ctx)
 		emit(ARM_MOV_I(r_X, 0), ctx);
 
	/* do not leak kernel data to userspace */
-	if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
+	if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
 		emit(ARM_MOV_I(r_A, 0), ctx);
 
	/* stack space for the BPF_MEM words */
@@ -480,36 +472,39 @@ static int build_body(struct jit_ctx *ctx)
 	u32 k;
 
 	for (i = 0; i < prog->len; i++) {
+		u16 code;
+
 		inst = &(prog->insns[i]);
		/* K as an immediate value operand */
 		k = inst->k;
+		code = bpf_anc_helper(inst);
 
		/* compute offsets only in the fake pass */
 		if (ctx->target == NULL)
 			ctx->offsets[i] = ctx->idx * 4;
 
-		switch (inst->code) {
-		case BPF_S_LD_IMM:
+		switch (code) {
+		case BPF_LD | BPF_IMM:
 			emit_mov_i(r_A, k, ctx);
 			break;
-		case BPF_S_LD_W_LEN:
+		case BPF_LD | BPF_W | BPF_LEN:
 			ctx->seen |= SEEN_SKB;
 			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
 			emit(ARM_LDR_I(r_A, r_skb,
offsetof(struct sk_buff, len)), ctx); break; - case BPF_S_LD_MEM: + case BPF_LD | BPF_MEM: /* A = scratch[k] */ ctx->seen |= SEEN_MEM_WORD(k); emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); break; - case BPF_S_LD_W_ABS: + case BPF_LD | BPF_W | BPF_ABS: load_order = 2; goto load; - case BPF_S_LD_H_ABS: + case BPF_LD | BPF_H | BPF_ABS: load_order = 1; goto load; - case BPF_S_LD_B_ABS: + case BPF_LD | BPF_B | BPF_ABS: load_order = 0; load: /* the interpreter will deal with the negative K */ @@ -552,31 +547,31 @@ load_common: emit_err_ret(ARM_COND_NE, ctx); emit(ARM_MOV_R(r_A, ARM_R0), ctx); break; - case BPF_S_LD_W_IND: + case BPF_LD | BPF_W | BPF_IND: load_order = 2; goto load_ind; - case BPF_S_LD_H_IND: + case BPF_LD | BPF_H | BPF_IND: load_order = 1; goto load_ind; - case BPF_S_LD_B_IND: + case BPF_LD | BPF_B | BPF_IND: load_order = 0; load_ind: OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); goto load_common; - case BPF_S_LDX_IMM: + case BPF_LDX | BPF_IMM: ctx->seen |= SEEN_X; emit_mov_i(r_X, k, ctx); break; - case BPF_S_LDX_W_LEN: + case BPF_LDX | BPF_W | BPF_LEN: ctx->seen |= SEEN_X | SEEN_SKB; emit(ARM_LDR_I(r_X, r_skb, offsetof(struct sk_buff, len)), ctx); break; - case BPF_S_LDX_MEM: + case BPF_LDX | BPF_MEM: ctx->seen |= SEEN_X | SEEN_MEM_WORD(k); emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); break; - case BPF_S_LDX_B_MSH: + case BPF_LDX | BPF_B | BPF_MSH: /* x = ((*(frame + k)) & 0xf) << 2; */ ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; /* the interpreter should deal with the negative K */ @@ -606,113 +601,113 @@ load_ind: emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx); emit(ARM_LSL_I(r_X, r_X, 2), ctx); break; - case BPF_S_ST: + case BPF_ST: ctx->seen |= SEEN_MEM_WORD(k); emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); break; - case BPF_S_STX: + case BPF_STX: update_on_xread(ctx); ctx->seen |= SEEN_MEM_WORD(k); emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); break; - case BPF_S_ALU_ADD_K: + case BPF_ALU | BPF_ADD | BPF_K: /* A += K */ OP_IMM3(ARM_ADD, r_A, r_A, k, ctx); break; - case BPF_S_ALU_ADD_X: + case BPF_ALU | BPF_ADD | BPF_X: update_on_xread(ctx); emit(ARM_ADD_R(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_SUB_K: + case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */ OP_IMM3(ARM_SUB, r_A, r_A, k, ctx); break; - case BPF_S_ALU_SUB_X: + case BPF_ALU | BPF_SUB | BPF_X: update_on_xread(ctx); emit(ARM_SUB_R(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_MUL_K: + case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */ emit_mov_i(r_scratch, k, ctx); emit(ARM_MUL(r_A, r_A, r_scratch), ctx); break; - case BPF_S_ALU_MUL_X: + case BPF_ALU | BPF_MUL | BPF_X: update_on_xread(ctx); emit(ARM_MUL(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_DIV_K: + case BPF_ALU | BPF_DIV | BPF_K: if (k == 1) break; emit_mov_i(r_scratch, k, ctx); emit_udiv(r_A, r_A, r_scratch, ctx); break; - case BPF_S_ALU_DIV_X: + case BPF_ALU | BPF_DIV | BPF_X: update_on_xread(ctx); emit(ARM_CMP_I(r_X, 0), ctx); emit_err_ret(ARM_COND_EQ, ctx); emit_udiv(r_A, r_A, r_X, ctx); break; - case BPF_S_ALU_OR_K: + case BPF_ALU | BPF_OR | BPF_K: /* A |= K */ OP_IMM3(ARM_ORR, r_A, r_A, k, ctx); break; - case BPF_S_ALU_OR_X: + case BPF_ALU | BPF_OR | BPF_X: update_on_xread(ctx); emit(ARM_ORR_R(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_XOR_K: + case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K; */ OP_IMM3(ARM_EOR, r_A, r_A, k, ctx); break; - case BPF_S_ANC_ALU_XOR_X: - case BPF_S_ALU_XOR_X: + case BPF_ANC | SKF_AD_ALU_XOR_X: + case BPF_ALU | BPF_XOR | BPF_X: /* A ^= X */ update_on_xread(ctx); emit(ARM_EOR_R(r_A, r_A, r_X), ctx); break; - case 
BPF_S_ALU_AND_K: + case BPF_ALU | BPF_AND | BPF_K: /* A &= K */ OP_IMM3(ARM_AND, r_A, r_A, k, ctx); break; - case BPF_S_ALU_AND_X: + case BPF_ALU | BPF_AND | BPF_X: update_on_xread(ctx); emit(ARM_AND_R(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_LSH_K: + case BPF_ALU | BPF_LSH | BPF_K: if (unlikely(k > 31)) return -1; emit(ARM_LSL_I(r_A, r_A, k), ctx); break; - case BPF_S_ALU_LSH_X: + case BPF_ALU | BPF_LSH | BPF_X: update_on_xread(ctx); emit(ARM_LSL_R(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_RSH_K: + case BPF_ALU | BPF_RSH | BPF_K: if (unlikely(k > 31)) return -1; emit(ARM_LSR_I(r_A, r_A, k), ctx); break; - case BPF_S_ALU_RSH_X: + case BPF_ALU | BPF_RSH | BPF_X: update_on_xread(ctx); emit(ARM_LSR_R(r_A, r_A, r_X), ctx); break; - case BPF_S_ALU_NEG: + case BPF_ALU | BPF_NEG: /* A = -A */ emit(ARM_RSB_I(r_A, r_A, 0), ctx); break; - case BPF_S_JMP_JA: + case BPF_JMP | BPF_JA: /* pc += K */ emit(ARM_B(b_imm(i + k + 1, ctx)), ctx); break; - case BPF_S_JMP_JEQ_K: + case BPF_JMP | BPF_JEQ | BPF_K: /* pc += (A == K) ? pc->jt : pc->jf */ condt = ARM_COND_EQ; goto cmp_imm; - case BPF_S_JMP_JGT_K: + case BPF_JMP | BPF_JGT | BPF_K: /* pc += (A > K) ? pc->jt : pc->jf */ condt = ARM_COND_HI; goto cmp_imm; - case BPF_S_JMP_JGE_K: + case BPF_JMP | BPF_JGE | BPF_K: /* pc += (A >= K) ? pc->jt : pc->jf */ condt = ARM_COND_HS; cmp_imm: @@ -731,22 +726,22 @@ cond_jump: _emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1, ctx)), ctx); break; - case BPF_S_JMP_JEQ_X: + case BPF_JMP | BPF_JEQ | BPF_X: /* pc += (A == X) ? pc->jt : pc->jf */ condt = ARM_COND_EQ; goto cmp_x; - case BPF_S_JMP_JGT_X: + case BPF_JMP | BPF_JGT | BPF_X: /* pc += (A > X) ? pc->jt : pc->jf */ condt = ARM_COND_HI; goto cmp_x; - case BPF_S_JMP_JGE_X: + case BPF_JMP | BPF_JGE | BPF_X: /* pc += (A >= X) ? pc->jt : pc->jf */ condt = ARM_COND_CS; cmp_x: update_on_xread(ctx); emit(ARM_CMP_R(r_A, r_X), ctx); goto cond_jump; - case BPF_S_JMP_JSET_K: + case BPF_JMP | BPF_JSET | BPF_K: /* pc += (A & K) ? pc->jt : pc->jf */ condt = ARM_COND_NE; /* not set iff all zeroes iff Z==1 iff EQ */ @@ -759,16 +754,16 @@ cmp_x: emit(ARM_TST_I(r_A, imm12), ctx); } goto cond_jump; - case BPF_S_JMP_JSET_X: + case BPF_JMP | BPF_JSET | BPF_X: /* pc += (A & X) ? 
pc->jt : pc->jf */ update_on_xread(ctx); condt = ARM_COND_NE; emit(ARM_TST_R(r_A, r_X), ctx); goto cond_jump; - case BPF_S_RET_A: + case BPF_RET | BPF_A: emit(ARM_MOV_R(ARM_R0, r_A), ctx); goto b_epilogue; - case BPF_S_RET_K: + case BPF_RET | BPF_K: if ((k == 0) && (ctx->ret0_fp_idx < 0)) ctx->ret0_fp_idx = i; emit_mov_i(ARM_R0, k, ctx); @@ -776,17 +771,17 @@ b_epilogue: if (i != ctx->skf->len - 1) emit(ARM_B(b_imm(prog->len, ctx)), ctx); break; - case BPF_S_MISC_TAX: + case BPF_MISC | BPF_TAX: /* X = A */ ctx->seen |= SEEN_X; emit(ARM_MOV_R(r_X, r_A), ctx); break; - case BPF_S_MISC_TXA: + case BPF_MISC | BPF_TXA: /* A = X */ update_on_xread(ctx); emit(ARM_MOV_R(r_A, r_X), ctx); break; - case BPF_S_ANC_PROTOCOL: + case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol) */ ctx->seen |= SEEN_SKB; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, @@ -795,7 +790,7 @@ b_epilogue: emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx); emit_swap16(r_A, r_scratch, ctx); break; - case BPF_S_ANC_CPU: + case BPF_ANC | SKF_AD_CPU: /* r_scratch = current_thread_info() */ OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx); /* A = current_thread_info()->cpu */ @@ -803,7 +798,7 @@ b_epilogue: off = offsetof(struct thread_info, cpu); emit(ARM_LDR_I(r_A, r_scratch, off), ctx); break; - case BPF_S_ANC_IFINDEX: + case BPF_ANC | SKF_AD_IFINDEX: /* A = skb->dev->ifindex */ ctx->seen |= SEEN_SKB; off = offsetof(struct sk_buff, dev); @@ -817,30 +812,30 @@ b_epilogue: off = offsetof(struct net_device, ifindex); emit(ARM_LDR_I(r_A, r_scratch, off), ctx); break; - case BPF_S_ANC_MARK: + case BPF_ANC | SKF_AD_MARK: ctx->seen |= SEEN_SKB; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); off = offsetof(struct sk_buff, mark); emit(ARM_LDR_I(r_A, r_skb, off), ctx); break; - case BPF_S_ANC_RXHASH: + case BPF_ANC | SKF_AD_RXHASH: ctx->seen |= SEEN_SKB; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); off = offsetof(struct sk_buff, hash); emit(ARM_LDR_I(r_A, r_skb, off), ctx); break; - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: + case BPF_ANC | SKF_AD_VLAN_TAG: + case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: ctx->seen |= SEEN_SKB; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); off = offsetof(struct sk_buff, vlan_tci); emit(ARM_LDRH_I(r_A, r_skb, off), ctx); - if (inst->code == BPF_S_ANC_VLAN_TAG) + if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx); else OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx); break; - case BPF_S_ANC_QUEUE: + case BPF_ANC | SKF_AD_QUEUE: ctx->seen |= SEEN_SKB; BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); diff --git a/arch/powerpc/net/bpf_jit_64.S b/arch/powerpc/net/bpf_jit_64.S index e76eba74d9da..8f87d9217122 100644 --- a/arch/powerpc/net/bpf_jit_64.S +++ b/arch/powerpc/net/bpf_jit_64.S @@ -78,7 +78,7 @@ sk_load_byte_positive_offset: blr /* - * BPF_S_LDX_B_MSH: ldxb 4*([offset]&0xf) + * BPF_LDX | BPF_B | BPF_MSH: ldxb 4*([offset]&0xf) * r_addr is the offset value */ .globl sk_load_byte_msh diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 808ce1cae21a..6dcdadefd8d0 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -79,19 +79,11 @@ static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image, } switch (filter[0].code) { - case BPF_S_RET_K: - case BPF_S_LD_W_LEN: - case BPF_S_ANC_PROTOCOL: - case BPF_S_ANC_IFINDEX: - case BPF_S_ANC_MARK: - case BPF_S_ANC_RXHASH: - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - case 
BPF_S_ANC_CPU: - case BPF_S_ANC_QUEUE: - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: + case BPF_RET | BPF_K: + case BPF_LD | BPF_W | BPF_LEN: + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: /* first instruction sets A register (or is RET 'constant') */ break; default: @@ -144,6 +136,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, for (i = 0; i < flen; i++) { unsigned int K = filter[i].k; + u16 code = bpf_anc_helper(&filter[i]); /* * addrs[] maps a BPF bytecode address into a real offset from @@ -151,35 +144,35 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, */ addrs[i] = ctx->idx * 4; - switch (filter[i].code) { + switch (code) { /*** ALU ops ***/ - case BPF_S_ALU_ADD_X: /* A += X; */ + case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */ ctx->seen |= SEEN_XREG; PPC_ADD(r_A, r_A, r_X); break; - case BPF_S_ALU_ADD_K: /* A += K; */ + case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */ if (!K) break; PPC_ADDI(r_A, r_A, IMM_L(K)); if (K >= 32768) PPC_ADDIS(r_A, r_A, IMM_HA(K)); break; - case BPF_S_ALU_SUB_X: /* A -= X; */ + case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */ ctx->seen |= SEEN_XREG; PPC_SUB(r_A, r_A, r_X); break; - case BPF_S_ALU_SUB_K: /* A -= K */ + case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */ if (!K) break; PPC_ADDI(r_A, r_A, IMM_L(-K)); if (K >= 32768) PPC_ADDIS(r_A, r_A, IMM_HA(-K)); break; - case BPF_S_ALU_MUL_X: /* A *= X; */ + case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */ ctx->seen |= SEEN_XREG; PPC_MUL(r_A, r_A, r_X); break; - case BPF_S_ALU_MUL_K: /* A *= K */ + case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */ if (K < 32768) PPC_MULI(r_A, r_A, K); else { @@ -187,7 +180,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_MUL(r_A, r_A, r_scratch1); } break; - case BPF_S_ALU_MOD_X: /* A %= X; */ + case BPF_ALU | BPF_MOD | BPF_X: /* A %= X; */ ctx->seen |= SEEN_XREG; PPC_CMPWI(r_X, 0); if (ctx->pc_ret0 != -1) { @@ -201,13 +194,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_MUL(r_scratch1, r_X, r_scratch1); PPC_SUB(r_A, r_A, r_scratch1); break; - case BPF_S_ALU_MOD_K: /* A %= K; */ + case BPF_ALU | BPF_MOD | BPF_K: /* A %= K; */ PPC_LI32(r_scratch2, K); PPC_DIVWU(r_scratch1, r_A, r_scratch2); PPC_MUL(r_scratch1, r_scratch2, r_scratch1); PPC_SUB(r_A, r_A, r_scratch1); break; - case BPF_S_ALU_DIV_X: /* A /= X; */ + case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */ ctx->seen |= SEEN_XREG; PPC_CMPWI(r_X, 0); if (ctx->pc_ret0 != -1) { @@ -223,17 +216,17 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, } PPC_DIVWU(r_A, r_A, r_X); break; - case BPF_S_ALU_DIV_K: /* A /= K */ + case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */ if (K == 1) break; PPC_LI32(r_scratch1, K); PPC_DIVWU(r_A, r_A, r_scratch1); break; - case BPF_S_ALU_AND_X: + case BPF_ALU | BPF_AND | BPF_X: ctx->seen |= SEEN_XREG; PPC_AND(r_A, r_A, r_X); break; - case BPF_S_ALU_AND_K: + case BPF_ALU | BPF_AND | BPF_K: if (!IMM_H(K)) PPC_ANDI(r_A, r_A, K); else { @@ -241,51 +234,51 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_AND(r_A, r_A, r_scratch1); } break; - case BPF_S_ALU_OR_X: + case BPF_ALU | BPF_OR | BPF_X: ctx->seen |= SEEN_XREG; PPC_OR(r_A, r_A, r_X); break; - case BPF_S_ALU_OR_K: + case BPF_ALU | BPF_OR | BPF_K: if (IMM_L(K)) PPC_ORI(r_A, r_A, IMM_L(K)); if (K >= 65536) PPC_ORIS(r_A, r_A, IMM_H(K)); break; - case BPF_S_ANC_ALU_XOR_X: - case BPF_S_ALU_XOR_X: /* A ^= X */ + case BPF_ANC | SKF_AD_ALU_XOR_X: + case BPF_ALU | BPF_XOR | BPF_X: /* 
A ^= X */ ctx->seen |= SEEN_XREG; PPC_XOR(r_A, r_A, r_X); break; - case BPF_S_ALU_XOR_K: /* A ^= K */ + case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */ if (IMM_L(K)) PPC_XORI(r_A, r_A, IMM_L(K)); if (K >= 65536) PPC_XORIS(r_A, r_A, IMM_H(K)); break; - case BPF_S_ALU_LSH_X: /* A <<= X; */ + case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */ ctx->seen |= SEEN_XREG; PPC_SLW(r_A, r_A, r_X); break; - case BPF_S_ALU_LSH_K: + case BPF_ALU | BPF_LSH | BPF_K: if (K == 0) break; else PPC_SLWI(r_A, r_A, K); break; - case BPF_S_ALU_RSH_X: /* A >>= X; */ + case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */ ctx->seen |= SEEN_XREG; PPC_SRW(r_A, r_A, r_X); break; - case BPF_S_ALU_RSH_K: /* A >>= K; */ + case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */ if (K == 0) break; else PPC_SRWI(r_A, r_A, K); break; - case BPF_S_ALU_NEG: + case BPF_ALU | BPF_NEG: PPC_NEG(r_A, r_A); break; - case BPF_S_RET_K: + case BPF_RET | BPF_K: PPC_LI32(r_ret, K); if (!K) { if (ctx->pc_ret0 == -1) @@ -312,7 +305,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_BLR(); } break; - case BPF_S_RET_A: + case BPF_RET | BPF_A: PPC_MR(r_ret, r_A); if (i != flen - 1) { if (ctx->seen) @@ -321,53 +314,53 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_BLR(); } break; - case BPF_S_MISC_TAX: /* X = A */ + case BPF_MISC | BPF_TAX: /* X = A */ PPC_MR(r_X, r_A); break; - case BPF_S_MISC_TXA: /* A = X */ + case BPF_MISC | BPF_TXA: /* A = X */ ctx->seen |= SEEN_XREG; PPC_MR(r_A, r_X); break; /*** Constant loads/M[] access ***/ - case BPF_S_LD_IMM: /* A = K */ + case BPF_LD | BPF_IMM: /* A = K */ PPC_LI32(r_A, K); break; - case BPF_S_LDX_IMM: /* X = K */ + case BPF_LDX | BPF_IMM: /* X = K */ PPC_LI32(r_X, K); break; - case BPF_S_LD_MEM: /* A = mem[K] */ + case BPF_LD | BPF_MEM: /* A = mem[K] */ PPC_MR(r_A, r_M + (K & 0xf)); ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); break; - case BPF_S_LDX_MEM: /* X = mem[K] */ + case BPF_LDX | BPF_MEM: /* X = mem[K] */ PPC_MR(r_X, r_M + (K & 0xf)); ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); break; - case BPF_S_ST: /* mem[K] = A */ + case BPF_ST: /* mem[K] = A */ PPC_MR(r_M + (K & 0xf), r_A); ctx->seen |= SEEN_MEM | (1<<(K & 0xf)); break; - case BPF_S_STX: /* mem[K] = X */ + case BPF_STX: /* mem[K] = X */ PPC_MR(r_M + (K & 0xf), r_X); ctx->seen |= SEEN_XREG | SEEN_MEM | (1<<(K & 0xf)); break; - case BPF_S_LD_W_LEN: /* A = skb->len; */ + case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, len)); break; - case BPF_S_LDX_W_LEN: /* X = skb->len; */ + case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */ PPC_LWZ_OFFS(r_X, r_skb, offsetof(struct sk_buff, len)); break; /*** Ancillary info loads ***/ - case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ + case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); PPC_NTOHS_OFFS(r_A, r_skb, offsetof(struct sk_buff, protocol)); break; - case BPF_S_ANC_IFINDEX: + case BPF_ANC | SKF_AD_IFINDEX: PPC_LD_OFFS(r_scratch1, r_skb, offsetof(struct sk_buff, dev)); PPC_CMPDI(r_scratch1, 0); @@ -384,33 +377,33 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_LWZ_OFFS(r_A, r_scratch1, offsetof(struct net_device, ifindex)); break; - case BPF_S_ANC_MARK: + case BPF_ANC | SKF_AD_MARK: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, mark)); break; - case BPF_S_ANC_RXHASH: + case BPF_ANC | SKF_AD_RXHASH: 
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); PPC_LWZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, hash)); break; - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: + case BPF_ANC | SKF_AD_VLAN_TAG: + case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, vlan_tci)); - if (filter[i].code == BPF_S_ANC_VLAN_TAG) + if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) PPC_ANDI(r_A, r_A, VLAN_VID_MASK); else PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT); break; - case BPF_S_ANC_QUEUE: + case BPF_ANC | SKF_AD_QUEUE: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, queue_mapping)); break; - case BPF_S_ANC_CPU: + case BPF_ANC | SKF_AD_CPU: #ifdef CONFIG_SMP /* * PACA ptr is r13: @@ -426,13 +419,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, break; /*** Absolute loads from packet header/data ***/ - case BPF_S_LD_W_ABS: + case BPF_LD | BPF_W | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, sk_load_word); goto common_load; - case BPF_S_LD_H_ABS: + case BPF_LD | BPF_H | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, sk_load_half); goto common_load; - case BPF_S_LD_B_ABS: + case BPF_LD | BPF_B | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, sk_load_byte); common_load: /* Load from [K]. */ @@ -449,13 +442,13 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, break; /*** Indirect loads from packet header/data ***/ - case BPF_S_LD_W_IND: + case BPF_LD | BPF_W | BPF_IND: func = sk_load_word; goto common_load_ind; - case BPF_S_LD_H_IND: + case BPF_LD | BPF_H | BPF_IND: func = sk_load_half; goto common_load_ind; - case BPF_S_LD_B_IND: + case BPF_LD | BPF_B | BPF_IND: func = sk_load_byte; common_load_ind: /* @@ -473,31 +466,31 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, PPC_BCC(COND_LT, exit_addr); break; - case BPF_S_LDX_B_MSH: + case BPF_LDX | BPF_B | BPF_MSH: func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh); goto common_load; break; /*** Jump and branches ***/ - case BPF_S_JMP_JA: + case BPF_JMP | BPF_JA: if (K != 0) PPC_JMP(addrs[i + 1 + K]); break; - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGT_X: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGT | BPF_X: true_cond = COND_GT; goto cond_branch; - case BPF_S_JMP_JGE_K: - case BPF_S_JMP_JGE_X: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JGE | BPF_X: true_cond = COND_GE; goto cond_branch; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JEQ_X: + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_X: true_cond = COND_EQ; goto cond_branch; - case BPF_S_JMP_JSET_K: - case BPF_S_JMP_JSET_X: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JSET | BPF_X: true_cond = COND_NE; /* Fall through */ cond_branch: @@ -508,20 +501,20 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image, break; } - switch (filter[i].code) { - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JEQ_X: + switch (code) { + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JEQ | BPF_X: ctx->seen |= SEEN_XREG; PPC_CMPLW(r_A, r_X); break; - case BPF_S_JMP_JSET_X: + case BPF_JMP | BPF_JSET | BPF_X: ctx->seen |= SEEN_XREG; PPC_AND_DOT(r_scratch1, r_A, r_X); break; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGE_K: + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: if (K < 32768) PPC_CMPLWI(r_A, K); else { @@ -529,7 +522,7 @@ static int bpf_jit_build_body(struct 
sk_filter *fp, u32 *image, PPC_CMPLW(r_A, r_scratch1); } break; - case BPF_S_JMP_JSET_K: + case BPF_JMP | BPF_JSET | BPF_K: if (K < 32768) /* PPC_ANDI is /only/ dot-form */ PPC_ANDI(r_scratch1, r_A, K); diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index e9f8fa9337fe..a2cbd875543a 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -269,27 +269,17 @@ static void bpf_jit_noleaks(struct bpf_jit *jit, struct sock_filter *filter) EMIT4(0xa7c80000); /* Clear A if the first register does not set it. */ switch (filter[0].code) { - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: - case BPF_S_LD_W_LEN: - case BPF_S_LD_W_IND: - case BPF_S_LD_H_IND: - case BPF_S_LD_B_IND: - case BPF_S_LD_IMM: - case BPF_S_LD_MEM: - case BPF_S_MISC_TXA: - case BPF_S_ANC_PROTOCOL: - case BPF_S_ANC_PKTTYPE: - case BPF_S_ANC_IFINDEX: - case BPF_S_ANC_MARK: - case BPF_S_ANC_QUEUE: - case BPF_S_ANC_HATYPE: - case BPF_S_ANC_RXHASH: - case BPF_S_ANC_CPU: - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - case BPF_S_RET_K: + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: + case BPF_LD | BPF_W | BPF_LEN: + case BPF_LD | BPF_W | BPF_IND: + case BPF_LD | BPF_H | BPF_IND: + case BPF_LD | BPF_B | BPF_IND: + case BPF_LD | BPF_IMM: + case BPF_LD | BPF_MEM: + case BPF_MISC | BPF_TXA: + case BPF_RET | BPF_K: /* first instruction sets A register */ break; default: /* A = 0 */ @@ -304,15 +294,18 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, unsigned int K; int offset; unsigned int mask; + u16 code; K = filter->k; - switch (filter->code) { - case BPF_S_ALU_ADD_X: /* A += X */ + code = bpf_anc_helper(filter); + + switch (code) { + case BPF_ALU | BPF_ADD | BPF_X: /* A += X */ jit->seen |= SEEN_XREG; /* ar %r5,%r12 */ EMIT2(0x1a5c); break; - case BPF_S_ALU_ADD_K: /* A += K */ + case BPF_ALU | BPF_ADD | BPF_K: /* A += K */ if (!K) break; if (K <= 16383) @@ -325,12 +318,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* a %r5,(%r13) */ EMIT4_DISP(0x5a50d000, EMIT_CONST(K)); break; - case BPF_S_ALU_SUB_X: /* A -= X */ + case BPF_ALU | BPF_SUB | BPF_X: /* A -= X */ jit->seen |= SEEN_XREG; /* sr %r5,%r12 */ EMIT2(0x1b5c); break; - case BPF_S_ALU_SUB_K: /* A -= K */ + case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */ if (!K) break; if (K <= 16384) @@ -343,12 +336,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* s %r5,(%r13) */ EMIT4_DISP(0x5b50d000, EMIT_CONST(K)); break; - case BPF_S_ALU_MUL_X: /* A *= X */ + case BPF_ALU | BPF_MUL | BPF_X: /* A *= X */ jit->seen |= SEEN_XREG; /* msr %r5,%r12 */ EMIT4(0xb252005c); break; - case BPF_S_ALU_MUL_K: /* A *= K */ + case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */ if (K <= 16383) /* mhi %r5,K */ EMIT4_IMM(0xa75c0000, K); @@ -359,7 +352,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* ms %r5,(%r13) */ EMIT4_DISP(0x7150d000, EMIT_CONST(K)); break; - case BPF_S_ALU_DIV_X: /* A /= X */ + case BPF_ALU | BPF_DIV | BPF_X: /* A /= X */ jit->seen |= SEEN_XREG | SEEN_RET0; /* ltr %r12,%r12 */ EMIT2(0x12cc); @@ -370,7 +363,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* dlr %r4,%r12 */ EMIT4(0xb997004c); break; - case BPF_S_ALU_DIV_K: /* A /= K */ + case BPF_ALU | BPF_DIV | BPF_K: /* A /= K */ if (K == 1) break; /* lhi %r4,0 */ @@ -378,7 +371,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* dl 
%r4,(%r13) */ EMIT6_DISP(0xe340d000, 0x0097, EMIT_CONST(K)); break; - case BPF_S_ALU_MOD_X: /* A %= X */ + case BPF_ALU | BPF_MOD | BPF_X: /* A %= X */ jit->seen |= SEEN_XREG | SEEN_RET0; /* ltr %r12,%r12 */ EMIT2(0x12cc); @@ -391,7 +384,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* lr %r5,%r4 */ EMIT2(0x1854); break; - case BPF_S_ALU_MOD_K: /* A %= K */ + case BPF_ALU | BPF_MOD | BPF_K: /* A %= K */ if (K == 1) { /* lhi %r5,0 */ EMIT4(0xa7580000); @@ -404,12 +397,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* lr %r5,%r4 */ EMIT2(0x1854); break; - case BPF_S_ALU_AND_X: /* A &= X */ + case BPF_ALU | BPF_AND | BPF_X: /* A &= X */ jit->seen |= SEEN_XREG; /* nr %r5,%r12 */ EMIT2(0x145c); break; - case BPF_S_ALU_AND_K: /* A &= K */ + case BPF_ALU | BPF_AND | BPF_K: /* A &= K */ if (test_facility(21)) /* nilf %r5, */ EMIT6_IMM(0xc05b0000, K); @@ -417,12 +410,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* n %r5,(%r13) */ EMIT4_DISP(0x5450d000, EMIT_CONST(K)); break; - case BPF_S_ALU_OR_X: /* A |= X */ + case BPF_ALU | BPF_OR | BPF_X: /* A |= X */ jit->seen |= SEEN_XREG; /* or %r5,%r12 */ EMIT2(0x165c); break; - case BPF_S_ALU_OR_K: /* A |= K */ + case BPF_ALU | BPF_OR | BPF_K: /* A |= K */ if (test_facility(21)) /* oilf %r5, */ EMIT6_IMM(0xc05d0000, K); @@ -430,55 +423,55 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, /* o %r5,(%r13) */ EMIT4_DISP(0x5650d000, EMIT_CONST(K)); break; - case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ - case BPF_S_ALU_XOR_X: + case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */ + case BPF_ALU | BPF_XOR | BPF_X: jit->seen |= SEEN_XREG; /* xr %r5,%r12 */ EMIT2(0x175c); break; - case BPF_S_ALU_XOR_K: /* A ^= K */ + case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */ if (!K) break; /* x %r5,(%r13) */ EMIT4_DISP(0x5750d000, EMIT_CONST(K)); break; - case BPF_S_ALU_LSH_X: /* A <<= X; */ + case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X; */ jit->seen |= SEEN_XREG; /* sll %r5,0(%r12) */ EMIT4(0x8950c000); break; - case BPF_S_ALU_LSH_K: /* A <<= K */ + case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */ if (K == 0) break; /* sll %r5,K */ EMIT4_DISP(0x89500000, K); break; - case BPF_S_ALU_RSH_X: /* A >>= X; */ + case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X; */ jit->seen |= SEEN_XREG; /* srl %r5,0(%r12) */ EMIT4(0x8850c000); break; - case BPF_S_ALU_RSH_K: /* A >>= K; */ + case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K; */ if (K == 0) break; /* srl %r5,K */ EMIT4_DISP(0x88500000, K); break; - case BPF_S_ALU_NEG: /* A = -A */ + case BPF_ALU | BPF_NEG: /* A = -A */ /* lnr %r5,%r5 */ EMIT2(0x1155); break; - case BPF_S_JMP_JA: /* ip += K */ + case BPF_JMP | BPF_JA: /* ip += K */ offset = addrs[i + K] + jit->start - jit->prg; EMIT4_PCREL(0xa7f40000, offset); break; - case BPF_S_JMP_JGT_K: /* ip += (A > K) ? jt : jf */ + case BPF_JMP | BPF_JGT | BPF_K: /* ip += (A > K) ? jt : jf */ mask = 0x200000; /* jh */ goto kbranch; - case BPF_S_JMP_JGE_K: /* ip += (A >= K) ? jt : jf */ + case BPF_JMP | BPF_JGE | BPF_K: /* ip += (A >= K) ? jt : jf */ mask = 0xa00000; /* jhe */ goto kbranch; - case BPF_S_JMP_JEQ_K: /* ip += (A == K) ? jt : jf */ + case BPF_JMP | BPF_JEQ | BPF_K: /* ip += (A == K) ? jt : jf */ mask = 0x800000; /* je */ kbranch: /* Emit compare if the branch targets are different */ if (filter->jt != filter->jf) { @@ -511,7 +504,7 @@ branch: if (filter->jt == filter->jf) { EMIT4_PCREL(0xa7040000 | (mask ^ 0xf00000), offset); } break; - case BPF_S_JMP_JSET_K: /* ip += (A & K) ? 
jt : jf */ + case BPF_JMP | BPF_JSET | BPF_K: /* ip += (A & K) ? jt : jf */ mask = 0x700000; /* jnz */ /* Emit test if the branch targets are different */ if (filter->jt != filter->jf) { @@ -525,13 +518,13 @@ branch: if (filter->jt == filter->jf) { EMIT4_IMM(0xa7510000, K); } goto branch; - case BPF_S_JMP_JGT_X: /* ip += (A > X) ? jt : jf */ + case BPF_JMP | BPF_JGT | BPF_X: /* ip += (A > X) ? jt : jf */ mask = 0x200000; /* jh */ goto xbranch; - case BPF_S_JMP_JGE_X: /* ip += (A >= X) ? jt : jf */ + case BPF_JMP | BPF_JGE | BPF_X: /* ip += (A >= X) ? jt : jf */ mask = 0xa00000; /* jhe */ goto xbranch; - case BPF_S_JMP_JEQ_X: /* ip += (A == X) ? jt : jf */ + case BPF_JMP | BPF_JEQ | BPF_X: /* ip += (A == X) ? jt : jf */ mask = 0x800000; /* je */ xbranch: /* Emit compare if the branch targets are different */ if (filter->jt != filter->jf) { @@ -540,7 +533,7 @@ xbranch: /* Emit compare if the branch targets are different */ EMIT2(0x195c); } goto branch; - case BPF_S_JMP_JSET_X: /* ip += (A & X) ? jt : jf */ + case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */ mask = 0x700000; /* jnz */ /* Emit test if the branch targets are different */ if (filter->jt != filter->jf) { @@ -551,15 +544,15 @@ xbranch: /* Emit compare if the branch targets are different */ EMIT2(0x144c); } goto branch; - case BPF_S_LD_W_ABS: /* A = *(u32 *) (skb->data+K) */ + case BPF_LD | BPF_W | BPF_ABS: /* A = *(u32 *) (skb->data+K) */ jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_WORD; offset = jit->off_load_word; goto load_abs; - case BPF_S_LD_H_ABS: /* A = *(u16 *) (skb->data+K) */ + case BPF_LD | BPF_H | BPF_ABS: /* A = *(u16 *) (skb->data+K) */ jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_HALF; offset = jit->off_load_half; goto load_abs; - case BPF_S_LD_B_ABS: /* A = *(u8 *) (skb->data+K) */ + case BPF_LD | BPF_B | BPF_ABS: /* A = *(u8 *) (skb->data+K) */ jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_BYTE; offset = jit->off_load_byte; load_abs: if ((int) K < 0) @@ -573,19 +566,19 @@ call_fn: /* lg %r1,(%r13) */ /* jnz */ EMIT4_PCREL(0xa7740000, (jit->ret0_ip - jit->prg)); break; - case BPF_S_LD_W_IND: /* A = *(u32 *) (skb->data+K+X) */ + case BPF_LD | BPF_W | BPF_IND: /* A = *(u32 *) (skb->data+K+X) */ jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IWORD; offset = jit->off_load_iword; goto call_fn; - case BPF_S_LD_H_IND: /* A = *(u16 *) (skb->data+K+X) */ + case BPF_LD | BPF_H | BPF_IND: /* A = *(u16 *) (skb->data+K+X) */ jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IHALF; offset = jit->off_load_ihalf; goto call_fn; - case BPF_S_LD_B_IND: /* A = *(u8 *) (skb->data+K+X) */ + case BPF_LD | BPF_B | BPF_IND: /* A = *(u8 *) (skb->data+K+X) */ jit->seen |= SEEN_DATAREF | SEEN_RET0 | SEEN_LOAD_IBYTE; offset = jit->off_load_ibyte; goto call_fn; - case BPF_S_LDX_B_MSH: + case BPF_LDX | BPF_B | BPF_MSH: /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */ jit->seen |= SEEN_RET0; if ((int) K < 0) { @@ -596,17 +589,17 @@ call_fn: /* lg %r1,(%r13) */ jit->seen |= SEEN_DATAREF | SEEN_LOAD_BMSH; offset = jit->off_load_bmsh; goto call_fn; - case BPF_S_LD_W_LEN: /* A = skb->len; */ + case BPF_LD | BPF_W | BPF_LEN: /* A = skb->len; */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); /* l %r5,(%r2) */ EMIT4_DISP(0x58502000, offsetof(struct sk_buff, len)); break; - case BPF_S_LDX_W_LEN: /* X = skb->len; */ + case BPF_LDX | BPF_W | BPF_LEN: /* X = skb->len; */ jit->seen |= SEEN_XREG; /* l %r12,(%r2) */ EMIT4_DISP(0x58c02000, offsetof(struct sk_buff, len)); break; - case BPF_S_LD_IMM: /* A = K */ + case BPF_LD | 
BPF_IMM: /* A = K */ if (K <= 16383) /* lhi %r5,K */ EMIT4_IMM(0xa7580000, K); @@ -617,7 +610,7 @@ call_fn: /* lg %r1,(%r13) */ /* l %r5,(%r13) */ EMIT4_DISP(0x5850d000, EMIT_CONST(K)); break; - case BPF_S_LDX_IMM: /* X = K */ + case BPF_LDX | BPF_IMM: /* X = K */ jit->seen |= SEEN_XREG; if (K <= 16383) /* lhi %r12, */ @@ -629,29 +622,29 @@ call_fn: /* lg %r1,(%r13) */ /* l %r12,(%r13) */ EMIT4_DISP(0x58c0d000, EMIT_CONST(K)); break; - case BPF_S_LD_MEM: /* A = mem[K] */ + case BPF_LD | BPF_MEM: /* A = mem[K] */ jit->seen |= SEEN_MEM; /* l %r5,(%r15) */ EMIT4_DISP(0x5850f000, (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); break; - case BPF_S_LDX_MEM: /* X = mem[K] */ + case BPF_LDX | BPF_MEM: /* X = mem[K] */ jit->seen |= SEEN_XREG | SEEN_MEM; /* l %r12,(%r15) */ EMIT4_DISP(0x58c0f000, (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); break; - case BPF_S_MISC_TAX: /* X = A */ + case BPF_MISC | BPF_TAX: /* X = A */ jit->seen |= SEEN_XREG; /* lr %r12,%r5 */ EMIT2(0x18c5); break; - case BPF_S_MISC_TXA: /* A = X */ + case BPF_MISC | BPF_TXA: /* A = X */ jit->seen |= SEEN_XREG; /* lr %r5,%r12 */ EMIT2(0x185c); break; - case BPF_S_RET_K: + case BPF_RET | BPF_K: if (K == 0) { jit->seen |= SEEN_RET0; if (last) @@ -671,33 +664,33 @@ call_fn: /* lg %r1,(%r13) */ EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg); } break; - case BPF_S_RET_A: + case BPF_RET | BPF_A: /* llgfr %r2,%r5 */ EMIT4(0xb9160025); /* j */ EMIT4_PCREL(0xa7f40000, jit->exit_ip - jit->prg); break; - case BPF_S_ST: /* mem[K] = A */ + case BPF_ST: /* mem[K] = A */ jit->seen |= SEEN_MEM; /* st %r5,(%r15) */ EMIT4_DISP(0x5050f000, (jit->seen & SEEN_DATAREF) ? 160 + K*4 : K*4); break; - case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */ + case BPF_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */ jit->seen |= SEEN_XREG | SEEN_MEM; /* st %r12,(%r15) */ EMIT4_DISP(0x50c0f000, (jit->seen & SEEN_DATAREF) ? 
160 + K*4 : K*4); break; - case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */ + case BPF_ANC | SKF_AD_PROTOCOL: /* A = ntohs(skb->protocol); */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); /* lhi %r5,0 */ EMIT4(0xa7580000); /* icm %r5,3,(%r2) */ EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, protocol)); break; - case BPF_S_ANC_IFINDEX: /* if (!skb->dev) return 0; - * A = skb->dev->ifindex */ + case BPF_ANC | SKF_AD_IFINDEX: /* if (!skb->dev) return 0; + * A = skb->dev->ifindex */ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4); jit->seen |= SEEN_RET0; /* lg %r1,(%r2) */ @@ -709,20 +702,20 @@ call_fn: /* lg %r1,(%r13) */ /* l %r5,(%r1) */ EMIT4_DISP(0x58501000, offsetof(struct net_device, ifindex)); break; - case BPF_S_ANC_MARK: /* A = skb->mark */ + case BPF_ANC | SKF_AD_MARK: /* A = skb->mark */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); /* l %r5,(%r2) */ EMIT4_DISP(0x58502000, offsetof(struct sk_buff, mark)); break; - case BPF_S_ANC_QUEUE: /* A = skb->queue_mapping */ + case BPF_ANC | SKF_AD_QUEUE: /* A = skb->queue_mapping */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2); /* lhi %r5,0 */ EMIT4(0xa7580000); /* icm %r5,3,(%r2) */ EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, queue_mapping)); break; - case BPF_S_ANC_HATYPE: /* if (!skb->dev) return 0; - * A = skb->dev->type */ + case BPF_ANC | SKF_AD_HATYPE: /* if (!skb->dev) return 0; + * A = skb->dev->type */ BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, type) != 2); jit->seen |= SEEN_RET0; /* lg %r1,(%r2) */ @@ -736,20 +729,20 @@ call_fn: /* lg %r1,(%r13) */ /* icm %r5,3,(%r1) */ EMIT4_DISP(0xbf531000, offsetof(struct net_device, type)); break; - case BPF_S_ANC_RXHASH: /* A = skb->hash */ + case BPF_ANC | SKF_AD_RXHASH: /* A = skb->hash */ BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); /* l %r5,(%r2) */ EMIT4_DISP(0x58502000, offsetof(struct sk_buff, hash)); break; - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: + case BPF_ANC | SKF_AD_VLAN_TAG: + case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); /* lhi %r5,0 */ EMIT4(0xa7580000); /* icm %r5,3,(%r2) */ EMIT4_DISP(0xbf532000, offsetof(struct sk_buff, vlan_tci)); - if (filter->code == BPF_S_ANC_VLAN_TAG) { + if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) { /* nill %r5,0xefff */ EMIT4_IMM(0xa5570000, ~VLAN_TAG_PRESENT); } else { @@ -759,7 +752,7 @@ call_fn: /* lg %r1,(%r13) */ EMIT4_DISP(0x88500000, 12); } break; - case BPF_S_ANC_PKTTYPE: + case BPF_ANC | SKF_AD_PKTTYPE: if (pkt_type_offset < 0) goto out; /* lhi %r5,0 */ @@ -769,7 +762,7 @@ call_fn: /* lg %r1,(%r13) */ /* srl %r5,5 */ EMIT4_DISP(0x88500000, 5); break; - case BPF_S_ANC_CPU: /* A = smp_processor_id() */ + case BPF_ANC | SKF_AD_CPU: /* A = smp_processor_id() */ #ifdef CONFIG_SMP /* l %r5, */ EMIT4_DISP(0x58500000, offsetof(struct _lowcore, cpu_nr)); diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index a82c6b2a9780..c88cf147deed 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c @@ -415,20 +415,11 @@ void bpf_jit_compile(struct sk_filter *fp) emit_reg_move(O7, r_saved_O7); switch (filter[0].code) { - case BPF_S_RET_K: - case BPF_S_LD_W_LEN: - case BPF_S_ANC_PROTOCOL: - case BPF_S_ANC_PKTTYPE: - case BPF_S_ANC_IFINDEX: - case BPF_S_ANC_MARK: - case BPF_S_ANC_RXHASH: - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: - case BPF_S_ANC_CPU: - case BPF_S_ANC_QUEUE: - case BPF_S_LD_W_ABS: - case 
BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: + case BPF_RET | BPF_K: + case BPF_LD | BPF_W | BPF_LEN: + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: /* The first instruction sets the A register (or is * a "RET 'constant'") */ @@ -445,59 +436,60 @@ void bpf_jit_compile(struct sk_filter *fp) unsigned int t_offset; unsigned int f_offset; u32 t_op, f_op; + u16 code = bpf_anc_helper(&filter[i]); int ilen; - switch (filter[i].code) { - case BPF_S_ALU_ADD_X: /* A += X; */ + switch (code) { + case BPF_ALU | BPF_ADD | BPF_X: /* A += X; */ emit_alu_X(ADD); break; - case BPF_S_ALU_ADD_K: /* A += K; */ + case BPF_ALU | BPF_ADD | BPF_K: /* A += K; */ emit_alu_K(ADD, K); break; - case BPF_S_ALU_SUB_X: /* A -= X; */ + case BPF_ALU | BPF_SUB | BPF_X: /* A -= X; */ emit_alu_X(SUB); break; - case BPF_S_ALU_SUB_K: /* A -= K */ + case BPF_ALU | BPF_SUB | BPF_K: /* A -= K */ emit_alu_K(SUB, K); break; - case BPF_S_ALU_AND_X: /* A &= X */ + case BPF_ALU | BPF_AND | BPF_X: /* A &= X */ emit_alu_X(AND); break; - case BPF_S_ALU_AND_K: /* A &= K */ + case BPF_ALU | BPF_AND | BPF_K: /* A &= K */ emit_alu_K(AND, K); break; - case BPF_S_ALU_OR_X: /* A |= X */ + case BPF_ALU | BPF_OR | BPF_X: /* A |= X */ emit_alu_X(OR); break; - case BPF_S_ALU_OR_K: /* A |= K */ + case BPF_ALU | BPF_OR | BPF_K: /* A |= K */ emit_alu_K(OR, K); break; - case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */ - case BPF_S_ALU_XOR_X: + case BPF_ANC | SKF_AD_ALU_XOR_X: /* A ^= X; */ + case BPF_ALU | BPF_XOR | BPF_X: emit_alu_X(XOR); break; - case BPF_S_ALU_XOR_K: /* A ^= K */ + case BPF_ALU | BPF_XOR | BPF_K: /* A ^= K */ emit_alu_K(XOR, K); break; - case BPF_S_ALU_LSH_X: /* A <<= X */ + case BPF_ALU | BPF_LSH | BPF_X: /* A <<= X */ emit_alu_X(SLL); break; - case BPF_S_ALU_LSH_K: /* A <<= K */ + case BPF_ALU | BPF_LSH | BPF_K: /* A <<= K */ emit_alu_K(SLL, K); break; - case BPF_S_ALU_RSH_X: /* A >>= X */ + case BPF_ALU | BPF_RSH | BPF_X: /* A >>= X */ emit_alu_X(SRL); break; - case BPF_S_ALU_RSH_K: /* A >>= K */ + case BPF_ALU | BPF_RSH | BPF_K: /* A >>= K */ emit_alu_K(SRL, K); break; - case BPF_S_ALU_MUL_X: /* A *= X; */ + case BPF_ALU | BPF_MUL | BPF_X: /* A *= X; */ emit_alu_X(MUL); break; - case BPF_S_ALU_MUL_K: /* A *= K */ + case BPF_ALU | BPF_MUL | BPF_K: /* A *= K */ emit_alu_K(MUL, K); break; - case BPF_S_ALU_DIV_K: /* A /= K with K != 0*/ + case BPF_ALU | BPF_DIV | BPF_K: /* A /= K with K != 0*/ if (K == 1) break; emit_write_y(G0); @@ -512,7 +504,7 @@ void bpf_jit_compile(struct sk_filter *fp) #endif emit_alu_K(DIV, K); break; - case BPF_S_ALU_DIV_X: /* A /= X; */ + case BPF_ALU | BPF_DIV | BPF_X: /* A /= X; */ emit_cmpi(r_X, 0); if (pc_ret0 > 0) { t_offset = addrs[pc_ret0 - 1]; @@ -544,10 +536,10 @@ void bpf_jit_compile(struct sk_filter *fp) #endif emit_alu_X(DIV); break; - case BPF_S_ALU_NEG: + case BPF_ALU | BPF_NEG: emit_neg(); break; - case BPF_S_RET_K: + case BPF_RET | BPF_K: if (!K) { if (pc_ret0 == -1) pc_ret0 = i; @@ -556,7 +548,7 @@ void bpf_jit_compile(struct sk_filter *fp) emit_loadimm(K, r_A); } /* Fallthrough */ - case BPF_S_RET_A: + case BPF_RET | BPF_A: if (seen_or_pass0) { if (i != flen - 1) { emit_jump(cleanup_addr); @@ -573,18 +565,18 @@ void bpf_jit_compile(struct sk_filter *fp) emit_jmpl(r_saved_O7, 8, G0); emit_reg_move(r_A, O0); /* delay slot */ break; - case BPF_S_MISC_TAX: + case BPF_MISC | BPF_TAX: seen |= SEEN_XREG; emit_reg_move(r_A, r_X); break; - case BPF_S_MISC_TXA: + case BPF_MISC | BPF_TXA: seen |= SEEN_XREG; emit_reg_move(r_X, r_A); break; - case BPF_S_ANC_CPU: + 
case BPF_ANC | SKF_AD_CPU: emit_load_cpu(r_A); break; - case BPF_S_ANC_PROTOCOL: + case BPF_ANC | SKF_AD_PROTOCOL: emit_skb_load16(protocol, r_A); break; #if 0 @@ -592,38 +584,38 @@ void bpf_jit_compile(struct sk_filter *fp) * a bit field even though we very much * know what we are doing here. */ - case BPF_S_ANC_PKTTYPE: + case BPF_ANC | SKF_AD_PKTTYPE: __emit_skb_load8(pkt_type, r_A); emit_alu_K(SRL, 5); break; #endif - case BPF_S_ANC_IFINDEX: + case BPF_ANC | SKF_AD_IFINDEX: emit_skb_loadptr(dev, r_A); emit_cmpi(r_A, 0); emit_branch(BNE_PTR, cleanup_addr + 4); emit_nop(); emit_load32(r_A, struct net_device, ifindex, r_A); break; - case BPF_S_ANC_MARK: + case BPF_ANC | SKF_AD_MARK: emit_skb_load32(mark, r_A); break; - case BPF_S_ANC_QUEUE: + case BPF_ANC | SKF_AD_QUEUE: emit_skb_load16(queue_mapping, r_A); break; - case BPF_S_ANC_HATYPE: + case BPF_ANC | SKF_AD_HATYPE: emit_skb_loadptr(dev, r_A); emit_cmpi(r_A, 0); emit_branch(BNE_PTR, cleanup_addr + 4); emit_nop(); emit_load16(r_A, struct net_device, type, r_A); break; - case BPF_S_ANC_RXHASH: + case BPF_ANC | SKF_AD_RXHASH: emit_skb_load32(hash, r_A); break; - case BPF_S_ANC_VLAN_TAG: - case BPF_S_ANC_VLAN_TAG_PRESENT: + case BPF_ANC | SKF_AD_VLAN_TAG: + case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: emit_skb_load16(vlan_tci, r_A); - if (filter[i].code == BPF_S_ANC_VLAN_TAG) { + if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) { emit_andi(r_A, VLAN_VID_MASK, r_A); } else { emit_loadimm(VLAN_TAG_PRESENT, r_TMP); @@ -631,44 +623,44 @@ void bpf_jit_compile(struct sk_filter *fp) } break; - case BPF_S_LD_IMM: + case BPF_LD | BPF_IMM: emit_loadimm(K, r_A); break; - case BPF_S_LDX_IMM: + case BPF_LDX | BPF_IMM: emit_loadimm(K, r_X); break; - case BPF_S_LD_MEM: + case BPF_LD | BPF_MEM: emit_ldmem(K * 4, r_A); break; - case BPF_S_LDX_MEM: + case BPF_LDX | BPF_MEM: emit_ldmem(K * 4, r_X); break; - case BPF_S_ST: + case BPF_ST: emit_stmem(K * 4, r_A); break; - case BPF_S_STX: + case BPF_STX: emit_stmem(K * 4, r_X); break; #define CHOOSE_LOAD_FUNC(K, func) \ ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? 
func##_negative_offset : func) : func##_positive_offset) - case BPF_S_LD_W_ABS: + case BPF_LD | BPF_W | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word); common_load: seen |= SEEN_DATAREF; emit_loadimm(K, r_OFF); emit_call(func); break; - case BPF_S_LD_H_ABS: + case BPF_LD | BPF_H | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half); goto common_load; - case BPF_S_LD_B_ABS: + case BPF_LD | BPF_B | BPF_ABS: func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte); goto common_load; - case BPF_S_LDX_B_MSH: + case BPF_LDX | BPF_B | BPF_MSH: func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh); goto common_load; - case BPF_S_LD_W_IND: + case BPF_LD | BPF_W | BPF_IND: func = bpf_jit_load_word; common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; if (K) { @@ -683,13 +675,13 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; } emit_call(func); break; - case BPF_S_LD_H_IND: + case BPF_LD | BPF_H | BPF_IND: func = bpf_jit_load_half; goto common_load_ind; - case BPF_S_LD_B_IND: + case BPF_LD | BPF_B | BPF_IND: func = bpf_jit_load_byte; goto common_load_ind; - case BPF_S_JMP_JA: + case BPF_JMP | BPF_JA: emit_jump(addrs[i + K]); emit_nop(); break; @@ -700,14 +692,14 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG; f_op = FOP; \ goto cond_branch - COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU); - COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU); - COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE); - COND_SEL(BPF_S_JMP_JSET_K, BNE, BE); - COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU); - COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU); - COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE); - COND_SEL(BPF_S_JMP_JSET_X, BNE, BE); + COND_SEL(BPF_JMP | BPF_JGT | BPF_K, BGU, BLEU); + COND_SEL(BPF_JMP | BPF_JGE | BPF_K, BGEU, BLU); + COND_SEL(BPF_JMP | BPF_JEQ | BPF_K, BE, BNE); + COND_SEL(BPF_JMP | BPF_JSET | BPF_K, BNE, BE); + COND_SEL(BPF_JMP | BPF_JGT | BPF_X, BGU, BLEU); + COND_SEL(BPF_JMP | BPF_JGE | BPF_X, BGEU, BLU); + COND_SEL(BPF_JMP | BPF_JEQ | BPF_X, BE, BNE); + COND_SEL(BPF_JMP | BPF_JSET | BPF_X, BNE, BE); cond_branch: f_offset = addrs[i + filter[i].jf]; t_offset = addrs[i + filter[i].jt]; @@ -719,20 +711,20 @@ cond_branch: f_offset = addrs[i + filter[i].jf]; break; } - switch (filter[i].code) { - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JEQ_X: + switch (code) { + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JEQ | BPF_X: seen |= SEEN_XREG; emit_cmp(r_A, r_X); break; - case BPF_S_JMP_JSET_X: + case BPF_JMP | BPF_JSET | BPF_X: seen |= SEEN_XREG; emit_btst(r_A, r_X); break; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGE_K: + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGE | BPF_K: if (is_simm13(K)) { emit_cmpi(r_A, K); } else { @@ -740,7 +732,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf]; emit_cmp(r_A, r_TMP); } break; - case BPF_S_JMP_JSET_K: + case BPF_JMP | BPF_JSET | BPF_K: if (is_simm13(K)) { emit_btsti(r_A, K); } else { diff --git a/include/linux/filter.h b/include/linux/filter.h index 625f4de9bdf2..49ef7a298c92 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -197,7 +197,6 @@ int sk_detach_filter(struct sock *sk); int sk_chk_filter(struct sock_filter *filter, unsigned int flen); int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned int len); -void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to); void sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); @@ -205,6 +204,41 @@ void 
sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); void bpf_int_jit_compile(struct sk_filter *fp); +#define BPF_ANC BIT(15) + +static inline u16 bpf_anc_helper(const struct sock_filter *ftest) +{ + BUG_ON(ftest->code & BPF_ANC); + + switch (ftest->code) { + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: +#define BPF_ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ + return BPF_ANC | SKF_AD_##CODE + switch (ftest->k) { + BPF_ANCILLARY(PROTOCOL); + BPF_ANCILLARY(PKTTYPE); + BPF_ANCILLARY(IFINDEX); + BPF_ANCILLARY(NLATTR); + BPF_ANCILLARY(NLATTR_NEST); + BPF_ANCILLARY(MARK); + BPF_ANCILLARY(QUEUE); + BPF_ANCILLARY(HATYPE); + BPF_ANCILLARY(RXHASH); + BPF_ANCILLARY(CPU); + BPF_ANCILLARY(ALU_XOR_X); + BPF_ANCILLARY(VLAN_TAG); + BPF_ANCILLARY(VLAN_TAG_PRESENT); + BPF_ANCILLARY(PAY_OFFSET); + BPF_ANCILLARY(RANDOM); + } + /* Fallthrough. */ + default: + return ftest->code; + } +} + #ifdef CONFIG_BPF_JIT #include #include @@ -224,86 +258,20 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, } #else #include + static inline void bpf_jit_compile(struct sk_filter *fp) { } + static inline void bpf_jit_free(struct sk_filter *fp) { kfree(fp); } -#endif +#endif /* CONFIG_BPF_JIT */ static inline int bpf_tell_extensions(void) { return SKF_AD_MAX; } -enum { - BPF_S_RET_K = 1, - BPF_S_RET_A, - BPF_S_ALU_ADD_K, - BPF_S_ALU_ADD_X, - BPF_S_ALU_SUB_K, - BPF_S_ALU_SUB_X, - BPF_S_ALU_MUL_K, - BPF_S_ALU_MUL_X, - BPF_S_ALU_DIV_X, - BPF_S_ALU_MOD_K, - BPF_S_ALU_MOD_X, - BPF_S_ALU_AND_K, - BPF_S_ALU_AND_X, - BPF_S_ALU_OR_K, - BPF_S_ALU_OR_X, - BPF_S_ALU_XOR_K, - BPF_S_ALU_XOR_X, - BPF_S_ALU_LSH_K, - BPF_S_ALU_LSH_X, - BPF_S_ALU_RSH_K, - BPF_S_ALU_RSH_X, - BPF_S_ALU_NEG, - BPF_S_LD_W_ABS, - BPF_S_LD_H_ABS, - BPF_S_LD_B_ABS, - BPF_S_LD_W_LEN, - BPF_S_LD_W_IND, - BPF_S_LD_H_IND, - BPF_S_LD_B_IND, - BPF_S_LD_IMM, - BPF_S_LDX_W_LEN, - BPF_S_LDX_B_MSH, - BPF_S_LDX_IMM, - BPF_S_MISC_TAX, - BPF_S_MISC_TXA, - BPF_S_ALU_DIV_K, - BPF_S_LD_MEM, - BPF_S_LDX_MEM, - BPF_S_ST, - BPF_S_STX, - BPF_S_JMP_JA, - BPF_S_JMP_JEQ_K, - BPF_S_JMP_JEQ_X, - BPF_S_JMP_JGE_K, - BPF_S_JMP_JGE_X, - BPF_S_JMP_JGT_K, - BPF_S_JMP_JGT_X, - BPF_S_JMP_JSET_K, - BPF_S_JMP_JSET_X, - /* Ancillary data */ - BPF_S_ANC_PROTOCOL, - BPF_S_ANC_PKTTYPE, - BPF_S_ANC_IFINDEX, - BPF_S_ANC_NLATTR, - BPF_S_ANC_NLATTR_NEST, - BPF_S_ANC_MARK, - BPF_S_ANC_QUEUE, - BPF_S_ANC_HATYPE, - BPF_S_ANC_RXHASH, - BPF_S_ANC_CPU, - BPF_S_ANC_ALU_XOR_X, - BPF_S_ANC_VLAN_TAG, - BPF_S_ANC_VLAN_TAG_PRESENT, - BPF_S_ANC_PAY_OFFSET, - BPF_S_ANC_RANDOM, -}; - #endif /* __LINUX_FILTER_H__ */ diff --git a/kernel/seccomp.c b/kernel/seccomp.c index 1036b6f2fded..44e69483b383 100644 --- a/kernel/seccomp.c +++ b/kernel/seccomp.c @@ -103,60 +103,59 @@ static int seccomp_check_filter(struct sock_filter *filter, unsigned int flen) u32 k = ftest->k; switch (code) { - case BPF_S_LD_W_ABS: + case BPF_LD | BPF_W | BPF_ABS: ftest->code = BPF_LDX | BPF_W | BPF_ABS; /* 32-bit aligned and not out of bounds. */ if (k >= sizeof(struct seccomp_data) || k & 3) return -EINVAL; continue; - case BPF_S_LD_W_LEN: + case BPF_LD | BPF_W | BPF_LEN: ftest->code = BPF_LD | BPF_IMM; ftest->k = sizeof(struct seccomp_data); continue; - case BPF_S_LDX_W_LEN: + case BPF_LDX | BPF_W | BPF_LEN: ftest->code = BPF_LDX | BPF_IMM; ftest->k = sizeof(struct seccomp_data); continue; /* Explicitly include allowed calls. 
*/ - case BPF_S_RET_K: - case BPF_S_RET_A: - case BPF_S_ALU_ADD_K: - case BPF_S_ALU_ADD_X: - case BPF_S_ALU_SUB_K: - case BPF_S_ALU_SUB_X: - case BPF_S_ALU_MUL_K: - case BPF_S_ALU_MUL_X: - case BPF_S_ALU_DIV_X: - case BPF_S_ALU_AND_K: - case BPF_S_ALU_AND_X: - case BPF_S_ALU_OR_K: - case BPF_S_ALU_OR_X: - case BPF_S_ALU_XOR_K: - case BPF_S_ALU_XOR_X: - case BPF_S_ALU_LSH_K: - case BPF_S_ALU_LSH_X: - case BPF_S_ALU_RSH_K: - case BPF_S_ALU_RSH_X: - case BPF_S_ALU_NEG: - case BPF_S_LD_IMM: - case BPF_S_LDX_IMM: - case BPF_S_MISC_TAX: - case BPF_S_MISC_TXA: - case BPF_S_ALU_DIV_K: - case BPF_S_LD_MEM: - case BPF_S_LDX_MEM: - case BPF_S_ST: - case BPF_S_STX: - case BPF_S_JMP_JA: - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JEQ_X: - case BPF_S_JMP_JGE_K: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JSET_K: - case BPF_S_JMP_JSET_X: - sk_decode_filter(ftest, ftest); + case BPF_RET | BPF_K: + case BPF_RET | BPF_A: + case BPF_ALU | BPF_ADD | BPF_K: + case BPF_ALU | BPF_ADD | BPF_X: + case BPF_ALU | BPF_SUB | BPF_K: + case BPF_ALU | BPF_SUB | BPF_X: + case BPF_ALU | BPF_MUL | BPF_K: + case BPF_ALU | BPF_MUL | BPF_X: + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU | BPF_DIV | BPF_X: + case BPF_ALU | BPF_AND | BPF_K: + case BPF_ALU | BPF_AND | BPF_X: + case BPF_ALU | BPF_OR | BPF_K: + case BPF_ALU | BPF_OR | BPF_X: + case BPF_ALU | BPF_XOR | BPF_K: + case BPF_ALU | BPF_XOR | BPF_X: + case BPF_ALU | BPF_LSH | BPF_K: + case BPF_ALU | BPF_LSH | BPF_X: + case BPF_ALU | BPF_RSH | BPF_K: + case BPF_ALU | BPF_RSH | BPF_X: + case BPF_ALU | BPF_NEG: + case BPF_LD | BPF_IMM: + case BPF_LDX | BPF_IMM: + case BPF_MISC | BPF_TAX: + case BPF_MISC | BPF_TXA: + case BPF_LD | BPF_MEM: + case BPF_LDX | BPF_MEM: + case BPF_ST: + case BPF_STX: + case BPF_JMP | BPF_JA: + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JSET | BPF_X: continue; default: return -EINVAL; diff --git a/net/core/filter.c b/net/core/filter.c index 2c2d35d9d101..328aaf6ff4d1 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -536,11 +536,13 @@ load_word: * Output: * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness */ + ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp); if (likely(ptr != NULL)) { BPF_R0 = get_unaligned_be32(ptr); CONT; } + return 0; LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */ off = K; @@ -550,6 +552,7 @@ load_half: BPF_R0 = get_unaligned_be16(ptr); CONT; } + return 0; LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */ off = K; @@ -559,6 +562,7 @@ load_byte: BPF_R0 = *(u8 *)ptr; CONT; } + return 0; LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */ off = K + X; @@ -1136,44 +1140,46 @@ err: */ static int check_load_and_stores(struct sock_filter *filter, int flen) { - u16 *masks, memvalid = 0; /* one bit per cell, 16 cells */ + u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */ int pc, ret = 0; BUILD_BUG_ON(BPF_MEMWORDS > 16); + masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL); if (!masks) return -ENOMEM; + memset(masks, 0xff, flen * sizeof(*masks)); for (pc = 0; pc < flen; pc++) { memvalid &= masks[pc]; switch (filter[pc].code) { - case BPF_S_ST: - case BPF_S_STX: + case BPF_ST: + case BPF_STX: memvalid |= (1 << filter[pc].k); break; - case BPF_S_LD_MEM: - case BPF_S_LDX_MEM: + case BPF_LD | BPF_MEM: + case BPF_LDX | BPF_MEM: if (!(memvalid & 
(1 << filter[pc].k))) { ret = -EINVAL; goto error; } break; - case BPF_S_JMP_JA: - /* a jump must set masks on target */ + case BPF_JMP | BPF_JA: + /* A jump must set masks on target */ masks[pc + 1 + filter[pc].k] &= memvalid; memvalid = ~0; break; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JEQ_X: - case BPF_S_JMP_JGE_K: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JSET_X: - case BPF_S_JMP_JSET_K: - /* a jump must set masks on targets */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JSET | BPF_X: + /* A jump must set masks on targets */ masks[pc + 1 + filter[pc].jt] &= memvalid; masks[pc + 1 + filter[pc].jf] &= memvalid; memvalid = ~0; @@ -1185,6 +1191,72 @@ error: return ret; } +static bool chk_code_allowed(u16 code_to_probe) +{ + static const bool codes[] = { + /* 32 bit ALU operations */ + [BPF_ALU | BPF_ADD | BPF_K] = true, + [BPF_ALU | BPF_ADD | BPF_X] = true, + [BPF_ALU | BPF_SUB | BPF_K] = true, + [BPF_ALU | BPF_SUB | BPF_X] = true, + [BPF_ALU | BPF_MUL | BPF_K] = true, + [BPF_ALU | BPF_MUL | BPF_X] = true, + [BPF_ALU | BPF_DIV | BPF_K] = true, + [BPF_ALU | BPF_DIV | BPF_X] = true, + [BPF_ALU | BPF_MOD | BPF_K] = true, + [BPF_ALU | BPF_MOD | BPF_X] = true, + [BPF_ALU | BPF_AND | BPF_K] = true, + [BPF_ALU | BPF_AND | BPF_X] = true, + [BPF_ALU | BPF_OR | BPF_K] = true, + [BPF_ALU | BPF_OR | BPF_X] = true, + [BPF_ALU | BPF_XOR | BPF_K] = true, + [BPF_ALU | BPF_XOR | BPF_X] = true, + [BPF_ALU | BPF_LSH | BPF_K] = true, + [BPF_ALU | BPF_LSH | BPF_X] = true, + [BPF_ALU | BPF_RSH | BPF_K] = true, + [BPF_ALU | BPF_RSH | BPF_X] = true, + [BPF_ALU | BPF_NEG] = true, + /* Load instructions */ + [BPF_LD | BPF_W | BPF_ABS] = true, + [BPF_LD | BPF_H | BPF_ABS] = true, + [BPF_LD | BPF_B | BPF_ABS] = true, + [BPF_LD | BPF_W | BPF_LEN] = true, + [BPF_LD | BPF_W | BPF_IND] = true, + [BPF_LD | BPF_H | BPF_IND] = true, + [BPF_LD | BPF_B | BPF_IND] = true, + [BPF_LD | BPF_IMM] = true, + [BPF_LD | BPF_MEM] = true, + [BPF_LDX | BPF_W | BPF_LEN] = true, + [BPF_LDX | BPF_B | BPF_MSH] = true, + [BPF_LDX | BPF_IMM] = true, + [BPF_LDX | BPF_MEM] = true, + /* Store instructions */ + [BPF_ST] = true, + [BPF_STX] = true, + /* Misc instructions */ + [BPF_MISC | BPF_TAX] = true, + [BPF_MISC | BPF_TXA] = true, + /* Return instructions */ + [BPF_RET | BPF_K] = true, + [BPF_RET | BPF_A] = true, + /* Jump instructions */ + [BPF_JMP | BPF_JA] = true, + [BPF_JMP | BPF_JEQ | BPF_K] = true, + [BPF_JMP | BPF_JEQ | BPF_X] = true, + [BPF_JMP | BPF_JGE | BPF_K] = true, + [BPF_JMP | BPF_JGE | BPF_X] = true, + [BPF_JMP | BPF_JGT | BPF_K] = true, + [BPF_JMP | BPF_JGT | BPF_X] = true, + [BPF_JMP | BPF_JSET | BPF_K] = true, + [BPF_JMP | BPF_JSET | BPF_X] = true, + }; + + if (code_to_probe >= ARRAY_SIZE(codes)) + return false; + + return codes[code_to_probe]; +} + /** * sk_chk_filter - verify socket filter code * @filter: filter to verify @@ -1201,154 +1273,76 @@ error: */ int sk_chk_filter(struct sock_filter *filter, unsigned int flen) { - /* - * Valid instructions are initialized to non-0. - * Invalid instructions are initialized to 0. 
- */ - static const u8 codes[] = { - [BPF_ALU|BPF_ADD|BPF_K] = BPF_S_ALU_ADD_K, - [BPF_ALU|BPF_ADD|BPF_X] = BPF_S_ALU_ADD_X, - [BPF_ALU|BPF_SUB|BPF_K] = BPF_S_ALU_SUB_K, - [BPF_ALU|BPF_SUB|BPF_X] = BPF_S_ALU_SUB_X, - [BPF_ALU|BPF_MUL|BPF_K] = BPF_S_ALU_MUL_K, - [BPF_ALU|BPF_MUL|BPF_X] = BPF_S_ALU_MUL_X, - [BPF_ALU|BPF_DIV|BPF_X] = BPF_S_ALU_DIV_X, - [BPF_ALU|BPF_MOD|BPF_K] = BPF_S_ALU_MOD_K, - [BPF_ALU|BPF_MOD|BPF_X] = BPF_S_ALU_MOD_X, - [BPF_ALU|BPF_AND|BPF_K] = BPF_S_ALU_AND_K, - [BPF_ALU|BPF_AND|BPF_X] = BPF_S_ALU_AND_X, - [BPF_ALU|BPF_OR|BPF_K] = BPF_S_ALU_OR_K, - [BPF_ALU|BPF_OR|BPF_X] = BPF_S_ALU_OR_X, - [BPF_ALU|BPF_XOR|BPF_K] = BPF_S_ALU_XOR_K, - [BPF_ALU|BPF_XOR|BPF_X] = BPF_S_ALU_XOR_X, - [BPF_ALU|BPF_LSH|BPF_K] = BPF_S_ALU_LSH_K, - [BPF_ALU|BPF_LSH|BPF_X] = BPF_S_ALU_LSH_X, - [BPF_ALU|BPF_RSH|BPF_K] = BPF_S_ALU_RSH_K, - [BPF_ALU|BPF_RSH|BPF_X] = BPF_S_ALU_RSH_X, - [BPF_ALU|BPF_NEG] = BPF_S_ALU_NEG, - [BPF_LD|BPF_W|BPF_ABS] = BPF_S_LD_W_ABS, - [BPF_LD|BPF_H|BPF_ABS] = BPF_S_LD_H_ABS, - [BPF_LD|BPF_B|BPF_ABS] = BPF_S_LD_B_ABS, - [BPF_LD|BPF_W|BPF_LEN] = BPF_S_LD_W_LEN, - [BPF_LD|BPF_W|BPF_IND] = BPF_S_LD_W_IND, - [BPF_LD|BPF_H|BPF_IND] = BPF_S_LD_H_IND, - [BPF_LD|BPF_B|BPF_IND] = BPF_S_LD_B_IND, - [BPF_LD|BPF_IMM] = BPF_S_LD_IMM, - [BPF_LDX|BPF_W|BPF_LEN] = BPF_S_LDX_W_LEN, - [BPF_LDX|BPF_B|BPF_MSH] = BPF_S_LDX_B_MSH, - [BPF_LDX|BPF_IMM] = BPF_S_LDX_IMM, - [BPF_MISC|BPF_TAX] = BPF_S_MISC_TAX, - [BPF_MISC|BPF_TXA] = BPF_S_MISC_TXA, - [BPF_RET|BPF_K] = BPF_S_RET_K, - [BPF_RET|BPF_A] = BPF_S_RET_A, - [BPF_ALU|BPF_DIV|BPF_K] = BPF_S_ALU_DIV_K, - [BPF_LD|BPF_MEM] = BPF_S_LD_MEM, - [BPF_LDX|BPF_MEM] = BPF_S_LDX_MEM, - [BPF_ST] = BPF_S_ST, - [BPF_STX] = BPF_S_STX, - [BPF_JMP|BPF_JA] = BPF_S_JMP_JA, - [BPF_JMP|BPF_JEQ|BPF_K] = BPF_S_JMP_JEQ_K, - [BPF_JMP|BPF_JEQ|BPF_X] = BPF_S_JMP_JEQ_X, - [BPF_JMP|BPF_JGE|BPF_K] = BPF_S_JMP_JGE_K, - [BPF_JMP|BPF_JGE|BPF_X] = BPF_S_JMP_JGE_X, - [BPF_JMP|BPF_JGT|BPF_K] = BPF_S_JMP_JGT_K, - [BPF_JMP|BPF_JGT|BPF_X] = BPF_S_JMP_JGT_X, - [BPF_JMP|BPF_JSET|BPF_K] = BPF_S_JMP_JSET_K, - [BPF_JMP|BPF_JSET|BPF_X] = BPF_S_JMP_JSET_X, - }; - int pc; bool anc_found; + int pc; if (flen == 0 || flen > BPF_MAXINSNS) return -EINVAL; - /* check the filter code now */ + /* Check the filter code now */ for (pc = 0; pc < flen; pc++) { struct sock_filter *ftest = &filter[pc]; - u16 code = ftest->code; - if (code >= ARRAY_SIZE(codes)) - return -EINVAL; - code = codes[code]; - if (!code) + /* May we actually operate on this code? */ + if (!chk_code_allowed(ftest->code)) return -EINVAL; + /* Some instructions need special checks */ - switch (code) { - case BPF_S_ALU_DIV_K: - case BPF_S_ALU_MOD_K: - /* check for division by zero */ + switch (ftest->code) { + case BPF_ALU | BPF_DIV | BPF_K: + case BPF_ALU | BPF_MOD | BPF_K: + /* Check for division by zero */ if (ftest->k == 0) return -EINVAL; break; - case BPF_S_LD_MEM: - case BPF_S_LDX_MEM: - case BPF_S_ST: - case BPF_S_STX: - /* check for invalid memory addresses */ + case BPF_LD | BPF_MEM: + case BPF_LDX | BPF_MEM: + case BPF_ST: + case BPF_STX: + /* Check for invalid memory addresses */ if (ftest->k >= BPF_MEMWORDS) return -EINVAL; break; - case BPF_S_JMP_JA: - /* - * Note, the large ftest->k might cause loops. + case BPF_JMP | BPF_JA: + /* Note, the large ftest->k might cause loops. * Compare this with conditional jumps below, * where offsets are limited. 
--ANK (981016) */ - if (ftest->k >= (unsigned int)(flen-pc-1)) + if (ftest->k >= (unsigned int)(flen - pc - 1)) return -EINVAL; break; - case BPF_S_JMP_JEQ_K: - case BPF_S_JMP_JEQ_X: - case BPF_S_JMP_JGE_K: - case BPF_S_JMP_JGE_X: - case BPF_S_JMP_JGT_K: - case BPF_S_JMP_JGT_X: - case BPF_S_JMP_JSET_X: - case BPF_S_JMP_JSET_K: - /* for conditionals both must be safe */ + case BPF_JMP | BPF_JEQ | BPF_K: + case BPF_JMP | BPF_JEQ | BPF_X: + case BPF_JMP | BPF_JGE | BPF_K: + case BPF_JMP | BPF_JGE | BPF_X: + case BPF_JMP | BPF_JGT | BPF_K: + case BPF_JMP | BPF_JGT | BPF_X: + case BPF_JMP | BPF_JSET | BPF_K: + case BPF_JMP | BPF_JSET | BPF_X: + /* Both conditionals must be safe */ if (pc + ftest->jt + 1 >= flen || pc + ftest->jf + 1 >= flen) return -EINVAL; break; - case BPF_S_LD_W_ABS: - case BPF_S_LD_H_ABS: - case BPF_S_LD_B_ABS: + case BPF_LD | BPF_W | BPF_ABS: + case BPF_LD | BPF_H | BPF_ABS: + case BPF_LD | BPF_B | BPF_ABS: anc_found = false; -#define ANCILLARY(CODE) case SKF_AD_OFF + SKF_AD_##CODE: \ - code = BPF_S_ANC_##CODE; \ - anc_found = true; \ - break - switch (ftest->k) { - ANCILLARY(PROTOCOL); - ANCILLARY(PKTTYPE); - ANCILLARY(IFINDEX); - ANCILLARY(NLATTR); - ANCILLARY(NLATTR_NEST); - ANCILLARY(MARK); - ANCILLARY(QUEUE); - ANCILLARY(HATYPE); - ANCILLARY(RXHASH); - ANCILLARY(CPU); - ANCILLARY(ALU_XOR_X); - ANCILLARY(VLAN_TAG); - ANCILLARY(VLAN_TAG_PRESENT); - ANCILLARY(PAY_OFFSET); - ANCILLARY(RANDOM); - } - - /* ancillary operation unknown or unsupported */ + if (bpf_anc_helper(ftest) & BPF_ANC) + anc_found = true; + /* Ancillary operation unknown or unsupported */ if (anc_found == false && ftest->k >= SKF_AD_OFF) return -EINVAL; } - ftest->code = code; } - /* last instruction must be a RET code */ + /* Last instruction must be a RET code */ switch (filter[flen - 1].code) { - case BPF_S_RET_K: - case BPF_S_RET_A: + case BPF_RET | BPF_K: + case BPF_RET | BPF_A: return check_load_and_stores(filter, flen); } + return -EINVAL; } EXPORT_SYMBOL(sk_chk_filter); @@ -1448,7 +1442,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp, { struct sock_filter *old_prog; struct sk_filter *old_fp; - int i, err, new_len, old_len = fp->len; + int err, new_len, old_len = fp->len; /* We are free to overwrite insns et al right here as it * won't be used at this point in time anymore internally @@ -1458,13 +1452,6 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp, BUILD_BUG_ON(sizeof(struct sock_filter) != sizeof(struct sock_filter_int)); - /* For now, we need to unfiddle BPF_S_* identifiers in place. - * This can sooner or later on be subject to removal, e.g. when - * JITs have been converted. - */ - for (i = 0; i < fp->len; i++) - sk_decode_filter(&fp->insns[i], &fp->insns[i]); - /* Conversion cannot happen on overlapping memory areas, * so we need to keep the user BPF around until the 2nd * pass. At this time, the user BPF is stored in fp->insns. 
@@ -1706,84 +1693,6 @@ int sk_detach_filter(struct sock *sk) } EXPORT_SYMBOL_GPL(sk_detach_filter); -void sk_decode_filter(struct sock_filter *filt, struct sock_filter *to) -{ - static const u16 decodes[] = { - [BPF_S_ALU_ADD_K] = BPF_ALU|BPF_ADD|BPF_K, - [BPF_S_ALU_ADD_X] = BPF_ALU|BPF_ADD|BPF_X, - [BPF_S_ALU_SUB_K] = BPF_ALU|BPF_SUB|BPF_K, - [BPF_S_ALU_SUB_X] = BPF_ALU|BPF_SUB|BPF_X, - [BPF_S_ALU_MUL_K] = BPF_ALU|BPF_MUL|BPF_K, - [BPF_S_ALU_MUL_X] = BPF_ALU|BPF_MUL|BPF_X, - [BPF_S_ALU_DIV_X] = BPF_ALU|BPF_DIV|BPF_X, - [BPF_S_ALU_MOD_K] = BPF_ALU|BPF_MOD|BPF_K, - [BPF_S_ALU_MOD_X] = BPF_ALU|BPF_MOD|BPF_X, - [BPF_S_ALU_AND_K] = BPF_ALU|BPF_AND|BPF_K, - [BPF_S_ALU_AND_X] = BPF_ALU|BPF_AND|BPF_X, - [BPF_S_ALU_OR_K] = BPF_ALU|BPF_OR|BPF_K, - [BPF_S_ALU_OR_X] = BPF_ALU|BPF_OR|BPF_X, - [BPF_S_ALU_XOR_K] = BPF_ALU|BPF_XOR|BPF_K, - [BPF_S_ALU_XOR_X] = BPF_ALU|BPF_XOR|BPF_X, - [BPF_S_ALU_LSH_K] = BPF_ALU|BPF_LSH|BPF_K, - [BPF_S_ALU_LSH_X] = BPF_ALU|BPF_LSH|BPF_X, - [BPF_S_ALU_RSH_K] = BPF_ALU|BPF_RSH|BPF_K, - [BPF_S_ALU_RSH_X] = BPF_ALU|BPF_RSH|BPF_X, - [BPF_S_ALU_NEG] = BPF_ALU|BPF_NEG, - [BPF_S_LD_W_ABS] = BPF_LD|BPF_W|BPF_ABS, - [BPF_S_LD_H_ABS] = BPF_LD|BPF_H|BPF_ABS, - [BPF_S_LD_B_ABS] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_PROTOCOL] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_PKTTYPE] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_IFINDEX] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_NLATTR] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_NLATTR_NEST] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_MARK] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_QUEUE] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_HATYPE] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_RXHASH] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_CPU] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_ALU_XOR_X] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_VLAN_TAG] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_VLAN_TAG_PRESENT] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_PAY_OFFSET] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_ANC_RANDOM] = BPF_LD|BPF_B|BPF_ABS, - [BPF_S_LD_W_LEN] = BPF_LD|BPF_W|BPF_LEN, - [BPF_S_LD_W_IND] = BPF_LD|BPF_W|BPF_IND, - [BPF_S_LD_H_IND] = BPF_LD|BPF_H|BPF_IND, - [BPF_S_LD_B_IND] = BPF_LD|BPF_B|BPF_IND, - [BPF_S_LD_IMM] = BPF_LD|BPF_IMM, - [BPF_S_LDX_W_LEN] = BPF_LDX|BPF_W|BPF_LEN, - [BPF_S_LDX_B_MSH] = BPF_LDX|BPF_B|BPF_MSH, - [BPF_S_LDX_IMM] = BPF_LDX|BPF_IMM, - [BPF_S_MISC_TAX] = BPF_MISC|BPF_TAX, - [BPF_S_MISC_TXA] = BPF_MISC|BPF_TXA, - [BPF_S_RET_K] = BPF_RET|BPF_K, - [BPF_S_RET_A] = BPF_RET|BPF_A, - [BPF_S_ALU_DIV_K] = BPF_ALU|BPF_DIV|BPF_K, - [BPF_S_LD_MEM] = BPF_LD|BPF_MEM, - [BPF_S_LDX_MEM] = BPF_LDX|BPF_MEM, - [BPF_S_ST] = BPF_ST, - [BPF_S_STX] = BPF_STX, - [BPF_S_JMP_JA] = BPF_JMP|BPF_JA, - [BPF_S_JMP_JEQ_K] = BPF_JMP|BPF_JEQ|BPF_K, - [BPF_S_JMP_JEQ_X] = BPF_JMP|BPF_JEQ|BPF_X, - [BPF_S_JMP_JGE_K] = BPF_JMP|BPF_JGE|BPF_K, - [BPF_S_JMP_JGE_X] = BPF_JMP|BPF_JGE|BPF_X, - [BPF_S_JMP_JGT_K] = BPF_JMP|BPF_JGT|BPF_K, - [BPF_S_JMP_JGT_X] = BPF_JMP|BPF_JGT|BPF_X, - [BPF_S_JMP_JSET_K] = BPF_JMP|BPF_JSET|BPF_K, - [BPF_S_JMP_JSET_X] = BPF_JMP|BPF_JSET|BPF_X, - }; - u16 code; - - code = filt->code; - - to->code = decodes[code]; - to->jt = filt->jt; - to->jf = filt->jf; - to->k = filt->k; -} - int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf, unsigned int len) { -- cgit v1.2.3 From ca8ce3d0b144c318a5a9ce99649053e9029061ea Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 26 May 2014 18:19:39 -0400 Subject: sched: Final power vs. capacity cleanups It is better not to think about compute capacity as being equivalent to "CPU power". 
The upcoming "power aware" scheduler work may create confusion with the notion of energy consumption if "power" is used too liberally. This contains the architecture visible changes. Incidentally, only ARM takes advantage of the available pow^H^H^Hcapacity scaling hooks and therefore those changes outside kernel/sched/ are confined to one ARM specific file. The default arch_scale_smt_power() hook is not overridden by anyone. Replacements are as follows: arch_scale_freq_power --> arch_scale_freq_capacity arch_scale_smt_power --> arch_scale_smt_capacity SCHED_POWER_SCALE --> SCHED_CAPACITY_SCALE SCHED_POWER_SHIFT --> SCHED_CAPACITY_SHIFT The local usage of "power" in arch/arm/kernel/topology.c is also changed to "capacity" as appropriate. Signed-off-by: Nicolas Pitre Signed-off-by: Peter Zijlstra Cc: Vincent Guittot Cc: Daniel Lezcano Cc: Morten Rasmussen Cc: "Rafael J. Wysocki" Cc: linaro-kernel@lists.linaro.org Cc: Arnd Bergmann Cc: Dietmar Eggemann Cc: Grant Likely Cc: Linus Torvalds Cc: Mark Brown Cc: Rob Herring Cc: Russell King Cc: Sudeep KarkadaNagesha Cc: Vincent Guittot Cc: devicetree@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Link: http://lkml.kernel.org/n/tip-48zba9qbznvglwelgq2cfygh@git.kernel.org Signed-off-by: Ingo Molnar --- arch/arm/kernel/topology.c | 54 +++++++++++++++++++++--------------------- include/linux/sched.h | 6 ++--- kernel/sched/core.c | 6 ++--- kernel/sched/fair.c | 59 +++++++++++++++++++++++----------------------- 4 files changed, 63 insertions(+), 62 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index 71e1fec6d31a..d42a7db22236 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -26,30 +26,30 @@ #include /* - * cpu power scale management + * cpu capacity scale management */ /* - * cpu power table + * cpu capacity table * This per cpu data structure describes the relative capacity of each core. * On a heteregenous system, cores don't have the same computation capacity - * and we reflect that difference in the cpu_power field so the scheduler can - * take this difference into account during load balance. A per cpu structure - * is preferred because each CPU updates its own cpu_power field during the - * load balance except for idle cores. One idle core is selected to run the - * rebalance_domains for all idle cores and the cpu_power can be updated - * during this sequence. + * and we reflect that difference in the cpu_capacity field so the scheduler + * can take this difference into account during load balance. A per cpu + * structure is preferred because each CPU updates its own cpu_capacity field + * during the load balance except for idle cores. One idle core is selected + * to run the rebalance_domains for all idle cores and the cpu_capacity can be + * updated during this sequence. 
*/ static DEFINE_PER_CPU(unsigned long, cpu_scale); -unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu) +unsigned long arch_scale_freq_capacity(struct sched_domain *sd, int cpu) { return per_cpu(cpu_scale, cpu); } -static void set_power_scale(unsigned int cpu, unsigned long power) +static void set_capacity_scale(unsigned int cpu, unsigned long capacity) { - per_cpu(cpu_scale, cpu) = power; + per_cpu(cpu_scale, cpu) = capacity; } #ifdef CONFIG_OF @@ -62,11 +62,11 @@ struct cpu_efficiency { * Table of relative efficiency of each processors * The efficiency value must fit in 20bit and the final * cpu_scale value must be in the range - * 0 < cpu_scale < 3*SCHED_POWER_SCALE/2 + * 0 < cpu_scale < 3*SCHED_CAPACITY_SCALE/2 * in order to return at most 1 when DIV_ROUND_CLOSEST * is used to compute the capacity of a CPU. * Processors that are not defined in the table, - * use the default SCHED_POWER_SCALE value for cpu_scale. + * use the default SCHED_CAPACITY_SCALE value for cpu_scale. */ static const struct cpu_efficiency table_efficiency[] = { {"arm,cortex-a15", 3891}, @@ -83,9 +83,9 @@ static unsigned long middle_capacity = 1; * Iterate all CPUs' descriptor in DT and compute the efficiency * (as per table_efficiency). Also calculate a middle efficiency * as close as possible to (max{eff_i} - min{eff_i}) / 2 - * This is later used to scale the cpu_power field such that an - * 'average' CPU is of middle power. Also see the comments near - * table_efficiency[] and update_cpu_power(). + * This is later used to scale the cpu_capacity field such that an + * 'average' CPU is of middle capacity. Also see the comments near + * table_efficiency[] and update_cpu_capacity(). */ static void __init parse_dt_topology(void) { @@ -141,15 +141,15 @@ static void __init parse_dt_topology(void) * cpu_scale because all CPUs have the same capacity. Otherwise, we * compute a middle_capacity factor that will ensure that the capacity * of an 'average' CPU of the system will be as close as possible to - * SCHED_POWER_SCALE, which is the default value, but with the + * SCHED_CAPACITY_SCALE, which is the default value, but with the * constraint explained near table_efficiency[]. */ if (4*max_capacity < (3*(max_capacity + min_capacity))) middle_capacity = (min_capacity + max_capacity) - >> (SCHED_POWER_SHIFT+1); + >> (SCHED_CAPACITY_SHIFT+1); else middle_capacity = ((max_capacity / 3) - >> (SCHED_POWER_SHIFT-1)) + 1; + >> (SCHED_CAPACITY_SHIFT-1)) + 1; } @@ -158,20 +158,20 @@ static void __init parse_dt_topology(void) * boot. The update of all CPUs is in O(n^2) for heteregeneous system but the * function returns directly for SMP system. 
*/ -static void update_cpu_power(unsigned int cpu) +static void update_cpu_capacity(unsigned int cpu) { if (!cpu_capacity(cpu)) return; - set_power_scale(cpu, cpu_capacity(cpu) / middle_capacity); + set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity); - printk(KERN_INFO "CPU%u: update cpu_power %lu\n", - cpu, arch_scale_freq_power(NULL, cpu)); + printk(KERN_INFO "CPU%u: update cpu_capacity %lu\n", + cpu, arch_scale_freq_capacity(NULL, cpu)); } #else static inline void parse_dt_topology(void) {} -static inline void update_cpu_power(unsigned int cpuid) {} +static inline void update_cpu_capacity(unsigned int cpuid) {} #endif /* @@ -267,7 +267,7 @@ void store_cpu_topology(unsigned int cpuid) update_siblings_masks(cpuid); - update_cpu_power(cpuid); + update_cpu_capacity(cpuid); printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", cpuid, cpu_topology[cpuid].thread_id, @@ -297,7 +297,7 @@ void __init init_cpu_topology(void) { unsigned int cpu; - /* init core mask and power*/ + /* init core mask and capacity */ for_each_possible_cpu(cpu) { struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]); @@ -307,7 +307,7 @@ void __init init_cpu_topology(void) cpumask_clear(&cpu_topo->core_sibling); cpumask_clear(&cpu_topo->thread_sibling); - set_power_scale(cpu, SCHED_POWER_SCALE); + set_capacity_scale(cpu, SCHED_CAPACITY_SCALE); } smp_wmb(); diff --git a/include/linux/sched.h b/include/linux/sched.h index a96f03598c61..322110affe63 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -854,10 +854,10 @@ enum cpu_idle_type { }; /* - * Increase resolution of cpu_power calculations + * Increase resolution of cpu_capacity calculations */ -#define SCHED_POWER_SHIFT 10 -#define SCHED_POWER_SCALE (1L << SCHED_POWER_SHIFT) +#define SCHED_CAPACITY_SHIFT 10 +#define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) /* * sched-domains (multiprocessor balancing) declarations: diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 07bc78a50329..7ba4f5413a10 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5249,7 +5249,7 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, cpulist_scnprintf(str, sizeof(str), sched_group_cpus(group)); printk(KERN_CONT " %s", str); - if (group->sgc->capacity != SCHED_POWER_SCALE) { + if (group->sgc->capacity != SCHED_CAPACITY_SCALE) { printk(KERN_CONT " (cpu_capacity = %d)", group->sgc->capacity); } @@ -5715,7 +5715,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu) * domains and no possible iteration will get us here, we won't * die on a /0 trap. 
*/ - sg->sgc->capacity = SCHED_POWER_SCALE * cpumask_weight(sg_span); + sg->sgc->capacity = SCHED_CAPACITY_SCALE * cpumask_weight(sg_span); sg->sgc->capacity_orig = sg->sgc->capacity; /* @@ -6921,7 +6921,7 @@ void __init sched_init(void) #ifdef CONFIG_SMP rq->sd = NULL; rq->rd = NULL; - rq->cpu_capacity = SCHED_POWER_SCALE; + rq->cpu_capacity = SCHED_CAPACITY_SCALE; rq->post_schedule = 0; rq->active_balance = 0; rq->next_balance = jiffies; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 58684f684fa8..dc7d6527a282 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1062,9 +1062,9 @@ static void update_numa_stats(struct numa_stats *ns, int nid) if (!cpus) return; - ns->load = (ns->load * SCHED_POWER_SCALE) / ns->compute_capacity; + ns->load = (ns->load * SCHED_CAPACITY_SCALE) / ns->compute_capacity; ns->task_capacity = - DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_POWER_SCALE); + DIV_ROUND_CLOSEST(ns->compute_capacity, SCHED_CAPACITY_SCALE); ns->has_free_capacity = (ns->nr_running < ns->task_capacity); } @@ -4370,7 +4370,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, } /* Adjust by relative CPU capacity of the group */ - avg_load = (avg_load * SCHED_POWER_SCALE) / group->sgc->capacity; + avg_load = (avg_load * SCHED_CAPACITY_SCALE) / group->sgc->capacity; if (local_group) { this_load = avg_load; @@ -5609,10 +5609,10 @@ static inline int get_sd_load_idx(struct sched_domain *sd, static unsigned long default_scale_capacity(struct sched_domain *sd, int cpu) { - return SCHED_POWER_SCALE; + return SCHED_CAPACITY_SCALE; } -unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu) +unsigned long __weak arch_scale_freq_capacity(struct sched_domain *sd, int cpu) { return default_scale_capacity(sd, cpu); } @@ -5627,7 +5627,7 @@ static unsigned long default_scale_smt_capacity(struct sched_domain *sd, int cpu return smt_gain; } -unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu) +unsigned long __weak arch_scale_smt_capacity(struct sched_domain *sd, int cpu) { return default_scale_smt_capacity(sd, cpu); } @@ -5658,10 +5658,10 @@ static unsigned long scale_rt_capacity(int cpu) available = total - avg; } - if (unlikely((s64)total < SCHED_POWER_SCALE)) - total = SCHED_POWER_SCALE; + if (unlikely((s64)total < SCHED_CAPACITY_SCALE)) + total = SCHED_CAPACITY_SCALE; - total >>= SCHED_POWER_SHIFT; + total >>= SCHED_CAPACITY_SHIFT; return div_u64(available, total); } @@ -5669,29 +5669,29 @@ static unsigned long scale_rt_capacity(int cpu) static void update_cpu_capacity(struct sched_domain *sd, int cpu) { unsigned long weight = sd->span_weight; - unsigned long capacity = SCHED_POWER_SCALE; + unsigned long capacity = SCHED_CAPACITY_SCALE; struct sched_group *sdg = sd->groups; if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { if (sched_feat(ARCH_POWER)) - capacity *= arch_scale_smt_power(sd, cpu); + capacity *= arch_scale_smt_capacity(sd, cpu); else capacity *= default_scale_smt_capacity(sd, cpu); - capacity >>= SCHED_POWER_SHIFT; + capacity >>= SCHED_CAPACITY_SHIFT; } sdg->sgc->capacity_orig = capacity; if (sched_feat(ARCH_POWER)) - capacity *= arch_scale_freq_power(sd, cpu); + capacity *= arch_scale_freq_capacity(sd, cpu); else capacity *= default_scale_capacity(sd, cpu); - capacity >>= SCHED_POWER_SHIFT; + capacity >>= SCHED_CAPACITY_SHIFT; capacity *= scale_rt_capacity(cpu); - capacity >>= SCHED_POWER_SHIFT; + capacity >>= SCHED_CAPACITY_SHIFT; if (!capacity) capacity = 1; @@ -5780,7 +5780,7 @@ static inline int 
fix_small_capacity(struct sched_domain *sd, struct sched_group *group) { /* - * Only siblings can have significantly less than SCHED_POWER_SCALE + * Only siblings can have significantly less than SCHED_CAPACITY_SCALE */ if (!(sd->flags & SD_SHARE_CPUPOWER)) return 0; @@ -5845,11 +5845,11 @@ static inline int sg_capacity_factor(struct lb_env *env, struct sched_group *gro cpus = group->group_weight; /* smt := ceil(cpus / capacity), assumes: 1 < smt_capacity < 2 */ - smt = DIV_ROUND_UP(SCHED_POWER_SCALE * cpus, capacity_orig); + smt = DIV_ROUND_UP(SCHED_CAPACITY_SCALE * cpus, capacity_orig); capacity_factor = cpus / smt; /* cores */ capacity_factor = min_t(unsigned, - capacity_factor, DIV_ROUND_CLOSEST(capacity, SCHED_POWER_SCALE)); + capacity_factor, DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE)); if (!capacity_factor) capacity_factor = fix_small_capacity(env->sd, group); @@ -5895,7 +5895,7 @@ static inline void update_sg_lb_stats(struct lb_env *env, /* Adjust by relative CPU capacity of the group */ sgs->group_capacity = group->sgc->capacity; - sgs->avg_load = (sgs->group_load*SCHED_POWER_SCALE) / sgs->group_capacity; + sgs->avg_load = (sgs->group_load*SCHED_CAPACITY_SCALE) / sgs->group_capacity; if (sgs->sum_nr_running) sgs->load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running; @@ -6089,7 +6089,7 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) env->imbalance = DIV_ROUND_CLOSEST( sds->busiest_stat.avg_load * sds->busiest_stat.group_capacity, - SCHED_POWER_SCALE); + SCHED_CAPACITY_SCALE); return 1; } @@ -6118,7 +6118,7 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) imbn = 1; scaled_busy_load_per_task = - (busiest->load_per_task * SCHED_POWER_SCALE) / + (busiest->load_per_task * SCHED_CAPACITY_SCALE) / busiest->group_capacity; if (busiest->avg_load + scaled_busy_load_per_task >= @@ -6137,7 +6137,7 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) min(busiest->load_per_task, busiest->avg_load); capa_now += local->group_capacity * min(local->load_per_task, local->avg_load); - capa_now /= SCHED_POWER_SCALE; + capa_now /= SCHED_CAPACITY_SCALE; /* Amount of load we'd subtract */ if (busiest->avg_load > scaled_busy_load_per_task) { @@ -6148,16 +6148,16 @@ void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) /* Amount of load we'd add */ if (busiest->avg_load * busiest->group_capacity < - busiest->load_per_task * SCHED_POWER_SCALE) { + busiest->load_per_task * SCHED_CAPACITY_SCALE) { tmp = (busiest->avg_load * busiest->group_capacity) / local->group_capacity; } else { - tmp = (busiest->load_per_task * SCHED_POWER_SCALE) / + tmp = (busiest->load_per_task * SCHED_CAPACITY_SCALE) / local->group_capacity; } capa_move += local->group_capacity * min(local->load_per_task, local->avg_load + tmp); - capa_move /= SCHED_POWER_SCALE; + capa_move /= SCHED_CAPACITY_SCALE; /* Move if we gain throughput */ if (capa_move > capa_now) @@ -6207,7 +6207,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s load_above_capacity = (busiest->sum_nr_running - busiest->group_capacity_factor); - load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_POWER_SCALE); + load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_CAPACITY_SCALE); load_above_capacity /= busiest->group_capacity; } @@ -6225,7 +6225,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s env->imbalance = min( max_pull * busiest->group_capacity, (sds->avg_load - local->avg_load) * local->group_capacity - 
) / SCHED_POWER_SCALE; + ) / SCHED_CAPACITY_SCALE; /* * if *imbalance is less than the average load per runnable task @@ -6279,7 +6279,8 @@ static struct sched_group *find_busiest_group(struct lb_env *env) if (!sds.busiest || busiest->sum_nr_running == 0) goto out_balanced; - sds.avg_load = (SCHED_POWER_SCALE * sds.total_load) / sds.total_capacity; + sds.avg_load = (SCHED_CAPACITY_SCALE * sds.total_load) + / sds.total_capacity; /* * If the busiest group is imbalanced the below checks don't @@ -6378,7 +6379,7 @@ static struct rq *find_busiest_queue(struct lb_env *env, continue; capacity = capacity_of(i); - capacity_factor = DIV_ROUND_CLOSEST(capacity, SCHED_POWER_SCALE); + capacity_factor = DIV_ROUND_CLOSEST(capacity, SCHED_CAPACITY_SCALE); if (!capacity_factor) capacity_factor = fix_small_capacity(env->sd, group); -- cgit v1.2.3 From 5d4dfddd4f02b028d6ddaaa04d75d3b0cad1c9ae Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 27 May 2014 13:50:41 -0400 Subject: sched: Rename capacity related flags It is better not to think about compute capacity as being equivalent to "CPU power". The upcoming "power aware" scheduler work may create confusion with the notion of energy consumption if "power" is used too liberally. Let's rename the following feature flags since they do relate to capacity: SD_SHARE_CPUPOWER -> SD_SHARE_CPUCAPACITY ARCH_POWER -> ARCH_CAPACITY NONTASK_POWER -> NONTASK_CAPACITY Signed-off-by: Nicolas Pitre Signed-off-by: Peter Zijlstra Cc: Vincent Guittot Cc: Daniel Lezcano Cc: Morten Rasmussen Cc: "Rafael J. Wysocki" Cc: linaro-kernel@lists.linaro.org Cc: Andy Fleming Cc: Anton Blanchard Cc: Benjamin Herrenschmidt Cc: Grant Likely Cc: Linus Torvalds Cc: Michael Ellerman Cc: Paul Gortmaker Cc: Paul Mackerras Cc: Preeti U Murthy Cc: Rob Herring Cc: Srivatsa S. 
Bhat Cc: Toshi Kani Cc: Vasant Hegde Cc: Vincent Guittot Cc: devicetree@vger.kernel.org Cc: linux-kernel@vger.kernel.org Cc: linuxppc-dev@lists.ozlabs.org Link: http://lkml.kernel.org/n/tip-e93lpnxb87owfievqatey6b5@git.kernel.org Signed-off-by: Ingo Molnar --- arch/powerpc/kernel/smp.c | 2 +- include/linux/sched.h | 4 ++-- kernel/sched/core.c | 14 +++++++------- kernel/sched/fair.c | 8 ++++---- kernel/sched/features.h | 8 ++++---- 5 files changed, 18 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 10ffffef0414..c51d16379cba 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c @@ -770,7 +770,7 @@ int setup_profiling_timer(unsigned int multiplier) /* cpumask of CPUs with asymetric SMT dependancy */ static const int powerpc_smt_flags(void) { - int flags = SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES; + int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); diff --git a/include/linux/sched.h b/include/linux/sched.h index 322110affe63..ce93768a3312 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -869,7 +869,7 @@ enum cpu_idle_type { #define SD_BALANCE_FORK 0x0008 /* Balance on fork, clone */ #define SD_BALANCE_WAKE 0x0010 /* Balance on wakeup */ #define SD_WAKE_AFFINE 0x0020 /* Wake task to waking CPU */ -#define SD_SHARE_CPUPOWER 0x0080 /* Domain members share cpu power */ +#define SD_SHARE_CPUCAPACITY 0x0080 /* Domain members share cpu power */ #define SD_SHARE_POWERDOMAIN 0x0100 /* Domain members share power domain */ #define SD_SHARE_PKG_RESOURCES 0x0200 /* Domain members share cpu pkg resources */ #define SD_SERIALIZE 0x0400 /* Only a single load balancing instance */ @@ -881,7 +881,7 @@ enum cpu_idle_type { #ifdef CONFIG_SCHED_SMT static inline const int cpu_smt_flags(void) { - return SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES; + return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; } #endif diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7ba4f5413a10..5976ca579d3e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -872,7 +872,7 @@ static void update_rq_clock_task(struct rq *rq, s64 delta) rq->clock_task += delta; #if defined(CONFIG_IRQ_TIME_ACCOUNTING) || defined(CONFIG_PARAVIRT_TIME_ACCOUNTING) - if ((irq_delta + steal) && sched_feat(NONTASK_POWER)) + if ((irq_delta + steal) && sched_feat(NONTASK_CAPACITY)) sched_rt_avg_update(rq, irq_delta + steal); #endif } @@ -5309,7 +5309,7 @@ static int sd_degenerate(struct sched_domain *sd) SD_BALANCE_NEWIDLE | SD_BALANCE_FORK | SD_BALANCE_EXEC | - SD_SHARE_CPUPOWER | + SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN)) { if (sd->groups != sd->groups->next) @@ -5340,7 +5340,7 @@ sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) SD_BALANCE_NEWIDLE | SD_BALANCE_FORK | SD_BALANCE_EXEC | - SD_SHARE_CPUPOWER | + SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES | SD_PREFER_SIBLING | SD_SHARE_POWERDOMAIN); @@ -5947,7 +5947,7 @@ static int sched_domains_curr_level; /* * SD_flags allowed in topology descriptions. 
* - * SD_SHARE_CPUPOWER - describes SMT topologies + * SD_SHARE_CPUCAPACITY - describes SMT topologies * SD_SHARE_PKG_RESOURCES - describes shared caches * SD_NUMA - describes NUMA topologies * SD_SHARE_POWERDOMAIN - describes shared power domain @@ -5956,7 +5956,7 @@ static int sched_domains_curr_level; * SD_ASYM_PACKING - describes SMT quirks */ #define TOPOLOGY_SD_FLAGS \ - (SD_SHARE_CPUPOWER | \ + (SD_SHARE_CPUCAPACITY | \ SD_SHARE_PKG_RESOURCES | \ SD_NUMA | \ SD_ASYM_PACKING | \ @@ -6002,7 +6002,7 @@ sd_init(struct sched_domain_topology_level *tl, int cpu) | 1*SD_BALANCE_FORK | 0*SD_BALANCE_WAKE | 1*SD_WAKE_AFFINE - | 0*SD_SHARE_CPUPOWER + | 0*SD_SHARE_CPUCAPACITY | 0*SD_SHARE_PKG_RESOURCES | 0*SD_SERIALIZE | 0*SD_PREFER_SIBLING @@ -6024,7 +6024,7 @@ sd_init(struct sched_domain_topology_level *tl, int cpu) * Convert topological properties into behaviour. */ - if (sd->flags & SD_SHARE_CPUPOWER) { + if (sd->flags & SD_SHARE_CPUCAPACITY) { sd->imbalance_pct = 110; sd->smt_gain = 1178; /* ~15% */ diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index dc7d6527a282..d3c731222199 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5672,8 +5672,8 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu) unsigned long capacity = SCHED_CAPACITY_SCALE; struct sched_group *sdg = sd->groups; - if ((sd->flags & SD_SHARE_CPUPOWER) && weight > 1) { - if (sched_feat(ARCH_POWER)) + if ((sd->flags & SD_SHARE_CPUCAPACITY) && weight > 1) { + if (sched_feat(ARCH_CAPACITY)) capacity *= arch_scale_smt_capacity(sd, cpu); else capacity *= default_scale_smt_capacity(sd, cpu); @@ -5683,7 +5683,7 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu) sdg->sgc->capacity_orig = capacity; - if (sched_feat(ARCH_POWER)) + if (sched_feat(ARCH_CAPACITY)) capacity *= arch_scale_freq_capacity(sd, cpu); else capacity *= default_scale_capacity(sd, cpu); @@ -5782,7 +5782,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group) /* * Only siblings can have significantly less than SCHED_CAPACITY_SCALE */ - if (!(sd->flags & SD_SHARE_CPUPOWER)) + if (!(sd->flags & SD_SHARE_CPUCAPACITY)) return 0; /* diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 5716929a2e3a..90284d117fe6 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -37,18 +37,18 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true) SCHED_FEAT(WAKEUP_PREEMPTION, true) /* - * Use arch dependent cpu power functions + * Use arch dependent cpu capacity functions */ -SCHED_FEAT(ARCH_POWER, true) +SCHED_FEAT(ARCH_CAPACITY, true) SCHED_FEAT(HRTICK, false) SCHED_FEAT(DOUBLE_TICK, false) SCHED_FEAT(LB_BIAS, true) /* - * Decrement CPU power based on time not spent running tasks + * Decrement CPU capacity based on time not spent running tasks */ -SCHED_FEAT(NONTASK_POWER, true) +SCHED_FEAT(NONTASK_CAPACITY, true) /* * Queue remote wakeups on the target CPU and process them -- cgit v1.2.3 From edcb4d3c36a6429caa03ddfeab4cbb153c7002b2 Mon Sep 17 00:00:00 2001 From: Vince Weaver Date: Fri, 16 May 2014 17:15:49 -0400 Subject: perf/ARM: Use common PMU interrupt disabled code Make the ARM perf code use the new common PMU interrupt disabled code. This allows perf to work on ARM machines without a working PMU interrupt (for example, raspberry pi). 
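With the per-driver IRQ check gone, refusing sampling events becomes the job of the generic perf core: any PMU that advertises PERF_PMU_CAP_NO_INTERRUPT is denied period-based (sampling) events in one central place, while pure counting events keep working. A sketch of what that central check amounts to (the exact placement and error path in kernel/events/core.c may differ from this illustration):

	/*
	 * Hypothetical placement: in perf_event_alloc(), once the event's
	 * PMU has been resolved.  Counting events (sample_period == 0)
	 * pass through untouched.
	 */
	if (event->attr.sample_period &&
	    (pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT)) {
		err = -EOPNOTSUPP;
		goto err_out;
	}
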
Acked-by: Will Deacon Signed-off-by: Vince Weaver [peterz: applied changes suggested by Will] Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Grant Likely Cc: Linus Torvalds Cc: Paul Mackerras Cc: Rob Herring Cc: Russell King Cc: Will Deacon Cc: devicetree@vger.kernel.org Cc: linux-arm-kernel@lists.infradead.org Cc: linux-kernel@vger.kernel.org Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1405161712190.11099@vincent-weaver-1.umelst.maine.edu [ Small readability tweaks to the code. ] Signed-off-by: Ingo Molnar Signed-off-by: Ingo Molnar --- arch/arm/kernel/perf_event.c | 2 +- arch/arm/kernel/perf_event_cpu.c | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index a6bc431cde70..4238bcba9d60 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -410,7 +410,7 @@ __hw_perf_event_init(struct perf_event *event) */ hwc->config_base |= (unsigned long)mapping; - if (!hwc->sample_period) { + if (!is_sampling_event(event)) { /* * For non-sampling runs, limit the sample_period to half * of the counter width. That way, the new counter value diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c index 51798d7854ac..bbdbffd06cf7 100644 --- a/arch/arm/kernel/perf_event_cpu.c +++ b/arch/arm/kernel/perf_event_cpu.c @@ -126,8 +126,8 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler) irqs = min(pmu_device->num_resources, num_possible_cpus()); if (irqs < 1) { - pr_err("no irqs for PMUs defined\n"); - return -ENODEV; + printk_once("perf/ARM: No irqs for PMU defined, sampling events not supported\n"); + return 0; } irq = platform_get_irq(pmu_device, 0); @@ -191,6 +191,10 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu) /* Ensure the PMU has sane values out of reset. */ if (cpu_pmu->reset) on_each_cpu(cpu_pmu->reset, cpu_pmu, 1); + + /* If no interrupts available, set the corresponding capability flag */ + if (!platform_get_irq(cpu_pmu->plat_device, 0)) + cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; } /* -- cgit v1.2.3 From c184c980de30dc5f6fec4b281928aa6743708da9 Mon Sep 17 00:00:00 2001 From: Vince Weaver Date: Fri, 16 May 2014 17:18:07 -0400 Subject: perf/x86: Use common PMU interrupt disabled code Make the x86 perf code use the new common PMU interrupt disabled code. Typically most x86 machines have working PMU interrupts, although some older p6-class machines had this problem. 
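From user space the effect of the capability flag is easy to observe: on a machine without a working PMU interrupt, opening a sampling event fails while a plain counting event still succeeds. An illustrative stand-alone test (not part of this patch; the exact errno is whatever the core check returns):

	#include <linux/perf_event.h>
	#include <sys/syscall.h>
	#include <string.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.type = PERF_TYPE_HARDWARE;
		attr.size = sizeof(attr);
		attr.config = PERF_COUNT_HW_CPU_CYCLES;

		attr.sample_period = 100000;	/* sampling: needs the PMU interrupt */
		if (syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0) < 0)
			perror("sampling event");	/* expected to fail here */

		attr.sample_period = 0;		/* counting: read() the count, no IRQ */
		if (syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0) < 0)
			perror("counting event");	/* expected to succeed */

		return 0;
	}
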
Signed-off-by: Vince Weaver Signed-off-by: Peter Zijlstra Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Link: http://lkml.kernel.org/r/alpine.DEB.2.10.1405161715560.11099@vincent-weaver-1.umelst.maine.edu Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 32029e35f2b9..2bdfbff8a4f6 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c @@ -303,15 +303,6 @@ int x86_setup_perfctr(struct perf_event *event) hwc->sample_period = x86_pmu.max_period; hwc->last_period = hwc->sample_period; local64_set(&hwc->period_left, hwc->sample_period); - } else { - /* - * If we have a PMU initialized but no APIC - * interrupts, we cannot sample hardware - * events (user-space has to fall back and - * sample via a hrtimer based software event): - */ - if (!x86_pmu.apic) - return -EOPNOTSUPP; } if (attr->type == PERF_TYPE_RAW) @@ -1367,6 +1358,15 @@ static void __init pmu_check_apic(void) x86_pmu.apic = 0; pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); pr_info("no hardware sampling interrupt available.\n"); + + /* + * If we have a PMU initialized but no APIC + * interrupts, we cannot sample hardware + * events (user-space has to fall back and + * sample via a hrtimer based software event): + */ + pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; + } static struct attribute_group x86_pmu_format_group = { -- cgit v1.2.3 From 37548914fbfcd56e1955a9b7e55dc3b84a3e9e25 Mon Sep 17 00:00:00 2001 From: Anshuman Khandual Date: Thu, 22 May 2014 12:50:09 +0530 Subject: perf/x86: Add conditional branch filtering support This patch adds conditional branch filtering support, enabling it for PERF_SAMPLE_BRANCH_COND in perf branch stack sampling framework by utilizing an available software filter X86_BR_JCC. 
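For a perf_event_open() caller, using the new filter is just a matter of the attribute setup; an illustrative fragment (field values are examples only, and it reuses the syscall skeleton shown in the previous entry):

	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_HARDWARE;
	attr.size = sizeof(attr);
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
	/*
	 * Sample only conditional branches executed at the user level.
	 * The hardware LBR_JCC filter is used where the LBR select map
	 * provides it; otherwise the X86_BR_JCC software filter applies.
	 */
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_COND |
				  PERF_SAMPLE_BRANCH_USER;
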
Signed-off-by: Anshuman Khandual Reviewed-by: Stephane Eranian Reviewed-by: Andi Kleen Signed-off-by: Peter Zijlstra Cc: mpe@ellerman.id.au Cc: benh@kernel.crashing.org Cc: Arnaldo Carvalho de Melo Cc: Linus Torvalds Link: http://lkml.kernel.org/r/1400743210-32289-3-git-send-email-khandual@linux.vnet.ibm.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_event_intel_lbr.c | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index d82d155aca8c..9dd2459a4c73 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -384,6 +384,9 @@ static void intel_pmu_setup_sw_lbr_filter(struct perf_event *event) if (br_type & PERF_SAMPLE_BRANCH_NO_TX) mask |= X86_BR_NO_TX; + if (br_type & PERF_SAMPLE_BRANCH_COND) + mask |= X86_BR_JCC; + /* * stash actual user request into reg, it may * be used by fixup code for some CPU @@ -678,6 +681,7 @@ static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { * NHM/WSM erratum: must include IND_JMP to capture IND_CALL */ [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL | LBR_IND_JMP, + [PERF_SAMPLE_BRANCH_COND] = LBR_JCC, }; static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { @@ -689,6 +693,7 @@ static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX] = { [PERF_SAMPLE_BRANCH_ANY_CALL] = LBR_REL_CALL | LBR_IND_CALL | LBR_FAR, [PERF_SAMPLE_BRANCH_IND_CALL] = LBR_IND_CALL, + [PERF_SAMPLE_BRANCH_COND] = LBR_JCC, }; /* core */ -- cgit v1.2.3 From 5cdb76d6f0b657c1140de74ed5af7cc8c5ed5faf Mon Sep 17 00:00:00 2001 From: Oleg Nesterov Date: Sun, 1 Jun 2014 21:13:46 +0200 Subject: uprobes/x86: Rename arch_uprobe->def to ->defparam, minor comment updates Purely cosmetic, no changes in .o, 1. As Jim pointed out arch_uprobe->def looks ambiguous, rename it to ->defparam. 2. Add the comment into default_post_xol_op() to explain "regs->sp +=". 3. Remove the stale part of the comment in arch_uprobe_analyze_insn(). Suggested-by: Jim Keniston Reviewed-by: Masami Hiramatsu Acked-by: Srikar Dronamraju Signed-off-by: Oleg Nesterov --- arch/x86/include/asm/uprobes.h | 2 +- arch/x86/kernel/uprobes.c | 37 ++++++++++++++++++------------------- 2 files changed, 19 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uprobes.h b/arch/x86/include/asm/uprobes.h index 7be3c079e389..74f4c2ff6427 100644 --- a/arch/x86/include/asm/uprobes.h +++ b/arch/x86/include/asm/uprobes.h @@ -52,7 +52,7 @@ struct arch_uprobe { struct { u8 fixups; u8 ilen; - } def; + } defparam; }; }; diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c index 159ca520ef5b..5d1cbfe4ae58 100644 --- a/arch/x86/kernel/uprobes.c +++ b/arch/x86/kernel/uprobes.c @@ -254,7 +254,7 @@ static inline bool is_64bit_mm(struct mm_struct *mm) * If arch_uprobe->insn doesn't use rip-relative addressing, return * immediately. Otherwise, rewrite the instruction so that it accesses * its memory operand indirectly through a scratch register. Set - * def->fixups accordingly. (The contents of the scratch register + * defparam->fixups accordingly. (The contents of the scratch register * will be saved before we single-step the modified instruction, * and restored afterward). 
* @@ -372,14 +372,14 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) */ if (reg != 6 && reg2 != 6) { reg2 = 6; - auprobe->def.fixups |= UPROBE_FIX_RIP_SI; + auprobe->defparam.fixups |= UPROBE_FIX_RIP_SI; } else if (reg != 7 && reg2 != 7) { reg2 = 7; - auprobe->def.fixups |= UPROBE_FIX_RIP_DI; + auprobe->defparam.fixups |= UPROBE_FIX_RIP_DI; /* TODO (paranoia): force maskmovq to not use di */ } else { reg2 = 3; - auprobe->def.fixups |= UPROBE_FIX_RIP_BX; + auprobe->defparam.fixups |= UPROBE_FIX_RIP_BX; } /* * Point cursor at the modrm byte. The next 4 bytes are the @@ -398,9 +398,9 @@ static void riprel_analyze(struct arch_uprobe *auprobe, struct insn *insn) static inline unsigned long * scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs) { - if (auprobe->def.fixups & UPROBE_FIX_RIP_SI) + if (auprobe->defparam.fixups & UPROBE_FIX_RIP_SI) return ®s->si; - if (auprobe->def.fixups & UPROBE_FIX_RIP_DI) + if (auprobe->defparam.fixups & UPROBE_FIX_RIP_DI) return ®s->di; return ®s->bx; } @@ -411,18 +411,18 @@ scratch_reg(struct arch_uprobe *auprobe, struct pt_regs *regs) */ static void riprel_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { - if (auprobe->def.fixups & UPROBE_FIX_RIP_MASK) { + if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) { struct uprobe_task *utask = current->utask; unsigned long *sr = scratch_reg(auprobe, regs); utask->autask.saved_scratch_register = *sr; - *sr = utask->vaddr + auprobe->def.ilen; + *sr = utask->vaddr + auprobe->defparam.ilen; } } static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) { - if (auprobe->def.fixups & UPROBE_FIX_RIP_MASK) { + if (auprobe->defparam.fixups & UPROBE_FIX_RIP_MASK) { struct uprobe_task *utask = current->utask; unsigned long *sr = scratch_reg(auprobe, regs); @@ -499,16 +499,16 @@ static int default_post_xol_op(struct arch_uprobe *auprobe, struct pt_regs *regs struct uprobe_task *utask = current->utask; riprel_post_xol(auprobe, regs); - if (auprobe->def.fixups & UPROBE_FIX_IP) { + if (auprobe->defparam.fixups & UPROBE_FIX_IP) { long correction = utask->vaddr - utask->xol_vaddr; regs->ip += correction; - } else if (auprobe->def.fixups & UPROBE_FIX_CALL) { - regs->sp += sizeof_long(); - if (push_ret_address(regs, utask->vaddr + auprobe->def.ilen)) + } else if (auprobe->defparam.fixups & UPROBE_FIX_CALL) { + regs->sp += sizeof_long(); /* Pop incorrect return address */ + if (push_ret_address(regs, utask->vaddr + auprobe->defparam.ilen)) return -ERESTART; } /* popf; tell the caller to not touch TF */ - if (auprobe->def.fixups & UPROBE_FIX_SETF) + if (auprobe->defparam.fixups & UPROBE_FIX_SETF) utask->autask.saved_tf = true; return 0; @@ -711,12 +711,11 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, /* * Figure out which fixups default_post_xol_op() will need to perform, - * and annotate def->fixups accordingly. To start with, ->fixups is - * either zero or it reflects rip-related fixups. + * and annotate defparam->fixups accordingly. 
*/ switch (OPCODE1(&insn)) { case 0x9d: /* popf */ - auprobe->def.fixups |= UPROBE_FIX_SETF; + auprobe->defparam.fixups |= UPROBE_FIX_SETF; break; case 0xc3: /* ret or lret -- ip is correct */ case 0xcb: @@ -742,8 +741,8 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, riprel_analyze(auprobe, &insn); } - auprobe->def.ilen = insn.length; - auprobe->def.fixups |= fix_ip_or_call; + auprobe->defparam.ilen = insn.length; + auprobe->defparam.fixups |= fix_ip_or_call; auprobe->ops = &default_xol_ops; return 0; -- cgit v1.2.3 From d4bc590f8716f7dde6b7bca319097ac30a8cb0b4 Mon Sep 17 00:00:00 2001 From: Michal Marek Date: Thu, 5 Jun 2014 17:44:44 +0200 Subject: um: Fix for relative objtree when generating x86 headers In an O= build, rely on the generated Makefile to call the main Makefile properly. When building in the source tree, we do not need to specify the -C and O= either. This fixes the problem when $(objtree) is a relative path and the -C changes the directory. Reported-by: Richard Weinberger Signed-off-by: Michal Marek --- arch/um/Makefile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/um/Makefile b/arch/um/Makefile index 36e658a4291c..e4b1a9639c4d 100644 --- a/arch/um/Makefile +++ b/arch/um/Makefile @@ -111,8 +111,7 @@ endef KBUILD_KCONFIG := $(HOST_DIR)/um/Kconfig archheaders: - $(Q)$(MAKE) -C '$(srctree)' KBUILD_SRC= \ - ARCH=$(HEADER_ARCH) O='$(objtree)' archheaders + $(Q)$(MAKE) KBUILD_SRC= ARCH=$(HEADER_ARCH) archheaders archprepare: include/generated/user_constants.h -- cgit v1.2.3 From bd01ec1a13f9a327950c8e3080096446c7804753 Mon Sep 17 00:00:00 2001 From: Waiman Long Date: Mon, 3 Feb 2014 13:18:57 +0100 Subject: x86, locking/rwlocks: Enable qrwlocks on x86 Make x86 use the fair rwlock_t. Implement the custom queue_write_unlock() for best performance. 
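The custom unlock pays off because of x86's strong ordering: the write-locked state lives in one byte of the lock word, and TSO guarantees stores are not reordered with earlier stores, so releasing the write lock needs only a compiler barrier plus a plain byte store rather than a LOCK-prefixed read-modify-write. A rough user-space analogy in C11 atomics, which only mimics the qrwlock layout and assumes a little-endian layout with the writer flag in the low byte:

#include <stdatomic.h>
#include <stdint.h>

struct qrwlock_like { _Atomic uint32_t cnts; };

#define QW_LOCKED 0xffU	/* writer byte, as in the qrwlock layout */

void write_unlock_generic(struct qrwlock_like *l)
{
	/* generic flavour: atomic RMW, a LOCK-prefixed op on x86 */
	atomic_fetch_sub_explicit(&l->cnts, QW_LOCKED,
				  memory_order_release);
}

void write_unlock_x86_style(struct qrwlock_like *l)
{
	/* x86 flavour: a plain release store of the writer byte,
	 * which compiles to a single MOV under TSO (illustration
	 * only; the kernel uses ACCESS_ONCE on a u8 alias) */
	atomic_store_explicit((_Atomic uint8_t *)&l->cnts, 0,
			      memory_order_release);
}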
Signed-off-by: Waiman Long [peterz: near complete rewrite] Signed-off-by: Peter Zijlstra Cc: Dave Jones Cc: Jeremy Fitzhardinge Cc: Konrad Rzeszutek Wilk Cc: Linus Torvalds Cc: Oleg Nesterov Cc: Raghavendra K T Cc: "Paul E.McKenney" Cc: linux-kernel@vger.kernel.org Cc: x86@kernel.org Link: http://lkml.kernel.org/n/tip-r1xuzmdysvuhl3h86n5fbxi7@git.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 1 + arch/x86/include/asm/qrwlock.h | 17 +++++++++++++++++ arch/x86/include/asm/spinlock.h | 4 ++++ arch/x86/include/asm/spinlock_types.h | 4 ++++ 4 files changed, 26 insertions(+) create mode 100644 arch/x86/include/asm/qrwlock.h (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 25d2c6f7325e..bf7626fcd363 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -121,6 +121,7 @@ config X86 select MODULES_USE_ELF_RELA if X86_64 select CLONE_BACKWARDS if X86_32 select ARCH_USE_BUILTIN_BSWAP + select ARCH_USE_QUEUE_RWLOCK select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION select OLD_SIGACTION if X86_32 select COMPAT_OLD_SIGACTION if IA32_EMULATION diff --git a/arch/x86/include/asm/qrwlock.h b/arch/x86/include/asm/qrwlock.h new file mode 100644 index 000000000000..70f46f07f94e --- /dev/null +++ b/arch/x86/include/asm/qrwlock.h @@ -0,0 +1,17 @@ +#ifndef _ASM_X86_QRWLOCK_H +#define _ASM_X86_QRWLOCK_H + +#include <asm-generic/qrwlock_types.h> + +#if !defined(CONFIG_X86_OOSTORE) && !defined(CONFIG_X86_PPRO_FENCE) +#define queue_write_unlock queue_write_unlock +static inline void queue_write_unlock(struct qrwlock *lock) +{ + barrier(); + ACCESS_ONCE(*(u8 *)&lock->cnts) = 0; +} +#endif + +#include <asm-generic/qrwlock.h> + +#endif /* _ASM_X86_QRWLOCK_H */ diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 0f62f5482d91..54f1c8068c02 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -187,6 +187,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) cpu_relax(); } +#ifndef CONFIG_QUEUE_RWLOCK /* * Read-write spinlocks, allowing multiple readers * but only one writer. @@ -269,6 +270,9 @@ static inline void arch_write_unlock(arch_rwlock_t *rw) asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0" : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory"); } +#else +#include <asm/qrwlock.h> +#endif /* CONFIG_QUEUE_RWLOCK */ #define arch_read_lock_flags(lock, flags) arch_read_lock(lock) #define arch_write_lock_flags(lock, flags) arch_write_lock(lock) diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h index 4f1bea19945b..73c4c007200f 100644 --- a/arch/x86/include/asm/spinlock_types.h +++ b/arch/x86/include/asm/spinlock_types.h @@ -34,6 +34,10 @@ typedef struct arch_spinlock { #define __ARCH_SPIN_LOCK_UNLOCKED { { 0 } } +#ifdef CONFIG_QUEUE_RWLOCK +#include <asm-generic/qrwlock_types.h> +#else #include <asm/rwlock.h> +#endif #endif /* _ASM_X86_SPINLOCK_TYPES_H */ -- cgit v1.2.3 From 569810d1e3278907264f5b115281fca3f0038d53 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Thu, 5 Jun 2014 22:47:44 -0700 Subject: net: filter: fix typo in sparc BPF JIT Fix a typo in the sparc codegen for the SKF_AD_IFINDEX and SKF_AD_HATYPE classic BPF extensions. Fixes: 2809a2087cc4 ("net: filter: Just In Time compiler for sparc") Signed-off-by: Alexei Starovoitov Signed-off-by: David S.
Miller --- arch/sparc/net/bpf_jit_comp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index a82c6b2a9780..b19a0c2a1ae6 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c @@ -83,9 +83,9 @@ static void bpf_flush_icache(void *start_, void *end_) #define BNE (F2(0, 2) | CONDNE) #ifdef CONFIG_SPARC64 -#define BNE_PTR (F2(0, 1) | CONDNE | (2 << 20)) +#define BE_PTR (F2(0, 1) | CONDE | (2 << 20)) #else -#define BNE_PTR BNE +#define BE_PTR BNE #endif #define SETHI(K, REG) \ @@ -600,7 +600,7 @@ void bpf_jit_compile(struct sk_filter *fp) case BPF_S_ANC_IFINDEX: emit_skb_loadptr(dev, r_A); emit_cmpi(r_A, 0); - emit_branch(BNE_PTR, cleanup_addr + 4); + emit_branch(BE_PTR, cleanup_addr + 4); emit_nop(); emit_load32(r_A, struct net_device, ifindex, r_A); break; @@ -613,7 +613,7 @@ void bpf_jit_compile(struct sk_filter *fp) case BPF_S_ANC_HATYPE: emit_skb_loadptr(dev, r_A); emit_cmpi(r_A, 0); - emit_branch(BNE_PTR, cleanup_addr + 4); + emit_branch(BE_PTR, cleanup_addr + 4); emit_nop(); emit_load16(r_A, struct net_device, type, r_A); break; -- cgit v1.2.3 From ce369a545aac3da653dd95d8117093a862bf94d3 Mon Sep 17 00:00:00 2001 From: Andrii Tseglytskyi Date: Fri, 16 May 2014 05:45:58 -0500 Subject: ARM: OMAP5+: dpll: support Duty Cycle Correction (DCC) Duty Cycle Correction (DCC) needs to be enabled if the MPU is to run at frequencies beyond 1.4 GHz on OMAP5, DRA75x and DRA72x. The MPU DPLL has a limitation on the maximum frequency it can be locked at. The Duty Cycle Correction circuit is used to recover a correct duty cycle when achieving higher frequencies (the hardware internally switches output to the M3 output (CLKOUTHIF) from the M2 output (CLKOUT)). For further information, see the note in the OMAP5432 Technical Reference Manual (SWPU282U), chapter 3.6.3.3.1 "DPLLs Output Clocks Parameters", and also the "OMAP543x ES2.0 DM Operating Conditions Addendum v0.5", chapter 2.1 "Micro Processor Unit (MPU)". Equivalent information is present in the relevant DRA75x and DRA72x documentation (SPRUHP2E, SPRUHI2P).
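The rule the patch encodes is a simple threshold check on the target rate. A stand-alone model of that decision, with an illustrative bit position rather than the real OMAP5 register layout:

#include <stdio.h>

#define DCC_MASK (1u << 22)		/* assumed DCC enable bit */
#define DCC_RATE 1400000000UL		/* 1.4 GHz threshold */

static unsigned int program_dpll(unsigned int v, unsigned long rate)
{
	if (rate >= DCC_RATE)
		v |= DCC_MASK;		/* M3/CLKOUTHIF path, DCC on */
	else
		v &= ~DCC_MASK;		/* M2/CLKOUT path, DCC off */
	return v;
}

int main(void)
{
	printf("1.0 GHz -> ctrl=%#x\n", program_dpll(0, 1000000000UL));
	printf("1.5 GHz -> ctrl=%#x\n", program_dpll(0, 1500000000UL));
	return 0;
}

The kernel-side equivalent, keyed off the per-DPLL dcc_mask and dcc_rate fields, is the omap3_noncore_dpll_program() hunk below.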
Signed-off-by: Andrii Tseglytskyi Signed-off-by: Taras Kondratiuk Signed-off-by: J Keerthy Signed-off-by: Nishanth Menon [t-kristo@ti.com: added TRM / DM references for DCC clock rate] Signed-off-by: Tero Kristo --- arch/arm/mach-omap2/dpll3xxx.c | 9 +++++++++ include/linux/clk/ti.h | 4 ++++ 2 files changed, 13 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c index fcd8036af910..6d7ba37e2257 100644 --- a/arch/arm/mach-omap2/dpll3xxx.c +++ b/arch/arm/mach-omap2/dpll3xxx.c @@ -319,6 +319,15 @@ static int omap3_noncore_dpll_program(struct clk_hw_omap *clk, u16 freqsel) /* Set DPLL multiplier, divider */ v = omap2_clk_readl(clk, dd->mult_div1_reg); + + /* Handle Duty Cycle Correction */ + if (dd->dcc_mask) { + if (dd->last_rounded_rate >= dd->dcc_rate) + v |= dd->dcc_mask; /* Enable DCC */ + else + v &= ~dd->dcc_mask; /* Disable DCC */ + } + v &= ~(dd->mult_mask | dd->div1_mask); v |= dd->last_rounded_m << __ffs(dd->mult_mask); v |= (dd->last_rounded_n - 1) << __ffs(dd->div1_mask); diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 4231c41bed51..e8d8a35034a5 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -41,6 +41,8 @@ * @idlest_reg: register containing the DPLL idle status bitfield * @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg * @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg + * @dcc_mask: mask of the DPLL DCC correction bitfield @mult_div1_reg + * @dcc_rate: rate atleast which DCC @dcc_mask must be set * @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg @@ -86,6 +88,8 @@ struct dpll_data { u32 idlest_mask; u32 dco_mask; u32 sddiv_mask; + u32 dcc_mask; + unsigned long dcc_rate; u32 lpmode_mask; u32 m4xen_mask; u8 auto_recal_bit; -- cgit v1.2.3 From 7e148070001ae82df08966199580a29b934e3bf3 Mon Sep 17 00:00:00 2001 From: Nishanth Menon Date: Fri, 16 May 2014 05:46:00 -0500 Subject: ARM: dts: OMAP5/DRA7: use omap5-mpu-dpll-clock capable of dealing with higher frequencies OMAP5432, DRA75x and DRA72x have MPU DPLLs that need Duty Cycle Correction (DCC) to operate safely at frequencies >= 1.4 GHz. Switch to the "ti,omap5-mpu-dpll-clock" compatible property, which provides this support.
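The compatible switch matters because the TI clock code binds its setup callbacks to compatible strings (via CLK_OF_DECLARE()), so only nodes claiming the new string get the DCC-aware rate programming. A sketch of the consumer-side check; the helper itself is hypothetical, while of_device_is_compatible() is the standard OF API:

#include <linux/of.h>

/* hypothetical helper: does this DPLL node ask for the DCC-aware
 * implementation? */
static bool dpll_wants_dcc(struct device_node *node)
{
	return of_device_is_compatible(node, "ti,omap5-mpu-dpll-clock");
}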
Signed-off-by: Nishanth Menon Signed-off-by: Tero Kristo --- arch/arm/boot/dts/dra7xx-clocks.dtsi | 2 +- arch/arm/boot/dts/omap54xx-clocks.dtsi | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi index 30160348934c..264b9caa9eef 100644 --- a/arch/arm/boot/dts/dra7xx-clocks.dtsi +++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi @@ -277,7 +277,7 @@ dpll_mpu_ck: dpll_mpu_ck { #clock-cells = <0>; - compatible = "ti,omap4-dpll-clock"; + compatible = "ti,omap5-mpu-dpll-clock"; clocks = <&sys_clkin1>, <&mpu_dpll_hs_clk_div>; reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>; }; diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi index d487fdab3921..465505cada59 100644 --- a/arch/arm/boot/dts/omap54xx-clocks.dtsi +++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi @@ -362,7 +362,7 @@ dpll_mpu_ck: dpll_mpu_ck { #clock-cells = <0>; - compatible = "ti,omap4-dpll-clock"; + compatible = "ti,omap5-mpu-dpll-clock"; clocks = <&sys_clkin>, <&mpu_dpll_hs_clk_div>; reg = <0x0160>, <0x0164>, <0x016c>, <0x0168>; }; -- cgit v1.2.3 From 588f5d629b3369aba88f52217d1c473a28fa7723 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 6 Jun 2014 08:35:59 -0700 Subject: net: filter: fix sparc32 typo Fixes: 569810d1e327 ("net: filter: fix typo in sparc BPF JIT") Signed-off-by: Alexei Starovoitov Signed-off-by: David S. Miller --- arch/sparc/net/bpf_jit_comp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c index b19a0c2a1ae6..49cee4af16f4 100644 --- a/arch/sparc/net/bpf_jit_comp.c +++ b/arch/sparc/net/bpf_jit_comp.c @@ -85,7 +85,7 @@ static void bpf_flush_icache(void *start_, void *end_) #ifdef CONFIG_SPARC64 #define BE_PTR (F2(0, 1) | CONDE | (2 << 20)) #else -#define BE_PTR BNE +#define BE_PTR BE #endif #define SETHI(K, REG) \ -- cgit v1.2.3 From 50b9ac1813f7f2f13c937a1484c2fe09c4d590ce Mon Sep 17 00:00:00 2001 From: Rickard Strandqvist Date: Sun, 1 Jun 2014 01:00:13 +0200 Subject: arch: tile: kernel: unaligned.c: Cleaning up uninitialized variables There is a risk that the variable will be used without being initialized. This was largely found by using a static code analysis program called cppcheck. Signed-off-by: Rickard Strandqvist Signed-off-by: Chris Metcalf [minor cleanups] --- arch/tile/kernel/unaligned.c | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/tile/kernel/unaligned.c b/arch/tile/kernel/unaligned.c index b030b4e78845..c02ea2a45f67 100644 --- a/arch/tile/kernel/unaligned.c +++ b/arch/tile/kernel/unaligned.c @@ -182,18 +182,7 @@ static void find_regs(tilegx_bundle_bits bundle, uint64_t *rd, uint64_t *ra, int i; uint64_t reg; uint64_t reg_map = 0, alias_reg_map = 0, map; - bool alias; - - *ra = -1; - *rb = -1; - - if (rd) - *rd = -1; - - *clob1 = -1; - *clob2 = -1; - *clob3 = -1; - alias = false; + bool alias = false; /* * Parse fault bundle, find potential used registers and mark @@ -569,7 +558,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle, tilegx_bundle_bits bundle_2 = 0; /* If bundle_2_enable = false, bundle_2 is fnop/nop operation. 
*/ bool bundle_2_enable = true; - uint64_t ra, rb, rd = -1, clob1, clob2, clob3; + uint64_t ra = -1, rb = -1, rd = -1, clob1 = -1, clob2 = -1, clob3 = -1; /* * Indicate if the unalign access * instruction's registers hit with -- cgit v1.2.3 From bc4fef6f32c56798bb17df9e98a6e07ba7dc4101 Mon Sep 17 00:00:00 2001 From: Steven Miao Date: Tue, 6 May 2014 13:25:51 +0800 Subject: bf533: fix build error: add linux/gpio.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit build error arch/blackfin/mach-bf533/boards/stamp.c: In function ‘stamp_init’: arch/blackfin/mach-bf533/boards/stamp.c:866: error: implicit declaration of function ‘gpio_request’ arch/blackfin/mach-bf533/boards/stamp.c:868: error: implicit declaration of function ‘gpio_direction_output’ arch/blackfin/mach-bf533/boards/stamp.c:869: error: implicit declaration of function ‘gpio_free’ Signed-off-by: Steven Miao --- arch/blackfin/mach-bf533/boards/stamp.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c index d0989290f54c..6f4bac969bf7 100644 --- a/arch/blackfin/mach-bf533/boards/stamp.c +++ b/arch/blackfin/mach-bf533/boards/stamp.c @@ -17,6 +17,7 @@ #if IS_ENABLED(CONFIG_USB_ISP1362_HCD) #include #endif +#include <linux/gpio.h> #include #include #include -- cgit v1.2.3 From ff5ae12d84d3f7eaeb3850ff6de4289c0c849344 Mon Sep 17 00:00:00 2001 From: Steven Miao Date: Tue, 13 May 2014 16:49:13 +0800 Subject: Revert "blackfin: dma: current count mmr is read only" curr_x_count/curr_y_count need to be cleared here; keep this workaround. This reverts commit dfb02f95f5430e47d0c49adbc4469d08eea38b94. Signed-off-by: Steven Miao --- arch/blackfin/include/asm/dma.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/blackfin/include/asm/dma.h b/arch/blackfin/include/asm/dma.h index 8d1e4c2d2c36..40e9c2bbc6e3 100644 --- a/arch/blackfin/include/asm/dma.h +++ b/arch/blackfin/include/asm/dma.h @@ -316,6 +316,8 @@ static inline void disable_dma(unsigned int channel) } static inline void enable_dma(unsigned int channel) { + dma_ch[channel].regs->curr_x_count = 0; + dma_ch[channel].regs->curr_y_count = 0; dma_ch[channel].regs->cfg |= DMAEN; } int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data); -- cgit v1.2.3 From 4d048b0255e3dd4fb001c5f1f609fb67463d04d6 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Tue, 10 Jun 2014 14:25:26 -0700 Subject: x86, vdso: Remove one final use of htole16() One final use of the macros from <endian.h>, which are not available on older systems. In this case we had one sole case of *writing* a little-endian number, but the number is SHN_UNDEF, which is the constant zero, so rather than dealing with the general case of little-endian puts here, just document that the constant is zero and be done with it. Reported-and-Tested-by: Andrew Morton Signed-off-by: H.
Peter Anvin Cc: Andy Lutomirski Link: http://lkml.kernel.org/r/20140610135051.c3c34165f73d67d218b62bd9@linux-foundation.org --- arch/x86/vdso/vdso2c.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h index 8a074637a576..d9f6f61aef1c 100644 --- a/arch/x86/vdso/vdso2c.h +++ b/arch/x86/vdso/vdso2c.h @@ -116,7 +116,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name) hdr->e_shoff = 0; hdr->e_shentsize = 0; hdr->e_shnum = 0; - hdr->e_shstrndx = htole16(SHN_UNDEF); + hdr->e_shstrndx = SHN_UNDEF; /* SHN_UNDEF == 0 */ if (!name) { fwrite(addr, load_size, 1, outfile); -- cgit v1.2.3 From dd58a092c4202f2bd490adab7285b3ff77f8e467 Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Tue, 10 Jun 2014 15:04:40 +1000 Subject: powerpc: Add AT_HWCAP2 to indicate V.CRYPTO category support The Vector Crypto category instructions are supported by current POWER8 chips. Advertise them to userspace using a specific bit to properly differentiate from chips of the same architecture level that might not have them. Signed-off-by: Benjamin Herrenschmidt CC: stable@vger.kernel.org [v3.10+] --- arch/powerpc/include/uapi/asm/cputable.h | 1 + arch/powerpc/kernel/cputable.c | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h index 5b7657959faa..de2c0e4ee1aa 100644 --- a/arch/powerpc/include/uapi/asm/cputable.h +++ b/arch/powerpc/include/uapi/asm/cputable.h @@ -41,5 +41,6 @@ #define PPC_FEATURE2_EBB 0x10000000 #define PPC_FEATURE2_ISEL 0x08000000 #define PPC_FEATURE2_TAR 0x04000000 +#define PPC_FEATURE2_VEC_CRYPTO 0x02000000 #endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */ diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index c1faade6506d..11da04a4625a 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -109,7 +109,8 @@ extern void __restore_cpu_e6500(void); PPC_FEATURE_PSERIES_PERFMON_COMPAT) #define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \ PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \ - PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR) + PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \ + PPC_FEATURE2_VEC_CRYPTO) #define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\ PPC_FEATURE_TRUE_LE | \ PPC_FEATURE_HAS_ALTIVEC_COMP) -- cgit v1.2.3 From 94314290ed719cf64619dfc42df8f84161a36892 Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Tue, 20 May 2014 23:02:56 +0200 Subject: powerpc: Remove check for CONFIG_SERIAL_TEXT_DEBUG The Kconfig symbol SERIAL_TEXT_DEBUG was removed from arch/powerpc/Kconfig.debug in v2.6.22. (In v2.6.27 it was also removed from arch/ppc/Kconfig.debug.) So the check for its macro has evaluated to false for over five years now. Remove that check and the few lines of code hidden behind it.
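To see why the guarded block was provably dead: Kconfig symbols reach C code as CONFIG_-prefixed macros, so once SERIAL_TEXT_DEBUG left Kconfig no configuration could define CONFIG_SERIAL_TEXT_DEBUG, and the guarded arm could never survive preprocessing. A stand-alone illustration whose macro names mirror the check removed below:

#include <stdio.h>

/* no build system defines these any more */
#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)
#define EARLY_UART_TLB_ENTRY 1
#else
#define EARLY_UART_TLB_ENTRY 0
#endif

int main(void)
{
	printf("early UART TLB code compiled in: %d\n",
	       EARLY_UART_TLB_ENTRY);
	return 0;
}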
Signed-off-by: Paul Bolle Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/head_40x.S | 19 ------------------- 1 file changed, 19 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index 67ee0d6c1070..7d7d8635227a 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S @@ -930,25 +930,6 @@ initial_mmu: tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ -#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE) - - /* Load a TLB entry for the UART, so that ppc4xx_progress() can use - * the UARTs nice and early. We use a 4k real==virtual mapping. */ - - lis r3,SERIAL_DEBUG_IO_BASE@h - ori r3,r3,SERIAL_DEBUG_IO_BASE@l - mr r4,r3 - clrrwi r4,r4,12 - ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G) - - clrrwi r3,r3,12 - ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K)) - - li r0,0 /* TLB slot 0 */ - tlbwe r4,r0,TLB_DATA - tlbwe r3,r0,TLB_TAG -#endif /* CONFIG_SERIAL_DEBUG_TEXT && SERIAL_DEBUG_IO_BASE */ - isync /* Establish the exception vector base -- cgit v1.2.3 From fb5a515704d7e84c139140a83c5eff515adfc000 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 2 Jun 2014 11:20:46 +1000 Subject: powerpc: Remove platforms/wsp and associated pieces __attribute__ ((unused)) WSP is the last user of CONFIG_PPC_A2, so we remove that as well. Although CONFIG_PPC_ICSWX still exists, it's no longer selectable for any Book3E platform, so we can remove the code in mmu-book3e.h that depended on it. Signed-off-by: Michael Ellerman Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/Kconfig.debug | 5 - arch/powerpc/configs/chroma_defconfig | 307 --------- arch/powerpc/include/asm/mmu-book3e.h | 4 - arch/powerpc/include/asm/reg_a2.h | 9 - arch/powerpc/include/asm/wsp.h | 14 - arch/powerpc/kernel/Makefile | 1 - arch/powerpc/kernel/cpu_setup_a2.S | 120 ---- arch/powerpc/kernel/cputable.c | 38 -- arch/powerpc/kernel/exceptions-64e.S | 16 - arch/powerpc/kernel/udbg.c | 2 - arch/powerpc/kernel/udbg_16550.c | 11 - arch/powerpc/platforms/Kconfig | 1 - arch/powerpc/platforms/Kconfig.cputype | 6 +- arch/powerpc/platforms/Makefile | 1 - arch/powerpc/platforms/wsp/Kconfig | 30 - arch/powerpc/platforms/wsp/Makefile | 10 - arch/powerpc/platforms/wsp/chroma.c | 56 -- arch/powerpc/platforms/wsp/h8.c | 135 ---- arch/powerpc/platforms/wsp/ics.c | 762 --------------------- arch/powerpc/platforms/wsp/ics.h | 25 - arch/powerpc/platforms/wsp/msi.c | 102 --- arch/powerpc/platforms/wsp/msi.h | 19 - arch/powerpc/platforms/wsp/opb_pic.c | 321 --------- arch/powerpc/platforms/wsp/psr2.c | 67 -- arch/powerpc/platforms/wsp/scom_smp.c | 435 ------------ arch/powerpc/platforms/wsp/scom_wsp.c | 82 --- arch/powerpc/platforms/wsp/setup.c | 36 - arch/powerpc/platforms/wsp/smp.c | 88 --- arch/powerpc/platforms/wsp/wsp.c | 117 ---- arch/powerpc/platforms/wsp/wsp.h | 29 - arch/powerpc/platforms/wsp/wsp_pci.c | 1134 -------------------------------- arch/powerpc/platforms/wsp/wsp_pci.h | 268 -------- 32 files changed, 1 insertion(+), 4250 deletions(-) delete mode 100644 arch/powerpc/configs/chroma_defconfig delete mode 100644 arch/powerpc/include/asm/wsp.h delete mode 100644 arch/powerpc/kernel/cpu_setup_a2.S delete mode 100644 arch/powerpc/platforms/wsp/Kconfig delete mode 100644 arch/powerpc/platforms/wsp/Makefile delete mode 100644 arch/powerpc/platforms/wsp/chroma.c delete mode 100644 arch/powerpc/platforms/wsp/h8.c delete mode 100644 arch/powerpc/platforms/wsp/ics.c 
delete mode 100644 arch/powerpc/platforms/wsp/ics.h delete mode 100644 arch/powerpc/platforms/wsp/msi.c delete mode 100644 arch/powerpc/platforms/wsp/msi.h delete mode 100644 arch/powerpc/platforms/wsp/opb_pic.c delete mode 100644 arch/powerpc/platforms/wsp/psr2.c delete mode 100644 arch/powerpc/platforms/wsp/scom_smp.c delete mode 100644 arch/powerpc/platforms/wsp/scom_wsp.c delete mode 100644 arch/powerpc/platforms/wsp/setup.c delete mode 100644 arch/powerpc/platforms/wsp/smp.c delete mode 100644 arch/powerpc/platforms/wsp/wsp.c delete mode 100644 arch/powerpc/platforms/wsp/wsp.h delete mode 100644 arch/powerpc/platforms/wsp/wsp_pci.c delete mode 100644 arch/powerpc/platforms/wsp/wsp_pci.h (limited to 'arch') diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug index 21c9f304e96c..790352f93700 100644 --- a/arch/powerpc/Kconfig.debug +++ b/arch/powerpc/Kconfig.debug @@ -235,11 +235,6 @@ config PPC_EARLY_DEBUG_USBGECKO Select this to enable early debugging for Nintendo GameCube/Wii consoles via an external USB Gecko adapter. -config PPC_EARLY_DEBUG_WSP - bool "Early debugging via WSP's internal UART" - depends on PPC_WSP - select PPC_UDBG_16550 - config PPC_EARLY_DEBUG_PS3GELIC bool "Early debugging through the PS3 Ethernet port" depends on PPC_PS3 diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig deleted file mode 100644 index 4f35fc462385..000000000000 --- a/arch/powerpc/configs/chroma_defconfig +++ /dev/null @@ -1,307 +0,0 @@ -CONFIG_PPC64=y -CONFIG_PPC_BOOK3E_64=y -# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set -CONFIG_SMP=y -CONFIG_NR_CPUS=256 -CONFIG_EXPERIMENTAL=y -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y -CONFIG_AUDIT=y -CONFIG_AUDITSYSCALL=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=19 -CONFIG_CGROUPS=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_RESOURCE_COUNTERS=y -CONFIG_CGROUP_MEMCG=y -CONFIG_CGROUP_MEMCG_SWAP=y -CONFIG_NAMESPACES=y -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_INITRAMFS_COMPRESSION_GZIP=y -CONFIG_KALLSYMS_ALL=y -CONFIG_EMBEDDED=y -CONFIG_PERF_EVENTS=y -CONFIG_PROFILING=y -CONFIG_OPROFILE=y -CONFIG_KPROBES=y -CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y -CONFIG_MODULE_UNLOAD=y -CONFIG_MODULE_FORCE_UNLOAD=y -CONFIG_MODVERSIONS=y -CONFIG_MODULE_SRCVERSION_ALL=y -CONFIG_SCOM_DEBUGFS=y -CONFIG_PPC_A2_DD2=y -CONFIG_KVM_GUEST=y -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_HZ_100=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_BINFMT_MISC=y -CONFIG_NUMA=y -# CONFIG_MIGRATION is not set -CONFIG_PPC_64K_PAGES=y -CONFIG_SCHED_SMT=y -CONFIG_CMDLINE_BOOL=y -CONFIG_CMDLINE="" -# CONFIG_SECCOMP is not set -CONFIG_PCIEPORTBUS=y -# CONFIG_PCIEASPM is not set -CONFIG_PCI_MSI=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_XFRM_USER=m -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_STATISTICS=y -CONFIG_NET_KEY=m -CONFIG_NET_KEY_MIGRATE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_NET_IPIP=y -CONFIG_IP_MROUTE=y -CONFIG_IP_PIMSM_V1=y -CONFIG_IP_PIMSM_V2=y -CONFIG_SYN_COOKIES=y -CONFIG_INET_AH=m -CONFIG_INET_ESP=m -CONFIG_INET_IPCOMP=m -CONFIG_IPV6=y -CONFIG_IPV6_PRIVACY=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y 
-CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y -CONFIG_IPV6_TUNNEL=y -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_PIMSM_V2=y -CONFIG_NETFILTER=y -CONFIG_NF_CONNTRACK=m -CONFIG_NF_CONNTRACK_EVENTS=y -CONFIG_NF_CT_PROTO_UDPLITE=m -CONFIG_NF_CONNTRACK_FTP=m -CONFIG_NF_CONNTRACK_IRC=m -CONFIG_NF_CONNTRACK_TFTP=m -CONFIG_NF_CT_NETLINK=m -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m -CONFIG_NETFILTER_XT_TARGET_CONNMARK=m -CONFIG_NETFILTER_XT_TARGET_MARK=m -CONFIG_NETFILTER_XT_TARGET_NFLOG=m -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m -CONFIG_NETFILTER_XT_TARGET_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_COMMENT=m -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m -CONFIG_NETFILTER_XT_MATCH_CONNMARK=m -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m -CONFIG_NETFILTER_XT_MATCH_DCCP=m -CONFIG_NETFILTER_XT_MATCH_DSCP=m -CONFIG_NETFILTER_XT_MATCH_ESP=m -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m -CONFIG_NETFILTER_XT_MATCH_HELPER=m -CONFIG_NETFILTER_XT_MATCH_IPRANGE=m -CONFIG_NETFILTER_XT_MATCH_LENGTH=m -CONFIG_NETFILTER_XT_MATCH_LIMIT=m -CONFIG_NETFILTER_XT_MATCH_MAC=m -CONFIG_NETFILTER_XT_MATCH_MARK=m -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m -CONFIG_NETFILTER_XT_MATCH_OWNER=m -CONFIG_NETFILTER_XT_MATCH_POLICY=m -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m -CONFIG_NETFILTER_XT_MATCH_QUOTA=m -CONFIG_NETFILTER_XT_MATCH_RATEEST=m -CONFIG_NETFILTER_XT_MATCH_REALM=m -CONFIG_NETFILTER_XT_MATCH_RECENT=m -CONFIG_NETFILTER_XT_MATCH_SCTP=m -CONFIG_NETFILTER_XT_MATCH_STATE=m -CONFIG_NETFILTER_XT_MATCH_STATISTIC=m -CONFIG_NETFILTER_XT_MATCH_STRING=m -CONFIG_NETFILTER_XT_MATCH_TCPMSS=m -CONFIG_NETFILTER_XT_MATCH_TIME=m -CONFIG_NETFILTER_XT_MATCH_U32=m -CONFIG_NF_CONNTRACK_IPV4=m -CONFIG_IP_NF_QUEUE=m -CONFIG_IP_NF_IPTABLES=m -CONFIG_IP_NF_MATCH_AH=m -CONFIG_IP_NF_MATCH_ECN=m -CONFIG_IP_NF_MATCH_TTL=m -CONFIG_IP_NF_FILTER=m -CONFIG_IP_NF_TARGET_REJECT=m -CONFIG_IP_NF_TARGET_LOG=m -CONFIG_IP_NF_TARGET_ULOG=m -CONFIG_NF_NAT=m -CONFIG_IP_NF_TARGET_MASQUERADE=m -CONFIG_IP_NF_TARGET_NETMAP=m -CONFIG_IP_NF_TARGET_REDIRECT=m -CONFIG_NET_TCPPROBE=y -# CONFIG_WIRELESS is not set -CONFIG_NET_9P=y -CONFIG_NET_9P_DEBUG=y -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -CONFIG_DEVTMPFS=y -CONFIG_MTD=y -CONFIG_MTD_CHAR=y -CONFIG_MTD_BLOCK=y -CONFIG_MTD_CFI=y -CONFIG_MTD_CFI_ADV_OPTIONS=y -CONFIG_MTD_CFI_LE_BYTE_SWAP=y -CONFIG_MTD_CFI_INTELEXT=y -CONFIG_MTD_CFI_AMDSTD=y -CONFIG_MTD_CFI_STAA=y -CONFIG_MTD_PHYSMAP_OF=y -CONFIG_PROC_DEVICETREE=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_CRYPTOLOOP=y -CONFIG_BLK_DEV_NBD=m -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_SIZE=65536 -CONFIG_CDROM_PKTCDVD=y -CONFIG_MISC_DEVICES=y -CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR=y -CONFIG_BLK_DEV_SR_VENDOR=y -CONFIG_CHR_DEV_SG=y -CONFIG_SCSI_MULTI_LUN=y -CONFIG_SCSI_CONSTANTS=y -CONFIG_SCSI_SPI_ATTRS=y -CONFIG_SCSI_FC_ATTRS=y -CONFIG_SCSI_ISCSI_ATTRS=m -CONFIG_SCSI_SAS_ATTRS=m -CONFIG_SCSI_SRP_ATTRS=y -CONFIG_ATA=y -CONFIG_SATA_AHCI=y -CONFIG_SATA_SIL24=y -CONFIG_SATA_MV=y -CONFIG_SATA_SIL=y -CONFIG_PATA_CMD64X=y -CONFIG_PATA_MARVELL=y -CONFIG_PATA_SIL680=y -CONFIG_MD=y -CONFIG_BLK_DEV_MD=y -CONFIG_MD_LINEAR=y -CONFIG_BLK_DEV_DM=y -CONFIG_DM_CRYPT=y -CONFIG_DM_SNAPSHOT=y -CONFIG_DM_MIRROR=y -CONFIG_DM_ZERO=y -CONFIG_DM_UEVENT=y -CONFIG_NETDEVICES=y -CONFIG_TUN=y -CONFIG_E1000E=y -CONFIG_TIGON3=y -# CONFIG_WLAN is not set -# CONFIG_INPUT is not set -# CONFIG_SERIO is not set -# CONFIG_VT is not set 
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_HW_RANDOM=y -CONFIG_RAW_DRIVER=y -CONFIG_MAX_RAW_DEVS=1024 -# CONFIG_HWMON is not set -# CONFIG_VGA_ARB is not set -# CONFIG_USB_SUPPORT is not set -CONFIG_EDAC=y -CONFIG_EDAC_MM_EDAC=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_DRV_DS1511=y -CONFIG_RTC_DRV_DS1553=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y -CONFIG_EXT2_FS_XIP=y -CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -# CONFIG_DNOTIFY is not set -CONFIG_FUSE_FS=y -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y -CONFIG_ZISOFS=y -CONFIG_UDF_FS=m -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_PROC_KCORE=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_CONFIGFS_FS=m -CONFIG_CRAMFS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V3=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -CONFIG_NFS_V4_1=y -CONFIG_ROOT_NFS=y -CONFIG_CIFS=y -CONFIG_CIFS_WEAK_PW_HASH=y -CONFIG_CIFS_XATTR=y -CONFIG_CIFS_POSIX=y -CONFIG_NLS_CODEPAGE_437=y -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -CONFIG_CRC_CCITT=m -CONFIG_CRC_T10DIF=y -CONFIG_LIBCRC32C=m -CONFIG_PRINTK_TIME=y -CONFIG_MAGIC_SYSRQ=y -CONFIG_STRIP_ASM_SYMS=y -CONFIG_DETECT_HUNG_TASK=y -# CONFIG_SCHED_DEBUG is not set -CONFIG_DEBUG_INFO=y -CONFIG_FTRACE_SYSCALLS=y -CONFIG_PPC_EMULATED_STATS=y -CONFIG_XMON=y -CONFIG_XMON_DEFAULT=y -CONFIG_IRQ_DOMAIN_DEBUG=y -CONFIG_PPC_EARLY_DEBUG=y -CONFIG_KEYS_DEBUG_PROC_KEYS=y -CONFIG_CRYPTO_NULL=m -CONFIG_CRYPTO_TEST=m -CONFIG_CRYPTO_CCM=m -CONFIG_CRYPTO_GCM=m -CONFIG_CRYPTO_PCBC=m -CONFIG_CRYPTO_MICHAEL_MIC=m -CONFIG_CRYPTO_SHA256=m -CONFIG_CRYPTO_SHA512=m -CONFIG_CRYPTO_TGR192=m -CONFIG_CRYPTO_WP512=m -CONFIG_CRYPTO_AES=m -CONFIG_CRYPTO_ANUBIS=m -CONFIG_CRYPTO_BLOWFISH=m -CONFIG_CRYPTO_CAST5=m -CONFIG_CRYPTO_CAST6=m -CONFIG_CRYPTO_KHAZAD=m -CONFIG_CRYPTO_SALSA20=m -CONFIG_CRYPTO_SERPENT=m -CONFIG_CRYPTO_TEA=m -CONFIG_CRYPTO_TWOFISH=m -CONFIG_CRYPTO_LZO=m -# CONFIG_CRYPTO_ANSI_CPRNG is not set -CONFIG_VIRTUALIZATION=y diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index 901dac6b6cb7..d0918e09557f 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h @@ -223,10 +223,6 @@ typedef struct { unsigned int id; unsigned int active; unsigned long vdso_base; -#ifdef CONFIG_PPC_ICSWX - struct spinlock *cop_lockp; /* guard cop related stuff */ - unsigned long acop; /* mask of enabled coprocessor types */ -#endif /* CONFIG_PPC_ICSWX */ #ifdef CONFIG_PPC_MM_SLICES u64 low_slices_psize; /* SLB page size encodings */ u64 high_slices_psize; /* 4 bits per slice for now */ diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h index 3d52a1132f3d..3ba9c6f096fc 100644 --- a/arch/powerpc/include/asm/reg_a2.h +++ b/arch/powerpc/include/asm/reg_a2.h @@ -110,15 +110,6 @@ #define TLB1_UR ASM_CONST(0x0000000000000002) #define TLB1_SR ASM_CONST(0x0000000000000001) -#ifdef CONFIG_PPC_EARLY_DEBUG_WSP -#define WSP_UART_PHYS 0xffc000c000 -/* This needs to be careful chosen to hit a !0 congruence class - * in the TLB since we bolt it in way 3, which is already occupied - * by our linear mapping primary bolted entry in CC 0. 
- */ -#define WSP_UART_VIRT 0xf000000000001000 -#endif - /* A2 erativax attributes definitions */ #define ERATIVAX_RS_IS_ALL 0x000 #define ERATIVAX_RS_IS_TID 0x040 diff --git a/arch/powerpc/include/asm/wsp.h b/arch/powerpc/include/asm/wsp.h deleted file mode 100644 index c7dc83088a33..000000000000 --- a/arch/powerpc/include/asm/wsp.h +++ /dev/null @@ -1,14 +0,0 @@ -/* - * Copyright 2011 Michael Ellerman, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ -#ifndef __ASM_POWERPC_WSP_H -#define __ASM_POWERPC_WSP_H - -extern int wsp_get_chip_id(struct device_node *dn); - -#endif /* __ASM_POWERPC_WSP_H */ diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index fab19ec25597..670c312d914e 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -43,7 +43,6 @@ obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o obj64-$(CONFIG_RELOCATABLE) += reloc_64.o obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o -obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o obj-$(CONFIG_PPC64) += vdso64/ obj-$(CONFIG_ALTIVEC) += vecemu.o obj-$(CONFIG_PPC_970_NAP) += idle_power4.o diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S deleted file mode 100644 index 61f079e05b61..000000000000 --- a/arch/powerpc/kernel/cpu_setup_a2.S +++ /dev/null @@ -1,120 +0,0 @@ -/* - * A2 specific assembly support code - * - * Copyright 2009 Ben Herrenschmidt, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include - -/* - * Disable thdid and class fields in ERATs to bump PID to full 14 bits capacity. - * This also prevents external LPID accesses but that isn't a problem when not a - * guest. Under PV, this setting will be ignored and MMUCR will return the right - * number of PID bits we can use. - */ -#define MMUCR1_EXTEND_PID \ - (MMUCR1_ICTID | MMUCR1_ITTID | MMUCR1_DCTID | \ - MMUCR1_DTTID | MMUCR1_DCCD) - -/* - * Use extended PIDs if enabled. - * Don't clear the ERATs on context sync events and enable I & D LRU. - * Enable ERAT back invalidate when tlbwe overwrites an entry. - */ -#define INITIAL_MMUCR1 \ - (MMUCR1_EXTEND_PID | MMUCR1_CSINV_NEVER | MMUCR1_IRRE | \ - MMUCR1_DRRE | MMUCR1_TLBWE_BINV) - -_GLOBAL(__setup_cpu_a2) - /* Some of these are actually thread local and some are - * core local but doing it always won't hurt - */ - -#ifdef CONFIG_PPC_ICSWX - /* Make sure ACOP starts out as zero */ - li r3,0 - mtspr SPRN_ACOP,r3 - - /* Skip the following if we are in Guest mode */ - mfmsr r3 - andis. r0,r3,MSR_GS@h - bne _icswx_skip_guest - - /* Enable icswx instruction */ - mfspr r3,SPRN_A2_CCR2 - ori r3,r3,A2_CCR2_ENABLE_ICSWX - mtspr SPRN_A2_CCR2,r3 - - /* Unmask all CTs in HACOP */ - li r3,-1 - mtspr SPRN_HACOP,r3 -_icswx_skip_guest: -#endif /* CONFIG_PPC_ICSWX */ - - /* Enable doorbell */ - mfspr r3,SPRN_A2_CCR2 - oris r3,r3,A2_CCR2_ENABLE_PC@h - mtspr SPRN_A2_CCR2,r3 - isync - - /* Setup CCR0 to disable power saving for now as it's busted - * in the current implementations. 
Setup CCR1 to wake on - * interrupts normally (we write the default value but who - * knows what FW may have clobbered...) - */ - li r3,0 - mtspr SPRN_A2_CCR0, r3 - LOAD_REG_IMMEDIATE(r3,0x0f0f0f0f) - mtspr SPRN_A2_CCR1, r3 - - /* Initialise MMUCR1 */ - lis r3,INITIAL_MMUCR1@h - ori r3,r3,INITIAL_MMUCR1@l - mtspr SPRN_MMUCR1,r3 - - /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */ - LOAD_REG_IMMEDIATE(r3, 0x000a7531) - mtspr SPRN_MMUCR2,r3 - - /* Set MMUCR3 to write all thids bit to the TLB */ - LOAD_REG_IMMEDIATE(r3, 0x0000000f) - mtspr SPRN_MMUCR3,r3 - - /* Don't do ERAT stuff if running guest mode */ - mfmsr r3 - andis. r0,r3,MSR_GS@h - bne 1f - - /* Now set the I-ERAT watermark to 15 */ - lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h - mtspr SPRN_MMUCR0, r4 - li r4,A2_IERAT_SIZE-1 - PPC_ERATWE(R4,R4,3) - - /* Now set the D-ERAT watermark to 31 */ - lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h - mtspr SPRN_MMUCR0, r4 - li r4,A2_DERAT_SIZE-1 - PPC_ERATWE(R4,R4,3) - - /* And invalidate the beast just in case. That won't get rid of - * a bolted entry though it will be in LRU and so will go away eventually - * but let's not bother for now - */ - PPC_ERATILX(0,0,R0) -1: - blr - -_GLOBAL(__restore_cpu_a2) - b __setup_cpu_a2 diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 11da04a4625a..965291b4c2fa 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -2149,44 +2149,6 @@ static struct cpu_spec __initdata cpu_specs[] = { } #endif /* CONFIG_PPC32 */ #endif /* CONFIG_E500 */ - -#ifdef CONFIG_PPC_A2 - { /* Standard A2 (>= DD2) + FPU core */ - .pvr_mask = 0xffff0000, - .pvr_value = 0x00480000, - .cpu_name = "A2 (>= DD2)", - .cpu_features = CPU_FTRS_A2, - .cpu_user_features = COMMON_USER_PPC64, - .mmu_features = MMU_FTRS_A2, - .icache_bsize = 64, - .dcache_bsize = 64, - .num_pmcs = 0, - .cpu_setup = __setup_cpu_a2, - .cpu_restore = __restore_cpu_a2, - .machine_check = machine_check_generic, - .platform = "ppca2", - }, - { /* This is a default entry to get going, to be replaced by - * a real one at some stage - */ -#define CPU_FTRS_BASE_BOOK3E (CPU_FTR_USE_TB | \ - CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_SMT | \ - CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) - .pvr_mask = 0x00000000, - .pvr_value = 0x00000000, - .cpu_name = "Book3E", - .cpu_features = CPU_FTRS_BASE_BOOK3E, - .cpu_user_features = COMMON_USER_PPC64, - .mmu_features = MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | - MMU_FTR_USE_TLBIVAX_BCAST | - MMU_FTR_LOCK_BCAST_INVAL, - .icache_bsize = 64, - .dcache_bsize = 64, - .num_pmcs = 0, - .machine_check = machine_check_generic, - .platform = "power6", - }, -#endif /* CONFIG_PPC_A2 */ }; static struct cpu_spec the_cpu_spec; diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 771b4e92e5d9..bb9cac6c8051 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -1467,22 +1467,6 @@ a2_tlbinit_after_linear_map: .globl a2_tlbinit_after_iprot_flush a2_tlbinit_after_iprot_flush: -#ifdef CONFIG_PPC_EARLY_DEBUG_WSP - /* Now establish early debug mappings if applicable */ - /* Restore the MAS0 we used for linear mapping load */ - mtspr SPRN_MAS0,r11 - - lis r3,(MAS1_VALID | MAS1_IPROT)@h - ori r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT) - mtspr SPRN_MAS1,r3 - LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G) - mtspr SPRN_MAS2,r3 - LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW) - mtspr SPRN_MAS7_MAS3,r3 - /* re-use the MAS8 value from the linear mapping */ - tlbwe 
-#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */ - PPC_TLBILX(0,0,R0) sync isync diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index a15837519dca..b7aa07279a63 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c @@ -62,8 +62,6 @@ void __init udbg_early_init(void) udbg_init_cpm(); #elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO) udbg_init_usbgecko(); -#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP) - udbg_init_wsp(); #elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS) /* In memory console */ udbg_init_memcons(); diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index 75702e207b29..6e7c4923b5ea 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c @@ -296,14 +296,3 @@ void __init udbg_init_40x_realmode(void) } #endif /* CONFIG_PPC_EARLY_DEBUG_40x */ - - -#ifdef CONFIG_PPC_EARLY_DEBUG_WSP - -void __init udbg_init_wsp(void) -{ - udbg_uart_init_mmio((void *)WSP_UART_VIRT, 1); - udbg_uart_setup(57600, 50000000); -} - -#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */ diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index bf9c6d4cd26c..391b3f6b54a3 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig @@ -19,7 +19,6 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig" source "arch/powerpc/platforms/44x/Kconfig" source "arch/powerpc/platforms/40x/Kconfig" source "arch/powerpc/platforms/amigaone/Kconfig" -source "arch/powerpc/platforms/wsp/Kconfig" config KVM_GUEST bool "KVM Guest support" diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index 43b65ad1970a..a41bd023647a 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype @@ -148,10 +148,6 @@ config POWER4 depends on PPC64 && PPC_BOOK3S def_bool y -config PPC_A2 - bool - depends on PPC_BOOK3E_64 - config TUNE_CELL bool "Optimize for Cell Broadband Engine" depends on PPC64 && PPC_BOOK3S @@ -280,7 +276,7 @@ config VSX config PPC_ICSWX bool "Support for PowerPC icswx coprocessor instruction" - depends on POWER4 || PPC_A2 + depends on POWER4 default n ---help--- diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile index 879b4a448498..469ef170d218 100644 --- a/arch/powerpc/platforms/Makefile +++ b/arch/powerpc/platforms/Makefile @@ -22,4 +22,3 @@ obj-$(CONFIG_PPC_CELL) += cell/ obj-$(CONFIG_PPC_PS3) += ps3/ obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/ obj-$(CONFIG_AMIGAONE) += amigaone/ -obj-$(CONFIG_PPC_WSP) += wsp/ diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig deleted file mode 100644 index 422a175b10ee..000000000000 --- a/arch/powerpc/platforms/wsp/Kconfig +++ /dev/null @@ -1,30 +0,0 @@ -config PPC_WSP - bool - select PPC_A2 - select GENERIC_TBSYNC - select PPC_ICSWX - select PPC_SCOM - select PPC_XICS - select PPC_ICP_NATIVE - select PCI - select PPC_IO_WORKAROUNDS if PCI - select PPC_INDIRECT_PIO if PCI - default n - -menu "WSP platform selection" - depends on PPC_BOOK3E_64 - -config PPC_PSR2 - bool "PowerEN System Reference Platform 2" - select EPAPR_BOOT - select PPC_WSP - default y - -config PPC_CHROMA - bool "PowerEN PCIe Chroma Card" - select EPAPR_BOOT - select PPC_WSP - select OF_DYNAMIC - default y - -endmenu diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile deleted file mode 100644 index 162fc60125a2..000000000000 --- a/arch/powerpc/platforms/wsp/Makefile +++ /dev/null @@ -1,10 +0,0 @@ -ccflags-y += $(NO_MINIMAL_TOC) - -obj-y 
+= setup.o ics.o wsp.o -obj-$(CONFIG_PPC_PSR2) += psr2.o -obj-$(CONFIG_PPC_CHROMA) += chroma.o h8.o -obj-$(CONFIG_PPC_WSP) += opb_pic.o -obj-$(CONFIG_PPC_WSP) += scom_wsp.o -obj-$(CONFIG_SMP) += smp.o scom_smp.o -obj-$(CONFIG_PCI) += wsp_pci.o -obj-$(CONFIG_PCI_MSI) += msi.o diff --git a/arch/powerpc/platforms/wsp/chroma.c b/arch/powerpc/platforms/wsp/chroma.c deleted file mode 100644 index aaa46b353715..000000000000 --- a/arch/powerpc/platforms/wsp/chroma.c +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2008-2011, IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#include "ics.h" -#include "wsp.h" - -void __init chroma_setup_arch(void) -{ - wsp_setup_arch(); - wsp_setup_h8(); - -} - -static int __init chroma_probe(void) -{ - unsigned long root = of_get_flat_dt_root(); - - if (!of_flat_dt_is_compatible(root, "ibm,wsp-chroma")) - return 0; - - return 1; -} - -define_machine(chroma_md) { - .name = "Chroma PCIe", - .probe = chroma_probe, - .setup_arch = chroma_setup_arch, - .restart = wsp_h8_restart, - .power_off = wsp_h8_power_off, - .halt = wsp_halt, - .calibrate_decr = generic_calibrate_decr, - .init_IRQ = wsp_setup_irq, - .progress = udbg_progress, - .power_save = book3e_idle, -}; - -machine_arch_initcall(chroma_md, wsp_probe_devices); diff --git a/arch/powerpc/platforms/wsp/h8.c b/arch/powerpc/platforms/wsp/h8.c deleted file mode 100644 index a3c87f395750..000000000000 --- a/arch/powerpc/platforms/wsp/h8.c +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright 2008-2011, IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include - -#include "wsp.h" - -/* - * The UART connection to the H8 is over ttyS1 which is just a 16550. - * We assume that FW has it setup right and no one messes with it. 
- */ - - -static u8 __iomem *h8; - -#define RBR 0 /* Receiver Buffer Register */ -#define THR 0 /* Transmitter Holding Register */ -#define LSR 5 /* Line Status Register */ -#define LSR_DR 0x01 /* LSR value for Data-Ready */ -#define LSR_THRE 0x20 /* LSR value for Transmitter-Holding-Register-Empty */ -static void wsp_h8_putc(int c) -{ - u8 lsr; - - do { - lsr = readb(h8 + LSR); - } while ((lsr & LSR_THRE) != LSR_THRE); - writeb(c, h8 + THR); -} - -static int wsp_h8_getc(void) -{ - u8 lsr; - - do { - lsr = readb(h8 + LSR); - } while ((lsr & LSR_DR) != LSR_DR); - - return readb(h8 + RBR); -} - -static void wsp_h8_puts(const char *s, int sz) -{ - int i; - - for (i = 0; i < sz; i++) { - wsp_h8_putc(s[i]); - - /* no flow control so wait for echo */ - wsp_h8_getc(); - } - wsp_h8_putc('\r'); - wsp_h8_putc('\n'); -} - -static void wsp_h8_terminal_cmd(const char *cmd, int sz) -{ - hard_irq_disable(); - wsp_h8_puts(cmd, sz); - /* should never return, but just in case */ - for (;;) - continue; -} - - -void wsp_h8_restart(char *cmd) -{ - static const char restart[] = "warm-reset"; - - (void)cmd; - wsp_h8_terminal_cmd(restart, sizeof(restart) - 1); -} - -void wsp_h8_power_off(void) -{ - static const char off[] = "power-off"; - - wsp_h8_terminal_cmd(off, sizeof(off) - 1); -} - -static void __iomem *wsp_h8_getaddr(void) -{ - struct device_node *aliases; - struct device_node *uart; - struct property *path; - void __iomem *va = NULL; - - /* - * there is nothing in the devtree to tell us which is mapped - * to the H8, but se know it is the second serial port. - */ - - aliases = of_find_node_by_path("/aliases"); - if (aliases == NULL) - return NULL; - - path = of_find_property(aliases, "serial1", NULL); - if (path == NULL) - goto out; - - uart = of_find_node_by_path(path->value); - if (uart == NULL) - goto out; - - va = of_iomap(uart, 0); - - /* remove it so no one messes with it */ - of_detach_node(uart); - of_node_put(uart); - -out: - of_node_put(aliases); - - return va; -} - -void __init wsp_setup_h8(void) -{ - h8 = wsp_h8_getaddr(); - - /* Devtree change? lets hard map it anyway */ - if (h8 == NULL) { - pr_warn("UART to H8 could not be found"); - h8 = ioremap(0xffc0008000ULL, 0x100); - } -} diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c deleted file mode 100644 index 9cd92e645028..000000000000 --- a/arch/powerpc/platforms/wsp/ics.c +++ /dev/null @@ -1,762 +0,0 @@ -/* - * Copyright 2008-2011 IBM Corporation. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "wsp.h" -#include "ics.h" - - -/* WSP ICS */ - -struct wsp_ics { - struct ics ics; - struct device_node *dn; - void __iomem *regs; - spinlock_t lock; - unsigned long *bitmap; - u32 chip_id; - u32 lsi_base; - u32 lsi_count; - u64 hwirq_start; - u64 count; -#ifdef CONFIG_SMP - int *hwirq_cpu_map; -#endif -}; - -#define to_wsp_ics(ics) container_of(ics, struct wsp_ics, ics) - -#define INT_SRC_LAYER_BUID_REG(base) ((base) + 0x00) -#define IODA_TBL_ADDR_REG(base) ((base) + 0x18) -#define IODA_TBL_DATA_REG(base) ((base) + 0x20) -#define XIVE_UPDATE_REG(base) ((base) + 0x28) -#define ICS_INT_CAPS_REG(base) ((base) + 0x30) - -#define TBL_AUTO_INCREMENT ((1UL << 63) | (1UL << 15)) -#define TBL_SELECT_XIST (1UL << 48) -#define TBL_SELECT_XIVT (1UL << 49) - -#define IODA_IRQ(irq) ((irq) & (0x7FFULL)) /* HRM 5.1.3.4 */ - -#define XIST_REQUIRED 0x8 -#define XIST_REJECTED 0x4 -#define XIST_PRESENTED 0x2 -#define XIST_PENDING 0x1 - -#define XIVE_SERVER_SHIFT 42 -#define XIVE_SERVER_MASK 0xFFFFULL -#define XIVE_PRIORITY_MASK 0xFFULL -#define XIVE_PRIORITY_SHIFT 32 -#define XIVE_WRITE_ENABLE (1ULL << 63) - -/* - * The docs refer to a 6 bit field called ChipID, which consists of a - * 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero - * so we ignore it, and every where we use "chip id" in this code we - * mean the NodeID. - */ -#define WSP_ICS_CHIP_SHIFT 17 - - -static struct wsp_ics *ics_list; -static int num_ics; - -/* ICS Source controller accessors */ - -static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq) -{ - unsigned long flags; - u64 xive; - - spin_lock_irqsave(&ics->lock, flags); - out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq)); - xive = in_be64(IODA_TBL_DATA_REG(ics->regs)); - spin_unlock_irqrestore(&ics->lock, flags); - - return xive; -} - -static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive) -{ - xive &= ~XIVE_ADDR_MASK; - xive |= (irq & XIVE_ADDR_MASK); - xive |= XIVE_WRITE_ENABLE; - - out_be64(XIVE_UPDATE_REG(ics->regs), xive); -} - -static u64 xive_set_server(u64 xive, unsigned int server) -{ - u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT); - - xive &= mask; - xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT; - - return xive; -} - -static u64 xive_set_priority(u64 xive, unsigned int priority) -{ - u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT); - - xive &= mask; - xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT; - - return xive; -} - - -#ifdef CONFIG_SMP -/* Find logical CPUs within mask on a given chip and store result in ret */ -void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret) -{ - int cpu, chip; - struct device_node *cpu_dn, *dn; - const u32 *prop; - - cpumask_clear(ret); - for_each_cpu(cpu, mask) { - cpu_dn = of_get_cpu_node(cpu, NULL); - if (!cpu_dn) - continue; - - prop = of_get_property(cpu_dn, "at-node", NULL); - if (!prop) { - of_node_put(cpu_dn); - continue; - } - - dn = of_find_node_by_phandle(*prop); - of_node_put(cpu_dn); - - chip = wsp_get_chip_id(dn); - if (chip == chip_id) - cpumask_set_cpu(cpu, ret); - - of_node_put(dn); - } -} - -/* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */ -static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq, - const cpumask_t *affinity) -{ - cpumask_var_t avail, newmask; - int ret = -ENOMEM, cpu, 
cpu_rover = 0, target; - int index = hwirq - ics->hwirq_start; - unsigned int nodeid; - - BUG_ON(index < 0 || index >= ics->count); - - if (!ics->hwirq_cpu_map) - return -ENOMEM; - - if (!distribute_irqs) { - ics->hwirq_cpu_map[hwirq - ics->hwirq_start] = xics_default_server; - return 0; - } - - /* Allocate needed CPU masks */ - if (!alloc_cpumask_var(&avail, GFP_KERNEL)) - goto ret; - if (!alloc_cpumask_var(&newmask, GFP_KERNEL)) - goto freeavail; - - /* Find PBus attached to the source of this IRQ */ - nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */ - - /* Find CPUs that could handle this IRQ */ - if (affinity) - cpumask_and(avail, cpu_online_mask, affinity); - else - cpumask_copy(avail, cpu_online_mask); - - /* Narrow selection down to logical CPUs on the same chip */ - cpus_on_chip(nodeid, avail, newmask); - - /* Ensure we haven't narrowed it down to 0 */ - if (unlikely(cpumask_empty(newmask))) { - if (unlikely(cpumask_empty(avail))) { - ret = -1; - goto out; - } - cpumask_copy(newmask, avail); - } - - /* Choose a CPU out of those we narrowed it down to in round robin */ - target = hwirq % cpumask_weight(newmask); - for_each_cpu(cpu, newmask) { - if (cpu_rover++ >= target) { - ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu); - ret = 0; - goto out; - } - } - - /* Shouldn't happen */ - WARN_ON(1); - -out: - free_cpumask_var(newmask); -freeavail: - free_cpumask_var(avail); -ret: - if (ret < 0) { - ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask); - pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n", - hwirq, ics->hwirq_cpu_map[index]); - } - return ret; -} - -static void alloc_irq_map(struct wsp_ics *ics) -{ - int i; - - ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL); - if (!ics->hwirq_cpu_map) { - pr_warning("Allocate hwirq_cpu_map failed, " - "IRQ balancing disabled\n"); - return; - } - - for (i=0; i < ics->count; i++) - ics->hwirq_cpu_map[i] = xics_default_server; -} - -static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq) -{ - int index = hwirq - ics->hwirq_start; - - BUG_ON(index < 0 || index >= ics->count); - - if (!ics->hwirq_cpu_map) - return xics_default_server; - - return ics->hwirq_cpu_map[index]; -} -#else /* !CONFIG_SMP */ -static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq, - const cpumask_t *affinity) -{ - return 0; -} - -static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq) -{ - return xics_default_server; -} - -static void alloc_irq_map(struct wsp_ics *ics) { } -#endif - -static void wsp_chip_unmask_irq(struct irq_data *d) -{ - unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); - struct wsp_ics *ics; - int server; - u64 xive; - - if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) - return; - - ics = d->chip_data; - if (WARN_ON(!ics)) - return; - - server = get_irq_server(ics, hw_irq); - - xive = wsp_ics_get_xive(ics, hw_irq); - xive = xive_set_server(xive, server); - xive = xive_set_priority(xive, DEFAULT_PRIORITY); - wsp_ics_set_xive(ics, hw_irq, xive); -} - -static unsigned int wsp_chip_startup(struct irq_data *d) -{ - /* unmask it */ - wsp_chip_unmask_irq(d); - return 0; -} - -static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics) -{ - u64 xive; - - if (hw_irq == XICS_IPI) - return; - - if (WARN_ON(!ics)) - return; - xive = wsp_ics_get_xive(ics, hw_irq); - xive = xive_set_server(xive, xics_default_server); - xive = xive_set_priority(xive, LOWEST_PRIORITY); - wsp_ics_set_xive(ics, hw_irq, xive); -} - -static void wsp_chip_mask_irq(struct 
irq_data *d) -{ - unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); - struct wsp_ics *ics = d->chip_data; - - if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) - return; - - wsp_mask_real_irq(hw_irq, ics); -} - -static int wsp_chip_set_affinity(struct irq_data *d, - const struct cpumask *cpumask, bool force) -{ - unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); - struct wsp_ics *ics; - int ret; - u64 xive; - - if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) - return -1; - - ics = d->chip_data; - if (WARN_ON(!ics)) - return -1; - xive = wsp_ics_get_xive(ics, hw_irq); - - /* - * For the moment only implement delivery to all cpus or one cpu. - * Get current irq_server for the given irq - */ - ret = cache_hwirq_map(ics, hw_irq, cpumask); - if (ret == -1) { - char cpulist[128]; - cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); - pr_warning("%s: No online cpus in the mask %s for irq %d\n", - __func__, cpulist, d->irq); - return -1; - } else if (ret == -ENOMEM) { - pr_warning("%s: Out of memory\n", __func__); - return -1; - } - - xive = xive_set_server(xive, get_irq_server(ics, hw_irq)); - wsp_ics_set_xive(ics, hw_irq, xive); - - return IRQ_SET_MASK_OK; -} - -static struct irq_chip wsp_irq_chip = { - .name = "WSP ICS", - .irq_startup = wsp_chip_startup, - .irq_mask = wsp_chip_mask_irq, - .irq_unmask = wsp_chip_unmask_irq, - .irq_set_affinity = wsp_chip_set_affinity -}; - -static int wsp_ics_host_match(struct ics *ics, struct device_node *dn) -{ - /* All ICSs in the system implement a global irq number space, - * so match against them all. */ - return of_device_is_compatible(dn, "ibm,ppc-xics"); -} - -static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq) -{ - if (hwirq >= wsp_ics->hwirq_start && - hwirq < wsp_ics->hwirq_start + wsp_ics->count) - return 1; - - return 0; -} - -static int wsp_ics_map(struct ics *ics, unsigned int virq) -{ - struct wsp_ics *wsp_ics = to_wsp_ics(ics); - unsigned int hw_irq = virq_to_hw(virq); - unsigned long flags; - - if (!wsp_ics_match_hwirq(wsp_ics, hw_irq)) - return -ENOENT; - - irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq); - - irq_set_chip_data(virq, wsp_ics); - - spin_lock_irqsave(&wsp_ics->lock, flags); - bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0); - spin_unlock_irqrestore(&wsp_ics->lock, flags); - - return 0; -} - -static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq) -{ - struct wsp_ics *wsp_ics = to_wsp_ics(ics); - - if (!wsp_ics_match_hwirq(wsp_ics, hw_irq)) - return; - - pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq); - wsp_mask_real_irq(hw_irq, wsp_ics); -} - -static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq) -{ - struct wsp_ics *wsp_ics = to_wsp_ics(ics); - - if (!wsp_ics_match_hwirq(wsp_ics, hw_irq)) - return -ENOENT; - - return get_irq_server(wsp_ics, hw_irq); -} - -/* HW Number allocation API */ - -static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn) -{ - struct device_node *iparent; - int i; - - iparent = of_irq_find_parent(dn); - if (!iparent) { - pr_err("wsp_ics: Failed to find interrupt parent!\n"); - return NULL; - } - - for(i = 0; i < num_ics; i++) { - if(ics_list[i].dn == iparent) - break; - } - - if (i >= num_ics) { - pr_err("wsp_ics: Unable to find parent bitmap!\n"); - return NULL; - } - - return &ics_list[i]; -} - -int wsp_ics_alloc_irq(struct device_node *dn, int num) -{ - struct wsp_ics *ics; - int order, offset; - - ics = wsp_ics_find_dn_ics(dn); - if (!ics) - 
return -ENODEV; - - /* Fast, but overly strict if num isn't a power of two */ - order = get_count_order(num); - - spin_lock_irq(&ics->lock); - offset = bitmap_find_free_region(ics->bitmap, ics->count, order); - spin_unlock_irq(&ics->lock); - - if (offset < 0) - return offset; - - return offset + ics->hwirq_start; -} - -void wsp_ics_free_irq(struct device_node *dn, unsigned int irq) -{ - struct wsp_ics *ics; - - ics = wsp_ics_find_dn_ics(dn); - if (WARN_ON(!ics)) - return; - - spin_lock_irq(&ics->lock); - bitmap_release_region(ics->bitmap, irq, 0); - spin_unlock_irq(&ics->lock); -} - -/* Initialisation */ - -static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics, - struct device_node *dn) -{ - int len, i, j, size; - u32 start, count; - const u32 *p; - - size = BITS_TO_LONGS(ics->count) * sizeof(long); - ics->bitmap = kzalloc(size, GFP_KERNEL); - if (!ics->bitmap) { - pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n"); - return -ENOMEM; - } - - spin_lock_init(&ics->lock); - - p = of_get_property(dn, "available-ranges", &len); - if (!p || !len) { - /* FIXME this should be a WARN() once mambo is updated */ - pr_err("wsp_ics: No available-ranges defined for %s\n", - dn->full_name); - return 0; - } - - if (len % (2 * sizeof(u32)) != 0) { - /* FIXME this should be a WARN() once mambo is updated */ - pr_err("wsp_ics: Invalid available-ranges for %s\n", - dn->full_name); - return 0; - } - - bitmap_fill(ics->bitmap, ics->count); - - for (i = 0; i < len / sizeof(u32); i += 2) { - start = of_read_number(p + i, 1); - count = of_read_number(p + i + 1, 1); - - pr_devel("%s: start: %d count: %d\n", __func__, start, count); - - if ((start + count) > (ics->hwirq_start + ics->count) || - start < ics->hwirq_start) { - pr_err("wsp_ics: Invalid range! -> %d to %d\n", - start, start + count); - break; - } - - for (j = 0; j < count; j++) - bitmap_release_region(ics->bitmap, - (start + j) - ics->hwirq_start, 0); - } - - /* Ensure LSIs are not available for allocation */ - bitmap_allocate_region(ics->bitmap, ics->lsi_base, - get_count_order(ics->lsi_count)); - - return 0; -} - -static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn) -{ - u32 lsi_buid, msi_buid, msi_base, msi_count; - void __iomem *regs; - const u32 *p; - int rc, len, i; - u64 caps, buid; - - p = of_get_property(dn, "interrupt-ranges", &len); - if (!p || len < (2 * sizeof(u32))) { - pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n", - dn->full_name); - return -ENOENT; - } - - if (len > (2 * sizeof(u32))) { - pr_err("wsp_ics: Multiple ics ranges not supported.\n"); - return -EINVAL; - } - - regs = of_iomap(dn, 0); - if (!regs) { - pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name); - return -ENXIO; - } - - ics->hwirq_start = of_read_number(p, 1); - ics->count = of_read_number(p + 1, 1); - ics->regs = regs; - - ics->chip_id = wsp_get_chip_id(dn); - if (WARN_ON(ics->chip_id < 0)) - ics->chip_id = 0; - - /* Get some informations about the critter */ - caps = in_be64(ICS_INT_CAPS_REG(ics->regs)); - buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs)); - ics->lsi_count = caps >> 56; - msi_count = (caps >> 44) & 0x7ff; - - /* Note: LSI BUID is 9 bits, but really only 3 are BUID and the - * rest is mixed in the interrupt number. 
We store the whole - * thing though - */ - lsi_buid = (buid >> 48) & 0x1ff; - ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5; - msi_buid = (buid >> 37) & 0x7; - msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11; - - pr_info("wsp_ics: Found %s\n", dn->full_name); - pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n", - ics->hwirq_start, ics->hwirq_start + ics->count - 1); - pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n", - ics->lsi_count, ics->lsi_base, - ics->lsi_base + ics->lsi_count - 1); - pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n", - msi_count, msi_base, - msi_base + msi_count - 1); - - /* Let's check the HW config is sane */ - if (ics->lsi_base < ics->hwirq_start || - (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count)) - pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n"); - if (msi_base < ics->hwirq_start || - (msi_base + msi_count) > (ics->hwirq_start + ics->count)) - pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n"); - - /* We don't check for overlap between LSI and MSI, which will happen - * if we use the same BUID, I'm not sure yet how legit that is. - */ - - rc = wsp_ics_bitmap_setup(ics, dn); - if (rc) { - iounmap(regs); - return rc; - } - - ics->dn = of_node_get(dn); - alloc_irq_map(ics); - - for(i = 0; i < ics->count; i++) - wsp_mask_real_irq(ics->hwirq_start + i, ics); - - ics->ics.map = wsp_ics_map; - ics->ics.mask_unknown = wsp_ics_mask_unknown; - ics->ics.get_server = wsp_ics_get_server; - ics->ics.host_match = wsp_ics_host_match; - - xics_register_ics(&ics->ics); - - return 0; -} - -static void __init wsp_ics_set_default_server(void) -{ - struct device_node *np; - u32 hwid; - - /* Find the server number for the boot cpu. */ - np = of_get_cpu_node(boot_cpuid, NULL); - BUG_ON(!np); - - hwid = get_hard_smp_processor_id(boot_cpuid); - - pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name); - xics_default_server = hwid; - - of_node_put(np); -} - -static int __init wsp_ics_init(void) -{ - struct device_node *dn; - struct wsp_ics *ics; - int rc, found; - - wsp_ics_set_default_server(); - - found = 0; - for_each_compatible_node(dn, NULL, "ibm,ppc-xics") - found++; - - if (found == 0) { - pr_err("wsp_ics: No ICS's found!\n"); - return -ENODEV; - } - - ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL); - if (!ics_list) { - pr_err("wsp_ics: No memory for structs.\n"); - return -ENOMEM; - } - - num_ics = 0; - ics = ics_list; - for_each_compatible_node(dn, NULL, "ibm,wsp-xics") { - rc = wsp_ics_setup(ics, dn); - if (rc == 0) { - ics++; - num_ics++; - } - } - - if (found != num_ics) { - pr_err("wsp_ics: Failed setting up %d ICS's\n", - found - num_ics); - return -1; - } - - return 0; -} - -void __init wsp_init_irq(void) -{ - wsp_ics_init(); - xics_init(); - - /* We need to patch our irq chip's EOI to point to the right ICP */ - wsp_irq_chip.irq_eoi = icp_ops->eoi; -} - -#ifdef CONFIG_PCI_MSI -static void wsp_ics_msi_unmask_irq(struct irq_data *d) -{ - wsp_chip_unmask_irq(d); - unmask_msi_irq(d); -} - -static unsigned int wsp_ics_msi_startup(struct irq_data *d) -{ - wsp_ics_msi_unmask_irq(d); - return 0; -} - -static void wsp_ics_msi_mask_irq(struct irq_data *d) -{ - mask_msi_irq(d); - wsp_chip_mask_irq(d); -} - -/* - * we do it this way because we reassinge default EOI handling in - * irq_init() above - */ -static void wsp_ics_eoi(struct irq_data *data) -{ - wsp_irq_chip.irq_eoi(data); -} - -static struct irq_chip wsp_ics_msi = { - .name = "WSP ICS MSI", - .irq_startup = 
wsp_ics_msi_startup, - .irq_mask = wsp_ics_msi_mask_irq, - .irq_unmask = wsp_ics_msi_unmask_irq, - .irq_eoi = wsp_ics_eoi, - .irq_set_affinity = wsp_chip_set_affinity -}; - -void wsp_ics_set_msi_chip(unsigned int irq) -{ - irq_set_chip(irq, &wsp_ics_msi); -} - -void wsp_ics_set_std_chip(unsigned int irq) -{ - irq_set_chip(irq, &wsp_irq_chip); -} -#endif /* CONFIG_PCI_MSI */ diff --git a/arch/powerpc/platforms/wsp/ics.h b/arch/powerpc/platforms/wsp/ics.h deleted file mode 100644 index 07b644e0cf97..000000000000 --- a/arch/powerpc/platforms/wsp/ics.h +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright 2009 IBM Corporation. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef __ICS_H -#define __ICS_H - -#define XIVE_ADDR_MASK 0x7FFULL - -extern void wsp_init_irq(void); - -extern int wsp_ics_alloc_irq(struct device_node *dn, int num); -extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq); - -#ifdef CONFIG_PCI_MSI -extern void wsp_ics_set_msi_chip(unsigned int irq); -extern void wsp_ics_set_std_chip(unsigned int irq); -#endif /* CONFIG_PCI_MSI */ - -#endif /* __ICS_H */ diff --git a/arch/powerpc/platforms/wsp/msi.c b/arch/powerpc/platforms/wsp/msi.c deleted file mode 100644 index 380882f27add..000000000000 --- a/arch/powerpc/platforms/wsp/msi.c +++ /dev/null @@ -1,102 +0,0 @@ -/* - * Copyright 2011 Michael Ellerman, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include - -#include "msi.h" -#include "ics.h" -#include "wsp_pci.h" - -/* Magic addresses for 32 & 64-bit MSIs with hardcoded MVE 0 */ -#define MSI_ADDR_32 0xFFFF0000ul -#define MSI_ADDR_64 0x1000000000000000ul - -int wsp_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -{ - struct pci_controller *phb; - struct msi_desc *entry; - struct msi_msg msg; - unsigned int virq; - int hwirq; - - phb = pci_bus_to_host(dev->bus); - if (!phb) - return -ENOENT; - - entry = list_first_entry(&dev->msi_list, struct msi_desc, list); - if (entry->msi_attrib.is_64) { - msg.address_lo = 0; - msg.address_hi = MSI_ADDR_64 >> 32; - } else { - msg.address_lo = MSI_ADDR_32; - msg.address_hi = 0; - } - - list_for_each_entry(entry, &dev->msi_list, list) { - hwirq = wsp_ics_alloc_irq(phb->dn, 1); - if (hwirq < 0) { - dev_warn(&dev->dev, "wsp_msi: hwirq alloc failed!\n"); - return hwirq; - } - - virq = irq_create_mapping(NULL, hwirq); - if (virq == NO_IRQ) { - dev_warn(&dev->dev, "wsp_msi: virq alloc failed!\n"); - return -1; - } - - dev_dbg(&dev->dev, "wsp_msi: allocated irq %#x/%#x\n", - hwirq, virq); - - wsp_ics_set_msi_chip(virq); - irq_set_msi_desc(virq, entry); - msg.data = hwirq & XIVE_ADDR_MASK; - write_msi_msg(virq, &msg); - } - - return 0; -} - -void wsp_teardown_msi_irqs(struct pci_dev *dev) -{ - struct pci_controller *phb; - struct msi_desc *entry; - int hwirq; - - phb = pci_bus_to_host(dev->bus); - - dev_dbg(&dev->dev, "wsp_msi: tearing down msi irqs\n"); - - list_for_each_entry(entry, &dev->msi_list, list) { - if (entry->irq == NO_IRQ) - continue; - - irq_set_msi_desc(entry->irq, NULL); - wsp_ics_set_std_chip(entry->irq); - - hwirq = virq_to_hw(entry->irq); - /* In this order to avoid racing with irq_create_mapping() */ - irq_dispose_mapping(entry->irq); - wsp_ics_free_irq(phb->dn, hwirq); - } -} - -void wsp_setup_phb_msi(struct pci_controller *phb) -{ - /* Create a single MVE at offset 0 that matches everything */ - out_be64(phb->cfg_data + PCIE_REG_IODA_ADDR, PCIE_REG_IODA_AD_TBL_MVT); - out_be64(phb->cfg_data + PCIE_REG_IODA_DATA0, 1ull << 63); - - ppc_md.setup_msi_irqs = wsp_setup_msi_irqs; - ppc_md.teardown_msi_irqs = wsp_teardown_msi_irqs; -} diff --git a/arch/powerpc/platforms/wsp/msi.h b/arch/powerpc/platforms/wsp/msi.h deleted file mode 100644 index 0ab27b71b24d..000000000000 --- a/arch/powerpc/platforms/wsp/msi.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright 2011 Michael Ellerman, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#ifndef __WSP_MSI_H -#define __WSP_MSI_H - -#ifdef CONFIG_PCI_MSI -extern void wsp_setup_phb_msi(struct pci_controller *phb); -#else -static inline void wsp_setup_phb_msi(struct pci_controller *phb) { } -#endif - -#endif /* __WSP_MSI_H */ diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c deleted file mode 100644 index 3f6729807938..000000000000 --- a/arch/powerpc/platforms/wsp/opb_pic.c +++ /dev/null @@ -1,321 +0,0 @@ -/* - * IBM Onboard Peripheral Bus Interrupt Controller - * - * Copyright 2010 Jack Miller, IBM Corporation. 
- * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. - */ - -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include - -#define OPB_NR_IRQS 32 - -#define OPB_MLSASIER 0x04 /* MLS Accumulated Status IER */ -#define OPB_MLSIR 0x50 /* MLS Interrupt Register */ -#define OPB_MLSIER 0x54 /* MLS Interrupt Enable Register */ -#define OPB_MLSIPR 0x58 /* MLS Interrupt Polarity Register */ -#define OPB_MLSIIR 0x5c /* MLS Interrupt Inputs Register */ - -static int opb_index = 0; - -struct opb_pic { - struct irq_domain *host; - void *regs; - int index; - spinlock_t lock; -}; - -static u32 opb_in(struct opb_pic *opb, int offset) -{ - return in_be32(opb->regs + offset); -} - -static void opb_out(struct opb_pic *opb, int offset, u32 val) -{ - out_be32(opb->regs + offset, val); -} - -static void opb_unmask_irq(struct irq_data *d) -{ - struct opb_pic *opb; - unsigned long flags; - u32 ier, bitset; - - opb = d->chip_data; - bitset = (1 << (31 - irqd_to_hwirq(d))); - - spin_lock_irqsave(&opb->lock, flags); - - ier = opb_in(opb, OPB_MLSIER); - opb_out(opb, OPB_MLSIER, ier | bitset); - ier = opb_in(opb, OPB_MLSIER); - - spin_unlock_irqrestore(&opb->lock, flags); -} - -static void opb_mask_irq(struct irq_data *d) -{ - struct opb_pic *opb; - unsigned long flags; - u32 ier, mask; - - opb = d->chip_data; - mask = ~(1 << (31 - irqd_to_hwirq(d))); - - spin_lock_irqsave(&opb->lock, flags); - - ier = opb_in(opb, OPB_MLSIER); - opb_out(opb, OPB_MLSIER, ier & mask); - ier = opb_in(opb, OPB_MLSIER); // Flush posted writes - - spin_unlock_irqrestore(&opb->lock, flags); -} - -static void opb_ack_irq(struct irq_data *d) -{ - struct opb_pic *opb; - unsigned long flags; - u32 bitset; - - opb = d->chip_data; - bitset = (1 << (31 - irqd_to_hwirq(d))); - - spin_lock_irqsave(&opb->lock, flags); - - opb_out(opb, OPB_MLSIR, bitset); - opb_in(opb, OPB_MLSIR); // Flush posted writes - - spin_unlock_irqrestore(&opb->lock, flags); -} - -static void opb_mask_ack_irq(struct irq_data *d) -{ - struct opb_pic *opb; - unsigned long flags; - u32 bitset; - u32 ier, ir; - - opb = d->chip_data; - bitset = (1 << (31 - irqd_to_hwirq(d))); - - spin_lock_irqsave(&opb->lock, flags); - - ier = opb_in(opb, OPB_MLSIER); - opb_out(opb, OPB_MLSIER, ier & ~bitset); - ier = opb_in(opb, OPB_MLSIER); // Flush posted writes - - opb_out(opb, OPB_MLSIR, bitset); - ir = opb_in(opb, OPB_MLSIR); // Flush posted writes - - spin_unlock_irqrestore(&opb->lock, flags); -} - -static int opb_set_irq_type(struct irq_data *d, unsigned int flow) -{ - struct opb_pic *opb; - unsigned long flags; - int invert, ipr, mask, bit; - - opb = d->chip_data; - - /* The only information we're interested in in the type is whether it's - * a high or low trigger. For high triggered interrupts, the polarity - * set for it in the MLS Interrupt Polarity Register is 0, for low - * interrupts it's 1 so that the proper input in the MLS Interrupt Input - * Register is interrupted as asserting the interrupt. 
*/ - - switch (flow) { - case IRQ_TYPE_NONE: - opb_mask_irq(d); - return 0; - - case IRQ_TYPE_LEVEL_HIGH: - invert = 0; - break; - - case IRQ_TYPE_LEVEL_LOW: - invert = 1; - break; - - default: - return -EINVAL; - } - - bit = (1 << (31 - irqd_to_hwirq(d))); - mask = ~bit; - - spin_lock_irqsave(&opb->lock, flags); - - ipr = opb_in(opb, OPB_MLSIPR); - ipr = (ipr & mask) | (invert ? bit : 0); - opb_out(opb, OPB_MLSIPR, ipr); - ipr = opb_in(opb, OPB_MLSIPR); // Flush posted writes - - spin_unlock_irqrestore(&opb->lock, flags); - - /* Record the type in the interrupt descriptor */ - irqd_set_trigger_type(d, flow); - - return 0; -} - -static struct irq_chip opb_irq_chip = { - .name = "OPB", - .irq_mask = opb_mask_irq, - .irq_unmask = opb_unmask_irq, - .irq_mask_ack = opb_mask_ack_irq, - .irq_ack = opb_ack_irq, - .irq_set_type = opb_set_irq_type -}; - -static int opb_host_map(struct irq_domain *host, unsigned int virq, - irq_hw_number_t hwirq) -{ - struct opb_pic *opb; - - opb = host->host_data; - - /* Most of the important stuff is handled by the generic host code, like - * the lookup, so just attach some info to the virtual irq */ - - irq_set_chip_data(virq, opb); - irq_set_chip_and_handler(virq, &opb_irq_chip, handle_level_irq); - irq_set_irq_type(virq, IRQ_TYPE_NONE); - - return 0; -} - -static const struct irq_domain_ops opb_host_ops = { - .map = opb_host_map, - .xlate = irq_domain_xlate_twocell, -}; - -irqreturn_t opb_irq_handler(int irq, void *private) -{ - struct opb_pic *opb; - u32 ir, src, subvirq; - - opb = (struct opb_pic *) private; - - /* Read the OPB MLS Interrupt Register for - * asserted interrupts */ - ir = opb_in(opb, OPB_MLSIR); - if (!ir) - return IRQ_NONE; - - do { - /* Get 1 - 32 source, *NOT* bit */ - src = 32 - ffs(ir); - - /* Translate from the OPB's conception of interrupt number to - * Linux's virtual IRQ */ - - subvirq = irq_linear_revmap(opb->host, src); - - generic_handle_irq(subvirq); - } while ((ir = opb_in(opb, OPB_MLSIR))); - - return IRQ_HANDLED; -} - -struct opb_pic *opb_pic_init_one(struct device_node *dn) -{ - struct opb_pic *opb; - struct resource res; - - if (of_address_to_resource(dn, 0, &res)) { - printk(KERN_ERR "opb: Couldn't translate resource\n"); - return NULL; - } - - opb = kzalloc(sizeof(struct opb_pic), GFP_KERNEL); - if (!opb) { - printk(KERN_ERR "opb: Failed to allocate opb struct!\n"); - return NULL; - } - - /* Get access to the OPB MMIO registers */ - opb->regs = ioremap(res.start + 0x10000, 0x1000); - if (!opb->regs) { - printk(KERN_ERR "opb: Failed to allocate register space!\n"); - goto free_opb; - } - - /* Allocate an irq domain so that Linux knows that despite only - * having one interrupt to issue, we're the controller for multiple - * hardware IRQs, so later we can lookup their virtual IRQs. 
 */
-
-	opb->host = irq_domain_add_linear(dn, OPB_NR_IRQS, &opb_host_ops, opb);
-	if (!opb->host) {
-		printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
-		goto free_regs;
-	}
-
-	opb->index = opb_index++;
-	spin_lock_init(&opb->lock);
-
-	/* Disable all interrupts by default */
-	opb_out(opb, OPB_MLSASIER, 0);
-	opb_out(opb, OPB_MLSIER, 0);
-
-	/* ACK any interrupts left by FW */
-	opb_out(opb, OPB_MLSIR, 0xFFFFFFFF);
-
-	return opb;
-
-free_regs:
-	iounmap(opb->regs);
-free_opb:
-	kfree(opb);
-	return NULL;
-}
-
-void __init opb_pic_init(void)
-{
-	struct device_node *dn;
-	struct opb_pic *opb;
-	int virq;
-	int rc;
-
-	/* Call init_one for each OPB device */
-	for_each_compatible_node(dn, NULL, "ibm,opb") {
-
-		/* Fill in an OPB struct */
-		opb = opb_pic_init_one(dn);
-		if (!opb) {
-			printk(KERN_WARNING "opb: Failed to init node, skipped!\n");
-			continue;
-		}
-
-		/* Map / get opb's hardware virtual irq */
-		virq = irq_of_parse_and_map(dn, 0);
-		if (virq <= 0) {
-			printk("opb: irq_of_parse_and_map failed!\n");
-			continue;
-		}
-
-		/* Attach opb interrupt handler to new virtual IRQ */
-		rc = request_irq(virq, opb_irq_handler, IRQF_NO_THREAD,
-				 "OPB LS Cascade", opb);
-		if (rc) {
-			printk("opb: request_irq failed: %d\n", rc);
-			continue;
-		}
-
-		printk("OPB%d init with %d IRQs at %p\n", opb->index,
-		       OPB_NR_IRQS, opb->regs);
-	}
-}
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
deleted file mode 100644
index a87b414c766a..000000000000
--- a/arch/powerpc/platforms/wsp/psr2.c
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2008-2011, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-#include
-
-#include "ics.h"
-#include "wsp.h"
-
-
-static void psr2_spin(void)
-{
-	hard_irq_disable();
-	for (;;)
-		continue;
-}
-
-static void psr2_restart(char *cmd)
-{
-	psr2_spin();
-}
-
-static int __init psr2_probe(void)
-{
-	unsigned long root = of_get_flat_dt_root();
-
-	if (of_flat_dt_is_compatible(root, "ibm,wsp-chroma")) {
-		/* chroma systems also claim they are psr2s */
-		return 0;
-	}
-
-	if (!of_flat_dt_is_compatible(root, "ibm,psr2"))
-		return 0;
-
-	return 1;
-}
-
-define_machine(psr2_md) {
-	.name = "PSR2 A2",
-	.probe = psr2_probe,
-	.setup_arch = wsp_setup_arch,
-	.restart = psr2_restart,
-	.power_off = psr2_spin,
-	.halt = psr2_spin,
-	.calibrate_decr = generic_calibrate_decr,
-	.init_IRQ = wsp_setup_irq,
-	.progress = udbg_progress,
-	.power_save = book3e_idle,
-};
-
-machine_arch_initcall(psr2_md, wsp_probe_devices);
diff --git a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c
deleted file mode 100644
index 8c79ce016cf1..000000000000
--- a/arch/powerpc/platforms/wsp/scom_smp.c
+++ /dev/null
@@ -1,435 +0,0 @@
-/*
- * SCOM support for A2 platforms
- *
- * Copyright 2007-2011 Benjamin Herrenschmidt, David Gibson,
- *                     Michael Ellerman, IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */ - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include "wsp.h" - -#define SCOM_RAMC 0x2a /* Ram Command */ -#define SCOM_RAMC_TGT1_EXT 0x80000000 -#define SCOM_RAMC_SRC1_EXT 0x40000000 -#define SCOM_RAMC_SRC2_EXT 0x20000000 -#define SCOM_RAMC_SRC3_EXT 0x10000000 -#define SCOM_RAMC_ENABLE 0x00080000 -#define SCOM_RAMC_THREADSEL 0x00060000 -#define SCOM_RAMC_EXECUTE 0x00010000 -#define SCOM_RAMC_MSR_OVERRIDE 0x00008000 -#define SCOM_RAMC_MSR_PR 0x00004000 -#define SCOM_RAMC_MSR_GS 0x00002000 -#define SCOM_RAMC_FORCE 0x00001000 -#define SCOM_RAMC_FLUSH 0x00000800 -#define SCOM_RAMC_INTERRUPT 0x00000004 -#define SCOM_RAMC_ERROR 0x00000002 -#define SCOM_RAMC_DONE 0x00000001 -#define SCOM_RAMI 0x29 /* Ram Instruction */ -#define SCOM_RAMIC 0x28 /* Ram Instruction and Command */ -#define SCOM_RAMIC_INSN 0xffffffff00000000 -#define SCOM_RAMD 0x2d /* Ram Data */ -#define SCOM_RAMDH 0x2e /* Ram Data High */ -#define SCOM_RAMDL 0x2f /* Ram Data Low */ -#define SCOM_PCCR0 0x33 /* PC Configuration Register 0 */ -#define SCOM_PCCR0_ENABLE_DEBUG 0x80000000 -#define SCOM_PCCR0_ENABLE_RAM 0x40000000 -#define SCOM_THRCTL 0x30 /* Thread Control and Status */ -#define SCOM_THRCTL_T0_STOP 0x80000000 -#define SCOM_THRCTL_T1_STOP 0x40000000 -#define SCOM_THRCTL_T2_STOP 0x20000000 -#define SCOM_THRCTL_T3_STOP 0x10000000 -#define SCOM_THRCTL_T0_STEP 0x08000000 -#define SCOM_THRCTL_T1_STEP 0x04000000 -#define SCOM_THRCTL_T2_STEP 0x02000000 -#define SCOM_THRCTL_T3_STEP 0x01000000 -#define SCOM_THRCTL_T0_RUN 0x00800000 -#define SCOM_THRCTL_T1_RUN 0x00400000 -#define SCOM_THRCTL_T2_RUN 0x00200000 -#define SCOM_THRCTL_T3_RUN 0x00100000 -#define SCOM_THRCTL_T0_PM 0x00080000 -#define SCOM_THRCTL_T1_PM 0x00040000 -#define SCOM_THRCTL_T2_PM 0x00020000 -#define SCOM_THRCTL_T3_PM 0x00010000 -#define SCOM_THRCTL_T0_UDE 0x00008000 -#define SCOM_THRCTL_T1_UDE 0x00004000 -#define SCOM_THRCTL_T2_UDE 0x00002000 -#define SCOM_THRCTL_T3_UDE 0x00001000 -#define SCOM_THRCTL_ASYNC_DIS 0x00000800 -#define SCOM_THRCTL_TB_DIS 0x00000400 -#define SCOM_THRCTL_DEC_DIS 0x00000200 -#define SCOM_THRCTL_AND 0x31 /* Thread Control and Status */ -#define SCOM_THRCTL_OR 0x32 /* Thread Control and Status */ - - -static DEFINE_PER_CPU(scom_map_t, scom_ptrs); - -static scom_map_t get_scom(int cpu, struct device_node *np, int *first_thread) -{ - scom_map_t scom = per_cpu(scom_ptrs, cpu); - int tcpu; - - if (scom_map_ok(scom)) { - *first_thread = 0; - return scom; - } - - *first_thread = 1; - - scom = scom_map_device(np, 0); - - for (tcpu = cpu_first_thread_sibling(cpu); - tcpu <= cpu_last_thread_sibling(cpu); tcpu++) - per_cpu(scom_ptrs, tcpu) = scom; - - /* Hack: for the boot core, this will actually get called on - * the second thread up, not the first so our test above will - * set first_thread incorrectly. 
*/ - if (cpu_first_thread_sibling(cpu) == 0) - *first_thread = 0; - - return scom; -} - -static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask) -{ - u64 cmd, mask, val; - int n = 0; - - cmd = ((u64)insn << 32) | (((u64)extmask & 0xf) << 28) - | ((u64)thread << 17) | SCOM_RAMC_ENABLE | SCOM_RAMC_EXECUTE; - mask = SCOM_RAMC_DONE | SCOM_RAMC_INTERRUPT | SCOM_RAMC_ERROR; - - scom_write(scom, SCOM_RAMIC, cmd); - - for (;;) { - if (scom_read(scom, SCOM_RAMC, &val) != 0) { - pr_err("SCOM error on instruction 0x%08x, thread %d\n", - insn, thread); - return -1; - } - if (val & mask) - break; - pr_devel("Waiting on RAMC = 0x%llx\n", val); - if (++n == 3) { - pr_err("RAMC timeout on instruction 0x%08x, thread %d\n", - insn, thread); - return -1; - } - } - - if (val & SCOM_RAMC_INTERRUPT) { - pr_err("RAMC interrupt on instruction 0x%08x, thread %d\n", - insn, thread); - return -SCOM_RAMC_INTERRUPT; - } - - if (val & SCOM_RAMC_ERROR) { - pr_err("RAMC error on instruction 0x%08x, thread %d\n", - insn, thread); - return -SCOM_RAMC_ERROR; - } - - return 0; -} - -static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt, - u64 *out_gpr) -{ - int rc; - - /* or rN, rN, rN */ - u32 insn = 0x7c000378 | (gpr << 21) | (gpr << 16) | (gpr << 11); - rc = a2_scom_ram(scom, thread, insn, alt ? 0xf : 0x0); - if (rc) - return rc; - - return scom_read(scom, SCOM_RAMD, out_gpr); -} - -static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr) -{ - int rc, sprhi, sprlo; - u32 insn; - - sprhi = spr >> 5; - sprlo = spr & 0x1f; - insn = 0x7c2002a6 | (sprlo << 16) | (sprhi << 11); /* mfspr r1,spr */ - - if (spr == 0x0ff0) - insn = 0x7c2000a6; /* mfmsr r1 */ - - rc = a2_scom_ram(scom, thread, insn, 0xf); - if (rc) - return rc; - return a2_scom_getgpr(scom, thread, 1, 1, out_spr); -} - -static int a2_scom_setgpr(scom_map_t scom, int thread, int gpr, - int alt, u64 val) -{ - u32 lis = 0x3c000000 | (gpr << 21); - u32 li = 0x38000000 | (gpr << 21); - u32 oris = 0x64000000 | (gpr << 21) | (gpr << 16); - u32 ori = 0x60000000 | (gpr << 21) | (gpr << 16); - u32 rldicr32 = 0x780007c6 | (gpr << 21) | (gpr << 16); - u32 highest = val >> 48; - u32 higher = (val >> 32) & 0xffff; - u32 high = (val >> 16) & 0xffff; - u32 low = val & 0xffff; - int lext = alt ? 0x8 : 0x0; - int oext = alt ? 
0xf : 0x0; - int rc = 0; - - if (highest) - rc |= a2_scom_ram(scom, thread, lis | highest, lext); - - if (higher) { - if (highest) - rc |= a2_scom_ram(scom, thread, oris | higher, oext); - else - rc |= a2_scom_ram(scom, thread, li | higher, lext); - } - - if (highest || higher) - rc |= a2_scom_ram(scom, thread, rldicr32, oext); - - if (high) { - if (highest || higher) - rc |= a2_scom_ram(scom, thread, oris | high, oext); - else - rc |= a2_scom_ram(scom, thread, lis | high, lext); - } - - if (highest || higher || high) - rc |= a2_scom_ram(scom, thread, ori | low, oext); - else - rc |= a2_scom_ram(scom, thread, li | low, lext); - - return rc; -} - -static int a2_scom_setspr(scom_map_t scom, int thread, int spr, u64 val) -{ - int sprhi = spr >> 5; - int sprlo = spr & 0x1f; - /* mtspr spr, r1 */ - u32 insn = 0x7c2003a6 | (sprlo << 16) | (sprhi << 11); - - if (spr == 0x0ff0) - insn = 0x7c200124; /* mtmsr r1 */ - - if (a2_scom_setgpr(scom, thread, 1, 1, val)) - return -1; - - return a2_scom_ram(scom, thread, insn, 0xf); -} - -static int a2_scom_initial_tlb(scom_map_t scom, int thread) -{ - extern u32 a2_tlbinit_code_start[], a2_tlbinit_code_end[]; - extern u32 a2_tlbinit_after_iprot_flush[]; - extern u32 a2_tlbinit_after_linear_map[]; - u32 assoc, entries, i; - u64 epn, tlbcfg; - u32 *p; - int rc; - - /* Invalidate all entries (including iprot) */ - - rc = a2_scom_getspr(scom, thread, SPRN_TLB0CFG, &tlbcfg); - if (rc) - goto scom_fail; - entries = tlbcfg & TLBnCFG_N_ENTRY; - assoc = (tlbcfg & TLBnCFG_ASSOC) >> 24; - epn = 0; - - /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */ - a2_scom_setspr(scom, thread, SPRN_MMUCR2, 0x000a7531); - /* Set MMUCR3 to write all thids bit to the TLB */ - a2_scom_setspr(scom, thread, SPRN_MMUCR3, 0x0000000f); - - /* Set MAS1 for 1G page size, and MAS2 to our initial EPN */ - a2_scom_setspr(scom, thread, SPRN_MAS1, MAS1_TSIZE(BOOK3E_PAGESZ_1GB)); - a2_scom_setspr(scom, thread, SPRN_MAS2, epn); - for (i = 0; i < entries; i++) { - - a2_scom_setspr(scom, thread, SPRN_MAS0, MAS0_ESEL(i % assoc)); - - /* tlbwe */ - rc = a2_scom_ram(scom, thread, 0x7c0007a4, 0); - if (rc) - goto scom_fail; - - /* Next entry is new address? */ - if((i + 1) % assoc == 0) { - epn += (1 << 30); - a2_scom_setspr(scom, thread, SPRN_MAS2, epn); - } - } - - /* Setup args for linear mapping */ - rc = a2_scom_setgpr(scom, thread, 3, 0, MAS0_TLBSEL(0)); - if (rc) - goto scom_fail; - - /* Linear mapping */ - for (p = a2_tlbinit_code_start; p < a2_tlbinit_after_linear_map; p++) { - rc = a2_scom_ram(scom, thread, *p, 0); - if (rc) - goto scom_fail; - } - - /* - * For the boot thread, between the linear mapping and the debug - * mappings there is a loop to flush iprot mappings. Ramming doesn't do - * branches, but the secondary threads don't need to be nearly as smart - * (i.e. we don't need to worry about invalidating the mapping we're - * standing on). - */ - - /* Debug mappings. 
Expects r11 = MAS0 from linear map (set above) */
-	for (p = a2_tlbinit_after_iprot_flush; p < a2_tlbinit_code_end; p++) {
-		rc = a2_scom_ram(scom, thread, *p, 0);
-		if (rc)
-			goto scom_fail;
-	}
-
-scom_fail:
-	if (rc)
-		pr_err("Setting up initial TLB failed, err %d\n", rc);
-
-	if (rc == -SCOM_RAMC_INTERRUPT) {
-		/* Interrupt, dump some status */
-		int rc[10];
-		u64 iar, srr0, srr1, esr, mas0, mas1, mas2, mas7_3, mas8, ccr2;
-		rc[0] = a2_scom_getspr(scom, thread, SPRN_IAR, &iar);
-		rc[1] = a2_scom_getspr(scom, thread, SPRN_SRR0, &srr0);
-		rc[2] = a2_scom_getspr(scom, thread, SPRN_SRR1, &srr1);
-		rc[3] = a2_scom_getspr(scom, thread, SPRN_ESR, &esr);
-		rc[4] = a2_scom_getspr(scom, thread, SPRN_MAS0, &mas0);
-		rc[5] = a2_scom_getspr(scom, thread, SPRN_MAS1, &mas1);
-		rc[6] = a2_scom_getspr(scom, thread, SPRN_MAS2, &mas2);
-		rc[7] = a2_scom_getspr(scom, thread, SPRN_MAS7_MAS3, &mas7_3);
-		rc[8] = a2_scom_getspr(scom, thread, SPRN_MAS8, &mas8);
-		rc[9] = a2_scom_getspr(scom, thread, SPRN_A2_CCR2, &ccr2);
-		pr_err(" -> retrieved IAR =0x%llx (err %d)\n", iar, rc[0]);
-		pr_err("    retrieved SRR0=0x%llx (err %d)\n", srr0, rc[1]);
-		pr_err("    retrieved SRR1=0x%llx (err %d)\n", srr1, rc[2]);
-		pr_err("    retrieved ESR =0x%llx (err %d)\n", esr, rc[3]);
-		pr_err("    retrieved MAS0=0x%llx (err %d)\n", mas0, rc[4]);
-		pr_err("    retrieved MAS1=0x%llx (err %d)\n", mas1, rc[5]);
-		pr_err("    retrieved MAS2=0x%llx (err %d)\n", mas2, rc[6]);
-		pr_err("    retrieved MS73=0x%llx (err %d)\n", mas7_3, rc[7]);
-		pr_err("    retrieved MAS8=0x%llx (err %d)\n", mas8, rc[8]);
-		pr_err("    retrieved CCR2=0x%llx (err %d)\n", ccr2, rc[9]);
-	}
-
-	return rc;
-}
-
-int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np)
-{
-	u64 init_iar, init_msr, init_ccr2;
-	unsigned long start_here;
-	int rc, core_setup;
-	scom_map_t scom;
-	u64 pccr0;
-
-	scom = get_scom(lcpu, np, &core_setup);
-	if (!scom) {
-		printk(KERN_ERR "Couldn't map SCOM for CPU%d\n", lcpu);
-		return -1;
-	}
-
-	pr_devel("Bringing up CPU%d using SCOM...\n", lcpu);
-
-	if (scom_read(scom, SCOM_PCCR0, &pccr0) != 0) {
-		printk(KERN_ERR "XSCOM failure reading PCCR0 on CPU%d\n", lcpu);
-		return -1;
-	}
-	scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG |
-		   SCOM_PCCR0_ENABLE_RAM);
-
-	/* Stop the thread with THRCTL. If we are setting up the TLB we stop all
-	 * threads. We also disable asynchronous interrupts while RAMing.
-	 */
-	if (core_setup)
-		scom_write(scom, SCOM_THRCTL_OR,
-			   SCOM_THRCTL_T0_STOP |
-			   SCOM_THRCTL_T1_STOP |
-			   SCOM_THRCTL_T2_STOP |
-			   SCOM_THRCTL_T3_STOP |
-			   SCOM_THRCTL_ASYNC_DIS);
-	else
-		scom_write(scom, SCOM_THRCTL_OR, SCOM_THRCTL_T0_STOP >> thr_idx);
-
-	/* Flush its pipeline just in case */
-	scom_write(scom, SCOM_RAMC, ((u64)thr_idx << 17) |
-		   SCOM_RAMC_FLUSH | SCOM_RAMC_ENABLE);
-
-	a2_scom_getspr(scom, thr_idx, SPRN_IAR, &init_iar);
-	a2_scom_getspr(scom, thr_idx, 0x0ff0, &init_msr);
-	a2_scom_getspr(scom, thr_idx, SPRN_A2_CCR2, &init_ccr2);
-
-	/* Set MSR to MSR_CM (0x0ff0 is magic value for MSR_CM) */
-	rc = a2_scom_setspr(scom, thr_idx, 0x0ff0, MSR_CM);
-	if (rc) {
-		pr_err("Failed to set MSR! err %d\n", rc);
-		return rc;
-	}
-
-	/* RAM in a sync/isync for the sake of it */
-	a2_scom_ram(scom, thr_idx, 0x7c0004ac, 0);
-	a2_scom_ram(scom, thr_idx, 0x4c00012c, 0);
-
-	if (core_setup) {
-		pr_devel("CPU%d is first thread in core, initializing TLB...\n",
-			 lcpu);
-		rc = a2_scom_initial_tlb(scom, thr_idx);
-		if (rc)
-			goto fail;
-	}
-
-	start_here = ppc_function_entry(core_setup ?
generic_secondary_smp_init - : generic_secondary_thread_init); - pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here); - - rc |= a2_scom_setspr(scom, thr_idx, SPRN_IAR, start_here); - rc |= a2_scom_setgpr(scom, thr_idx, 3, 0, - get_hard_smp_processor_id(lcpu)); - /* - * Tell book3e_secondary_core_init not to set up the TLB, we've - * already done that. - */ - rc |= a2_scom_setgpr(scom, thr_idx, 4, 0, 1); - - rc |= a2_scom_setspr(scom, thr_idx, SPRN_TENS, 0x1 << thr_idx); - - scom_write(scom, SCOM_RAMC, 0); - scom_write(scom, SCOM_THRCTL_AND, ~(SCOM_THRCTL_T0_STOP >> thr_idx)); - scom_write(scom, SCOM_PCCR0, pccr0); -fail: - pr_devel(" SCOM initialization %s\n", rc ? "failed" : "succeeded"); - if (rc) { - pr_err("Old IAR=0x%08llx MSR=0x%08llx CCR2=0x%08llx\n", - init_iar, init_msr, init_ccr2); - } - - return rc; -} diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c deleted file mode 100644 index 6538b4de34fc..000000000000 --- a/arch/powerpc/platforms/wsp/scom_wsp.c +++ /dev/null @@ -1,82 +0,0 @@ -/* - * SCOM backend for WSP - * - * Copyright 2010 Benjamin Herrenschmidt, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include "wsp.h" - - -static scom_map_t wsp_scom_map(struct device_node *dev, u64 reg, u64 count) -{ - struct resource r; - u64 xscom_addr; - - if (!of_get_property(dev, "scom-controller", NULL)) { - pr_err("%s: device %s is not a SCOM controller\n", - __func__, dev->full_name); - return SCOM_MAP_INVALID; - } - - if (of_address_to_resource(dev, 0, &r)) { - pr_debug("Failed to find SCOM controller address\n"); - return 0; - } - - /* Transform the SCOM address into an XSCOM offset */ - xscom_addr = ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3); - - return (scom_map_t)ioremap(r.start + xscom_addr, count << 3); -} - -static void wsp_scom_unmap(scom_map_t map) -{ - iounmap((void *)map); -} - -static int wsp_scom_read(scom_map_t map, u64 reg, u64 *value) -{ - u64 __iomem *addr = (u64 __iomem *)map; - - *value = in_be64(addr + reg); - - return 0; -} - -static int wsp_scom_write(scom_map_t map, u64 reg, u64 value) -{ - u64 __iomem *addr = (u64 __iomem *)map; - - out_be64(addr + reg, value); - - return 0; -} - -static const struct scom_controller wsp_scom_controller = { - .map = wsp_scom_map, - .unmap = wsp_scom_unmap, - .read = wsp_scom_read, - .write = wsp_scom_write -}; - -void scom_init_wsp(void) -{ - scom_init(&wsp_scom_controller); -} diff --git a/arch/powerpc/platforms/wsp/setup.c b/arch/powerpc/platforms/wsp/setup.c deleted file mode 100644 index 11ac2f05e01c..000000000000 --- a/arch/powerpc/platforms/wsp/setup.c +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2010 Michael Ellerman, IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#include -#include - -#include "wsp.h" - -/* - * Find chip-id by walking up device tree looking for ibm,wsp-chip-id property. - * Won't work for nodes that are not a descendant of a wsp node. 
- */ -int wsp_get_chip_id(struct device_node *dn) -{ - const u32 *p; - int rc; - - /* Start looking at the specified node, not its parent */ - dn = of_node_get(dn); - while (dn && !(p = of_get_property(dn, "ibm,wsp-chip-id", NULL))) - dn = of_get_next_parent(dn); - - if (!dn) - return -1; - - rc = *p; - of_node_put(dn); - - return rc; -} diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c deleted file mode 100644 index 332a18b81403..000000000000 --- a/arch/powerpc/platforms/wsp/smp.c +++ /dev/null @@ -1,88 +0,0 @@ -/* - * SMP Support for A2 platforms - * - * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - * - */ - -#include -#include -#include -#include -#include - -#include -#include -#include - -#include "ics.h" -#include "wsp.h" - -static void smp_a2_setup_cpu(int cpu) -{ - doorbell_setup_this_cpu(); - - if (cpu != boot_cpuid) - xics_setup_cpu(); -} - -int smp_a2_kick_cpu(int nr) -{ - const char *enable_method; - struct device_node *np; - int thr_idx; - - if (nr < 0 || nr >= NR_CPUS) - return -ENOENT; - - np = of_get_cpu_node(nr, &thr_idx); - if (!np) - return -ENODEV; - - enable_method = of_get_property(np, "enable-method", NULL); - pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method); - - if (!enable_method) { - printk(KERN_ERR "CPU%d has no enable-method\n", nr); - return -ENOENT; - } else if (strcmp(enable_method, "ibm,a2-scom") == 0) { - if (a2_scom_startup_cpu(nr, thr_idx, np)) - return -1; - } else { - printk(KERN_ERR "CPU%d: Don't understand enable-method \"%s\"\n", - nr, enable_method); - return -EINVAL; - } - - /* - * The processor is currently spinning, waiting for the - * cpu_start field to become non-zero After we set cpu_start, - * the processor will continue on to secondary_start - */ - paca[nr].cpu_start = 1; - - return 0; -} - -static int __init smp_a2_probe(void) -{ - return num_possible_cpus(); -} - -static struct smp_ops_t a2_smp_ops = { - .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */ - .cause_ipi = doorbell_cause_ipi, - .probe = smp_a2_probe, - .kick_cpu = smp_a2_kick_cpu, - .setup_cpu = smp_a2_setup_cpu, -}; - -void __init a2_setup_smp(void) -{ - smp_ops = &a2_smp_ops; -} diff --git a/arch/powerpc/platforms/wsp/wsp.c b/arch/powerpc/platforms/wsp/wsp.c deleted file mode 100644 index 58cd1f00e1ef..000000000000 --- a/arch/powerpc/platforms/wsp/wsp.c +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2008-2011, IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include - -#include - -#include "wsp.h" -#include "ics.h" - -#define WSP_SOC_COMPATIBLE "ibm,wsp-soc" -#define PBIC_COMPATIBLE "ibm,wsp-pbic" -#define COPRO_COMPATIBLE "ibm,wsp-coprocessor" - -static int __init wsp_probe_buses(void) -{ - static __initdata struct of_device_id bus_ids[] = { - /* - * every node in between needs to be here or you won't - * find it - */ - { .compatible = WSP_SOC_COMPATIBLE, }, - { .compatible = PBIC_COMPATIBLE, }, - { .compatible = COPRO_COMPATIBLE, }, - {}, - }; - of_platform_bus_probe(NULL, bus_ids, NULL); - - return 0; -} - -void __init wsp_setup_arch(void) -{ - /* init to some ~sane value until calibrate_delay() runs */ - loops_per_jiffy = 50000000; - - scom_init_wsp(); - - /* Setup SMP callback */ -#ifdef CONFIG_SMP - a2_setup_smp(); -#endif -#ifdef CONFIG_PCI - wsp_setup_pci(); -#endif -} - -void __init wsp_setup_irq(void) -{ - wsp_init_irq(); - opb_pic_init(); -} - - -int __init wsp_probe_devices(void) -{ - struct device_node *np; - - /* Our RTC is a ds1500. It seems to be programatically compatible - * with the ds1511 for which we have a driver so let's use that - */ - np = of_find_compatible_node(NULL, NULL, "dallas,ds1500"); - if (np != NULL) { - struct resource res; - if (of_address_to_resource(np, 0, &res) == 0) - platform_device_register_simple("ds1511", 0, &res, 1); - } - - wsp_probe_buses(); - - return 0; -} - -void wsp_halt(void) -{ - u64 val; - scom_map_t m; - struct device_node *dn; - struct device_node *mine; - struct device_node *me; - int rc; - - me = of_get_cpu_node(smp_processor_id(), NULL); - mine = scom_find_parent(me); - - /* This will halt all the A2s but not power off the chip */ - for_each_node_with_property(dn, "scom-controller") { - if (dn == mine) - continue; - m = scom_map(dn, 0, 1); - - /* read-modify-write it so the HW probe does not get - * confused */ - rc = scom_read(m, 0, &val); - if (rc == 0) - scom_write(m, 0, val | 1); - scom_unmap(m); - } - m = scom_map(mine, 0, 1); - rc = scom_read(m, 0, &val); - if (rc == 0) - scom_write(m, 0, val | 1); - /* should never return */ - scom_unmap(m); -} diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h deleted file mode 100644 index a563a8aaf812..000000000000 --- a/arch/powerpc/platforms/wsp/wsp.h +++ /dev/null @@ -1,29 +0,0 @@ -#ifndef __WSP_H -#define __WSP_H - -#include - -/* Devtree compatible strings for major devices */ -#define PCIE_COMPATIBLE "ibm,wsp-pciex" - -extern void wsp_setup_arch(void); -extern void wsp_setup_irq(void); -extern int wsp_probe_devices(void); -extern void wsp_halt(void); - -extern void wsp_setup_pci(void); -extern void scom_init_wsp(void); - -extern void a2_setup_smp(void); -extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, - struct device_node *np); -extern int smp_a2_kick_cpu(int nr); - -extern void opb_pic_init(void); - -/* chroma specific managment */ -extern void wsp_h8_restart(char *cmd); -extern void wsp_h8_power_off(void); -extern void __init wsp_setup_h8(void); - -#endif /* __WSP_H */ diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c deleted file mode 100644 index 9a15e5b39bb8..000000000000 --- a/arch/powerpc/platforms/wsp/wsp_pci.c +++ /dev/null @@ -1,1134 +0,0 @@ -/* - * Copyright 2010 Ben Herrenschmidt, IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software 
Foundation; either version - * 2 of the License, or (at your option) any later version. - */ - -#define DEBUG - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "wsp.h" -#include "wsp_pci.h" -#include "msi.h" - - -/* Max number of TVTs for one table. Only 32-bit tables can use - * multiple TVTs and so the max currently supported is thus 8 - * since only 2G of DMA space is supported - */ -#define MAX_TABLE_TVT_COUNT 8 - -struct wsp_dma_table { - struct list_head link; - struct iommu_table table; - struct wsp_phb *phb; - struct page *tces[MAX_TABLE_TVT_COUNT]; -}; - -/* We support DMA regions from 0...2G in 32bit space (no support for - * 64-bit DMA just yet). Each device gets a separate TCE table (TVT - * entry) with validation enabled (though not supported by SimiCS - * just yet). - * - * To simplify things, we divide this 2G space into N regions based - * on the constant below which could be turned into a tunable eventually - * - * We then assign dynamically those regions to devices as they show up. - * - * We use a bitmap as an allocator for these. - * - * Tables are allocated/created dynamically as devices are discovered, - * multiple TVT entries are used if needed - * - * When 64-bit DMA support is added we should simply use a separate set - * of larger regions (the HW supports 64 TVT entries). We can - * additionally create a bypass region in 64-bit space for performances - * though that would have a cost in term of security. - * - * If you set NUM_DMA32_REGIONS to 1, then a single table is shared - * for all devices and bus/dev/fn validation is disabled - * - * Note that a DMA32 region cannot be smaller than 256M so the max - * supported here for now is 8. We don't yet support sharing regions - * between multiple devices so the max number of devices supported - * is MAX_TABLE_TVT_COUNT. - */ -#define NUM_DMA32_REGIONS 1 - -struct wsp_phb { - struct pci_controller *hose; - - /* Lock controlling access to the list of dma tables. - * It does -not- protect against dma_* operations on - * those tables, those should be stopped before an entry - * is removed from the list. - * - * The lock is also used for error handling operations - */ - spinlock_t lock; - struct list_head dma_tables; - unsigned long dma32_map; - unsigned long dma32_base; - unsigned int dma32_num_regions; - unsigned long dma32_region_size; - - /* Debugfs stuff */ - struct dentry *ddir; - - struct list_head all; -}; -static LIST_HEAD(wsp_phbs); - -//#define cfg_debug(fmt...) pr_debug(fmt) -#define cfg_debug(fmt...) - - -static int wsp_pcie_read_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 *val) -{ - struct pci_controller *hose; - int suboff; - u64 addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - if (offset >= 0x1000) - return PCIBIOS_BAD_REGISTER_NUMBER; - addr = PCIE_REG_CA_ENABLE | - ((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT | - ((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT | - ((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT; - suboff = offset & 3; - - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. 
- */ - - switch (len) { - case 1: - addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT; - out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr); - *val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA) - >> (suboff << 3)) & 0xff; - cfg_debug("read 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n", - bus->number, devfn >> 3, devfn & 7, - offset, suboff, addr, *val); - break; - case 2: - addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT; - out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr); - *val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA) - >> (suboff << 3)) & 0xffff; - cfg_debug("read 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n", - bus->number, devfn >> 3, devfn & 7, - offset, suboff, addr, *val); - break; - default: - addr |= 0xful << PCIE_REG_CA_BE_SHIFT; - out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr); - *val = in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA); - cfg_debug("read 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n", - bus->number, devfn >> 3, devfn & 7, - offset, suboff, addr, *val); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static int wsp_pcie_write_config(struct pci_bus *bus, unsigned int devfn, - int offset, int len, u32 val) -{ - struct pci_controller *hose; - int suboff; - u64 addr; - - hose = pci_bus_to_host(bus); - if (hose == NULL) - return PCIBIOS_DEVICE_NOT_FOUND; - if (offset >= 0x1000) - return PCIBIOS_BAD_REGISTER_NUMBER; - addr = PCIE_REG_CA_ENABLE | - ((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT | - ((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT | - ((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT; - suboff = offset & 3; - - /* - * Note: the caller has already checked that offset is - * suitably aligned and that len is 1, 2 or 4. - */ - switch (len) { - case 1: - addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT; - val <<= suboff << 3; - out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr); - out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val); - cfg_debug("write 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n", - bus->number, devfn >> 3, devfn & 7, - offset, suboff, addr, val); - break; - case 2: - addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT; - val <<= suboff << 3; - out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr); - out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val); - cfg_debug("write 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n", - bus->number, devfn >> 3, devfn & 7, - offset, suboff, addr, val); - break; - default: - addr |= 0xful << PCIE_REG_CA_BE_SHIFT; - out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr); - out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val); - cfg_debug("write 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n", - bus->number, devfn >> 3, devfn & 7, - offset, suboff, addr, val); - break; - } - return PCIBIOS_SUCCESSFUL; -} - -static struct pci_ops wsp_pcie_pci_ops = -{ - .read = wsp_pcie_read_config, - .write = wsp_pcie_write_config, -}; - -#define TCE_SHIFT 12 -#define TCE_PAGE_SIZE (1 << TCE_SHIFT) -#define TCE_PCI_WRITE 0x2 /* write from PCI allowed */ -#define TCE_PCI_READ 0x1 /* read from PCI allowed */ -#define TCE_RPN_MASK 0x3fffffffffful /* 42-bit RPN (4K pages) */ -#define TCE_RPN_SHIFT 12 - -//#define dma_debug(fmt...) pr_debug(fmt) -#define dma_debug(fmt...) 
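The TCE_* constants above determine the whole translation-entry format this driver uses: a 4K IOMMU page, a 42-bit real page number, and two permission bits in the low bits of the entry. As a minimal standalone sketch of that encoding, assuming the same layout that tce_build_wsp() below applies (the helper name and the plain stdint types are illustrative, not part of the driver):

#include <stdint.h>

#define TCE_SHIFT	12
#define TCE_PCI_WRITE	0x2			/* write from PCI allowed */
#define TCE_PCI_READ	0x1			/* read from PCI allowed */
#define TCE_RPN_MASK	0x3fffffffffful		/* 42-bit RPN (4K pages) */
#define TCE_RPN_SHIFT	12

/* Encode one TCE: permission bits low, real page number shifted above them. */
static uint64_t encode_tce(uint64_t phys_addr, int writable)
{
	uint64_t rpn = phys_addr >> TCE_SHIFT;	/* real page number */
	uint64_t tce = TCE_PCI_READ;		/* reads always allowed */

	if (writable)
		tce |= TCE_PCI_WRITE;
	return tce | ((rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
}

This mirrors the store performed in the loop that follows, where proto_tce carries the permission bits and __pa(uaddr) >> TCE_SHIFT supplies the RPN.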
- -static int tce_build_wsp(struct iommu_table *tbl, long index, long npages, - unsigned long uaddr, enum dma_data_direction direction, - struct dma_attrs *attrs) -{ - struct wsp_dma_table *ptbl = container_of(tbl, - struct wsp_dma_table, - table); - u64 proto_tce; - u64 *tcep; - u64 rpn; - - proto_tce = TCE_PCI_READ; -#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS - proto_tce |= TCE_PCI_WRITE; -#else - if (direction != DMA_TO_DEVICE) - proto_tce |= TCE_PCI_WRITE; -#endif - - /* XXX Make this faster by factoring out the page address for - * within a TCE table - */ - while (npages--) { - /* We don't use it->base as the table can be scattered */ - tcep = (u64 *)page_address(ptbl->tces[index >> 16]); - tcep += (index & 0xffff); - - /* can't move this out since we might cross LMB boundary */ - rpn = __pa(uaddr) >> TCE_SHIFT; - *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT; - - dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n", - tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT_4K); - - uaddr += TCE_PAGE_SIZE; - index++; - } - return 0; -} - -static void tce_free_wsp(struct iommu_table *tbl, long index, long npages) -{ - struct wsp_dma_table *ptbl = container_of(tbl, - struct wsp_dma_table, - table); -#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS - struct pci_controller *hose = ptbl->phb->hose; -#endif - u64 *tcep; - - /* XXX Make this faster by factoring out the page address for - * within a TCE table. Also use line-kill option to kill multiple - * TCEs at once - */ - while (npages--) { - /* We don't use it->base as the table can be scattered */ - tcep = (u64 *)page_address(ptbl->tces[index >> 16]); - tcep += (index & 0xffff); - dma_debug("[DMA] TCE %p cleared\n", tcep); - *tcep = 0; -#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS - /* Don't write there since it would pollute other MMIO accesses */ - out_be64(hose->cfg_data + PCIE_REG_TCE_KILL, - PCIE_REG_TCEKILL_SINGLE | PCIE_REG_TCEKILL_PS_4K | - (__pa(tcep) & PCIE_REG_TCEKILL_ADDR_MASK)); -#endif - index++; - } -} - -static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb, - unsigned int region, - struct pci_dev *validate) -{ - struct pci_controller *hose = phb->hose; - unsigned long size = phb->dma32_region_size; - unsigned long addr = phb->dma32_region_size * region + phb->dma32_base; - struct wsp_dma_table *tbl; - int tvts_per_table, i, tvt, nid; - unsigned long flags; - - nid = of_node_to_nid(phb->hose->dn); - - /* Calculate how many TVTs are needed */ - tvts_per_table = size / 0x10000000; - if (tvts_per_table == 0) - tvts_per_table = 1; - - /* Calculate the base TVT index. We know all tables have the same - * size so we just do a simple multiply here - */ - tvt = region * tvts_per_table; - - pr_debug(" Region : %d\n", region); - pr_debug(" DMA range : 0x%08lx..0x%08lx\n", addr, addr + size - 1); - pr_debug(" Number of TVTs : %d\n", tvts_per_table); - pr_debug(" Base TVT : %d\n", tvt); - pr_debug(" Node : %d\n", nid); - - tbl = kzalloc_node(sizeof(struct wsp_dma_table), GFP_KERNEL, nid); - if (!tbl) - return ERR_PTR(-ENOMEM); - tbl->phb = phb; - - /* Create as many TVTs as needed, each represents 256M at most */ - for (i = 0; i < tvts_per_table; i++) { - u64 tvt_data1, tvt_data0; - - /* Allocate table. 
We always use a 4K TCE size for now, so - * one table is always 8 * (256M / 4K) == 512K - */ - tbl->tces[i] = alloc_pages_node(nid, GFP_KERNEL, get_order(0x80000)); - if (tbl->tces[i] == NULL) - goto fail; - memset(page_address(tbl->tces[i]), 0, 0x80000); - - pr_debug(" TCE table %d at : %p\n", i, page_address(tbl->tces[i])); - - /* Table size. We currently set it to be the whole 256M region */ - tvt_data0 = 2ull << IODA_TVT0_TCE_TABLE_SIZE_SHIFT; - /* IO page size set to 4K */ - tvt_data1 = 1ull << IODA_TVT1_IO_PAGE_SIZE_SHIFT; - /* Shift in the address */ - tvt_data0 |= __pa(page_address(tbl->tces[i])) << IODA_TVT0_TTA_SHIFT; - - /* Validation stuff. We only validate the full bus/dev/fn for now; - * one day maybe we can group devices, but that isn't the case - * at the moment - */ - if (validate) { - tvt_data0 |= IODA_TVT0_BUSNUM_VALID_MASK; - tvt_data0 |= validate->bus->number; - tvt_data1 |= IODA_TVT1_DEVNUM_VALID; - tvt_data1 |= ((u64)PCI_SLOT(validate->devfn)) - << IODA_TVT1_DEVNUM_VALUE_SHIFT; - tvt_data1 |= IODA_TVT1_FUNCNUM_VALID; - tvt_data1 |= ((u64)PCI_FUNC(validate->devfn)) - << IODA_TVT1_FUNCNUM_VALUE_SHIFT; - } - - /* XX PE number is always 0 for now */ - - /* Program the values using the PHB lock */ - spin_lock_irqsave(&phb->lock, flags); - out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR, - (tvt + i) | PCIE_REG_IODA_AD_TBL_TVT); - out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, tvt_data1); - out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, tvt_data0); - spin_unlock_irqrestore(&phb->lock, flags); - } - - /* Init bits and pieces */ - tbl->table.it_blocksize = 16; - tbl->table.it_page_shift = IOMMU_PAGE_SHIFT_4K; - tbl->table.it_offset = addr >> tbl->table.it_page_shift; - tbl->table.it_size = size >> tbl->table.it_page_shift; - - /* - * It's already blank but we clear it anyway. - * Consider an additional interface that makes clearing optional - */ - iommu_init_table(&tbl->table, nid); - - list_add(&tbl->link, &phb->dma_tables); - return tbl; - - fail: - pr_debug(" Failed to allocate a 256M TCE table !\n"); - for (i = 0; i < tvts_per_table; i++) - if (tbl->tces[i]) - __free_pages(tbl->tces[i], get_order(0x80000)); - kfree(tbl); - return ERR_PTR(-ENOMEM); -} - -static void wsp_pci_dma_dev_setup(struct pci_dev *pdev) -{ - struct dev_archdata *archdata = &pdev->dev.archdata; - struct pci_controller *hose = pci_bus_to_host(pdev->bus); - struct wsp_phb *phb = hose->private_data; - struct wsp_dma_table *table = NULL; - unsigned long flags; - int i; - - /* Don't assign an iommu table to a bridge */ - if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) - return; - - pr_debug("%s: Setting up DMA...\n", pci_name(pdev)); - - spin_lock_irqsave(&phb->lock, flags); - - /* If there is only one region, check if it already exists */ - if (phb->dma32_num_regions == 1) { - spin_unlock_irqrestore(&phb->lock, flags); - if (list_empty(&phb->dma_tables)) - table = wsp_pci_create_dma32_table(phb, 0, NULL); - else - table = list_first_entry(&phb->dma_tables, - struct wsp_dma_table, - link); - } else { - /* else find a free region */ - for (i = 0; i < phb->dma32_num_regions && !table; i++) { - if (__test_and_set_bit(i, &phb->dma32_map)) - continue; - spin_unlock_irqrestore(&phb->lock, flags); - table = wsp_pci_create_dma32_table(phb, i, pdev); - } - } - - /* Check if we got an error */ - if (IS_ERR(table)) { - pr_err("%s: Failed to create DMA table, err %ld !\n", - pci_name(pdev), PTR_ERR(table)); - return; - } - - /* Or a valid table */ - if (table) { - pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n", - pci_name(pdev), - table->table.it_offset << IOMMU_PAGE_SHIFT_4K, - (table->table.it_offset << IOMMU_PAGE_SHIFT_4K) - + phb->dma32_region_size - 1); - archdata->dma_data.iommu_table_base = &table->table; - return; - } - - /* Or no room */ - spin_unlock_irqrestore(&phb->lock, flags); - pr_err("%s: Out of DMA space !\n", pci_name(pdev)); -} - -static void __init wsp_pcie_configure_hw(struct pci_controller *hose) -{ - u64 val; - int i; - -#define DUMP_REG(x) \ - pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x)) - - /* - * Some WSP variants have a bogus class code by default in the PCI-E - * root complex's built-in P2P bridge - */ - val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1); - pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val); - out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1, - (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8)); - pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1)); - -#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS - /* XXX Disable TCE caching, it doesn't work on DD1 */ - out_be64(hose->cfg_data + 0xe50, - in_be64(hose->cfg_data + 0xe50) | (3ull << 62)); - printk("PCI-E DEBUG CONTROL 5 = 0x%llx\n", in_be64(hose->cfg_data + 0xe50)); -#endif - - /* Configure M32A and IO. 
IO is hard-wired to be 1M for now */ - out_be64(hose->cfg_data + PCIE_REG_IO_BASE_ADDR, hose->io_base_phys); - out_be64(hose->cfg_data + PCIE_REG_IO_BASE_MASK, - (~(hose->io_resource.end - hose->io_resource.start)) & - 0x3fffffff000ul); - out_be64(hose->cfg_data + PCIE_REG_IO_START_ADDR, 0 | 1); - - out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_ADDR, - hose->mem_resources[0].start); - printk("Want to write to M32A_BASE_MASK : 0x%llx\n", - (~(hose->mem_resources[0].end - - hose->mem_resources[0].start)) & 0x3ffffff0000ul); - out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_MASK, - (~(hose->mem_resources[0].end - - hose->mem_resources[0].start)) & 0x3ffffff0000ul); - out_be64(hose->cfg_data + PCIE_REG_M32A_START_ADDR, - (hose->mem_resources[0].start - hose->mem_offset[0]) | 1); - - /* Clear all TVT entries - * - * XX Might get TVT count from the device-tree - */ - for (i = 0; i < IODA_TVT_COUNT; i++) { - out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR, - PCIE_REG_IODA_AD_TBL_TVT | i); - out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, 0); - out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, 0); - } - - /* Kill the TCE cache */ - out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG, - in_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG) | - PCIE_REG_PHBC_64B_TCE_EN); - - /* Enable 32 & 64-bit MSIs, IO space and M32A */ - val = PCIE_REG_PHBC_32BIT_MSI_EN | - PCIE_REG_PHBC_IO_EN | - PCIE_REG_PHBC_64BIT_MSI_EN | - PCIE_REG_PHBC_M32A_EN; - if (iommu_is_off) - val |= PCIE_REG_PHBC_DMA_XLATE_BYPASS; - pr_debug("Will write config: 0x%llx\n", val); - out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG, val); - - /* Enable error reporting */ - out_be64(hose->cfg_data + 0xe00, - in_be64(hose->cfg_data + 0xe00) | 0x0008000000000000ull); - - /* Mask an error that's generated when doing config space probing - * - * XXX Maybe we should only mask it around config space cycles... that, or - * ignore it when we know we had a config space cycle recently? - */ - out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS_MASK, 0x8000000000000000ull); - out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS_MASK, 0x8000000000000000ull); - - /* Enable UTL errors; for now, all of them go to UTL irq 1 - * - * We similarly mask one UTL error apparently caused during normal - * probing. 
We also mask the link up error - */ - out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_ERR_SEV, 0); - out_be64(hose->cfg_data + PCIE_UTL_RC_ERR_SEVERITY, 0); - out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_ERROR_SEV, 0); - out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_IRQ_EN, 0xffffffff00000000ull); - out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_IRQ_EN, 0xff5fffff00000000ull); - out_be64(hose->cfg_data + PCIE_UTL_EP_ERR_IRQ_EN, 0xffffffff00000000ull); - - DUMP_REG(PCIE_REG_IO_BASE_ADDR); - DUMP_REG(PCIE_REG_IO_BASE_MASK); - DUMP_REG(PCIE_REG_IO_START_ADDR); - DUMP_REG(PCIE_REG_M32A_BASE_ADDR); - DUMP_REG(PCIE_REG_M32A_BASE_MASK); - DUMP_REG(PCIE_REG_M32A_START_ADDR); - DUMP_REG(PCIE_REG_M32B_BASE_ADDR); - DUMP_REG(PCIE_REG_M32B_BASE_MASK); - DUMP_REG(PCIE_REG_M32B_START_ADDR); - DUMP_REG(PCIE_REG_M64_BASE_ADDR); - DUMP_REG(PCIE_REG_M64_BASE_MASK); - DUMP_REG(PCIE_REG_M64_START_ADDR); - DUMP_REG(PCIE_REG_PHB_CONFIG); -} - -static void wsp_pci_wait_io_idle(struct wsp_phb *phb, unsigned long port) -{ - u64 val; - int i; - - for (i = 0; i < 10000; i++) { - val = in_be64(phb->hose->cfg_data + 0xe08); - if ((val & 0x1900000000000000ull) == 0x0100000000000000ull) - return; - udelay(1); - } - pr_warning("PCI IO timeout on domain %d port 0x%lx\n", - phb->hose->global_number, port); -} - -#define DEF_PCI_AC_RET_pio(name, ret, at, al, aa) \ -static ret wsp_pci_##name at \ -{ \ - struct iowa_bus *bus; \ - struct wsp_phb *phb; \ - unsigned long flags; \ - ret rval; \ - bus = iowa_pio_find_bus(aa); \ - WARN_ON(!bus); \ - phb = bus->private; \ - spin_lock_irqsave(&phb->lock, flags); \ - wsp_pci_wait_io_idle(phb, aa); \ - rval = __do_##name al; \ - spin_unlock_irqrestore(&phb->lock, flags); \ - return rval; \ -} - -#define DEF_PCI_AC_NORET_pio(name, at, al, aa) \ -static void wsp_pci_##name at \ -{ \ - struct iowa_bus *bus; \ - struct wsp_phb *phb; \ - unsigned long flags; \ - bus = iowa_pio_find_bus(aa); \ - WARN_ON(!bus); \ - phb = bus->private; \ - spin_lock_irqsave(&phb->lock, flags); \ - wsp_pci_wait_io_idle(phb, aa); \ - __do_##name al; \ - spin_unlock_irqrestore(&phb->lock, flags); \ -} - -#define DEF_PCI_AC_RET_mem(name, ret, at, al, aa) -#define DEF_PCI_AC_NORET_mem(name, at, al, aa) - -#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \ - DEF_PCI_AC_RET_##space(name, ret, at, al, aa) - -#define DEF_PCI_AC_NORET(name, at, al, space, aa) \ - DEF_PCI_AC_NORET_##space(name, at, al, aa) \ - - -#include - -#undef DEF_PCI_AC_RET -#undef DEF_PCI_AC_NORET - -static struct ppc_pci_io wsp_pci_iops = { - .inb = wsp_pci_inb, - .inw = wsp_pci_inw, - .inl = wsp_pci_inl, - .outb = wsp_pci_outb, - .outw = wsp_pci_outw, - .outl = wsp_pci_outl, - .insb = wsp_pci_insb, - .insw = wsp_pci_insw, - .insl = wsp_pci_insl, - .outsb = wsp_pci_outsb, - .outsw = wsp_pci_outsw, - .outsl = wsp_pci_outsl, -}; - -static int __init wsp_setup_one_phb(struct device_node *np) -{ - struct pci_controller *hose; - struct wsp_phb *phb; - - pr_info("PCI: Setting up PCIe host bridge 0x%s\n", np->full_name); - - phb = zalloc_maybe_bootmem(sizeof(struct wsp_phb), GFP_KERNEL); - if (!phb) - return -ENOMEM; - hose = pcibios_alloc_controller(np); - if (!hose) { - /* Can't really free the phb */ - return -ENOMEM; - } - hose->private_data = phb; - phb->hose = hose; - - INIT_LIST_HEAD(&phb->dma_tables); - spin_lock_init(&phb->lock); - - /* XXX Use bus-range property ? 
*/ - hose->first_busno = 0; - hose->last_busno = 0xff; - - /* We use cfg_data as the address for the whole bridge MMIO space - */ - hose->cfg_data = of_iomap(hose->dn, 0); - - pr_debug("PCIe registers mapped at 0x%p\n", hose->cfg_data); - - /* Get the ranges of the device-tree */ - pci_process_bridge_OF_ranges(hose, np, 0); - - /* XXX Force re-assigning of everything for now */ - pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC | - PCI_ENABLE_PROC_DOMAINS); - - /* Calculate how the TCE space is divided */ - phb->dma32_base = 0; - phb->dma32_num_regions = NUM_DMA32_REGIONS; - if (phb->dma32_num_regions > MAX_TABLE_TVT_COUNT) { - pr_warning("IOMMU: Clamped to %d DMA32 regions\n", - MAX_TABLE_TVT_COUNT); - phb->dma32_num_regions = MAX_TABLE_TVT_COUNT; - } - phb->dma32_region_size = 0x80000000 / phb->dma32_num_regions; - - BUG_ON(!is_power_of_2(phb->dma32_region_size)); - - /* Setup config ops */ - hose->ops = &wsp_pcie_pci_ops; - - /* Configure the HW */ - wsp_pcie_configure_hw(hose); - - /* Instantiate IO workarounds */ - iowa_register_bus(hose, &wsp_pci_iops, NULL, phb); -#ifdef CONFIG_PCI_MSI - wsp_setup_phb_msi(hose); -#endif - - /* Add to global list */ - list_add(&phb->all, &wsp_phbs); - - return 0; -} - -void __init wsp_setup_pci(void) -{ - struct device_node *np; - int rc; - - /* Find host bridges */ - for_each_compatible_node(np, "pciex", PCIE_COMPATIBLE) { - rc = wsp_setup_one_phb(np); - if (rc) - pr_err("Failed to setup PCIe bridge %s, rc=%d\n", - np->full_name, rc); - } - - /* Establish device-tree linkage */ - pci_devs_phb_init(); - - /* Set DMA ops to use TCEs */ - if (iommu_is_off) { - pr_info("PCI-E: Disabled TCEs, using direct DMA\n"); - set_pci_dma_ops(&dma_direct_ops); - } else { - ppc_md.pci_dma_dev_setup = wsp_pci_dma_dev_setup; - ppc_md.tce_build = tce_build_wsp; - ppc_md.tce_free = tce_free_wsp; - set_pci_dma_ops(&dma_iommu_ops); - } -} - -#define err_debug(fmt...) pr_debug(fmt) -//#define err_debug(fmt...) 
- -static int __init wsp_pci_get_err_irq_no_dt(struct device_node *np) -{ - const u32 *prop; - int hw_irq; - - /* Ok, no interrupts property, let's try to find our child P2P */ - np = of_get_next_child(np, NULL); - if (np == NULL) - return 0; - - /* Grab its interrupt map */ - prop = of_get_property(np, "interrupt-map", NULL); - if (prop == NULL) - return 0; - - /* Grab one of the interrupts in there, keep the low 4 bits */ - hw_irq = prop[5] & 0xf; - - /* 0..4 for PHB 0 and 5..9 for PHB 1 */ - if (hw_irq < 5) - hw_irq = 4; - else - hw_irq = 9; - hw_irq |= prop[5] & ~0xf; - - err_debug("PCI: Using 0x%x as error IRQ for %s\n", - hw_irq, np->parent->full_name); - return irq_create_mapping(NULL, hw_irq); -} - -static const struct { - u32 offset; - const char *name; -} wsp_pci_regs[] = { -#define DREG(x) { PCIE_REG_##x, #x } -#define DUTL(x) { PCIE_UTL_##x, "UTL_" #x } - /* Architected registers except CONFIG_ and IODA - * to avoid side effects - */ - DREG(DMA_CHAN_STATUS), - DREG(CPU_LOADSTORE_STATUS), - DREG(LOCK0), - DREG(LOCK1), - DREG(PHB_CONFIG), - DREG(IO_BASE_ADDR), - DREG(IO_BASE_MASK), - DREG(IO_START_ADDR), - DREG(M32A_BASE_ADDR), - DREG(M32A_BASE_MASK), - DREG(M32A_START_ADDR), - DREG(M32B_BASE_ADDR), - DREG(M32B_BASE_MASK), - DREG(M32B_START_ADDR), - DREG(M64_BASE_ADDR), - DREG(M64_BASE_MASK), - DREG(M64_START_ADDR), - DREG(TCE_KILL), - DREG(LOCK2), - DREG(PHB_GEN_CAP), - DREG(PHB_TCE_CAP), - DREG(PHB_IRQ_CAP), - DREG(PHB_EEH_CAP), - DREG(PAPR_ERR_INJ_CONTROL), - DREG(PAPR_ERR_INJ_ADDR), - DREG(PAPR_ERR_INJ_MASK), - - /* UTL core regs */ - DUTL(SYS_BUS_CONTROL), - DUTL(STATUS), - DUTL(SYS_BUS_AGENT_STATUS), - DUTL(SYS_BUS_AGENT_ERR_SEV), - DUTL(SYS_BUS_AGENT_IRQ_EN), - DUTL(SYS_BUS_BURST_SZ_CONF), - DUTL(REVISION_ID), - DUTL(OUT_POST_HDR_BUF_ALLOC), - DUTL(OUT_POST_DAT_BUF_ALLOC), - DUTL(IN_POST_HDR_BUF_ALLOC), - DUTL(IN_POST_DAT_BUF_ALLOC), - DUTL(OUT_NP_BUF_ALLOC), - DUTL(IN_NP_BUF_ALLOC), - DUTL(PCIE_TAGS_ALLOC), - DUTL(GBIF_READ_TAGS_ALLOC), - - DUTL(PCIE_PORT_CONTROL), - DUTL(PCIE_PORT_STATUS), - DUTL(PCIE_PORT_ERROR_SEV), - DUTL(PCIE_PORT_IRQ_EN), - DUTL(RC_STATUS), - DUTL(RC_ERR_SEVERITY), - DUTL(RC_IRQ_EN), - DUTL(EP_STATUS), - DUTL(EP_ERR_SEVERITY), - DUTL(EP_ERR_IRQ_EN), - DUTL(PCI_PM_CTRL1), - DUTL(PCI_PM_CTRL2), - - /* PCIe stack regs */ - DREG(SYSTEM_CONFIG1), - DREG(SYSTEM_CONFIG2), - DREG(EP_SYSTEM_CONFIG), - DREG(EP_FLR), - DREG(EP_BAR_CONFIG), - DREG(LINK_CONFIG), - DREG(PM_CONFIG), - DREG(DLP_CONTROL), - DREG(DLP_STATUS), - DREG(ERR_REPORT_CONTROL), - DREG(SLOT_CONTROL1), - DREG(SLOT_CONTROL2), - DREG(UTL_CONFIG), - DREG(BUFFERS_CONFIG), - DREG(ERROR_INJECT), - DREG(SRIOV_CONFIG), - DREG(PF0_SRIOV_STATUS), - DREG(PF1_SRIOV_STATUS), - DREG(PORT_NUMBER), - DREG(POR_SYSTEM_CONFIG), - - /* Internal logic regs */ - DREG(PHB_VERSION), - DREG(RESET), - DREG(PHB_CONTROL), - DREG(PHB_TIMEOUT_CONTROL1), - DREG(PHB_QUIESCE_DMA), - DREG(PHB_DMA_READ_TAG_ACTV), - DREG(PHB_TCE_READ_TAG_ACTV), - - /* FIR registers */ - DREG(LEM_FIR_ACCUM), - DREG(LEM_FIR_AND_MASK), - DREG(LEM_FIR_OR_MASK), - DREG(LEM_ACTION0), - DREG(LEM_ACTION1), - DREG(LEM_ERROR_MASK), - DREG(LEM_ERROR_AND_MASK), - DREG(LEM_ERROR_OR_MASK), - - /* Error traps registers */ - DREG(PHB_ERR_STATUS), - DREG(PHB_ERR_STATUS), - DREG(PHB_ERR1_STATUS), - DREG(PHB_ERR_INJECT), - DREG(PHB_ERR_LEM_ENABLE), - DREG(PHB_ERR_IRQ_ENABLE), - DREG(PHB_ERR_FREEZE_ENABLE), - DREG(PHB_ERR_SIDE_ENABLE), - DREG(PHB_ERR_LOG_0), - DREG(PHB_ERR_LOG_1), - DREG(PHB_ERR_STATUS_MASK), - DREG(PHB_ERR1_STATUS_MASK), - DREG(MMIO_ERR_STATUS), 
- DREG(MMIO_ERR1_STATUS), - DREG(MMIO_ERR_INJECT), - DREG(MMIO_ERR_LEM_ENABLE), - DREG(MMIO_ERR_IRQ_ENABLE), - DREG(MMIO_ERR_FREEZE_ENABLE), - DREG(MMIO_ERR_SIDE_ENABLE), - DREG(MMIO_ERR_LOG_0), - DREG(MMIO_ERR_LOG_1), - DREG(MMIO_ERR_STATUS_MASK), - DREG(MMIO_ERR1_STATUS_MASK), - DREG(DMA_ERR_STATUS), - DREG(DMA_ERR1_STATUS), - DREG(DMA_ERR_INJECT), - DREG(DMA_ERR_LEM_ENABLE), - DREG(DMA_ERR_IRQ_ENABLE), - DREG(DMA_ERR_FREEZE_ENABLE), - DREG(DMA_ERR_SIDE_ENABLE), - DREG(DMA_ERR_LOG_0), - DREG(DMA_ERR_LOG_1), - DREG(DMA_ERR_STATUS_MASK), - DREG(DMA_ERR1_STATUS_MASK), - - /* Debug and Trace registers */ - DREG(PHB_DEBUG_CONTROL0), - DREG(PHB_DEBUG_STATUS0), - DREG(PHB_DEBUG_CONTROL1), - DREG(PHB_DEBUG_STATUS1), - DREG(PHB_DEBUG_CONTROL2), - DREG(PHB_DEBUG_STATUS2), - DREG(PHB_DEBUG_CONTROL3), - DREG(PHB_DEBUG_STATUS3), - DREG(PHB_DEBUG_CONTROL4), - DREG(PHB_DEBUG_STATUS4), - DREG(PHB_DEBUG_CONTROL5), - DREG(PHB_DEBUG_STATUS5), - - /* Don't seem to exist ... - DREG(PHB_DEBUG_CONTROL6), - DREG(PHB_DEBUG_STATUS6), - */ -}; - -static int wsp_pci_regs_show(struct seq_file *m, void *private) -{ - struct wsp_phb *phb = m->private; - struct pci_controller *hose = phb->hose; - int i; - - for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) { - /* Skip write-only regs */ - if (wsp_pci_regs[i].offset == 0xc08 || - wsp_pci_regs[i].offset == 0xc10 || - wsp_pci_regs[i].offset == 0xc38 || - wsp_pci_regs[i].offset == 0xc40) - continue; - seq_printf(m, "0x%03x: 0x%016llx %s\n", - wsp_pci_regs[i].offset, - in_be64(hose->cfg_data + wsp_pci_regs[i].offset), - wsp_pci_regs[i].name); - } - return 0; -} - -static int wsp_pci_regs_open(struct inode *inode, struct file *file) -{ - return single_open(file, wsp_pci_regs_show, inode->i_private); -} - -static const struct file_operations wsp_pci_regs_fops = { - .open = wsp_pci_regs_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int wsp_pci_reg_set(void *data, u64 val) -{ - out_be64((void __iomem *)data, val); - return 0; -} - -static int wsp_pci_reg_get(void *data, u64 *val) -{ - *val = in_be64((void __iomem *)data); - return 0; -} - -DEFINE_SIMPLE_ATTRIBUTE(wsp_pci_reg_fops, wsp_pci_reg_get, wsp_pci_reg_set, "0x%llx\n"); - -static irqreturn_t wsp_pci_err_irq(int irq, void *dev_id) -{ - struct wsp_phb *phb = dev_id; - struct pci_controller *hose = phb->hose; - irqreturn_t handled = IRQ_NONE; - struct wsp_pcie_err_log_data ed; - - pr_err("PCI: Error interrupt on %s (PHB %d)\n", - hose->dn->full_name, hose->global_number); - again: - memset(&ed, 0, sizeof(ed)); - - /* Read and clear UTL errors */ - ed.utl_sys_err = in_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS); - if (ed.utl_sys_err) - out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS, ed.utl_sys_err); - ed.utl_port_err = in_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS); - if (ed.utl_port_err) - out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS, ed.utl_port_err); - ed.utl_rc_err = in_be64(hose->cfg_data + PCIE_UTL_RC_STATUS); - if (ed.utl_rc_err) - out_be64(hose->cfg_data + PCIE_UTL_RC_STATUS, ed.utl_rc_err); - - /* Read and clear main trap errors */ - ed.phb_err = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS); - if (ed.phb_err) { - ed.phb_err1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS); - ed.phb_log0 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_0); - ed.phb_log1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_1); - out_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS, 0); - out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS, 0); - } - ed.mmio_err = 
in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS); - if (ed.mmio_err) { - ed.mmio_err1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS); - ed.mmio_log0 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_0); - ed.mmio_log1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_1); - out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS, 0); - out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS, 0); - } - ed.dma_err = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS); - if (ed.dma_err) { - ed.dma_err1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS); - ed.dma_log0 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_0); - ed.dma_log1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_1); - out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS, 0); - out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS, 0); - } - - /* Now print things out */ - if (ed.phb_err) { - pr_err(" PHB Error Status : 0x%016llx\n", ed.phb_err); - pr_err(" PHB First Error Status: 0x%016llx\n", ed.phb_err1); - pr_err(" PHB Error Log 0 : 0x%016llx\n", ed.phb_log0); - pr_err(" PHB Error Log 1 : 0x%016llx\n", ed.phb_log1); - } - if (ed.mmio_err) { - pr_err(" MMIO Error Status : 0x%016llx\n", ed.mmio_err); - pr_err(" MMIO First Error Status: 0x%016llx\n", ed.mmio_err1); - pr_err(" MMIO Error Log 0 : 0x%016llx\n", ed.mmio_log0); - pr_err(" MMIO Error Log 1 : 0x%016llx\n", ed.mmio_log1); - } - if (ed.dma_err) { - pr_err(" DMA Error Status : 0x%016llx\n", ed.dma_err); - pr_err(" DMA First Error Status: 0x%016llx\n", ed.dma_err1); - pr_err(" DMA Error Log 0 : 0x%016llx\n", ed.dma_log0); - pr_err(" DMA Error Log 1 : 0x%016llx\n", ed.dma_log1); - } - if (ed.utl_sys_err) - pr_err(" UTL Sys Error Status : 0x%016llx\n", ed.utl_sys_err); - if (ed.utl_port_err) - pr_err(" UTL Port Error Status : 0x%016llx\n", ed.utl_port_err); - if (ed.utl_rc_err) - pr_err(" UTL RC Error Status : 0x%016llx\n", ed.utl_rc_err); - - /* Interrupts are caused by the error traps. 
If we had any error there - * we loop again in case the UTL buffered some new errors between - * going there and going to the traps - */ - if (ed.dma_err || ed.mmio_err || ed.phb_err) { - handled = IRQ_HANDLED; - goto again; - } - return handled; -} - -static void __init wsp_setup_pci_err_reporting(struct wsp_phb *phb) -{ - struct pci_controller *hose = phb->hose; - int err_irq, i, rc; - char fname[16]; - - /* Create a debugfs file for that PHB */ - sprintf(fname, "phb%d", phb->hose->global_number); - phb->ddir = debugfs_create_dir(fname, powerpc_debugfs_root); - - /* Some useful debug output */ - if (phb->ddir) { - struct dentry *d = debugfs_create_dir("regs", phb->ddir); - char tmp[64]; - - for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) { - sprintf(tmp, "%03x_%s", wsp_pci_regs[i].offset, - wsp_pci_regs[i].name); - debugfs_create_file(tmp, 0600, d, - hose->cfg_data + wsp_pci_regs[i].offset, - &wsp_pci_reg_fops); - } - debugfs_create_file("all_regs", 0600, phb->ddir, phb, &wsp_pci_regs_fops); - } - - /* Find the IRQ number for that PHB */ - err_irq = irq_of_parse_and_map(hose->dn, 0); - if (err_irq == 0) - /* XXX Error IRQ missing from the device-tree */ - err_irq = wsp_pci_get_err_irq_no_dt(hose->dn); - if (err_irq == 0) { - pr_err("PCI: Failed to fetch error interrupt for %s\n", - hose->dn->full_name); - return; - } - /* Request it */ - rc = request_irq(err_irq, wsp_pci_err_irq, 0, "wsp_pci error", phb); - if (rc) { - pr_err("PCI: Failed to request interrupt for %s\n", - hose->dn->full_name); - } - /* Enable interrupts for all errors for now */ - out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_IRQ_ENABLE, 0xffffffffffffffffull); - out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_IRQ_ENABLE, 0xffffffffffffffffull); - out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_IRQ_ENABLE, 0xffffffffffffffffull); -} - -/* - * This is called later to hook up with the error interrupt - */ -static int __init wsp_setup_pci_late(void) -{ - struct wsp_phb *phb; - - list_for_each_entry(phb, &wsp_phbs, all) - wsp_setup_pci_err_reporting(phb); - - return 0; -} -arch_initcall(wsp_setup_pci_late); diff --git a/arch/powerpc/platforms/wsp/wsp_pci.h b/arch/powerpc/platforms/wsp/wsp_pci.h deleted file mode 100644 index 52e9bd95250d..000000000000 --- a/arch/powerpc/platforms/wsp/wsp_pci.h +++ /dev/null @@ -1,268 +0,0 @@ -/* - * Copyright 2010 Ben Herrenschmidt, IBM Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#ifndef __WSP_PCI_H -#define __WSP_PCI_H - -/* Architected registers */ -#define PCIE_REG_DMA_CHAN_STATUS 0x110 -#define PCIE_REG_CPU_LOADSTORE_STATUS 0x120 - -#define PCIE_REG_CONFIG_DATA 0x130 -#define PCIE_REG_LOCK0 0x138 -#define PCIE_REG_CONFIG_ADDRESS 0x140 -#define PCIE_REG_CA_ENABLE 0x8000000000000000ull -#define PCIE_REG_CA_BUS_MASK 0x0ff0000000000000ull -#define PCIE_REG_CA_BUS_SHIFT (20+32) -#define PCIE_REG_CA_DEV_MASK 0x000f800000000000ull -#define PCIE_REG_CA_DEV_SHIFT (15+32) -#define PCIE_REG_CA_FUNC_MASK 0x0000700000000000ull -#define PCIE_REG_CA_FUNC_SHIFT (12+32) -#define PCIE_REG_CA_REG_MASK 0x00000fff00000000ull -#define PCIE_REG_CA_REG_SHIFT ( 0+32) -#define PCIE_REG_CA_BE_MASK 0x00000000f0000000ull -#define PCIE_REG_CA_BE_SHIFT ( 28) -#define PCIE_REG_LOCK1 0x148 - -#define PCIE_REG_PHB_CONFIG 0x160 -#define PCIE_REG_PHBC_64B_TCE_EN 0x2000000000000000ull -#define PCIE_REG_PHBC_MMIO_DMA_FREEZE_EN 0x1000000000000000ull -#define PCIE_REG_PHBC_32BIT_MSI_EN 0x0080000000000000ull -#define PCIE_REG_PHBC_M64_EN 0x0040000000000000ull -#define PCIE_REG_PHBC_IO_EN 0x0008000000000000ull -#define PCIE_REG_PHBC_64BIT_MSI_EN 0x0002000000000000ull -#define PCIE_REG_PHBC_M32A_EN 0x0000800000000000ull -#define PCIE_REG_PHBC_M32B_EN 0x0000400000000000ull -#define PCIE_REG_PHBC_MSI_PE_VALIDATE 0x0000200000000000ull -#define PCIE_REG_PHBC_DMA_XLATE_BYPASS 0x0000100000000000ull - -#define PCIE_REG_IO_BASE_ADDR 0x170 -#define PCIE_REG_IO_BASE_MASK 0x178 -#define PCIE_REG_IO_START_ADDR 0x180 - -#define PCIE_REG_M32A_BASE_ADDR 0x190 -#define PCIE_REG_M32A_BASE_MASK 0x198 -#define PCIE_REG_M32A_START_ADDR 0x1a0 - -#define PCIE_REG_M32B_BASE_ADDR 0x1b0 -#define PCIE_REG_M32B_BASE_MASK 0x1b8 -#define PCIE_REG_M32B_START_ADDR 0x1c0 - -#define PCIE_REG_M64_BASE_ADDR 0x1e0 -#define PCIE_REG_M64_BASE_MASK 0x1e8 -#define PCIE_REG_M64_START_ADDR 0x1f0 - -#define PCIE_REG_TCE_KILL 0x210 -#define PCIE_REG_TCEKILL_SINGLE 0x8000000000000000ull -#define PCIE_REG_TCEKILL_ADDR_MASK 0x000003fffffffff8ull -#define PCIE_REG_TCEKILL_PS_4K 0 -#define PCIE_REG_TCEKILL_PS_64K 1 -#define PCIE_REG_TCEKILL_PS_16M 2 -#define PCIE_REG_TCEKILL_PS_16G 3 - -#define PCIE_REG_IODA_ADDR 0x220 -#define PCIE_REG_IODA_AD_AUTOINC 0x8000000000000000ull -#define PCIE_REG_IODA_AD_TBL_MVT 0x0005000000000000ull -#define PCIE_REG_IODA_AD_TBL_PELT 0x0006000000000000ull -#define PCIE_REG_IODA_AD_TBL_PESTA 0x0007000000000000ull -#define PCIE_REG_IODA_AD_TBL_PESTB 0x0008000000000000ull -#define PCIE_REG_IODA_AD_TBL_TVT 0x0009000000000000ull -#define PCIE_REG_IODA_AD_TBL_TCE 0x000a000000000000ull -#define PCIE_REG_IODA_DATA0 0x228 -#define PCIE_REG_IODA_DATA1 0x230 - -#define PCIE_REG_LOCK2 0x240 - -#define PCIE_REG_PHB_GEN_CAP 0x250 -#define PCIE_REG_PHB_TCE_CAP 0x258 -#define PCIE_REG_PHB_IRQ_CAP 0x260 -#define PCIE_REG_PHB_EEH_CAP 0x268 - -#define PCIE_REG_PAPR_ERR_INJ_CONTROL 0x2b0 -#define PCIE_REG_PAPR_ERR_INJ_ADDR 0x2b8 -#define PCIE_REG_PAPR_ERR_INJ_MASK 0x2c0 - - -#define PCIE_REG_SYS_CFG1 0x600 -#define PCIE_REG_SYS_CFG1_CLASS_CODE 0x0000000000ffffffull - -#define IODA_TVT0_TTA_MASK 0x000fffffffff0000ull -#define IODA_TVT0_TTA_SHIFT 4 -#define IODA_TVT0_BUSNUM_VALID_MASK 0x000000000000e000ull -#define IODA_TVT0_TCE_TABLE_SIZE_MASK 0x0000000000001f00ull -#define IODA_TVT0_TCE_TABLE_SIZE_SHIFT 8 -#define IODA_TVT0_BUSNUM_VALUE_MASK 0x00000000000000ffull -#define IODA_TVT0_BUSNUM_VALID_SHIFT 0 -#define IODA_TVT1_DEVNUM_VALID 0x2000000000000000ull -#define IODA_TVT1_DEVNUM_VALUE_MASK 0x1f00000000000000ull -#define 
IODA_TVT1_DEVNUM_VALUE_SHIFT 56 -#define IODA_TVT1_FUNCNUM_VALID 0x0008000000000000ull -#define IODA_TVT1_FUNCNUM_VALUE_MASK 0x0007000000000000ull -#define IODA_TVT1_FUNCNUM_VALUE_SHIFT 48 -#define IODA_TVT1_IO_PAGE_SIZE_MASK 0x00001f0000000000ull -#define IODA_TVT1_IO_PAGE_SIZE_SHIFT 40 -#define IODA_TVT1_PE_NUMBER_MASK 0x000000000000003full -#define IODA_TVT1_PE_NUMBER_SHIFT 0 - -#define IODA_TVT_COUNT 64 - -/* UTL Core registers */ -#define PCIE_UTL_SYS_BUS_CONTROL 0x400 -#define PCIE_UTL_STATUS 0x408 -#define PCIE_UTL_SYS_BUS_AGENT_STATUS 0x410 -#define PCIE_UTL_SYS_BUS_AGENT_ERR_SEV 0x418 -#define PCIE_UTL_SYS_BUS_AGENT_IRQ_EN 0x420 -#define PCIE_UTL_SYS_BUS_BURST_SZ_CONF 0x440 -#define PCIE_UTL_REVISION_ID 0x448 - -#define PCIE_UTL_OUT_POST_HDR_BUF_ALLOC 0x4c0 -#define PCIE_UTL_OUT_POST_DAT_BUF_ALLOC 0x4d0 -#define PCIE_UTL_IN_POST_HDR_BUF_ALLOC 0x4e0 -#define PCIE_UTL_IN_POST_DAT_BUF_ALLOC 0x4f0 -#define PCIE_UTL_OUT_NP_BUF_ALLOC 0x500 -#define PCIE_UTL_IN_NP_BUF_ALLOC 0x510 -#define PCIE_UTL_PCIE_TAGS_ALLOC 0x520 -#define PCIE_UTL_GBIF_READ_TAGS_ALLOC 0x530 - -#define PCIE_UTL_PCIE_PORT_CONTROL 0x540 -#define PCIE_UTL_PCIE_PORT_STATUS 0x548 -#define PCIE_UTL_PCIE_PORT_ERROR_SEV 0x550 -#define PCIE_UTL_PCIE_PORT_IRQ_EN 0x558 -#define PCIE_UTL_RC_STATUS 0x560 -#define PCIE_UTL_RC_ERR_SEVERITY 0x568 -#define PCIE_UTL_RC_IRQ_EN 0x570 -#define PCIE_UTL_EP_STATUS 0x578 -#define PCIE_UTL_EP_ERR_SEVERITY 0x580 -#define PCIE_UTL_EP_ERR_IRQ_EN 0x588 - -#define PCIE_UTL_PCI_PM_CTRL1 0x590 -#define PCIE_UTL_PCI_PM_CTRL2 0x598 - -/* PCIe stack registers */ -#define PCIE_REG_SYSTEM_CONFIG1 0x600 -#define PCIE_REG_SYSTEM_CONFIG2 0x608 -#define PCIE_REG_EP_SYSTEM_CONFIG 0x618 -#define PCIE_REG_EP_FLR 0x620 -#define PCIE_REG_EP_BAR_CONFIG 0x628 -#define PCIE_REG_LINK_CONFIG 0x630 -#define PCIE_REG_PM_CONFIG 0x640 -#define PCIE_REG_DLP_CONTROL 0x650 -#define PCIE_REG_DLP_STATUS 0x658 -#define PCIE_REG_ERR_REPORT_CONTROL 0x660 -#define PCIE_REG_SLOT_CONTROL1 0x670 -#define PCIE_REG_SLOT_CONTROL2 0x678 -#define PCIE_REG_UTL_CONFIG 0x680 -#define PCIE_REG_BUFFERS_CONFIG 0x690 -#define PCIE_REG_ERROR_INJECT 0x698 -#define PCIE_REG_SRIOV_CONFIG 0x6a0 -#define PCIE_REG_PF0_SRIOV_STATUS 0x6a8 -#define PCIE_REG_PF1_SRIOV_STATUS 0x6b0 -#define PCIE_REG_PORT_NUMBER 0x700 -#define PCIE_REG_POR_SYSTEM_CONFIG 0x708 - -/* PHB internal logic registers */ -#define PCIE_REG_PHB_VERSION 0x800 -#define PCIE_REG_RESET 0x808 -#define PCIE_REG_PHB_CONTROL 0x810 -#define PCIE_REG_PHB_TIMEOUT_CONTROL1 0x878 -#define PCIE_REG_PHB_QUIESCE_DMA 0x888 -#define PCIE_REG_PHB_DMA_READ_TAG_ACTV 0x900 -#define PCIE_REG_PHB_TCE_READ_TAG_ACTV 0x908 - -/* FIR registers */ -#define PCIE_REG_LEM_FIR_ACCUM 0xc00 -#define PCIE_REG_LEM_FIR_AND_MASK 0xc08 -#define PCIE_REG_LEM_FIR_OR_MASK 0xc10 -#define PCIE_REG_LEM_ACTION0 0xc18 -#define PCIE_REG_LEM_ACTION1 0xc20 -#define PCIE_REG_LEM_ERROR_MASK 0xc30 -#define PCIE_REG_LEM_ERROR_AND_MASK 0xc38 -#define PCIE_REG_LEM_ERROR_OR_MASK 0xc40 - -/* PHB Error registers */ -#define PCIE_REG_PHB_ERR_STATUS 0xc80 -#define PCIE_REG_PHB_ERR1_STATUS 0xc88 -#define PCIE_REG_PHB_ERR_INJECT 0xc90 -#define PCIE_REG_PHB_ERR_LEM_ENABLE 0xc98 -#define PCIE_REG_PHB_ERR_IRQ_ENABLE 0xca0 -#define PCIE_REG_PHB_ERR_FREEZE_ENABLE 0xca8 -#define PCIE_REG_PHB_ERR_SIDE_ENABLE 0xcb8 -#define PCIE_REG_PHB_ERR_LOG_0 0xcc0 -#define PCIE_REG_PHB_ERR_LOG_1 0xcc8 -#define PCIE_REG_PHB_ERR_STATUS_MASK 0xcd0 -#define PCIE_REG_PHB_ERR1_STATUS_MASK 0xcd8 - -#define PCIE_REG_MMIO_ERR_STATUS 0xd00 -#define PCIE_REG_MMIO_ERR1_STATUS 
0xd08 -#define PCIE_REG_MMIO_ERR_INJECT 0xd10 -#define PCIE_REG_MMIO_ERR_LEM_ENABLE 0xd18 -#define PCIE_REG_MMIO_ERR_IRQ_ENABLE 0xd20 -#define PCIE_REG_MMIO_ERR_FREEZE_ENABLE 0xd28 -#define PCIE_REG_MMIO_ERR_SIDE_ENABLE 0xd38 -#define PCIE_REG_MMIO_ERR_LOG_0 0xd40 -#define PCIE_REG_MMIO_ERR_LOG_1 0xd48 -#define PCIE_REG_MMIO_ERR_STATUS_MASK 0xd50 -#define PCIE_REG_MMIO_ERR1_STATUS_MASK 0xd58 - -#define PCIE_REG_DMA_ERR_STATUS 0xd80 -#define PCIE_REG_DMA_ERR1_STATUS 0xd88 -#define PCIE_REG_DMA_ERR_INJECT 0xd90 -#define PCIE_REG_DMA_ERR_LEM_ENABLE 0xd98 -#define PCIE_REG_DMA_ERR_IRQ_ENABLE 0xda0 -#define PCIE_REG_DMA_ERR_FREEZE_ENABLE 0xda8 -#define PCIE_REG_DMA_ERR_SIDE_ENABLE 0xdb8 -#define PCIE_REG_DMA_ERR_LOG_0 0xdc0 -#define PCIE_REG_DMA_ERR_LOG_1 0xdc8 -#define PCIE_REG_DMA_ERR_STATUS_MASK 0xdd0 -#define PCIE_REG_DMA_ERR1_STATUS_MASK 0xdd8 - -/* Shortcuts for access to the above using the PHB definitions - * with an offset - */ -#define PCIE_REG_ERR_PHB_OFFSET 0x0 -#define PCIE_REG_ERR_MMIO_OFFSET 0x80 -#define PCIE_REG_ERR_DMA_OFFSET 0x100 - -/* Debug and Trace registers */ -#define PCIE_REG_PHB_DEBUG_CONTROL0 0xe00 -#define PCIE_REG_PHB_DEBUG_STATUS0 0xe08 -#define PCIE_REG_PHB_DEBUG_CONTROL1 0xe10 -#define PCIE_REG_PHB_DEBUG_STATUS1 0xe18 -#define PCIE_REG_PHB_DEBUG_CONTROL2 0xe20 -#define PCIE_REG_PHB_DEBUG_STATUS2 0xe28 -#define PCIE_REG_PHB_DEBUG_CONTROL3 0xe30 -#define PCIE_REG_PHB_DEBUG_STATUS3 0xe38 -#define PCIE_REG_PHB_DEBUG_CONTROL4 0xe40 -#define PCIE_REG_PHB_DEBUG_STATUS4 0xe48 -#define PCIE_REG_PHB_DEBUG_CONTROL5 0xe50 -#define PCIE_REG_PHB_DEBUG_STATUS5 0xe58 -#define PCIE_REG_PHB_DEBUG_CONTROL6 0xe60 -#define PCIE_REG_PHB_DEBUG_STATUS6 0xe68 - -/* Definition for PCIe errors */ -struct wsp_pcie_err_log_data { - __u64 phb_err; - __u64 phb_err1; - __u64 phb_log0; - __u64 phb_log1; - __u64 mmio_err; - __u64 mmio_err1; - __u64 mmio_log0; - __u64 mmio_log1; - __u64 dma_err; - __u64 dma_err1; - __u64 dma_log0; - __u64 dma_log1; - __u64 utl_sys_err; - __u64 utl_port_err; - __u64 utl_rc_err; - __u64 unused; -}; - -#endif /* __WSP_PCI_H */ -- cgit v1.2.3 From 96d016108640bc2b7fb0ee800737f80923847294 Mon Sep 17 00:00:00 2001 From: Sam bobroff Date: Thu, 5 Jun 2014 16:19:22 +1000 Subject: powerpc: Correct DSCR during TM context switch Correct the DSCR SPR becoming temporarily corrupted if a task is context switched during a transaction. The problem occurs while suspending the task and is caused by saving the DSCR to thread.dscr after it has already been set to the CPU's default value: __switch_to() calls __switch_to_tm(), which calls tm_reclaim_task(), which calls tm_reclaim_thread(), which calls tm_reclaim(), where the DSCR is set to the CPU's default; __switch_to() then calls _switch(), where thread.dscr is set to the (now default) DSCR. When the task is resumed, its transaction will be doomed (as usual) and the DSCR SPR will be corrupted, although the checkpointed value will be correct. Therefore the DSCR will be immediately corrected by the transaction aborting, unless it has been suspended. In that case the incorrect value can be seen by the task until it resumes the transaction. The fix is to treat the DSCR similarly to the TAR and save it early in __switch_to(). A program exposing the problem is added to the kernel self tests as: tools/testing/selftests/powerpc/tm/tm-resched-dscr. 
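To make the ordering concrete, here is a simplified pseudo-C sketch of the sequence described above (an illustration only, not the actual kernel code: the call chain is compressed into straight-line form, and save_early_sprs() is the helper the diff below introduces):

	/* Before the fix (simplified): DSCR is saved only after tm_reclaim() has run */
	save_tar(&prev->thread);                /* TAR is already captured early */
	__switch_to_tm(prev);                   /* ...ends in tm_reclaim(), which resets DSCR to the CPU default */
	prev->thread.dscr = mfspr(SPRN_DSCR);   /* _switch() then records the default, not the task's value */

	/* After the fix (simplified): both SPRs are captured before treclaim can change them */
	save_early_sprs(&prev->thread);         /* saves TAR and DSCR up front */
	__switch_to_tm(prev);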
Signed-off-by: Sam Bobroff CC: [v3.10+] Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/switch_to.h | 6 +- arch/powerpc/kernel/entry_64.S | 6 -- arch/powerpc/kernel/process.c | 8 +- tools/testing/selftests/powerpc/Makefile | 2 +- tools/testing/selftests/powerpc/tm/Makefile | 15 ++++ .../testing/selftests/powerpc/tm/tm-resched-dscr.c | 90 ++++++++++++++++++++++ 6 files changed, 114 insertions(+), 13 deletions(-) create mode 100644 tools/testing/selftests/powerpc/tm/Makefile create mode 100644 tools/testing/selftests/powerpc/tm/tm-resched-dscr.c (limited to 'arch') diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 0e83e7d8c73f..d2468eb12639 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -16,13 +16,15 @@ struct thread_struct; extern struct task_struct *_switch(struct thread_struct *prev, struct thread_struct *next); #ifdef CONFIG_PPC_BOOK3S_64 -static inline void save_tar(struct thread_struct *prev) +static inline void save_early_sprs(struct thread_struct *prev) { if (cpu_has_feature(CPU_FTR_ARCH_207S)) prev->tar = mfspr(SPRN_TAR); + if (cpu_has_feature(CPU_FTR_DSCR)) + prev->dscr = mfspr(SPRN_DSCR); } #else -static inline void save_tar(struct thread_struct *prev) {} +static inline void save_early_sprs(struct thread_struct *prev) {} #endif extern void enable_kernel_fp(void); diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 911d45366f59..6528c5e2cc44 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -428,12 +428,6 @@ BEGIN_FTR_SECTION std r24,THREAD_VRSAVE(r3) END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif /* CONFIG_ALTIVEC */ -#ifdef CONFIG_PPC64 -BEGIN_FTR_SECTION - mfspr r25,SPRN_DSCR - std r25,THREAD_DSCR(r3) -END_FTR_SECTION_IFSET(CPU_FTR_DSCR) -#endif and. r0,r0,r22 beq+ 1f andc r22,r22,r0 diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index 8a1edbe26b8f..be99774d3f44 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c @@ -755,15 +755,15 @@ struct task_struct *__switch_to(struct task_struct *prev, WARN_ON(!irqs_disabled()); - /* Back up the TAR across context switches. + /* Back up the TAR and DSCR across context switches. * Note that the TAR is not available for use in the kernel. (To * provide this, the TAR should be backed up/restored on exception * entry/exit instead, and be in pt_regs. FIXME, this should be in * pt_regs anyway (for debug).) - * Save the TAR here before we do treclaim/trecheckpoint as these - * will change the TAR. + * Save the TAR and DSCR here before we do treclaim/trecheckpoint as + * these will change them. 
*/ - save_tar(&prev->thread); + save_early_sprs(&prev->thread); __switch_to_tm(prev); diff --git a/tools/testing/selftests/powerpc/Makefile b/tools/testing/selftests/powerpc/Makefile index b3dbe9ef1a40..54833a791a44 100644 --- a/tools/testing/selftests/powerpc/Makefile +++ b/tools/testing/selftests/powerpc/Makefile @@ -13,7 +13,7 @@ CFLAGS := -Wall -O2 -flto -Wall -Werror -DGIT_VERSION='"$(GIT_VERSION)"' -I$(CUR export CC CFLAGS -TARGETS = pmu copyloops mm +TARGETS = pmu copyloops mm tm endif diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile new file mode 100644 index 000000000000..51267f4184a6 --- /dev/null +++ b/tools/testing/selftests/powerpc/tm/Makefile @@ -0,0 +1,15 @@ +PROGS := tm-resched-dscr + +all: $(PROGS) + +$(PROGS): + +run_tests: all + @-for PROG in $(PROGS); do \ + ./$$PROG; \ + done; + +clean: + rm -f $(PROGS) *.o + +.PHONY: all run_tests clean diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c new file mode 100644 index 000000000000..ee98e3886af2 --- /dev/null +++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c @@ -0,0 +1,90 @@ +/* Test context switching to see if the DSCR SPR is correctly preserved + * when within a transaction. + * + * Note: We assume that the DSCR has been left at the default value (0) + * for all CPUs. + * + * Method: + * + * Set a value into the DSCR. + * + * Start a transaction, and suspend it (*). + * + * Hard loop checking to see if the transaction has become doomed. + * + * Now that we *may* have been preempted, record the DSCR and TEXASR SPRs. + * + * If the abort was because of a context switch, check the DSCR value. + * Otherwise, try again. + * + * (*) If the transaction is not suspended we can't see the problem because + * the transaction abort handler will restore the DSCR to its checkpointed + * value before we regain control. 
+ */ + +#include +#include +#include +#include +#include + +#define TBEGIN ".long 0x7C00051D ;" +#define TEND ".long 0x7C00055D ;" +#define TCHECK ".long 0x7C00059C ;" +#define TSUSPEND ".long 0x7C0005DD ;" +#define TRESUME ".long 0x7C2005DD ;" +#define SPRN_TEXASR 0x82 +#define SPRN_DSCR 0x03 + +int main(void) { + uint64_t rv, dscr1 = 1, dscr2, texasr; + + printf("Check DSCR TM context switch: "); + fflush(stdout); + for (;;) { + rv = 1; + asm __volatile__ ( + /* set a known value into the DSCR */ + "ld 3, %[dscr1];" + "mtspr %[sprn_dscr], 3;" + + /* start and suspend a transaction */ + TBEGIN + "beq 1f;" + TSUSPEND + + /* hard loop until the transaction becomes doomed */ + "2: ;" + TCHECK + "bc 4, 0, 2b;" + + /* record DSCR and TEXASR */ + "mfspr 3, %[sprn_dscr];" + "std 3, %[dscr2];" + "mfspr 3, %[sprn_texasr];" + "std 3, %[texasr];" + + TRESUME + TEND + "li %[rv], 0;" + "1: ;" + : [rv]"=r"(rv), [dscr2]"=m"(dscr2), [texasr]"=m"(texasr) + : [dscr1]"m"(dscr1) + , [sprn_dscr]"i"(SPRN_DSCR), [sprn_texasr]"i"(SPRN_TEXASR) + : "memory", "r3" + ); + assert(rv); /* make sure the transaction aborted */ + if ((texasr >> 56) != TM_CAUSE_RESCHED) { + putchar('.'); + fflush(stdout); + continue; + } + if (dscr2 != dscr1) { + printf(" FAIL\n"); + exit(EXIT_FAILURE); + } else { + printf(" OK\n"); + exit(EXIT_SUCCESS); + } + } +} -- cgit v1.2.3 From 59a53afe70fd530040bdc69581f03d880157f15a Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Fri, 6 Jun 2014 14:28:51 +1000 Subject: powerpc: Don't setup CPUs with bad status OPAL will mark a CPU that is guarded as "bad" in the status property of the CPU node. Unfortunately Linux doesn't check this property and will put the bad CPU in the present map. This has caused hangs on booting when we try to unsplit the core. This patch checks the CPU is available via this status property before putting it in the present map. Signed-off-by: Michael Neuling Tested-by: Anton Blanchard cc: stable@vger.kernel.org Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/setup-common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index d4d418376f99..e239df3768ac 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -471,7 +471,7 @@ void __init smp_setup_cpu_maps(void) for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { DBG(" thread %d -> cpu %d (hard id %d)\n", j, cpu, be32_to_cpu(intserv[j])); - set_cpu_present(cpu, true); + set_cpu_present(cpu, of_device_is_available(dn)); set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j])); set_cpu_possible(cpu, true); cpu++; -- cgit v1.2.3 From b2a80878693256ab97e1e3667e24b3ecdc951de1 Mon Sep 17 00:00:00 2001 From: "Shreyas B. Prabhu" Date: Fri, 6 Jun 2014 15:51:05 +0530 Subject: powerpc/powernv: Include asm/smp.h to fix UP build failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Build throws the following errors when CONFIG_SMP=n arch/powerpc/platforms/powernv/setup.c: In function ‘pnv_kexec_wait_secondaries_down’: arch/powerpc/platforms/powernv/setup.c:179:4: error: implicit declaration of function ‘get_hard_smp_processor_id’ rc = opal_query_cpu_status(get_hard_smp_processor_id(i), The usage of get_hard_smp_processor_id() needs the declaration from <asm/smp.h>. The file setup.c pulls that header in only indirectly, through another header which in turn includes <asm/smp.h> only on SMP configs, and hence UP builds fail. Fix this by directly including <asm/smp.h> in setup.c unconditionally. 
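The include structure behind the failure can be sketched as follows (a hypothetical, simplified header layout; the intermediate header is not named in this log):

	/* header indirectly pulled in by setup.c (assumed shape) */
	#ifdef CONFIG_SMP
	#include <asm/smp.h>    /* declares get_hard_smp_processor_id() */
	#endif

With CONFIG_SMP=n nothing declares get_hard_smp_processor_id(), so the call in pnv_kexec_wait_secondaries_down() trips the implicit-declaration error; including <asm/smp.h> directly in setup.c bypasses the conditional.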
Reported-by: Geert Uytterhoeven Reviewed-by: Srivatsa S. Bhat Signed-off-by: Shreyas B. Prabhu Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/powernv/setup.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 8c16a5f96728..678573c4ac49 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -35,6 +35,7 @@ #include #include #include +#include <asm/smp.h> #include "powernv.h" -- cgit v1.2.3 From ad417330628c60e83692211e7161eb49e1843318 Mon Sep 17 00:00:00 2001 From: "Shreyas B. Prabhu" Date: Fri, 6 Jun 2014 15:52:51 +0530 Subject: powerpc/powernv: Disable subcore for UP configs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Build throws the following errors when CONFIG_SMP=n arch/powerpc/platforms/powernv/subcore.c: In function ‘cpu_update_split_mode’: arch/powerpc/platforms/powernv/subcore.c:274:15: error: ‘setup_max_cpus’ undeclared (first use in this function) arch/powerpc/platforms/powernv/subcore.c:285:5: error: lvalue required as left operand of assignment The 'setup_max_cpus' variable is relevant only on SMP, so there is no point working around it for UP. Furthermore, subcore itself is relevant only on SMP and hence the better solution is to exclude subcore.o and subcore-asm.o from UP builds. Signed-off-by: Shreyas B. Prabhu Reviewed-by: Srivatsa S. Bhat Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/powernv/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile index 4ad0d345bc96..d55891f89a2c 100644 --- a/arch/powerpc/platforms/powernv/Makefile +++ b/arch/powerpc/platforms/powernv/Makefile @@ -1,9 +1,9 @@ obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o opal-async.o obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o -obj-y += opal-msglog.o subcore.o subcore-asm.o +obj-y += opal-msglog.o -obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o obj-$(CONFIG_PPC_SCOM) += opal-xscom.o -- cgit v1.2.3 From 1bd098903fda069cb96fe8b5cb4595b46c683385 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sat, 7 Jun 2014 11:29:01 +1000 Subject: powernv: Fix permissions on sysparam sysfs entries Everyone can write to these files, which is not what we want. 
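For reference, the mode macros being swapped in the diff below expand as follows (standard kernel mode bits, values as defined in the kernel's stat headers):

	#define S_IWUSR 0200                            /* owner may write */
	#define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)   /* 0222: user, group and other may write */

so replacing S_IWUGO with S_IWUSR in the sysfs attribute modes drops the group and other write bits while leaving the S_IRUGO read bits untouched.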
Cc: stable@vger.kernel.org # 3.15 Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/powernv/opal-sysparam.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/powernv/opal-sysparam.c b/arch/powerpc/platforms/powernv/opal-sysparam.c index d202f9bc3683..9d1acf22a099 100644 --- a/arch/powerpc/platforms/powernv/opal-sysparam.c +++ b/arch/powerpc/platforms/powernv/opal-sysparam.c @@ -260,10 +260,10 @@ void __init opal_sys_param_init(void) attr[i].kobj_attr.attr.mode = S_IRUGO; break; case OPAL_SYSPARAM_WRITE: - attr[i].kobj_attr.attr.mode = S_IWUGO; + attr[i].kobj_attr.attr.mode = S_IWUSR; break; case OPAL_SYSPARAM_RW: - attr[i].kobj_attr.attr.mode = S_IRUGO | S_IWUGO; + attr[i].kobj_attr.attr.mode = S_IRUGO | S_IWUSR; break; default: break; -- cgit v1.2.3 From ddf0322a3ffe2d98facc72f255ac5c140b547c72 Mon Sep 17 00:00:00 2001 From: Guo Chao Date: Mon, 9 Jun 2014 16:58:51 +0800 Subject: powerpc/powernv: Fix endianness problems in EEH EEH information fetched from OPAL needs fixing before it is used in an LE environment. To have it included in sparse's endianness checks, declare the fields as __beXX and access them through the byte-order accessors. Cc: Gavin Shan Signed-off-by: Guo Chao Acked-by: Gavin Shan Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/opal.h | 102 +++++++++++++++--------------- arch/powerpc/platforms/powernv/eeh-ioda.c | 36 ++++++----- arch/powerpc/platforms/powernv/pci.c | 81 +++++++++++++++--------- 3 files changed, 120 insertions(+), 99 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h index cb15cbb51600..460018889ba9 100644 --- a/arch/powerpc/include/asm/opal.h +++ b/arch/powerpc/include/asm/opal.h @@ -599,9 +599,9 @@ enum { }; struct OpalIoPhbErrorCommon { - uint32_t version; - uint32_t ioType; - uint32_t len; + __be32 version; + __be32 ioType; + __be32 len; }; struct OpalIoP7IOCPhbErrorData { @@ -666,64 +666,64 @@ struct OpalIoP7IOCPhbErrorData { struct OpalIoPhb3ErrorData { struct OpalIoPhbErrorCommon common; - uint32_t brdgCtl; + __be32 brdgCtl; /* PHB3 UTL regs */ - uint32_t portStatusReg; - uint32_t rootCmplxStatus; - uint32_t busAgentStatus; + __be32 portStatusReg; + __be32 rootCmplxStatus; + __be32 busAgentStatus; /* PHB3 cfg regs */ - uint32_t deviceStatus; - uint32_t slotStatus; - uint32_t linkStatus; - uint32_t devCmdStatus; - uint32_t devSecStatus; + __be32 deviceStatus; + __be32 slotStatus; + __be32 linkStatus; + __be32 devCmdStatus; + __be32 devSecStatus; /* cfg AER regs */ - uint32_t rootErrorStatus; - uint32_t uncorrErrorStatus; - uint32_t corrErrorStatus; - uint32_t tlpHdr1; - uint32_t tlpHdr2; - uint32_t tlpHdr3; - uint32_t tlpHdr4; - uint32_t sourceId; + __be32 rootErrorStatus; + __be32 uncorrErrorStatus; + __be32 corrErrorStatus; + __be32 tlpHdr1; + __be32 tlpHdr2; + __be32 tlpHdr3; + __be32 tlpHdr4; + __be32 sourceId; - uint32_t rsv3; + __be32 rsv3; /* Record data about the call to allocate a buffer */ - uint64_t errorClass; - uint64_t correlator; + __be64 errorClass; + __be64 correlator; - uint64_t nFir; /* 000 */ - uint64_t nFirMask; /* 003 */ - uint64_t nFirWOF; /* 008 */ + __be64 nFir; /* 000 */ + __be64 nFirMask; /* 003 */ + __be64 nFirWOF; /* 008 */ /* PHB3 MMIO Error Regs */ - uint64_t phbPlssr; /* 120 */ - uint64_t phbCsr; /* 110 */ - uint64_t lemFir; /* C00 */ - uint64_t lemErrorMask; /* C18 */ - uint64_t lemWOF; /* C40 */ - uint64_t phbErrorStatus; /* C80 */ - uint64_t phbFirstErrorStatus; /* C88 */ - 
uint64_t phbErrorLog0; /* CC0 */ - uint64_t phbErrorLog1; /* CC8 */ - uint64_t mmioErrorStatus; /* D00 */ - uint64_t mmioFirstErrorStatus; /* D08 */ - uint64_t mmioErrorLog0; /* D40 */ - uint64_t mmioErrorLog1; /* D48 */ - uint64_t dma0ErrorStatus; /* D80 */ - uint64_t dma0FirstErrorStatus; /* D88 */ - uint64_t dma0ErrorLog0; /* DC0 */ - uint64_t dma0ErrorLog1; /* DC8 */ - uint64_t dma1ErrorStatus; /* E00 */ - uint64_t dma1FirstErrorStatus; /* E08 */ - uint64_t dma1ErrorLog0; /* E40 */ - uint64_t dma1ErrorLog1; /* E48 */ - uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS]; - uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS]; + __be64 phbPlssr; /* 120 */ + __be64 phbCsr; /* 110 */ + __be64 lemFir; /* C00 */ + __be64 lemErrorMask; /* C18 */ + __be64 lemWOF; /* C40 */ + __be64 phbErrorStatus; /* C80 */ + __be64 phbFirstErrorStatus; /* C88 */ + __be64 phbErrorLog0; /* CC0 */ + __be64 phbErrorLog1; /* CC8 */ + __be64 mmioErrorStatus; /* D00 */ + __be64 mmioFirstErrorStatus; /* D08 */ + __be64 mmioErrorLog0; /* D40 */ + __be64 mmioErrorLog1; /* D48 */ + __be64 dma0ErrorStatus; /* D80 */ + __be64 dma0FirstErrorStatus; /* D88 */ + __be64 dma0ErrorLog0; /* DC0 */ + __be64 dma0ErrorLog1; /* DC8 */ + __be64 dma1ErrorStatus; /* E00 */ + __be64 dma1FirstErrorStatus; /* E08 */ + __be64 dma1ErrorLog0; /* E40 */ + __be64 dma1ErrorLog1; /* E48 */ + __be64 pestA[OPAL_PHB3_NUM_PEST_REGS]; + __be64 pestB[OPAL_PHB3_NUM_PEST_REGS]; }; enum { @@ -851,8 +851,8 @@ int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t erro int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action); int64_t opal_get_epow_status(__be64 *status); int64_t opal_set_system_attention_led(uint8_t led_action); -int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, - uint16_t *pci_error_type, uint16_t *severity); +int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe, + __be16 *pci_error_type, __be16 *severity); int64_t opal_pci_poll(uint64_t phb_id); int64_t opal_return_cpu(void); int64_t opal_reinit_cpus(uint64_t flags); diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index 753f08e36dfa..e0d6a3a213e2 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -267,7 +267,7 @@ static int ioda_eeh_get_state(struct eeh_pe *pe) { s64 ret = 0; u8 fstate; - u16 pcierr; + __be16 pcierr; u32 pe_no; int result; struct pci_controller *hose = pe->phb; @@ -316,7 +316,7 @@ static int ioda_eeh_get_state(struct eeh_pe *pe) result = 0; result &= ~EEH_STATE_RESET_ACTIVE; - if (pcierr != OPAL_EEH_PHB_ERROR) { + if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) { result |= EEH_STATE_MMIO_ACTIVE; result |= EEH_STATE_DMA_ACTIVE; result |= EEH_STATE_MMIO_ENABLED; @@ -706,8 +706,8 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) struct pci_controller *hose; struct pnv_phb *phb; struct eeh_pe *phb_pe; - u64 frozen_pe_no; - u16 err_type, severity; + __be64 frozen_pe_no; + __be16 err_type, severity; long rc; int ret = EEH_NEXT_ERR_NONE; @@ -742,8 +742,8 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) } /* If the PHB doesn't have error, stop processing */ - if (err_type == OPAL_EEH_NO_ERROR || - severity == OPAL_EEH_SEV_NO_ERROR) { + if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR || + be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) { pr_devel("%s: No error found on PHB#%x\n", __func__, hose->global_number); continue; @@ -755,14 +755,14 @@ static int ioda_eeh_next_error(struct eeh_pe 
**pe) * specific PHB. */ pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n", - __func__, err_type, severity, - frozen_pe_no, hose->global_number); - switch (err_type) { + __func__, be16_to_cpu(err_type), be16_to_cpu(severity), + be64_to_cpu(frozen_pe_no), hose->global_number); + switch (be16_to_cpu(err_type)) { case OPAL_EEH_IOC_ERROR: - if (severity == OPAL_EEH_SEV_IOC_DEAD) { + if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) { pr_err("EEH: dead IOC detected\n"); ret = EEH_NEXT_ERR_DEAD_IOC; - } else if (severity == OPAL_EEH_SEV_INF) { + } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { pr_info("EEH: IOC informative error " "detected\n"); ioda_eeh_hub_diag(hose); @@ -771,17 +771,18 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) break; case OPAL_EEH_PHB_ERROR: - if (severity == OPAL_EEH_SEV_PHB_DEAD) { + if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) { *pe = phb_pe; pr_err("EEH: dead PHB#%x detected\n", hose->global_number); ret = EEH_NEXT_ERR_DEAD_PHB; - } else if (severity == OPAL_EEH_SEV_PHB_FENCED) { + } else if (be16_to_cpu(severity) == + OPAL_EEH_SEV_PHB_FENCED) { *pe = phb_pe; pr_err("EEH: fenced PHB#%x detected\n", hose->global_number); ret = EEH_NEXT_ERR_FENCED_PHB; - } else if (severity == OPAL_EEH_SEV_INF) { + } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { pr_info("EEH: PHB#%x informative error " "detected\n", hose->global_number); @@ -801,12 +802,13 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) * progress with recovery. We needn't report * it again. */ - if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) { + if (ioda_eeh_get_pe(hose, + be64_to_cpu(frozen_pe_no), pe)) { *pe = phb_pe; pr_err("EEH: Escalated fenced PHB#%x " "detected for PE#%llx\n", hose->global_number, - frozen_pe_no); + be64_to_cpu(frozen_pe_no)); ret = EEH_NEXT_ERR_FENCED_PHB; } else if ((*pe)->state & EEH_PE_ISOLATED) { ret = EEH_NEXT_ERR_NONE; @@ -819,7 +821,7 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) break; default: pr_warn("%s: Unexpected error type %d\n", - __func__, err_type); + __func__, be16_to_cpu(err_type)); } /* diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c index eefbfcc3fd8c..f91a4e5d872e 100644 --- a/arch/powerpc/platforms/powernv/pci.c +++ b/arch/powerpc/platforms/powernv/pci.c @@ -206,72 +206,91 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose, data = (struct OpalIoPhb3ErrorData*)common; pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n", - hose->global_number, common->version); + hose->global_number, be32_to_cpu(common->version)); if (data->brdgCtl) pr_info("brdgCtl: %08x\n", - data->brdgCtl); + be32_to_cpu(data->brdgCtl)); if (data->portStatusReg || data->rootCmplxStatus || data->busAgentStatus) pr_info("UtlSts: %08x %08x %08x\n", - data->portStatusReg, data->rootCmplxStatus, - data->busAgentStatus); + be32_to_cpu(data->portStatusReg), + be32_to_cpu(data->rootCmplxStatus), + be32_to_cpu(data->busAgentStatus)); if (data->deviceStatus || data->slotStatus || data->linkStatus || data->devCmdStatus || data->devSecStatus) pr_info("RootSts: %08x %08x %08x %08x %08x\n", - data->deviceStatus, data->slotStatus, - data->linkStatus, data->devCmdStatus, - data->devSecStatus); + be32_to_cpu(data->deviceStatus), + be32_to_cpu(data->slotStatus), + be32_to_cpu(data->linkStatus), + be32_to_cpu(data->devCmdStatus), + be32_to_cpu(data->devSecStatus)); if (data->rootErrorStatus || data->uncorrErrorStatus || data->corrErrorStatus) pr_info("RootErrSts: %08x %08x %08x\n", - data->rootErrorStatus, 
data->uncorrErrorStatus, - data->corrErrorStatus); + be32_to_cpu(data->rootErrorStatus), + be32_to_cpu(data->uncorrErrorStatus), + be32_to_cpu(data->corrErrorStatus)); if (data->tlpHdr1 || data->tlpHdr2 || data->tlpHdr3 || data->tlpHdr4) pr_info("RootErrLog: %08x %08x %08x %08x\n", - data->tlpHdr1, data->tlpHdr2, - data->tlpHdr3, data->tlpHdr4); + be32_to_cpu(data->tlpHdr1), + be32_to_cpu(data->tlpHdr2), + be32_to_cpu(data->tlpHdr3), + be32_to_cpu(data->tlpHdr4)); if (data->sourceId || data->errorClass || data->correlator) pr_info("RootErrLog1: %08x %016llx %016llx\n", - data->sourceId, data->errorClass, - data->correlator); + be32_to_cpu(data->sourceId), + be64_to_cpu(data->errorClass), + be64_to_cpu(data->correlator)); if (data->nFir) pr_info("nFir: %016llx %016llx %016llx\n", - data->nFir, data->nFirMask, - data->nFirWOF); + be64_to_cpu(data->nFir), + be64_to_cpu(data->nFirMask), + be64_to_cpu(data->nFirWOF)); if (data->phbPlssr || data->phbCsr) pr_info("PhbSts: %016llx %016llx\n", - data->phbPlssr, data->phbCsr); + be64_to_cpu(data->phbPlssr), + be64_to_cpu(data->phbCsr)); if (data->lemFir) pr_info("Lem: %016llx %016llx %016llx\n", - data->lemFir, data->lemErrorMask, - data->lemWOF); + be64_to_cpu(data->lemFir), + be64_to_cpu(data->lemErrorMask), + be64_to_cpu(data->lemWOF)); if (data->phbErrorStatus) pr_info("PhbErr: %016llx %016llx %016llx %016llx\n", - data->phbErrorStatus, data->phbFirstErrorStatus, - data->phbErrorLog0, data->phbErrorLog1); + be64_to_cpu(data->phbErrorStatus), + be64_to_cpu(data->phbFirstErrorStatus), + be64_to_cpu(data->phbErrorLog0), + be64_to_cpu(data->phbErrorLog1)); if (data->mmioErrorStatus) pr_info("OutErr: %016llx %016llx %016llx %016llx\n", - data->mmioErrorStatus, data->mmioFirstErrorStatus, - data->mmioErrorLog0, data->mmioErrorLog1); + be64_to_cpu(data->mmioErrorStatus), + be64_to_cpu(data->mmioFirstErrorStatus), + be64_to_cpu(data->mmioErrorLog0), + be64_to_cpu(data->mmioErrorLog1)); if (data->dma0ErrorStatus) pr_info("InAErr: %016llx %016llx %016llx %016llx\n", - data->dma0ErrorStatus, data->dma0FirstErrorStatus, - data->dma0ErrorLog0, data->dma0ErrorLog1); + be64_to_cpu(data->dma0ErrorStatus), + be64_to_cpu(data->dma0FirstErrorStatus), + be64_to_cpu(data->dma0ErrorLog0), + be64_to_cpu(data->dma0ErrorLog1)); if (data->dma1ErrorStatus) pr_info("InBErr: %016llx %016llx %016llx %016llx\n", - data->dma1ErrorStatus, data->dma1FirstErrorStatus, - data->dma1ErrorLog0, data->dma1ErrorLog1); + be64_to_cpu(data->dma1ErrorStatus), + be64_to_cpu(data->dma1FirstErrorStatus), + be64_to_cpu(data->dma1ErrorLog0), + be64_to_cpu(data->dma1ErrorLog1)); for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { - if ((data->pestA[i] >> 63) == 0 && - (data->pestB[i] >> 63) == 0) + if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 && + (be64_to_cpu(data->pestB[i]) >> 63) == 0) continue; pr_info("PE[%3d] A/B: %016llx %016llx\n", - i, data->pestA[i], data->pestB[i]); + i, be64_to_cpu(data->pestA[i]), + be64_to_cpu(data->pestB[i])); } } @@ -284,7 +303,7 @@ void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, return; common = (struct OpalIoPhbErrorCommon *)log_buff; - switch (common->ioType) { + switch (be32_to_cpu(common->ioType)) { case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: pnv_pci_dump_p7ioc_diag_data(hose, common); break; @@ -293,7 +312,7 @@ void pnv_pci_dump_phb_diag_data(struct pci_controller *hose, break; default: pr_warn("%s: Unrecognized ioType %d\n", - __func__, common->ioType); + __func__, be32_to_cpu(common->ioType)); } } -- cgit v1.2.3 From 
d9d82123199cab7a2f4fb3f94ef425f5def5a8a3 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 9 Jun 2014 18:18:52 +0300 Subject: powerpc/cpm: Remove duplicate FCC_GFMR_TTX define The FCC_GFMR_TTX define is cut and pasted twice so we can remove the second instance. Signed-off-by: Dan Carpenter Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/cpm2.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h index f42e9baf3a4e..7c8608b09694 100644 --- a/arch/powerpc/include/asm/cpm2.h +++ b/arch/powerpc/include/asm/cpm2.h @@ -489,7 +489,6 @@ typedef struct scc_trans { #define FCC_GFMR_TCI ((uint)0x20000000) #define FCC_GFMR_TRX ((uint)0x10000000) #define FCC_GFMR_TTX ((uint)0x08000000) -#define FCC_GFMR_TTX ((uint)0x08000000) #define FCC_GFMR_CDP ((uint)0x04000000) #define FCC_GFMR_CTSP ((uint)0x02000000) #define FCC_GFMR_CDS ((uint)0x01000000) -- cgit v1.2.3 From be8f9642a0d533b6a3017d31f63747ec79891b94 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 9 Jun 2014 18:19:17 +0300 Subject: powerpc/spufs: Remove duplicate SPUFS_CNTL_MAP_SIZE define The SPUFS_CNTL_MAP_SIZE define is cut and pasted twice so we can delete the second instance. Signed-off-by: Dan Carpenter Acked-by: Jeremy Kerr Acked-by: Arnd Bergmann Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/cell/spufs/spufs.h | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h index 0ba3c9598358..bcfd6f063efa 100644 --- a/arch/powerpc/platforms/cell/spufs/spufs.h +++ b/arch/powerpc/platforms/cell/spufs/spufs.h @@ -35,7 +35,6 @@ #define SPUFS_PS_MAP_SIZE 0x20000 #define SPUFS_MFC_MAP_SIZE 0x1000 #define SPUFS_CNTL_MAP_SIZE 0x1000 -#define SPUFS_CNTL_MAP_SIZE 0x1000 #define SPUFS_SIGNAL_MAP_SIZE PAGE_SIZE #define SPUFS_MSS_MAP_SIZE 0x1000 -- cgit v1.2.3 From caf69ba62768d3bae4fa8e6ad734cd5565207bd4 Mon Sep 17 00:00:00 2001 From: Joel Stanley Date: Tue, 10 Jun 2014 16:03:59 +1000 Subject: powerpc/powernv: Fix reading of OPAL msglog memory_read_from_buffer returns a signed value, so ret should be ssize_t. Fixes the following issue reported by David Binderman: [linux-3.15/arch/powerpc/platforms/powernv/opal-msglog.c:65]: (style) Checking if unsigned variable 'ret' is less than zero. [linux-3.15/arch/powerpc/platforms/powernv/opal-msglog.c:82]: (style) Checking if unsigned variable 'ret' is less than zero. Local variable "ret" is of type size_t. This is always unsigned, so it is pointless to check if it is less than zero. https://bugzilla.kernel.org/show_bug.cgi?id=77551 Fixing this exposes a real bug for the case where all count bytes are successfully read in the POS_WRAP case. The second memory_read_from_buffer will return EINVAL, causing the entire read to return EINVAL to userspace, despite the data being copied correctly. The fix is to test for the case where the data has been read and return early.
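The shape of the fix is easier to see in isolation. Below is a minimal, self-contained sketch of the two-stage wrap-around read with the early return; struct ringlog, its field names and the MIN() macro are illustrative stand-ins rather than the kernel's memcons layout or memory_read_from_buffer():

#include <stddef.h>
#include <string.h>
#include <sys/types.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct ringlog {
    const char *buf;    /* backing store */
    size_t size;        /* total buffer size */
    size_t out_pos;     /* wrap point: oldest valid byte */
};

/* Logical stream is buf[out_pos..size) followed by buf[0..out_pos);
 * 'pos' is an offset into that logical stream. */
static ssize_t ringlog_read(const struct ringlog *mc, char *to,
                            size_t count, size_t pos)
{
    size_t tail = mc->size - mc->out_pos;   /* bytes before the wrap */
    size_t first = 0, second = 0;

    if (pos < tail) {
        first = MIN(count, tail - pos);
        memcpy(to, mc->buf + mc->out_pos + pos, first);
        count -= first;
        /* Early return: the first segment alone satisfied the
         * request; falling through would hand the second stage a
         * request it must refuse. This is the bug fix. */
        if (count == 0)
            return first;
        pos = tail;     /* first segment fully consumed */
    }
    pos -= tail;        /* offset into the wrapped second segment */
    if (pos < mc->out_pos) {
        second = MIN(count, mc->out_pos - pos);
        memcpy(to + first, mc->buf + pos, second);
    }
    return first + second;
}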
Signed-off-by: Joel Stanley Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/powernv/opal-msglog.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c index 1bb25b952504..44ed78af1a0d 100644 --- a/arch/powerpc/platforms/powernv/opal-msglog.c +++ b/arch/powerpc/platforms/powernv/opal-msglog.c @@ -37,7 +37,8 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj, { struct memcons *mc = bin_attr->private; const char *conbuf; - size_t ret, first_read = 0; + ssize_t ret; + size_t first_read = 0; uint32_t out_pos, avail; if (!mc) @@ -69,6 +70,9 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj, to += first_read; count -= first_read; pos -= avail; + + if (count <= 0) + goto out; } /* Sanity check. The firmware should not do this to us. */ -- cgit v1.2.3 From 3df48c981d5a9610e02e9270b1bc4274fb536710 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 10 Jun 2014 16:46:21 +1000 Subject: powerpc/perf: Ensure all EBB register state is cleared on fork() In commit 330a1eb "Core EBB support for 64-bit book3s" I messed up clear_task_ebb(). It clears some but not all of the task's Event Based Branch (EBB) registers when we duplicate a task struct. That allows a child task to observe the EBBHR & EBBRR of its parent, which it should not be able to do. Fix it by clearing EBBHR & EBBRR. Signed-off-by: Michael Ellerman Cc: stable@vger.kernel.org [v3.11+] Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/switch_to.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index d2468eb12639..58abeda64cb7 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -86,6 +86,8 @@ static inline void clear_task_ebb(struct task_struct *t) { #ifdef CONFIG_PPC_BOOK3S_64 /* EBB perf events are not inherited, so clear all EBB state. */ + t->thread.ebbrr = 0; + t->thread.ebbhr = 0; t->thread.bescr = 0; t->thread.mmcr2 = 0; t->thread.mmcr0 = 0; -- cgit v1.2.3 From 50b66dbf876a10e35604fe325dc307811fb17a1b Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 10 Jun 2014 10:54:06 -0700 Subject: powerpc/xmon: avoid format string leaking to printk This makes sure format strings cannot leak into printk (the string has already been correctly processed for format arguments). Signed-off-by: Kees Cook Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/xmon/nonstdio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c index bce3dcfe5058..c98748617896 100644 --- a/arch/powerpc/xmon/nonstdio.c +++ b/arch/powerpc/xmon/nonstdio.c @@ -122,7 +122,7 @@ void xmon_printf(const char *format, ...) if (n && rc == 0) { /* No udbg hooks, fallback to printk() - dangerous */ - printk(xmon_outbuf); + printk("%s", xmon_outbuf); } } -- cgit v1.2.3 From 4817fc323dad9f08f2cb1aca22e6dc7f894c95d5 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 1 May 2014 07:20:04 +1000 Subject: powerpc/powernv: Reduce panic timeout from 180s to 10s We've already dropped the default pseries timeout to 10s, do the same for powernv. 
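The call added below relies on set_arch_panic_timeout() only overriding the timeout when the user has not already changed it from the generic default. A minimal sketch of that honor-the-user pattern, with illustrative names:

/* Install the arch default only if the current value still equals the
 * generic default, so a user-supplied panic= override survives.
 * A sketch of the pattern, not the kernel's exact helper. */
static int panic_timeout_secs;  /* 0 = generic default */

static void set_arch_panic_timeout_sketch(int timeout, int arch_default)
{
    if (panic_timeout_secs == arch_default)
        panic_timeout_secs = timeout;
}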
Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/powernv/setup.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c index 678573c4ac49..d9b88fa7c5a3 100644 --- a/arch/powerpc/platforms/powernv/setup.c +++ b/arch/powerpc/platforms/powernv/setup.c @@ -41,6 +41,8 @@ static void __init pnv_setup_arch(void) { + set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT); + /* Initialize SMP */ pnv_smp_init(); -- cgit v1.2.3 From 2c66599206938412d1781171953d565652ca3b93 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Mon, 5 May 2014 09:29:02 +1000 Subject: powerpc/eeh: Clear frozen state for child PE Since commit cb523e09 ("powerpc/eeh: Avoid I/O access during PE reset"), the PE is kept in the frozen state at the hardware level until the PE reset is done completely. After that, we explicitly clear the frozen state of the affected PE. However, there might be frozen child PEs of the affected PE, and we also need to clear their frozen state. Otherwise, the recovery is going to fail. Signed-off-by: Gavin Shan Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/eeh_driver.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 7100a5b96e70..8bb40e7cdeb6 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -447,8 +447,9 @@ static void *eeh_pe_detach_dev(void *data, void *userdata) * PE reset (for 3 times), we try to clear the frozen state * for 3 times as well. */ -static int eeh_clear_pe_frozen_state(struct eeh_pe *pe) +static void *__eeh_clear_pe_frozen_state(void *data, void *flag) { + struct eeh_pe *pe = (struct eeh_pe *)data; int i, rc; for (i = 0; i < 3; i++) { @@ -461,13 +462,24 @@ static int eeh_clear_pe_frozen_state(struct eeh_pe *pe) } /* The PE has been isolated, clear it */ - if (rc) + if (rc) { pr_warn("%s: Can't clear frozen PHB#%x-PE#%x (%d)\n", __func__, pe->phb->global_number, pe->addr, rc); - else + return (void *)pe; + } + + return NULL; +} + +static int eeh_clear_pe_frozen_state(struct eeh_pe *pe) +{ + void *rc; + + rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, NULL); + if (!rc) eeh_pe_state_clear(pe, EEH_PE_ISOLATED); - return rc; + return rc ? -EIO : 0; } /** -- cgit v1.2.3 From 1ad7a72c5e57bc6a7a3190c580df14dc3642febf Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Mon, 5 May 2014 09:29:03 +1000 Subject: powerpc/eeh: Report frozen parent PE prior to child PE When we have the corner case of frozen parent and child PE at the same time, we have to handle the frozen parent PE prior to the child. Without clearing the frozen state on the parent PE, the child PE can't be recovered successfully. The patch searches the EEH PE hierarchy tree and returns the topmost frozen PE to be handled. It ensures the frozen parent PE will be handled prior to the child PE.
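The search is a straightforward walk up the parent pointers, condensed here from the two copies of the loop added in the diff that follows. The wrapper function name is illustrative; the types, flags and ops table are the kernel's own, so this is a reading aid rather than standalone code:

/* Walk towards the root and prefer the topmost frozen ancestor,
 * stopping at the PHB, which is handled by a different path. */
static struct eeh_pe *eeh_topmost_frozen_pe(struct eeh_pe *pe)
{
    int active = EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE;
    struct eeh_pe *parent = pe->parent;
    int state;

    while (parent) {
        if (parent->type & EEH_PE_PHB)
            break;                  /* hit the ceiling */
        state = eeh_ops->get_state(parent, NULL);
        if (state > 0 && (state & active) != active)
            pe = parent;            /* frozen ancestor wins */
        parent = parent->parent;
    }
    return pe;                      /* highest frozen PE found */
}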
Signed-off-by: Gavin Shan Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/eeh.c | 27 ++++++++++++++++++++++++--- arch/powerpc/platforms/powernv/eeh-ioda.c | 30 ++++++++++++++++++++++++++++-- 2 files changed, 52 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index 7051ea3101b9..c25064b7d667 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -358,10 +358,11 @@ out: int eeh_dev_check_failure(struct eeh_dev *edev) { int ret; + int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); unsigned long flags; struct device_node *dn; struct pci_dev *dev; - struct eeh_pe *pe; + struct eeh_pe *pe, *parent_pe; int rc = 0; const char *location; @@ -439,14 +440,34 @@ int eeh_dev_check_failure(struct eeh_dev *edev) */ if ((ret < 0) || (ret == EEH_STATE_NOT_SUPPORT) || - (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) == - (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) { + ((ret & active_flags) == active_flags)) { eeh_stats.false_positives++; pe->false_positives++; rc = 0; goto dn_unlock; } + /* + * It should be corner case that the parent PE has been + * put into frozen state as well. We should take care + * that at first. + */ + parent_pe = pe->parent; + while (parent_pe) { + /* Hit the ceiling ? */ + if (parent_pe->type & EEH_PE_PHB) + break; + + /* Frozen parent PE ? */ + ret = eeh_ops->get_state(parent_pe, NULL); + if (ret > 0 && + (ret & active_flags) != active_flags) + pe = parent_pe; + + /* Next parent level */ + parent_pe = parent_pe->parent; + } + eeh_stats.slot_resets++; /* Avoid repeated reports of this failure, including problems diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index e0d6a3a213e2..68167cd9ea97 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -705,11 +705,12 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) { struct pci_controller *hose; struct pnv_phb *phb; - struct eeh_pe *phb_pe; + struct eeh_pe *phb_pe, *parent_pe; __be64 frozen_pe_no; __be16 err_type, severity; + int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); long rc; - int ret = EEH_NEXT_ERR_NONE; + int state, ret = EEH_NEXT_ERR_NONE; /* * While running here, it's safe to purge the event queue. @@ -838,6 +839,31 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) ioda_eeh_phb_diag(hose); } + /* + * We probably have the frozen parent PE out there and + * we need have to handle frozen parent PE firstly. + */ + if (ret == EEH_NEXT_ERR_FROZEN_PE) { + parent_pe = (*pe)->parent; + while (parent_pe) { + /* Hit the ceiling ? */ + if (parent_pe->type & EEH_PE_PHB) + break; + + /* Frozen parent PE ? */ + state = ioda_eeh_get_state(parent_pe); + if (state > 0 && + (state & active_flags) != active_flags) + *pe = parent_pe; + + /* Next parent level */ + parent_pe = parent_pe->parent; + } + + /* We possibly migrate to another PE */ + eeh_pe_state_mark(*pe, EEH_PE_ISOLATED); + } + /* * If we have no errors on the specific PHB or only * informative error there, we continue poking it. -- cgit v1.2.3 From 71b540adffd9832e025dd9401178a8ef8c814e50 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Mon, 5 May 2014 09:29:04 +1000 Subject: powerpc/powernv: Don't escalate non-existing frozen PE Commit cb5b242c ("powerpc/eeh: Escalate error on non-existing PE") escalates the frozen state on non-existing PE to fenced PHB. It was to improve kdump reliability. 
After that, commit 361f2a2a ("powrpc/powernv: Reset PHB in kdump kernel") was introduced to issue a complete reset on all PHBs to increase the reliability of the kdump kernel. Commit cb5b242c has therefore become unnecessary and can be reverted. Signed-off-by: Gavin Shan Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/platforms/powernv/eeh-ioda.c | 24 +++++++++--------------- 1 file changed, 9 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index 68167cd9ea97..5711f6f1fda6 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -794,23 +794,17 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) break; case OPAL_EEH_PE_ERROR: /* - * If we can't find the corresponding PE, the - * PEEV / PEST would be messy. So we force an - * fenced PHB so that it can be recovered. - * - * If the PE has been marked as isolated, that - * should have been removed permanently or in - * progress with recovery. We needn't report - * it again. + * If we can't find the corresponding PE, we + * just try to unfreeze. */ if (ioda_eeh_get_pe(hose, - be64_to_cpu(frozen_pe_no), pe)) { - *pe = phb_pe; - pr_err("EEH: Escalated fenced PHB#%x " - "detected for PE#%llx\n", - hose->global_number, - be64_to_cpu(frozen_pe_no)); - ret = EEH_NEXT_ERR_FENCED_PHB; + be64_to_cpu(frozen_pe_no), pe)) { + /* Try best to clear it */ + pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n", + hose->global_number, frozen_pe_no); + opal_pci_eeh_freeze_clear(phb->opal_id, frozen_pe_no, + OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); + ret = EEH_NEXT_ERR_NONE; } else if ((*pe)->state & EEH_PE_ISOLATED) { ret = EEH_NEXT_ERR_NONE; } else { -- cgit v1.2.3 From b69a1da94f3d1589d1942b5d1b384d8cfaac4500 Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Tue, 20 May 2014 21:59:42 +0200 Subject: powerpc: fix typo 'CONFIG_PPC_CPU' Commit cd64d1697cf0 ("powerpc: mtmsrd not defined") added a check for CONFIG_PPC_CPU where a check for CONFIG_PPC_FPU was clearly intended. Fixes: cd64d1697cf0 ("powerpc: mtmsrd not defined") Signed-off-by: Paul Bolle Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/lib/sstep.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index c0511c27a733..412dd46dd0b7 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1470,7 +1470,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) regs->gpr[rd] = byterev_4(val); goto ldst_done; -#ifdef CONFIG_PPC_CPU +#ifdef CONFIG_PPC_FPU case 535: /* lfsx */ case 567: /* lfsux */ if (!(regs->msr & MSR_FP)) -- cgit v1.2.3 From 6e0fdf9af216887e0032c19d276889aad41cad00 Mon Sep 17 00:00:00 2001 From: Paul Bolle Date: Tue, 20 May 2014 22:24:58 +0200 Subject: powerpc: fix typo 'CONFIG_PMAC' Commit b0d278b7d3ae ("powerpc/perf_event: Reduce latency of calling perf_event_do_pending") added a check for CONFIG_PMAC where a check for CONFIG_PPC_PMAC was clearly intended.
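Both typo fixes are instances of the same failure mode: a misspelled Kconfig symbol is simply never defined, so the preprocessor silently drops the guarded block and the compiler never warns. A tiny self-contained illustration; handle_fp_load() is a stand-in, and the #define stands in for what Kconfig would normally generate:

#include <stdio.h>

/* Pretend Kconfig generated this (e.g. via autoconf.h): */
#define CONFIG_PPC_FPU 1

static void handle_fp_load(void) { puts("FP load emulated"); }

int main(void)
{
#ifdef CONFIG_PPC_CPU   /* typo: no such symbol, body silently dropped */
    handle_fp_load();
#endif
#ifdef CONFIG_PPC_FPU   /* correct symbol: body is compiled */
    handle_fp_load();
#endif
    return 0;
}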
Fixes: b0d278b7d3ae ("powerpc/perf_event: Reduce latency of calling perf_event_do_pending") Signed-off-by: Paul Bolle Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/time.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 7e711bdcc6da..9fff9cdcc519 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c @@ -551,7 +551,7 @@ void timer_interrupt(struct pt_regs * regs) may_hard_irq_enable(); -#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) +#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC) if (atomic_read(&ppc_n_lost_interrupts) != 0) do_IRQ(regs); #endif -- cgit v1.2.3 From 5c7a35e3e25232aef8d7aee484436f8cbe3b9b94 Mon Sep 17 00:00:00 2001 From: Gavin Shan Date: Wed, 4 Jun 2014 17:31:52 +1000 Subject: powerpc/powernv: Fix killed EEH event On the PowerNV platform, EEH errors are reported by IO accessors or by a poller driven by interrupt. After the PE is isolated, we won't produce an EEH event for the PE. The current implementation can lose an EEH event in this way: The interrupt handler queues one "special" event, which drives the poller. The EEH thread hasn't picked up the special event yet. IO accessors kick in, the frozen PE is marked as "isolated" and an EEH event is queued to the list. The EEH thread runs because of the special event and purges all existing EEH events. However, we never produce another EEH event for the frozen PE. Eventually, the PE is marked as "isolated" and we don't have an EEH event to recover it. The patch fixes the issue by keeping EEH events for PEs that have been marked as "isolated", with the help of an additional "force" argument to eeh_remove_event(). Reported-by: Rolf Brudeseth Signed-off-by: Gavin Shan Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/eeh_event.h | 2 +- arch/powerpc/kernel/eeh_driver.c | 4 ++-- arch/powerpc/kernel/eeh_event.c | 21 +++++++++++++++------ arch/powerpc/platforms/powernv/eeh-ioda.c | 2 +- 4 files changed, 19 insertions(+), 10 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h index 89d5670b2eeb..1e551a2d6f82 100644 --- a/arch/powerpc/include/asm/eeh_event.h +++ b/arch/powerpc/include/asm/eeh_event.h @@ -33,7 +33,7 @@ struct eeh_event { int eeh_event_init(void); int eeh_send_failure_event(struct eeh_pe *pe); -void eeh_remove_event(struct eeh_pe *pe); +void eeh_remove_event(struct eeh_pe *pe, bool force); void eeh_handle_event(struct eeh_pe *pe); #endif /* __KERNEL__ */ diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c index 8bb40e7cdeb6..420da61d4ce0 100644 --- a/arch/powerpc/kernel/eeh_driver.c +++ b/arch/powerpc/kernel/eeh_driver.c @@ -770,7 +770,7 @@ static void eeh_handle_special_event(void) eeh_serialize_lock(&flags); /* Purge all events */ - eeh_remove_event(NULL); + eeh_remove_event(NULL, true); list_for_each_entry(hose, &hose_list, list_node) { phb_pe = eeh_phb_pe_get(hose); @@ -789,7 +789,7 @@ static void eeh_handle_special_event(void) eeh_serialize_lock(&flags); /* Purge all events of the PHB */ - eeh_remove_event(pe); + eeh_remove_event(pe, true); if (rc == EEH_NEXT_ERR_DEAD_PHB) eeh_pe_state_mark(pe, EEH_PE_ISOLATED); diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c index 72d748b56c86..4eefb6e34dbb 100644 --- a/arch/powerpc/kernel/eeh_event.c +++ b/arch/powerpc/kernel/eeh_event.c @@ -152,24 +152,33 @@ int eeh_send_failure_event(struct eeh_pe *pe) /** * eeh_remove_event -
Remove EEH event from the queue * @pe: Event binding to the PE + * @force: Event will be removed unconditionally * * On PowerNV platform, we might have subsequent coming events * is part of the former one. For that case, those subsequent * coming events are totally duplicated and unnecessary, thus * they should be removed. */ -void eeh_remove_event(struct eeh_pe *pe) +void eeh_remove_event(struct eeh_pe *pe, bool force) { unsigned long flags; struct eeh_event *event, *tmp; + /* + * If we have NULL PE passed in, we have dead IOC + * or we're sure we can report all existing errors + * by the caller. + * + * With "force", the event with associated PE that + * have been isolated, the event won't be removed + * to avoid event lost. + */ spin_lock_irqsave(&eeh_eventlist_lock, flags); list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) { - /* - * If we don't have valid PE passed in, that means - * we already have event corresponding to dead IOC - * and all events should be purged. - */ + if (!force && event->pe && + (event->pe->state & EEH_PE_ISOLATED)) + continue; + if (!pe) { list_del(&event->list); kfree(event); diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index 5711f6f1fda6..9c002099f875 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -717,7 +717,7 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) * And we should keep the cached OPAL notifier event sychronized * between the kernel and firmware. */ - eeh_remove_event(NULL); + eeh_remove_event(NULL, false); opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul); list_for_each_entry(hose, &hose_list, list_node) { -- cgit v1.2.3 From d4e58e5928f8c6c49228451dd03e0714cbab299a Mon Sep 17 00:00:00 2001 From: Michael Neuling Date: Wed, 11 Jun 2014 15:59:28 +1000 Subject: powerpc/powernv: Enable POWER8 doorbell IPIs This patch enables POWER8 doorbell IPIs on powernv. Since doorbells can only IPI within a core, we test to see when we can use doorbells and if not we fall back to XICS. This also enables hypervisor doorbells to wake us up from nap/sleep via the LPCR PECEDH bit. Based on tests by Anton, the best case IPI latency between two threads dropped from 894ns to 512ns.
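The core-local constraint drives the shape of the code: a doorbell is used only when the target CPU is a sibling thread on the sender's core, otherwise the XICS path is kept. A condensed sketch of that decision, mirroring the icp_native_cause_ipi() change in the diff below; the wrapper name is illustrative and the helpers are the ones the patch uses:

/* Prefer a doorbell (msgsnd) when the target is a sibling thread on
 * our core; otherwise fall back to the XICS interrupt controller. */
static void cause_ipi(int cpu, unsigned long data)
{
    if (cpu_has_feature(CPU_FTR_DBELL) &&
        cpumask_test_cpu(cpu, cpu_sibling_mask(smp_processor_id())))
        doorbell_cause_ipi(cpu, data);          /* core-local msgsnd */
    else
        icp_native_set_qirr(cpu, IPI_PRIORITY); /* XICS MMIO */
}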
Signed-off-by: Michael Neuling Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/cpu_setup_power.S | 2 ++ arch/powerpc/platforms/powernv/Kconfig | 1 + arch/powerpc/platforms/powernv/smp.c | 6 ++++++ arch/powerpc/platforms/pseries/Kconfig | 1 + arch/powerpc/sysdev/xics/icp-native.c | 9 ++++++++- 5 files changed, 18 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S index 1557e7c2c7e1..46733535cc0b 100644 --- a/arch/powerpc/kernel/cpu_setup_power.S +++ b/arch/powerpc/kernel/cpu_setup_power.S @@ -56,6 +56,7 @@ _GLOBAL(__setup_cpu_power8) li r0,0 mtspr SPRN_LPID,r0 mfspr r3,SPRN_LPCR + ori r3, r3, LPCR_PECEDH bl __init_LPCR bl __init_HFSCR bl __init_tlb_power8 @@ -74,6 +75,7 @@ _GLOBAL(__restore_cpu_power8) li r0,0 mtspr SPRN_LPID,r0 mfspr r3,SPRN_LPCR + ori r3, r3, LPCR_PECEDH bl __init_LPCR bl __init_HFSCR bl __init_tlb_power8 diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig index c252ee95bddf..45a8ed0585cd 100644 --- a/arch/powerpc/platforms/powernv/Kconfig +++ b/arch/powerpc/platforms/powernv/Kconfig @@ -17,6 +17,7 @@ config PPC_POWERNV select CPU_FREQ_GOV_USERSPACE select CPU_FREQ_GOV_ONDEMAND select CPU_FREQ_GOV_CONSERVATIVE + select PPC_DOORBELL default y config PPC_POWERNV_RTAS diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c index 0062a43a2e0d..5fcfcf44e3a9 100644 --- a/arch/powerpc/platforms/powernv/smp.c +++ b/arch/powerpc/platforms/powernv/smp.c @@ -32,6 +32,7 @@ #include #include #include +#include #include "powernv.h" @@ -46,6 +47,11 @@ static void pnv_smp_setup_cpu(int cpu) { if (cpu != boot_cpuid) xics_setup_cpu(); + +#ifdef CONFIG_PPC_DOORBELL + if (cpu_has_feature(CPU_FTR_DBELL)) + doorbell_setup_this_cpu(); +#endif } int pnv_smp_kick_cpu(int nr) diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index 2cb8b776c84a..756b482f819a 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -21,6 +21,7 @@ config PPC_PSERIES select HAVE_CONTEXT_TRACKING select HOTPLUG_CPU if SMP select ARCH_RANDOM + select PPC_DOORBELL default y config PPC_SPLPAR diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c index 9dee47071af8..de8d9483bbe8 100644 --- a/arch/powerpc/sysdev/xics/icp-native.c +++ b/arch/powerpc/sysdev/xics/icp-native.c @@ -26,6 +26,7 @@ #include #include #include +#include struct icp_ipl { union { @@ -145,7 +146,13 @@ static unsigned int icp_native_get_irq(void) static void icp_native_cause_ipi(int cpu, unsigned long data) { kvmppc_set_host_ipi(cpu, 1); - icp_native_set_qirr(cpu, IPI_PRIORITY); +#ifdef CONFIG_PPC_DOORBELL + if (cpu_has_feature(CPU_FTR_DBELL) && + (cpumask_test_cpu(cpu, cpu_sibling_mask(smp_processor_id())))) + doorbell_cause_ipi(cpu, data); + else +#endif + icp_native_set_qirr(cpu, IPI_PRIORITY); } void xics_wake_cpu(int cpu) -- cgit v1.2.3 From e430f34ee5192c84bcabd3c79ab7e2388b5eec74 Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Fri, 6 Jun 2014 14:46:06 -0700 Subject: net: filter: cleanup A/X name usage The macro 'A' used in internal BPF interpreter: #define A regs[insn->a_reg] was easily confused with the name of classic BPF register 'A', since 'A' would mean two different things depending on context. 
This patch is trying to clean up the naming and clarify its usage in the following way: - A and X are names of two classic BPF registers - BPF_REG_A denotes internal BPF register R0 used to map classic register A in internal BPF programs generated from classic - BPF_REG_X denotes internal BPF register R7 used to map classic register X in internal BPF programs generated from classic - internal BPF instruction format: struct sock_filter_int { __u8 code; /* opcode */ __u8 dst_reg:4; /* dest register */ __u8 src_reg:4; /* source register */ __s16 off; /* signed offset */ __s32 imm; /* signed immediate constant */ }; - BPF_X/BPF_K is 1 bit used to encode source operand of instruction In classic: BPF_X - means use register X as source operand BPF_K - means use 32-bit immediate as source operand In internal: BPF_X - means use 'src_reg' register as source operand BPF_K - means use 32-bit immediate as source operand Suggested-by: Chema Gonzalez Signed-off-by: Alexei Starovoitov Acked-by: Daniel Borkmann Acked-by: Chema Gonzalez Signed-off-by: David S. Miller --- Documentation/networking/filter.txt | 2 +- arch/x86/net/bpf_jit_comp.c | 260 ++++++++++++++++++------------------ include/linux/filter.h | 156 ++++++++++++---------- net/core/filter.c | 198 +++++++++++++-------------- 4 files changed, 314 insertions(+), 302 deletions(-) (limited to 'arch') diff --git a/Documentation/networking/filter.txt b/Documentation/networking/filter.txt index 58c443926647..9f49b8690500 100644 --- a/Documentation/networking/filter.txt +++ b/Documentation/networking/filter.txt @@ -805,7 +805,7 @@ to seccomp_data, for converted BPF filters R1 points to a skb. A program, that is translated internally consists of the following elements: - op:16, jt:8, jf:8, k:32 ==> op:8, a_reg:4, x_reg:4, off:16, imm:32 + op:16, jt:8, jf:8, k:32 ==> op:8, dst_reg:4, src_reg:4, off:16, imm:32 So far 87 internal BPF instructions were implemented. 8-bit 'op' opcode field has room for new instructions. Some of them may use 16/24/32 byte encoding. 
New diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 080f3f071bb0..99bef86ed6df 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -64,10 +64,10 @@ static inline bool is_simm32(s64 value) return value == (s64) (s32) value; } -/* mov A, X */ -#define EMIT_mov(A, X) \ - do {if (A != X) \ - EMIT3(add_2mod(0x48, A, X), 0x89, add_2reg(0xC0, A, X)); \ +/* mov dst, src */ +#define EMIT_mov(DST, SRC) \ + do {if (DST != SRC) \ + EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \ } while (0) static int bpf_size_to_x86_bytes(int bpf_size) @@ -194,16 +194,16 @@ static inline u8 add_2mod(u8 byte, u32 r1, u32 r2) return byte; } -/* encode dest register 'a_reg' into x64 opcode 'byte' */ -static inline u8 add_1reg(u8 byte, u32 a_reg) +/* encode 'dst_reg' register into x64 opcode 'byte' */ +static inline u8 add_1reg(u8 byte, u32 dst_reg) { - return byte + reg2hex[a_reg]; + return byte + reg2hex[dst_reg]; } -/* encode dest 'a_reg' and src 'x_reg' registers into x64 opcode 'byte' */ -static inline u8 add_2reg(u8 byte, u32 a_reg, u32 x_reg) +/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */ +static inline u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg) { - return byte + reg2hex[a_reg] + (reg2hex[x_reg] << 3); + return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3); } struct jit_context { @@ -286,9 +286,9 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, } for (i = 0; i < insn_cnt; i++, insn++) { - const s32 K = insn->imm; - u32 a_reg = insn->a_reg; - u32 x_reg = insn->x_reg; + const s32 imm32 = insn->imm; + u32 dst_reg = insn->dst_reg; + u32 src_reg = insn->src_reg; u8 b1 = 0, b2 = 0, b3 = 0; s64 jmp_offset; u8 jmp_cond; @@ -315,32 +315,32 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, case BPF_XOR: b2 = 0x31; break; } if (BPF_CLASS(insn->code) == BPF_ALU64) - EMIT1(add_2mod(0x48, a_reg, x_reg)); - else if (is_ereg(a_reg) || is_ereg(x_reg)) - EMIT1(add_2mod(0x40, a_reg, x_reg)); - EMIT2(b2, add_2reg(0xC0, a_reg, x_reg)); + EMIT1(add_2mod(0x48, dst_reg, src_reg)); + else if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT1(add_2mod(0x40, dst_reg, src_reg)); + EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg)); break; - /* mov A, X */ + /* mov dst, src */ case BPF_ALU64 | BPF_MOV | BPF_X: - EMIT_mov(a_reg, x_reg); + EMIT_mov(dst_reg, src_reg); break; - /* mov32 A, X */ + /* mov32 dst, src */ case BPF_ALU | BPF_MOV | BPF_X: - if (is_ereg(a_reg) || is_ereg(x_reg)) - EMIT1(add_2mod(0x40, a_reg, x_reg)); - EMIT2(0x89, add_2reg(0xC0, a_reg, x_reg)); + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT1(add_2mod(0x40, dst_reg, src_reg)); + EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg)); break; - /* neg A */ + /* neg dst */ case BPF_ALU | BPF_NEG: case BPF_ALU64 | BPF_NEG: if (BPF_CLASS(insn->code) == BPF_ALU64) - EMIT1(add_1mod(0x48, a_reg)); - else if (is_ereg(a_reg)) - EMIT1(add_1mod(0x40, a_reg)); - EMIT2(0xF7, add_1reg(0xD8, a_reg)); + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); + EMIT2(0xF7, add_1reg(0xD8, dst_reg)); break; case BPF_ALU | BPF_ADD | BPF_K: @@ -354,9 +354,9 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, case BPF_ALU64 | BPF_OR | BPF_K: case BPF_ALU64 | BPF_XOR | BPF_K: if (BPF_CLASS(insn->code) == BPF_ALU64) - EMIT1(add_1mod(0x48, a_reg)); - else if (is_ereg(a_reg)) - EMIT1(add_1mod(0x40, a_reg)); + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); 
switch (BPF_OP(insn->code)) { case BPF_ADD: b3 = 0xC0; break; @@ -366,10 +366,10 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, case BPF_XOR: b3 = 0xF0; break; } - if (is_imm8(K)) - EMIT3(0x83, add_1reg(b3, a_reg), K); + if (is_imm8(imm32)) + EMIT3(0x83, add_1reg(b3, dst_reg), imm32); else - EMIT2_off32(0x81, add_1reg(b3, a_reg), K); + EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32); break; case BPF_ALU64 | BPF_MOV | BPF_K: @@ -377,23 +377,23 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, * use 'mov eax, imm32' (which zero-extends imm32) * to save 2 bytes */ - if (K < 0) { + if (imm32 < 0) { /* 'mov rax, imm32' sign extends imm32 */ - b1 = add_1mod(0x48, a_reg); + b1 = add_1mod(0x48, dst_reg); b2 = 0xC7; b3 = 0xC0; - EMIT3_off32(b1, b2, add_1reg(b3, a_reg), K); + EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32); break; } case BPF_ALU | BPF_MOV | BPF_K: /* mov %eax, imm32 */ - if (is_ereg(a_reg)) - EMIT1(add_1mod(0x40, a_reg)); - EMIT1_off32(add_1reg(0xB8, a_reg), K); + if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); + EMIT1_off32(add_1reg(0xB8, dst_reg), imm32); break; - /* A %= X, A /= X, A %= K, A /= K */ + /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */ case BPF_ALU | BPF_MOD | BPF_X: case BPF_ALU | BPF_DIV | BPF_X: case BPF_ALU | BPF_MOD | BPF_K: @@ -406,14 +406,14 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, EMIT1(0x52); /* push rdx */ if (BPF_SRC(insn->code) == BPF_X) - /* mov r11, X */ - EMIT_mov(AUX_REG, x_reg); + /* mov r11, src_reg */ + EMIT_mov(AUX_REG, src_reg); else - /* mov r11, K */ - EMIT3_off32(0x49, 0xC7, 0xC3, K); + /* mov r11, imm32 */ + EMIT3_off32(0x49, 0xC7, 0xC3, imm32); - /* mov rax, A */ - EMIT_mov(BPF_REG_0, a_reg); + /* mov rax, dst_reg */ + EMIT_mov(BPF_REG_0, dst_reg); /* xor edx, edx * equivalent to 'xor rdx, rdx', but one byte less @@ -421,7 +421,7 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, EMIT2(0x31, 0xd2); if (BPF_SRC(insn->code) == BPF_X) { - /* if (X == 0) return 0 */ + /* if (src_reg == 0) return 0 */ /* cmp r11, 0 */ EMIT4(0x49, 0x83, 0xFB, 0x00); @@ -457,8 +457,8 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, EMIT1(0x5A); /* pop rdx */ EMIT1(0x58); /* pop rax */ - /* mov A, r11 */ - EMIT_mov(a_reg, AUX_REG); + /* mov dst_reg, r11 */ + EMIT_mov(dst_reg, AUX_REG); break; case BPF_ALU | BPF_MUL | BPF_K: @@ -468,15 +468,15 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, EMIT1(0x50); /* push rax */ EMIT1(0x52); /* push rdx */ - /* mov r11, A */ - EMIT_mov(AUX_REG, a_reg); + /* mov r11, dst_reg */ + EMIT_mov(AUX_REG, dst_reg); if (BPF_SRC(insn->code) == BPF_X) - /* mov rax, X */ - EMIT_mov(BPF_REG_0, x_reg); + /* mov rax, src_reg */ + EMIT_mov(BPF_REG_0, src_reg); else - /* mov rax, K */ - EMIT3_off32(0x48, 0xC7, 0xC0, K); + /* mov rax, imm32 */ + EMIT3_off32(0x48, 0xC7, 0xC0, imm32); if (BPF_CLASS(insn->code) == BPF_ALU64) EMIT1(add_1mod(0x48, AUX_REG)); @@ -491,8 +491,8 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, EMIT1(0x5A); /* pop rdx */ EMIT1(0x58); /* pop rax */ - /* mov A, r11 */ - EMIT_mov(a_reg, AUX_REG); + /* mov dst_reg, r11 */ + EMIT_mov(dst_reg, AUX_REG); break; /* shifts */ @@ -503,39 +503,39 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, case BPF_ALU64 | BPF_RSH | BPF_K: case BPF_ALU64 | BPF_ARSH | BPF_K: if (BPF_CLASS(insn->code) == BPF_ALU64) - EMIT1(add_1mod(0x48, a_reg)); - else if (is_ereg(a_reg)) - 
EMIT1(add_1mod(0x40, a_reg)); + EMIT1(add_1mod(0x48, dst_reg)); + else if (is_ereg(dst_reg)) + EMIT1(add_1mod(0x40, dst_reg)); switch (BPF_OP(insn->code)) { case BPF_LSH: b3 = 0xE0; break; case BPF_RSH: b3 = 0xE8; break; case BPF_ARSH: b3 = 0xF8; break; } - EMIT3(0xC1, add_1reg(b3, a_reg), K); + EMIT3(0xC1, add_1reg(b3, dst_reg), imm32); break; case BPF_ALU | BPF_END | BPF_FROM_BE: - switch (K) { + switch (imm32) { case 16: /* emit 'ror %ax, 8' to swap lower 2 bytes */ EMIT1(0x66); - if (is_ereg(a_reg)) + if (is_ereg(dst_reg)) EMIT1(0x41); - EMIT3(0xC1, add_1reg(0xC8, a_reg), 8); + EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8); break; case 32: /* emit 'bswap eax' to swap lower 4 bytes */ - if (is_ereg(a_reg)) + if (is_ereg(dst_reg)) EMIT2(0x41, 0x0F); else EMIT1(0x0F); - EMIT1(add_1reg(0xC8, a_reg)); + EMIT1(add_1reg(0xC8, dst_reg)); break; case 64: /* emit 'bswap rax' to swap 8 bytes */ - EMIT3(add_1mod(0x48, a_reg), 0x0F, - add_1reg(0xC8, a_reg)); + EMIT3(add_1mod(0x48, dst_reg), 0x0F, + add_1reg(0xC8, dst_reg)); break; } break; @@ -543,117 +543,117 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image, case BPF_ALU | BPF_END | BPF_FROM_LE: break; - /* ST: *(u8*)(a_reg + off) = imm */ + /* ST: *(u8*)(dst_reg + off) = imm */ case BPF_ST | BPF_MEM | BPF_B: - if (is_ereg(a_reg)) + if (is_ereg(dst_reg)) EMIT2(0x41, 0xC6); else EMIT1(0xC6); goto st; case BPF_ST | BPF_MEM | BPF_H: - if (is_ereg(a_reg)) + if (is_ereg(dst_reg)) EMIT3(0x66, 0x41, 0xC7); else EMIT2(0x66, 0xC7); goto st; case BPF_ST | BPF_MEM | BPF_W: - if (is_ereg(a_reg)) + if (is_ereg(dst_reg)) EMIT2(0x41, 0xC7); else EMIT1(0xC7); goto st; case BPF_ST | BPF_MEM | BPF_DW: - EMIT2(add_1mod(0x48, a_reg), 0xC7); + EMIT2(add_1mod(0x48, dst_reg), 0xC7); st: if (is_imm8(insn->off)) - EMIT2(add_1reg(0x40, a_reg), insn->off); + EMIT2(add_1reg(0x40, dst_reg), insn->off); else - EMIT1_off32(add_1reg(0x80, a_reg), insn->off); + EMIT1_off32(add_1reg(0x80, dst_reg), insn->off); - EMIT(K, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); + EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code))); break; - /* STX: *(u8*)(a_reg + off) = x_reg */ + /* STX: *(u8*)(dst_reg + off) = src_reg */ case BPF_STX | BPF_MEM | BPF_B: /* emit 'mov byte ptr [rax + off], al' */ - if (is_ereg(a_reg) || is_ereg(x_reg) || + if (is_ereg(dst_reg) || is_ereg(src_reg) || /* have to add extra byte for x86 SIL, DIL regs */ - x_reg == BPF_REG_1 || x_reg == BPF_REG_2) - EMIT2(add_2mod(0x40, a_reg, x_reg), 0x88); + src_reg == BPF_REG_1 || src_reg == BPF_REG_2) + EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88); else EMIT1(0x88); goto stx; case BPF_STX | BPF_MEM | BPF_H: - if (is_ereg(a_reg) || is_ereg(x_reg)) - EMIT3(0x66, add_2mod(0x40, a_reg, x_reg), 0x89); + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89); else EMIT2(0x66, 0x89); goto stx; case BPF_STX | BPF_MEM | BPF_W: - if (is_ereg(a_reg) || is_ereg(x_reg)) - EMIT2(add_2mod(0x40, a_reg, x_reg), 0x89); + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89); else EMIT1(0x89); goto stx; case BPF_STX | BPF_MEM | BPF_DW: - EMIT2(add_2mod(0x48, a_reg, x_reg), 0x89); + EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89); stx: if (is_imm8(insn->off)) - EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off); + EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off); else - EMIT1_off32(add_2reg(0x80, a_reg, x_reg), + EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), insn->off); break; - /* LDX: a_reg = *(u8*)(x_reg + off) */ + /* LDX: dst_reg = *(u8*)(src_reg + 
off) */ case BPF_LDX | BPF_MEM | BPF_B: /* emit 'movzx rax, byte ptr [rax + off]' */ - EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB6); + EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6); goto ldx; case BPF_LDX | BPF_MEM | BPF_H: /* emit 'movzx rax, word ptr [rax + off]' */ - EMIT3(add_2mod(0x48, x_reg, a_reg), 0x0F, 0xB7); + EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7); goto ldx; case BPF_LDX | BPF_MEM | BPF_W: /* emit 'mov eax, dword ptr [rax+0x14]' */ - if (is_ereg(a_reg) || is_ereg(x_reg)) - EMIT2(add_2mod(0x40, x_reg, a_reg), 0x8B); + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B); else EMIT1(0x8B); goto ldx; case BPF_LDX | BPF_MEM | BPF_DW: /* emit 'mov rax, qword ptr [rax+0x14]' */ - EMIT2(add_2mod(0x48, x_reg, a_reg), 0x8B); + EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B); ldx: /* if insn->off == 0 we can save one extra byte, but * special case of x86 r13 which always needs an offset * is not worth the hassle */ if (is_imm8(insn->off)) - EMIT2(add_2reg(0x40, x_reg, a_reg), insn->off); + EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off); else - EMIT1_off32(add_2reg(0x80, x_reg, a_reg), + EMIT1_off32(add_2reg(0x80, src_reg, dst_reg), insn->off); break; - /* STX XADD: lock *(u32*)(a_reg + off) += x_reg */ + /* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */ case BPF_STX | BPF_XADD | BPF_W: /* emit 'lock add dword ptr [rax + off], eax' */ - if (is_ereg(a_reg) || is_ereg(x_reg)) - EMIT3(0xF0, add_2mod(0x40, a_reg, x_reg), 0x01); + if (is_ereg(dst_reg) || is_ereg(src_reg)) + EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01); else EMIT2(0xF0, 0x01); goto xadd; case BPF_STX | BPF_XADD | BPF_DW: - EMIT3(0xF0, add_2mod(0x48, a_reg, x_reg), 0x01); + EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01); xadd: if (is_imm8(insn->off)) - EMIT2(add_2reg(0x40, a_reg, x_reg), insn->off); + EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off); else - EMIT1_off32(add_2reg(0x80, a_reg, x_reg), + EMIT1_off32(add_2reg(0x80, dst_reg, src_reg), insn->off); break; /* call */ case BPF_JMP | BPF_CALL: - func = (u8 *) __bpf_call_base + K; + func = (u8 *) __bpf_call_base + imm32; jmp_offset = func - (image + addrs[i]); if (ctx->seen_ld_abs) { EMIT2(0x41, 0x52); /* push %r10 */ @@ -663,9 +663,9 @@ xadd: if (is_imm8(insn->off)) */ jmp_offset += 4; } - if (!K || !is_simm32(jmp_offset)) { + if (!imm32 || !is_simm32(jmp_offset)) { pr_err("unsupported bpf func %d addr %p image %p\n", - K, func, image); + imm32, func, image); return -EINVAL; } EMIT1_off32(0xE8, jmp_offset); @@ -682,21 +682,21 @@ xadd: if (is_imm8(insn->off)) case BPF_JMP | BPF_JGE | BPF_X: case BPF_JMP | BPF_JSGT | BPF_X: case BPF_JMP | BPF_JSGE | BPF_X: - /* cmp a_reg, x_reg */ - EMIT3(add_2mod(0x48, a_reg, x_reg), 0x39, - add_2reg(0xC0, a_reg, x_reg)); + /* cmp dst_reg, src_reg */ + EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39, + add_2reg(0xC0, dst_reg, src_reg)); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_X: - /* test a_reg, x_reg */ - EMIT3(add_2mod(0x48, a_reg, x_reg), 0x85, - add_2reg(0xC0, a_reg, x_reg)); + /* test dst_reg, src_reg */ + EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85, + add_2reg(0xC0, dst_reg, src_reg)); goto emit_cond_jmp; case BPF_JMP | BPF_JSET | BPF_K: - /* test a_reg, imm32 */ - EMIT1(add_1mod(0x48, a_reg)); - EMIT2_off32(0xF7, add_1reg(0xC0, a_reg), K); + /* test dst_reg, imm32 */ + EMIT1(add_1mod(0x48, dst_reg)); + EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32); goto emit_cond_jmp; case BPF_JMP | BPF_JEQ | BPF_K: @@ -705,13 +705,13 @@ xadd: if 
(is_imm8(insn->off)) case BPF_JMP | BPF_JGE | BPF_K: case BPF_JMP | BPF_JSGT | BPF_K: case BPF_JMP | BPF_JSGE | BPF_K: - /* cmp a_reg, imm8/32 */ - EMIT1(add_1mod(0x48, a_reg)); + /* cmp dst_reg, imm8/32 */ + EMIT1(add_1mod(0x48, dst_reg)); - if (is_imm8(K)) - EMIT3(0x83, add_1reg(0xF8, a_reg), K); + if (is_imm8(imm32)) + EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32); else - EMIT2_off32(0x81, add_1reg(0xF8, a_reg), K); + EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32); emit_cond_jmp: /* convert BPF opcode to x86 */ switch (BPF_OP(insn->code)) { @@ -773,27 +773,27 @@ emit_jmp: func = sk_load_word; goto common_load; case BPF_LD | BPF_ABS | BPF_W: - func = CHOOSE_LOAD_FUNC(K, sk_load_word); + func = CHOOSE_LOAD_FUNC(imm32, sk_load_word); common_load: ctx->seen_ld_abs = true; jmp_offset = func - (image + addrs[i]); if (!func || !is_simm32(jmp_offset)) { pr_err("unsupported bpf func %d addr %p image %p\n", - K, func, image); + imm32, func, image); return -EINVAL; } if (BPF_MODE(insn->code) == BPF_ABS) { /* mov %esi, imm32 */ - EMIT1_off32(0xBE, K); + EMIT1_off32(0xBE, imm32); } else { - /* mov %rsi, x_reg */ - EMIT_mov(BPF_REG_2, x_reg); - if (K) { - if (is_imm8(K)) + /* mov %rsi, src_reg */ + EMIT_mov(BPF_REG_2, src_reg); + if (imm32) { + if (is_imm8(imm32)) /* add %esi, imm8 */ - EMIT3(0x83, 0xC6, K); + EMIT3(0x83, 0xC6, imm32); else /* add %esi, imm32 */ - EMIT2_off32(0x81, 0xC6, K); + EMIT2_off32(0x81, 0xC6, imm32); } } /* skb pointer is in R6 (%rbx), it will be copied into @@ -808,13 +808,13 @@ common_load: ctx->seen_ld_abs = true; func = sk_load_half; goto common_load; case BPF_LD | BPF_ABS | BPF_H: - func = CHOOSE_LOAD_FUNC(K, sk_load_half); + func = CHOOSE_LOAD_FUNC(imm32, sk_load_half); goto common_load; case BPF_LD | BPF_IND | BPF_B: func = sk_load_byte; goto common_load; case BPF_LD | BPF_ABS | BPF_B: - func = CHOOSE_LOAD_FUNC(K, sk_load_byte); + func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte); goto common_load; case BPF_JMP | BPF_EXIT: diff --git a/include/linux/filter.h b/include/linux/filter.h index f0c2ad43b4af..a7e3c48d73a7 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -78,161 +78,173 @@ enum { /* Helper macros for filter block array initializers. 
*/ -/* ALU ops on registers, bpf_add|sub|...: A += X */ +/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ -#define BPF_ALU64_REG(OP, A, X) \ +#define BPF_ALU64_REG(OP, DST, SRC) \ ((struct sock_filter_int) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = 0, \ .imm = 0 }) -#define BPF_ALU32_REG(OP, A, X) \ +#define BPF_ALU32_REG(OP, DST, SRC) \ ((struct sock_filter_int) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_X, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = 0, \ .imm = 0 }) -/* ALU ops on immediates, bpf_add|sub|...: A += IMM */ +/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */ -#define BPF_ALU64_IMM(OP, A, IMM) \ +#define BPF_ALU64_IMM(OP, DST, IMM) \ ((struct sock_filter_int) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ - .a_reg = A, \ - .x_reg = 0, \ + .dst_reg = DST, \ + .src_reg = 0, \ .off = 0, \ .imm = IMM }) -#define BPF_ALU32_IMM(OP, A, IMM) \ +#define BPF_ALU32_IMM(OP, DST, IMM) \ ((struct sock_filter_int) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ - .a_reg = A, \ - .x_reg = 0, \ + .dst_reg = DST, \ + .src_reg = 0, \ .off = 0, \ .imm = IMM }) /* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */ -#define BPF_ENDIAN(TYPE, A, LEN) \ +#define BPF_ENDIAN(TYPE, DST, LEN) \ ((struct sock_filter_int) { \ .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \ - .a_reg = A, \ - .x_reg = 0, \ + .dst_reg = DST, \ + .src_reg = 0, \ .off = 0, \ .imm = LEN }) -/* Short form of mov, A = X */ +/* Short form of mov, dst_reg = src_reg */ -#define BPF_MOV64_REG(A, X) \ +#define BPF_MOV64_REG(DST, SRC) \ ((struct sock_filter_int) { \ .code = BPF_ALU64 | BPF_MOV | BPF_X, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = 0, \ .imm = 0 }) -#define BPF_MOV32_REG(A, X) \ +#define BPF_MOV32_REG(DST, SRC) \ ((struct sock_filter_int) { \ .code = BPF_ALU | BPF_MOV | BPF_X, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = 0, \ .imm = 0 }) -/* Short form of mov, A = IMM */ +/* Short form of mov, dst_reg = imm32 */ -#define BPF_MOV64_IMM(A, IMM) \ +#define BPF_MOV64_IMM(DST, IMM) \ ((struct sock_filter_int) { \ .code = BPF_ALU64 | BPF_MOV | BPF_K, \ - .a_reg = A, \ - .x_reg = 0, \ + .dst_reg = DST, \ + .src_reg = 0, \ .off = 0, \ .imm = IMM }) -#define BPF_MOV32_IMM(A, IMM) \ +#define BPF_MOV32_IMM(DST, IMM) \ ((struct sock_filter_int) { \ .code = BPF_ALU | BPF_MOV | BPF_K, \ - .a_reg = A, \ - .x_reg = 0, \ + .dst_reg = DST, \ + .src_reg = 0, \ .off = 0, \ .imm = IMM }) -/* Short form of mov based on type, BPF_X: A = X, BPF_K: A = IMM */ +/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ -#define BPF_MOV64_RAW(TYPE, A, X, IMM) \ +#define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ ((struct sock_filter_int) { \ .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = 0, \ .imm = IMM }) -#define BPF_MOV32_RAW(TYPE, A, X, IMM) \ +#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \ ((struct sock_filter_int) { \ .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = 0, \ .imm = IMM }) -/* Direct packet access, R0 = *(uint *) (skb->data + OFF) */ +/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */ -#define BPF_LD_ABS(SIZE, OFF) \ +#define BPF_LD_ABS(SIZE, IMM) \ ((struct sock_filter_int) { \ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \ - .a_reg = 0, 
\ - .x_reg = 0, \ + .dst_reg = 0, \ + .src_reg = 0, \ .off = 0, \ - .imm = OFF }) + .imm = IMM }) -/* Indirect packet access, R0 = *(uint *) (skb->data + X + OFF) */ +/* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */ -#define BPF_LD_IND(SIZE, X, OFF) \ +#define BPF_LD_IND(SIZE, SRC, IMM) \ ((struct sock_filter_int) { \ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_IND, \ - .a_reg = 0, \ - .x_reg = X, \ + .dst_reg = 0, \ + .src_reg = SRC, \ .off = 0, \ - .imm = OFF }) + .imm = IMM }) -/* Memory store, A = *(uint *) (X + OFF), and vice versa */ +/* Memory load, dst_reg = *(uint *) (src_reg + off16) */ -#define BPF_LDX_MEM(SIZE, A, X, OFF) \ +#define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ ((struct sock_filter_int) { \ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) -#define BPF_STX_MEM(SIZE, A, X, OFF) \ +/* Memory store, *(uint *) (dst_reg + off16) = src_reg */ + +#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ ((struct sock_filter_int) { \ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) -/* Conditional jumps against registers, if (A 'op' X) goto pc + OFF */ +/* Memory store, *(uint *) (dst_reg + off16) = imm32 */ + +#define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ + ((struct sock_filter_int) { \ + .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \ + .dst_reg = DST, \ + .src_reg = 0, \ + .off = OFF, \ + .imm = IMM }) + +/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */ -#define BPF_JMP_REG(OP, A, X, OFF) \ +#define BPF_JMP_REG(OP, DST, SRC, OFF) \ ((struct sock_filter_int) { \ .code = BPF_JMP | BPF_OP(OP) | BPF_X, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = OFF, \ .imm = 0 }) -/* Conditional jumps against immediates, if (A 'op' IMM) goto pc + OFF */ +/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ -#define BPF_JMP_IMM(OP, A, IMM, OFF) \ +#define BPF_JMP_IMM(OP, DST, IMM, OFF) \ ((struct sock_filter_int) { \ .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ - .a_reg = A, \ - .x_reg = 0, \ + .dst_reg = DST, \ + .src_reg = 0, \ .off = OFF, \ .imm = IMM }) @@ -241,18 +253,18 @@ enum { #define BPF_EMIT_CALL(FUNC) \ ((struct sock_filter_int) { \ .code = BPF_JMP | BPF_CALL, \ - .a_reg = 0, \ - .x_reg = 0, \ + .dst_reg = 0, \ + .src_reg = 0, \ .off = 0, \ .imm = ((FUNC) - __bpf_call_base) }) /* Raw code statement block */ -#define BPF_RAW_INSN(CODE, A, X, OFF, IMM) \ +#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ ((struct sock_filter_int) { \ .code = CODE, \ - .a_reg = A, \ - .x_reg = X, \ + .dst_reg = DST, \ + .src_reg = SRC, \ .off = OFF, \ .imm = IMM }) @@ -261,8 +273,8 @@ enum { #define BPF_EXIT_INSN() \ ((struct sock_filter_int) { \ .code = BPF_JMP | BPF_EXIT, \ - .a_reg = 0, \ - .x_reg = 0, \ + .dst_reg = 0, \ + .src_reg = 0, \ .off = 0, \ .imm = 0 }) @@ -287,8 +299,8 @@ enum { struct sock_filter_int { __u8 code; /* opcode */ - __u8 a_reg:4; /* dest register */ - __u8 x_reg:4; /* source register */ + __u8 dst_reg:4; /* dest register */ + __u8 src_reg:4; /* source register */ __s16 off; /* signed offset */ __s32 imm; /* signed immediate constant */ }; diff --git a/net/core/filter.c b/net/core/filter.c index 6bd2e350e751..b3f21751b238 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -59,12 +59,12 @@ #define BPF_R10 regs[BPF_REG_10] /* Named registers */ -#define A regs[insn->a_reg] -#define X 
regs[insn->x_reg] +#define DST regs[insn->dst_reg] +#define SRC regs[insn->src_reg] #define FP regs[BPF_REG_FP] #define ARG1 regs[BPF_REG_ARG1] #define CTX regs[BPF_REG_CTX] -#define K insn->imm +#define IMM insn->imm /* No hurry in this branch * @@ -264,7 +264,7 @@ static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *ins FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)]; ARG1 = (u64) (unsigned long) ctx; - /* Register for user BPF programs need to be reset first. */ + /* Registers used in classic BPF programs need to be reset first. */ regs[BPF_REG_A] = 0; regs[BPF_REG_X] = 0; @@ -274,16 +274,16 @@ select_insn: /* ALU */ #define ALU(OPCODE, OP) \ ALU64_##OPCODE##_X: \ - A = A OP X; \ + DST = DST OP SRC; \ CONT; \ ALU_##OPCODE##_X: \ - A = (u32) A OP (u32) X; \ + DST = (u32) DST OP (u32) SRC; \ CONT; \ ALU64_##OPCODE##_K: \ - A = A OP K; \ + DST = DST OP IMM; \ CONT; \ ALU_##OPCODE##_K: \ - A = (u32) A OP (u32) K; \ + DST = (u32) DST OP (u32) IMM; \ CONT; ALU(ADD, +) @@ -296,92 +296,92 @@ select_insn: ALU(MUL, *) #undef ALU ALU_NEG: - A = (u32) -A; + DST = (u32) -DST; CONT; ALU64_NEG: - A = -A; + DST = -DST; CONT; ALU_MOV_X: - A = (u32) X; + DST = (u32) SRC; CONT; ALU_MOV_K: - A = (u32) K; + DST = (u32) IMM; CONT; ALU64_MOV_X: - A = X; + DST = SRC; CONT; ALU64_MOV_K: - A = K; + DST = IMM; CONT; ALU64_ARSH_X: - (*(s64 *) &A) >>= X; + (*(s64 *) &DST) >>= SRC; CONT; ALU64_ARSH_K: - (*(s64 *) &A) >>= K; + (*(s64 *) &DST) >>= IMM; CONT; ALU64_MOD_X: - if (unlikely(X == 0)) + if (unlikely(SRC == 0)) return 0; - tmp = A; - A = do_div(tmp, X); + tmp = DST; + DST = do_div(tmp, SRC); CONT; ALU_MOD_X: - if (unlikely(X == 0)) + if (unlikely(SRC == 0)) return 0; - tmp = (u32) A; - A = do_div(tmp, (u32) X); + tmp = (u32) DST; + DST = do_div(tmp, (u32) SRC); CONT; ALU64_MOD_K: - tmp = A; - A = do_div(tmp, K); + tmp = DST; + DST = do_div(tmp, IMM); CONT; ALU_MOD_K: - tmp = (u32) A; - A = do_div(tmp, (u32) K); + tmp = (u32) DST; + DST = do_div(tmp, (u32) IMM); CONT; ALU64_DIV_X: - if (unlikely(X == 0)) + if (unlikely(SRC == 0)) return 0; - do_div(A, X); + do_div(DST, SRC); CONT; ALU_DIV_X: - if (unlikely(X == 0)) + if (unlikely(SRC == 0)) return 0; - tmp = (u32) A; - do_div(tmp, (u32) X); - A = (u32) tmp; + tmp = (u32) DST; + do_div(tmp, (u32) SRC); + DST = (u32) tmp; CONT; ALU64_DIV_K: - do_div(A, K); + do_div(DST, IMM); CONT; ALU_DIV_K: - tmp = (u32) A; - do_div(tmp, (u32) K); - A = (u32) tmp; + tmp = (u32) DST; + do_div(tmp, (u32) IMM); + DST = (u32) tmp; CONT; ALU_END_TO_BE: - switch (K) { + switch (IMM) { case 16: - A = (__force u16) cpu_to_be16(A); + DST = (__force u16) cpu_to_be16(DST); break; case 32: - A = (__force u32) cpu_to_be32(A); + DST = (__force u32) cpu_to_be32(DST); break; case 64: - A = (__force u64) cpu_to_be64(A); + DST = (__force u64) cpu_to_be64(DST); break; } CONT; ALU_END_TO_LE: - switch (K) { + switch (IMM) { case 16: - A = (__force u16) cpu_to_le16(A); + DST = (__force u16) cpu_to_le16(DST); break; case 32: - A = (__force u32) cpu_to_le32(A); + DST = (__force u32) cpu_to_le32(DST); break; case 64: - A = (__force u64) cpu_to_le64(A); + DST = (__force u64) cpu_to_le64(DST); break; } CONT; @@ -401,85 +401,85 @@ select_insn: insn += insn->off; CONT; JMP_JEQ_X: - if (A == X) { + if (DST == SRC) { insn += insn->off; CONT_JMP; } CONT; JMP_JEQ_K: - if (A == K) { + if (DST == IMM) { insn += insn->off; CONT_JMP; } CONT; JMP_JNE_X: - if (A != X) { + if (DST != SRC) { insn += insn->off; CONT_JMP; } CONT; JMP_JNE_K: - if (A != K) { + if (DST != IMM) { insn += 
insn->off; CONT_JMP; } CONT; JMP_JGT_X: - if (A > X) { + if (DST > SRC) { insn += insn->off; CONT_JMP; } CONT; JMP_JGT_K: - if (A > K) { + if (DST > IMM) { insn += insn->off; CONT_JMP; } CONT; JMP_JGE_X: - if (A >= X) { + if (DST >= SRC) { insn += insn->off; CONT_JMP; } CONT; JMP_JGE_K: - if (A >= K) { + if (DST >= IMM) { insn += insn->off; CONT_JMP; } CONT; JMP_JSGT_X: - if (((s64) A) > ((s64) X)) { + if (((s64) DST) > ((s64) SRC)) { insn += insn->off; CONT_JMP; } CONT; JMP_JSGT_K: - if (((s64) A) > ((s64) K)) { + if (((s64) DST) > ((s64) IMM)) { insn += insn->off; CONT_JMP; } CONT; JMP_JSGE_X: - if (((s64) A) >= ((s64) X)) { + if (((s64) DST) >= ((s64) SRC)) { insn += insn->off; CONT_JMP; } CONT; JMP_JSGE_K: - if (((s64) A) >= ((s64) K)) { + if (((s64) DST) >= ((s64) IMM)) { insn += insn->off; CONT_JMP; } CONT; JMP_JSET_X: - if (A & X) { + if (DST & SRC) { insn += insn->off; CONT_JMP; } CONT; JMP_JSET_K: - if (A & K) { + if (DST & IMM) { insn += insn->off; CONT_JMP; } @@ -488,15 +488,15 @@ select_insn: return BPF_R0; /* STX and ST and LDX*/ -#define LDST(SIZEOP, SIZE) \ - STX_MEM_##SIZEOP: \ - *(SIZE *)(unsigned long) (A + insn->off) = X; \ - CONT; \ - ST_MEM_##SIZEOP: \ - *(SIZE *)(unsigned long) (A + insn->off) = K; \ - CONT; \ - LDX_MEM_##SIZEOP: \ - A = *(SIZE *)(unsigned long) (X + insn->off); \ +#define LDST(SIZEOP, SIZE) \ + STX_MEM_##SIZEOP: \ + *(SIZE *)(unsigned long) (DST + insn->off) = SRC; \ + CONT; \ + ST_MEM_##SIZEOP: \ + *(SIZE *)(unsigned long) (DST + insn->off) = IMM; \ + CONT; \ + LDX_MEM_##SIZEOP: \ + DST = *(SIZE *)(unsigned long) (SRC + insn->off); \ CONT; LDST(B, u8) @@ -504,16 +504,16 @@ select_insn: LDST(W, u32) LDST(DW, u64) #undef LDST - STX_XADD_W: /* lock xadd *(u32 *)(A + insn->off) += X */ - atomic_add((u32) X, (atomic_t *)(unsigned long) - (A + insn->off)); + STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */ + atomic_add((u32) SRC, (atomic_t *)(unsigned long) + (DST + insn->off)); CONT; - STX_XADD_DW: /* lock xadd *(u64 *)(A + insn->off) += X */ - atomic64_add((u64) X, (atomic64_t *)(unsigned long) - (A + insn->off)); + STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */ + atomic64_add((u64) SRC, (atomic64_t *)(unsigned long) + (DST + insn->off)); CONT; - LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + K)) */ - off = K; + LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */ + off = IMM; load_word: /* BPF_LD + BPD_ABS and BPF_LD + BPF_IND insns are * only appearing in the programs where ctx == @@ -527,51 +527,51 @@ load_word: * BPF_R6-BPF_R9, and store return value into BPF_R0. 
* * Implicit input: - * ctx + * ctx == skb == BPF_R6 == CTX * * Explicit input: - * X == any register - * K == 32-bit immediate + * SRC == any register + * IMM == 32-bit immediate * * Output: * BPF_R0 - 8/16/32-bit skb data converted to cpu endianness */ - ptr = load_pointer((struct sk_buff *) ctx, off, 4, &tmp); + ptr = load_pointer((struct sk_buff *) CTX, off, 4, &tmp); if (likely(ptr != NULL)) { BPF_R0 = get_unaligned_be32(ptr); CONT; } return 0; - LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + K)) */ - off = K; + LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */ + off = IMM; load_half: - ptr = load_pointer((struct sk_buff *) ctx, off, 2, &tmp); + ptr = load_pointer((struct sk_buff *) CTX, off, 2, &tmp); if (likely(ptr != NULL)) { BPF_R0 = get_unaligned_be16(ptr); CONT; } return 0; - LD_ABS_B: /* BPF_R0 = *(u8 *) (ctx + K) */ - off = K; + LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */ + off = IMM; load_byte: - ptr = load_pointer((struct sk_buff *) ctx, off, 1, &tmp); + ptr = load_pointer((struct sk_buff *) CTX, off, 1, &tmp); if (likely(ptr != NULL)) { BPF_R0 = *(u8 *)ptr; CONT; } return 0; - LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + X + K)) */ - off = K + X; + LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */ + off = IMM + SRC; goto load_word; - LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + X + K)) */ - off = K + X; + LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */ + off = IMM + SRC; goto load_half; - LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + X + K) */ - off = K + X; + LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */ + off = IMM + SRC; goto load_byte; default_label: @@ -675,7 +675,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_PROTOCOL: BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2); - /* A = *(u16 *) (ctx + offsetof(protocol)) */ + /* A = *(u16 *) (CTX + offsetof(protocol)) */ *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, protocol)); /* A = ntohs(A) [emitting a nop or swap16] */ @@ -741,7 +741,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp, BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000); - /* A = *(u16 *) (ctx + offsetof(vlan_tci)) */ + /* A = *(u16 *) (CTX + offsetof(vlan_tci)) */ *insn++ = BPF_LDX_MEM(BPF_H, BPF_REG_A, BPF_REG_CTX, offsetof(struct sk_buff, vlan_tci)); if (fp->k == SKF_AD_OFF + SKF_AD_VLAN_TAG) { @@ -760,13 +760,13 @@ static bool convert_bpf_extensions(struct sock_filter *fp, case SKF_AD_OFF + SKF_AD_NLATTR_NEST: case SKF_AD_OFF + SKF_AD_CPU: case SKF_AD_OFF + SKF_AD_RANDOM: - /* arg1 = ctx */ + /* arg1 = CTX */ *insn++ = BPF_MOV64_REG(BPF_REG_ARG1, BPF_REG_CTX); /* arg2 = A */ *insn++ = BPF_MOV64_REG(BPF_REG_ARG2, BPF_REG_A); /* arg3 = X */ *insn++ = BPF_MOV64_REG(BPF_REG_ARG3, BPF_REG_X); - /* Emit call(ctx, arg2=A, arg3=X) */ + /* Emit call(arg1=CTX, arg2=A, arg3=X) */ switch (fp->k) { case SKF_AD_OFF + SKF_AD_PAY_OFFSET: *insn = BPF_EMIT_CALL(__skb_get_pay_offset); @@ -941,12 +941,12 @@ do_pass: */ *insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k); - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_TMP; + insn->dst_reg = BPF_REG_A; + insn->src_reg = BPF_REG_TMP; bpf_src = BPF_X; } else { - insn->a_reg = BPF_REG_A; - insn->x_reg = BPF_REG_X; + insn->dst_reg = BPF_REG_A; + insn->src_reg = BPF_REG_X; insn->imm = fp->k; bpf_src = BPF_SRC(fp->code); } -- cgit v1.2.3 From 357b2f3dd9b7e220ddbaef5bcc108f0359dc0fcf Mon Sep 17 00:00:00 
2001 From: Gavin Shan Date: Wed, 11 Jun 2014 18:26:44 +1000 Subject: powerpc/eeh: Dump PE location code As Ben suggested, it's meaningful to dump the PE's location code for site engineers when hitting EEH errors. The patch introduces the function eeh_pe_loc_get() to retrieve the location code from the device tree so that we can output it when hitting EEH errors. If the primary PE bus is the root bus, the PHB's dev-node would be tried prior to the root port's dev-node. Otherwise, the upstream bridge's dev-node of the primary PE bus will be checked for the location code directly. Signed-off-by: Gavin Shan Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/include/asm/eeh.h | 1 + arch/powerpc/kernel/eeh.c | 13 ++++--- arch/powerpc/kernel/eeh_pe.c | 60 +++++++++++++++++++++++++++++++ arch/powerpc/platforms/powernv/eeh-ioda.c | 21 +++++++---- 4 files changed, 84 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h index b76f58c124ca..fab7743c2640 100644 --- a/arch/powerpc/include/asm/eeh.h +++ b/arch/powerpc/include/asm/eeh.h @@ -254,6 +254,7 @@ void *eeh_pe_traverse(struct eeh_pe *root, void *eeh_pe_dev_traverse(struct eeh_pe *root, eeh_traverse_func fn, void *flag); void eeh_pe_restore_bars(struct eeh_pe *pe); +const char *eeh_pe_loc_get(struct eeh_pe *pe); struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); void *eeh_dev_init(struct device_node *dn, void *data); diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c index c25064b7d667..86e25702aaca 100644 --- a/arch/powerpc/kernel/eeh.c +++ b/arch/powerpc/kernel/eeh.c @@ -330,8 +330,8 @@ static int eeh_phb_check_failure(struct eeh_pe *pe) eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED); eeh_serialize_unlock(flags); - pr_err("EEH: PHB#%x failure detected\n", - phb_pe->phb->global_number); + pr_err("EEH: PHB#%x failure detected, location: %s\n", + phb_pe->phb->global_number, eeh_pe_loc_get(phb_pe)); dump_stack(); eeh_send_failure_event(phb_pe); @@ -362,7 +362,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev) unsigned long flags; struct device_node *dn; struct pci_dev *dev; - struct eeh_pe *pe, *parent_pe; + struct eeh_pe *pe, *parent_pe, *phb_pe; int rc = 0; const char *location; @@ -481,8 +481,11 @@ int eeh_dev_check_failure(struct eeh_dev *edev) * a stack trace will help the device-driver authors figure * out what happened. So print that out. */ - pr_err("EEH: Frozen PE#%x detected on PHB#%x\n", - pe->addr, pe->phb->global_number); + phb_pe = eeh_phb_pe_get(pe->phb); + pr_err("EEH: Frozen PHB#%x-PE#%x detected\n", + pe->phb->global_number, pe->addr); + pr_err("EEH: PE location: %s, PHB location: %s\n", + eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe)); dump_stack(); eeh_send_failure_event(pe); diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c index 995c2a284630..fbd01eba4473 100644 --- a/arch/powerpc/kernel/eeh_pe.c +++ b/arch/powerpc/kernel/eeh_pe.c @@ -791,6 +791,66 @@ void eeh_pe_restore_bars(struct eeh_pe *pe) eeh_pe_dev_traverse(pe, eeh_restore_one_device_bars, NULL); } +/** + * eeh_pe_loc_get - Retrieve location code binding to the given PE + * @pe: EEH PE + * + * Retrieve the location code of the given PE. If the primary PE bus + * is root bus, we will grab location code from PHB device tree node + * or root port. Otherwise, the upstream bridge's device tree node + * of the primary PE bus will be checked for the location code. 
+ */ +const char *eeh_pe_loc_get(struct eeh_pe *pe) +{ + struct pci_controller *hose; + struct pci_bus *bus = eeh_pe_bus_get(pe); + struct pci_dev *pdev; + struct device_node *dn; + const char *loc; + + if (!bus) + return "N/A"; + + /* PHB PE or root PE ? */ + if (pci_is_root_bus(bus)) { + hose = pci_bus_to_host(bus); + loc = of_get_property(hose->dn, + "ibm,loc-code", NULL); + if (loc) + return loc; + loc = of_get_property(hose->dn, + "ibm,io-base-loc-code", NULL); + if (loc) + return loc; + + pdev = pci_get_slot(bus, 0x0); + } else { + pdev = bus->self; + } + + if (!pdev) { + loc = "N/A"; + goto out; + } + + dn = pci_device_to_OF_node(pdev); + if (!dn) { + loc = "N/A"; + goto out; + } + + loc = of_get_property(dn, "ibm,loc-code", NULL); + if (!loc) + loc = of_get_property(dn, "ibm,slot-location-code", NULL); + if (!loc) + loc = "N/A"; + +out: + if (pci_is_root_bus(bus) && pdev) + pci_dev_put(pdev); + return loc; +} + /** * eeh_pe_bus_get - Retrieve PCI bus according to the given PE * @pe: EEH PE diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c index 9c002099f875..8ad0c5b891f4 100644 --- a/arch/powerpc/platforms/powernv/eeh-ioda.c +++ b/arch/powerpc/platforms/powernv/eeh-ioda.c @@ -774,19 +774,24 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) case OPAL_EEH_PHB_ERROR: if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) { *pe = phb_pe; - pr_err("EEH: dead PHB#%x detected\n", - hose->global_number); + pr_err("EEH: dead PHB#%x detected, " + "location: %s\n", + hose->global_number, + eeh_pe_loc_get(phb_pe)); ret = EEH_NEXT_ERR_DEAD_PHB; } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_FENCED) { *pe = phb_pe; - pr_err("EEH: fenced PHB#%x detected\n", - hose->global_number); + pr_err("EEH: Fenced PHB#%x detected, " + "location: %s\n", + hose->global_number, + eeh_pe_loc_get(phb_pe)); ret = EEH_NEXT_ERR_FENCED_PHB; } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) { pr_info("EEH: PHB#%x informative error " - "detected\n", - hose->global_number); + "detected, location: %s\n", + hose->global_number, + eeh_pe_loc_get(phb_pe)); ioda_eeh_phb_diag(hose); ret = EEH_NEXT_ERR_NONE; } @@ -802,6 +807,8 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) /* Try best to clear it */ pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n", hose->global_number, frozen_pe_no); + pr_info("EEH: PHB location: %s\n", + eeh_pe_loc_get(phb_pe)); opal_pci_eeh_freeze_clear(phb->opal_id, frozen_pe_no, OPAL_EEH_ACTION_CLEAR_FREEZE_ALL); ret = EEH_NEXT_ERR_NONE; @@ -810,6 +817,8 @@ static int ioda_eeh_next_error(struct eeh_pe **pe) } else { pr_err("EEH: Frozen PE#%x on PHB#%x detected\n", (*pe)->addr, (*pe)->phb->global_number); + pr_err("EEH: PE location: %s, PHB location: %s\n", + eeh_pe_loc_get(*pe), eeh_pe_loc_get(phb_pe)); ret = EEH_NEXT_ERR_FROZEN_PE; } -- cgit v1.2.3 From 2749a2f26a7c7eb4c7e3901695c8977cdb6b826d Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Wed, 11 Jun 2014 14:17:56 +0530 Subject: powerpc/book3s: Fix machine check handling for unhandled errors Current code does not check for unhandled/unrecovered errors and return from interrupt if it is recoverable exception which in-turn triggers same machine check exception in a loop causing hypervisor to be unresponsive. This patch fixes this situation and forces hypervisor to panic for unhandled/unrecovered errors. This patch also fixes another issue where unrecoverable_exception routine was called in real mode in case of unrecoverable exception (MSR_RI = 0). 
This causes another exception vector 0x300 (data access) during system crash leading to confusion while debugging cause of the system crash. Also turn ME bit off while going down, so that when another MCE is hit during panic path, system will checkstop and hypervisor will get restarted cleanly by SP. With the above fixes we now throw correct console messages (see below) while crashing the system in case of unhandled/unrecoverable machine checks. -------------- Severe Machine check interrupt [[Not recovered] Initiator: CPU Error type: UE [Instruction fetch] Effective address: 0000000030002864 Oops: Machine check, sig: 7 [#1] SMP NR_CPUS=2048 NUMA PowerNV Modules linked in: bork(O) bridge stp llc kvm [last unloaded: bork] CPU: 36 PID: 55162 Comm: bash Tainted: G O 3.14.0mce #1 task: c000002d72d022d0 ti: c000000007ec0000 task.ti: c000002d72de4000 NIP: 0000000030002864 LR: 00000000300151a4 CTR: 000000003001518c REGS: c000000007ec3d80 TRAP: 0200 Tainted: G O (3.14.0mce) MSR: 9000000000041002 CR: 28222848 XER: 20000000 CFAR: 0000000030002838 DAR: d0000000004d0000 DSISR: 00000000 SOFTE: 1 GPR00: 000000003001512c 0000000031f92cb0 0000000030078af0 0000000030002864 GPR04: d0000000004d0000 0000000000000000 0000000030002864 ffffffffffffffc9 GPR08: 0000000000000024 0000000030008af0 000000000000002c c00000000150e728 GPR12: 9000000000041002 0000000031f90000 0000000010142550 0000000040000000 GPR16: 0000000010143cdc 0000000000000000 00000000101306fc 00000000101424dc GPR20: 00000000101424e0 000000001013c6f0 0000000000000000 0000000000000000 GPR24: 0000000010143ce0 00000000100f6440 c000002d72de7e00 c000002d72860250 GPR28: c000002d72860240 c000002d72ac0038 0000000000000008 0000000000040000 NIP [0000000030002864] 0x30002864 LR [00000000300151a4] 0x300151a4 Call Trace: Instruction dump: XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX XXXXXXXX ---[ end trace 7285f0beac1e29d3 ]--- Sending IPI to other CPUs IPI complete OPAL V3 detected ! -------------- Signed-off-by: Mahesh Salgaonkar Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/exceptions-64s.S | 40 +++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 20f11eb4dff7..274a86d001c7 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1389,6 +1389,7 @@ machine_check_handle_early: bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD bl machine_check_early + std r3,RESULT(r1) /* Save result */ ld r12,_MSR(r1) #ifdef CONFIG_PPC_P7_NAP /* @@ -1443,10 +1444,32 @@ machine_check_handle_early: */ andi. r11,r12,MSR_RI bne 2f -1: addi r3,r1,STACK_FRAME_OVERHEAD - bl unrecoverable_exception - b 1b +1: mfspr r11,SPRN_SRR0 + ld r10,PACAKBASE(r13) + LOAD_HANDLER(r10,unrecover_mce) + mtspr SPRN_SRR0,r10 + ld r10,PACAKMSR(r13) + /* + * We are going down. But there are chances that we might get hit by + * another MCE during panic path and we may run into unstable state + * with no way out. Hence, turn ME bit off while going down, so that + * when another MCE is hit during panic path, system will checkstop + * and hypervisor will get restarted cleanly by SP. + */ + li r3,MSR_ME + andc r10,r10,r3 /* Turn off MSR_ME */ + mtspr SPRN_SRR1,r10 + rfid + b . 2: + /* + * Check if we have successfully handled/recovered from error, if not + * then stay on emergency stack and panic. 
+ */ + ld r3,RESULT(r1) /* Load result */ + cmpdi r3,0 /* see if we handled MCE successfully */ + + beq 1b /* if !handled then panic */ /* * Return from MC interrupt. * Queue up the MCE event so that we can log it later, while @@ -1460,6 +1483,17 @@ machine_check_handle_early: MACHINE_CHECK_HANDLER_WINDUP b machine_check_pSeries +unrecover_mce: + /* Invoke machine_check_exception to print MCE event and panic. */ + addi r3,r1,STACK_FRAME_OVERHEAD + bl .machine_check_exception + /* + * We will not reach here. Even if we did, there is no way out. Call + * unrecoverable_exception and die. + */ +1: addi r3,r1,STACK_FRAME_OVERHEAD + bl .unrecoverable_exception + b 1b /* * r13 points to the PACA, r9 contains the saved CR, * r12 contain the saved SRR1, SRR0 is still ready for return -- cgit v1.2.3 From e75ad93afcd34a7ed472d64fb2944560ad9cad4e Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Wed, 11 Jun 2014 14:18:02 +0530 Subject: powerpc/book3s: Add stack overflow check in machine check handler. Currently machine check handler does not check for stack overflow for nested machine check. If we hit another MCE while inside the machine check handler repeatedly from same address then we get into risk of stack overflow which can cause huge memory corruption. This patch limits the nested MCE level to 4 and panic when we cross level 4. Signed-off-by: Mahesh Salgaonkar Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/exceptions-64s.S | 24 ++++++++++++++++++++---- 1 file changed, 20 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 274a86d001c7..8a0292dbb5fc 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -439,9 +439,9 @@ BEGIN_FTR_SECTION * R9 = CR * Original R9 to R13 is saved on PACA_EXMC * - * Switch to mc_emergency stack and handle re-entrancy (though we - * currently don't test for overflow). Save MCE registers srr1, - * srr0, dar and dsisr and then set ME=1 + * Switch to mc_emergency stack and handle re-entrancy (we limit + * the nested MCE upto level 4 to avoid stack overflow). + * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1 * * We use paca->in_mce to check whether this is the first entry or * nested machine check. We increment paca->in_mce to track nested @@ -464,6 +464,9 @@ BEGIN_FTR_SECTION 0: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ addi r10,r10,1 /* increment paca->in_mce */ sth r10,PACA_IN_MCE(r13) + /* Limit nested MCE to level 4 to avoid stack overflow */ + cmpwi r10,4 + bgt 2f /* Check if we hit limit of 4 */ std r11,GPR1(r1) /* Save r1 on the stack. */ std r11,0(r1) /* make stack chain pointer */ mfspr r11,SPRN_SRR0 /* Save SRR0 */ @@ -482,10 +485,23 @@ BEGIN_FTR_SECTION ori r11,r11,MSR_RI /* turn on RI bit */ ld r12,PACAKBASE(r13) /* get high part of &label */ LOAD_HANDLER(r12, machine_check_handle_early) - mtspr SPRN_SRR0,r12 +1: mtspr SPRN_SRR0,r12 mtspr SPRN_SRR1,r11 rfid b . /* prevent speculative execution */ +2: + /* Stack overflow. Stay on emergency stack and panic. + * Keep the ME bit off while panic-ing, so that if we hit + * another machine check we checkstop. + */ + addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */ + ld r11,PACAKMSR(r13) + ld r12,PACAKBASE(r13) + LOAD_HANDLER(r12, unrecover_mce) + li r10,MSR_ME + andc r11,r11,r10 /* Turn off MSR_ME */ + b 1b + b . 
/* prevent speculative execution */ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) machine_check_pSeries: -- cgit v1.2.3 From e6654d5b4259317be82b06cf9218f82abec8c8e7 Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Wed, 11 Jun 2014 14:18:07 +0530 Subject: powerpc/book3s: Increment the mce counter during machine_check_early call. We don't see the MCE counter getting incremented in /proc/interrupts, which gives the false impression that no MCE occurred even when there were MCE events. The machine check early handling was added for PowerKVM and we missed incrementing the MCE count in the early handler. We also increment the mce counter in the machine_check_exception call, but in most cases where we handle the error the hypervisor never reaches there unless it is fatal and we want to crash. Only in a fatal situation may we see a double increment of the mce count. We need to fix that. But for now it is better to have some count than zero. Signed-off-by: Mahesh Salgaonkar Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/traps.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 1bd7ca298fa1..239f1cde3fff 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c @@ -295,6 +295,8 @@ long machine_check_early(struct pt_regs *regs) { long handled = 0; + __get_cpu_var(irq_stat).mce_exceptions++; + if (cur_cpu_spec && cur_cpu_spec->machine_check_early) handled = cur_cpu_spec->machine_check_early(regs); return handled; -- cgit v1.2.3 From 74845bc2fa9c0e6b218821cd4e1eb7a552d3e503 Mon Sep 17 00:00:00 2001 From: Mahesh Salgaonkar Date: Wed, 11 Jun 2014 14:18:21 +0530 Subject: powerpc/book3s: Fix guest MC delivery mechanism to avoid soft lockups in guest. Currently we forward MCEs to the guest which have been recovered by the guest, and for unhandled errors we do not deliver the MCE to the guest. It looks like, with no FWNMI support in qemu, the guest just panics whenever we deliver the recovered MCEs to it. Also, the existing code used to return to the host for unhandled errors, which was causing the guest to hang with soft lockups inside the guest and made it difficult to recover the guest instance. This patch now forwards all fatal MCEs to the guest, causing the guest to crash/panic. And for recovered errors we just go back to normal functioning of the guest instead of returning to the host. This fixes the soft lockup issues in the guest. This patch also fixes an issue where guest MCE events were not logged to the host console. Signed-off-by: Mahesh Salgaonkar Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kvm/book3s_hv_ras.c | 15 +++++++-------- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 19 ++++++++++++++++--- 2 files changed, 23 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c index 768a9f977c00..3a5c568b1e89 100644 --- a/arch/powerpc/kvm/book3s_hv_ras.c +++ b/arch/powerpc/kvm/book3s_hv_ras.c @@ -113,10 +113,8 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) * We assume that if the condition is recovered then linux host * will have generated an error log event that we will pick * up and log later. - * Don't release mce event now. In case if condition is not - * recovered we do guest exit and go back to linux host machine - * check handler. Hence we need make sure that current mce event - * is available for linux host to consume. + * Don't release mce event now. We will queue up the event so that + * we can log the MCE event info on host console. 
*/ if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE)) goto out; @@ -128,11 +126,12 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu) out: /* - * If we have handled the error, then release the mce event because - * we will be delivering machine check to guest. + * We are now going enter guest either through machine check + * interrupt (for unhandled errors) or will continue from + * current HSRR0 (for handled errors) in guest. Hence + * queue up the event so that we can log it from host console later. */ - if (handled) - release_mce_event(); + machine_check_queue_event(); return handled; } diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S index 77356fd25ccc..868347ef09fd 100644 --- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S @@ -2257,15 +2257,28 @@ machine_check_realmode: mr r3, r9 /* get vcpu pointer */ bl kvmppc_realmode_machine_check nop - cmpdi r3, 0 /* continue exiting from guest? */ + cmpdi r3, 0 /* Did we handle MCE ? */ ld r9, HSTATE_KVM_VCPU(r13) li r12, BOOK3S_INTERRUPT_MACHINE_CHECK - beq mc_cont + /* + * Deliver unhandled/fatal (e.g. UE) MCE errors to guest through + * machine check interrupt (set HSRR0 to 0x200). And for handled + * errors (no-fatal), just go back to guest execution with current + * HSRR0 instead of exiting guest. This new approach will inject + * machine check to guest for fatal error causing guest to crash. + * + * The old code used to return to host for unhandled errors which + * was causing guest to hang with soft lockups inside guest and + * makes it difficult to recover guest instance. + */ + ld r10, VCPU_PC(r9) + ld r11, VCPU_MSR(r9) + bne 2f /* Continue guest execution. */ /* If not, deliver a machine check. SRR0/1 are already set */ li r10, BOOK3S_INTERRUPT_MACHINE_CHECK ld r11, VCPU_MSR(r9) bl kvmppc_msr_interrupt - b fast_interrupt_c_return +2: b fast_interrupt_c_return /* * Check the reason we woke from nap, and take appropriate action. -- cgit v1.2.3 From ad718622ab6d500c870772b1b8dda46fa2195e6d Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 12 Jun 2014 09:08:37 +1000 Subject: powerpc/book3s: Fix some ABIv2 issues in machine check code Commit 2749a2f26a7c (powerpc/book3s: Fix machine check handling for unhandled errors) introduced a few ABIv2 issues. We can maintain ABIv1 and ABIv2 compatibility by branching to the function rather than the dot symbol. Fixes: 2749a2f26a7c ("powerpc/book3s: Fix machine check handling for unhandled errors") Signed-off-by: Anton Blanchard Signed-off-by: Benjamin Herrenschmidt --- arch/powerpc/kernel/exceptions-64s.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 8a0292dbb5fc..a7d36b19221d 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -1502,13 +1502,13 @@ machine_check_handle_early: unrecover_mce: /* Invoke machine_check_exception to print MCE event and panic. */ addi r3,r1,STACK_FRAME_OVERHEAD - bl .machine_check_exception + bl machine_check_exception /* * We will not reach here. Even if we did, there is no way out. Call * unrecoverable_exception and die. 
*/ 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception + bl unrecoverable_exception b 1b /* * r13 points to the PACA, r9 contains the saved CR, -- cgit v1.2.3 From 699c659b49b5c7cf601fe454683841df16495c3e Mon Sep 17 00:00:00 2001 From: Michal Marek Date: Wed, 11 Jun 2014 13:53:48 +0200 Subject: powerpc: Avoid circular dependency with zImage.% The rule to create the final images uses a zImage.% pattern. Unfortunately, this also matches the names of the zImage.*.lds linker scripts, which appear as a dependency of the final images. This somehow worked when $(srctree) used to be an absolute path, but now the pattern matches too much. List only the images from $(image-y) as the target of the rule, to avoid the circular dependency. Reported-and-tested-by: Mike Qiu Signed-off-by: Michal Marek --- arch/powerpc/boot/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile index a1f8c7f1ec60..ae6403708971 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile @@ -315,8 +315,8 @@ $(addprefix $(obj)/, $(initrd-y)): $(obj)/ramdisk.image.gz $(obj)/zImage.initrd.%: vmlinux $(wrapperbits) $(call if_changed,wrap,$*,,,$(obj)/ramdisk.image.gz) -$(obj)/zImage.%: vmlinux $(wrapperbits) - $(call if_changed,wrap,$*) +$(addprefix $(obj)/, $(sort $(filter zImage.%, $(image-y)))): vmlinux $(wrapperbits) + $(call if_changed,wrap,$(subst $(obj)/zImage.,,$@)) # dtbImage% - a dtbImage is a zImage with an embedded device tree blob $(obj)/dtbImage.initrd.%: vmlinux $(wrapperbits) $(obj)/%.dtb -- cgit v1.2.3 From 51fdc6bf98802eb7019ec94dfd82f0e6cd1c82a8 Mon Sep 17 00:00:00 2001 From: Brian Norris Date: Thu, 17 Apr 2014 00:21:46 -0700 Subject: blackfin: defconfigs: add MTD_SPI_NOR (new dependency for M25P80) These defconfigs contain the CONFIG_M25P80 symbol, which is now dependent on the MTD_SPI_NOR symbol. Add CONFIG_MTD_SPI_NOR to the relevant defconfigs. At the same time, drop the now-nonexistent CONFIG_MTD_CHAR symbol. 
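[Editor's note: for context on the new dependency — CONFIG_M25P80 now builds only a thin SPI shim, while the probing and command logic lives in the spi-nor core enabled by CONFIG_MTD_SPI_NOR, so the two symbols have to be enabled together. Below is a minimal sketch of how such a flash part is typically wired up on these non-DT Blackfin boards; the bus number, chip select, clock rate, and partition layout are illustrative assumptions, not values taken from the defconfigs in this patch.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#include <linux/mtd/partitions.h>

/* Illustrative partition map; real boards define their own layout. */
static struct mtd_partition example_flash_parts[] = {
	{
		.name	= "bootloader",
		.offset	= 0,
		.size	= 0x40000,
	},
	{
		.name	= "rootfs",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct flash_platform_data example_flash_data = {
	.name		= "m25p80",
	.parts		= example_flash_parts,
	.nr_parts	= ARRAY_SIZE(example_flash_parts),
};

static struct spi_board_info example_spi_board_info[] __initdata = {
	{
		/* "m25p80" binds to the shim, which calls into spi-nor */
		.modalias	= "m25p80",
		.platform_data	= &example_flash_data,
		.max_speed_hz	= 25000000,	/* assumed */
		.bus_num	= 0,		/* assumed */
		.chip_select	= 1,		/* assumed */
	},
};

static int __init example_board_init(void)
{
	return spi_register_board_info(example_spi_board_info,
				       ARRAY_SIZE(example_spi_board_info));
}
arch_initcall(example_board_init);

With that layering, a defconfig that keeps CONFIG_M25P80 but lacks CONFIG_MTD_SPI_NOR would leave the shim with no core to call into, which is exactly what these defconfig updates prevent.]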
Signed-off-by: Brian Norris Cc: Steven Miao Cc: adi-buildroot-devel@lists.sourceforge.net Cc: linux-kernel@vger.kernel.org --- arch/blackfin/configs/BF526-EZBRD_defconfig | 2 +- arch/blackfin/configs/BF527-EZKIT-V2_defconfig | 2 +- arch/blackfin/configs/BF527-EZKIT_defconfig | 2 +- arch/blackfin/configs/BF548-EZKIT_defconfig | 2 +- arch/blackfin/configs/BF609-EZKIT_defconfig | 2 +- arch/blackfin/configs/BlackStamp_defconfig | 3 +-- arch/blackfin/configs/H8606_defconfig | 3 +-- 7 files changed, 7 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/blackfin/configs/BF526-EZBRD_defconfig b/arch/blackfin/configs/BF526-EZBRD_defconfig index 2f2c6acf210c..530ba7a6d7b0 100644 --- a/arch/blackfin/configs/BF526-EZBRD_defconfig +++ b/arch/blackfin/configs/BF526-EZBRD_defconfig @@ -53,7 +53,6 @@ CONFIG_IP_PNP=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_INTELEXT=y @@ -63,6 +62,7 @@ CONFIG_MTD_COMPLEX_MAPPINGS=y CONFIG_MTD_PHYSMAP=y CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=m +CONFIG_MTD_SPI_NOR=y CONFIG_BLK_DEV_RAM=y CONFIG_SCSI=y # CONFIG_SCSI_PROC_FS is not set diff --git a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig index 91535c38e7f2..a21c79275eae 100644 --- a/arch/blackfin/configs/BF527-EZKIT-V2_defconfig +++ b/arch/blackfin/configs/BF527-EZKIT-V2_defconfig @@ -58,7 +58,6 @@ CONFIG_BFIN_SIR0=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CHAR=m CONFIG_MTD_BLOCK=y CONFIG_MTD_JEDECPROBE=m CONFIG_MTD_RAM=y @@ -66,6 +65,7 @@ CONFIG_MTD_ROM=m CONFIG_MTD_COMPLEX_MAPPINGS=y CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=m +CONFIG_MTD_SPI_NOR=y CONFIG_BLK_DEV_RAM=y CONFIG_SCSI=y # CONFIG_SCSI_PROC_FS is not set diff --git a/arch/blackfin/configs/BF527-EZKIT_defconfig b/arch/blackfin/configs/BF527-EZKIT_defconfig index af2738c7441b..5070a860d8c4 100644 --- a/arch/blackfin/configs/BF527-EZKIT_defconfig +++ b/arch/blackfin/configs/BF527-EZKIT_defconfig @@ -57,7 +57,6 @@ CONFIG_BFIN_SIR0=y CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CHAR=m CONFIG_MTD_BLOCK=y CONFIG_MTD_JEDECPROBE=m CONFIG_MTD_RAM=y @@ -65,6 +64,7 @@ CONFIG_MTD_ROM=m CONFIG_MTD_COMPLEX_MAPPINGS=y CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=m +CONFIG_MTD_SPI_NOR=y CONFIG_BLK_DEV_RAM=y CONFIG_SCSI=y # CONFIG_SCSI_PROC_FS is not set diff --git a/arch/blackfin/configs/BF548-EZKIT_defconfig b/arch/blackfin/configs/BF548-EZKIT_defconfig index e716fdfd2cf2..efb293057287 100644 --- a/arch/blackfin/configs/BF548-EZKIT_defconfig +++ b/arch/blackfin/configs/BF548-EZKIT_defconfig @@ -64,7 +64,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_FW_LOADER=m CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y CONFIG_MTD_CFI_INTELEXT=y @@ -75,6 +74,7 @@ CONFIG_MTD_M25P80=y CONFIG_MTD_NAND=y CONFIG_MTD_NAND_BF5XX=y # CONFIG_MTD_NAND_BF5XX_HWECC is not set +CONFIG_MTD_SPI_NOR=y CONFIG_BLK_DEV_RAM=y # CONFIG_SCSI_PROC_FS is not set CONFIG_BLK_DEV_SD=y diff --git a/arch/blackfin/configs/BF609-EZKIT_defconfig b/arch/blackfin/configs/BF609-EZKIT_defconfig index 4ca39ab6b2bf..a7e9bfd84183 100644 --- a/arch/blackfin/configs/BF609-EZKIT_defconfig +++ b/arch/blackfin/configs/BF609-EZKIT_defconfig @@ -57,7 +57,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" CONFIG_FW_LOADER=m CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=y 
CONFIG_MTD_CFI_INTELEXT=y @@ -65,6 +64,7 @@ CONFIG_MTD_CFI_STAA=y CONFIG_MTD_COMPLEX_MAPPINGS=y CONFIG_MTD_PHYSMAP=y CONFIG_MTD_M25P80=y +CONFIG_MTD_SPI_NOR=y CONFIG_MTD_UBI=m CONFIG_SCSI=y CONFIG_BLK_DEV_SD=y diff --git a/arch/blackfin/configs/BlackStamp_defconfig b/arch/blackfin/configs/BlackStamp_defconfig index 3853c473b443..f4a9200e1ab1 100644 --- a/arch/blackfin/configs/BlackStamp_defconfig +++ b/arch/blackfin/configs/BlackStamp_defconfig @@ -45,7 +45,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" # CONFIG_FW_LOADER is not set CONFIG_MTD=y CONFIG_MTD_CMDLINE_PARTS=y -CONFIG_MTD_CHAR=m CONFIG_MTD_BLOCK=y CONFIG_MTD_CFI=m CONFIG_MTD_CFI_AMDSTD=m @@ -53,7 +52,7 @@ CONFIG_MTD_RAM=y CONFIG_MTD_ROM=m CONFIG_MTD_COMPLEX_MAPPINGS=y CONFIG_MTD_M25P80=y -# CONFIG_M25PXX_USE_FAST_READ is not set +CONFIG_MTD_SPI_NOR=y CONFIG_BLK_DEV_LOOP=y CONFIG_BLK_DEV_NBD=y CONFIG_BLK_DEV_RAM=y diff --git a/arch/blackfin/configs/H8606_defconfig b/arch/blackfin/configs/H8606_defconfig index f754e490bbfd..0ff97d8d047a 100644 --- a/arch/blackfin/configs/H8606_defconfig +++ b/arch/blackfin/configs/H8606_defconfig @@ -36,13 +36,12 @@ CONFIG_IRTTY_SIR=m # CONFIG_WIRELESS is not set # CONFIG_FW_LOADER is not set CONFIG_MTD=y -CONFIG_MTD_CHAR=y CONFIG_MTD_BLOCK=y CONFIG_MTD_RAM=y CONFIG_MTD_ROM=y CONFIG_MTD_COMPLEX_MAPPINGS=y CONFIG_MTD_M25P80=y -# CONFIG_M25PXX_USE_FAST_READ is not set +CONFIG_MTD_SPI_NOR=y CONFIG_BLK_DEV_RAM=y CONFIG_MISC_DEVICES=y CONFIG_EEPROM_AT25=y -- cgit v1.2.3 From b4b31f6101433e4b8ee73779b69b935af07682f8 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 12 Jun 2014 17:53:11 -0700 Subject: x86/vdso: Add PUT_LE to store little-endian values Add PUT_LE() by analogy with GET_LE() to write little-endian values in addition to reading them. Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/3d9b27e92745b27b6fda1b9a98f70dc9c1246c7a.1402620737.git.luto@amacapital.net Signed-off-by: H. Peter Anvin --- arch/x86/vdso/vdso2c.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/vdso/vdso2c.c b/arch/x86/vdso/vdso2c.c index 450ac6eaf613..7a6bf50f9165 100644 --- a/arch/x86/vdso/vdso2c.c +++ b/arch/x86/vdso/vdso2c.c @@ -54,7 +54,7 @@ static void fail(const char *format, ...) } /* - * Evil macros to do a little-endian read. + * Evil macros for little-endian reads and writes */ #define GLE(x, bits, ifnot) \ __builtin_choose_expr( \ @@ -62,11 +62,24 @@ static void fail(const char *format, ...) (__typeof__(*(x)))get_unaligned_le##bits(x), ifnot) extern void bad_get_le(void); -#define LAST_LE(x) \ +#define LAST_GLE(x) \ __builtin_choose_expr(sizeof(*(x)) == 1, *(x), bad_get_le()) #define GET_LE(x) \ - GLE(x, 64, GLE(x, 32, GLE(x, 16, LAST_LE(x)))) + GLE(x, 64, GLE(x, 32, GLE(x, 16, LAST_GLE(x)))) + +#define PLE(x, val, bits, ifnot) \ + __builtin_choose_expr( \ + (sizeof(*(x)) == bits/8), \ + put_unaligned_le##bits((val), (x)), ifnot) + +extern void bad_put_le(void); +#define LAST_PLE(x, val) \ + __builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), bad_put_le()) + +#define PUT_LE(x, val) \ + PLE(x, val, 64, PLE(x, val, 32, PLE(x, val, 16, LAST_PLE(x, val)))) + #define NSYMS (sizeof(required_syms) / sizeof(required_syms[0])) -- cgit v1.2.3 From e0bf7b86dace87eccdabdd66d2769ccad19cb81c Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 12 Jun 2014 17:53:12 -0700 Subject: x86/vdso: Hack to keep 64-bit Go programs working The Go runtime has a buggy vDSO parser that currently segfaults. 
This writes an empty SHT_DYNSYM entry that causes Go's runtime to malfunction by thinking that the vDSO is empty rather than malfunctioning by running off the end and segfaulting. This affects x86-64 only as far as we know, so we do not need this for the i386 and x32 vdsos. Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/d10618176c4bd39b457a5e85c497295c90cab1bc.1402620737.git.luto@amacapital.net Signed-off-by: H. Peter Anvin --- arch/x86/vdso/Makefile | 18 ++++++++++-------- arch/x86/vdso/vdso-fakesections.c | 32 ++++++++++++++++++++++++++++++++ arch/x86/vdso/vdso2c.h | 23 ++++++++++++++++++----- 3 files changed, 60 insertions(+), 13 deletions(-) create mode 100644 arch/x86/vdso/vdso-fakesections.c (limited to 'arch') diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index 9769df094035..ba6fc2757ee4 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile @@ -15,12 +15,8 @@ vdso-install-$(VDSO32-y) += $(vdso32-images) # files to link into the vdso -vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o - -vobjs-$(VDSOX32-y) += $(vobjx32s-compat) - -# Filter out x32 objects. -vobj64s := $(filter-out $(vobjx32s-compat),$(vobjs-y)) +vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o +vobjs-nox32 := vdso-fakesections.o # files to link into kernel obj-y += vma.o @@ -34,7 +30,7 @@ vdso_img-$(VDSO32-y) += 32-sysenter obj-$(VDSO32-y) += vdso32-setup.o -vobjs := $(foreach F,$(vobj64s),$(obj)/$F) +vobjs := $(foreach F,$(vobjs-y),$(obj)/$F) $(obj)/vdso.o: $(obj)/vdso.so @@ -104,7 +100,13 @@ VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \ -Wl,-z,max-page-size=4096 \ -Wl,-z,common-page-size=4096 -vobjx32s-y := $(vobj64s:.o=-x32.o) +# 64-bit objects to re-brand as x32 +vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y)) + +# x32-rebranded versions +vobjx32s-y := $(vobjs64-for-x32:.o=-x32.o) + +# same thing, but in the output directory vobjx32s := $(foreach F,$(vobjx32s-y),$(obj)/$F) # Convert 64bit object file to x32 for x32 vDSO. diff --git a/arch/x86/vdso/vdso-fakesections.c b/arch/x86/vdso/vdso-fakesections.c new file mode 100644 index 000000000000..cb8a8d72c24b --- /dev/null +++ b/arch/x86/vdso/vdso-fakesections.c @@ -0,0 +1,32 @@ +/* + * Copyright 2014 Andy Lutomirski + * Subject to the GNU Public License, v.2 + * + * Hack to keep broken Go programs working. + * + * The Go runtime had a couple of bugs: it would read the section table to try + * to figure out how many dynamic symbols there were (it shouldn't have looked + * at the section table at all) and, if there were no SHT_SYNDYM section table + * entry, it would use an uninitialized value for the number of symbols. As a + * workaround, we supply a minimal section table. vdso2c will adjust the + * in-memory image so that "vdso_fake_sections" becomes the section table. 
+ * + * The bug was introduced by: + * https://code.google.com/p/go/source/detail?r=56ea40aac72b (2012-08-31) + * and is being addressed in the Go runtime in this issue: + * https://code.google.com/p/go/issues/detail?id=8197 + */ + +#ifndef __x86_64__ +#error This hack is specific to the 64-bit vDSO +#endif + +#include + +extern const __visible struct elf64_shdr vdso_fake_sections[]; +const __visible struct elf64_shdr vdso_fake_sections[] = { + { + .sh_type = SHT_DYNSYM, + .sh_entsize = sizeof(Elf64_Sym), + } +}; diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h index d9f6f61aef1c..c6eefaf389b9 100644 --- a/arch/x86/vdso/vdso2c.h +++ b/arch/x86/vdso/vdso2c.h @@ -18,6 +18,8 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name) const char *secstrings; uint64_t syms[NSYMS] = {}; + uint64_t fake_sections_value = 0, fake_sections_size = 0; + Elf_Phdr *pt = (Elf_Phdr *)(addr + GET_LE(&hdr->e_phoff)); /* Walk the segment table. */ @@ -84,6 +86,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name) GET_LE(&symtab_hdr->sh_entsize) * i; const char *name = addr + GET_LE(&strtab_hdr->sh_offset) + GET_LE(&sym->st_name); + for (k = 0; k < NSYMS; k++) { if (!strcmp(name, required_syms[k])) { if (syms[k]) { @@ -93,6 +96,13 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name) syms[k] = GET_LE(&sym->st_value); } } + + if (!strcmp(name, "vdso_fake_sections")) { + if (fake_sections_value) + fail("duplicate vdso_fake_sections\n"); + fake_sections_value = GET_LE(&sym->st_value); + fake_sections_size = GET_LE(&sym->st_size); + } } /* Validate mapping addresses. */ @@ -112,11 +122,14 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name) if (syms[sym_end_mapping] % 4096) fail("end_mapping must be a multiple of 4096\n"); - /* Remove sections. */ - hdr->e_shoff = 0; - hdr->e_shentsize = 0; - hdr->e_shnum = 0; - hdr->e_shstrndx = SHN_UNDEF; /* SHN_UNDEF == 0 */ + /* Remove sections or use fakes */ + if (fake_sections_size % sizeof(Elf_Shdr)) + fail("vdso_fake_sections size is not a multiple of %ld\n", + (long)sizeof(Elf_Shdr)); + PUT_LE(&hdr->e_shoff, fake_sections_value); + PUT_LE(&hdr->e_shentsize, fake_sections_value ? sizeof(Elf_Shdr) : 0); + PUT_LE(&hdr->e_shnum, fake_sections_size / sizeof(Elf_Shdr)); + PUT_LE(&hdr->e_shstrndx, SHN_UNDEF); if (!name) { fwrite(addr, load_size, 1, outfile); -- cgit v1.2.3 From a934fb5bc9cd1260be89272cfb7a6c9dc71974d7 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 12 Jun 2014 08:28:10 -0700 Subject: x86/vdso: Fix vdso_install "make vdso_install" installs unstripped versions of the vdso objects for the benefit of the debugger. This was broken by checkin: 6f121e548f83 x86, vdso: Reimplement vdso.so preparation in build-time C The filenames are different now, so update the Makefile to cope. This still installs the 64-bit vdso as vdso64.so. We believe this will be okay, as the only known user is a patched gdb which is known to use build-ids, but if it turns out to be a problem we may have to add a link. Inspired by a patch from Sam Ravnborg. Acked-by: Sam Ravnborg Reported-by: Josh Boyer Tested-by: Josh Boyer Signed-off-by: Andy Lutomirski Link: http://lkml.kernel.org/r/b10299edd8ba98d17e07dafcd895b8ecf4d99eff.1402586707.git.luto@amacapital.net Signed-off-by: H. 
Peter Anvin --- arch/x86/vdso/Makefile | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index ba6fc2757ee4..3c0809a0631f 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile @@ -9,11 +9,6 @@ VDSOX32-$(CONFIG_X86_X32_ABI) := y VDSO32-$(CONFIG_X86_32) := y VDSO32-$(CONFIG_COMPAT) := y -vdso-install-$(VDSO64-y) += vdso.so -vdso-install-$(VDSOX32-y) += vdsox32.so -vdso-install-$(VDSO32-y) += $(vdso32-images) - - # files to link into the vdso vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o vobjs-nox32 := vdso-fakesections.o @@ -178,15 +173,20 @@ VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \ GCOV_PROFILE := n # -# Install the unstripped copy of vdso*.so listed in $(vdso-install-y). +# Install the unstripped copies of vdso*.so. # -quiet_cmd_vdso_install = INSTALL $@ - cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@ -$(vdso-install-y): %.so: $(obj)/%.so.dbg FORCE +quiet_cmd_vdso_install = INSTALL $(@:install_%=%) + cmd_vdso_install = cp $< $(MODLIB)/vdso/$(@:install_%=%) + +vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%) + +$(MODLIB)/vdso: FORCE @mkdir -p $(MODLIB)/vdso + +$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE $(call cmd,vdso_install) -PHONY += vdso_install $(vdso-install-y) -vdso_install: $(vdso-install-y) +PHONY += vdso_install $(vdso_img_insttargets) +vdso_install: $(vdso_img_insttargets) FORCE clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80* -- cgit v1.2.3 From 9796853e902447e53a17dae5df9eb609f0e31e6a Mon Sep 17 00:00:00 2001 From: Peter Griffin Date: Mon, 16 Jun 2014 11:23:00 +0200 Subject: ARM: STi: DT: Properly define sti-ethclk & stmmaceth for stih415/6 This patch fixes two problems: 1) The device tree isn't currently providing sti-ethclk, which is required by the dwmac glue code to correctly configure the ethernet PHY clock speed. This means that, depending on what the bootloader/jtag has configured this clock to and what switch/hub the board is plugged into, you most likely will NOT successfully negotiate an ethernet link. 2) The stmmaceth clock was associated with the wrong clock. It was referencing the PHY clock rather than the interconnect clock which clocks the IP. This patch also brings us closer to not having to boot the upstream kernel with the clk_ignore_unused parameter. 
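[Editor's note: to see why both clock-names entries matter, here is a rough sketch of the pattern a dwmac glue layer follows with the two named clocks — gate the interconnect clock that feeds the MAC IP, then program the PHY clock to the rate the link requires. This is a simplified illustration of the general approach, not the actual dwmac-sti code; the function name and the 25MHz rate are assumptions.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Hypothetical helper mirroring what a dwmac glue layer does with the
 * two named clocks; not the real dwmac-sti implementation. */
static int example_dwmac_clks_init(struct device *dev)
{
	struct clk *iface_clk, *phy_clk;
	int ret;

	/* "stmmaceth": interconnect clock that clocks the MAC IP */
	iface_clk = devm_clk_get(dev, "stmmaceth");
	if (IS_ERR(iface_clk))
		return PTR_ERR(iface_clk);

	/* "sti-ethclk": clock routed to the ethernet PHY */
	phy_clk = devm_clk_get(dev, "sti-ethclk");
	if (IS_ERR(phy_clk))
		return PTR_ERR(phy_clk);

	ret = clk_prepare_enable(iface_clk);
	if (ret)
		return ret;

	/* e.g. MII at 100Mbit wants a 25MHz PHY clock (assumed rate) */
	ret = clk_set_rate(phy_clk, 25000000);
	if (ret)
		return ret;

	return clk_prepare_enable(phy_clk);
}

Because the device tree now references the interconnect clock, the clock framework also sees it as in use, which is what moves these boards closer to booting without clk_ignore_unused.]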
Acked-by: Lee Jones Signed-off-by: Peter Griffin Signed-off-by: Maxime Coquelin --- arch/arm/boot/dts/stih415.dtsi | 8 ++++---- arch/arm/boot/dts/stih416.dtsi | 8 ++++---- include/dt-bindings/clock/stih415-clks.h | 1 + include/dt-bindings/clock/stih416-clks.h | 1 + 4 files changed, 10 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/stih415.dtsi b/arch/arm/boot/dts/stih415.dtsi index d6f254f302fe..a0f6f75fe3b5 100644 --- a/arch/arm/boot/dts/stih415.dtsi +++ b/arch/arm/boot/dts/stih415.dtsi @@ -169,8 +169,8 @@ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_mii0>; - clock-names = "stmmaceth"; - clocks = <&clk_s_a1_ls CLK_GMAC0_PHY>; + clock-names = "stmmaceth", "sti-ethclk"; + clocks = <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>; }; ethernet1: dwmac@fef08000 { @@ -192,8 +192,8 @@ reset-names = "stmmaceth"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_mii1>; - clock-names = "stmmaceth"; - clocks = <&clk_s_a0_ls CLK_ETH1_PHY>; + clock-names = "stmmaceth", "sti-ethclk"; + clocks = <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>; }; rc: rc@fe518000 { diff --git a/arch/arm/boot/dts/stih416.dtsi b/arch/arm/boot/dts/stih416.dtsi index 06473c5d9ea9..84758d76d064 100644 --- a/arch/arm/boot/dts/stih416.dtsi +++ b/arch/arm/boot/dts/stih416.dtsi @@ -175,8 +175,8 @@ reset-names = "stmmaceth"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_mii0>; - clock-names = "stmmaceth"; - clocks = <&clk_s_a1_ls CLK_GMAC0_PHY>; + clock-names = "stmmaceth", "sti-ethclk"; + clocks = <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>; }; ethernet1: dwmac@fef08000 { @@ -197,8 +197,8 @@ reset-names = "stmmaceth"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_mii1>; - clock-names = "stmmaceth"; - clocks = <&clk_s_a0_ls CLK_ETH1_PHY>; + clock-names = "stmmaceth", "sti-ethclk"; + clocks = <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>; }; rc: rc@fe518000 { diff --git a/include/dt-bindings/clock/stih415-clks.h b/include/dt-bindings/clock/stih415-clks.h index 0d2c7397e028..d80caa68aebd 100644 --- a/include/dt-bindings/clock/stih415-clks.h +++ b/include/dt-bindings/clock/stih415-clks.h @@ -10,6 +10,7 @@ #define CLK_ETH1_PHY 4 /* CLOCKGEN A1 */ +#define CLK_ICN_IF_2 0 #define CLK_GMAC0_PHY 3 #endif diff --git a/include/dt-bindings/clock/stih416-clks.h b/include/dt-bindings/clock/stih416-clks.h index 552c779eb6af..f9bdbd13568d 100644 --- a/include/dt-bindings/clock/stih416-clks.h +++ b/include/dt-bindings/clock/stih416-clks.h @@ -10,6 +10,7 @@ #define CLK_ETH1_PHY 4 /* CLOCKGEN A1 */ +#define CLK_ICN_IF_2 0 #define CLK_GMAC0_PHY 3 #endif -- cgit v1.2.3 From 88a1c67ff6e6fe5d8391cd87ea89744a5f2728a4 Mon Sep 17 00:00:00 2001 From: Lee Jones Date: Wed, 18 Jun 2014 11:30:17 +0100 Subject: ARM: stih41x: Rename stih416-b2020-revE.dts to stih416-b2020e.dts Two reasons for this rename. Firstly, it removes the camel case convention which isn't used by any other platform and secondly it matches the naming convention for the internal kernel, which can become annoying when flipping between the two. 
Signed-off-by: Lee Jones Signed-off-by: Maxime Coquelin --- arch/arm/boot/dts/Makefile | 2 +- arch/arm/boot/dts/stih416-b2020-revE.dts | 35 -------------------------------- arch/arm/boot/dts/stih416-b2020e.dts | 35 ++++++++++++++++++++++++++++++++ 3 files changed, 36 insertions(+), 36 deletions(-) delete mode 100644 arch/arm/boot/dts/stih416-b2020-revE.dts create mode 100644 arch/arm/boot/dts/stih416-b2020e.dts (limited to 'arch') diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 5986ff63b901..adb5ed9e269e 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -357,7 +357,7 @@ dtb-$(CONFIG_ARCH_STI)+= stih407-b2120.dtb \ stih415-b2020.dtb \ stih416-b2000.dtb \ stih416-b2020.dtb \ - stih416-b2020-revE.dtb + stih416-b2020e.dtb dtb-$(CONFIG_MACH_SUN4I) += \ sun4i-a10-a1000.dtb \ sun4i-a10-cubieboard.dtb \ diff --git a/arch/arm/boot/dts/stih416-b2020-revE.dts b/arch/arm/boot/dts/stih416-b2020-revE.dts deleted file mode 100644 index ba0fa2caaf18..000000000000 --- a/arch/arm/boot/dts/stih416-b2020-revE.dts +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 2014 STMicroelectronics (R&D) Limited. - * Author: Lee Jones - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * publishhed by the Free Software Foundation. - */ -/dts-v1/; -#include "stih416.dtsi" -#include "stih41x-b2020.dtsi" -/ { - model = "STiH416 B2020 REV-E"; - compatible = "st,stih416-b2020", "st,stih416"; - - soc { - leds { - compatible = "gpio-leds"; - red { - #gpio-cells = <1>; - label = "Front Panel LED"; - gpios = <&PIO4 1>; - linux,default-trigger = "heartbeat"; - }; - green { - gpios = <&PIO1 3>; - default-state = "off"; - }; - }; - - ethernet1: dwmac@fef08000 { - snps,reset-gpio = <&PIO0 7>; - }; - }; -}; diff --git a/arch/arm/boot/dts/stih416-b2020e.dts b/arch/arm/boot/dts/stih416-b2020e.dts new file mode 100644 index 000000000000..ba0fa2caaf18 --- /dev/null +++ b/arch/arm/boot/dts/stih416-b2020e.dts @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2014 STMicroelectronics (R&D) Limited. + * Author: Lee Jones + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * publishhed by the Free Software Foundation. + */ +/dts-v1/; +#include "stih416.dtsi" +#include "stih41x-b2020.dtsi" +/ { + model = "STiH416 B2020 REV-E"; + compatible = "st,stih416-b2020", "st,stih416"; + + soc { + leds { + compatible = "gpio-leds"; + red { + #gpio-cells = <1>; + label = "Front Panel LED"; + gpios = <&PIO4 1>; + linux,default-trigger = "heartbeat"; + }; + green { + gpios = <&PIO1 3>; + default-state = "off"; + }; + }; + + ethernet1: dwmac@fef08000 { + snps,reset-gpio = <&PIO0 7>; + }; + }; +}; -- cgit v1.2.3
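[Editor's note: as a footnote to the LED nodes in the renamed board file above, the gpio-leds binding maps closely onto the leds-gpio driver's platform data, which can make the DT properties easier to read. A hedged sketch of the non-DT equivalent follows; the GPIO numbers and the device registration are placeholders for illustration, not the real PIO4/PIO1 mappings.

#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/platform_device.h>

/* Platform-data equivalent of the "red" and "green" nodes above;
 * GPIO numbers are placeholders, not the actual PIO mappings. */
static const struct gpio_led example_b2020_leds[] = {
	{
		.name		 = "Front Panel LED",
		.gpio		 = 33,	/* placeholder */
		.default_trigger = "heartbeat",
	},
	{
		.name		 = "green",
		.gpio		 = 11,	/* placeholder */
		.default_state	 = LEDS_GPIO_DEFSTATE_OFF,
	},
};

static struct gpio_led_platform_data example_b2020_led_data = {
	.num_leds	= ARRAY_SIZE(example_b2020_leds),
	.leds		= example_b2020_leds,
};

static struct platform_device example_b2020_led_dev = {
	.name	= "leds-gpio",
	.id	= -1,
	.dev	= {
		.platform_data = &example_b2020_led_data,
	},
};

/* Registered from board init code:
 * platform_device_register(&example_b2020_led_dev);
 */
]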