Diffstat (limited to 'include/linux')
152 files changed, 5707 insertions, 1821 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 358c01b971db..5320153c311b 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -29,17 +29,17 @@ #include <linux/ioport.h> /* for struct resource */ #include <linux/device.h> -#ifdef CONFIG_ACPI - #ifndef _LINUX #define _LINUX #endif +#include <acpi/acpi.h> + +#ifdef CONFIG_ACPI #include <linux/list.h> #include <linux/mod_devicetable.h> #include <linux/dynamic_debug.h> -#include <acpi/acpi.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <acpi/acpi_numa.h> diff --git a/include/linux/aer.h b/include/linux/aer.h index 4dbaa7081530..c826d1c28f9c 100644 --- a/include/linux/aer.h +++ b/include/linux/aer.h @@ -11,6 +11,8 @@ #define AER_FATAL 1 #define AER_CORRECTABLE 2 +struct pci_dev; + struct aer_header_log_regs { unsigned int dw0; unsigned int dw1; diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h index 6dfd51a04d77..09a947e8bc87 100644 --- a/include/linux/ahci_platform.h +++ b/include/linux/ahci_platform.h @@ -43,10 +43,7 @@ struct ahci_host_priv *ahci_platform_get_resources( struct platform_device *pdev); int ahci_platform_init_host(struct platform_device *pdev, struct ahci_host_priv *hpriv, - const struct ata_port_info *pi_template, - unsigned long host_flags, - unsigned int force_port_map, - unsigned int mask_port_map); + const struct ata_port_info *pi_template); int ahci_platform_suspend_host(struct device *dev); int ahci_platform_resume_host(struct device *dev); diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h index 15f6b9edd0b1..2b08e79f5100 100644 --- a/include/linux/amd-iommu.h +++ b/include/linux/amd-iommu.h @@ -119,6 +119,13 @@ typedef int (*amd_iommu_invalid_ppr_cb)(struct pci_dev *pdev, extern int amd_iommu_set_invalid_ppr_cb(struct pci_dev *pdev, amd_iommu_invalid_ppr_cb cb); +#define PPR_FAULT_EXEC (1 << 1) +#define PPR_FAULT_READ (1 << 2) +#define PPR_FAULT_WRITE (1 << 5) +#define PPR_FAULT_USER (1 << 6) +#define PPR_FAULT_RSVD (1 << 7) +#define PPR_FAULT_GN (1 << 8) + /** * amd_iommu_device_info() - Get information about IOMMUv2 support of a * PCI device diff --git a/include/linux/arcdevice.h b/include/linux/arcdevice.h index 7216b0daf544..df0356220730 100644 --- a/include/linux/arcdevice.h +++ b/include/linux/arcdevice.h @@ -22,10 +22,6 @@ #ifdef __KERNEL__ #include <linux/irqreturn.h> -#ifndef bool -#define bool int -#endif - /* * RECON_THRESHOLD is the maximum number of RECON messages to receive * within one minute before printing a "cabling problem" warning. The @@ -285,9 +281,9 @@ struct arcnet_local { unsigned long first_recon; /* time of "first" RECON message to count */ unsigned long last_recon; /* time of most recent RECON */ int num_recons; /* number of RECONs between first and last. */ - bool network_down; /* do we think the network is down? */ + int network_down; /* do we think the network is down? 
*/ - bool excnak_pending; /* We just got an excesive nak interrupt */ + int excnak_pending; /* We just got an excesive nak interrupt */ struct { uint16_t sequence; /* sequence number (incs with each packet) */ @@ -305,7 +301,7 @@ struct arcnet_local { void (*command) (struct net_device * dev, int cmd); int (*status) (struct net_device * dev); void (*intmask) (struct net_device * dev, int mask); - bool (*reset) (struct net_device * dev, bool really_reset); + int (*reset) (struct net_device * dev, int really_reset); void (*open) (struct net_device * dev); void (*close) (struct net_device * dev); diff --git a/include/linux/atmel-ssc.h b/include/linux/atmel-ssc.h index 571a12ebb018..7c0f6549898b 100644 --- a/include/linux/atmel-ssc.h +++ b/include/linux/atmel-ssc.h @@ -7,6 +7,7 @@ struct atmel_ssc_platform_data { int use_dma; + int has_fslen_ext; }; struct ssc_device { @@ -71,6 +72,12 @@ void ssc_free(struct ssc_device *ssc); #define SSC_RFMR_DATNB_OFFSET 8 #define SSC_RFMR_FSEDGE_SIZE 1 #define SSC_RFMR_FSEDGE_OFFSET 24 +/* + * The FSLEN_EXT exist on at91sam9rl, at91sam9g10, + * at91sam9g20, and at91sam9g45 and newer SoCs + */ +#define SSC_RFMR_FSLEN_EXT_SIZE 4 +#define SSC_RFMR_FSLEN_EXT_OFFSET 28 #define SSC_RFMR_FSLEN_SIZE 4 #define SSC_RFMR_FSLEN_OFFSET 16 #define SSC_RFMR_FSOS_SIZE 4 @@ -109,6 +116,12 @@ void ssc_free(struct ssc_device *ssc); #define SSC_TFMR_FSDEN_OFFSET 23 #define SSC_TFMR_FSEDGE_SIZE 1 #define SSC_TFMR_FSEDGE_OFFSET 24 +/* + * The FSLEN_EXT exist on at91sam9rl, at91sam9g10, + * at91sam9g20, and at91sam9g45 and newer SoCs + */ +#define SSC_TFMR_FSLEN_EXT_SIZE 4 +#define SSC_TFMR_FSLEN_EXT_OFFSET 28 #define SSC_TFMR_FSLEN_SIZE 4 #define SSC_TFMR_FSLEN_OFFSET 16 #define SSC_TFMR_FSOS_SIZE 3 diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index 0b3bb16c705a..0272e49135d0 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h @@ -6,6 +6,7 @@ #include <linux/bcma/bcma_driver_chipcommon.h> #include <linux/bcma/bcma_driver_pci.h> +#include <linux/bcma/bcma_driver_pcie2.h> #include <linux/bcma/bcma_driver_mips.h> #include <linux/bcma/bcma_driver_gmac_cmn.h> #include <linux/ssb/ssb.h> /* SPROM sharing */ @@ -72,17 +73,17 @@ struct bcma_host_ops { /* Core-ID values. 
*/ #define BCMA_CORE_OOB_ROUTER 0x367 /* Out of band */ #define BCMA_CORE_4706_CHIPCOMMON 0x500 -#define BCMA_CORE_PCIEG2 0x501 -#define BCMA_CORE_DMA 0x502 -#define BCMA_CORE_SDIO3 0x503 -#define BCMA_CORE_USB20 0x504 -#define BCMA_CORE_USB30 0x505 -#define BCMA_CORE_A9JTAG 0x506 -#define BCMA_CORE_DDR23 0x507 -#define BCMA_CORE_ROM 0x508 -#define BCMA_CORE_NAND 0x509 -#define BCMA_CORE_QSPI 0x50A -#define BCMA_CORE_CHIPCOMMON_B 0x50B +#define BCMA_CORE_NS_PCIEG2 0x501 +#define BCMA_CORE_NS_DMA 0x502 +#define BCMA_CORE_NS_SDIO3 0x503 +#define BCMA_CORE_NS_USB20 0x504 +#define BCMA_CORE_NS_USB30 0x505 +#define BCMA_CORE_NS_A9JTAG 0x506 +#define BCMA_CORE_NS_DDR23 0x507 +#define BCMA_CORE_NS_ROM 0x508 +#define BCMA_CORE_NS_NAND 0x509 +#define BCMA_CORE_NS_QSPI 0x50A +#define BCMA_CORE_NS_CHIPCOMMON_B 0x50B #define BCMA_CORE_4706_SOC_RAM 0x50E #define BCMA_CORE_ARMCA9 0x510 #define BCMA_CORE_4706_MAC_GBIT 0x52D @@ -157,6 +158,9 @@ struct bcma_host_ops { /* Chip IDs of PCIe devices */ #define BCMA_CHIP_ID_BCM4313 0x4313 #define BCMA_CHIP_ID_BCM43142 43142 +#define BCMA_CHIP_ID_BCM43131 43131 +#define BCMA_CHIP_ID_BCM43217 43217 +#define BCMA_CHIP_ID_BCM43222 43222 #define BCMA_CHIP_ID_BCM43224 43224 #define BCMA_PKG_ID_BCM43224_FAB_CSM 0x8 #define BCMA_PKG_ID_BCM43224_FAB_SMIC 0xa @@ -333,6 +337,7 @@ struct bcma_bus { struct bcma_drv_cc drv_cc; struct bcma_drv_pci drv_pci[2]; + struct bcma_drv_pcie2 drv_pcie2; struct bcma_drv_mips drv_mips; struct bcma_drv_gmac_cmn drv_gmac_cmn; diff --git a/include/linux/bcma/bcma_driver_pcie2.h b/include/linux/bcma/bcma_driver_pcie2.h new file mode 100644 index 000000000000..5988b05781c3 --- /dev/null +++ b/include/linux/bcma/bcma_driver_pcie2.h @@ -0,0 +1,158 @@ +#ifndef LINUX_BCMA_DRIVER_PCIE2_H_ +#define LINUX_BCMA_DRIVER_PCIE2_H_ + +#define BCMA_CORE_PCIE2_CLK_CONTROL 0x0000 +#define PCIE2_CLKC_RST_OE 0x0001 /* When set, drives PCI_RESET out to pin */ +#define PCIE2_CLKC_RST 0x0002 /* Value driven out to pin */ +#define PCIE2_CLKC_SPERST 0x0004 /* SurvivePeRst */ +#define PCIE2_CLKC_DISABLE_L1CLK_GATING 0x0010 +#define PCIE2_CLKC_DLYPERST 0x0100 /* Delay PeRst to CoE Core */ +#define PCIE2_CLKC_DISSPROMLD 0x0200 /* DisableSpromLoadOnPerst */ +#define PCIE2_CLKC_WAKE_MODE_L2 0x1000 /* Wake on L2 */ +#define BCMA_CORE_PCIE2_RC_PM_CONTROL 0x0004 +#define BCMA_CORE_PCIE2_RC_PM_STATUS 0x0008 +#define BCMA_CORE_PCIE2_EP_PM_CONTROL 0x000C +#define BCMA_CORE_PCIE2_EP_PM_STATUS 0x0010 +#define BCMA_CORE_PCIE2_EP_LTR_CONTROL 0x0014 +#define BCMA_CORE_PCIE2_EP_LTR_STATUS 0x0018 +#define BCMA_CORE_PCIE2_EP_OBFF_STATUS 0x001C +#define BCMA_CORE_PCIE2_PCIE_ERR_STATUS 0x0020 +#define BCMA_CORE_PCIE2_RC_AXI_CONFIG 0x0100 +#define BCMA_CORE_PCIE2_EP_AXI_CONFIG 0x0104 +#define BCMA_CORE_PCIE2_RXDEBUG_STATUS0 0x0108 +#define BCMA_CORE_PCIE2_RXDEBUG_CONTROL0 0x010C +#define BCMA_CORE_PCIE2_CONFIGINDADDR 0x0120 +#define BCMA_CORE_PCIE2_CONFIGINDDATA 0x0124 +#define BCMA_CORE_PCIE2_MDIOCONTROL 0x0128 +#define BCMA_CORE_PCIE2_MDIOWRDATA 0x012C +#define BCMA_CORE_PCIE2_MDIORDDATA 0x0130 +#define BCMA_CORE_PCIE2_DATAINTF 0x0180 +#define BCMA_CORE_PCIE2_D2H_INTRLAZY_0 0x0188 +#define BCMA_CORE_PCIE2_H2D_INTRLAZY_0 0x018c +#define BCMA_CORE_PCIE2_H2D_INTSTAT_0 0x0190 +#define BCMA_CORE_PCIE2_H2D_INTMASK_0 0x0194 +#define BCMA_CORE_PCIE2_D2H_INTSTAT_0 0x0198 +#define BCMA_CORE_PCIE2_D2H_INTMASK_0 0x019c +#define BCMA_CORE_PCIE2_LTR_STATE 0x01A0 /* Latency Tolerance Reporting */ +#define PCIE2_LTR_ACTIVE 2 +#define PCIE2_LTR_ACTIVE_IDLE 1 +#define PCIE2_LTR_SLEEP 0 +#define 
PCIE2_LTR_FINAL_MASK 0x300 +#define PCIE2_LTR_FINAL_SHIFT 8 +#define BCMA_CORE_PCIE2_PWR_INT_STATUS 0x01A4 +#define BCMA_CORE_PCIE2_PWR_INT_MASK 0x01A8 +#define BCMA_CORE_PCIE2_CFG_ADDR 0x01F8 +#define BCMA_CORE_PCIE2_CFG_DATA 0x01FC +#define BCMA_CORE_PCIE2_SYS_EQ_PAGE 0x0200 +#define BCMA_CORE_PCIE2_SYS_MSI_PAGE 0x0204 +#define BCMA_CORE_PCIE2_SYS_MSI_INTREN 0x0208 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL0 0x0210 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL1 0x0214 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL2 0x0218 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL3 0x021C +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL4 0x0220 +#define BCMA_CORE_PCIE2_SYS_MSI_CTRL5 0x0224 +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD0 0x0250 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL0 0x0254 +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD1 0x0258 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL1 0x025C +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD2 0x0260 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL2 0x0264 +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD3 0x0268 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL3 0x026C +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD4 0x0270 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL4 0x0274 +#define BCMA_CORE_PCIE2_SYS_EQ_HEAD5 0x0278 +#define BCMA_CORE_PCIE2_SYS_EQ_TAIL5 0x027C +#define BCMA_CORE_PCIE2_SYS_RC_INTX_EN 0x0330 +#define BCMA_CORE_PCIE2_SYS_RC_INTX_CSR 0x0334 +#define BCMA_CORE_PCIE2_SYS_MSI_REQ 0x0340 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR_EN 0x0344 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR_CSR 0x0348 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR0 0x0350 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR1 0x0354 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR2 0x0358 +#define BCMA_CORE_PCIE2_SYS_HOST_INTR3 0x035C +#define BCMA_CORE_PCIE2_SYS_EP_INT_EN0 0x0360 +#define BCMA_CORE_PCIE2_SYS_EP_INT_EN1 0x0364 +#define BCMA_CORE_PCIE2_SYS_EP_INT_CSR0 0x0370 +#define BCMA_CORE_PCIE2_SYS_EP_INT_CSR1 0x0374 +#define BCMA_CORE_PCIE2_SPROM(wordoffset) (0x0800 + ((wordoffset) * 2)) +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_0 0x0C00 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_1 0x0C04 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_2 0x0C08 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_3 0x0C0C +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_4 0x0C10 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_5 0x0C14 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_6 0x0C18 +#define BCMA_CORE_PCIE2_FUNC0_IMAP0_7 0x0C1C +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_0 0x0C20 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_1 0x0C24 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_2 0x0C28 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_3 0x0C2C +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_4 0x0C30 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_5 0x0C34 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_6 0x0C38 +#define BCMA_CORE_PCIE2_FUNC1_IMAP0_7 0x0C3C +#define BCMA_CORE_PCIE2_FUNC0_IMAP1 0x0C80 +#define BCMA_CORE_PCIE2_FUNC1_IMAP1 0x0C88 +#define BCMA_CORE_PCIE2_FUNC0_IMAP2 0x0CC0 +#define BCMA_CORE_PCIE2_FUNC1_IMAP2 0x0CC8 +#define BCMA_CORE_PCIE2_IARR0_LOWER 0x0D00 +#define BCMA_CORE_PCIE2_IARR0_UPPER 0x0D04 +#define BCMA_CORE_PCIE2_IARR1_LOWER 0x0D08 +#define BCMA_CORE_PCIE2_IARR1_UPPER 0x0D0C +#define BCMA_CORE_PCIE2_IARR2_LOWER 0x0D10 +#define BCMA_CORE_PCIE2_IARR2_UPPER 0x0D14 +#define BCMA_CORE_PCIE2_OARR0 0x0D20 +#define BCMA_CORE_PCIE2_OARR1 0x0D28 +#define BCMA_CORE_PCIE2_OARR2 0x0D30 +#define BCMA_CORE_PCIE2_OMAP0_LOWER 0x0D40 +#define BCMA_CORE_PCIE2_OMAP0_UPPER 0x0D44 +#define BCMA_CORE_PCIE2_OMAP1_LOWER 0x0D48 +#define BCMA_CORE_PCIE2_OMAP1_UPPER 0x0D4C +#define BCMA_CORE_PCIE2_OMAP2_LOWER 0x0D50 +#define BCMA_CORE_PCIE2_OMAP2_UPPER 0x0D54 +#define BCMA_CORE_PCIE2_FUNC1_IARR1_SIZE 0x0D58 +#define BCMA_CORE_PCIE2_FUNC1_IARR2_SIZE 0x0D5C +#define 
BCMA_CORE_PCIE2_MEM_CONTROL 0x0F00 +#define BCMA_CORE_PCIE2_MEM_ECC_ERRLOG0 0x0F04 +#define BCMA_CORE_PCIE2_MEM_ECC_ERRLOG1 0x0F08 +#define BCMA_CORE_PCIE2_LINK_STATUS 0x0F0C +#define BCMA_CORE_PCIE2_STRAP_STATUS 0x0F10 +#define BCMA_CORE_PCIE2_RESET_STATUS 0x0F14 +#define BCMA_CORE_PCIE2_RESETEN_IN_LINKDOWN 0x0F18 +#define BCMA_CORE_PCIE2_MISC_INTR_EN 0x0F1C +#define BCMA_CORE_PCIE2_TX_DEBUG_CFG 0x0F20 +#define BCMA_CORE_PCIE2_MISC_CONFIG 0x0F24 +#define BCMA_CORE_PCIE2_MISC_STATUS 0x0F28 +#define BCMA_CORE_PCIE2_INTR_EN 0x0F30 +#define BCMA_CORE_PCIE2_INTR_CLEAR 0x0F34 +#define BCMA_CORE_PCIE2_INTR_STATUS 0x0F38 + +/* PCIE gen2 config regs */ +#define PCIE2_INTSTATUS 0x090 +#define PCIE2_INTMASK 0x094 +#define PCIE2_SBMBX 0x098 + +#define PCIE2_PMCR_REFUP 0x1814 /* Trefup time */ + +#define PCIE2_CAP_DEVSTSCTRL2_OFFSET 0xD4 +#define PCIE2_CAP_DEVSTSCTRL2_LTRENAB 0x400 +#define PCIE2_PVT_REG_PM_CLK_PERIOD 0x184c + +struct bcma_drv_pcie2 { + struct bcma_device *core; +}; + +#define pcie2_read16(pcie2, offset) bcma_read16((pcie2)->core, offset) +#define pcie2_read32(pcie2, offset) bcma_read32((pcie2)->core, offset) +#define pcie2_write16(pcie2, offset, val) bcma_write16((pcie2)->core, offset, val) +#define pcie2_write32(pcie2, offset, val) bcma_write32((pcie2)->core, offset, val) + +#define pcie2_set32(pcie2, offset, set) bcma_set32((pcie2)->core, offset, set) +#define pcie2_mask32(pcie2, offset, mask) bcma_mask32((pcie2)->core, offset, mask) + +void bcma_core_pcie2_init(struct bcma_drv_pcie2 *pcie2); + +#endif /* LINUX_BCMA_DRIVER_PCIE2_H_ */ diff --git a/include/linux/capability.h b/include/linux/capability.h index 84b13ad67c1c..aa93e5ef594c 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -78,8 +78,11 @@ extern const kernel_cap_t __cap_init_eff_set; # error Fix up hand-coded capability macro initializers #else /* HAND-CODED capability initializers */ +#define CAP_LAST_U32 ((_KERNEL_CAPABILITY_U32S) - 1) +#define CAP_LAST_U32_VALID_MASK (CAP_TO_MASK(CAP_LAST_CAP + 1) -1) + # define CAP_EMPTY_SET ((kernel_cap_t){{ 0, 0 }}) -# define CAP_FULL_SET ((kernel_cap_t){{ ~0, ~0 }}) +# define CAP_FULL_SET ((kernel_cap_t){{ ~0, CAP_LAST_U32_VALID_MASK }}) # define CAP_FS_SET ((kernel_cap_t){{ CAP_FS_MASK_B0 \ | CAP_TO_MASK(CAP_LINUX_IMMUTABLE), \ CAP_FS_MASK_B1 } }) diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 8a111dd42d7a..b5223c570eba 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -203,7 +203,15 @@ struct cgroup { struct kernfs_node *kn; /* cgroup kernfs entry */ struct kernfs_node *populated_kn; /* kn for "cgroup.subtree_populated" */ - /* the bitmask of subsystems enabled on the child cgroups */ + /* + * The bitmask of subsystems enabled on the child cgroups. + * ->subtree_control is the one configured through + * "cgroup.subtree_control" while ->child_subsys_mask is the + * effective one which may have more subsystems enabled. + * Controller knobs are made available iff it's enabled in + * ->subtree_control. + */ + unsigned int subtree_control; unsigned int child_subsys_mask; /* Private pointers for each registered subsystem */ @@ -248,73 +256,9 @@ struct cgroup { /* cgroup_root->flags */ enum { - /* - * Unfortunately, cgroup core and various controllers are riddled - * with idiosyncrasies and pointless options. The following flag, - * when set, will force sane behavior - some options are forced on, - * others are disallowed, and some controllers will change their - * hierarchical or other behaviors. 
- * - * The set of behaviors affected by this flag are still being - * determined and developed and the mount option for this flag is - * prefixed with __DEVEL__. The prefix will be dropped once we - * reach the point where all behaviors are compatible with the - * planned unified hierarchy, which will automatically turn on this - * flag. - * - * The followings are the behaviors currently affected this flag. - * - * - Mount options "noprefix", "xattr", "clone_children", - * "release_agent" and "name" are disallowed. - * - * - When mounting an existing superblock, mount options should - * match. - * - * - Remount is disallowed. - * - * - rename(2) is disallowed. - * - * - "tasks" is removed. Everything should be at process - * granularity. Use "cgroup.procs" instead. - * - * - "cgroup.procs" is not sorted. pids will be unique unless they - * got recycled inbetween reads. - * - * - "release_agent" and "notify_on_release" are removed. - * Replacement notification mechanism will be implemented. - * - * - "cgroup.clone_children" is removed. - * - * - "cgroup.subtree_populated" is available. Its value is 0 if - * the cgroup and its descendants contain no task; otherwise, 1. - * The file also generates kernfs notification which can be - * monitored through poll and [di]notify when the value of the - * file changes. - * - * - If mount is requested with sane_behavior but without any - * subsystem, the default unified hierarchy is mounted. - * - * - cpuset: tasks will be kept in empty cpusets when hotplug happens - * and take masks of ancestors with non-empty cpus/mems, instead of - * being moved to an ancestor. - * - * - cpuset: a task can be moved into an empty cpuset, and again it - * takes masks of ancestors. - * - * - memcg: use_hierarchy is on by default and the cgroup file for - * the flag is not created. - * - * - blkcg: blk-throttle becomes properly hierarchical. - * - * - debug: disallowed on the default hierarchy. - */ - CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), - + CGRP_ROOT_SANE_BEHAVIOR = (1 << 0), /* __DEVEL__sane_behavior specified */ CGRP_ROOT_NOPREFIX = (1 << 1), /* mounted subsystems have no named prefix */ CGRP_ROOT_XATTR = (1 << 2), /* supports extended attributes */ - - /* mount options live below bit 16 */ - CGRP_ROOT_OPTION_MASK = (1 << 16) - 1, }; /* @@ -440,9 +384,11 @@ struct css_set { enum { CFTYPE_ONLY_ON_ROOT = (1 << 0), /* only create on root cgrp */ CFTYPE_NOT_ON_ROOT = (1 << 1), /* don't create on root cgrp */ - CFTYPE_INSANE = (1 << 2), /* don't create if sane_behavior */ CFTYPE_NO_PREFIX = (1 << 3), /* (DON'T USE FOR NEW FILES) no subsys prefix */ - CFTYPE_ONLY_ON_DFL = (1 << 4), /* only on default hierarchy */ + + /* internal flags, do not use outside cgroup core proper */ + __CFTYPE_ONLY_ON_DFL = (1 << 16), /* only on default hierarchy */ + __CFTYPE_NOT_ON_DFL = (1 << 17), /* not on default hierarchy */ }; #define MAX_CFTYPE_NAME 64 @@ -526,20 +472,64 @@ struct cftype { extern struct cgroup_root cgrp_dfl_root; extern struct css_set init_css_set; +/** + * cgroup_on_dfl - test whether a cgroup is on the default hierarchy + * @cgrp: the cgroup of interest + * + * The default hierarchy is the v2 interface of cgroup and this function + * can be used to test whether a cgroup is on the default hierarchy for + * cases where a subsystem should behave differnetly depending on the + * interface version. + * + * The set of behaviors which change on the default hierarchy are still + * being determined and the mount option is prefixed with __DEVEL__. 
+ * + * List of changed behaviors: + * + * - Mount options "noprefix", "xattr", "clone_children", "release_agent" + * and "name" are disallowed. + * + * - When mounting an existing superblock, mount options should match. + * + * - Remount is disallowed. + * + * - rename(2) is disallowed. + * + * - "tasks" is removed. Everything should be at process granularity. Use + * "cgroup.procs" instead. + * + * - "cgroup.procs" is not sorted. pids will be unique unless they got + * recycled inbetween reads. + * + * - "release_agent" and "notify_on_release" are removed. Replacement + * notification mechanism will be implemented. + * + * - "cgroup.clone_children" is removed. + * + * - "cgroup.subtree_populated" is available. Its value is 0 if the cgroup + * and its descendants contain no task; otherwise, 1. The file also + * generates kernfs notification which can be monitored through poll and + * [di]notify when the value of the file changes. + * + * - cpuset: tasks will be kept in empty cpusets when hotplug happens and + * take masks of ancestors with non-empty cpus/mems, instead of being + * moved to an ancestor. + * + * - cpuset: a task can be moved into an empty cpuset, and again it takes + * masks of ancestors. + * + * - memcg: use_hierarchy is on by default and the cgroup file for the flag + * is not created. + * + * - blkcg: blk-throttle becomes properly hierarchical. + * + * - debug: disallowed on the default hierarchy. + */ static inline bool cgroup_on_dfl(const struct cgroup *cgrp) { return cgrp->root == &cgrp_dfl_root; } -/* - * See the comment above CGRP_ROOT_SANE_BEHAVIOR for details. This - * function can be called as long as @cgrp is accessible. - */ -static inline bool cgroup_sane_behavior(const struct cgroup *cgrp) -{ - return cgrp->root->flags & CGRP_ROOT_SANE_BEHAVIOR; -} - /* no synchronization, the result can only be used as a hint */ static inline bool cgroup_has_tasks(struct cgroup *cgrp) { @@ -602,7 +592,8 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp) char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen); -int cgroup_add_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); +int cgroup_add_dfl_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); +int cgroup_add_legacy_cftypes(struct cgroup_subsys *ss, struct cftype *cfts); int cgroup_rm_cftypes(struct cftype *cfts); bool cgroup_is_descendant(struct cgroup *cgrp, struct cgroup *ancestor); @@ -634,6 +625,7 @@ struct cgroup_subsys { int (*css_online)(struct cgroup_subsys_state *css); void (*css_offline)(struct cgroup_subsys_state *css); void (*css_free)(struct cgroup_subsys_state *css); + void (*css_reset)(struct cgroup_subsys_state *css); int (*can_attach)(struct cgroup_subsys_state *css, struct cgroup_taskset *tset); @@ -682,8 +674,21 @@ struct cgroup_subsys { */ struct list_head cfts; - /* base cftypes, automatically registered with subsys itself */ - struct cftype *base_cftypes; + /* + * Base cftypes which are automatically registered. The two can + * point to the same array. + */ + struct cftype *dfl_cftypes; /* for the default hierarchy */ + struct cftype *legacy_cftypes; /* for the legacy hierarchies */ + + /* + * A subsystem may depend on other subsystems. When such subsystem + * is enabled on a cgroup, the depended-upon subsystems are enabled + * together if available. Subsystems enabled due to dependency are + * not visible to userland until explicitly enabled. The following + * specifies the mask of subsystems that this one depends on. 
+ */ + unsigned int depends_on; }; #define SUBSYS(_x) extern struct cgroup_subsys _x ## _cgrp_subsys; diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 0c287dbbb144..411dd7eb2653 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -619,5 +619,10 @@ static inline void clk_writel(u32 val, u32 __iomem *reg) #endif /* platform dependent I/O accessors */ +#ifdef CONFIG_DEBUG_FS +struct dentry *clk_debugfs_add_file(struct clk *clk, char *name, umode_t mode, + void *data, const struct file_operations *fops); +#endif + #endif /* CONFIG_COMMON_CLK */ #endif /* CLK_PROVIDER_H */ diff --git a/include/linux/clk/clk-conf.h b/include/linux/clk/clk-conf.h new file mode 100644 index 000000000000..f3050e15f833 --- /dev/null +++ b/include/linux/clk/clk-conf.h @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2014 Samsung Electronics Co., Ltd. + * Sylwester Nawrocki <s.nawrocki@samsung.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +struct device_node; + +#if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) +int of_clk_set_defaults(struct device_node *node, bool clk_supplier); +#else +static inline int of_clk_set_defaults(struct device_node *node, + bool clk_supplier) +{ + return 0; +} +#endif diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h index a16b497d5159..653f0e2b6ca9 100644 --- a/include/linux/clocksource.h +++ b/include/linux/clocksource.h @@ -162,7 +162,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc, * @archdata: arch-specific data * @suspend: suspend function for the clocksource, if necessary * @resume: resume function for the clocksource, if necessary - * @cycle_last: most recent cycle counter value seen by ::read() * @owner: module reference, must be set by clocksource in modules */ struct clocksource { @@ -171,7 +170,6 @@ struct clocksource { * clocksource itself is cacheline aligned. */ cycle_t (*read)(struct clocksource *cs); - cycle_t cycle_last; cycle_t mask; u32 mult; u32 shift; diff --git a/include/linux/component.h b/include/linux/component.h index 68870182ca1e..c00dcc302611 100644 --- a/include/linux/component.h +++ b/include/linux/component.h @@ -29,4 +29,11 @@ void component_master_del(struct device *, int component_master_add_child(struct master *master, int (*compare)(struct device *, void *), void *compare_data); +struct component_match; + +int component_master_add_with_match(struct device *, + const struct component_master_ops *, struct component_match *); +void component_match_add(struct device *, struct component_match **, + int (*compare)(struct device *, void *), void *compare_data); + #endif diff --git a/include/linux/cper.h b/include/linux/cper.h index 2fc0ec3d89cc..76abba4b238e 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h @@ -22,6 +22,7 @@ #define LINUX_CPER_H #include <linux/uuid.h> +#include <linux/trace_seq.h> /* CPER record signature and the size */ #define CPER_SIG_RECORD "CPER" @@ -36,6 +37,13 @@ #define CPER_RECORD_REV 0x0100 /* + * CPER record length contains the CPER fields which are relevant for further + * handling of a memory error in userspace (we don't carry all the fields + * defined in the UEFI spec because some of them don't make any sense.) + * Currently, a length of 256 should be more than enough. 
+ */ +#define CPER_REC_LEN 256 +/* * Severity difinition for error_severity in struct cper_record_header * and section_severity in struct cper_section_descriptor */ @@ -356,6 +364,24 @@ struct cper_sec_mem_err { __u16 mem_dev_handle; /* module handle in UEFI 2.4 */ }; +struct cper_mem_err_compact { + __u64 validation_bits; + __u16 node; + __u16 card; + __u16 module; + __u16 bank; + __u16 device; + __u16 row; + __u16 column; + __u16 bit_pos; + __u64 requestor_id; + __u64 responder_id; + __u64 target_id; + __u16 rank; + __u16 mem_array_handle; + __u16 mem_dev_handle; +}; + struct cper_sec_pcie { __u64 validation_bits; __u32 port_type; @@ -395,7 +421,13 @@ struct cper_sec_pcie { #pragma pack() u64 cper_next_record_id(void); +const char *cper_severity_str(unsigned int); +const char *cper_mem_err_type_str(unsigned int); void cper_print_bits(const char *prefix, unsigned int bits, const char * const strs[], unsigned int strs_size); +void cper_mem_err_pack(const struct cper_sec_mem_err *, + struct cper_mem_err_compact *); +const char *cper_mem_err_unpack(struct trace_seq *, + struct cper_mem_err_compact *); #endif diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 8f8ae95c6e27..7d1955afa62c 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -176,6 +176,7 @@ static inline void disable_cpufreq(void) { } #define CPUFREQ_RELATION_L 0 /* lowest frequency at or above target */ #define CPUFREQ_RELATION_H 1 /* highest frequency below or at target */ +#define CPUFREQ_RELATION_C 2 /* closest frequency to target */ struct freq_attr { struct attribute attr; diff --git a/include/linux/crc32.h b/include/linux/crc32.h index 7d275c4fc011..9e8a032c1788 100644 --- a/include/linux/crc32.h +++ b/include/linux/crc32.h @@ -8,8 +8,8 @@ #include <linux/types.h> #include <linux/bitrev.h> -extern u32 crc32_le(u32 crc, unsigned char const *p, size_t len); -extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len); +u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len); +u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len); /** * crc32_le_combine - Combine two crc32 check values into one. For two @@ -29,9 +29,14 @@ extern u32 crc32_be(u32 crc, unsigned char const *p, size_t len); * with the same initializer as crc1, and crc2 seed was 0. See * also crc32_combine_test(). */ -extern u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2); +u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len); -extern u32 __crc32c_le(u32 crc, unsigned char const *p, size_t len); +static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2) +{ + return crc32_le_shift(crc1, len2) ^ crc2; +} + +u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len); /** * __crc32c_le_combine - Combine two crc32c check values into one. For two @@ -51,7 +56,12 @@ extern u32 __crc32c_le(u32 crc, unsigned char const *p, size_t len); * seeded with the same initializer as crc1, and crc2 seed * was 0. See also crc32c_combine_test(). 
*/ -extern u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2); +u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len); + +static inline u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2) +{ + return __crc32c_le_shift(crc1, len2) ^ crc2; +} #define crc32(seed, data, length) crc32_le(seed, (unsigned char const *)(data), length) diff --git a/include/linux/crypto.h b/include/linux/crypto.h index b92eadf92d72..d45e949699ea 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -710,9 +710,9 @@ static inline void ablkcipher_request_free(struct ablkcipher_request *req) static inline void ablkcipher_request_set_callback( struct ablkcipher_request *req, - u32 flags, crypto_completion_t complete, void *data) + u32 flags, crypto_completion_t compl, void *data) { - req->base.complete = complete; + req->base.complete = compl; req->base.data = data; req->base.flags = flags; } @@ -841,10 +841,10 @@ static inline void aead_request_free(struct aead_request *req) static inline void aead_request_set_callback(struct aead_request *req, u32 flags, - crypto_completion_t complete, + crypto_completion_t compl, void *data) { - req->base.complete = complete; + req->base.complete = compl; req->base.data = data; req->base.flags = flags; } diff --git a/include/linux/device.h b/include/linux/device.h index af424acd393d..43d183aeb25b 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -124,7 +124,7 @@ struct bus_type { const struct dev_pm_ops *pm; - struct iommu_ops *iommu_ops; + const struct iommu_ops *iommu_ops; struct subsys_private *p; struct lock_class_key lock_key; @@ -605,6 +605,10 @@ extern int devres_release_group(struct device *dev, void *id); /* managed devm_k.alloc/kfree for device drivers */ extern void *devm_kmalloc(struct device *dev, size_t size, gfp_t gfp); +extern char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt, + va_list ap); +extern char *devm_kasprintf(struct device *dev, gfp_t gfp, + const char *fmt, ...); static inline void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp) { return devm_kmalloc(dev, size, gfp | __GFP_ZERO); @@ -631,8 +635,6 @@ extern unsigned long devm_get_free_pages(struct device *dev, extern void devm_free_pages(struct device *dev, unsigned long addr); void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res); -void __iomem *devm_request_and_ioremap(struct device *dev, - struct resource *res); /* allows to add/remove a custom action to devres stack */ int devm_add_action(struct device *dev, void (*action)(void *), void *data); diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index f886985a28b2..694e1fe1c4b4 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h @@ -30,6 +30,8 @@ #include <linux/list.h> #include <linux/dma-mapping.h> #include <linux/fs.h> +#include <linux/fence.h> +#include <linux/wait.h> struct device; struct dma_buf; @@ -115,6 +117,7 @@ struct dma_buf_ops { * @exp_name: name of the exporter; useful for debugging. * @list_node: node for dma_buf accounting and debugging. * @priv: exporter specific private data for this buffer object. 
+ * @resv: reservation object linked to this dma-buf */ struct dma_buf { size_t size; @@ -128,6 +131,17 @@ struct dma_buf { const char *exp_name; struct list_head list_node; void *priv; + struct reservation_object *resv; + + /* poll support */ + wait_queue_head_t poll; + + struct dma_buf_poll_cb_t { + struct fence_cb cb; + wait_queue_head_t *poll; + + unsigned long active; + } cb_excl, cb_shared; }; /** @@ -168,10 +182,11 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *dmabuf_attach); struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops, - size_t size, int flags, const char *); + size_t size, int flags, const char *, + struct reservation_object *); -#define dma_buf_export(priv, ops, size, flags) \ - dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME) +#define dma_buf_export(priv, ops, size, flags, resv) \ + dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME, resv) int dma_buf_fd(struct dma_buf *dmabuf, int flags); struct dma_buf *dma_buf_get(int fd); diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index d2c5cc7c583c..3d1c2aa51530 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h @@ -299,6 +299,7 @@ enum dma_slave_buswidth { DMA_SLAVE_BUSWIDTH_UNDEFINED = 0, DMA_SLAVE_BUSWIDTH_1_BYTE = 1, DMA_SLAVE_BUSWIDTH_2_BYTES = 2, + DMA_SLAVE_BUSWIDTH_3_BYTES = 3, DMA_SLAVE_BUSWIDTH_4_BYTES = 4, DMA_SLAVE_BUSWIDTH_8_BYTES = 8, }; diff --git a/include/linux/dmar.h b/include/linux/dmar.h index 23c8db129560..1deece46a0ca 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h @@ -114,22 +114,30 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, /* Intel IOMMU detection */ extern int detect_intel_iommu(void); extern int enable_drhd_fault_handling(void); -#else -struct dmar_pci_notify_info; -static inline int detect_intel_iommu(void) + +#ifdef CONFIG_INTEL_IOMMU +extern int iommu_detected, no_iommu; +extern int intel_iommu_init(void); +extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); +extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); +extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); +#else /* !CONFIG_INTEL_IOMMU: */ +static inline int intel_iommu_init(void) { return -ENODEV; } +static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) { - return -ENODEV; + return 0; } - -static inline int dmar_table_init(void) +static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) { - return -ENODEV; + return 0; } -static inline int enable_drhd_fault_handling(void) +static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) { - return -1; + return 0; } -#endif /* !CONFIG_DMAR_TABLE */ +#endif /* CONFIG_INTEL_IOMMU */ + +#endif /* CONFIG_DMAR_TABLE */ struct irte { union { @@ -177,26 +185,4 @@ extern int dmar_set_interrupt(struct intel_iommu *iommu); extern irqreturn_t dmar_fault(int irq, void *dev_id); extern int arch_setup_dmar_msi(unsigned int irq); -#ifdef CONFIG_INTEL_IOMMU -extern int iommu_detected, no_iommu; -extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); -extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); -extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); -extern int intel_iommu_init(void); -#else /* !CONFIG_INTEL_IOMMU: */ -static inline int intel_iommu_init(void) { return -ENODEV; } -static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) -{ - return 0; -} -static inline int dmar_parse_one_atsr(struct 
acpi_dmar_header *header) -{ - return 0; -} -static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) -{ - return 0; -} -#endif /* CONFIG_INTEL_IOMMU */ - #endif /* __DMAR_H__ */ diff --git a/include/linux/efi.h b/include/linux/efi.h index 41bbf8ba4ba8..efc681fd5895 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h @@ -20,6 +20,7 @@ #include <linux/ioport.h> #include <linux/pfn.h> #include <linux/pstore.h> +#include <linux/reboot.h> #include <asm/page.h> @@ -521,6 +522,8 @@ typedef efi_status_t efi_query_capsule_caps_t(efi_capsule_header_t **capsules, int *reset_type); typedef efi_status_t efi_query_variable_store_t(u32 attributes, unsigned long size); +void efi_native_runtime_setup(void); + /* * EFI Configuration Table and GUID definitions */ @@ -870,11 +873,13 @@ extern int __init efi_uart_console_only (void); extern void efi_initialize_iomem_resources(struct resource *code_resource, struct resource *data_resource, struct resource *bss_resource); extern void efi_get_time(struct timespec *now); -extern int efi_set_rtc_mmss(const struct timespec *now); extern void efi_reserve_boot_services(void); extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose); extern struct efi_memory_map memmap; +extern int efi_reboot_quirk_mode; +extern bool efi_poweroff_required(void); + /* Iterate through an efi_memory_map */ #define for_each_efi_memory_desc(m, md) \ for ((md) = (m)->map; \ @@ -916,7 +921,8 @@ extern int __init efi_setup_pcdp_console(char *); #define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */ #define EFI_MEMMAP 4 /* Can we use EFI memory map? */ #define EFI_64BIT 5 /* Is the firmware 64-bit? */ -#define EFI_ARCH_1 6 /* First arch-specific bit */ +#define EFI_PARAVIRT 6 /* Access is via a paravirt interface */ +#define EFI_ARCH_1 7 /* First arch-specific bit */ #ifdef CONFIG_EFI /* @@ -926,11 +932,14 @@ static inline bool efi_enabled(int feature) { return test_bit(feature, &efi.flags) != 0; } +extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); #else static inline bool efi_enabled(int feature) { return false; } +static inline void +efi_reboot(enum reboot_mode reboot_mode, const char *__unused) {} #endif /* @@ -1031,12 +1040,8 @@ struct efivar_operations { struct efivars { /* * ->lock protects two things: - * 1) ->list - adds, removals, reads, writes - * 2) ops.[gs]et_variable() calls. - * It must not be held when creating sysfs entries or calling kmalloc. - * ops.get_next_variable() is only called from register_efivars() - * or efivar_update_sysfs_entries(), - * which is protected by the BKL, so that path is safe. 
+ * 1) efivarfs_list and efivars_sysfs_list + * 2) ->ops calls */ spinlock_t lock; struct kset *kset; @@ -1161,4 +1166,46 @@ static inline void efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {} #endif +/* prototypes shared between arch specific and generic stub code */ + +#define pr_efi(sys_table, msg) efi_printk(sys_table, "EFI stub: "msg) +#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg) + +void efi_printk(efi_system_table_t *sys_table_arg, char *str); + +void efi_free(efi_system_table_t *sys_table_arg, unsigned long size, + unsigned long addr); + +char *efi_convert_cmdline(efi_system_table_t *sys_table_arg, + efi_loaded_image_t *image, int *cmd_line_len); + +efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg, + efi_memory_desc_t **map, + unsigned long *map_size, + unsigned long *desc_size, + u32 *desc_ver, + unsigned long *key_ptr); + +efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg, + unsigned long size, unsigned long align, + unsigned long *addr); + +efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg, + unsigned long size, unsigned long align, + unsigned long *addr, unsigned long max); + +efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg, + unsigned long *image_addr, + unsigned long image_size, + unsigned long alloc_size, + unsigned long preferred_addr, + unsigned long alignment); + +efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg, + efi_loaded_image_t *image, + char *cmd_line, char *option_string, + unsigned long max_addr, + unsigned long *load_addr, + unsigned long *load_size); + #endif /* _LINUX_EFI_H */ diff --git a/include/linux/extcon/sm5502.h b/include/linux/extcon/sm5502.h new file mode 100644 index 000000000000..030526bf8d79 --- /dev/null +++ b/include/linux/extcon/sm5502.h @@ -0,0 +1,287 @@ +/* + * sm5502.h + * + * Copyright (c) 2014 Samsung Electronics Co., Ltd + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef __LINUX_EXTCON_SM5502_H +#define __LINUX_EXTCON_SM5502_H + +enum sm5502_types { + TYPE_SM5502, +}; + +/* SM5502 registers */ +enum sm5502_reg { + SM5502_REG_DEVICE_ID = 0x01, + SM5502_REG_CONTROL, + SM5502_REG_INT1, + SM5502_REG_INT2, + SM5502_REG_INTMASK1, + SM5502_REG_INTMASK2, + SM5502_REG_ADC, + SM5502_REG_TIMING_SET1, + SM5502_REG_TIMING_SET2, + SM5502_REG_DEV_TYPE1, + SM5502_REG_DEV_TYPE2, + SM5502_REG_BUTTON1, + SM5502_REG_BUTTON2, + SM5502_REG_CAR_KIT_STATUS, + SM5502_REG_RSVD1, + SM5502_REG_RSVD2, + SM5502_REG_RSVD3, + SM5502_REG_RSVD4, + SM5502_REG_MANUAL_SW1, + SM5502_REG_MANUAL_SW2, + SM5502_REG_DEV_TYPE3, + SM5502_REG_RSVD5, + SM5502_REG_RSVD6, + SM5502_REG_RSVD7, + SM5502_REG_RSVD8, + SM5502_REG_RSVD9, + SM5502_REG_RESET, + SM5502_REG_RSVD10, + SM5502_REG_RESERVED_ID1, + SM5502_REG_RSVD11, + SM5502_REG_RSVD12, + SM5502_REG_RESERVED_ID2, + SM5502_REG_RSVD13, + SM5502_REG_OCP, + SM5502_REG_RSVD14, + SM5502_REG_RSVD15, + SM5502_REG_RSVD16, + SM5502_REG_RSVD17, + SM5502_REG_RSVD18, + SM5502_REG_RSVD19, + SM5502_REG_RSVD20, + SM5502_REG_RSVD21, + SM5502_REG_RSVD22, + SM5502_REG_RSVD23, + SM5502_REG_RSVD24, + SM5502_REG_RSVD25, + SM5502_REG_RSVD26, + SM5502_REG_RSVD27, + SM5502_REG_RSVD28, + SM5502_REG_RSVD29, + SM5502_REG_RSVD30, + SM5502_REG_RSVD31, + SM5502_REG_RSVD32, + SM5502_REG_RSVD33, + SM5502_REG_RSVD34, + SM5502_REG_RSVD35, + SM5502_REG_RSVD36, + SM5502_REG_RESERVED_ID3, + + SM5502_REG_END, +}; + +/* Define SM5502 MASK/SHIFT constant */ +#define SM5502_REG_DEVICE_ID_VENDOR_SHIFT 0 +#define SM5502_REG_DEVICE_ID_VERSION_SHIFT 3 +#define SM5502_REG_DEVICE_ID_VENDOR_MASK (0x3 << SM5502_REG_DEVICE_ID_VENDOR_SHIFT) +#define SM5502_REG_DEVICE_ID_VERSION_MASK (0x1f << SM5502_REG_DEVICE_ID_VERSION_SHIFT) + +#define SM5502_REG_CONTROL_MASK_INT_SHIFT 0 +#define SM5502_REG_CONTROL_WAIT_SHIFT 1 +#define SM5502_REG_CONTROL_MANUAL_SW_SHIFT 2 +#define SM5502_REG_CONTROL_RAW_DATA_SHIFT 3 +#define SM5502_REG_CONTROL_SW_OPEN_SHIFT 4 +#define SM5502_REG_CONTROL_MASK_INT_MASK (0x1 << SM5502_REG_CONTROL_MASK_INT_SHIFT) +#define SM5502_REG_CONTROL_WAIT_MASK (0x1 << SM5502_REG_CONTROL_WAIT_SHIFT) +#define SM5502_REG_CONTROL_MANUAL_SW_MASK (0x1 << SM5502_REG_CONTROL_MANUAL_SW_SHIFT) +#define SM5502_REG_CONTROL_RAW_DATA_MASK (0x1 << SM5502_REG_CONTROL_RAW_DATA_SHIFT) +#define SM5502_REG_CONTROL_SW_OPEN_MASK (0x1 << SM5502_REG_CONTROL_SW_OPEN_SHIFT) + +#define SM5502_REG_INTM1_ATTACH_SHIFT 0 +#define SM5502_REG_INTM1_DETACH_SHIFT 1 +#define SM5502_REG_INTM1_KP_SHIFT 2 +#define SM5502_REG_INTM1_LKP_SHIFT 3 +#define SM5502_REG_INTM1_LKR_SHIFT 4 +#define SM5502_REG_INTM1_OVP_EVENT_SHIFT 5 +#define SM5502_REG_INTM1_OCP_EVENT_SHIFT 6 +#define SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT 7 +#define SM5502_REG_INTM1_ATTACH_MASK (0x1 << SM5502_REG_INTM1_ATTACH_SHIFT) +#define SM5502_REG_INTM1_DETACH_MASK (0x1 << SM5502_REG_INTM1_DETACH_SHIFT) +#define SM5502_REG_INTM1_KP_MASK (0x1 << SM5502_REG_INTM1_KP_SHIFT) +#define SM5502_REG_INTM1_LKP_MASK (0x1 << SM5502_REG_INTM1_LKP_SHIFT) +#define SM5502_REG_INTM1_LKR_MASK (0x1 << SM5502_REG_INTM1_LKR_SHIFT) +#define SM5502_REG_INTM1_OVP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OVP_EVENT_SHIFT) +#define SM5502_REG_INTM1_OCP_EVENT_MASK (0x1 << SM5502_REG_INTM1_OCP_EVENT_SHIFT) +#define SM5502_REG_INTM1_OVP_OCP_DIS_MASK (0x1 << SM5502_REG_INTM1_OVP_OCP_DIS_SHIFT) + +#define SM5502_REG_INTM2_VBUS_DET_SHIFT 0 +#define SM5502_REG_INTM2_REV_ACCE_SHIFT 1 +#define SM5502_REG_INTM2_ADC_CHG_SHIFT 2 +#define SM5502_REG_INTM2_STUCK_KEY_SHIFT 3 +#define 
SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT 4 +#define SM5502_REG_INTM2_MHL_SHIFT 5 +#define SM5502_REG_INTM2_VBUS_DET_MASK (0x1 << SM5502_REG_INTM2_VBUS_DET_SHIFT) +#define SM5502_REG_INTM2_REV_ACCE_MASK (0x1 << SM5502_REG_INTM2_REV_ACCE_SHIFT) +#define SM5502_REG_INTM2_ADC_CHG_MASK (0x1 << SM5502_REG_INTM2_ADC_CHG_SHIFT) +#define SM5502_REG_INTM2_STUCK_KEY_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_SHIFT) +#define SM5502_REG_INTM2_STUCK_KEY_RCV_MASK (0x1 << SM5502_REG_INTM2_STUCK_KEY_RCV_SHIFT) +#define SM5502_REG_INTM2_MHL_MASK (0x1 << SM5502_REG_INTM2_MHL_SHIFT) + +#define SM5502_REG_ADC_SHIFT 0 +#define SM5502_REG_ADC_MASK (0x1f << SM5502_REG_ADC_SHIFT) + +#define SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT 4 +#define SM5502_REG_TIMING_SET1_KEY_PRESS_MASK (0xf << SM5502_REG_TIMING_SET1_KEY_PRESS_SHIFT) +#define TIMING_KEY_PRESS_100MS 0x0 +#define TIMING_KEY_PRESS_200MS 0x1 +#define TIMING_KEY_PRESS_300MS 0x2 +#define TIMING_KEY_PRESS_400MS 0x3 +#define TIMING_KEY_PRESS_500MS 0x4 +#define TIMING_KEY_PRESS_600MS 0x5 +#define TIMING_KEY_PRESS_700MS 0x6 +#define TIMING_KEY_PRESS_800MS 0x7 +#define TIMING_KEY_PRESS_900MS 0x8 +#define TIMING_KEY_PRESS_1000MS 0x9 +#define SM5502_REG_TIMING_SET1_ADC_DET_SHIFT 0 +#define SM5502_REG_TIMING_SET1_ADC_DET_MASK (0xf << SM5502_REG_TIMING_SET1_ADC_DET_SHIFT) +#define TIMING_ADC_DET_50MS 0x0 +#define TIMING_ADC_DET_100MS 0x1 +#define TIMING_ADC_DET_150MS 0x2 +#define TIMING_ADC_DET_200MS 0x3 +#define TIMING_ADC_DET_300MS 0x4 +#define TIMING_ADC_DET_400MS 0x5 +#define TIMING_ADC_DET_500MS 0x6 +#define TIMING_ADC_DET_600MS 0x7 +#define TIMING_ADC_DET_700MS 0x8 +#define TIMING_ADC_DET_800MS 0x9 +#define TIMING_ADC_DET_900MS 0xA +#define TIMING_ADC_DET_1000MS 0xB + +#define SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT 4 +#define SM5502_REG_TIMING_SET2_SW_WAIT_MASK (0xf << SM5502_REG_TIMING_SET2_SW_WAIT_SHIFT) +#define TIMING_SW_WAIT_10MS 0x0 +#define TIMING_SW_WAIT_30MS 0x1 +#define TIMING_SW_WAIT_50MS 0x2 +#define TIMING_SW_WAIT_70MS 0x3 +#define TIMING_SW_WAIT_90MS 0x4 +#define TIMING_SW_WAIT_110MS 0x5 +#define TIMING_SW_WAIT_130MS 0x6 +#define TIMING_SW_WAIT_150MS 0x7 +#define TIMING_SW_WAIT_170MS 0x8 +#define TIMING_SW_WAIT_190MS 0x9 +#define TIMING_SW_WAIT_210MS 0xA +#define SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT 0 +#define SM5502_REG_TIMING_SET2_LONG_KEY_MASK (0xf << SM5502_REG_TIMING_SET2_LONG_KEY_SHIFT) +#define TIMING_LONG_KEY_300MS 0x0 +#define TIMING_LONG_KEY_400MS 0x1 +#define TIMING_LONG_KEY_500MS 0x2 +#define TIMING_LONG_KEY_600MS 0x3 +#define TIMING_LONG_KEY_700MS 0x4 +#define TIMING_LONG_KEY_800MS 0x5 +#define TIMING_LONG_KEY_900MS 0x6 +#define TIMING_LONG_KEY_1000MS 0x7 +#define TIMING_LONG_KEY_1100MS 0x8 +#define TIMING_LONG_KEY_1200MS 0x9 +#define TIMING_LONG_KEY_1300MS 0xA +#define TIMING_LONG_KEY_1400MS 0xB +#define TIMING_LONG_KEY_1500MS 0xC + +#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT 0 +#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT 1 +#define SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT 2 +#define SM5502_REG_DEV_TYPE1_UART_SHIFT 3 +#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT 4 +#define SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT 5 +#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT 6 +#define SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT 7 +#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE1_SHIFT) +#define SM5502_REG_DEV_TYPE1_AUDIO_TYPE1__MASK (0x1 << SM5502_REG_DEV_TYPE1_AUDIO_TYPE2_SHIFT) +#define SM5502_REG_DEV_TYPE1_USB_SDP_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_SDP_SHIFT) +#define SM5502_REG_DEV_TYPE1_UART_MASK (0x1 << 
SM5502_REG_DEV_TYPE1_UART_SHIFT) +#define SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_MASK (0x1 << SM5502_REG_DEV_TYPE1_CAR_KIT_CHARGER_SHIFT) +#define SM5502_REG_DEV_TYPE1_USB_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_CHG_SHIFT) +#define SM5502_REG_DEV_TYPE1_DEDICATED_CHG_MASK (0x1 << SM5502_REG_DEV_TYPE1_DEDICATED_CHG_SHIFT) +#define SM5502_REG_DEV_TYPE1_USB_OTG_MASK (0x1 << SM5502_REG_DEV_TYPE1_USB_OTG_SHIFT) + +#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT 0 +#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT 1 +#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT 2 +#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT 3 +#define SM5502_REG_DEV_TYPE2_PPD_SHIFT 4 +#define SM5502_REG_DEV_TYPE2_TTY_SHIFT 5 +#define SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT 6 +#define SM5502_REG_DEV_TYPE2_JIG_USB_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_ON_SHIFT) +#define SM5502_REG_DEV_TYPE2_JIG_USB_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_USB_OFF_SHIFT) +#define SM5502_REG_DEV_TYPE2_JIG_UART_ON_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_ON_SHIFT) +#define SM5502_REG_DEV_TYPE2_JIG_UART_OFF_MASK (0x1 << SM5502_REG_DEV_TYPE2_JIG_UART_OFF_SHIFT) +#define SM5502_REG_DEV_TYPE2_PPD_MASK (0x1 << SM5502_REG_DEV_TYPE2_PPD_SHIFT) +#define SM5502_REG_DEV_TYPE2_TTY_MASK (0x1 << SM5502_REG_DEV_TYPE2_TTY_SHIFT) +#define SM5502_REG_DEV_TYPE2_AV_CABLE_MASK (0x1 << SM5502_REG_DEV_TYPE2_AV_CABLE_SHIFT) + +#define SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT 0 +#define SM5502_REG_MANUAL_SW1_DP_SHIFT 2 +#define SM5502_REG_MANUAL_SW1_DM_SHIFT 5 +#define SM5502_REG_MANUAL_SW1_VBUSIN_MASK (0x3 << SM5502_REG_MANUAL_SW1_VBUSIN_SHIFT) +#define SM5502_REG_MANUAL_SW1_DP_MASK (0x7 << SM5502_REG_MANUAL_SW1_DP_SHIFT) +#define SM5502_REG_MANUAL_SW1_DM_MASK (0x7 << SM5502_REG_MANUAL_SW1_DM_SHIFT) +#define VBUSIN_SWITCH_OPEN 0x0 +#define VBUSIN_SWITCH_VBUSOUT 0x1 +#define VBUSIN_SWITCH_MIC 0x2 +#define VBUSIN_SWITCH_VBUSOUT_WITH_USB 0x3 +#define DM_DP_CON_SWITCH_OPEN 0x0 +#define DM_DP_CON_SWITCH_USB 0x1 +#define DM_DP_CON_SWITCH_AUDIO 0x2 +#define DM_DP_CON_SWITCH_UART 0x3 +#define DM_DP_SWITCH_OPEN ((DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \ + | (DM_DP_CON_SWITCH_OPEN <<SM5502_REG_MANUAL_SW1_DM_SHIFT)) +#define DM_DP_SWITCH_USB ((DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \ + | (DM_DP_CON_SWITCH_USB <<SM5502_REG_MANUAL_SW1_DM_SHIFT)) +#define DM_DP_SWITCH_AUDIO ((DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \ + | (DM_DP_CON_SWITCH_AUDIO <<SM5502_REG_MANUAL_SW1_DM_SHIFT)) +#define DM_DP_SWITCH_UART ((DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DP_SHIFT) \ + | (DM_DP_CON_SWITCH_UART <<SM5502_REG_MANUAL_SW1_DM_SHIFT)) + +/* SM5502 Interrupts */ +enum sm5502_irq { + /* INT1 */ + SM5502_IRQ_INT1_ATTACH, + SM5502_IRQ_INT1_DETACH, + SM5502_IRQ_INT1_KP, + SM5502_IRQ_INT1_LKP, + SM5502_IRQ_INT1_LKR, + SM5502_IRQ_INT1_OVP_EVENT, + SM5502_IRQ_INT1_OCP_EVENT, + SM5502_IRQ_INT1_OVP_OCP_DIS, + + /* INT2 */ + SM5502_IRQ_INT2_VBUS_DET, + SM5502_IRQ_INT2_REV_ACCE, + SM5502_IRQ_INT2_ADC_CHG, + SM5502_IRQ_INT2_STUCK_KEY, + SM5502_IRQ_INT2_STUCK_KEY_RCV, + SM5502_IRQ_INT2_MHL, + + SM5502_IRQ_NUM, +}; + +#define SM5502_IRQ_INT1_ATTACH_MASK BIT(0) +#define SM5502_IRQ_INT1_DETACH_MASK BIT(1) +#define SM5502_IRQ_INT1_KP_MASK BIT(2) +#define SM5502_IRQ_INT1_LKP_MASK BIT(3) +#define SM5502_IRQ_INT1_LKR_MASK BIT(4) +#define SM5502_IRQ_INT1_OVP_EVENT_MASK BIT(5) +#define SM5502_IRQ_INT1_OCP_EVENT_MASK BIT(6) +#define SM5502_IRQ_INT1_OVP_OCP_DIS_MASK BIT(7) +#define SM5502_IRQ_INT2_VBUS_DET_MASK BIT(0) +#define SM5502_IRQ_INT2_REV_ACCE_MASK BIT(1) +#define 
SM5502_IRQ_INT2_ADC_CHG_MASK BIT(2) +#define SM5502_IRQ_INT2_STUCK_KEY_MASK BIT(3) +#define SM5502_IRQ_INT2_STUCK_KEY_RCV_MASK BIT(4) +#define SM5502_IRQ_INT2_MHL_MASK BIT(5) + +#endif /* __LINUX_EXTCON_SM5502_H */ diff --git a/include/linux/fence.h b/include/linux/fence.h new file mode 100644 index 000000000000..d174585b874b --- /dev/null +++ b/include/linux/fence.h @@ -0,0 +1,360 @@ +/* + * Fence mechanism for dma-buf to allow for asynchronous dma access + * + * Copyright (C) 2012 Canonical Ltd + * Copyright (C) 2012 Texas Instruments + * + * Authors: + * Rob Clark <robdclark@gmail.com> + * Maarten Lankhorst <maarten.lankhorst@canonical.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_FENCE_H +#define __LINUX_FENCE_H + +#include <linux/err.h> +#include <linux/wait.h> +#include <linux/list.h> +#include <linux/bitops.h> +#include <linux/kref.h> +#include <linux/sched.h> +#include <linux/printk.h> +#include <linux/rcupdate.h> + +struct fence; +struct fence_ops; +struct fence_cb; + +/** + * struct fence - software synchronization primitive + * @refcount: refcount for this fence + * @ops: fence_ops associated with this fence + * @rcu: used for releasing fence with kfree_rcu + * @cb_list: list of all callbacks to call + * @lock: spin_lock_irqsave used for locking + * @context: execution context this fence belongs to, returned by + * fence_context_alloc() + * @seqno: the sequence number of this fence inside the execution context, + * can be compared to decide which fence would be signaled later. + * @flags: A mask of FENCE_FLAG_* defined below + * @timestamp: Timestamp when the fence was signaled. + * @status: Optional, only valid if < 0, must be set before calling + * fence_signal, indicates that the fence has completed with an error. + * + * the flags member must be manipulated and read using the appropriate + * atomic ops (bit_*), so taking the spinlock will not be needed most + * of the time. + * + * FENCE_FLAG_SIGNALED_BIT - fence is already signaled + * FENCE_FLAG_ENABLE_SIGNAL_BIT - enable_signaling might have been called* + * FENCE_FLAG_USER_BITS - start of the unused bits, can be used by the + * implementer of the fence for its own purposes. Can be used in different + * ways by different fence implementers, so do not rely on this. + * + * *) Since atomic bitops are used, this is not guaranteed to be the case. + * Particularly, if the bit was set, but fence_signal was called right + * before this bit was set, it would have been able to set the + * FENCE_FLAG_SIGNALED_BIT, before enable_signaling was called. + * Adding a check for FENCE_FLAG_SIGNALED_BIT after setting + * FENCE_FLAG_ENABLE_SIGNAL_BIT closes this race, and makes sure that + * after fence_signal was called, any enable_signaling call will have either + * been completed, or never called at all. 
+ */ +struct fence { + struct kref refcount; + const struct fence_ops *ops; + struct rcu_head rcu; + struct list_head cb_list; + spinlock_t *lock; + unsigned context, seqno; + unsigned long flags; + ktime_t timestamp; + int status; +}; + +enum fence_flag_bits { + FENCE_FLAG_SIGNALED_BIT, + FENCE_FLAG_ENABLE_SIGNAL_BIT, + FENCE_FLAG_USER_BITS, /* must always be last member */ +}; + +typedef void (*fence_func_t)(struct fence *fence, struct fence_cb *cb); + +/** + * struct fence_cb - callback for fence_add_callback + * @node: used by fence_add_callback to append this struct to fence::cb_list + * @func: fence_func_t to call + * + * This struct will be initialized by fence_add_callback, additional + * data can be passed along by embedding fence_cb in another struct. + */ +struct fence_cb { + struct list_head node; + fence_func_t func; +}; + +/** + * struct fence_ops - operations implemented for fence + * @get_driver_name: returns the driver name. + * @get_timeline_name: return the name of the context this fence belongs to. + * @enable_signaling: enable software signaling of fence. + * @signaled: [optional] peek whether the fence is signaled, can be null. + * @wait: custom wait implementation, or fence_default_wait. + * @release: [optional] called on destruction of fence, can be null + * @fill_driver_data: [optional] callback to fill in free-form debug info + * Returns amount of bytes filled, or -errno. + * @fence_value_str: [optional] fills in the value of the fence as a string + * @timeline_value_str: [optional] fills in the current value of the timeline + * as a string + * + * Notes on enable_signaling: + * For fence implementations that have the capability for hw->hw + * signaling, they can implement this op to enable the necessary + * irqs, or insert commands into cmdstream, etc. This is called + * in the first wait() or add_callback() path to let the fence + * implementation know that there is another driver waiting on + * the signal (ie. hw->sw case). + * + * This function can be called called from atomic context, but not + * from irq context, so normal spinlocks can be used. + * + * A return value of false indicates the fence already passed, + * or some failure occured that made it impossible to enable + * signaling. True indicates succesful enabling. + * + * fence->status may be set in enable_signaling, but only when false is + * returned. + * + * Calling fence_signal before enable_signaling is called allows + * for a tiny race window in which enable_signaling is called during, + * before, or after fence_signal. To fight this, it is recommended + * that before enable_signaling returns true an extra reference is + * taken on the fence, to be released when the fence is signaled. + * This will mean fence_signal will still be called twice, but + * the second time will be a noop since it was already signaled. + * + * Notes on signaled: + * May set fence->status if returning true. + * + * Notes on wait: + * Must not be NULL, set to fence_default_wait for default implementation. + * the fence_default_wait implementation should work for any fence, as long + * as enable_signaling works correctly. + * + * Must return -ERESTARTSYS if the wait is intr = true and the wait was + * interrupted, and remaining jiffies if fence has signaled, or 0 if wait + * timed out. Can also return other error values on custom implementations, + * which should be treated as if the fence is signaled. For example a hardware + * lockup could be reported like that. 
+ * + * Notes on release: + * Can be NULL, this function allows additional commands to run on + * destruction of the fence. Can be called from irq context. + * If pointer is set to NULL, kfree will get called instead. + */ + +struct fence_ops { + const char * (*get_driver_name)(struct fence *fence); + const char * (*get_timeline_name)(struct fence *fence); + bool (*enable_signaling)(struct fence *fence); + bool (*signaled)(struct fence *fence); + signed long (*wait)(struct fence *fence, bool intr, signed long timeout); + void (*release)(struct fence *fence); + + int (*fill_driver_data)(struct fence *fence, void *data, int size); + void (*fence_value_str)(struct fence *fence, char *str, int size); + void (*timeline_value_str)(struct fence *fence, char *str, int size); +}; + +void fence_init(struct fence *fence, const struct fence_ops *ops, + spinlock_t *lock, unsigned context, unsigned seqno); + +void fence_release(struct kref *kref); +void fence_free(struct fence *fence); + +/** + * fence_get - increases refcount of the fence + * @fence: [in] fence to increase refcount of + * + * Returns the same fence, with refcount increased by 1. + */ +static inline struct fence *fence_get(struct fence *fence) +{ + if (fence) + kref_get(&fence->refcount); + return fence; +} + +/** + * fence_get_rcu - get a fence from a reservation_object_list with rcu read lock + * @fence: [in] fence to increase refcount of + * + * Function returns NULL if no refcount could be obtained, or the fence. + */ +static inline struct fence *fence_get_rcu(struct fence *fence) +{ + if (kref_get_unless_zero(&fence->refcount)) + return fence; + else + return NULL; +} + +/** + * fence_put - decreases refcount of the fence + * @fence: [in] fence to reduce refcount of + */ +static inline void fence_put(struct fence *fence) +{ + if (fence) + kref_put(&fence->refcount, fence_release); +} + +int fence_signal(struct fence *fence); +int fence_signal_locked(struct fence *fence); +signed long fence_default_wait(struct fence *fence, bool intr, signed long timeout); +int fence_add_callback(struct fence *fence, struct fence_cb *cb, + fence_func_t func); +bool fence_remove_callback(struct fence *fence, struct fence_cb *cb); +void fence_enable_sw_signaling(struct fence *fence); + +/** + * fence_is_signaled_locked - Return an indication if the fence is signaled yet. + * @fence: [in] the fence to check + * + * Returns true if the fence was already signaled, false if not. Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if fence_add_callback, fence_wait or fence_enable_sw_signaling + * haven't been called before. + * + * This function requires fence->lock to be held. + */ +static inline bool +fence_is_signaled_locked(struct fence *fence) +{ + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + fence_signal_locked(fence); + return true; + } + + return false; +} + +/** + * fence_is_signaled - Return an indication if the fence is signaled yet. + * @fence: [in] the fence to check + * + * Returns true if the fence was already signaled, false if not. Since this + * function doesn't enable signaling, it is not guaranteed to ever return + * true if fence_add_callback, fence_wait or fence_enable_sw_signaling + * haven't been called before. 
+ * + * It's recommended for seqno fences to call fence_signal when the + * operation is complete, it makes it possible to prevent issues from + * wraparound between time of issue and time of use by checking the return + * value of this function before calling hardware-specific wait instructions. + */ +static inline bool +fence_is_signaled(struct fence *fence) +{ + if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags)) + return true; + + if (fence->ops->signaled && fence->ops->signaled(fence)) { + fence_signal(fence); + return true; + } + + return false; +} + +/** + * fence_later - return the chronologically later fence + * @f1: [in] the first fence from the same context + * @f2: [in] the second fence from the same context + * + * Returns NULL if both fences are signaled, otherwise the fence that would be + * signaled last. Both fences must be from the same context, since a seqno is + * not re-used across contexts. + */ +static inline struct fence *fence_later(struct fence *f1, struct fence *f2) +{ + if (WARN_ON(f1->context != f2->context)) + return NULL; + + /* + * can't check just FENCE_FLAG_SIGNALED_BIT here, it may never have been + * set if enable_signaling wasn't called, and enabling that here is + * overkill. + */ + if (f2->seqno - f1->seqno <= INT_MAX) + return fence_is_signaled(f2) ? NULL : f2; + else + return fence_is_signaled(f1) ? NULL : f1; +} + +signed long fence_wait_timeout(struct fence *, bool intr, signed long timeout); + + +/** + * fence_wait - sleep until the fence gets signaled + * @fence: [in] the fence to wait on + * @intr: [in] if true, do an interruptible wait + * + * This function will return -ERESTARTSYS if interrupted by a signal, + * or 0 if the fence was signaled. Other error values may be + * returned on custom implementations. + * + * Performs a synchronous wait on this fence. It is assumed the caller + * directly or indirectly holds a reference to the fence, otherwise the + * fence might be freed before return, resulting in undefined behavior. + */ +static inline signed long fence_wait(struct fence *fence, bool intr) +{ + signed long ret; + + /* Since fence_wait_timeout cannot timeout with + * MAX_SCHEDULE_TIMEOUT, only valid return values are + * -ERESTARTSYS and MAX_SCHEDULE_TIMEOUT. + */ + ret = fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT); + + return ret < 0 ? ret : 0; +} + +unsigned fence_context_alloc(unsigned num); + +#define FENCE_TRACE(f, fmt, args...) \ + do { \ + struct fence *__ff = (f); \ + if (config_enabled(CONFIG_FENCE_TRACE)) \ + pr_info("f %u#%u: " fmt, \ + __ff->context, __ff->seqno, ##args); \ + } while (0) + +#define FENCE_WARN(f, fmt, args...) \ + do { \ + struct fence *__ff = (f); \ + pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#define FENCE_ERR(f, fmt, args...) 
\ + do { \ + struct fence *__ff = (f); \ + pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \ + ##args); \ + } while (0) + +#endif /* __LINUX_FENCE_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index a7e3c48d73a7..a5227ab8ccb1 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -6,6 +6,7 @@ #include <linux/atomic.h> #include <linux/compat.h> +#include <linux/skbuff.h> #include <linux/workqueue.h> #include <uapi/linux/filter.h> @@ -81,7 +82,7 @@ enum { /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */ #define BPF_ALU64_REG(OP, DST, SRC) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ @@ -89,7 +90,7 @@ enum { .imm = 0 }) #define BPF_ALU32_REG(OP, DST, SRC) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ @@ -99,7 +100,7 @@ enum { /* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */ #define BPF_ALU64_IMM(OP, DST, IMM) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ @@ -107,7 +108,7 @@ enum { .imm = IMM }) #define BPF_ALU32_IMM(OP, DST, IMM) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_ALU | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ @@ -117,7 +118,7 @@ enum { /* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */ #define BPF_ENDIAN(TYPE, DST, LEN) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_ALU | BPF_END | BPF_SRC(TYPE), \ .dst_reg = DST, \ .src_reg = 0, \ @@ -127,7 +128,7 @@ enum { /* Short form of mov, dst_reg = src_reg */ #define BPF_MOV64_REG(DST, SRC) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ @@ -135,7 +136,7 @@ enum { .imm = 0 }) #define BPF_MOV32_REG(DST, SRC) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ @@ -145,7 +146,7 @@ enum { /* Short form of mov, dst_reg = imm32 */ #define BPF_MOV64_IMM(DST, IMM) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ @@ -153,7 +154,7 @@ enum { .imm = IMM }) #define BPF_MOV32_IMM(DST, IMM) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ @@ -163,7 +164,7 @@ enum { /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */ #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE), \ .dst_reg = DST, \ .src_reg = SRC, \ @@ -171,7 +172,7 @@ enum { .imm = IMM }) #define BPF_MOV32_RAW(TYPE, DST, SRC, IMM) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_ALU | BPF_MOV | BPF_SRC(TYPE), \ .dst_reg = DST, \ .src_reg = SRC, \ @@ -181,7 +182,7 @@ enum { /* Direct packet access, R0 = *(uint *) (skb->data + imm32) */ #define BPF_LD_ABS(SIZE, IMM) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS, \ .dst_reg = 0, \ .src_reg = 0, \ @@ -191,7 +192,7 @@ enum { /* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */ #define BPF_LD_IND(SIZE, SRC, IMM) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_LD | BPF_SIZE(SIZE) | 
BPF_IND, \ .dst_reg = 0, \ .src_reg = SRC, \ @@ -201,7 +202,7 @@ enum { /* Memory load, dst_reg = *(uint *) (src_reg + off16) */ #define BPF_LDX_MEM(SIZE, DST, SRC, OFF) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = SRC, \ @@ -211,7 +212,7 @@ enum { /* Memory store, *(uint *) (dst_reg + off16) = src_reg */ #define BPF_STX_MEM(SIZE, DST, SRC, OFF) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = SRC, \ @@ -221,7 +222,7 @@ enum { /* Memory store, *(uint *) (dst_reg + off16) = imm32 */ #define BPF_ST_MEM(SIZE, DST, OFF, IMM) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM, \ .dst_reg = DST, \ .src_reg = 0, \ @@ -231,7 +232,7 @@ enum { /* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */ #define BPF_JMP_REG(OP, DST, SRC, OFF) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_JMP | BPF_OP(OP) | BPF_X, \ .dst_reg = DST, \ .src_reg = SRC, \ @@ -241,7 +242,7 @@ enum { /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */ #define BPF_JMP_IMM(OP, DST, IMM, OFF) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_JMP | BPF_OP(OP) | BPF_K, \ .dst_reg = DST, \ .src_reg = 0, \ @@ -251,7 +252,7 @@ enum { /* Function call */ #define BPF_EMIT_CALL(FUNC) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_JMP | BPF_CALL, \ .dst_reg = 0, \ .src_reg = 0, \ @@ -261,7 +262,7 @@ enum { /* Raw code statement block */ #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM) \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = CODE, \ .dst_reg = DST, \ .src_reg = SRC, \ @@ -271,7 +272,7 @@ enum { /* Program exit */ #define BPF_EXIT_INSN() \ - ((struct sock_filter_int) { \ + ((struct bpf_insn) { \ .code = BPF_JMP | BPF_EXIT, \ .dst_reg = 0, \ .src_reg = 0, \ @@ -295,9 +296,10 @@ enum { }) /* Macro to invoke filter function. */ -#define SK_RUN_FILTER(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) +#define SK_RUN_FILTER(filter, ctx) \ + (*filter->prog->bpf_func)(ctx, filter->prog->insnsi) -struct sock_filter_int { +struct bpf_insn { __u8 code; /* opcode */ __u8 dst_reg:4; /* dest register */ __u8 src_reg:4; /* source register */ @@ -322,54 +324,58 @@ struct sk_buff; struct sock; struct seccomp_data; -struct sk_filter { - atomic_t refcnt; +struct bpf_prog { u32 jited:1, /* Is our filter JIT'ed? 
*/ len:31; /* Number of filter blocks */ struct sock_fprog_kern *orig_prog; /* Original BPF program */ - struct rcu_head rcu; unsigned int (*bpf_func)(const struct sk_buff *skb, - const struct sock_filter_int *filter); + const struct bpf_insn *filter); union { struct sock_filter insns[0]; - struct sock_filter_int insnsi[0]; + struct bpf_insn insnsi[0]; struct work_struct work; }; }; -static inline unsigned int sk_filter_size(unsigned int proglen) +struct sk_filter { + atomic_t refcnt; + struct rcu_head rcu; + struct bpf_prog *prog; +}; + +#define BPF_PROG_RUN(filter, ctx) (*filter->bpf_func)(ctx, filter->insnsi) + +static inline unsigned int bpf_prog_size(unsigned int proglen) { - return max(sizeof(struct sk_filter), - offsetof(struct sk_filter, insns[proglen])); + return max(sizeof(struct bpf_prog), + offsetof(struct bpf_prog, insns[proglen])); } -#define sk_filter_proglen(fprog) \ - (fprog->len * sizeof(fprog->filter[0])) +#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0])) int sk_filter(struct sock *sk, struct sk_buff *skb); -void sk_filter_select_runtime(struct sk_filter *fp); -void sk_filter_free(struct sk_filter *fp); +void bpf_prog_select_runtime(struct bpf_prog *fp); +void bpf_prog_free(struct bpf_prog *fp); -int sk_convert_filter(struct sock_filter *prog, int len, - struct sock_filter_int *new_prog, int *new_len); +int bpf_convert_filter(struct sock_filter *prog, int len, + struct bpf_insn *new_prog, int *new_len); -int sk_unattached_filter_create(struct sk_filter **pfp, - struct sock_fprog_kern *fprog); -void sk_unattached_filter_destroy(struct sk_filter *fp); +int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog); +void bpf_prog_destroy(struct bpf_prog *fp); int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk); int sk_detach_filter(struct sock *sk); -int sk_chk_filter(struct sock_filter *filter, unsigned int flen); +int bpf_check_classic(const struct sock_filter *filter, unsigned int flen); int sk_get_filter(struct sock *sk, struct sock_filter __user *filter, unsigned int len); -void sk_filter_charge(struct sock *sk, struct sk_filter *fp); +bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); -void bpf_int_jit_compile(struct sk_filter *fp); +void bpf_int_jit_compile(struct bpf_prog *fp); #define BPF_ANC BIT(15) @@ -406,13 +412,25 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest) } } +void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, + int k, unsigned int size); + +static inline void *bpf_load_pointer(const struct sk_buff *skb, int k, + unsigned int size, void *buffer) +{ + if (k >= 0) + return skb_header_pointer(skb, k, size, buffer); + + return bpf_internal_load_pointer_neg_helper(skb, k, size); +} + #ifdef CONFIG_BPF_JIT #include <stdarg.h> #include <linux/linkage.h> #include <linux/printk.h> -void bpf_jit_compile(struct sk_filter *fp); -void bpf_jit_free(struct sk_filter *fp); +void bpf_jit_compile(struct bpf_prog *fp); +void bpf_jit_free(struct bpf_prog *fp); static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, u32 pass, void *image) @@ -426,11 +444,11 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen, #else #include <linux/slab.h> -static inline void bpf_jit_compile(struct sk_filter *fp) +static inline void bpf_jit_compile(struct bpf_prog *fp) { } -static inline void bpf_jit_free(struct sk_filter *fp) 
+static inline void bpf_jit_free(struct bpf_prog *fp) { kfree(fp); } diff --git a/include/linux/firmware.h b/include/linux/firmware.h index 59529330efd6..5c41c5e75b5c 100644 --- a/include/linux/firmware.h +++ b/include/linux/firmware.h @@ -45,6 +45,8 @@ int request_firmware_nowait( struct module *module, bool uevent, const char *name, struct device *device, gfp_t gfp, void *context, void (*cont)(const struct firmware *fw, void *context)); +int request_firmware_direct(const struct firmware **fw, const char *name, + struct device *device); void release_firmware(const struct firmware *fw); #else @@ -66,13 +68,12 @@ static inline void release_firmware(const struct firmware *fw) { } -#endif +static inline int request_firmware_direct(const struct firmware **fw, + const char *name, + struct device *device) +{ + return -EINVAL; +} -#ifdef CONFIG_FW_LOADER_USER_HELPER -int request_firmware_direct(const struct firmware **fw, const char *name, - struct device *device); -#else -#define request_firmware_direct request_firmware #endif - #endif diff --git a/include/linux/fs.h b/include/linux/fs.h index e11d60cc867b..2daccaf4b547 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -833,7 +833,7 @@ static inline struct file *get_file(struct file *f) * * Lockd stuffs a "host" pointer into this. */ -typedef struct files_struct *fl_owner_t; +typedef void *fl_owner_t; struct file_lock_operations { void (*fl_copy_lock)(struct file_lock *, struct file_lock *); diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h index 404a686a3644..6bb5e3f2a3b4 100644 --- a/include/linux/ftrace.h +++ b/include/linux/ftrace.h @@ -33,8 +33,7 @@ * features, then it must call an indirect function that * does. Or at least does enough to prevent any unwelcomed side effects. */ -#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \ - !ARCH_SUPPORTS_FTRACE_OPS +#if !ARCH_SUPPORTS_FTRACE_OPS # define FTRACE_FORCE_LIST_FUNC 1 #else # define FTRACE_FORCE_LIST_FUNC 0 @@ -118,17 +117,18 @@ struct ftrace_ops { ftrace_func_t func; struct ftrace_ops *next; unsigned long flags; - int __percpu *disabled; void *private; + int __percpu *disabled; #ifdef CONFIG_DYNAMIC_FTRACE + int nr_trampolines; struct ftrace_hash *notrace_hash; struct ftrace_hash *filter_hash; + struct ftrace_hash *tramp_hash; struct mutex regex_lock; + unsigned long trampoline; #endif }; -extern int function_trace_stop; - /* * Type of the current tracing. */ @@ -140,32 +140,6 @@ enum ftrace_tracing_type_t { /* Current tracing type, default is FTRACE_TYPE_ENTER */ extern enum ftrace_tracing_type_t ftrace_tracing_type; -/** - * ftrace_stop - stop function tracer. - * - * A quick way to stop the function tracer. Note this an on off switch, - * it is not something that is recursive like preempt_disable. - * This does not disable the calling of mcount, it only stops the - * calling of functions from mcount. - */ -static inline void ftrace_stop(void) -{ - function_trace_stop = 1; -} - -/** - * ftrace_start - start the function tracer. - * - * This function is the inverse of ftrace_stop. This does not enable - * the function tracing if the function tracer is disabled. This only - * sets the function tracer flag to continue calling the functions - * from mcount. - */ -static inline void ftrace_start(void) -{ - function_trace_stop = 0; -} - /* * The ftrace_ops must be a static and should also * be read_mostly. 
These functions do modify read_mostly variables @@ -242,8 +216,6 @@ static inline int ftrace_nr_registered_ops(void) } static inline void clear_ftrace_function(void) { } static inline void ftrace_kill(void) { } -static inline void ftrace_stop(void) { } -static inline void ftrace_start(void) { } #endif /* CONFIG_FUNCTION_TRACER */ #ifdef CONFIG_STACK_TRACER @@ -317,13 +289,20 @@ extern int ftrace_nr_registered_ops(void); * from tracing that function. */ enum { - FTRACE_FL_ENABLED = (1UL << 29), + FTRACE_FL_ENABLED = (1UL << 31), FTRACE_FL_REGS = (1UL << 30), - FTRACE_FL_REGS_EN = (1UL << 31) + FTRACE_FL_REGS_EN = (1UL << 29), + FTRACE_FL_TRAMP = (1UL << 28), + FTRACE_FL_TRAMP_EN = (1UL << 27), }; -#define FTRACE_FL_MASK (0x7UL << 29) -#define FTRACE_REF_MAX ((1UL << 29) - 1) +#define FTRACE_REF_MAX_SHIFT 27 +#define FTRACE_FL_BITS 5 +#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) +#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) +#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) + +#define ftrace_rec_count(rec) ((rec)->flags & ~FTRACE_FL_MASK) struct dyn_ftrace { unsigned long ip; /* address of mcount call-site */ @@ -431,6 +410,10 @@ void ftrace_modify_all_code(int command); #define FTRACE_ADDR ((unsigned long)ftrace_caller) #endif +#ifndef FTRACE_GRAPH_ADDR +#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller) +#endif + #ifndef FTRACE_REGS_ADDR #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS # define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller) @@ -439,6 +422,16 @@ void ftrace_modify_all_code(int command); #endif #endif +/* + * If an arch would like functions that are only traced + * by the function graph tracer to jump directly to its own + * trampoline, then they can define FTRACE_GRAPH_TRAMP_ADDR + * to be that address to jump to. + */ +#ifndef FTRACE_GRAPH_TRAMP_ADDR +#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0) +#endif + #ifdef CONFIG_FUNCTION_GRAPH_TRACER extern void ftrace_graph_caller(void); extern int ftrace_enable_ftrace_graph_caller(void); @@ -736,6 +729,7 @@ extern char __irqentry_text_end[]; extern int register_ftrace_graph(trace_func_graph_ret_t retfunc, trace_func_graph_ent_t entryfunc); +extern bool ftrace_graph_is_dead(void); extern void ftrace_graph_stop(void); /* The current handlers in use */ diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index cff3106ffe2c..06c6faa9e5cc 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -272,7 +272,6 @@ struct ftrace_event_call { struct trace_event event; const char *print_fmt; struct event_filter *filter; - struct list_head *files; void *mod; void *data; /* @@ -404,8 +403,6 @@ enum event_trigger_type { ETT_EVENT_ENABLE = (1 << 3), }; -extern void destroy_preds(struct ftrace_event_file *file); -extern void destroy_call_preds(struct ftrace_event_call *call); extern int filter_match_preds(struct event_filter *filter, void *rec); extern int filter_check_discard(struct ftrace_event_file *file, void *rec, diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index e7a8d3fa91d5..a036d058a249 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h @@ -165,6 +165,7 @@ enum hrtimer_base_type { * struct hrtimer_cpu_base - the per cpu clock bases * @lock: lock protecting the base and associated clock bases * and timers + * @cpu: cpu number * @active_bases: Bitfield to mark bases with active timers * @clock_was_set: Indicates that clock was set from irq context. 
* @expires_next: absolute time of the next event which was scheduled @@ -179,6 +180,7 @@ enum hrtimer_base_type { */ struct hrtimer_cpu_base { raw_spinlock_t lock; + unsigned int cpu; unsigned int active_bases; unsigned int clock_was_set; #ifdef CONFIG_HIGH_RES_TIMERS @@ -324,14 +326,6 @@ static inline void timerfd_clock_was_set(void) { } #endif extern void hrtimers_resume(void); -extern ktime_t ktime_get(void); -extern ktime_t ktime_get_real(void); -extern ktime_t ktime_get_boottime(void); -extern ktime_t ktime_get_monotonic_offset(void); -extern ktime_t ktime_get_clocktai(void); -extern ktime_t ktime_get_update_offsets(ktime_t *offs_real, ktime_t *offs_boot, - ktime_t *offs_tai); - DECLARE_PER_CPU(struct tick_device, tick_cpu_device); @@ -452,12 +446,6 @@ extern void hrtimer_run_pending(void); /* Bootup initialization: */ extern void __init hrtimers_init(void); -#if BITS_PER_LONG < 64 -extern u64 ktime_divns(const ktime_t kt, s64 div); -#else /* BITS_PER_LONG < 64 */ -# define ktime_divns(kt, div) (u64)((kt).tv64 / (div)) -#endif - /* Show pending timers: */ extern void sysrq_timer_list_show(void); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 255cd5cc0754..a23c096b3080 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -80,6 +80,7 @@ int dequeue_hwpoisoned_huge_page(struct page *page); bool isolate_huge_page(struct page *page, struct list_head *list); void putback_active_hugepage(struct page *page); bool is_hugepage_active(struct page *page); +void free_huge_page(struct page *page); #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h index b4b0eef5fddf..914bb08cd738 100644 --- a/include/linux/hw_random.h +++ b/include/linux/hw_random.h @@ -29,6 +29,8 @@ * @read: New API. drivers can fill up to max bytes of data * into the buffer. The buffer is aligned for any type. * @priv: Private data, for use by the RNG driver. + * @quality: Estimation of true entropy in RNG's bitstream + * (per mill). */ struct hwrng { const char *name; @@ -38,6 +40,7 @@ struct hwrng { int (*data_read)(struct hwrng *rng, u32 *data); int (*read)(struct hwrng *rng, void *data, size_t max, bool wait); unsigned long priv; + unsigned short quality; /* internal. */ struct list_head list; @@ -47,5 +50,7 @@ struct hwrng { extern int hwrng_register(struct hwrng *rng); /** Unregister a Hardware Random Number Generator driver. */ extern void hwrng_unregister(struct hwrng *rng); +/** Feed random bits into the pool. 
*/ +extern void add_hwgenerator_randomness(const char *buffer, size_t count, size_t entropy); #endif /* LINUX_HWRANDOM_H_ */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 6bff13f74050..63ab3873c5ed 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -1001,6 +1001,26 @@ struct ieee80211_vendor_ie { u8 oui_type; } __packed; +struct ieee80211_wmm_ac_param { + u8 aci_aifsn; /* AIFSN, ACM, ACI */ + u8 cw; /* ECWmin, ECWmax (CW = 2^ECW - 1) */ + __le16 txop_limit; +} __packed; + +struct ieee80211_wmm_param_ie { + u8 element_id; /* Element ID: 221 (0xdd); */ + u8 len; /* Length: 24 */ + /* required fields for WMM version 1 */ + u8 oui[3]; /* 00:50:f2 */ + u8 oui_type; /* 2 */ + u8 oui_subtype; /* 1 */ + u8 version; /* 1 for WMM version 1.0 */ + u8 qos_info; /* AP/STA specific QoS info */ + u8 reserved; /* 0 */ + /* AC_BE, AC_BK, AC_VI, AC_VO */ + struct ieee80211_wmm_ac_param ac[4]; +} __packed; + /* Control frames */ struct ieee80211_rts { __le16 frame_control; @@ -1621,6 +1641,9 @@ enum ieee80211_reasoncode { WLAN_REASON_INVALID_RSN_IE_CAP = 22, WLAN_REASON_IEEE8021X_FAILED = 23, WLAN_REASON_CIPHER_SUITE_REJECTED = 24, + /* TDLS (802.11z) */ + WLAN_REASON_TDLS_TEARDOWN_UNREACHABLE = 25, + WLAN_REASON_TDLS_TEARDOWN_UNSPECIFIED = 26, /* 802.11e */ WLAN_REASON_DISASSOC_UNSPECIFIED_QOS = 32, WLAN_REASON_DISASSOC_QAP_NO_BANDWIDTH = 33, diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h index fd22789d7b2e..808dcb8cc04f 100644 --- a/include/linux/if_bridge.h +++ b/include/linux/if_bridge.h @@ -36,8 +36,28 @@ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __use typedef int br_should_route_hook_t(struct sk_buff *skb); extern br_should_route_hook_t __rcu *br_should_route_hook; + +#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING) int br_multicast_list_adjacent(struct net_device *dev, struct list_head *br_ip_list); +bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto); bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto); +#else +static inline int br_multicast_list_adjacent(struct net_device *dev, + struct list_head *br_ip_list) +{ + return 0; +} +static inline bool br_multicast_has_querier_anywhere(struct net_device *dev, + int proto) +{ + return false; +} +static inline bool br_multicast_has_querier_adjacent(struct net_device *dev, + int proto) +{ + return false; +} +#endif #endif diff --git a/include/linux/iio/accel/kxcjk_1013.h b/include/linux/iio/accel/kxcjk_1013.h new file mode 100644 index 000000000000..fd1d540ea62d --- /dev/null +++ b/include/linux/iio/accel/kxcjk_1013.h @@ -0,0 +1,22 @@ +/* + * KXCJK-1013 3-axis accelerometer Interface + * Copyright (c) 2014, Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. 
+ */ + +#ifndef __IIO_KXCJK_1013_H__ +#define __IIO_KXCJK_1013_H__ + +struct kxcjk_1013_platform_data { + bool active_high_intr; +}; + +#endif diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 96f51f0e0096..d8257ab60bac 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h @@ -47,6 +47,7 @@ .type = device_type, \ .modified = mod, \ .info_mask_separate = mask, \ + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), \ .scan_index = index, \ .channel2 = ch2, \ .address = addr, \ @@ -59,11 +60,6 @@ }, \ } -#define ST_SENSOR_DEV_ATTR_SAMP_FREQ() \ - IIO_DEV_ATTR_SAMP_FREQ(S_IWUSR | S_IRUGO, \ - st_sensors_sysfs_get_sampling_frequency, \ - st_sensors_sysfs_set_sampling_frequency) - #define ST_SENSORS_DEV_ATTR_SAMP_FREQ_AVAIL() \ IIO_DEV_ATTR_SAMP_FREQ_AVAIL( \ st_sensors_sysfs_sampling_frequency_avail) @@ -285,12 +281,6 @@ int st_sensors_read_info_raw(struct iio_dev *indio_dev, int st_sensors_check_device_support(struct iio_dev *indio_dev, int num_sensors_list, const struct st_sensors *sensors); -ssize_t st_sensors_sysfs_get_sampling_frequency(struct device *dev, - struct device_attribute *attr, char *buf); - -ssize_t st_sensors_sysfs_set_sampling_frequency(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size); - ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev, struct device_attribute *attr, char *buf); diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h index 67d845385ae2..1796af093368 100644 --- a/include/linux/iio/common/st_sensors_i2c.h +++ b/include/linux/iio/common/st_sensors_i2c.h @@ -13,8 +13,19 @@ #include <linux/i2c.h> #include <linux/iio/common/st_sensors.h> +#include <linux/of.h> void st_sensors_i2c_configure(struct iio_dev *indio_dev, struct i2c_client *client, struct st_sensor_data *sdata); +#ifdef CONFIG_OF +void st_sensors_of_i2c_probe(struct i2c_client *client, + const struct of_device_id *match); +#else +static inline void st_sensors_of_i2c_probe(struct i2c_client *client, + const struct of_device_id *match) +{ +} +#endif + #endif /* ST_SENSORS_I2C_H */ diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index ccde91725f98..15dc6bc2bdd2 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -277,14 +277,7 @@ static inline bool iio_channel_has_info(const struct iio_chan_spec *chan, **/ static inline s64 iio_get_time_ns(void) { - struct timespec ts; - /* - * calls getnstimeofday. - * If hrtimers then up to ns accurate, if not microsecond. 
- */ - ktime_get_real_ts(&ts); - - return timespec_to_ns(&ts); + return ktime_get_real_ns(); } /* Device operating modes */ diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h index b665dc7f017b..fa2d01ef8f55 100644 --- a/include/linux/iio/imu/adis.h +++ b/include/linux/iio/imu/adis.h @@ -157,13 +157,14 @@ int adis_single_conversion(struct iio_dev *indio_dev, const struct iio_chan_spec *chan, unsigned int error_mask, int *val); -#define ADIS_VOLTAGE_CHAN(addr, si, chan, name, bits) { \ +#define ADIS_VOLTAGE_CHAN(addr, si, chan, name, info_all, bits) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .channel = (chan), \ .extend_name = name, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_shared_by_all = info_all, \ .address = (addr), \ .scan_index = (si), \ .scan_type = { \ @@ -174,19 +175,20 @@ int adis_single_conversion(struct iio_dev *indio_dev, }, \ } -#define ADIS_SUPPLY_CHAN(addr, si, bits) \ - ADIS_VOLTAGE_CHAN(addr, si, 0, "supply", bits) +#define ADIS_SUPPLY_CHAN(addr, si, info_all, bits) \ + ADIS_VOLTAGE_CHAN(addr, si, 0, "supply", info_all, bits) -#define ADIS_AUX_ADC_CHAN(addr, si, bits) \ - ADIS_VOLTAGE_CHAN(addr, si, 1, NULL, bits) +#define ADIS_AUX_ADC_CHAN(addr, si, info_all, bits) \ + ADIS_VOLTAGE_CHAN(addr, si, 1, NULL, info_all, bits) -#define ADIS_TEMP_CHAN(addr, si, bits) { \ +#define ADIS_TEMP_CHAN(addr, si, info_all, bits) { \ .type = IIO_TEMP, \ .indexed = 1, \ .channel = 0, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_OFFSET), \ + .info_mask_shared_by_all = info_all, \ .address = (addr), \ .scan_index = (si), \ .scan_type = { \ @@ -197,13 +199,14 @@ int adis_single_conversion(struct iio_dev *indio_dev, }, \ } -#define ADIS_MOD_CHAN(_type, mod, addr, si, info_sep, bits) { \ +#define ADIS_MOD_CHAN(_type, mod, addr, si, info_sep, info_all, bits) { \ .type = (_type), \ .modified = 1, \ .channel2 = IIO_MOD_ ## mod, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ info_sep, \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .info_mask_shared_by_all = info_all, \ .address = (addr), \ .scan_index = (si), \ .scan_type = { \ @@ -214,17 +217,17 @@ int adis_single_conversion(struct iio_dev *indio_dev, }, \ } -#define ADIS_ACCEL_CHAN(mod, addr, si, info_sep, bits) \ - ADIS_MOD_CHAN(IIO_ACCEL, mod, addr, si, info_sep, bits) +#define ADIS_ACCEL_CHAN(mod, addr, si, info_sep, info_all, bits) \ + ADIS_MOD_CHAN(IIO_ACCEL, mod, addr, si, info_sep, info_all, bits) -#define ADIS_GYRO_CHAN(mod, addr, si, info_sep, bits) \ - ADIS_MOD_CHAN(IIO_ANGL_VEL, mod, addr, si, info_sep, bits) +#define ADIS_GYRO_CHAN(mod, addr, si, info_sep, info_all, bits) \ + ADIS_MOD_CHAN(IIO_ANGL_VEL, mod, addr, si, info_sep, info_all, bits) -#define ADIS_INCLI_CHAN(mod, addr, si, info_sep, bits) \ - ADIS_MOD_CHAN(IIO_INCLI, mod, addr, si, info_sep, bits) +#define ADIS_INCLI_CHAN(mod, addr, si, info_sep, info_all, bits) \ + ADIS_MOD_CHAN(IIO_INCLI, mod, addr, si, info_sep, info_all, bits) -#define ADIS_ROT_CHAN(mod, addr, si, info_sep, bits) \ - ADIS_MOD_CHAN(IIO_ROT, mod, addr, si, info_sep, bits) +#define ADIS_ROT_CHAN(mod, addr, si, info_sep, info_all, bits) \ + ADIS_MOD_CHAN(IIO_ROT, mod, addr, si, info_sep, info_all, bits) #ifdef CONFIG_IIO_ADIS_LIB_BUFFER diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 369cf2cd5144..4b79ffe7b188 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h @@ -129,12 +129,11 @@ void iio_trigger_unregister(struct 
iio_trigger *trig_info); /** * iio_trigger_poll() - called on a trigger occurring * @trig: trigger which occurred - * @time: timestamp when trigger occurred * * Typically called in relevant hardware interrupt handler. **/ -void iio_trigger_poll(struct iio_trigger *trig, s64 time); -void iio_trigger_poll_chained(struct iio_trigger *trig, s64 time); +void iio_trigger_poll(struct iio_trigger *trig); +void iio_trigger_poll_chained(struct iio_trigger *trig); irqreturn_t iio_trigger_generic_data_rdy_poll(int irq, void *private); diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index d480631eabc2..4a2af8adf874 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -56,6 +56,10 @@ enum iio_modifier { IIO_MOD_QUATERNION, IIO_MOD_TEMP_AMBIENT, IIO_MOD_TEMP_OBJECT, + IIO_MOD_NORTH_MAGN, + IIO_MOD_NORTH_TRUE, + IIO_MOD_NORTH_MAGN_TILT_COMP, + IIO_MOD_NORTH_TRUE_TILT_COMP }; enum iio_event_type { @@ -70,6 +74,7 @@ enum iio_event_info { IIO_EV_INFO_ENABLE, IIO_EV_INFO_VALUE, IIO_EV_INFO_HYSTERESIS, + IIO_EV_INFO_PERIOD, }; enum iio_event_direction { diff --git a/include/linux/ima.h b/include/linux/ima.h index 1b7f268cddce..7cf5e9b32550 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h @@ -19,6 +19,7 @@ extern int ima_file_check(struct file *file, int mask); extern void ima_file_free(struct file *file); extern int ima_file_mmap(struct file *file, unsigned long prot); extern int ima_module_check(struct file *file); +extern int ima_fw_from_file(struct file *file, char *buf, size_t size); #else static inline int ima_bprm_check(struct linux_binprm *bprm) @@ -46,6 +47,11 @@ static inline int ima_module_check(struct file *file) return 0; } +static inline int ima_fw_from_file(struct file *file, char *buf, size_t size) +{ + return 0; +} + #endif /* CONFIG_IMA */ #ifdef CONFIG_IMA_APPRAISE diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 6df7f9fe0d01..2bb4c4f3531a 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h @@ -102,12 +102,6 @@ extern struct group_info init_groups; #define INIT_IDS #endif -#ifdef CONFIG_RCU_BOOST -#define INIT_TASK_RCU_BOOST() \ - .rcu_boost_mutex = NULL, -#else -#define INIT_TASK_RCU_BOOST() -#endif #ifdef CONFIG_TREE_PREEMPT_RCU #define INIT_TASK_RCU_TREE_PREEMPT() \ .rcu_blocked_node = NULL, @@ -119,8 +113,7 @@ extern struct group_info init_groups; .rcu_read_lock_nesting = 0, \ .rcu_read_unlock_special = 0, \ .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \ - INIT_TASK_RCU_TREE_PREEMPT() \ - INIT_TASK_RCU_BOOST() + INIT_TASK_RCU_TREE_PREEMPT() #else #define INIT_TASK_RCU_PREEMPT(tsk) #endif diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 0a2da5188217..a65208a8fe18 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h @@ -336,6 +336,7 @@ struct intel_iommu { #ifdef CONFIG_IRQ_REMAP struct ir_table *ir_table; /* Interrupt remapping info */ #endif + struct device *iommu_dev; /* IOMMU-sysfs device */ int node; }; @@ -365,4 +366,6 @@ extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu); extern int dmar_ir_support(void); +extern const struct attribute_group *intel_iommu_groups[]; + #endif diff --git a/include/linux/io.h b/include/linux/io.h index b76e6e545806..d5fc9b8d8b03 100644 --- a/include/linux/io.h +++ b/include/linux/io.h @@ -58,6 +58,8 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr) } #endif +#define IOMEM_ERR_PTR(err) (__force void __iomem *)ERR_PTR(err) + void __iomem 
*devm_ioremap(struct device *dev, resource_size_t offset, unsigned long size); void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset, diff --git a/include/linux/iommu.h b/include/linux/iommu.h index b96a5b2136e4..20f9a527922a 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -50,7 +50,7 @@ struct iommu_domain_geometry { }; struct iommu_domain { - struct iommu_ops *ops; + const struct iommu_ops *ops; void *priv; iommu_fault_handler_t handler; void *handler_token; @@ -140,7 +140,7 @@ struct iommu_ops { #define IOMMU_GROUP_NOTIFY_UNBIND_DRIVER 5 /* Pre Driver unbind */ #define IOMMU_GROUP_NOTIFY_UNBOUND_DRIVER 6 /* Post Driver unbind */ -extern int bus_set_iommu(struct bus_type *bus, struct iommu_ops *ops); +extern int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops); extern bool iommu_present(struct bus_type *bus); extern struct iommu_domain *iommu_domain_alloc(struct bus_type *bus); extern struct iommu_group *iommu_group_get_by_id(int id); @@ -181,11 +181,18 @@ extern int iommu_group_register_notifier(struct iommu_group *group, extern int iommu_group_unregister_notifier(struct iommu_group *group, struct notifier_block *nb); extern int iommu_group_id(struct iommu_group *group); +extern struct iommu_group *iommu_group_get_for_dev(struct device *dev); extern int iommu_domain_get_attr(struct iommu_domain *domain, enum iommu_attr, void *data); extern int iommu_domain_set_attr(struct iommu_domain *domain, enum iommu_attr, void *data); +struct device *iommu_device_create(struct device *parent, void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...); +void iommu_device_destroy(struct device *dev); +int iommu_device_link(struct device *dev, struct device *link); +void iommu_device_unlink(struct device *dev, struct device *link); /* Window handling function prototypes */ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, @@ -396,6 +403,27 @@ static inline int iommu_domain_set_attr(struct iommu_domain *domain, return -EINVAL; } +static inline struct device *iommu_device_create(struct device *parent, + void *drvdata, + const struct attribute_group **groups, + const char *fmt, ...) 
+{ + return ERR_PTR(-ENODEV); +} + +static inline void iommu_device_destroy(struct device *dev) +{ +} + +static inline int iommu_device_link(struct device *dev, struct device *link) +{ + return -EINVAL; +} + +static inline void iommu_device_unlink(struct device *dev, struct device *link) +{ +} + #endif /* CONFIG_IOMMU_API */ #endif /* __LINUX_IOMMU_H */ diff --git a/include/linux/iova.h b/include/linux/iova.h index 3277f4711349..19e81d5ccb6d 100644 --- a/include/linux/iova.h +++ b/include/linux/iova.h @@ -34,6 +34,11 @@ struct iova_domain { unsigned long dma_32bit_pfn; }; +static inline unsigned long iova_size(struct iova *iova) +{ + return iova->pfn_hi - iova->pfn_lo + 1; +} + struct iova *alloc_iova_mem(void); void free_iova_mem(struct iova *iova); void free_iova(struct iova_domain *iovad, unsigned long pfn); diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 2faef339d8f2..ff560537dd61 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -39,6 +39,7 @@ struct ipv6_devconf { #endif __s32 proxy_ndp; __s32 accept_source_route; + __s32 accept_ra_from_local; #ifdef CONFIG_IPV6_OPTIMISTIC_DAD __s32 optimistic_dad; #endif @@ -193,12 +194,13 @@ struct ipv6_pinfo { sndflow:1, repflow:1, pmtudisc:3, - ipv6only:1, + padding:1, /* 1 bit hole */ srcprefs:3, /* 001: prefer temporary address * 010: prefer public address * 100: prefer care-of address */ - dontfrag:1; + dontfrag:1, + autoflowlabel:1; __u8 min_hopcount; __u8 tclass; __be32 rcv_flowinfo; @@ -256,16 +258,6 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk) return inet_sk(__sk)->pinet6; } -static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops) -{ - struct request_sock *req = reqsk_alloc(ops); - - if (req) - inet_rsk(req)->pktopts = NULL; - - return req; -} - static inline struct raw6_sock *raw6_sk(const struct sock *sk) { return (struct raw6_sock *)sk; @@ -282,8 +274,8 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to, __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); } -#define __ipv6_only_sock(sk) (inet6_sk(sk)->ipv6only) -#define ipv6_only_sock(sk) ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk)) +#define __ipv6_only_sock(sk) (sk->sk_ipv6only) +#define ipv6_only_sock(sk) (__ipv6_only_sock(sk)) #define ipv6_sk_rxinfo(sk) ((sk)->sk_family == PF_INET6 && \ inet6_sk(sk)->rxopt.bits.rxinfo) @@ -296,8 +288,8 @@ static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk) static inline int inet_v6_ipv6only(const struct sock *sk) { - return likely(sk->sk_state != TCP_TIME_WAIT) ? 
- ipv6_only_sock(sk) : inet_twsk(sk)->tw_ipv6only; + /* ipv6only field is at same position for timewait and other sockets */ + return ipv6_only_sock(sk); } #else #define __ipv6_only_sock(sk) 0 diff --git a/include/linux/irq.h b/include/linux/irq.h index 0d998d8b01d8..62af59242ddc 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -771,6 +771,8 @@ void irq_gc_eoi(struct irq_data *d); int irq_gc_set_wake(struct irq_data *d, unsigned int on); /* Setup functions for irq_chip_generic */ +int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, + irq_hw_number_t hw_irq); struct irq_chip_generic * irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, void __iomem *reg_base, irq_flow_handler_t handler); diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 19ae05d4b8ec..bf9422c3aefe 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -33,6 +33,11 @@ void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) #define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), } bool irq_work_queue(struct irq_work *work); + +#ifdef CONFIG_SMP +bool irq_work_queue_on(struct irq_work *work, int cpu); +#endif + void irq_work_run(void); void irq_work_sync(struct irq_work *work); diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h new file mode 100644 index 000000000000..03a4ea37ba86 --- /dev/null +++ b/include/linux/irqchip/arm-gic-v3.h @@ -0,0 +1,200 @@ +/* + * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ +#ifndef __LINUX_IRQCHIP_ARM_GIC_V3_H +#define __LINUX_IRQCHIP_ARM_GIC_V3_H + +#include <asm/sysreg.h> + +/* + * Distributor registers. We assume we're running non-secure, with ARE + * being set. Secure-only and non-ARE registers are not described. 
+ */ +#define GICD_CTLR 0x0000 +#define GICD_TYPER 0x0004 +#define GICD_IIDR 0x0008 +#define GICD_STATUSR 0x0010 +#define GICD_SETSPI_NSR 0x0040 +#define GICD_CLRSPI_NSR 0x0048 +#define GICD_SETSPI_SR 0x0050 +#define GICD_CLRSPI_SR 0x0058 +#define GICD_SEIR 0x0068 +#define GICD_ISENABLER 0x0100 +#define GICD_ICENABLER 0x0180 +#define GICD_ISPENDR 0x0200 +#define GICD_ICPENDR 0x0280 +#define GICD_ISACTIVER 0x0300 +#define GICD_ICACTIVER 0x0380 +#define GICD_IPRIORITYR 0x0400 +#define GICD_ICFGR 0x0C00 +#define GICD_IROUTER 0x6000 +#define GICD_PIDR2 0xFFE8 + +#define GICD_CTLR_RWP (1U << 31) +#define GICD_CTLR_ARE_NS (1U << 4) +#define GICD_CTLR_ENABLE_G1A (1U << 1) +#define GICD_CTLR_ENABLE_G1 (1U << 0) + +#define GICD_IROUTER_SPI_MODE_ONE (0U << 31) +#define GICD_IROUTER_SPI_MODE_ANY (1U << 31) + +#define GIC_PIDR2_ARCH_MASK 0xf0 +#define GIC_PIDR2_ARCH_GICv3 0x30 +#define GIC_PIDR2_ARCH_GICv4 0x40 + +/* + * Re-Distributor registers, offsets from RD_base + */ +#define GICR_CTLR GICD_CTLR +#define GICR_IIDR 0x0004 +#define GICR_TYPER 0x0008 +#define GICR_STATUSR GICD_STATUSR +#define GICR_WAKER 0x0014 +#define GICR_SETLPIR 0x0040 +#define GICR_CLRLPIR 0x0048 +#define GICR_SEIR GICD_SEIR +#define GICR_PROPBASER 0x0070 +#define GICR_PENDBASER 0x0078 +#define GICR_INVLPIR 0x00A0 +#define GICR_INVALLR 0x00B0 +#define GICR_SYNCR 0x00C0 +#define GICR_MOVLPIR 0x0100 +#define GICR_MOVALLR 0x0110 +#define GICR_PIDR2 GICD_PIDR2 + +#define GICR_WAKER_ProcessorSleep (1U << 1) +#define GICR_WAKER_ChildrenAsleep (1U << 2) + +/* + * Re-Distributor registers, offsets from SGI_base + */ +#define GICR_ISENABLER0 GICD_ISENABLER +#define GICR_ICENABLER0 GICD_ICENABLER +#define GICR_ISPENDR0 GICD_ISPENDR +#define GICR_ICPENDR0 GICD_ICPENDR +#define GICR_ISACTIVER0 GICD_ISACTIVER +#define GICR_ICACTIVER0 GICD_ICACTIVER +#define GICR_IPRIORITYR0 GICD_IPRIORITYR +#define GICR_ICFGR0 GICD_ICFGR + +#define GICR_TYPER_VLPIS (1U << 1) +#define GICR_TYPER_LAST (1U << 4) + +/* + * CPU interface registers + */ +#define ICC_CTLR_EL1_EOImode_drop_dir (0U << 1) +#define ICC_CTLR_EL1_EOImode_drop (1U << 1) +#define ICC_SRE_EL1_SRE (1U << 0) + +/* + * Hypervisor interface registers (SRE only) + */ +#define ICH_LR_VIRTUAL_ID_MASK ((1UL << 32) - 1) + +#define ICH_LR_EOI (1UL << 41) +#define ICH_LR_GROUP (1UL << 60) +#define ICH_LR_STATE (3UL << 62) +#define ICH_LR_PENDING_BIT (1UL << 62) +#define ICH_LR_ACTIVE_BIT (1UL << 63) + +#define ICH_MISR_EOI (1 << 0) +#define ICH_MISR_U (1 << 1) + +#define ICH_HCR_EN (1 << 0) +#define ICH_HCR_UIE (1 << 1) + +#define ICH_VMCR_CTLR_SHIFT 0 +#define ICH_VMCR_CTLR_MASK (0x21f << ICH_VMCR_CTLR_SHIFT) +#define ICH_VMCR_BPR1_SHIFT 18 +#define ICH_VMCR_BPR1_MASK (7 << ICH_VMCR_BPR1_SHIFT) +#define ICH_VMCR_BPR0_SHIFT 21 +#define ICH_VMCR_BPR0_MASK (7 << ICH_VMCR_BPR0_SHIFT) +#define ICH_VMCR_PMR_SHIFT 24 +#define ICH_VMCR_PMR_MASK (0xffUL << ICH_VMCR_PMR_SHIFT) + +#define ICC_EOIR1_EL1 sys_reg(3, 0, 12, 12, 1) +#define ICC_IAR1_EL1 sys_reg(3, 0, 12, 12, 0) +#define ICC_SGI1R_EL1 sys_reg(3, 0, 12, 11, 5) +#define ICC_PMR_EL1 sys_reg(3, 0, 4, 6, 0) +#define ICC_CTLR_EL1 sys_reg(3, 0, 12, 12, 4) +#define ICC_SRE_EL1 sys_reg(3, 0, 12, 12, 5) +#define ICC_GRPEN1_EL1 sys_reg(3, 0, 12, 12, 7) + +#define ICC_IAR1_EL1_SPURIOUS 0x3ff + +#define ICC_SRE_EL2 sys_reg(3, 4, 12, 9, 5) + +#define ICC_SRE_EL2_SRE (1 << 0) +#define ICC_SRE_EL2_ENABLE (1 << 3) + +/* + * System register definitions + */ +#define ICH_VSEIR_EL2 sys_reg(3, 4, 12, 9, 4) +#define ICH_HCR_EL2 sys_reg(3, 4, 12, 11, 0) +#define 
ICH_VTR_EL2 sys_reg(3, 4, 12, 11, 1) +#define ICH_MISR_EL2 sys_reg(3, 4, 12, 11, 2) +#define ICH_EISR_EL2 sys_reg(3, 4, 12, 11, 3) +#define ICH_ELSR_EL2 sys_reg(3, 4, 12, 11, 5) +#define ICH_VMCR_EL2 sys_reg(3, 4, 12, 11, 7) + +#define __LR0_EL2(x) sys_reg(3, 4, 12, 12, x) +#define __LR8_EL2(x) sys_reg(3, 4, 12, 13, x) + +#define ICH_LR0_EL2 __LR0_EL2(0) +#define ICH_LR1_EL2 __LR0_EL2(1) +#define ICH_LR2_EL2 __LR0_EL2(2) +#define ICH_LR3_EL2 __LR0_EL2(3) +#define ICH_LR4_EL2 __LR0_EL2(4) +#define ICH_LR5_EL2 __LR0_EL2(5) +#define ICH_LR6_EL2 __LR0_EL2(6) +#define ICH_LR7_EL2 __LR0_EL2(7) +#define ICH_LR8_EL2 __LR8_EL2(0) +#define ICH_LR9_EL2 __LR8_EL2(1) +#define ICH_LR10_EL2 __LR8_EL2(2) +#define ICH_LR11_EL2 __LR8_EL2(3) +#define ICH_LR12_EL2 __LR8_EL2(4) +#define ICH_LR13_EL2 __LR8_EL2(5) +#define ICH_LR14_EL2 __LR8_EL2(6) +#define ICH_LR15_EL2 __LR8_EL2(7) + +#define __AP0Rx_EL2(x) sys_reg(3, 4, 12, 8, x) +#define ICH_AP0R0_EL2 __AP0Rx_EL2(0) +#define ICH_AP0R1_EL2 __AP0Rx_EL2(1) +#define ICH_AP0R2_EL2 __AP0Rx_EL2(2) +#define ICH_AP0R3_EL2 __AP0Rx_EL2(3) + +#define __AP1Rx_EL2(x) sys_reg(3, 4, 12, 9, x) +#define ICH_AP1R0_EL2 __AP1Rx_EL2(0) +#define ICH_AP1R1_EL2 __AP1Rx_EL2(1) +#define ICH_AP1R2_EL2 __AP1Rx_EL2(2) +#define ICH_AP1R3_EL2 __AP1Rx_EL2(3) + +#ifndef __ASSEMBLY__ + +#include <linux/stringify.h> + +static inline void gic_write_eoir(u64 irq) +{ + asm volatile("msr_s " __stringify(ICC_EOIR1_EL1) ", %0" : : "r" (irq)); + isb(); +} + +#endif + +#endif diff --git a/include/linux/irqchip/spear-shirq.h b/include/linux/irqchip/spear-shirq.h deleted file mode 100644 index c8be16d213a3..000000000000 --- a/include/linux/irqchip/spear-shirq.h +++ /dev/null @@ -1,64 +0,0 @@ -/* - * SPEAr platform shared irq layer header file - * - * Copyright (C) 2009-2012 ST Microelectronics - * Viresh Kumar <viresh.linux@gmail.com> - * - * This file is licensed under the terms of the GNU General Public - * License version 2. This program is licensed "as is" without any - * warranty of any kind, whether express or implied. - */ - -#ifndef __SPEAR_SHIRQ_H -#define __SPEAR_SHIRQ_H - -#include <linux/irq.h> -#include <linux/types.h> - -/* - * struct shirq_regs: shared irq register configuration - * - * enb_reg: enable register offset - * reset_to_enb: val 1 indicates, we need to clear bit for enabling interrupt - * status_reg: status register offset - * status_reg_mask: status register valid mask - * clear_reg: clear register offset - * reset_to_clear: val 1 indicates, we need to clear bit for clearing interrupt - */ -struct shirq_regs { - u32 enb_reg; - u32 reset_to_enb; - u32 status_reg; - u32 clear_reg; - u32 reset_to_clear; -}; - -/* - * struct spear_shirq: shared irq structure - * - * irq: hardware irq number - * irq_base: base irq in linux domain - * irq_nr: no. 
of shared interrupts in a particular block - * irq_bit_off: starting bit offset in the status register - * invalid_irq: irq group is currently disabled - * base: base address of shared irq register - * regs: register configuration for shared irq block - */ -struct spear_shirq { - u32 irq; - u32 irq_base; - u32 irq_nr; - u32 irq_bit_off; - int invalid_irq; - void __iomem *base; - struct shirq_regs regs; -}; - -int __init spear300_shirq_of_init(struct device_node *np, - struct device_node *parent); -int __init spear310_shirq_of_init(struct device_node *np, - struct device_node *parent); -int __init spear320_shirq_of_init(struct device_node *np, - struct device_node *parent); - -#endif /* __SPEAR_SHIRQ_H */ diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index c983ed18c332..b0f9d16e48f6 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -172,6 +172,8 @@ extern int irq_domain_associate(struct irq_domain *domain, unsigned int irq, extern void irq_domain_associate_many(struct irq_domain *domain, unsigned int irq_base, irq_hw_number_t hwirq_base, int count); +extern void irq_domain_disassociate(struct irq_domain *domain, + unsigned int irq); extern unsigned int irq_create_mapping(struct irq_domain *host, irq_hw_number_t hwirq); diff --git a/include/linux/isdn_ppp.h b/include/linux/isdn_ppp.h index 8e10f57f109f..a0070c6dfaf8 100644 --- a/include/linux/isdn_ppp.h +++ b/include/linux/isdn_ppp.h @@ -180,8 +180,8 @@ struct ippp_struct { struct slcompress *slcomp; #endif #ifdef CONFIG_IPPP_FILTER - struct sk_filter *pass_filter; /* filter for packets to pass */ - struct sk_filter *active_filter; /* filter for pkts to reset idle */ + struct bpf_prog *pass_filter; /* filter for packets to pass */ + struct bpf_prog *active_filter; /* filter for pkts to reset idle */ #endif unsigned long debug; struct isdn_ppp_compressor *compressor,*decompressor; diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 4c52907a6d8b..a9e2268ecccb 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h @@ -501,7 +501,7 @@ static inline char * __deprecated pack_hex_byte(char *buf, u8 byte) extern int hex_to_bin(char ch); extern int __must_check hex2bin(u8 *dst, const char *src, size_t count); -int mac_pton(const char *s, u8 *mac); +bool mac_pton(const char *s, u8 *mac); /* * General tracing related utility functions - trace_printk(), diff --git a/include/linux/key-type.h b/include/linux/key-type.h index a74c3a84dfdd..44792ee649de 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h @@ -41,10 +41,11 @@ struct key_construction { struct key_preparsed_payload { char *description; /* Proposed key description (or NULL) */ void *type_data[2]; /* Private key-type data */ - void *payload; /* Proposed payload */ + void *payload[2]; /* Proposed payload */ const void *data; /* Raw data */ size_t datalen; /* Raw datalen */ size_t quotalen; /* Quota length for proposed payload */ + time_t expiry; /* Expiry time of key */ bool trusted; /* True if key is trusted */ }; @@ -159,5 +160,7 @@ static inline int key_negate_and_link(struct key *key, return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey); } +extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); + #endif /* CONFIG_KEYS */ #endif /* _LINUX_KEY_TYPE_H */ diff --git a/include/linux/key.h b/include/linux/key.h index 017b0826642f..e1d4715f3222 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -170,6 +170,8 @@ struct key { #define KEY_FLAG_INVALIDATED 7 /* 
set if key has been invalidated */ #define KEY_FLAG_TRUSTED 8 /* set if key is trusted */ #define KEY_FLAG_TRUSTED_ONLY 9 /* set if keyring only accepts links to trusted keys */ +#define KEY_FLAG_BUILTIN 10 /* set if key is builtin */ +#define KEY_FLAG_ROOT_CAN_INVAL 11 /* set if key can be invalidated by root without permission */ /* the key type and key description string * - the desc is used to match a key against search criteria diff --git a/include/linux/kthread.h b/include/linux/kthread.h index 7dcef3317689..13d55206ccf6 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h @@ -73,7 +73,6 @@ struct kthread_worker { struct kthread_work { struct list_head node; kthread_work_func_t func; - wait_queue_head_t done; struct kthread_worker *worker; }; @@ -85,7 +84,6 @@ struct kthread_work { #define KTHREAD_WORK_INIT(work, fn) { \ .node = LIST_HEAD_INIT((work).node), \ .func = (fn), \ - .done = __WAIT_QUEUE_HEAD_INITIALIZER((work).done), \ } #define DEFINE_KTHREAD_WORKER(worker) \ @@ -95,22 +93,16 @@ struct kthread_work { struct kthread_work work = KTHREAD_WORK_INIT(work, fn) /* - * kthread_worker.lock and kthread_work.done need their own lockdep class - * keys if they are defined on stack with lockdep enabled. Use the - * following macros when defining them on stack. + * kthread_worker.lock needs its own lockdep class key when defined on + * stack with lockdep enabled. Use the following macros in such cases. */ #ifdef CONFIG_LOCKDEP # define KTHREAD_WORKER_INIT_ONSTACK(worker) \ ({ init_kthread_worker(&worker); worker; }) # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) \ struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker) -# define KTHREAD_WORK_INIT_ONSTACK(work, fn) \ - ({ init_kthread_work((&work), fn); work; }) -# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) \ - struct kthread_work work = KTHREAD_WORK_INIT_ONSTACK(work, fn) #else # define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker) -# define DEFINE_KTHREAD_WORK_ONSTACK(work, fn) DEFINE_KTHREAD_WORK(work, fn) #endif extern void __init_kthread_worker(struct kthread_worker *worker, @@ -127,7 +119,6 @@ extern void __init_kthread_worker(struct kthread_worker *worker, memset((work), 0, sizeof(struct kthread_work)); \ INIT_LIST_HEAD(&(work)->node); \ (work)->func = (fn); \ - init_waitqueue_head(&(work)->done); \ } while (0) int kthread_worker_fn(void *worker_ptr); diff --git a/include/linux/ktime.h b/include/linux/ktime.h index de9e46e6bcc9..c9d645ad98ff 100644 --- a/include/linux/ktime.h +++ b/include/linux/ktime.h @@ -27,43 +27,19 @@ /* * ktime_t: * - * On 64-bit CPUs a single 64-bit variable is used to store the hrtimers + * A single 64-bit variable is used to store the hrtimers * internal representation of time values in scalar nanoseconds. The * design plays out best on 64-bit CPUs, where most conversions are * NOPs and most arithmetic ktime_t operations are plain arithmetic * operations. * - * On 32-bit CPUs an optimized representation of the timespec structure - * is used to avoid expensive conversions from and to timespecs. The - * endian-aware order of the tv struct members is chosen to allow - * mathematical operations on the tv64 member of the union too, which - * for certain operations produces better code. - * - * For architectures with efficient support for 64/32-bit conversions the - * plain scalar nanosecond based representation can be selected by the - * config switch CONFIG_KTIME_SCALAR. 
*/ union ktime { s64 tv64; -#if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR) - struct { -# ifdef __BIG_ENDIAN - s32 sec, nsec; -# else - s32 nsec, sec; -# endif - } tv; -#endif }; typedef union ktime ktime_t; /* Kill this */ -/* - * ktime_t definitions when using the 64-bit scalar representation: - */ - -#if (BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR) - /** * ktime_set - Set a ktime_t variable from a seconds/nanoseconds value * @secs: seconds to set @@ -71,13 +47,12 @@ typedef union ktime ktime_t; /* Kill this */ * * Return: The ktime_t representation of the value. */ -static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) +static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs) { -#if (BITS_PER_LONG == 64) if (unlikely(secs >= KTIME_SEC_MAX)) return (ktime_t){ .tv64 = KTIME_MAX }; -#endif - return (ktime_t) { .tv64 = (s64)secs * NSEC_PER_SEC + (s64)nsecs }; + + return (ktime_t) { .tv64 = secs * NSEC_PER_SEC + (s64)nsecs }; } /* Subtract two ktime_t variables. rem = lhs -rhs: */ @@ -108,6 +83,12 @@ static inline ktime_t timespec_to_ktime(struct timespec ts) return ktime_set(ts.tv_sec, ts.tv_nsec); } +/* convert a timespec64 to ktime_t format: */ +static inline ktime_t timespec64_to_ktime(struct timespec64 ts) +{ + return ktime_set(ts.tv_sec, ts.tv_nsec); +} + /* convert a timeval to ktime_t format: */ static inline ktime_t timeval_to_ktime(struct timeval tv) { @@ -117,159 +98,15 @@ static inline ktime_t timeval_to_ktime(struct timeval tv) /* Map the ktime_t to timespec conversion to ns_to_timespec function */ #define ktime_to_timespec(kt) ns_to_timespec((kt).tv64) +/* Map the ktime_t to timespec conversion to ns_to_timespec function */ +#define ktime_to_timespec64(kt) ns_to_timespec64((kt).tv64) + /* Map the ktime_t to timeval conversion to ns_to_timeval function */ #define ktime_to_timeval(kt) ns_to_timeval((kt).tv64) /* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */ #define ktime_to_ns(kt) ((kt).tv64) -#else /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */ - -/* - * Helper macros/inlines to get the ktime_t math right in the timespec - * representation. The macros are sometimes ugly - their actual use is - * pretty okay-ish, given the circumstances. We do all this for - * performance reasons. The pure scalar nsec_t based code was nice and - * simple, but created too many 64-bit / 32-bit conversions and divisions. - * - * Be especially aware that negative values are represented in a way - * that the tv.sec field is negative and the tv.nsec field is greater - * or equal to zero but less than nanoseconds per second. This is the - * same representation which is used by timespecs. - * - * tv.sec < 0 and 0 >= tv.nsec < NSEC_PER_SEC - */ - -/* Set a ktime_t variable to a value in sec/nsec representation: */ -static inline ktime_t ktime_set(const long secs, const unsigned long nsecs) -{ - return (ktime_t) { .tv = { .sec = secs, .nsec = nsecs } }; -} - -/** - * ktime_sub - subtract two ktime_t variables - * @lhs: minuend - * @rhs: subtrahend - * - * Return: The remainder of the subtraction. - */ -static inline ktime_t ktime_sub(const ktime_t lhs, const ktime_t rhs) -{ - ktime_t res; - - res.tv64 = lhs.tv64 - rhs.tv64; - if (res.tv.nsec < 0) - res.tv.nsec += NSEC_PER_SEC; - - return res; -} - -/** - * ktime_add - add two ktime_t variables - * @add1: addend1 - * @add2: addend2 - * - * Return: The sum of @add1 and @add2. 
- */ -static inline ktime_t ktime_add(const ktime_t add1, const ktime_t add2) -{ - ktime_t res; - - res.tv64 = add1.tv64 + add2.tv64; - /* - * performance trick: the (u32) -NSEC gives 0x00000000Fxxxxxxx - * so we subtract NSEC_PER_SEC and add 1 to the upper 32 bit. - * - * it's equivalent to: - * tv.nsec -= NSEC_PER_SEC - * tv.sec ++; - */ - if (res.tv.nsec >= NSEC_PER_SEC) - res.tv64 += (u32)-NSEC_PER_SEC; - - return res; -} - -/** - * ktime_add_ns - Add a scalar nanoseconds value to a ktime_t variable - * @kt: addend - * @nsec: the scalar nsec value to add - * - * Return: The sum of @kt and @nsec in ktime_t format. - */ -extern ktime_t ktime_add_ns(const ktime_t kt, u64 nsec); - -/** - * ktime_sub_ns - Subtract a scalar nanoseconds value from a ktime_t variable - * @kt: minuend - * @nsec: the scalar nsec value to subtract - * - * Return: The subtraction of @nsec from @kt in ktime_t format. - */ -extern ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec); - -/** - * timespec_to_ktime - convert a timespec to ktime_t format - * @ts: the timespec variable to convert - * - * Return: A ktime_t variable with the converted timespec value. - */ -static inline ktime_t timespec_to_ktime(const struct timespec ts) -{ - return (ktime_t) { .tv = { .sec = (s32)ts.tv_sec, - .nsec = (s32)ts.tv_nsec } }; -} - -/** - * timeval_to_ktime - convert a timeval to ktime_t format - * @tv: the timeval variable to convert - * - * Return: A ktime_t variable with the converted timeval value. - */ -static inline ktime_t timeval_to_ktime(const struct timeval tv) -{ - return (ktime_t) { .tv = { .sec = (s32)tv.tv_sec, - .nsec = (s32)(tv.tv_usec * - NSEC_PER_USEC) } }; -} - -/** - * ktime_to_timespec - convert a ktime_t variable to timespec format - * @kt: the ktime_t variable to convert - * - * Return: The timespec representation of the ktime value. - */ -static inline struct timespec ktime_to_timespec(const ktime_t kt) -{ - return (struct timespec) { .tv_sec = (time_t) kt.tv.sec, - .tv_nsec = (long) kt.tv.nsec }; -} - -/** - * ktime_to_timeval - convert a ktime_t variable to timeval format - * @kt: the ktime_t variable to convert - * - * Return: The timeval representation of the ktime value. - */ -static inline struct timeval ktime_to_timeval(const ktime_t kt) -{ - return (struct timeval) { - .tv_sec = (time_t) kt.tv.sec, - .tv_usec = (suseconds_t) (kt.tv.nsec / NSEC_PER_USEC) }; -} - -/** - * ktime_to_ns - convert a ktime_t variable to scalar nanoseconds - * @kt: the ktime_t variable to convert - * - * Return: The scalar nanoseconds representation of @kt. 
- */ -static inline s64 ktime_to_ns(const ktime_t kt) -{ - return (s64) kt.tv.sec * NSEC_PER_SEC + kt.tv.nsec; -} - -#endif /* !((BITS_PER_LONG == 64) || defined(CONFIG_KTIME_SCALAR)) */ /** * ktime_equal - Compares two ktime_t variables to see if they are equal @@ -328,16 +165,20 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2) return ktime_compare(cmp1, cmp2) < 0; } +#if BITS_PER_LONG < 64 +extern u64 ktime_divns(const ktime_t kt, s64 div); +#else /* BITS_PER_LONG < 64 */ +# define ktime_divns(kt, div) (u64)((kt).tv64 / (div)) +#endif + static inline s64 ktime_to_us(const ktime_t kt) { - struct timeval tv = ktime_to_timeval(kt); - return (s64) tv.tv_sec * USEC_PER_SEC + tv.tv_usec; + return ktime_divns(kt, NSEC_PER_USEC); } static inline s64 ktime_to_ms(const ktime_t kt) { - struct timeval tv = ktime_to_timeval(kt); - return (s64) tv.tv_sec * MSEC_PER_SEC + tv.tv_usec / USEC_PER_MSEC; + return ktime_divns(kt, NSEC_PER_MSEC); } static inline s64 ktime_us_delta(const ktime_t later, const ktime_t earlier) @@ -381,6 +222,25 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, } } +/** + * ktime_to_timespec64_cond - convert a ktime_t variable to timespec64 + * format only if the variable contains data + * @kt: the ktime_t variable to convert + * @ts: the timespec variable to store the result in + * + * Return: %true if there was a successful conversion, %false if kt was 0. + */ +static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt, + struct timespec64 *ts) +{ + if (kt.tv64) { + *ts = ktime_to_timespec64(kt); + return true; + } else { + return false; + } +} + /* * The resolution of the clocks. The resolution value is returned in * the clock_getres() system call to give application programmers an @@ -390,12 +250,6 @@ static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt, #define LOW_RES_NSEC TICK_NSEC #define KTIME_LOW_RES (ktime_t){ .tv64 = LOW_RES_NSEC } -/* Get the monotonic time in timespec format: */ -extern void ktime_get_ts(struct timespec *ts); - -/* Get the real (wall-) time in timespec format: */ -#define ktime_get_real_ts(ts) getnstimeofday(ts) - static inline ktime_t ns_to_ktime(u64 ns) { static const ktime_t ktime_zero = { .tv64 = 0 }; @@ -410,4 +264,6 @@ static inline ktime_t ms_to_ktime(u64 ms) return ktime_add_ms(ktime_zero, ms); } +# include <linux/timekeeping.h> + #endif diff --git a/include/linux/libata.h b/include/linux/libata.h index 5ab4e3a76721..92abb497ab14 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -593,6 +593,7 @@ struct ata_host { struct device *dev; void __iomem * const *iomap; unsigned int n_ports; + unsigned int n_tags; /* nr of NCQ tags */ void *private_data; struct ata_port_operations *ops; unsigned long flags; diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h index 6d9371f88875..a614b33d0a39 100644 --- a/include/linux/mfd/arizona/core.h +++ b/include/linux/mfd/arizona/core.h @@ -110,6 +110,12 @@ struct arizona { int clk32k_ref; struct snd_soc_dapm_context *dapm; + + int tdm_width[ARIZONA_MAX_AIF]; + int tdm_slots[ARIZONA_MAX_AIF]; + + uint16_t dac_comp_coeff; + uint8_t dac_comp_enabled; }; int arizona_clk32k_enable(struct arizona *arizona); diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h index 12a5c135c746..4578c72c9b86 100644 --- a/include/linux/mfd/arizona/pdata.h +++ b/include/linux/mfd/arizona/pdata.h @@ -127,6 +127,9 @@ struct arizona_pdata { /** Internal pull on GPIO5 is disabled 
when used for jack detection */ bool jd_gpio5_nopull; + /** set to true if jackdet contact opens on insert */ + bool jd_invert; + /** Use the headphone detect circuit to identify the accessory */ bool hpdet_acc_id; diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index 3e050b933dd0..c466ff3e16b8 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -262,6 +262,41 @@ enum max77693_irq_source { MAX77693_IRQ_GROUP_NR, }; +#define LED_IRQ_FLED2_OPEN BIT(0) +#define LED_IRQ_FLED2_SHORT BIT(1) +#define LED_IRQ_FLED1_OPEN BIT(2) +#define LED_IRQ_FLED1_SHORT BIT(3) +#define LED_IRQ_MAX_FLASH BIT(4) + +#define TOPSYS_IRQ_T120C_INT BIT(0) +#define TOPSYS_IRQ_T140C_INT BIT(1) +#define TOPSYS_IRQ_LOWSYS_INT BIT(3) + +#define CHG_IRQ_BYP_I BIT(0) +#define CHG_IRQ_THM_I BIT(2) +#define CHG_IRQ_BAT_I BIT(3) +#define CHG_IRQ_CHG_I BIT(4) +#define CHG_IRQ_CHGIN_I BIT(6) + +#define MUIC_IRQ_INT1_ADC BIT(0) +#define MUIC_IRQ_INT1_ADC_LOW BIT(1) +#define MUIC_IRQ_INT1_ADC_ERR BIT(2) +#define MUIC_IRQ_INT1_ADC1K BIT(3) + +#define MUIC_IRQ_INT2_CHGTYP BIT(0) +#define MUIC_IRQ_INT2_CHGDETREUN BIT(1) +#define MUIC_IRQ_INT2_DCDTMR BIT(2) +#define MUIC_IRQ_INT2_DXOVP BIT(3) +#define MUIC_IRQ_INT2_VBVOLT BIT(4) +#define MUIC_IRQ_INT2_VIDRM BIT(5) + +#define MUIC_IRQ_INT3_EOC BIT(0) +#define MUIC_IRQ_INT3_CGMBC BIT(1) +#define MUIC_IRQ_INT3_OVP BIT(2) +#define MUIC_IRQ_INT3_MBCCHG_ERR BIT(3) +#define MUIC_IRQ_INT3_CHG_ENABLED BIT(4) +#define MUIC_IRQ_INT3_BAT_DET BIT(5) + enum max77693_irq { /* PMIC - FLASH */ MAX77693_LED_IRQ_FLED2_OPEN, @@ -282,6 +317,10 @@ enum max77693_irq { MAX77693_CHG_IRQ_CHG_I, MAX77693_CHG_IRQ_CHGIN_I, + MAX77693_IRQ_NR, +}; + +enum max77693_irq_muic { /* MUIC INT1 */ MAX77693_MUIC_IRQ_INT1_ADC, MAX77693_MUIC_IRQ_INT1_ADC_LOW, @@ -304,7 +343,7 @@ enum max77693_irq { MAX77693_MUIC_IRQ_INT3_CHG_ENABLED, MAX77693_MUIC_IRQ_INT3_BAT_DET, - MAX77693_IRQ_NR, + MAX77693_MUIC_IRQ_NR, }; struct max77693_dev { @@ -319,7 +358,10 @@ struct max77693_dev { struct regmap *regmap_muic; struct regmap *regmap_haptic; - struct irq_domain *irq_domain; + struct regmap_irq_chip_data *irq_data_led; + struct regmap_irq_chip_data *irq_data_topsys; + struct regmap_irq_chip_data *irq_data_charger; + struct regmap_irq_chip_data *irq_data_muic; int irq; int irq_gpio; @@ -332,14 +374,6 @@ enum max77693_types { TYPE_MAX77693, }; -extern int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest); -extern int max77693_bulk_read(struct regmap *map, u8 reg, int count, - u8 *buf); -extern int max77693_write_reg(struct regmap *map, u8 reg, u8 value); -extern int max77693_bulk_write(struct regmap *map, u8 reg, int count, - u8 *buf); -extern int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask); - extern int max77693_irq_init(struct max77693_dev *max77686); extern void max77693_irq_exit(struct max77693_dev *max77686); extern int max77693_irq_resume(struct max77693_dev *max77686); diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 3420e09e2e20..fb0390a1a498 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h @@ -30,6 +30,8 @@ #define PALMAS_CHIP_ID 0xC035 #define PALMAS_CHIP_CHARGER_ID 0xC036 +#define TPS65917_RESERVED -1 + #define is_palmas(a) (((a) == PALMAS_CHIP_OLD_ID) || \ ((a) == PALMAS_CHIP_ID)) #define is_palmas_charger(a) ((a) == PALMAS_CHIP_CHARGER_ID) @@ -51,6 +53,8 @@ struct palmas_pmic; struct palmas_gpadc; struct palmas_resource; struct palmas_usb; +struct 
palmas_pmic_driver_data; +struct palmas_pmic_platform_data; enum palmas_usb_state { PALMAS_USB_STATE_DISCONNECT, @@ -74,6 +78,8 @@ struct palmas { struct mutex irq_lock; struct regmap_irq_chip_data *irq_data; + struct palmas_pmic_driver_data *pmic_ddata; + /* Child Devices */ struct palmas_pmic *pmic; struct palmas_gpadc *gpadc; @@ -86,6 +92,46 @@ struct palmas { u8 pwm_muxed; }; +#define PALMAS_EXT_REQ (PALMAS_EXT_CONTROL_ENABLE1 | \ + PALMAS_EXT_CONTROL_ENABLE2 | \ + PALMAS_EXT_CONTROL_NSLEEP) + +struct palmas_sleep_requestor_info { + int id; + int reg_offset; + int bit_pos; +}; + +struct palmas_regs_info { + char *name; + char *sname; + u8 vsel_addr; + u8 ctrl_addr; + u8 tstep_addr; + int sleep_id; +}; + +struct palmas_pmic_driver_data { + int smps_start; + int smps_end; + int ldo_begin; + int ldo_end; + int max_reg; + struct palmas_regs_info *palmas_regs_info; + struct of_regulator_match *palmas_matches; + struct palmas_sleep_requestor_info *sleep_req_info; + int (*smps_register)(struct palmas_pmic *pmic, + struct palmas_pmic_driver_data *ddata, + struct palmas_pmic_platform_data *pdata, + const char *pdev_name, + struct regulator_config config); + int (*ldo_register)(struct palmas_pmic *pmic, + struct palmas_pmic_driver_data *ddata, + struct palmas_pmic_platform_data *pdata, + const char *pdev_name, + struct regulator_config config); +}; + struct palmas_gpadc_platform_data { /* Channel 3 current source is only enabled during conversion */ int ch3_current; @@ -184,6 +230,27 @@ enum palmas_regulators { PALMAS_NUM_REGS, }; +enum tps65917_regulators { + /* SMPS regulators */ + TPS65917_REG_SMPS1, + TPS65917_REG_SMPS2, + TPS65917_REG_SMPS3, + TPS65917_REG_SMPS4, + TPS65917_REG_SMPS5, + /* LDO regulators */ + TPS65917_REG_LDO1, + TPS65917_REG_LDO2, + TPS65917_REG_LDO3, + TPS65917_REG_LDO4, + TPS65917_REG_LDO5, + TPS65917_REG_REGEN1, + TPS65917_REG_REGEN2, + TPS65917_REG_REGEN3, + + /* Total number of regulators */ + TPS65917_NUM_REGS, +}; + /* External controll signal name */ enum { PALMAS_EXT_CONTROL_ENABLE1 = 0x1, @@ -228,6 +295,24 @@ enum palmas_external_requestor_id { PALMAS_EXTERNAL_REQSTR_ID_MAX, }; +enum tps65917_external_requestor_id { + TPS65917_EXTERNAL_REQSTR_ID_REGEN1, + TPS65917_EXTERNAL_REQSTR_ID_REGEN2, + TPS65917_EXTERNAL_REQSTR_ID_REGEN3, + TPS65917_EXTERNAL_REQSTR_ID_SMPS1, + TPS65917_EXTERNAL_REQSTR_ID_SMPS2, + TPS65917_EXTERNAL_REQSTR_ID_SMPS3, + TPS65917_EXTERNAL_REQSTR_ID_SMPS4, + TPS65917_EXTERNAL_REQSTR_ID_SMPS5, + TPS65917_EXTERNAL_REQSTR_ID_LDO1, + TPS65917_EXTERNAL_REQSTR_ID_LDO2, + TPS65917_EXTERNAL_REQSTR_ID_LDO3, + TPS65917_EXTERNAL_REQSTR_ID_LDO4, + TPS65917_EXTERNAL_REQSTR_ID_LDO5, + /* Last entry */ + TPS65917_EXTERNAL_REQSTR_ID_MAX, +}; + struct palmas_pmic_platform_data { /* An array of pointers to regulator init data indexed by regulator * ID @@ -349,6 +434,48 @@ struct palmas_gpadc_result { #define PALMAS_MAX_CHANNELS 16 +/* Define the tps65917 IRQ numbers */ +enum tps65917_irqs { + /* INT1 registers */ + TPS65917_RESERVED1, + TPS65917_PWRON_IRQ, + TPS65917_LONG_PRESS_KEY_IRQ, + TPS65917_RESERVED2, + TPS65917_PWRDOWN_IRQ, + TPS65917_HOTDIE_IRQ, + TPS65917_VSYS_MON_IRQ, + TPS65917_RESERVED3, + /* INT2 registers */ + TPS65917_RESERVED4, + TPS65917_OTP_ERROR_IRQ, + TPS65917_WDT_IRQ, + TPS65917_RESERVED5, + TPS65917_RESET_IN_IRQ, + TPS65917_FSD_IRQ, + TPS65917_SHORT_IRQ, + TPS65917_RESERVED6, + /* INT3 registers */ + TPS65917_GPADC_AUTO_0_IRQ, + TPS65917_GPADC_AUTO_1_IRQ, + TPS65917_GPADC_EOC_SW_IRQ, + TPS65917_RESREVED6, + TPS65917_RESERVED7, + 
TPS65917_RESERVED8, + TPS65917_RESERVED9, + TPS65917_VBUS_IRQ, + /* INT4 registers */ + TPS65917_GPIO_0_IRQ, + TPS65917_GPIO_1_IRQ, + TPS65917_GPIO_2_IRQ, + TPS65917_GPIO_3_IRQ, + TPS65917_GPIO_4_IRQ, + TPS65917_GPIO_5_IRQ, + TPS65917_GPIO_6_IRQ, + TPS65917_RESERVED10, + /* Total Number IRQs */ + TPS65917_NUM_IRQ, +}; + /* Define the palmas IRQ numbers */ enum palmas_irqs { /* INT1 registers */ @@ -400,6 +527,7 @@ struct palmas_pmic { int smps123; int smps457; + int smps12; int range[PALMAS_REG_SMPS10_OUT1]; unsigned int ramp_delay[PALMAS_REG_SMPS10_OUT1]; @@ -2871,6 +2999,715 @@ enum usb_irq_events { #define PALMAS_GPADC_TRIM15 0x0E #define PALMAS_GPADC_TRIM16 0x0F +/* TPS65917 Interrupt registers */ + +/* Registers for function INTERRUPT */ +#define TPS65917_INT1_STATUS 0x00 +#define TPS65917_INT1_MASK 0x01 +#define TPS65917_INT1_LINE_STATE 0x02 +#define TPS65917_INT2_STATUS 0x05 +#define TPS65917_INT2_MASK 0x06 +#define TPS65917_INT2_LINE_STATE 0x07 +#define TPS65917_INT3_STATUS 0x0A +#define TPS65917_INT3_MASK 0x0B +#define TPS65917_INT3_LINE_STATE 0x0C +#define TPS65917_INT4_STATUS 0x0F +#define TPS65917_INT4_MASK 0x10 +#define TPS65917_INT4_LINE_STATE 0x11 +#define TPS65917_INT4_EDGE_DETECT1 0x12 +#define TPS65917_INT4_EDGE_DETECT2 0x13 +#define TPS65917_INT_CTRL 0x14 + +/* Bit definitions for INT1_STATUS */ +#define TPS65917_INT1_STATUS_VSYS_MON 0x40 +#define TPS65917_INT1_STATUS_VSYS_MON_SHIFT 0x06 +#define TPS65917_INT1_STATUS_HOTDIE 0x20 +#define TPS65917_INT1_STATUS_HOTDIE_SHIFT 0x05 +#define TPS65917_INT1_STATUS_PWRDOWN 0x10 +#define TPS65917_INT1_STATUS_PWRDOWN_SHIFT 0x04 +#define TPS65917_INT1_STATUS_LONG_PRESS_KEY 0x04 +#define TPS65917_INT1_STATUS_LONG_PRESS_KEY_SHIFT 0x02 +#define TPS65917_INT1_STATUS_PWRON 0x02 +#define TPS65917_INT1_STATUS_PWRON_SHIFT 0x01 + +/* Bit definitions for INT1_MASK */ +#define TPS65917_INT1_MASK_VSYS_MON 0x40 +#define TPS65917_INT1_MASK_VSYS_MON_SHIFT 0x06 +#define TPS65917_INT1_MASK_HOTDIE 0x20 +#define TPS65917_INT1_MASK_HOTDIE_SHIFT 0x05 +#define TPS65917_INT1_MASK_PWRDOWN 0x10 +#define TPS65917_INT1_MASK_PWRDOWN_SHIFT 0x04 +#define TPS65917_INT1_MASK_LONG_PRESS_KEY 0x04 +#define TPS65917_INT1_MASK_LONG_PRESS_KEY_SHIFT 0x02 +#define TPS65917_INT1_MASK_PWRON 0x02 +#define TPS65917_INT1_MASK_PWRON_SHIFT 0x01 + +/* Bit definitions for INT1_LINE_STATE */ +#define TPS65917_INT1_LINE_STATE_VSYS_MON 0x40 +#define TPS65917_INT1_LINE_STATE_VSYS_MON_SHIFT 0x06 +#define TPS65917_INT1_LINE_STATE_HOTDIE 0x20 +#define TPS65917_INT1_LINE_STATE_HOTDIE_SHIFT 0x05 +#define TPS65917_INT1_LINE_STATE_PWRDOWN 0x10 +#define TPS65917_INT1_LINE_STATE_PWRDOWN_SHIFT 0x04 +#define TPS65917_INT1_LINE_STATE_LONG_PRESS_KEY 0x04 +#define TPS65917_INT1_LINE_STATE_LONG_PRESS_KEY_SHIFT 0x02 +#define TPS65917_INT1_LINE_STATE_PWRON 0x02 +#define TPS65917_INT1_LINE_STATE_PWRON_SHIFT 0x01 + +/* Bit definitions for INT2_STATUS */ +#define TPS65917_INT2_STATUS_SHORT 0x40 +#define TPS65917_INT2_STATUS_SHORT_SHIFT 0x06 +#define TPS65917_INT2_STATUS_FSD 0x20 +#define TPS65917_INT2_STATUS_FSD_SHIFT 0x05 +#define TPS65917_INT2_STATUS_RESET_IN 0x10 +#define TPS65917_INT2_STATUS_RESET_IN_SHIFT 0x04 +#define TPS65917_INT2_STATUS_WDT 0x04 +#define TPS65917_INT2_STATUS_WDT_SHIFT 0x02 +#define TPS65917_INT2_STATUS_OTP_ERROR 0x02 +#define TPS65917_INT2_STATUS_OTP_ERROR_SHIFT 0x01 + +/* Bit definitions for INT2_MASK */ +#define TPS65917_INT2_MASK_SHORT 0x40 +#define TPS65917_INT2_MASK_SHORT_SHIFT 0x06 +#define TPS65917_INT2_MASK_FSD 0x20 +#define TPS65917_INT2_MASK_FSD_SHIFT 0x05 +#define 
TPS65917_INT2_MASK_RESET_IN 0x10 +#define TPS65917_INT2_MASK_RESET_IN_SHIFT 0x04 +#define TPS65917_INT2_MASK_WDT 0x04 +#define TPS65917_INT2_MASK_WDT_SHIFT 0x02 +#define TPS65917_INT2_MASK_OTP_ERROR_TIMER 0x02 +#define TPS65917_INT2_MASK_OTP_ERROR_SHIFT 0x01 + +/* Bit definitions for INT2_LINE_STATE */ +#define TPS65917_INT2_LINE_STATE_SHORT 0x40 +#define TPS65917_INT2_LINE_STATE_SHORT_SHIFT 0x06 +#define TPS65917_INT2_LINE_STATE_FSD 0x20 +#define TPS65917_INT2_LINE_STATE_FSD_SHIFT 0x05 +#define TPS65917_INT2_LINE_STATE_RESET_IN 0x10 +#define TPS65917_INT2_LINE_STATE_RESET_IN_SHIFT 0x04 +#define TPS65917_INT2_LINE_STATE_WDT 0x04 +#define TPS65917_INT2_LINE_STATE_WDT_SHIFT 0x02 +#define TPS65917_INT2_LINE_STATE_OTP_ERROR 0x02 +#define TPS65917_INT2_LINE_STATE_OTP_ERROR_SHIFT 0x01 + +/* Bit definitions for INT3_STATUS */ +#define TPS65917_INT3_STATUS_VBUS 0x80 +#define TPS65917_INT3_STATUS_VBUS_SHIFT 0x07 +#define TPS65917_INT3_STATUS_GPADC_EOC_SW 0x04 +#define TPS65917_INT3_STATUS_GPADC_EOC_SW_SHIFT 0x02 +#define TPS65917_INT3_STATUS_GPADC_AUTO_1 0x02 +#define TPS65917_INT3_STATUS_GPADC_AUTO_1_SHIFT 0x01 +#define TPS65917_INT3_STATUS_GPADC_AUTO_0 0x01 +#define TPS65917_INT3_STATUS_GPADC_AUTO_0_SHIFT 0x00 + +/* Bit definitions for INT3_MASK */ +#define TPS65917_INT3_MASK_VBUS 0x80 +#define TPS65917_INT3_MASK_VBUS_SHIFT 0x07 +#define TPS65917_INT3_MASK_GPADC_EOC_SW 0x04 +#define TPS65917_INT3_MASK_GPADC_EOC_SW_SHIFT 0x02 +#define TPS65917_INT3_MASK_GPADC_AUTO_1 0x02 +#define TPS65917_INT3_MASK_GPADC_AUTO_1_SHIFT 0x01 +#define TPS65917_INT3_MASK_GPADC_AUTO_0 0x01 +#define TPS65917_INT3_MASK_GPADC_AUTO_0_SHIFT 0x00 + +/* Bit definitions for INT3_LINE_STATE */ +#define TPS65917_INT3_LINE_STATE_VBUS 0x80 +#define TPS65917_INT3_LINE_STATE_VBUS_SHIFT 0x07 +#define TPS65917_INT3_LINE_STATE_GPADC_EOC_SW 0x04 +#define TPS65917_INT3_LINE_STATE_GPADC_EOC_SW_SHIFT 0x02 +#define TPS65917_INT3_LINE_STATE_GPADC_AUTO_1 0x02 +#define TPS65917_INT3_LINE_STATE_GPADC_AUTO_1_SHIFT 0x01 +#define TPS65917_INT3_LINE_STATE_GPADC_AUTO_0 0x01 +#define TPS65917_INT3_LINE_STATE_GPADC_AUTO_0_SHIFT 0x00 + +/* Bit definitions for INT4_STATUS */ +#define TPS65917_INT4_STATUS_GPIO_6 0x40 +#define TPS65917_INT4_STATUS_GPIO_6_SHIFT 0x06 +#define TPS65917_INT4_STATUS_GPIO_5 0x20 +#define TPS65917_INT4_STATUS_GPIO_5_SHIFT 0x05 +#define TPS65917_INT4_STATUS_GPIO_4 0x10 +#define TPS65917_INT4_STATUS_GPIO_4_SHIFT 0x04 +#define TPS65917_INT4_STATUS_GPIO_3 0x08 +#define TPS65917_INT4_STATUS_GPIO_3_SHIFT 0x03 +#define TPS65917_INT4_STATUS_GPIO_2 0x04 +#define TPS65917_INT4_STATUS_GPIO_2_SHIFT 0x02 +#define TPS65917_INT4_STATUS_GPIO_1 0x02 +#define TPS65917_INT4_STATUS_GPIO_1_SHIFT 0x01 +#define TPS65917_INT4_STATUS_GPIO_0 0x01 +#define TPS65917_INT4_STATUS_GPIO_0_SHIFT 0x00 + +/* Bit definitions for INT4_MASK */ +#define TPS65917_INT4_MASK_GPIO_6 0x40 +#define TPS65917_INT4_MASK_GPIO_6_SHIFT 0x06 +#define TPS65917_INT4_MASK_GPIO_5 0x20 +#define TPS65917_INT4_MASK_GPIO_5_SHIFT 0x05 +#define TPS65917_INT4_MASK_GPIO_4 0x10 +#define TPS65917_INT4_MASK_GPIO_4_SHIFT 0x04 +#define TPS65917_INT4_MASK_GPIO_3 0x08 +#define TPS65917_INT4_MASK_GPIO_3_SHIFT 0x03 +#define TPS65917_INT4_MASK_GPIO_2 0x04 +#define TPS65917_INT4_MASK_GPIO_2_SHIFT 0x02 +#define TPS65917_INT4_MASK_GPIO_1 0x02 +#define TPS65917_INT4_MASK_GPIO_1_SHIFT 0x01 +#define TPS65917_INT4_MASK_GPIO_0 0x01 +#define TPS65917_INT4_MASK_GPIO_0_SHIFT 0x00 + +/* Bit definitions for INT4_LINE_STATE */ +#define TPS65917_INT4_LINE_STATE_GPIO_6 0x40 +#define 
TPS65917_INT4_LINE_STATE_GPIO_6_SHIFT 0x06 +#define TPS65917_INT4_LINE_STATE_GPIO_5 0x20 +#define TPS65917_INT4_LINE_STATE_GPIO_5_SHIFT 0x05 +#define TPS65917_INT4_LINE_STATE_GPIO_4 0x10 +#define TPS65917_INT4_LINE_STATE_GPIO_4_SHIFT 0x04 +#define TPS65917_INT4_LINE_STATE_GPIO_3 0x08 +#define TPS65917_INT4_LINE_STATE_GPIO_3_SHIFT 0x03 +#define TPS65917_INT4_LINE_STATE_GPIO_2 0x04 +#define TPS65917_INT4_LINE_STATE_GPIO_2_SHIFT 0x02 +#define TPS65917_INT4_LINE_STATE_GPIO_1 0x02 +#define TPS65917_INT4_LINE_STATE_GPIO_1_SHIFT 0x01 +#define TPS65917_INT4_LINE_STATE_GPIO_0 0x01 +#define TPS65917_INT4_LINE_STATE_GPIO_0_SHIFT 0x00 + +/* Bit definitions for INT4_EDGE_DETECT1 */ +#define TPS65917_INT4_EDGE_DETECT1_GPIO_3_RISING 0x80 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_3_RISING_SHIFT 0x07 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_3_FALLING 0x40 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_3_FALLING_SHIFT 0x06 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_2_RISING 0x20 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_2_RISING_SHIFT 0x05 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_2_FALLING 0x10 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_2_FALLING_SHIFT 0x04 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_1_RISING 0x08 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_1_RISING_SHIFT 0x03 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_1_FALLING 0x04 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_1_FALLING_SHIFT 0x02 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_0_RISING 0x02 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_0_RISING_SHIFT 0x01 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_0_FALLING 0x01 +#define TPS65917_INT4_EDGE_DETECT1_GPIO_0_FALLING_SHIFT 0x00 + +/* Bit definitions for INT4_EDGE_DETECT2 */ +#define TPS65917_INT4_EDGE_DETECT2_GPIO_6_RISING 0x20 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_6_RISING_SHIFT 0x05 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_6_FALLING 0x10 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_6_FALLING_SHIFT 0x04 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_5_RISING 0x08 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_5_RISING_SHIFT 0x03 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_5_FALLING 0x04 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_5_FALLING_SHIFT 0x02 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_4_RISING 0x02 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_4_RISING_SHIFT 0x01 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_4_FALLING 0x01 +#define TPS65917_INT4_EDGE_DETECT2_GPIO_4_FALLING_SHIFT 0x00 + +/* Bit definitions for INT_CTRL */ +#define TPS65917_INT_CTRL_INT_PENDING 0x04 +#define TPS65917_INT_CTRL_INT_PENDING_SHIFT 0x02 +#define TPS65917_INT_CTRL_INT_CLEAR 0x01 +#define TPS65917_INT_CTRL_INT_CLEAR_SHIFT 0x00 + +/* TPS65917 SMPS Registers */ + +/* Registers for function SMPS */ +#define TPS65917_SMPS1_CTRL 0x00 +#define TPS65917_SMPS1_FORCE 0x02 +#define TPS65917_SMPS1_VOLTAGE 0x03 +#define TPS65917_SMPS2_CTRL 0x04 +#define TPS65917_SMPS2_FORCE 0x06 +#define TPS65917_SMPS2_VOLTAGE 0x07 +#define TPS65917_SMPS3_CTRL 0x0C +#define TPS65917_SMPS3_FORCE 0x0E +#define TPS65917_SMPS3_VOLTAGE 0x0F +#define TPS65917_SMPS4_CTRL 0x10 +#define TPS65917_SMPS4_VOLTAGE 0x13 +#define TPS65917_SMPS5_CTRL 0x18 +#define TPS65917_SMPS5_VOLTAGE 0x1B +#define TPS65917_SMPS_CTRL 0x24 +#define TPS65917_SMPS_PD_CTRL 0x25 +#define TPS65917_SMPS_THERMAL_EN 0x27 +#define TPS65917_SMPS_THERMAL_STATUS 0x28 +#define TPS65917_SMPS_SHORT_STATUS 0x29 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN 0x2A +#define TPS65917_SMPS_POWERGOOD_MASK1 0x2B +#define TPS65917_SMPS_POWERGOOD_MASK2 0x2C + +/* Bit definitions for SMPS1_CTRL */ +#define TPS65917_SMPS1_CTRL_WR_S 0x80 +#define 
TPS65917_SMPS1_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_SMPS1_CTRL_ROOF_FLOOR_EN 0x40 +#define TPS65917_SMPS1_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define TPS65917_SMPS1_CTRL_STATUS_MASK 0x30 +#define TPS65917_SMPS1_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_SMPS1_CTRL_MODE_SLEEP_MASK 0x0C +#define TPS65917_SMPS1_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_SMPS1_CTRL_MODE_ACTIVE_MASK 0x03 +#define TPS65917_SMPS1_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS1_FORCE */ +#define TPS65917_SMPS1_FORCE_CMD 0x80 +#define TPS65917_SMPS1_FORCE_CMD_SHIFT 0x07 +#define TPS65917_SMPS1_FORCE_VSEL_MASK 0x7F +#define TPS65917_SMPS1_FORCE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS1_VOLTAGE */ +#define TPS65917_SMPS1_VOLTAGE_RANGE 0x80 +#define TPS65917_SMPS1_VOLTAGE_RANGE_SHIFT 0x07 +#define TPS65917_SMPS1_VOLTAGE_VSEL_MASK 0x7F +#define TPS65917_SMPS1_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS2_CTRL */ +#define TPS65917_SMPS2_CTRL_WR_S 0x80 +#define TPS65917_SMPS2_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_SMPS2_CTRL_ROOF_FLOOR_EN 0x40 +#define TPS65917_SMPS2_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define TPS65917_SMPS2_CTRL_STATUS_MASK 0x30 +#define TPS65917_SMPS2_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_SMPS2_CTRL_MODE_SLEEP_MASK 0x0C +#define TPS65917_SMPS2_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_SMPS2_CTRL_MODE_ACTIVE_MASK 0x03 +#define TPS65917_SMPS2_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS2_FORCE */ +#define TPS65917_SMPS2_FORCE_CMD 0x80 +#define TPS65917_SMPS2_FORCE_CMD_SHIFT 0x07 +#define TPS65917_SMPS2_FORCE_VSEL_MASK 0x7F +#define TPS65917_SMPS2_FORCE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS2_VOLTAGE */ +#define TPS65917_SMPS2_VOLTAGE_RANGE 0x80 +#define TPS65917_SMPS2_VOLTAGE_RANGE_SHIFT 0x07 +#define TPS65917_SMPS2_VOLTAGE_VSEL_MASK 0x7F +#define TPS65917_SMPS2_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS3_CTRL */ +#define TPS65917_SMPS3_CTRL_WR_S 0x80 +#define TPS65917_SMPS3_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_SMPS3_CTRL_ROOF_FLOOR_EN 0x40 +#define TPS65917_SMPS3_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define TPS65917_SMPS3_CTRL_STATUS_MASK 0x30 +#define TPS65917_SMPS3_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_SMPS3_CTRL_MODE_SLEEP_MASK 0x0C +#define TPS65917_SMPS3_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_SMPS3_CTRL_MODE_ACTIVE_MASK 0x03 +#define TPS65917_SMPS3_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS3_FORCE */ +#define TPS65917_SMPS3_FORCE_CMD 0x80 +#define TPS65917_SMPS3_FORCE_CMD_SHIFT 0x07 +#define TPS65917_SMPS3_FORCE_VSEL_MASK 0x7F +#define TPS65917_SMPS3_FORCE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS3_VOLTAGE */ +#define TPS65917_SMPS3_VOLTAGE_RANGE 0x80 +#define TPS65917_SMPS3_VOLTAGE_RANGE_SHIFT 0x07 +#define TPS65917_SMPS3_VOLTAGE_VSEL_MASK 0x7F +#define TPS65917_SMPS3_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS4_CTRL */ +#define TPS65917_SMPS4_CTRL_WR_S 0x80 +#define TPS65917_SMPS4_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_SMPS4_CTRL_ROOF_FLOOR_EN 0x40 +#define TPS65917_SMPS4_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define TPS65917_SMPS4_CTRL_STATUS_MASK 0x30 +#define TPS65917_SMPS4_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_SMPS4_CTRL_MODE_SLEEP_MASK 0x0C +#define TPS65917_SMPS4_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_SMPS4_CTRL_MODE_ACTIVE_MASK 0x03 +#define TPS65917_SMPS4_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS4_VOLTAGE */ +#define TPS65917_SMPS4_VOLTAGE_RANGE 0x80 +#define TPS65917_SMPS4_VOLTAGE_RANGE_SHIFT 0x07 +#define TPS65917_SMPS4_VOLTAGE_VSEL_MASK 0x7F +#define 
TPS65917_SMPS4_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS5_CTRL */ +#define TPS65917_SMPS5_CTRL_WR_S 0x80 +#define TPS65917_SMPS5_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_SMPS5_CTRL_ROOF_FLOOR_EN 0x40 +#define TPS65917_SMPS5_CTRL_ROOF_FLOOR_EN_SHIFT 0x06 +#define TPS65917_SMPS5_CTRL_STATUS_MASK 0x30 +#define TPS65917_SMPS5_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_SMPS5_CTRL_MODE_SLEEP_MASK 0x0C +#define TPS65917_SMPS5_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_SMPS5_CTRL_MODE_ACTIVE_MASK 0x03 +#define TPS65917_SMPS5_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for SMPS5_VOLTAGE */ +#define TPS65917_SMPS5_VOLTAGE_RANGE 0x80 +#define TPS65917_SMPS5_VOLTAGE_RANGE_SHIFT 0x07 +#define TPS65917_SMPS5_VOLTAGE_VSEL_MASK 0x7F +#define TPS65917_SMPS5_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for SMPS_CTRL */ +#define TPS65917_SMPS_CTRL_SMPS1_SMPS12_EN 0x10 +#define TPS65917_SMPS_CTRL_SMPS1_SMPS12_EN_SHIFT 0x04 +#define TPS65917_SMPS_CTRL_SMPS12_PHASE_CTRL 0x03 +#define TPS65917_SMPS_CTRL_SMPS12_PHASE_CTRL_SHIFT 0x00 + +/* Bit definitions for SMPS_PD_CTRL */ +#define TPS65917_SMPS_PD_CTRL_SMPS5 0x40 +#define TPS65917_SMPS_PD_CTRL_SMPS5_SHIFT 0x06 +#define TPS65917_SMPS_PD_CTRL_SMPS4 0x10 +#define TPS65917_SMPS_PD_CTRL_SMPS4_SHIFT 0x04 +#define TPS65917_SMPS_PD_CTRL_SMPS3 0x08 +#define TPS65917_SMPS_PD_CTRL_SMPS3_SHIFT 0x03 +#define TPS65917_SMPS_PD_CTRL_SMPS2 0x02 +#define TPS65917_SMPS_PD_CTRL_SMPS2_SHIFT 0x01 +#define TPS65917_SMPS_PD_CTRL_SMPS1 0x01 +#define TPS65917_SMPS_PD_CTRL_SMPS1_SHIFT 0x00 + +/* Bit definitions for SMPS_THERMAL_EN */ +#define TPS65917_SMPS_THERMAL_EN_SMPS5 0x40 +#define TPS65917_SMPS_THERMAL_EN_SMPS5_SHIFT 0x06 +#define TPS65917_SMPS_THERMAL_EN_SMPS3 0x08 +#define TPS65917_SMPS_THERMAL_EN_SMPS3_SHIFT 0x03 +#define TPS65917_SMPS_THERMAL_EN_SMPS12 0x01 +#define TPS65917_SMPS_THERMAL_EN_SMPS12_SHIFT 0x00 + +/* Bit definitions for SMPS_THERMAL_STATUS */ +#define TPS65917_SMPS_THERMAL_STATUS_SMPS5 0x40 +#define TPS65917_SMPS_THERMAL_STATUS_SMPS5_SHIFT 0x06 +#define TPS65917_SMPS_THERMAL_STATUS_SMPS3 0x08 +#define TPS65917_SMPS_THERMAL_STATUS_SMPS3_SHIFT 0x03 +#define TPS65917_SMPS_THERMAL_STATUS_SMPS12 0x01 +#define TPS65917_SMPS_THERMAL_STATUS_SMPS12_SHIFT 0x00 + +/* Bit definitions for SMPS_SHORT_STATUS */ +#define TPS65917_SMPS_SHORT_STATUS_SMPS5 0x40 +#define TPS65917_SMPS_SHORT_STATUS_SMPS5_SHIFT 0x06 +#define TPS65917_SMPS_SHORT_STATUS_SMPS4 0x10 +#define TPS65917_SMPS_SHORT_STATUS_SMPS4_SHIFT 0x04 +#define TPS65917_SMPS_SHORT_STATUS_SMPS3 0x08 +#define TPS65917_SMPS_SHORT_STATUS_SMPS3_SHIFT 0x03 +#define TPS65917_SMPS_SHORT_STATUS_SMPS2 0x02 +#define TPS65917_SMPS_SHORT_STATUS_SMPS2_SHIFT 0x01 +#define TPS65917_SMPS_SHORT_STATUS_SMPS1 0x01 +#define TPS65917_SMPS_SHORT_STATUS_SMPS1_SHIFT 0x00 + +/* Bit definitions for SMPS_NEGATIVE_CURRENT_LIMIT_EN */ +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS5 0x40 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS5_SHIFT 0x06 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS4 0x10 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS4_SHIFT 0x04 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3 0x08 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS3_SHIFT 0x03 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS2 0x02 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS2_SHIFT 0x01 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS1 0x01 +#define TPS65917_SMPS_NEGATIVE_CURRENT_LIMIT_EN_SMPS1_SHIFT 0x00 + +/* Bit definitions for SMPS_POWERGOOD_MASK1 */ +#define 
TPS65917_SMPS_POWERGOOD_MASK1_SMPS5 0x40 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS5_SHIFT 0x06 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS4 0x10 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS4_SHIFT 0x04 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS3 0x08 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS3_SHIFT 0x03 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS2 0x02 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS2_SHIFT 0x01 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS1 0x01 +#define TPS65917_SMPS_POWERGOOD_MASK1_SMPS1_SHIFT 0x00 + +/* Bit definitions for SMPS_POWERGOOD_MASK2 */ +#define TPS65917_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT 0x80 +#define TPS65917_SMPS_POWERGOOD_MASK2_POWERGOOD_TYPE_SELECT_SHIFT 0x07 +#define TPS65917_SMPS_POWERGOOD_MASK2_OVC_ALARM_SHIFT 0x10 +#define TPS65917_SMPS_POWERGOOD_MASK2_OVC_ALARM 0x04 + +/* Bit definitions for SMPS_PLL_CTRL */ + +#define TPS65917_SMPS_PLL_CTRL_PLL_EN_PLL_BYPASS_SHIFT 0x08 +#define TPS65917_SMPS_PLL_CTRL_PLL_PLL_EN_BYPASS 0x03 +#define TPS65917_SMPS_PLL_CTRL_PLL_PLL_BYPASS_CLK_SHIFT 0x04 +#define TPS65917_SMPS_PLL_CTRL_PLL_PLL_BYPASS_CLK 0x02 + +/* Registers for function LDO */ +#define TPS65917_LDO1_CTRL 0x00 +#define TPS65917_LDO1_VOLTAGE 0x01 +#define TPS65917_LDO2_CTRL 0x02 +#define TPS65917_LDO2_VOLTAGE 0x03 +#define TPS65917_LDO3_CTRL 0x04 +#define TPS65917_LDO3_VOLTAGE 0x05 +#define TPS65917_LDO4_CTRL 0x0E +#define TPS65917_LDO4_VOLTAGE 0x0F +#define TPS65917_LDO5_CTRL 0x12 +#define TPS65917_LDO5_VOLTAGE 0x13 +#define TPS65917_LDO_PD_CTRL1 0x1B +#define TPS65917_LDO_PD_CTRL2 0x1C +#define TPS65917_LDO_SHORT_STATUS1 0x1D +#define TPS65917_LDO_SHORT_STATUS2 0x1E +#define TPS65917_LDO_PD_CTRL3 0x2D +#define TPS65917_LDO_SHORT_STATUS3 0x2E + +/* Bit definitions for LDO1_CTRL */ +#define TPS65917_LDO1_CTRL_WR_S 0x80 +#define TPS65917_LDO1_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_LDO1_CTRL_BYPASS_EN 0x40 +#define TPS65917_LDO1_CTRL_BYPASS_EN_SHIFT 0x06 +#define TPS65917_LDO1_CTRL_STATUS 0x10 +#define TPS65917_LDO1_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_LDO1_CTRL_MODE_SLEEP 0x04 +#define TPS65917_LDO1_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_LDO1_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_LDO1_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO1_VOLTAGE */ +#define TPS65917_LDO1_VOLTAGE_VSEL_MASK 0x2F +#define TPS65917_LDO1_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO2_CTRL */ +#define TPS65917_LDO2_CTRL_WR_S 0x80 +#define TPS65917_LDO2_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_LDO2_CTRL_BYPASS_EN 0x40 +#define TPS65917_LDO2_CTRL_BYPASS_EN_SHIFT 0x06 +#define TPS65917_LDO2_CTRL_STATUS 0x10 +#define TPS65917_LDO2_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_LDO2_CTRL_MODE_SLEEP 0x04 +#define TPS65917_LDO2_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_LDO2_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_LDO2_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO2_VOLTAGE */ +#define TPS65917_LDO2_VOLTAGE_VSEL_MASK 0x2F +#define TPS65917_LDO2_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO3_CTRL */ +#define TPS65917_LDO3_CTRL_WR_S 0x80 +#define TPS65917_LDO3_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_LDO3_CTRL_STATUS 0x10 +#define TPS65917_LDO3_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_LDO3_CTRL_MODE_SLEEP 0x04 +#define TPS65917_LDO3_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_LDO3_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_LDO3_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO3_VOLTAGE */ +#define TPS65917_LDO3_VOLTAGE_VSEL_MASK 0x2F +#define TPS65917_LDO3_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO4_CTRL */ +#define 
TPS65917_LDO4_CTRL_WR_S 0x80 +#define TPS65917_LDO4_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_LDO4_CTRL_STATUS 0x10 +#define TPS65917_LDO4_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_LDO4_CTRL_MODE_SLEEP 0x04 +#define TPS65917_LDO4_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_LDO4_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_LDO4_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO4_VOLTAGE */ +#define TPS65917_LDO4_VOLTAGE_VSEL_MASK 0x2F +#define TPS65917_LDO4_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO5_CTRL */ +#define TPS65917_LDO5_CTRL_WR_S 0x80 +#define TPS65917_LDO5_CTRL_WR_S_SHIFT 0x07 +#define TPS65917_LDO5_CTRL_STATUS 0x10 +#define TPS65917_LDO5_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_LDO5_CTRL_MODE_SLEEP 0x04 +#define TPS65917_LDO5_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_LDO5_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_LDO5_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for LDO5_VOLTAGE */ +#define TPS65917_LDO5_VOLTAGE_VSEL_MASK 0x2F +#define TPS65917_LDO5_VOLTAGE_VSEL_SHIFT 0x00 + +/* Bit definitions for LDO_PD_CTRL1 */ +#define TPS65917_LDO_PD_CTRL1_LDO4 0x80 +#define TPS65917_LDO_PD_CTRL1_LDO4_SHIFT 0x07 +#define TPS65917_LDO_PD_CTRL1_LDO2 0x02 +#define TPS65917_LDO_PD_CTRL1_LDO2_SHIFT 0x01 +#define TPS65917_LDO_PD_CTRL1_LDO1 0x01 +#define TPS65917_LDO_PD_CTRL1_LDO1_SHIFT 0x00 + +/* Bit definitions for LDO_PD_CTRL2 */ +#define TPS65917_LDO_PD_CTRL2_LDO3 0x04 +#define TPS65917_LDO_PD_CTRL2_LDO3_SHIFT 0x02 +#define TPS65917_LDO_PD_CTRL2_LDO5 0x02 +#define TPS65917_LDO_PD_CTRL2_LDO5_SHIFT 0x01 + +/* Bit definitions for LDO_PD_CTRL3 */ +#define TPS65917_LDO_PD_CTRL2_LDOVANA 0x80 +#define TPS65917_LDO_PD_CTRL2_LDOVANA_SHIFT 0x07 + +/* Bit definitions for LDO_SHORT_STATUS1 */ +#define TPS65917_LDO_SHORT_STATUS1_LDO4 0x80 +#define TPS65917_LDO_SHORT_STATUS1_LDO4_SHIFT 0x07 +#define TPS65917_LDO_SHORT_STATUS1_LDO2 0x02 +#define TPS65917_LDO_SHORT_STATUS1_LDO2_SHIFT 0x01 +#define TPS65917_LDO_SHORT_STATUS1_LDO1 0x01 +#define TPS65917_LDO_SHORT_STATUS1_LDO1_SHIFT 0x00 + +/* Bit definitions for LDO_SHORT_STATUS2 */ +#define TPS65917_LDO_SHORT_STATUS2_LDO3 0x04 +#define TPS65917_LDO_SHORT_STATUS2_LDO3_SHIFT 0x02 +#define TPS65917_LDO_SHORT_STATUS2_LDO5 0x02 +#define TPS65917_LDO_SHORT_STATUS2_LDO5_SHIFT 0x01 + +/* Bit definitions for LDO_SHORT_STATUS2 */ +#define TPS65917_LDO_SHORT_STATUS2_LDOVANA 0x80 +#define TPS65917_LDO_SHORT_STATUS2_LDOVANA_SHIFT 0x07 + +/* Bit definitions for REGEN1_CTRL */ +#define TPS65917_REGEN1_CTRL_STATUS 0x10 +#define TPS65917_REGEN1_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_REGEN1_CTRL_MODE_SLEEP 0x04 +#define TPS65917_REGEN1_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_REGEN1_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_REGEN1_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for PLLEN_CTRL */ +#define TPS65917_PLLEN_CTRL_STATUS 0x10 +#define TPS65917_PLLEN_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_PLLEN_CTRL_MODE_SLEEP 0x04 +#define TPS65917_PLLEN_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_PLLEN_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_PLLEN_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for REGEN2_CTRL */ +#define TPS65917_REGEN2_CTRL_STATUS 0x10 +#define TPS65917_REGEN2_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_REGEN2_CTRL_MODE_SLEEP 0x04 +#define TPS65917_REGEN2_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_REGEN2_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_REGEN2_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Bit definitions for NSLEEP_RES_ASSIGN */ +#define TPS65917_NSLEEP_RES_ASSIGN_PLL_EN 0x08 +#define TPS65917_NSLEEP_RES_ASSIGN_PLL_EN_SHIFT 0x03 +#define 
TPS65917_NSLEEP_RES_ASSIGN_REGEN3 0x04 +#define TPS65917_NSLEEP_RES_ASSIGN_REGEN3_SHIFT 0x02 +#define TPS65917_NSLEEP_RES_ASSIGN_REGEN2 0x02 +#define TPS65917_NSLEEP_RES_ASSIGN_REGEN2_SHIFT 0x01 +#define TPS65917_NSLEEP_RES_ASSIGN_REGEN1 0x01 +#define TPS65917_NSLEEP_RES_ASSIGN_REGEN1_SHIFT 0x00 + +/* Bit definitions for NSLEEP_SMPS_ASSIGN */ +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS5 0x40 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS5_SHIFT 0x06 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS4 0x10 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS4_SHIFT 0x04 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS3 0x08 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS3_SHIFT 0x03 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS2 0x02 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS2_SHIFT 0x01 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS1 0x01 +#define TPS65917_NSLEEP_SMPS_ASSIGN_SMPS1_SHIFT 0x00 + +/* Bit definitions for NSLEEP_LDO_ASSIGN1 */ +#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO4 0x80 +#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO4_SHIFT 0x07 +#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO2 0x02 +#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO2_SHIFT 0x01 +#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO1 0x01 +#define TPS65917_NSLEEP_LDO_ASSIGN1_LDO1_SHIFT 0x00 + +/* Bit definitions for NSLEEP_LDO_ASSIGN2 */ +#define TPS65917_NSLEEP_LDO_ASSIGN2_LDO3 0x04 +#define TPS65917_NSLEEP_LDO_ASSIGN2_LDO3_SHIFT 0x02 +#define TPS65917_NSLEEP_LDO_ASSIGN2_LDO5 0x02 +#define TPS65917_NSLEEP_LDO_ASSIGN2_LDO5_SHIFT 0x01 + +/* Bit definitions for ENABLE1_RES_ASSIGN */ +#define TPS65917_ENABLE1_RES_ASSIGN_PLLEN 0x08 +#define TPS65917_ENABLE1_RES_ASSIGN_PLLEN_SHIFT 0x03 +#define TPS65917_ENABLE1_RES_ASSIGN_REGEN3 0x04 +#define TPS65917_ENABLE1_RES_ASSIGN_REGEN3_SHIFT 0x02 +#define TPS65917_ENABLE1_RES_ASSIGN_REGEN2 0x02 +#define TPS65917_ENABLE1_RES_ASSIGN_REGEN2_SHIFT 0x01 +#define TPS65917_ENABLE1_RES_ASSIGN_REGEN1 0x01 +#define TPS65917_ENABLE1_RES_ASSIGN_REGEN1_SHIFT 0x00 + +/* Bit definitions for ENABLE1_SMPS_ASSIGN */ +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS5 0x40 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS5_SHIFT 0x06 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS4 0x10 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS4_SHIFT 0x04 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS3 0x08 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS3_SHIFT 0x03 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS2 0x02 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS2_SHIFT 0x01 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS1 0x01 +#define TPS65917_ENABLE1_SMPS_ASSIGN_SMPS1_SHIFT 0x00 + +/* Bit definitions for ENABLE1_LDO_ASSIGN1 */ +#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO4 0x80 +#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO4_SHIFT 0x07 +#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO2 0x02 +#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO2_SHIFT 0x01 +#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO1 0x01 +#define TPS65917_ENABLE1_LDO_ASSIGN1_LDO1_SHIFT 0x00 + +/* Bit definitions for ENABLE1_LDO_ASSIGN2 */ +#define TPS65917_ENABLE1_LDO_ASSIGN2_LDO3 0x04 +#define TPS65917_ENABLE1_LDO_ASSIGN2_LDO3_SHIFT 0x02 +#define TPS65917_ENABLE1_LDO_ASSIGN2_LDO5 0x02 +#define TPS65917_ENABLE1_LDO_ASSIGN2_LDO5_SHIFT 0x01 + +/* Bit definitions for ENABLE2_RES_ASSIGN */ +#define TPS65917_ENABLE2_RES_ASSIGN_PLLEN 0x08 +#define TPS65917_ENABLE2_RES_ASSIGN_PLLEN_SHIFT 0x03 +#define TPS65917_ENABLE2_RES_ASSIGN_REGEN3 0x04 +#define TPS65917_ENABLE2_RES_ASSIGN_REGEN3_SHIFT 0x02 +#define TPS65917_ENABLE2_RES_ASSIGN_REGEN2 0x02 +#define TPS65917_ENABLE2_RES_ASSIGN_REGEN2_SHIFT 0x01 +#define TPS65917_ENABLE2_RES_ASSIGN_REGEN1 0x01 +#define TPS65917_ENABLE2_RES_ASSIGN_REGEN1_SHIFT 0x00 + 
+/* Bit definitions for ENABLE2_SMPS_ASSIGN */ +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS5 0x40 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS5_SHIFT 0x06 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS4 0x10 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS4_SHIFT 0x04 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS3 0x08 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS3_SHIFT 0x03 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS2 0x02 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS2_SHIFT 0x01 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS1 0x01 +#define TPS65917_ENABLE2_SMPS_ASSIGN_SMPS1_SHIFT 0x00 + +/* Bit definitions for ENABLE2_LDO_ASSIGN1 */ +#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO4 0x80 +#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO4_SHIFT 0x07 +#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO2 0x02 +#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO2_SHIFT 0x01 +#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO1 0x01 +#define TPS65917_ENABLE2_LDO_ASSIGN1_LDO1_SHIFT 0x00 + +/* Bit definitions for ENABLE2_LDO_ASSIGN2 */ +#define TPS65917_ENABLE2_LDO_ASSIGN2_LDO3 0x04 +#define TPS65917_ENABLE2_LDO_ASSIGN2_LDO3_SHIFT 0x02 +#define TPS65917_ENABLE2_LDO_ASSIGN2_LDO5 0x02 +#define TPS65917_ENABLE2_LDO_ASSIGN2_LDO5_SHIFT 0x01 + +/* Bit definitions for REGEN3_CTRL */ +#define TPS65917_REGEN3_CTRL_STATUS 0x10 +#define TPS65917_REGEN3_CTRL_STATUS_SHIFT 0x04 +#define TPS65917_REGEN3_CTRL_MODE_SLEEP 0x04 +#define TPS65917_REGEN3_CTRL_MODE_SLEEP_SHIFT 0x02 +#define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01 +#define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00 + +/* Registers for function RESOURCE */ +#define TPS65917_REGEN1_CTRL 0x2 +#define TPS65917_PLLEN_CTRL 0x3 +#define TPS65917_NSLEEP_RES_ASSIGN 0x6 +#define TPS65917_NSLEEP_SMPS_ASSIGN 0x7 +#define TPS65917_NSLEEP_LDO_ASSIGN1 0x8 +#define TPS65917_NSLEEP_LDO_ASSIGN2 0x9 +#define TPS65917_ENABLE1_RES_ASSIGN 0xA +#define TPS65917_ENABLE1_SMPS_ASSIGN 0xB +#define TPS65917_ENABLE1_LDO_ASSIGN1 0xC +#define TPS65917_ENABLE1_LDO_ASSIGN2 0xD +#define TPS65917_ENABLE2_RES_ASSIGN 0xE +#define TPS65917_ENABLE2_SMPS_ASSIGN 0xF +#define TPS65917_ENABLE2_LDO_ASSIGN1 0x10 +#define TPS65917_ENABLE2_LDO_ASSIGN2 0x11 +#define TPS65917_REGEN2_CTRL 0x12 +#define TPS65917_REGEN3_CTRL 0x13 + static inline int palmas_read(struct palmas *palmas, unsigned int base, unsigned int reg, unsigned int *val) { diff --git a/include/linux/mic_bus.h b/include/linux/mic_bus.h new file mode 100644 index 000000000000..d5b5f76d57ef --- /dev/null +++ b/include/linux/mic_bus.h @@ -0,0 +1,110 @@ +/* + * Intel MIC Platform Software Stack (MPSS) + * + * Copyright(c) 2014 Intel Corporation. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License, version 2, as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution in + * the file called "COPYING". + * + * Intel MIC Bus driver. + * + * This implementation is very similar to the the virtio bus driver + * implementation @ include/linux/virtio.h. + */ +#ifndef _MIC_BUS_H_ +#define _MIC_BUS_H_ +/* + * Everything a mbus driver needs to work with any particular mbus + * implementation. 
+ */ +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> + +struct mbus_device_id { + __u32 device; + __u32 vendor; +}; + +#define MBUS_DEV_DMA_HOST 2 +#define MBUS_DEV_DMA_MIC 3 +#define MBUS_DEV_ANY_ID 0xffffffff + +/** + * mbus_device - representation of a device using mbus + * @mmio_va: virtual address of mmio space + * @hw_ops: the hardware ops supported by this device. + * @id: the device type identification (used to match it with a driver). + * @dev: underlying device. + * be used to communicate with. + * @index: unique position on the mbus bus + */ +struct mbus_device { + void __iomem *mmio_va; + struct mbus_hw_ops *hw_ops; + struct mbus_device_id id; + struct device dev; + int index; +}; + +/** + * mbus_driver - operations for a mbus I/O driver + * @driver: underlying device driver (populate name and owner). + * @id_table: the ids serviced by this driver. + * @probe: the function to call when a device is found. Returns 0 or -errno. + * @remove: the function to call when a device is removed. + */ +struct mbus_driver { + struct device_driver driver; + const struct mbus_device_id *id_table; + int (*probe)(struct mbus_device *dev); + void (*scan)(struct mbus_device *dev); + void (*remove)(struct mbus_device *dev); +}; + +/** + * struct mic_irq - opaque pointer used as cookie + */ +struct mic_irq; + +/** + * mbus_hw_ops - Hardware operations for accessing a MIC device on the MIC bus. + */ +struct mbus_hw_ops { + struct mic_irq* (*request_threaded_irq)(struct mbus_device *mbdev, + irq_handler_t handler, + irq_handler_t thread_fn, + const char *name, void *data, + int intr_src); + void (*free_irq)(struct mbus_device *mbdev, + struct mic_irq *cookie, void *data); + void (*ack_interrupt)(struct mbus_device *mbdev, int num); +}; + +struct mbus_device * +mbus_register_device(struct device *pdev, int id, struct dma_map_ops *dma_ops, + struct mbus_hw_ops *hw_ops, void __iomem *mmio_va); +void mbus_unregister_device(struct mbus_device *mbdev); + +int mbus_register_driver(struct mbus_driver *drv); +void mbus_unregister_driver(struct mbus_driver *drv); + +static inline struct mbus_device *dev_to_mbus(struct device *_dev) +{ + return container_of(_dev, struct mbus_device, dev); +} + +static inline struct mbus_driver *drv_to_mbus(struct device_driver *drv) +{ + return container_of(drv, struct mbus_driver, driver); +} + +#endif /* _MIC_BUS_H */ diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 35b51e7af886..e15b1544ea83 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h @@ -48,6 +48,17 @@ #define MSIX_LEGACY_SZ 4 #define MIN_MSIX_P_PORT 5 +#define MLX4_NUM_UP 8 +#define MLX4_NUM_TC 8 +#define MLX4_MAX_100M_UNITS_VAL 255 /* + * work around: can't set values + * greater then this value when + * using 100 Mbps units. 
+ */ +#define MLX4_RATELIMIT_100M_UNITS 3 /* 100 Mbps */ +#define MLX4_RATELIMIT_1G_UNITS 4 /* 1 Gbps */ +#define MLX4_RATELIMIT_DEFAULT 0x00ff + #define MLX4_ROCE_MAX_GIDS 128 #define MLX4_ROCE_PF_GIDS 16 @@ -1243,4 +1254,11 @@ int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port); int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port); int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port, int enable); + +/* Returns true if running in low memory profile (kdump kernel) */ +static inline bool mlx4_low_memory_profile(void) +{ + return reset_devices; +} + #endif /* MLX4_DEVICE_H */ diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 3406cfb1267a..334947151dfc 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -456,9 +456,6 @@ struct mlx5_eqe_cq_err { u8 syndrome; }; -struct mlx5_eqe_dropped_packet { -}; - struct mlx5_eqe_port_state { u8 reserved0[8]; u8 port; @@ -498,7 +495,6 @@ union ev_data { struct mlx5_eqe_comp comp; struct mlx5_eqe_qp_srq qp_srq; struct mlx5_eqe_cq_err cq_err; - struct mlx5_eqe_dropped_packet dp; struct mlx5_eqe_port_state port; struct mlx5_eqe_gpio gpio; struct mlx5_eqe_congestion cong; diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 2bce4aad2570..b88e9b46d957 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -381,8 +381,8 @@ struct mlx5_buf { struct mlx5_buf_list *page_list; int nbufs; int npages; - int page_shift; int size; + u8 page_shift; }; struct mlx5_eq { @@ -543,6 +543,10 @@ struct mlx5_priv { /* protect mkey key part */ spinlock_t mkey_lock; u8 mkey_key; + + struct list_head dev_list; + struct list_head ctx_list; + spinlock_t ctx_lock; }; struct mlx5_core_dev { @@ -555,7 +559,7 @@ struct mlx5_core_dev { struct mlx5_init_seg __iomem *iseg; void (*event) (struct mlx5_core_dev *dev, enum mlx5_dev_event event, - void *data); + unsigned long param); struct mlx5_priv priv; struct mlx5_profile *profile; atomic_t num_qps; @@ -604,8 +608,8 @@ struct mlx5_cmd_work_ent { int page_queue; u8 status; u8 token; - struct timespec ts1; - struct timespec ts2; + u64 ts1; + u64 ts2; u16 op; }; @@ -686,8 +690,6 @@ static inline u32 mlx5_base_mkey(const u32 key) return key & 0xffffff00u; } -int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev); -void mlx5_dev_cleanup(struct mlx5_core_dev *dev); int mlx5_cmd_init(struct mlx5_core_dev *dev); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); void mlx5_cmd_use_events(struct mlx5_core_dev *dev); @@ -734,7 +736,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr, int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb, - u16 opmod, int port); + u16 opmod, u8 port); void mlx5_pagealloc_init(struct mlx5_core_dev *dev); void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); int mlx5_pagealloc_start(struct mlx5_core_dev *dev); @@ -767,7 +769,7 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in, int size_in, void *data_out, int size_out, u16 reg_num, int arg, int write); -int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps); +int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); void mlx5_debug_eq_remove(struct 
mlx5_core_dev *dev, struct mlx5_eq *eq); @@ -811,9 +813,20 @@ enum { MAX_MR_CACHE_ENTRIES = 16, }; +struct mlx5_interface { + void * (*add)(struct mlx5_core_dev *dev); + void (*remove)(struct mlx5_core_dev *dev, void *context); + void (*event)(struct mlx5_core_dev *dev, void *context, + enum mlx5_dev_event event, unsigned long param); + struct list_head list; +}; + +int mlx5_register_interface(struct mlx5_interface *intf); +void mlx5_unregister_interface(struct mlx5_interface *intf); + struct mlx5_profile { u64 mask; - u32 log_max_qp; + u8 log_max_qp; struct { int size; int limit; diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 96c5750e3110..796deac19fcf 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -516,4 +516,12 @@ struct vm_special_mapping struct page **pages; }; +enum tlb_flush_reason { + TLB_FLUSH_ON_TASK_SWITCH, + TLB_REMOTE_SHOOTDOWN, + TLB_LOCAL_SHOOTDOWN, + TLB_LOCAL_MM_SHOOTDOWN, + NR_TLB_FLUSH_REASONS, +}; + #endif /* _LINUX_MM_TYPES_H */ diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index b1990c5524e1..494f99e852da 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -381,6 +381,11 @@ extern int param_set_ulong(const char *val, const struct kernel_param *kp); extern int param_get_ulong(char *buffer, const struct kernel_param *kp); #define param_check_ulong(name, p) __param_check(name, p, unsigned long) +extern struct kernel_param_ops param_ops_ullong; +extern int param_set_ullong(const char *val, const struct kernel_param *kp); +extern int param_get_ullong(char *buffer, const struct kernel_param *kp); +#define param_check_ullong(name, p) __param_check(name, p, unsigned long long) + extern struct kernel_param_ops param_ops_charp; extern int param_set_charp(const char *val, const struct kernel_param *kp); extern int param_get_charp(char *buffer, const struct kernel_param *kp); diff --git a/include/linux/msi.h b/include/linux/msi.h index 92a2f991262a..8103f32f6d87 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -25,7 +25,8 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg); struct msi_desc { struct { __u8 is_msix : 1; - __u8 multiple: 3; /* log2 number of messages */ + __u8 multiple: 3; /* log2 num of messages allocated */ + __u8 multi_cap : 3; /* log2 num of messages supported */ __u8 maskbit : 1; /* mask-pending bit supported ? 
*/ __u8 is_64 : 1; /* Address size: 0=32bit 1=64bit */ __u8 pos; /* Location of the msi capability */ diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 42aa9b9ecd5f..8d5535c58cc2 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -176,8 +176,4 @@ extern void mutex_unlock(struct mutex *lock); extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); -#ifndef arch_mutex_cpu_relax -# define arch_mutex_cpu_relax() cpu_relax() -#endif - #endif /* __LINUX_MUTEX_H */ diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index d99800cbdcf3..dcfdecbfa0b7 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -176,4 +176,12 @@ enum { NETIF_F_HW_VLAN_STAG_RX | \ NETIF_F_HW_VLAN_STAG_TX) +#define NETIF_F_GSO_ENCAP_ALL (NETIF_F_GSO_GRE | \ + NETIF_F_GSO_GRE_CSUM | \ + NETIF_F_GSO_IPIP | \ + NETIF_F_GSO_SIT | \ + NETIF_F_GSO_UDP_TUNNEL | \ + NETIF_F_GSO_UDP_TUNNEL_CSUM | \ + NETIF_F_GSO_MPLS) + #endif /* _LINUX_NETDEV_FEATURES_H */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 66f9a04ec270..38377392d082 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -943,7 +943,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * const unsigned char *addr) * Deletes the FDB entry from dev coresponding to addr. * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, - * struct net_device *dev, int idx) + * struct net_device *dev, struct net_device *filter_dev, + * int idx) * Used to add FDB entries to dump requests. Implementers should add * entries to skb and update idx with the number of entries. * @@ -1114,6 +1115,7 @@ struct net_device_ops { int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, + struct net_device *filter_dev, int idx); int (*ndo_bridge_setlink)(struct net_device *dev, @@ -1229,42 +1231,228 @@ enum netdev_priv_flags { #define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE #define IFF_MACVLAN IFF_MACVLAN -/* - * The DEVICE structure. - * Actually, this whole structure is a big mistake. It mixes I/O - * data with strictly "high-level" data, and it has to know about - * almost every data structure used in the INET module. +/** + * struct net_device - The DEVICE structure. + * Actually, this whole structure is a big mistake. It mixes I/O + * data with strictly "high-level" data, and it has to know about + * almost every data structure used in the INET module. + * + * @name: This is the first field of the "visible" part of this structure + * (i.e. as seen by users in the "Space.c" file). It is the name + * of the interface. 
+ * + * @name_hlist: Device name hash chain, please keep it close to name[] + * @ifalias: SNMP alias + * @mem_end: Shared memory end + * @mem_start: Shared memory start + * @base_addr: Device I/O address + * @irq: Device IRQ number + * + * @state: Generic network queuing layer state, see netdev_state_t + * @dev_list: The global list of network devices + * @napi_list: List entry, that is used for polling napi devices + * @unreg_list: List entry, that is used, when we are unregistering the + * device, see the function unregister_netdev + * @close_list: List entry, that is used, when we are closing the device + * + * @adj_list: Directly linked devices, like slaves for bonding + * @all_adj_list: All linked devices, *including* neighbours + * @features: Currently active device features + * @hw_features: User-changeable features + * + * @wanted_features: User-requested features + * @vlan_features: Mask of features inheritable by VLAN devices + * + * @hw_enc_features: Mask of features inherited by encapsulating devices + * This field indicates what encapsulation + * offloads the hardware is capable of doing, + * and drivers will need to set them appropriately. + * + * @mpls_features: Mask of features inheritable by MPLS + * + * @ifindex: interface index + * @iflink: unique device identifier + * + * @stats: Statistics struct, which was left as a legacy, use + * rtnl_link_stats64 instead + * + * @rx_dropped: Dropped packets by core network, + * do not use this in drivers + * @tx_dropped: Dropped packets by core network, + * do not use this in drivers + * + * @carrier_changes: Stats to monitor carrier on<->off transitions + * + * @wireless_handlers: List of functions to handle Wireless Extensions, + * instead of ioctl, + * see <net/iw_handler.h> for details. + * @wireless_data: Instance data managed by the core of wireless extensions + * + * @netdev_ops: Includes several pointers to callbacks, + * if one wants to override the ndo_*() functions + * @ethtool_ops: Management operations + * @fwd_ops: Management operations + * @header_ops: Includes callbacks for creating,parsing,rebuilding,etc + * of Layer 2 headers. + * + * @flags: Interface flags (a la BSD) + * @priv_flags: Like 'flags' but invisible to userspace, + * see if.h for the definitions + * @gflags: Global flags ( kept as legacy ) + * @padded: How much padding added by alloc_netdev() + * @operstate: RFC2863 operstate + * @link_mode: Mapping policy to operstate + * @if_port: Selectable AUI, TP, ... + * @dma: DMA channel + * @mtu: Interface MTU value + * @type: Interface hardware type + * @hard_header_len: Hardware header length + * + * @needed_headroom: Extra headroom the hardware may need, but not in all + * cases can this be guaranteed + * @needed_tailroom: Extra tailroom the hardware may need, but not in all + * cases can this be guaranteed. 
Some cases also use + * LL_MAX_HEADER instead to allocate the skb + * + * interface address info: + * + * @perm_addr: Permanent hw address + * @addr_assign_type: Hw address assignment type + * @addr_len: Hardware address length + * @neigh_priv_len; Used in neigh_alloc(), + * initialized only in atm/clip.c + * @dev_id: Used to differentiate devices that share + * the same link layer address + * @dev_port: Used to differentiate devices that share + * the same function + * @addr_list_lock: XXX: need comments on this one + * @uc: unicast mac addresses + * @mc: multicast mac addresses + * @dev_addrs: list of device hw addresses + * @queues_kset: Group of all Kobjects in the Tx and RX queues + * @uc_promisc: Counter, that indicates, that promiscuous mode + * has been enabled due to the need to listen to + * additional unicast addresses in a device that + * does not implement ndo_set_rx_mode() + * @promiscuity: Number of times, the NIC is told to work in + * Promiscuous mode, if it becomes 0 the NIC will + * exit from working in Promiscuous mode + * @allmulti: Counter, enables or disables allmulticast mode + * + * @vlan_info: VLAN info + * @dsa_ptr: dsa specific data + * @tipc_ptr: TIPC specific data + * @atalk_ptr: AppleTalk link + * @ip_ptr: IPv4 specific data + * @dn_ptr: DECnet specific data + * @ip6_ptr: IPv6 specific data + * @ax25_ptr: AX.25 specific data + * @ieee80211_ptr: IEEE 802.11 specific data, assign before registering + * + * @last_rx: Time of last Rx + * @dev_addr: Hw address (before bcast, + * because most packets are unicast) + * + * @_rx: Array of RX queues + * @num_rx_queues: Number of RX queues + * allocated at register_netdev() time + * @real_num_rx_queues: Number of RX queues currently active in device + * + * @rx_handler: handler for received packets + * @rx_handler_data: XXX: need comments on this one + * @ingress_queue: XXX: need comments on this one + * @broadcast: hw bcast address + * + * @_tx: Array of TX queues + * @num_tx_queues: Number of TX queues allocated at alloc_netdev_mq() time + * @real_num_tx_queues: Number of TX queues currently active in device + * @qdisc: Root qdisc from userspace point of view + * @tx_queue_len: Max frames per queue allowed + * @tx_global_lock: XXX: need comments on this one + * + * @xps_maps: XXX: need comments on this one + * + * @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts, + * indexed by RX queue number. Assigned by driver. 
+ * This must only be set if the ndo_rx_flow_steer + * operation is defined + * + * @trans_start: Time (in jiffies) of last Tx + * @watchdog_timeo: Represents the timeout that is used by + * the watchdog ( see dev_watchdog() ) + * @watchdog_timer: List of timers + * + * @pcpu_refcnt: Number of references to this device + * @todo_list: Delayed register/unregister + * @index_hlist: Device index hash chain + * @link_watch_list: XXX: need comments on this one + * + * @reg_state: Register/unregister state machine + * @dismantle: Device is going to be freed + * @rtnl_link_state: This enum represents the phases of creating + * a new link + * + * @destructor: Called from unregister, + * can be used to call free_netdev + * @npinfo: XXX: need comments on this one + * @nd_net: Network namespace this network device is inside + * + * @ml_priv: Mid-layer private + * @lstats: Loopback statistics + * @tstats: Tunnel statistics + * @dstats: Dummy statistics + * @vstats: Virtual ethernet statistics + * + * @garp_port: GARP + * @mrp_port: MRP + * + * @dev: Class/net/name entry + * @sysfs_groups: Space for optional device, statistics and wireless + * sysfs groups + * + * @sysfs_rx_queue_group: Space for optional per-rx queue attributes + * @rtnl_link_ops: Rtnl_link_ops + * + * @gso_max_size: Maximum size of generic segmentation offload + * @gso_max_segs: Maximum number of segments that can be passed to the + * NIC for GSO + * + * @dcbnl_ops: Data Center Bridging netlink ops + * @num_tc: Number of traffic classes in the net device + * @tc_to_txq: XXX: need comments on this one + * @prio_tc_map XXX: need comments on this one + * + * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp + * + * @priomap: XXX: need comments on this one + * @phydev: Physical device may attach itself + * for hardware timestamping + * + * @qdisc_tx_busylock: XXX: need comments on this one + * + * @group: The group, that the device belongs to + * @pm_qos_req: Power Management QoS object * * FIXME: cleanup struct net_device such that network protocol info * moves out. */ struct net_device { - - /* - * This is the first field of the "visible" part of this structure - * (i.e. as seen by users in the "Space.c" file). It is the name - * of the interface. - */ char name[IFNAMSIZ]; - - /* device name hash chain, please keep it close to name[] */ struct hlist_node name_hlist; - - /* snmp alias */ char *ifalias; - /* * I/O specific fields * FIXME: Merge these and struct ifmap into one */ - unsigned long mem_end; /* shared mem end */ - unsigned long mem_start; /* shared mem start */ - unsigned long base_addr; /* device I/O address */ - int irq; /* device IRQ number */ + unsigned long mem_end; + unsigned long mem_start; + unsigned long base_addr; + int irq; /* - * Some hardware also needs these fields, but they are not + * Some hardware also needs these fields (state,dev_list, + * napi_list,unreg_list,close_list) but they are not * part of the usual set specified in Space.c. 
*/ @@ -1275,110 +1463,80 @@ struct net_device { struct list_head unreg_list; struct list_head close_list; - /* directly linked devices, like slaves for bonding */ struct { struct list_head upper; struct list_head lower; } adj_list; - /* all linked devices, *including* neighbours */ struct { struct list_head upper; struct list_head lower; } all_adj_list; - - /* currently active device features */ netdev_features_t features; - /* user-changeable features */ netdev_features_t hw_features; - /* user-requested features */ netdev_features_t wanted_features; - /* mask of features inheritable by VLAN devices */ netdev_features_t vlan_features; - /* mask of features inherited by encapsulating devices - * This field indicates what encapsulation offloads - * the hardware is capable of doing, and drivers will - * need to set them appropriately. - */ netdev_features_t hw_enc_features; - /* mask of fetures inheritable by MPLS */ netdev_features_t mpls_features; - /* Interface index. Unique device identifier */ int ifindex; int iflink; struct net_device_stats stats; - /* dropped packets by core network, Do not use this in drivers */ atomic_long_t rx_dropped; atomic_long_t tx_dropped; - /* Stats to monitor carrier on<->off transitions */ atomic_t carrier_changes; #ifdef CONFIG_WIRELESS_EXT - /* List of functions to handle Wireless Extensions (instead of ioctl). - * See <net/iw_handler.h> for details. Jean II */ const struct iw_handler_def * wireless_handlers; - /* Instance data managed by the core of Wireless Extensions. */ struct iw_public_data * wireless_data; #endif - /* Management operations */ const struct net_device_ops *netdev_ops; const struct ethtool_ops *ethtool_ops; const struct forwarding_accel_ops *fwd_ops; - /* Hardware header description */ const struct header_ops *header_ops; - unsigned int flags; /* interface flags (a la BSD) */ - unsigned int priv_flags; /* Like 'flags' but invisible to userspace. - * See if.h for definitions. */ + unsigned int flags; + unsigned int priv_flags; + unsigned short gflags; - unsigned short padded; /* How much padding added by alloc_netdev() */ + unsigned short padded; - unsigned char operstate; /* RFC2863 operstate */ - unsigned char link_mode; /* mapping policy to operstate */ + unsigned char operstate; + unsigned char link_mode; - unsigned char if_port; /* Selectable AUI, TP,..*/ - unsigned char dma; /* DMA channel */ + unsigned char if_port; + unsigned char dma; - unsigned int mtu; /* interface MTU value */ - unsigned short type; /* interface hardware type */ - unsigned short hard_header_len; /* hardware hdr length */ + unsigned int mtu; + unsigned short type; + unsigned short hard_header_len; - /* extra head- and tailroom the hardware may need, but not in all cases - * can this be guaranteed, especially tailroom. Some cases also use - * LL_MAX_HEADER instead to allocate the skb. - */ unsigned short needed_headroom; unsigned short needed_tailroom; /* Interface address info. 
*/ - unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ - unsigned char addr_assign_type; /* hw address assignment type */ - unsigned char addr_len; /* hardware address length */ + unsigned char perm_addr[MAX_ADDR_LEN]; + unsigned char addr_assign_type; + unsigned char addr_len; unsigned short neigh_priv_len; - unsigned short dev_id; /* Used to differentiate devices - * that share the same link - * layer address - */ - unsigned short dev_port; /* Used to differentiate - * devices that share the same - * function - */ + unsigned short dev_id; + unsigned short dev_port; spinlock_t addr_list_lock; - struct netdev_hw_addr_list uc; /* Unicast mac addresses */ - struct netdev_hw_addr_list mc; /* Multicast mac addresses */ - struct netdev_hw_addr_list dev_addrs; /* list of device - * hw addresses - */ + struct netdev_hw_addr_list uc; + struct netdev_hw_addr_list mc; + struct netdev_hw_addr_list dev_addrs; + #ifdef CONFIG_SYSFS struct kset *queues_kset; #endif + unsigned char name_assign_type; + bool uc_promisc; unsigned int promiscuity; unsigned int allmulti; @@ -1387,40 +1545,34 @@ struct net_device { /* Protocol specific pointers */ #if IS_ENABLED(CONFIG_VLAN_8021Q) - struct vlan_info __rcu *vlan_info; /* VLAN info */ + struct vlan_info __rcu *vlan_info; #endif #if IS_ENABLED(CONFIG_NET_DSA) - struct dsa_switch_tree *dsa_ptr; /* dsa specific data */ + struct dsa_switch_tree *dsa_ptr; #endif #if IS_ENABLED(CONFIG_TIPC) - struct tipc_bearer __rcu *tipc_ptr; /* TIPC specific data */ + struct tipc_bearer __rcu *tipc_ptr; #endif - void *atalk_ptr; /* AppleTalk link */ - struct in_device __rcu *ip_ptr; /* IPv4 specific data */ - struct dn_dev __rcu *dn_ptr; /* DECnet specific data */ - struct inet6_dev __rcu *ip6_ptr; /* IPv6 specific data */ - void *ax25_ptr; /* AX.25 specific data */ - struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data, - assign before registering */ + void *atalk_ptr; + struct in_device __rcu *ip_ptr; + struct dn_dev __rcu *dn_ptr; + struct inet6_dev __rcu *ip6_ptr; + void *ax25_ptr; + struct wireless_dev *ieee80211_ptr; /* * Cache lines mostly used on receive path (including eth_type_trans()) */ - unsigned long last_rx; /* Time of last Rx */ + unsigned long last_rx; /* Interface address info used in eth_type_trans() */ - unsigned char *dev_addr; /* hw address, (before bcast - because most packets are - unicast) */ + unsigned char *dev_addr; #ifdef CONFIG_SYSFS struct netdev_rx_queue *_rx; - /* Number of RX queues allocated at register_netdev() time */ unsigned int num_rx_queues; - - /* Number of RX queues currently active in device */ unsigned int real_num_rx_queues; #endif @@ -1429,33 +1581,23 @@ struct net_device { void __rcu *rx_handler_data; struct netdev_queue __rcu *ingress_queue; - unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */ + unsigned char broadcast[MAX_ADDR_LEN]; /* * Cache lines mostly used on transmit path */ struct netdev_queue *_tx ____cacheline_aligned_in_smp; - - /* Number of TX queues allocated at alloc_netdev_mq() time */ unsigned int num_tx_queues; - - /* Number of TX queues currently active in device */ unsigned int real_num_tx_queues; - - /* root qdisc from userspace point of view */ struct Qdisc *qdisc; - - unsigned long tx_queue_len; /* Max frames per queue allowed */ + unsigned long tx_queue_len; spinlock_t tx_global_lock; #ifdef CONFIG_XPS struct xps_dev_maps __rcu *xps_maps; #endif #ifdef CONFIG_RFS_ACCEL - /* CPU reverse-mapping for RX completion interrupts, indexed - * by RX queue number. Assigned by driver. 
This must only be - * set if the ndo_rx_flow_steer operation is defined. */ struct cpu_rmap *rx_cpu_rmap; #endif @@ -1465,22 +1607,17 @@ struct net_device { * trans_start here is expensive for high speed devices on SMP, * please use netdev_queue->trans_start instead. */ - unsigned long trans_start; /* Time (in jiffies) of last Tx */ + unsigned long trans_start; - int watchdog_timeo; /* used by dev_watchdog() */ + int watchdog_timeo; struct timer_list watchdog_timer; - /* Number of references to this device */ int __percpu *pcpu_refcnt; - - /* delayed register/unregister */ struct list_head todo_list; - /* device index hash chain */ - struct hlist_node index_hlist; + struct hlist_node index_hlist; struct list_head link_watch_list; - /* register/unregister state machine */ enum { NETREG_UNINITIALIZED=0, NETREG_REGISTERED, /* completed register_netdevice */ NETREG_UNREGISTERING, /* called unregister_netdevice */ @@ -1489,14 +1626,13 @@ struct net_device { NETREG_DUMMY, /* dummy device for NAPI poll */ } reg_state:8; - bool dismantle; /* device is going do be freed */ + bool dismantle; enum { RTNL_LINK_INITIALIZED, RTNL_LINK_INITIALIZING, } rtnl_link_state:16; - /* Called from unregister, can be used to call free_netdev */ void (*destructor)(struct net_device *dev); #ifdef CONFIG_NETPOLL @@ -1504,31 +1640,25 @@ struct net_device { #endif #ifdef CONFIG_NET_NS - /* Network namespace this network device is inside */ struct net *nd_net; #endif /* mid-layer private */ union { - void *ml_priv; - struct pcpu_lstats __percpu *lstats; /* loopback stats */ + void *ml_priv; + struct pcpu_lstats __percpu *lstats; struct pcpu_sw_netstats __percpu *tstats; - struct pcpu_dstats __percpu *dstats; /* dummy stats */ - struct pcpu_vstats __percpu *vstats; /* veth stats */ + struct pcpu_dstats __percpu *dstats; + struct pcpu_vstats __percpu *vstats; }; - /* GARP */ + struct garp_port __rcu *garp_port; - /* MRP */ struct mrp_port __rcu *mrp_port; - /* class/net/name entry */ - struct device dev; - /* space for optional device, statistics, and wireless sysfs groups */ + struct device dev; const struct attribute_group *sysfs_groups[4]; - /* space for optional per-rx queue attributes */ const struct attribute_group *sysfs_rx_queue_group; - /* rtnetlink link ops */ const struct rtnl_link_ops *rtnl_link_ops; /* for setting kernel sock attribute on TCP connection setup */ @@ -1538,7 +1668,6 @@ struct net_device { u16 gso_max_segs; #ifdef CONFIG_DCB - /* Data Center Bridging netlink ops */ const struct dcbnl_rtnl_ops *dcbnl_ops; #endif u8 num_tc; @@ -1546,20 +1675,14 @@ struct net_device { u8 prio_tc_map[TC_BITMASK + 1]; #if IS_ENABLED(CONFIG_FCOE) - /* max exchange id for FCoE LRO by ddp */ unsigned int fcoe_ddp_xid; #endif #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) struct netprio_map __rcu *priomap; #endif - /* phy device may attach itself for hardware timestamping */ struct phy_device *phydev; - struct lock_class_key *qdisc_tx_busylock; - - /* group the device belongs to */ int group; - struct pm_qos_request pm_qos_req; }; #define to_net_dev(d) container_of(d, struct net_device, dev) @@ -2486,7 +2609,7 @@ static inline int netif_set_xps_queue(struct net_device *dev, * as a distribution range limit for the returned value. 
*/ static inline u16 skb_tx_hash(const struct net_device *dev, - const struct sk_buff *skb) + struct sk_buff *skb) { return __skb_tx_hash(dev, skb, dev->real_num_tx_queues); } @@ -2987,13 +3110,15 @@ void ether_setup(struct net_device *dev); /* Support for loadable net-drivers */ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, + unsigned char name_assign_type, void (*setup)(struct net_device *), unsigned int txqs, unsigned int rxqs); -#define alloc_netdev(sizeof_priv, name, setup) \ - alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1) +#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \ + alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1) -#define alloc_netdev_mq(sizeof_priv, name, setup, count) \ - alloc_netdev_mqs(sizeof_priv, name, setup, count, count) +#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \ + alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \ + count) int register_netdev(struct net_device *dev); void unregister_netdev(struct net_device *dev); @@ -3377,11 +3502,26 @@ extern struct pernet_operations __net_initdata loopback_net_ops; static inline const char *netdev_name(const struct net_device *dev) { - if (dev->reg_state != NETREG_REGISTERED) - return "(unregistered net_device)"; + if (!dev->name[0] || strchr(dev->name, '%')) + return "(unnamed net_device)"; return dev->name; } +static inline const char *netdev_reg_state(const struct net_device *dev) +{ + switch (dev->reg_state) { + case NETREG_UNINITIALIZED: return " (uninitialized)"; + case NETREG_REGISTERED: return ""; + case NETREG_UNREGISTERING: return " (unregistering)"; + case NETREG_UNREGISTERED: return " (unregistered)"; + case NETREG_RELEASED: return " (released)"; + case NETREG_DUMMY: return " (dummy)"; + } + + WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state); + return " (unknown)"; +} + __printf(3, 4) int netdev_printk(const char *level, const struct net_device *dev, const char *format, ...); @@ -3438,7 +3578,8 @@ do { \ * file/line information and a backtrace. */ #define netdev_WARN(dev, format, args...) 
\ - WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args) + WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \ + netdev_reg_state(dev), ##args) /* netif printk helpers, similar to netdev_printk */ diff --git a/include/linux/nmi.h b/include/linux/nmi.h index 447775ee2c4b..1d2a6ab6b8bb 100644 --- a/include/linux/nmi.h +++ b/include/linux/nmi.h @@ -63,4 +63,8 @@ extern int proc_dowatchdog(struct ctl_table *, int , void __user *, size_t *, loff_t *); #endif +#ifdef CONFIG_HAVE_ACPI_APEI_NMI +#include <asm/nmi.h> +#endif + #endif diff --git a/include/linux/of_address.h b/include/linux/of_address.h index c13b8782a4eb..fb7b7221e063 100644 --- a/include/linux/of_address.h +++ b/include/linux/of_address.h @@ -109,7 +109,12 @@ static inline bool of_dma_is_coherent(struct device_node *np) extern int of_address_to_resource(struct device_node *dev, int index, struct resource *r); void __iomem *of_iomap(struct device_node *node, int index); +void __iomem *of_io_request_and_map(struct device_node *device, + int index, char *name); #else + +#include <linux/io.h> + static inline int of_address_to_resource(struct device_node *dev, int index, struct resource *r) { @@ -120,6 +125,12 @@ static inline void __iomem *of_iomap(struct device_node *device, int index) { return NULL; } + +static inline void __iomem *of_io_request_and_map(struct device_node *device, + int index, char *name) +{ + return IOMEM_ERR_PTR(-EINVAL); +} #endif #if defined(CONFIG_OF_ADDRESS) && defined(CONFIG_PCI) diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 05117899fcb4..0ff360d5b3b3 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h @@ -73,6 +73,8 @@ extern int early_init_dt_scan_root(unsigned long node, const char *uname, int depth, void *data); extern bool early_init_dt_scan(void *params); +extern bool early_init_dt_verify(void *params); +extern void early_init_dt_scan_nodes(void); extern const char *of_flat_dt_get_machine_name(void); extern const void *of_flat_dt_match_machine(const void *default_match, @@ -84,6 +86,7 @@ extern void unflatten_and_copy_device_tree(void); extern void early_init_devtree(void *); extern void early_get_first_memblock_info(void *, phys_addr_t *); extern u64 fdt_translate_address(const void *blob, int node_offset); +extern void of_fdt_limit_memory(int limit); #else /* CONFIG_OF_FLATTREE */ static inline void early_init_fdt_scan_reserved_mem(void) {} static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h index 6926db724258..c2bbf672b84e 100644 --- a/include/linux/oid_registry.h +++ b/include/linux/oid_registry.h @@ -52,9 +52,15 @@ enum OID { OID_md4, /* 1.2.840.113549.2.4 */ OID_md5, /* 1.2.840.113549.2.5 */ - OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ + /* Microsoft Authenticode & Software Publishing */ + OID_msIndirectData, /* 1.3.6.1.4.1.311.2.1.4 */ + OID_msPeImageDataObjId, /* 1.3.6.1.4.1.311.2.1.15 */ + OID_msIndividualSPKeyPurpose, /* 1.3.6.1.4.1.311.2.1.21 */ OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */ + + OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */ OID_sha1, /* 1.3.14.3.2.26 */ + OID_sha256, /* 2.16.840.1.101.3.4.2.1 */ /* Distinguished Name attribute IDs [RFC 2256] */ OID_commonName, /* 2.5.4.3 */ diff --git a/include/linux/omap-iommu.h b/include/linux/omap-iommu.h index cac78de09c07..c1aede46718b 100644 --- a/include/linux/omap-iommu.h +++ b/include/linux/omap-iommu.h @@ -10,41 +10,8 @@ * published by the Free Software Foundation. 
*/ -#ifndef _INTEL_IOMMU_H_ -#define _INTEL_IOMMU_H_ - -struct iovm_struct { - struct omap_iommu *iommu; /* iommu object which this belongs to */ - u32 da_start; /* area definition */ - u32 da_end; - u32 flags; /* IOVMF_: see below */ - struct list_head list; /* linked in ascending order */ - const struct sg_table *sgt; /* keep 'page' <-> 'da' mapping */ - void *va; /* mpu side mapped address */ -}; - -#define MMU_RAM_ENDIAN_SHIFT 9 -#define MMU_RAM_ENDIAN_LITTLE (0 << MMU_RAM_ENDIAN_SHIFT) -#define MMU_RAM_ELSZ_8 (0 << MMU_RAM_ELSZ_SHIFT) -#define IOVMF_ENDIAN_LITTLE MMU_RAM_ENDIAN_LITTLE -#define MMU_RAM_ELSZ_SHIFT 7 -#define IOVMF_ELSZ_8 MMU_RAM_ELSZ_8 - -struct iommu_domain; - -extern struct iovm_struct *omap_find_iovm_area(struct device *dev, u32 da); -extern u32 -omap_iommu_vmap(struct iommu_domain *domain, struct device *dev, u32 da, - const struct sg_table *sgt, u32 flags); -extern struct sg_table *omap_iommu_vunmap(struct iommu_domain *domain, - struct device *dev, u32 da); -extern u32 -omap_iommu_vmalloc(struct iommu_domain *domain, struct device *dev, - u32 da, size_t bytes, u32 flags); -extern void -omap_iommu_vfree(struct iommu_domain *domain, struct device *dev, - const u32 da); -extern void *omap_da_to_va(struct device *dev, u32 da); +#ifndef _OMAP_IOMMU_H_ +#define _OMAP_IOMMU_H_ extern void omap_iommu_save_ctx(struct device *dev); extern void omap_iommu_restore_ctx(struct device *dev); diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index 0a97b583ee8d..e1474ae18c88 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -399,6 +399,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping, } /* + * Get the offset in PAGE_SIZE. + * (TODO: hugepage should have ->index in PAGE_SIZE) + */ +static inline pgoff_t page_to_pgoff(struct page *page) +{ + if (unlikely(PageHeadHuge(page))) + return page->index << compound_order(page); + else + return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT); +} + +/* * Return byte-offset into filesystem object for page. 
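
page_to_pgoff(), added in the pagemap.h hunk above, folds the hugetlb special case into one helper: a head huge page's ->index is scaled by its compound order, so offsets from huge and regular pages become comparable in PAGE_SIZE units. A minimal sketch of the kind of comparison this enables, using an illustrative helper that is not part of the patch and is only meaningful for pages of the file mapped by @vma:

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Illustrative only: does @page fall inside the file range mapped by @vma? */
static bool page_within_vma_range(struct page *page,
                                  struct vm_area_struct *vma)
{
        pgoff_t pgoff = page_to_pgoff(page);

        return pgoff >= vma->vm_pgoff &&
               pgoff < vma->vm_pgoff + vma_pages(vma);
}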
*/ static inline loff_t page_offset(struct page *page) diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h index 637a608ded0b..64dacb7288a6 100644 --- a/include/linux/pci-acpi.h +++ b/include/linux/pci-acpi.h @@ -11,12 +11,17 @@ #include <linux/acpi.h> #ifdef CONFIG_ACPI -extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev, - struct pci_bus *pci_bus); -extern acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev); +extern acpi_status pci_acpi_add_bus_pm_notifier(struct acpi_device *dev); +static inline acpi_status pci_acpi_remove_bus_pm_notifier(struct acpi_device *dev) +{ + return acpi_remove_pm_notifier(dev); +} extern acpi_status pci_acpi_add_pm_notifier(struct acpi_device *dev, struct pci_dev *pci_dev); -extern acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev); +static inline acpi_status pci_acpi_remove_pm_notifier(struct acpi_device *dev) +{ + return acpi_remove_pm_notifier(dev); +} extern phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle); static inline acpi_handle acpi_find_root_bridge_handle(struct pci_dev *pdev) diff --git a/include/linux/pci.h b/include/linux/pci.h index 466bcd111d85..61978a460841 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -978,6 +978,8 @@ int pci_try_reset_slot(struct pci_slot *slot); int pci_probe_reset_bus(struct pci_bus *bus); int pci_reset_bus(struct pci_bus *bus); int pci_try_reset_bus(struct pci_bus *bus); +void pci_reset_secondary_bus(struct pci_dev *dev); +void pcibios_reset_secondary_bus(struct pci_dev *dev); void pci_reset_bridge_secondary_bus(struct pci_dev *dev); void pci_update_resource(struct pci_dev *dev, int resno); int __must_check pci_assign_resource(struct pci_dev *dev, int i); @@ -1186,7 +1188,6 @@ int pci_msix_vec_count(struct pci_dev *dev); int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec); void pci_msix_shutdown(struct pci_dev *dev); void pci_disable_msix(struct pci_dev *dev); -void msi_remove_pci_irq_vectors(struct pci_dev *dev); void pci_restore_msi_state(struct pci_dev *dev); int pci_msi_enabled(void); int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec); @@ -1217,7 +1218,6 @@ static inline int pci_enable_msix(struct pci_dev *dev, { return -ENOSYS; } static inline void pci_msix_shutdown(struct pci_dev *dev) { } static inline void pci_disable_msix(struct pci_dev *dev) { } -static inline void msi_remove_pci_irq_vectors(struct pci_dev *dev) { } static inline void pci_restore_msi_state(struct pci_dev *dev) { } static inline int pci_msi_enabled(void) { return 0; } static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec, @@ -1477,8 +1477,9 @@ enum pci_fixup_pass { pci_fixup_final, /* Final phase of device fixups */ pci_fixup_enable, /* pci_enable_device() time */ pci_fixup_resume, /* pci_device_resume() */ - pci_fixup_suspend, /* pci_device_suspend */ + pci_fixup_suspend, /* pci_device_suspend() */ pci_fixup_resume_early, /* pci_device_resume_early() */ + pci_fixup_suspend_late, /* pci_device_suspend_late() */ }; /* Anonymous variables would be nice... 
*/ @@ -1519,6 +1520,11 @@ enum pci_fixup_pass { DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ suspend##hook, vendor, device, class, \ class_shift, hook) +#define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \ + class_shift, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ + suspend_late##hook, vendor, device, \ + class, class_shift, hook) #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ @@ -1544,6 +1550,10 @@ enum pci_fixup_pass { DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ suspend##hook, vendor, device, \ PCI_ANY_ID, 0, hook) +#define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \ + DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ + suspend_late##hook, vendor, device, \ + PCI_ANY_ID, 0, hook) #ifdef CONFIG_PCI_QUIRKS void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 7fa31731c854..6ed0bb73a864 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -6,6 +6,8 @@ * Do not add new entries to this file unless the definitions * are shared between multiple drivers. */ +#ifndef _LINUX_PCI_IDS_H +#define _LINUX_PCI_IDS_H /* Device classes and subclasses */ @@ -2968,3 +2970,5 @@ #define PCI_DEVICE_ID_XEN_PLATFORM 0x0001 #define PCI_VENDOR_ID_OCZ 0x1b85 + +#endif /* _LINUX_PCI_IDS_H */ diff --git a/include/linux/pe.h b/include/linux/pe.h new file mode 100644 index 000000000000..e170b95e763b --- /dev/null +++ b/include/linux/pe.h @@ -0,0 +1,448 @@ +/* + * Copyright 2011 Red Hat, Inc. + * All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + * + * Author(s): Peter Jones <pjones@redhat.com> + */ +#ifndef __LINUX_PE_H +#define __LINUX_PE_H + +#include <linux/types.h> + +#define MZ_MAGIC 0x5a4d /* "MZ" */ + +struct mz_hdr { + uint16_t magic; /* MZ_MAGIC */ + uint16_t lbsize; /* size of last used block */ + uint16_t blocks; /* pages in file, 0x3 */ + uint16_t relocs; /* relocations */ + uint16_t hdrsize; /* header size in "paragraphs" */ + uint16_t min_extra_pps; /* .bss */ + uint16_t max_extra_pps; /* runtime limit for the arena size */ + uint16_t ss; /* relative stack segment */ + uint16_t sp; /* initial %sp register */ + uint16_t checksum; /* word checksum */ + uint16_t ip; /* initial %ip register */ + uint16_t cs; /* initial %cs relative to load segment */ + uint16_t reloc_table_offset; /* offset of the first relocation */ + uint16_t overlay_num; /* overlay number. set to 0. 
*/ + uint16_t reserved0[4]; /* reserved */ + uint16_t oem_id; /* oem identifier */ + uint16_t oem_info; /* oem specific */ + uint16_t reserved1[10]; /* reserved */ + uint32_t peaddr; /* address of pe header */ + char message[64]; /* message to print */ +}; + +struct mz_reloc { + uint16_t offset; + uint16_t segment; +}; + +#define PE_MAGIC 0x00004550 /* "PE\0\0" */ +#define PE_OPT_MAGIC_PE32 0x010b +#define PE_OPT_MAGIC_PE32_ROM 0x0107 +#define PE_OPT_MAGIC_PE32PLUS 0x020b + +/* machine type */ +#define IMAGE_FILE_MACHINE_UNKNOWN 0x0000 +#define IMAGE_FILE_MACHINE_AM33 0x01d3 +#define IMAGE_FILE_MACHINE_AMD64 0x8664 +#define IMAGE_FILE_MACHINE_ARM 0x01c0 +#define IMAGE_FILE_MACHINE_ARMV7 0x01c4 +#define IMAGE_FILE_MACHINE_EBC 0x0ebc +#define IMAGE_FILE_MACHINE_I386 0x014c +#define IMAGE_FILE_MACHINE_IA64 0x0200 +#define IMAGE_FILE_MACHINE_M32R 0x9041 +#define IMAGE_FILE_MACHINE_MIPS16 0x0266 +#define IMAGE_FILE_MACHINE_MIPSFPU 0x0366 +#define IMAGE_FILE_MACHINE_MIPSFPU16 0x0466 +#define IMAGE_FILE_MACHINE_POWERPC 0x01f0 +#define IMAGE_FILE_MACHINE_POWERPCFP 0x01f1 +#define IMAGE_FILE_MACHINE_R4000 0x0166 +#define IMAGE_FILE_MACHINE_SH3 0x01a2 +#define IMAGE_FILE_MACHINE_SH3DSP 0x01a3 +#define IMAGE_FILE_MACHINE_SH3E 0x01a4 +#define IMAGE_FILE_MACHINE_SH4 0x01a6 +#define IMAGE_FILE_MACHINE_SH5 0x01a8 +#define IMAGE_FILE_MACHINE_THUMB 0x01c2 +#define IMAGE_FILE_MACHINE_WCEMIPSV2 0x0169 + +/* flags */ +#define IMAGE_FILE_RELOCS_STRIPPED 0x0001 +#define IMAGE_FILE_EXECUTABLE_IMAGE 0x0002 +#define IMAGE_FILE_LINE_NUMS_STRIPPED 0x0004 +#define IMAGE_FILE_LOCAL_SYMS_STRIPPED 0x0008 +#define IMAGE_FILE_AGGRESSIVE_WS_TRIM 0x0010 +#define IMAGE_FILE_LARGE_ADDRESS_AWARE 0x0020 +#define IMAGE_FILE_16BIT_MACHINE 0x0040 +#define IMAGE_FILE_BYTES_REVERSED_LO 0x0080 +#define IMAGE_FILE_32BIT_MACHINE 0x0100 +#define IMAGE_FILE_DEBUG_STRIPPED 0x0200 +#define IMAGE_FILE_REMOVABLE_RUN_FROM_SWAP 0x0400 +#define IMAGE_FILE_NET_RUN_FROM_SWAP 0x0800 +#define IMAGE_FILE_SYSTEM 0x1000 +#define IMAGE_FILE_DLL 0x2000 +#define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000 +#define IMAGE_FILE_BYTES_REVERSED_HI 0x8000 + +struct pe_hdr { + uint32_t magic; /* PE magic */ + uint16_t machine; /* machine type */ + uint16_t sections; /* number of sections */ + uint32_t timestamp; /* time_t */ + uint32_t symbol_table; /* symbol table offset */ + uint32_t symbols; /* number of symbols */ + uint16_t opt_hdr_size; /* size of optional header */ + uint16_t flags; /* flags */ +}; + +#define IMAGE_FILE_OPT_ROM_MAGIC 0x107 +#define IMAGE_FILE_OPT_PE32_MAGIC 0x10b +#define IMAGE_FILE_OPT_PE32_PLUS_MAGIC 0x20b + +#define IMAGE_SUBSYSTEM_UNKNOWN 0 +#define IMAGE_SUBSYSTEM_NATIVE 1 +#define IMAGE_SUBSYSTEM_WINDOWS_GUI 2 +#define IMAGE_SUBSYSTEM_WINDOWS_CUI 3 +#define IMAGE_SUBSYSTEM_POSIX_CUI 7 +#define IMAGE_SUBSYSTEM_WINDOWS_CE_GUI 9 +#define IMAGE_SUBSYSTEM_EFI_APPLICATION 10 +#define IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER 11 +#define IMAGE_SUBSYSTEM_EFI_RUNTIME_DRIVER 12 +#define IMAGE_SUBSYSTEM_EFI_ROM_IMAGE 13 +#define IMAGE_SUBSYSTEM_XBOX 14 + +#define IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE 0x0040 +#define IMAGE_DLL_CHARACTERISTICS_FORCE_INTEGRITY 0x0080 +#define IMAGE_DLL_CHARACTERISTICS_NX_COMPAT 0x0100 +#define IMAGE_DLLCHARACTERISTICS_NO_ISOLATION 0x0200 +#define IMAGE_DLLCHARACTERISTICS_NO_SEH 0x0400 +#define IMAGE_DLLCHARACTERISTICS_NO_BIND 0x0800 +#define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000 +#define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000 + +/* the fact that pe32 isn't padded where pe32+ is 64-bit means union won't + 
* work right. vomit. */ +struct pe32_opt_hdr { + /* "standard" header */ + uint16_t magic; /* file type */ + uint8_t ld_major; /* linker major version */ + uint8_t ld_minor; /* linker minor version */ + uint32_t text_size; /* size of text section(s) */ + uint32_t data_size; /* size of data section(s) */ + uint32_t bss_size; /* size of bss section(s) */ + uint32_t entry_point; /* file offset of entry point */ + uint32_t code_base; /* relative code addr in ram */ + uint32_t data_base; /* relative data addr in ram */ + /* "windows" header */ + uint32_t image_base; /* preferred load address */ + uint32_t section_align; /* alignment in bytes */ + uint32_t file_align; /* file alignment in bytes */ + uint16_t os_major; /* major OS version */ + uint16_t os_minor; /* minor OS version */ + uint16_t image_major; /* major image version */ + uint16_t image_minor; /* minor image version */ + uint16_t subsys_major; /* major subsystem version */ + uint16_t subsys_minor; /* minor subsystem version */ + uint32_t win32_version; /* reserved, must be 0 */ + uint32_t image_size; /* image size */ + uint32_t header_size; /* header size rounded up to + file_align */ + uint32_t csum; /* checksum */ + uint16_t subsys; /* subsystem */ + uint16_t dll_flags; /* more flags! */ + uint32_t stack_size_req;/* amt of stack requested */ + uint32_t stack_size; /* amt of stack required */ + uint32_t heap_size_req; /* amt of heap requested */ + uint32_t heap_size; /* amt of heap required */ + uint32_t loader_flags; /* reserved, must be 0 */ + uint32_t data_dirs; /* number of data dir entries */ +}; + +struct pe32plus_opt_hdr { + uint16_t magic; /* file type */ + uint8_t ld_major; /* linker major version */ + uint8_t ld_minor; /* linker minor version */ + uint32_t text_size; /* size of text section(s) */ + uint32_t data_size; /* size of data section(s) */ + uint32_t bss_size; /* size of bss section(s) */ + uint32_t entry_point; /* file offset of entry point */ + uint32_t code_base; /* relative code addr in ram */ + /* "windows" header */ + uint64_t image_base; /* preferred load address */ + uint32_t section_align; /* alignment in bytes */ + uint32_t file_align; /* file alignment in bytes */ + uint16_t os_major; /* major OS version */ + uint16_t os_minor; /* minor OS version */ + uint16_t image_major; /* major image version */ + uint16_t image_minor; /* minor image version */ + uint16_t subsys_major; /* major subsystem version */ + uint16_t subsys_minor; /* minor subsystem version */ + uint32_t win32_version; /* reserved, must be 0 */ + uint32_t image_size; /* image size */ + uint32_t header_size; /* header size rounded up to + file_align */ + uint32_t csum; /* checksum */ + uint16_t subsys; /* subsystem */ + uint16_t dll_flags; /* more flags! 
*/ + uint64_t stack_size_req;/* amt of stack requested */ + uint64_t stack_size; /* amt of stack required */ + uint64_t heap_size_req; /* amt of heap requested */ + uint64_t heap_size; /* amt of heap required */ + uint32_t loader_flags; /* reserved, must be 0 */ + uint32_t data_dirs; /* number of data dir entries */ +}; + +struct data_dirent { + uint32_t virtual_address; /* relative to load address */ + uint32_t size; +}; + +struct data_directory { + struct data_dirent exports; /* .edata */ + struct data_dirent imports; /* .idata */ + struct data_dirent resources; /* .rsrc */ + struct data_dirent exceptions; /* .pdata */ + struct data_dirent certs; /* certs */ + struct data_dirent base_relocations; /* .reloc */ + struct data_dirent debug; /* .debug */ + struct data_dirent arch; /* reservered */ + struct data_dirent global_ptr; /* global pointer reg. Size=0 */ + struct data_dirent tls; /* .tls */ + struct data_dirent load_config; /* load configuration structure */ + struct data_dirent bound_imports; /* no idea */ + struct data_dirent import_addrs; /* import address table */ + struct data_dirent delay_imports; /* delay-load import table */ + struct data_dirent clr_runtime_hdr; /* .cor (object only) */ + struct data_dirent reserved; +}; + +struct section_header { + char name[8]; /* name or "/12\0" string tbl offset */ + uint32_t virtual_size; /* size of loaded section in ram */ + uint32_t virtual_address; /* relative virtual address */ + uint32_t raw_data_size; /* size of the section */ + uint32_t data_addr; /* file pointer to first page of sec */ + uint32_t relocs; /* file pointer to relocation entries */ + uint32_t line_numbers; /* line numbers! */ + uint16_t num_relocs; /* number of relocations */ + uint16_t num_lin_numbers; /* srsly. */ + uint32_t flags; +}; + +/* they actually defined 0x00000000 as well, but I think we'll skip that one. 
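
Taken together, the mz_hdr and pe_hdr layouts defined above are enough to locate and validate the PE header inside an image. A minimal sketch of such a check (illustrative only, assuming a little-endian host; the magic values and field names are the ones this header defines):

#include <linux/errno.h>
#include <linux/pe.h>

/* Follow the DOS (MZ) stub to the PE header and verify both magics. */
static int pe_sanity_check(const void *buf, size_t len)
{
        const struct mz_hdr *mz = buf;
        const struct pe_hdr *pe;

        if (len < sizeof(*mz) || mz->magic != MZ_MAGIC)
                return -EINVAL;

        if (mz->peaddr > len - sizeof(*pe))
                return -EINVAL;

        pe = buf + mz->peaddr;
        if (pe->magic != PE_MAGIC)
                return -EINVAL;

        return 0;
}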
*/ +#define IMAGE_SCN_RESERVED_0 0x00000001 +#define IMAGE_SCN_RESERVED_1 0x00000002 +#define IMAGE_SCN_RESERVED_2 0x00000004 +#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */ +#define IMAGE_SCN_RESERVED_3 0x00000010 +#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */ +#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */ +#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */ +#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */ +#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */ +#define IMAGE_SCN_RESERVED_4 0x00000400 +#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/ +#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */ +#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */ +#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */ +#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */ +/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */ +#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */ +#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */ +#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */ +#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */ +/* and here they just stuck a 1-byte integer in the middle of a bitfield */ +#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */ +#define IMAGE_SCN_ALIGN_2BYTES 0x00200000 +#define IMAGE_SCN_ALIGN_4BYTES 0x00300000 +#define IMAGE_SCN_ALIGN_8BYTES 0x00400000 +#define IMAGE_SCN_ALIGN_16BYTES 0x00500000 +#define IMAGE_SCN_ALIGN_32BYTES 0x00600000 +#define IMAGE_SCN_ALIGN_64BYTES 0x00700000 +#define IMAGE_SCN_ALIGN_128BYTES 0x00800000 +#define IMAGE_SCN_ALIGN_256BYTES 0x00900000 +#define IMAGE_SCN_ALIGN_512BYTES 0x00a00000 +#define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000 +#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000 +#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000 +#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000 +#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */ +#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */ +#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */ +#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */ +#define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */ +#define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */ +#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */ +#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */ + +enum x64_coff_reloc_type { + IMAGE_REL_AMD64_ABSOLUTE = 0, + IMAGE_REL_AMD64_ADDR64, + IMAGE_REL_AMD64_ADDR32, + IMAGE_REL_AMD64_ADDR32N, + IMAGE_REL_AMD64_REL32, + IMAGE_REL_AMD64_REL32_1, + IMAGE_REL_AMD64_REL32_2, + IMAGE_REL_AMD64_REL32_3, + IMAGE_REL_AMD64_REL32_4, + IMAGE_REL_AMD64_REL32_5, + IMAGE_REL_AMD64_SECTION, + IMAGE_REL_AMD64_SECREL, + IMAGE_REL_AMD64_SECREL7, + IMAGE_REL_AMD64_TOKEN, + IMAGE_REL_AMD64_SREL32, + IMAGE_REL_AMD64_PAIR, + IMAGE_REL_AMD64_SSPAN32, +}; + +enum arm_coff_reloc_type { + IMAGE_REL_ARM_ABSOLUTE, + IMAGE_REL_ARM_ADDR32, + IMAGE_REL_ARM_ADDR32N, + IMAGE_REL_ARM_BRANCH2, + IMAGE_REL_ARM_BRANCH1, + IMAGE_REL_ARM_SECTION, + IMAGE_REL_ARM_SECREL, +}; + +enum sh_coff_reloc_type { + IMAGE_REL_SH3_ABSOLUTE, + IMAGE_REL_SH3_DIRECT16, + IMAGE_REL_SH3_DIRECT32, + IMAGE_REL_SH3_DIRECT8, + IMAGE_REL_SH3_DIRECT8_WORD, + IMAGE_REL_SH3_DIRECT8_LONG, + IMAGE_REL_SH3_DIRECT4, + IMAGE_REL_SH3_DIRECT4_WORD, + IMAGE_REL_SH3_DIRECT4_LONG, + IMAGE_REL_SH3_PCREL8_WORD, + 
IMAGE_REL_SH3_PCREL8_LONG, + IMAGE_REL_SH3_PCREL12_WORD, + IMAGE_REL_SH3_STARTOF_SECTION, + IMAGE_REL_SH3_SIZEOF_SECTION, + IMAGE_REL_SH3_SECTION, + IMAGE_REL_SH3_SECREL, + IMAGE_REL_SH3_DIRECT32_NB, + IMAGE_REL_SH3_GPREL4_LONG, + IMAGE_REL_SH3_TOKEN, + IMAGE_REL_SHM_PCRELPT, + IMAGE_REL_SHM_REFLO, + IMAGE_REL_SHM_REFHALF, + IMAGE_REL_SHM_RELLO, + IMAGE_REL_SHM_RELHALF, + IMAGE_REL_SHM_PAIR, + IMAGE_REL_SHM_NOMODE, +}; + +enum ppc_coff_reloc_type { + IMAGE_REL_PPC_ABSOLUTE, + IMAGE_REL_PPC_ADDR64, + IMAGE_REL_PPC_ADDR32, + IMAGE_REL_PPC_ADDR24, + IMAGE_REL_PPC_ADDR16, + IMAGE_REL_PPC_ADDR14, + IMAGE_REL_PPC_REL24, + IMAGE_REL_PPC_REL14, + IMAGE_REL_PPC_ADDR32N, + IMAGE_REL_PPC_SECREL, + IMAGE_REL_PPC_SECTION, + IMAGE_REL_PPC_SECREL16, + IMAGE_REL_PPC_REFHI, + IMAGE_REL_PPC_REFLO, + IMAGE_REL_PPC_PAIR, + IMAGE_REL_PPC_SECRELLO, + IMAGE_REL_PPC_GPREL, + IMAGE_REL_PPC_TOKEN, +}; + +enum x86_coff_reloc_type { + IMAGE_REL_I386_ABSOLUTE, + IMAGE_REL_I386_DIR16, + IMAGE_REL_I386_REL16, + IMAGE_REL_I386_DIR32, + IMAGE_REL_I386_DIR32NB, + IMAGE_REL_I386_SEG12, + IMAGE_REL_I386_SECTION, + IMAGE_REL_I386_SECREL, + IMAGE_REL_I386_TOKEN, + IMAGE_REL_I386_SECREL7, + IMAGE_REL_I386_REL32, +}; + +enum ia64_coff_reloc_type { + IMAGE_REL_IA64_ABSOLUTE, + IMAGE_REL_IA64_IMM14, + IMAGE_REL_IA64_IMM22, + IMAGE_REL_IA64_IMM64, + IMAGE_REL_IA64_DIR32, + IMAGE_REL_IA64_DIR64, + IMAGE_REL_IA64_PCREL21B, + IMAGE_REL_IA64_PCREL21M, + IMAGE_REL_IA64_PCREL21F, + IMAGE_REL_IA64_GPREL22, + IMAGE_REL_IA64_LTOFF22, + IMAGE_REL_IA64_SECTION, + IMAGE_REL_IA64_SECREL22, + IMAGE_REL_IA64_SECREL64I, + IMAGE_REL_IA64_SECREL32, + IMAGE_REL_IA64_DIR32NB, + IMAGE_REL_IA64_SREL14, + IMAGE_REL_IA64_SREL22, + IMAGE_REL_IA64_SREL32, + IMAGE_REL_IA64_UREL32, + IMAGE_REL_IA64_PCREL60X, + IMAGE_REL_IA64_PCREL60B, + IMAGE_REL_IA64_PCREL60F, + IMAGE_REL_IA64_PCREL60I, + IMAGE_REL_IA64_PCREL60M, + IMAGE_REL_IA64_IMMGPREL6, + IMAGE_REL_IA64_TOKEN, + IMAGE_REL_IA64_GPREL32, + IMAGE_REL_IA64_ADDEND, +}; + +struct coff_reloc { + uint32_t virtual_address; + uint32_t symbol_table_index; + union { + enum x64_coff_reloc_type x64_type; + enum arm_coff_reloc_type arm_type; + enum sh_coff_reloc_type sh_type; + enum ppc_coff_reloc_type ppc_type; + enum x86_coff_reloc_type x86_type; + enum ia64_coff_reloc_type ia64_type; + uint16_t data; + }; +}; + +/* + * Definitions for the contents of the certs data block + */ +#define WIN_CERT_TYPE_PKCS_SIGNED_DATA 0x0002 +#define WIN_CERT_TYPE_EFI_OKCS115 0x0EF0 +#define WIN_CERT_TYPE_EFI_GUID 0x0EF1 + +#define WIN_CERT_REVISION_1_0 0x0100 +#define WIN_CERT_REVISION_2_0 0x0200 + +struct win_certificate { + uint32_t length; + uint16_t revision; + uint16_t cert_type; +}; + +#endif /* __LINUX_PE_H */ diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index dec01d6c3f80..cfd56046ecec 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -1,6 +1,40 @@ +/* + * linux/percpu-defs.h - basic definitions for percpu areas + * + * DO NOT INCLUDE DIRECTLY OUTSIDE PERCPU IMPLEMENTATION PROPER. + * + * This file is separate from linux/percpu.h to avoid cyclic inclusion + * dependency from arch header files. Only to be included from + * asm/percpu.h. + * + * This file includes macros necessary to declare percpu sections and + * variables, and definitions of percpu accessors and operations. It + * should provide enough percpu features to arch header files even when + * they can only include asm/percpu.h to avoid cyclic inclusion dependency. 
+ */ + #ifndef _LINUX_PERCPU_DEFS_H #define _LINUX_PERCPU_DEFS_H +#ifdef CONFIG_SMP + +#ifdef MODULE +#define PER_CPU_SHARED_ALIGNED_SECTION "" +#define PER_CPU_ALIGNED_SECTION "" +#else +#define PER_CPU_SHARED_ALIGNED_SECTION "..shared_aligned" +#define PER_CPU_ALIGNED_SECTION "..shared_aligned" +#endif +#define PER_CPU_FIRST_SECTION "..first" + +#else + +#define PER_CPU_SHARED_ALIGNED_SECTION "" +#define PER_CPU_ALIGNED_SECTION "..shared_aligned" +#define PER_CPU_FIRST_SECTION "" + +#endif + /* * Base implementations of per-CPU variable declarations and definitions, where * the section in which the variable is to be placed is provided by the @@ -19,19 +53,6 @@ __attribute__((section(".discard"), unused)) /* - * Macro which verifies @ptr is a percpu pointer without evaluating - * @ptr. This is to be used in percpu accessors to verify that the - * input parameter is a percpu pointer. - * - * + 0 is required in order to convert the pointer type from a - * potential array type to a pointer to a single item of the array. - */ -#define __verify_pcpu_ptr(ptr) do { \ - const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ - (void)__vpp_verify; \ -} while (0) - -/* * s390 and alpha modules require percpu variables to be defined as * weak to force the compiler to generate GOT based external * references for them. This is necessary because percpu sections @@ -164,4 +185,337 @@ #define EXPORT_PER_CPU_SYMBOL_GPL(var) #endif +/* + * Accessors and operations. + */ +#ifndef __ASSEMBLY__ + +/* + * __verify_pcpu_ptr() verifies @ptr is a percpu pointer without evaluating + * @ptr and is invoked once before a percpu area is accessed by all + * accessors and operations. This is performed in the generic part of + * percpu and arch overrides don't need to worry about it; however, if an + * arch wants to implement an arch-specific percpu accessor or operation, + * it may use __verify_pcpu_ptr() to verify the parameters. + * + * + 0 is required in order to convert the pointer type from a + * potential array type to a pointer to a single item of the array. + */ +#define __verify_pcpu_ptr(ptr) \ +do { \ + const void __percpu *__vpp_verify = (typeof((ptr) + 0))NULL; \ + (void)__vpp_verify; \ +} while (0) + +#ifdef CONFIG_SMP + +/* + * Add an offset to a pointer but keep the pointer as-is. Use RELOC_HIDE() + * to prevent the compiler from making incorrect assumptions about the + * pointer value. The weird cast keeps both GCC and sparse happy. 
+ */ +#define SHIFT_PERCPU_PTR(__p, __offset) \ + RELOC_HIDE((typeof(*(__p)) __kernel __force *)(__p), (__offset)) + +#define per_cpu_ptr(ptr, cpu) \ +({ \ + __verify_pcpu_ptr(ptr); \ + SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))); \ +}) + +#define raw_cpu_ptr(ptr) \ +({ \ + __verify_pcpu_ptr(ptr); \ + arch_raw_cpu_ptr(ptr); \ +}) + +#ifdef CONFIG_DEBUG_PREEMPT +#define this_cpu_ptr(ptr) \ +({ \ + __verify_pcpu_ptr(ptr); \ + SHIFT_PERCPU_PTR(ptr, my_cpu_offset); \ +}) +#else +#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) +#endif + +#else /* CONFIG_SMP */ + +#define VERIFY_PERCPU_PTR(__p) \ +({ \ + __verify_pcpu_ptr(__p); \ + (typeof(*(__p)) __kernel __force *)(__p); \ +}) + +#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR(ptr); }) +#define raw_cpu_ptr(ptr) per_cpu_ptr(ptr, 0) +#define this_cpu_ptr(ptr) raw_cpu_ptr(ptr) + +#endif /* CONFIG_SMP */ + +#define per_cpu(var, cpu) (*per_cpu_ptr(&(var), cpu)) +#define __raw_get_cpu_var(var) (*raw_cpu_ptr(&(var))) +#define __get_cpu_var(var) (*this_cpu_ptr(&(var))) + +/* keep until we have removed all uses of __this_cpu_ptr */ +#define __this_cpu_ptr(ptr) raw_cpu_ptr(ptr) + +/* + * Must be an lvalue. Since @var must be a simple identifier, + * we force a syntax error here if it isn't. + */ +#define get_cpu_var(var) \ +(*({ \ + preempt_disable(); \ + this_cpu_ptr(&var); \ +})) + +/* + * The weird & is necessary because sparse considers (void)(var) to be + * a direct dereference of percpu variable (var). + */ +#define put_cpu_var(var) \ +do { \ + (void)&(var); \ + preempt_enable(); \ +} while (0) + +#define get_cpu_ptr(var) \ +({ \ + preempt_disable(); \ + this_cpu_ptr(var); \ +}) + +#define put_cpu_ptr(var) \ +do { \ + (void)(var); \ + preempt_enable(); \ +} while (0) + +/* + * Branching function to split up a function into a set of functions that + * are called for different scalar sizes of the objects handled. + */ + +extern void __bad_size_call_parameter(void); + +#ifdef CONFIG_DEBUG_PREEMPT +extern void __this_cpu_preempt_check(const char *op); +#else +static inline void __this_cpu_preempt_check(const char *op) { } +#endif + +#define __pcpu_size_call_return(stem, variable) \ +({ \ + typeof(variable) pscr_ret__; \ + __verify_pcpu_ptr(&(variable)); \ + switch(sizeof(variable)) { \ + case 1: pscr_ret__ = stem##1(variable); break; \ + case 2: pscr_ret__ = stem##2(variable); break; \ + case 4: pscr_ret__ = stem##4(variable); break; \ + case 8: pscr_ret__ = stem##8(variable); break; \ + default: \ + __bad_size_call_parameter(); break; \ + } \ + pscr_ret__; \ +}) + +#define __pcpu_size_call_return2(stem, variable, ...) \ +({ \ + typeof(variable) pscr2_ret__; \ + __verify_pcpu_ptr(&(variable)); \ + switch(sizeof(variable)) { \ + case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ + case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ + case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ + case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ + default: \ + __bad_size_call_parameter(); break; \ + } \ + pscr2_ret__; \ +}) + +/* + * Special handling for cmpxchg_double. cmpxchg_double is passed two + * percpu variables. The first has to be aligned to a double word + * boundary and the second has to follow directly thereafter. + * We enforce this on all architectures even if they don't support + * a double cmpxchg instruction, since it's a cheap requirement, and it + * avoids breaking the requirement for architectures with the instruction. 
+ */ +#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ +({ \ + bool pdcrb_ret__; \ + __verify_pcpu_ptr(&(pcp1)); \ + BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ + VM_BUG_ON((unsigned long)(&(pcp1)) % (2 * sizeof(pcp1))); \ + VM_BUG_ON((unsigned long)(&(pcp2)) != \ + (unsigned long)(&(pcp1)) + sizeof(pcp1)); \ + switch(sizeof(pcp1)) { \ + case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ + case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ + case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \ + case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \ + default: \ + __bad_size_call_parameter(); break; \ + } \ + pdcrb_ret__; \ +}) + +#define __pcpu_size_call(stem, variable, ...) \ +do { \ + __verify_pcpu_ptr(&(variable)); \ + switch(sizeof(variable)) { \ + case 1: stem##1(variable, __VA_ARGS__);break; \ + case 2: stem##2(variable, __VA_ARGS__);break; \ + case 4: stem##4(variable, __VA_ARGS__);break; \ + case 8: stem##8(variable, __VA_ARGS__);break; \ + default: \ + __bad_size_call_parameter();break; \ + } \ +} while (0) + +/* + * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com> + * + * Optimized manipulation for memory allocated through the per cpu + * allocator or for addresses of per cpu variables. + * + * These operation guarantee exclusivity of access for other operations + * on the *same* processor. The assumption is that per cpu data is only + * accessed by a single processor instance (the current one). + * + * The arch code can provide optimized implementation by defining macros + * for certain scalar sizes. F.e. provide this_cpu_add_2() to provide per + * cpu atomic operations for 2 byte sized RMW actions. If arch code does + * not provide operations for a scalar size then the fallback in the + * generic code will be used. + * + * cmpxchg_double replaces two adjacent scalars at once. The first two + * parameters are per cpu variables which have to be of the same size. A + * truth value is returned to indicate success or failure (since a double + * register result is difficult to handle). There is very limited hardware + * support for these operations, so only certain sizes may work. + */ + +/* + * Operations for contexts where we do not want to do any checks for + * preemptions. Unless strictly necessary, always use [__]this_cpu_*() + * instead. + * + * If there is no other protection through preempt disable and/or disabling + * interupts then one of these RMW operations can show unexpected behavior + * because the execution thread was rescheduled on another processor or an + * interrupt occurred and the same percpu variable was modified from the + * interrupt context. 
+ */ +#define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, pcp) +#define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, pcp, val) +#define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, pcp, val) +#define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, pcp, val) +#define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, pcp, val) +#define raw_cpu_add_return(pcp, val) __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) +#define raw_cpu_xchg(pcp, nval) __pcpu_size_call_return2(raw_cpu_xchg_, pcp, nval) +#define raw_cpu_cmpxchg(pcp, oval, nval) \ + __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) +#define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) + +#define raw_cpu_sub(pcp, val) raw_cpu_add(pcp, -(val)) +#define raw_cpu_inc(pcp) raw_cpu_add(pcp, 1) +#define raw_cpu_dec(pcp) raw_cpu_sub(pcp, 1) +#define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) +#define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1) +#define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1) + +/* + * Operations for contexts that are safe from preemption/interrupts. These + * operations verify that preemption is disabled. + */ +#define __this_cpu_read(pcp) \ +({ \ + __this_cpu_preempt_check("read"); \ + raw_cpu_read(pcp); \ +}) + +#define __this_cpu_write(pcp, val) \ +({ \ + __this_cpu_preempt_check("write"); \ + raw_cpu_write(pcp, val); \ +}) + +#define __this_cpu_add(pcp, val) \ +({ \ + __this_cpu_preempt_check("add"); \ + raw_cpu_add(pcp, val); \ +}) + +#define __this_cpu_and(pcp, val) \ +({ \ + __this_cpu_preempt_check("and"); \ + raw_cpu_and(pcp, val); \ +}) + +#define __this_cpu_or(pcp, val) \ +({ \ + __this_cpu_preempt_check("or"); \ + raw_cpu_or(pcp, val); \ +}) + +#define __this_cpu_add_return(pcp, val) \ +({ \ + __this_cpu_preempt_check("add_return"); \ + raw_cpu_add_return(pcp, val); \ +}) + +#define __this_cpu_xchg(pcp, nval) \ +({ \ + __this_cpu_preempt_check("xchg"); \ + raw_cpu_xchg(pcp, nval); \ +}) + +#define __this_cpu_cmpxchg(pcp, oval, nval) \ +({ \ + __this_cpu_preempt_check("cmpxchg"); \ + raw_cpu_cmpxchg(pcp, oval, nval); \ +}) + +#define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ +({ __this_cpu_preempt_check("cmpxchg_double"); \ + raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2); \ +}) + +#define __this_cpu_sub(pcp, val) __this_cpu_add(pcp, -(typeof(pcp))(val)) +#define __this_cpu_inc(pcp) __this_cpu_add(pcp, 1) +#define __this_cpu_dec(pcp) __this_cpu_sub(pcp, 1) +#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) +#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) +#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) + +/* + * Operations with implied preemption protection. These operations can be + * used without worrying about preemption. Note that interrupts may still + * occur while an operation is in progress and if the interrupt modifies + * the variable too then RMW actions may not be reliable. 
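The distinction between the raw_cpu_*(), __this_cpu_*() and this_cpu_*() families spelled out in the comments above can be summarized with a short sketch (the counter and callers are invented, not part of the patch):

#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, my_stat);

static void my_update_from_anywhere(void)
{
	/* this_cpu_*(): safe regardless of preemption state. */
	this_cpu_inc(my_stat);
}

static void my_update_preempt_off(void)
{
	preempt_disable();
	/* __this_cpu_*(): caller guarantees no preemption; checked under
	 * CONFIG_DEBUG_PREEMPT via __this_cpu_preempt_check(). */
	__this_cpu_inc(my_stat);
	/* raw_cpu_*(): no checks at all; only for contexts where the checks
	 * themselves are unsafe or the race is known to be acceptable. */
	raw_cpu_inc(my_stat);
	preempt_enable();
}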
+ */ +#define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, pcp) +#define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, pcp, val) +#define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, pcp, val) +#define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, pcp, val) +#define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, pcp, val) +#define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) +#define this_cpu_xchg(pcp, nval) __pcpu_size_call_return2(this_cpu_xchg_, pcp, nval) +#define this_cpu_cmpxchg(pcp, oval, nval) \ + __pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) +#define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ + __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, pcp1, pcp2, oval1, oval2, nval1, nval2) + +#define this_cpu_sub(pcp, val) this_cpu_add(pcp, -(typeof(pcp))(val)) +#define this_cpu_inc(pcp) this_cpu_add(pcp, 1) +#define this_cpu_dec(pcp) this_cpu_sub(pcp, 1) +#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) +#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) +#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) + +#endif /* __ASSEMBLY__ */ #endif /* _LINUX_PERCPU_DEFS_H */ diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 5d8920e23073..3dfbf237cd8f 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h @@ -57,11 +57,9 @@ struct percpu_ref { atomic_t count; /* * The low bit of the pointer indicates whether the ref is in percpu - * mode; if set, then get/put will manipulate the atomic_t (this is a - * hack because we need to keep the pointer around for - * percpu_ref_kill_rcu()) + * mode; if set, then get/put will manipulate the atomic_t. */ - unsigned __percpu *pcpu_count; + unsigned long pcpu_count_ptr; percpu_ref_func_t *release; percpu_ref_func_t *confirm_kill; struct rcu_head rcu; @@ -69,7 +67,8 @@ struct percpu_ref { int __must_check percpu_ref_init(struct percpu_ref *ref, percpu_ref_func_t *release); -void percpu_ref_cancel_init(struct percpu_ref *ref); +void percpu_ref_reinit(struct percpu_ref *ref); +void percpu_ref_exit(struct percpu_ref *ref); void percpu_ref_kill_and_confirm(struct percpu_ref *ref, percpu_ref_func_t *confirm_kill); @@ -88,12 +87,28 @@ static inline void percpu_ref_kill(struct percpu_ref *ref) return percpu_ref_kill_and_confirm(ref, NULL); } -#define PCPU_STATUS_BITS 2 -#define PCPU_STATUS_MASK ((1 << PCPU_STATUS_BITS) - 1) -#define PCPU_REF_PTR 0 #define PCPU_REF_DEAD 1 -#define REF_STATUS(count) (((unsigned long) count) & PCPU_STATUS_MASK) +/* + * Internal helper. Don't use outside percpu-refcount proper. The + * function doesn't return the pointer and let the caller test it for NULL + * because doing so forces the compiler to generate two conditional + * branches as it can't assume that @ref->pcpu_count is not NULL. 
+ */ +static inline bool __pcpu_ref_alive(struct percpu_ref *ref, + unsigned __percpu **pcpu_countp) +{ + unsigned long pcpu_ptr = ACCESS_ONCE(ref->pcpu_count_ptr); + + /* paired with smp_store_release() in percpu_ref_reinit() */ + smp_read_barrier_depends(); + + if (unlikely(pcpu_ptr & PCPU_REF_DEAD)) + return false; + + *pcpu_countp = (unsigned __percpu *)pcpu_ptr; + return true; +} /** * percpu_ref_get - increment a percpu refcount @@ -107,9 +122,7 @@ static inline void percpu_ref_get(struct percpu_ref *ref) rcu_read_lock_sched(); - pcpu_count = ACCESS_ONCE(ref->pcpu_count); - - if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) + if (__pcpu_ref_alive(ref, &pcpu_count)) this_cpu_inc(*pcpu_count); else atomic_inc(&ref->count); @@ -133,9 +146,7 @@ static inline bool percpu_ref_tryget(struct percpu_ref *ref) rcu_read_lock_sched(); - pcpu_count = ACCESS_ONCE(ref->pcpu_count); - - if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) { + if (__pcpu_ref_alive(ref, &pcpu_count)) { this_cpu_inc(*pcpu_count); ret = true; } else { @@ -168,9 +179,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref) rcu_read_lock_sched(); - pcpu_count = ACCESS_ONCE(ref->pcpu_count); - - if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) { + if (__pcpu_ref_alive(ref, &pcpu_count)) { this_cpu_inc(*pcpu_count); ret = true; } @@ -193,9 +202,7 @@ static inline void percpu_ref_put(struct percpu_ref *ref) rcu_read_lock_sched(); - pcpu_count = ACCESS_ONCE(ref->pcpu_count); - - if (likely(REF_STATUS(pcpu_count) == PCPU_REF_PTR)) + if (__pcpu_ref_alive(ref, &pcpu_count)) this_cpu_dec(*pcpu_count); else if (unlikely(atomic_dec_and_test(&ref->count))) ref->release(ref); @@ -203,4 +210,19 @@ static inline void percpu_ref_put(struct percpu_ref *ref) rcu_read_unlock_sched(); } +/** + * percpu_ref_is_zero - test whether a percpu refcount reached zero + * @ref: percpu_ref to test + * + * Returns %true if @ref reached zero. + */ +static inline bool percpu_ref_is_zero(struct percpu_ref *ref) +{ + unsigned __percpu *pcpu_count; + + if (__pcpu_ref_alive(ref, &pcpu_count)) + return false; + return !atomic_read(&ref->count); +} + #endif diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 8419053d0f2e..6f61b61b7996 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -23,32 +23,6 @@ PERCPU_MODULE_RESERVE) #endif -/* - * Must be an lvalue. Since @var must be a simple identifier, - * we force a syntax error here if it isn't. - */ -#define get_cpu_var(var) (*({ \ - preempt_disable(); \ - this_cpu_ptr(&var); })) - -/* - * The weird & is necessary because sparse considers (void)(var) to be - * a direct dereference of percpu variable (var). - */ -#define put_cpu_var(var) do { \ - (void)&(var); \ - preempt_enable(); \ -} while (0) - -#define get_cpu_ptr(var) ({ \ - preempt_disable(); \ - this_cpu_ptr(var); }) - -#define put_cpu_ptr(var) do { \ - (void)(var); \ - preempt_enable(); \ -} while (0) - /* minimum unit size, also is the maximum supported allocation size */ #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) @@ -140,17 +114,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, pcpu_fc_populate_pte_fn_t populate_pte_fn); #endif -/* - * Use this to get to a cpu's version of the per-cpu object - * dynamically allocated. Non-atomic access to the current CPU's - * version should probably be combined with get_cpu()/put_cpu(). 
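The reworked percpu_ref above (pcpu_count_ptr, percpu_ref_reinit()/percpu_ref_exit(), percpu_ref_is_zero()) is typically used along these lines; a hedged sketch only, with my_obj and its lifetime rules invented:

#include <linux/percpu-refcount.h>
#include <linux/slab.h>

struct my_obj {
	struct percpu_ref ref;
};

static void my_obj_release(struct percpu_ref *ref)
{
	struct my_obj *obj = container_of(ref, struct my_obj, ref);

	percpu_ref_exit(&obj->ref);	/* free the percpu counter */
	kfree(obj);
}

static struct my_obj *my_obj_create(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return NULL;
	if (percpu_ref_init(&obj->ref, my_obj_release)) {
		kfree(obj);
		return NULL;
	}
	return obj;
}

/* Fast path: percpu_ref_get(&obj->ref); ... percpu_ref_put(&obj->ref);
 * Teardown: percpu_ref_kill(&obj->ref) switches the ref to atomic mode and
 * drops the initial reference; my_obj_release() runs once the count hits
 * zero, which percpu_ref_is_zero() can also be used to poll. */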
- */ -#ifdef CONFIG_SMP -#define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) -#else -#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); }) -#endif - extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); extern bool is_kernel_percpu_address(unsigned long addr); @@ -166,640 +129,4 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr); #define alloc_percpu(type) \ (typeof(type) __percpu *)__alloc_percpu(sizeof(type), __alignof__(type)) -/* - * Branching function to split up a function into a set of functions that - * are called for different scalar sizes of the objects handled. - */ - -extern void __bad_size_call_parameter(void); - -#ifdef CONFIG_DEBUG_PREEMPT -extern void __this_cpu_preempt_check(const char *op); -#else -static inline void __this_cpu_preempt_check(const char *op) { } -#endif - -#define __pcpu_size_call_return(stem, variable) \ -({ typeof(variable) pscr_ret__; \ - __verify_pcpu_ptr(&(variable)); \ - switch(sizeof(variable)) { \ - case 1: pscr_ret__ = stem##1(variable);break; \ - case 2: pscr_ret__ = stem##2(variable);break; \ - case 4: pscr_ret__ = stem##4(variable);break; \ - case 8: pscr_ret__ = stem##8(variable);break; \ - default: \ - __bad_size_call_parameter();break; \ - } \ - pscr_ret__; \ -}) - -#define __pcpu_size_call_return2(stem, variable, ...) \ -({ \ - typeof(variable) pscr2_ret__; \ - __verify_pcpu_ptr(&(variable)); \ - switch(sizeof(variable)) { \ - case 1: pscr2_ret__ = stem##1(variable, __VA_ARGS__); break; \ - case 2: pscr2_ret__ = stem##2(variable, __VA_ARGS__); break; \ - case 4: pscr2_ret__ = stem##4(variable, __VA_ARGS__); break; \ - case 8: pscr2_ret__ = stem##8(variable, __VA_ARGS__); break; \ - default: \ - __bad_size_call_parameter(); break; \ - } \ - pscr2_ret__; \ -}) - -/* - * Special handling for cmpxchg_double. cmpxchg_double is passed two - * percpu variables. The first has to be aligned to a double word - * boundary and the second has to follow directly thereafter. - * We enforce this on all architectures even if they don't support - * a double cmpxchg instruction, since it's a cheap requirement, and it - * avoids breaking the requirement for architectures with the instruction. - */ -#define __pcpu_double_call_return_bool(stem, pcp1, pcp2, ...) \ -({ \ - bool pdcrb_ret__; \ - __verify_pcpu_ptr(&pcp1); \ - BUILD_BUG_ON(sizeof(pcp1) != sizeof(pcp2)); \ - VM_BUG_ON((unsigned long)(&pcp1) % (2 * sizeof(pcp1))); \ - VM_BUG_ON((unsigned long)(&pcp2) != \ - (unsigned long)(&pcp1) + sizeof(pcp1)); \ - switch(sizeof(pcp1)) { \ - case 1: pdcrb_ret__ = stem##1(pcp1, pcp2, __VA_ARGS__); break; \ - case 2: pdcrb_ret__ = stem##2(pcp1, pcp2, __VA_ARGS__); break; \ - case 4: pdcrb_ret__ = stem##4(pcp1, pcp2, __VA_ARGS__); break; \ - case 8: pdcrb_ret__ = stem##8(pcp1, pcp2, __VA_ARGS__); break; \ - default: \ - __bad_size_call_parameter(); break; \ - } \ - pdcrb_ret__; \ -}) - -#define __pcpu_size_call(stem, variable, ...) \ -do { \ - __verify_pcpu_ptr(&(variable)); \ - switch(sizeof(variable)) { \ - case 1: stem##1(variable, __VA_ARGS__);break; \ - case 2: stem##2(variable, __VA_ARGS__);break; \ - case 4: stem##4(variable, __VA_ARGS__);break; \ - case 8: stem##8(variable, __VA_ARGS__);break; \ - default: \ - __bad_size_call_parameter();break; \ - } \ -} while (0) - -/* - * this_cpu operations (C) 2008-2013 Christoph Lameter <cl@linux.com> - * - * Optimized manipulation for memory allocated through the per cpu - * allocator or for addresses of per cpu variables. 
- * - * These operation guarantee exclusivity of access for other operations - * on the *same* processor. The assumption is that per cpu data is only - * accessed by a single processor instance (the current one). - * - * The first group is used for accesses that must be done in a - * preemption safe way since we know that the context is not preempt - * safe. Interrupts may occur. If the interrupt modifies the variable - * too then RMW actions will not be reliable. - * - * The arch code can provide optimized functions in two ways: - * - * 1. Override the function completely. F.e. define this_cpu_add(). - * The arch must then ensure that the various scalar format passed - * are handled correctly. - * - * 2. Provide functions for certain scalar sizes. F.e. provide - * this_cpu_add_2() to provide per cpu atomic operations for 2 byte - * sized RMW actions. If arch code does not provide operations for - * a scalar size then the fallback in the generic code will be - * used. - */ - -#define _this_cpu_generic_read(pcp) \ -({ typeof(pcp) ret__; \ - preempt_disable(); \ - ret__ = *this_cpu_ptr(&(pcp)); \ - preempt_enable(); \ - ret__; \ -}) - -#ifndef this_cpu_read -# ifndef this_cpu_read_1 -# define this_cpu_read_1(pcp) _this_cpu_generic_read(pcp) -# endif -# ifndef this_cpu_read_2 -# define this_cpu_read_2(pcp) _this_cpu_generic_read(pcp) -# endif -# ifndef this_cpu_read_4 -# define this_cpu_read_4(pcp) _this_cpu_generic_read(pcp) -# endif -# ifndef this_cpu_read_8 -# define this_cpu_read_8(pcp) _this_cpu_generic_read(pcp) -# endif -# define this_cpu_read(pcp) __pcpu_size_call_return(this_cpu_read_, (pcp)) -#endif - -#define _this_cpu_generic_to_op(pcp, val, op) \ -do { \ - unsigned long flags; \ - raw_local_irq_save(flags); \ - *raw_cpu_ptr(&(pcp)) op val; \ - raw_local_irq_restore(flags); \ -} while (0) - -#ifndef this_cpu_write -# ifndef this_cpu_write_1 -# define this_cpu_write_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) -# endif -# ifndef this_cpu_write_2 -# define this_cpu_write_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) -# endif -# ifndef this_cpu_write_4 -# define this_cpu_write_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) -# endif -# ifndef this_cpu_write_8 -# define this_cpu_write_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), =) -# endif -# define this_cpu_write(pcp, val) __pcpu_size_call(this_cpu_write_, (pcp), (val)) -#endif - -#ifndef this_cpu_add -# ifndef this_cpu_add_1 -# define this_cpu_add_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) -# endif -# ifndef this_cpu_add_2 -# define this_cpu_add_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) -# endif -# ifndef this_cpu_add_4 -# define this_cpu_add_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) -# endif -# ifndef this_cpu_add_8 -# define this_cpu_add_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), +=) -# endif -# define this_cpu_add(pcp, val) __pcpu_size_call(this_cpu_add_, (pcp), (val)) -#endif - -#ifndef this_cpu_sub -# define this_cpu_sub(pcp, val) this_cpu_add((pcp), -(typeof(pcp))(val)) -#endif - -#ifndef this_cpu_inc -# define this_cpu_inc(pcp) this_cpu_add((pcp), 1) -#endif - -#ifndef this_cpu_dec -# define this_cpu_dec(pcp) this_cpu_sub((pcp), 1) -#endif - -#ifndef this_cpu_and -# ifndef this_cpu_and_1 -# define this_cpu_and_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) -# endif -# ifndef this_cpu_and_2 -# define this_cpu_and_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) -# endif -# ifndef this_cpu_and_4 -# define this_cpu_and_4(pcp, val) 
_this_cpu_generic_to_op((pcp), (val), &=) -# endif -# ifndef this_cpu_and_8 -# define this_cpu_and_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), &=) -# endif -# define this_cpu_and(pcp, val) __pcpu_size_call(this_cpu_and_, (pcp), (val)) -#endif - -#ifndef this_cpu_or -# ifndef this_cpu_or_1 -# define this_cpu_or_1(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) -# endif -# ifndef this_cpu_or_2 -# define this_cpu_or_2(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) -# endif -# ifndef this_cpu_or_4 -# define this_cpu_or_4(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) -# endif -# ifndef this_cpu_or_8 -# define this_cpu_or_8(pcp, val) _this_cpu_generic_to_op((pcp), (val), |=) -# endif -# define this_cpu_or(pcp, val) __pcpu_size_call(this_cpu_or_, (pcp), (val)) -#endif - -#define _this_cpu_generic_add_return(pcp, val) \ -({ \ - typeof(pcp) ret__; \ - unsigned long flags; \ - raw_local_irq_save(flags); \ - raw_cpu_add(pcp, val); \ - ret__ = raw_cpu_read(pcp); \ - raw_local_irq_restore(flags); \ - ret__; \ -}) - -#ifndef this_cpu_add_return -# ifndef this_cpu_add_return_1 -# define this_cpu_add_return_1(pcp, val) _this_cpu_generic_add_return(pcp, val) -# endif -# ifndef this_cpu_add_return_2 -# define this_cpu_add_return_2(pcp, val) _this_cpu_generic_add_return(pcp, val) -# endif -# ifndef this_cpu_add_return_4 -# define this_cpu_add_return_4(pcp, val) _this_cpu_generic_add_return(pcp, val) -# endif -# ifndef this_cpu_add_return_8 -# define this_cpu_add_return_8(pcp, val) _this_cpu_generic_add_return(pcp, val) -# endif -# define this_cpu_add_return(pcp, val) __pcpu_size_call_return2(this_cpu_add_return_, pcp, val) -#endif - -#define this_cpu_sub_return(pcp, val) this_cpu_add_return(pcp, -(typeof(pcp))(val)) -#define this_cpu_inc_return(pcp) this_cpu_add_return(pcp, 1) -#define this_cpu_dec_return(pcp) this_cpu_add_return(pcp, -1) - -#define _this_cpu_generic_xchg(pcp, nval) \ -({ typeof(pcp) ret__; \ - unsigned long flags; \ - raw_local_irq_save(flags); \ - ret__ = raw_cpu_read(pcp); \ - raw_cpu_write(pcp, nval); \ - raw_local_irq_restore(flags); \ - ret__; \ -}) - -#ifndef this_cpu_xchg -# ifndef this_cpu_xchg_1 -# define this_cpu_xchg_1(pcp, nval) _this_cpu_generic_xchg(pcp, nval) -# endif -# ifndef this_cpu_xchg_2 -# define this_cpu_xchg_2(pcp, nval) _this_cpu_generic_xchg(pcp, nval) -# endif -# ifndef this_cpu_xchg_4 -# define this_cpu_xchg_4(pcp, nval) _this_cpu_generic_xchg(pcp, nval) -# endif -# ifndef this_cpu_xchg_8 -# define this_cpu_xchg_8(pcp, nval) _this_cpu_generic_xchg(pcp, nval) -# endif -# define this_cpu_xchg(pcp, nval) \ - __pcpu_size_call_return2(this_cpu_xchg_, (pcp), nval) -#endif - -#define _this_cpu_generic_cmpxchg(pcp, oval, nval) \ -({ \ - typeof(pcp) ret__; \ - unsigned long flags; \ - raw_local_irq_save(flags); \ - ret__ = raw_cpu_read(pcp); \ - if (ret__ == (oval)) \ - raw_cpu_write(pcp, nval); \ - raw_local_irq_restore(flags); \ - ret__; \ -}) - -#ifndef this_cpu_cmpxchg -# ifndef this_cpu_cmpxchg_1 -# define this_cpu_cmpxchg_1(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# ifndef this_cpu_cmpxchg_2 -# define this_cpu_cmpxchg_2(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# ifndef this_cpu_cmpxchg_4 -# define this_cpu_cmpxchg_4(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# ifndef this_cpu_cmpxchg_8 -# define this_cpu_cmpxchg_8(pcp, oval, nval) _this_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# define this_cpu_cmpxchg(pcp, oval, nval) \ - 
__pcpu_size_call_return2(this_cpu_cmpxchg_, pcp, oval, nval) -#endif - -/* - * cmpxchg_double replaces two adjacent scalars at once. The first - * two parameters are per cpu variables which have to be of the same - * size. A truth value is returned to indicate success or failure - * (since a double register result is difficult to handle). There is - * very limited hardware support for these operations, so only certain - * sizes may work. - */ -#define _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ -({ \ - int ret__; \ - unsigned long flags; \ - raw_local_irq_save(flags); \ - ret__ = raw_cpu_generic_cmpxchg_double(pcp1, pcp2, \ - oval1, oval2, nval1, nval2); \ - raw_local_irq_restore(flags); \ - ret__; \ -}) - -#ifndef this_cpu_cmpxchg_double -# ifndef this_cpu_cmpxchg_double_1 -# define this_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# ifndef this_cpu_cmpxchg_double_2 -# define this_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# ifndef this_cpu_cmpxchg_double_4 -# define this_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# ifndef this_cpu_cmpxchg_double_8 -# define this_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - _this_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# define this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - __pcpu_double_call_return_bool(this_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) -#endif - -/* - * Generic percpu operations for contexts where we do not want to do - * any checks for preemptiosn. - * - * If there is no other protection through preempt disable and/or - * disabling interupts then one of these RMW operations can show unexpected - * behavior because the execution thread was rescheduled on another processor - * or an interrupt occurred and the same percpu variable was modified from - * the interrupt context. 
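this_cpu_cmpxchg() is commonly used for lockless read-modify-write loops, such as tracking a per-CPU high-water mark; an illustrative sketch (my_hiwater and its caller are invented):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_hiwater);

static void my_note_depth(unsigned long depth)
{
	unsigned long old;

	do {
		old = this_cpu_read(my_hiwater);
		if (depth <= old)
			break;
		/* this_cpu_cmpxchg() returns the previous value; retry if a
		 * concurrent update (e.g. from an interrupt) got in first. */
	} while (this_cpu_cmpxchg(my_hiwater, old, depth) != old);
}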
- */ -#ifndef raw_cpu_read -# ifndef raw_cpu_read_1 -# define raw_cpu_read_1(pcp) (*raw_cpu_ptr(&(pcp))) -# endif -# ifndef raw_cpu_read_2 -# define raw_cpu_read_2(pcp) (*raw_cpu_ptr(&(pcp))) -# endif -# ifndef raw_cpu_read_4 -# define raw_cpu_read_4(pcp) (*raw_cpu_ptr(&(pcp))) -# endif -# ifndef raw_cpu_read_8 -# define raw_cpu_read_8(pcp) (*raw_cpu_ptr(&(pcp))) -# endif -# define raw_cpu_read(pcp) __pcpu_size_call_return(raw_cpu_read_, (pcp)) -#endif - -#define raw_cpu_generic_to_op(pcp, val, op) \ -do { \ - *raw_cpu_ptr(&(pcp)) op val; \ -} while (0) - - -#ifndef raw_cpu_write -# ifndef raw_cpu_write_1 -# define raw_cpu_write_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), =) -# endif -# ifndef raw_cpu_write_2 -# define raw_cpu_write_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), =) -# endif -# ifndef raw_cpu_write_4 -# define raw_cpu_write_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), =) -# endif -# ifndef raw_cpu_write_8 -# define raw_cpu_write_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), =) -# endif -# define raw_cpu_write(pcp, val) __pcpu_size_call(raw_cpu_write_, (pcp), (val)) -#endif - -#ifndef raw_cpu_add -# ifndef raw_cpu_add_1 -# define raw_cpu_add_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=) -# endif -# ifndef raw_cpu_add_2 -# define raw_cpu_add_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=) -# endif -# ifndef raw_cpu_add_4 -# define raw_cpu_add_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=) -# endif -# ifndef raw_cpu_add_8 -# define raw_cpu_add_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), +=) -# endif -# define raw_cpu_add(pcp, val) __pcpu_size_call(raw_cpu_add_, (pcp), (val)) -#endif - -#ifndef raw_cpu_sub -# define raw_cpu_sub(pcp, val) raw_cpu_add((pcp), -(val)) -#endif - -#ifndef raw_cpu_inc -# define raw_cpu_inc(pcp) raw_cpu_add((pcp), 1) -#endif - -#ifndef raw_cpu_dec -# define raw_cpu_dec(pcp) raw_cpu_sub((pcp), 1) -#endif - -#ifndef raw_cpu_and -# ifndef raw_cpu_and_1 -# define raw_cpu_and_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=) -# endif -# ifndef raw_cpu_and_2 -# define raw_cpu_and_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=) -# endif -# ifndef raw_cpu_and_4 -# define raw_cpu_and_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=) -# endif -# ifndef raw_cpu_and_8 -# define raw_cpu_and_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), &=) -# endif -# define raw_cpu_and(pcp, val) __pcpu_size_call(raw_cpu_and_, (pcp), (val)) -#endif - -#ifndef raw_cpu_or -# ifndef raw_cpu_or_1 -# define raw_cpu_or_1(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=) -# endif -# ifndef raw_cpu_or_2 -# define raw_cpu_or_2(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=) -# endif -# ifndef raw_cpu_or_4 -# define raw_cpu_or_4(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=) -# endif -# ifndef raw_cpu_or_8 -# define raw_cpu_or_8(pcp, val) raw_cpu_generic_to_op((pcp), (val), |=) -# endif -# define raw_cpu_or(pcp, val) __pcpu_size_call(raw_cpu_or_, (pcp), (val)) -#endif - -#define raw_cpu_generic_add_return(pcp, val) \ -({ \ - raw_cpu_add(pcp, val); \ - raw_cpu_read(pcp); \ -}) - -#ifndef raw_cpu_add_return -# ifndef raw_cpu_add_return_1 -# define raw_cpu_add_return_1(pcp, val) raw_cpu_generic_add_return(pcp, val) -# endif -# ifndef raw_cpu_add_return_2 -# define raw_cpu_add_return_2(pcp, val) raw_cpu_generic_add_return(pcp, val) -# endif -# ifndef raw_cpu_add_return_4 -# define raw_cpu_add_return_4(pcp, val) raw_cpu_generic_add_return(pcp, val) -# endif -# ifndef raw_cpu_add_return_8 -# define raw_cpu_add_return_8(pcp, val) 
raw_cpu_generic_add_return(pcp, val) -# endif -# define raw_cpu_add_return(pcp, val) \ - __pcpu_size_call_return2(raw_cpu_add_return_, pcp, val) -#endif - -#define raw_cpu_sub_return(pcp, val) raw_cpu_add_return(pcp, -(typeof(pcp))(val)) -#define raw_cpu_inc_return(pcp) raw_cpu_add_return(pcp, 1) -#define raw_cpu_dec_return(pcp) raw_cpu_add_return(pcp, -1) - -#define raw_cpu_generic_xchg(pcp, nval) \ -({ typeof(pcp) ret__; \ - ret__ = raw_cpu_read(pcp); \ - raw_cpu_write(pcp, nval); \ - ret__; \ -}) - -#ifndef raw_cpu_xchg -# ifndef raw_cpu_xchg_1 -# define raw_cpu_xchg_1(pcp, nval) raw_cpu_generic_xchg(pcp, nval) -# endif -# ifndef raw_cpu_xchg_2 -# define raw_cpu_xchg_2(pcp, nval) raw_cpu_generic_xchg(pcp, nval) -# endif -# ifndef raw_cpu_xchg_4 -# define raw_cpu_xchg_4(pcp, nval) raw_cpu_generic_xchg(pcp, nval) -# endif -# ifndef raw_cpu_xchg_8 -# define raw_cpu_xchg_8(pcp, nval) raw_cpu_generic_xchg(pcp, nval) -# endif -# define raw_cpu_xchg(pcp, nval) \ - __pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval) -#endif - -#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \ -({ \ - typeof(pcp) ret__; \ - ret__ = raw_cpu_read(pcp); \ - if (ret__ == (oval)) \ - raw_cpu_write(pcp, nval); \ - ret__; \ -}) - -#ifndef raw_cpu_cmpxchg -# ifndef raw_cpu_cmpxchg_1 -# define raw_cpu_cmpxchg_1(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# ifndef raw_cpu_cmpxchg_2 -# define raw_cpu_cmpxchg_2(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# ifndef raw_cpu_cmpxchg_4 -# define raw_cpu_cmpxchg_4(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# ifndef raw_cpu_cmpxchg_8 -# define raw_cpu_cmpxchg_8(pcp, oval, nval) raw_cpu_generic_cmpxchg(pcp, oval, nval) -# endif -# define raw_cpu_cmpxchg(pcp, oval, nval) \ - __pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval) -#endif - -#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ -({ \ - int __ret = 0; \ - if (raw_cpu_read(pcp1) == (oval1) && \ - raw_cpu_read(pcp2) == (oval2)) { \ - raw_cpu_write(pcp1, (nval1)); \ - raw_cpu_write(pcp2, (nval2)); \ - __ret = 1; \ - } \ - (__ret); \ -}) - -#ifndef raw_cpu_cmpxchg_double -# ifndef raw_cpu_cmpxchg_double_1 -# define raw_cpu_cmpxchg_double_1(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# ifndef raw_cpu_cmpxchg_double_2 -# define raw_cpu_cmpxchg_double_2(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# ifndef raw_cpu_cmpxchg_double_4 -# define raw_cpu_cmpxchg_double_4(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# ifndef raw_cpu_cmpxchg_double_8 -# define raw_cpu_cmpxchg_double_8(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) -# endif -# define raw_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - __pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2)) -#endif - -/* - * Generic percpu operations for context that are safe from preemption/interrupts. 
- */ -#ifndef __this_cpu_read -# define __this_cpu_read(pcp) \ - (__this_cpu_preempt_check("read"),__pcpu_size_call_return(raw_cpu_read_, (pcp))) -#endif - -#ifndef __this_cpu_write -# define __this_cpu_write(pcp, val) \ -do { __this_cpu_preempt_check("write"); \ - __pcpu_size_call(raw_cpu_write_, (pcp), (val)); \ -} while (0) -#endif - -#ifndef __this_cpu_add -# define __this_cpu_add(pcp, val) \ -do { __this_cpu_preempt_check("add"); \ - __pcpu_size_call(raw_cpu_add_, (pcp), (val)); \ -} while (0) -#endif - -#ifndef __this_cpu_sub -# define __this_cpu_sub(pcp, val) __this_cpu_add((pcp), -(typeof(pcp))(val)) -#endif - -#ifndef __this_cpu_inc -# define __this_cpu_inc(pcp) __this_cpu_add((pcp), 1) -#endif - -#ifndef __this_cpu_dec -# define __this_cpu_dec(pcp) __this_cpu_sub((pcp), 1) -#endif - -#ifndef __this_cpu_and -# define __this_cpu_and(pcp, val) \ -do { __this_cpu_preempt_check("and"); \ - __pcpu_size_call(raw_cpu_and_, (pcp), (val)); \ -} while (0) - -#endif - -#ifndef __this_cpu_or -# define __this_cpu_or(pcp, val) \ -do { __this_cpu_preempt_check("or"); \ - __pcpu_size_call(raw_cpu_or_, (pcp), (val)); \ -} while (0) -#endif - -#ifndef __this_cpu_add_return -# define __this_cpu_add_return(pcp, val) \ - (__this_cpu_preempt_check("add_return"),__pcpu_size_call_return2(raw_cpu_add_return_, pcp, val)) -#endif - -#define __this_cpu_sub_return(pcp, val) __this_cpu_add_return(pcp, -(typeof(pcp))(val)) -#define __this_cpu_inc_return(pcp) __this_cpu_add_return(pcp, 1) -#define __this_cpu_dec_return(pcp) __this_cpu_add_return(pcp, -1) - -#ifndef __this_cpu_xchg -# define __this_cpu_xchg(pcp, nval) \ - (__this_cpu_preempt_check("xchg"),__pcpu_size_call_return2(raw_cpu_xchg_, (pcp), nval)) -#endif - -#ifndef __this_cpu_cmpxchg -# define __this_cpu_cmpxchg(pcp, oval, nval) \ - (__this_cpu_preempt_check("cmpxchg"),__pcpu_size_call_return2(raw_cpu_cmpxchg_, pcp, oval, nval)) -#endif - -#ifndef __this_cpu_cmpxchg_double -# define __this_cpu_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \ - (__this_cpu_preempt_check("cmpxchg_double"),__pcpu_double_call_return_bool(raw_cpu_cmpxchg_double_, (pcp1), (pcp2), (oval1), (oval2), (nval1), (nval2))) -#endif - #endif /* __LINUX_PERCPU_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 68041446c450..ed39956b5613 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -545,6 +545,24 @@ struct phy_driver { */ void (*link_change_notify)(struct phy_device *dev); + /* A function provided by a phy specific driver to override the + * the PHY driver framework support for reading a MMD register + * from the PHY. If not supported, return -1. This function is + * optional for PHY specific drivers, if not provided then the + * default MMD read function is used by the PHY framework. + */ + int (*read_mmd_indirect)(struct phy_device *dev, int ptrad, + int devnum, int regnum); + + /* A function provided by a phy specific driver to override the + * the PHY driver framework support for writing a MMD register + * from the PHY. This function is optional for PHY specific drivers, + * if not provided then the default MMD read function is used by + * the PHY framework. 
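A hedged sketch of how a PHY driver might populate the new read_mmd_indirect()/write_mmd_indirect() hooks described above (the prototypes continue just below); the register numbers, access sequence and phy_id here are assumptions, not taken from any real driver:

#include <linux/phy.h>

#define MYPHY_MMD_CTRL	0x0d	/* assumption: indirect-control register */
#define MYPHY_MMD_DATA	0x0e	/* assumption: indirect-data register */

static int myphy_read_mmd_indirect(struct phy_device *phydev, int ptrad,
				   int devnum, int regnum)
{
	phy_write(phydev, MYPHY_MMD_CTRL, devnum);
	phy_write(phydev, MYPHY_MMD_DATA, regnum);
	phy_write(phydev, MYPHY_MMD_CTRL, devnum | 0x4000);
	return phy_read(phydev, MYPHY_MMD_DATA);
}

static struct phy_driver myphy_driver = {
	.phy_id		= 0x00112233,	/* made-up ID */
	.name		= "example PHY",
	.read_mmd_indirect = myphy_read_mmd_indirect,
	/* .write_mmd_indirect would mirror this for writes;
	 * other mandatory phy_driver fields are omitted here. */
};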
+ */ + void (*write_mmd_indirect)(struct phy_device *dev, int ptrad, + int devnum, int regnum, u32 val); + struct device_driver driver; }; #define to_phy_driver(d) container_of(d, struct phy_driver, driver) diff --git a/include/linux/phy/omap_control_phy.h b/include/linux/phy/omap_control_phy.h index 5450403c7546..e9e6cfbfbb58 100644 --- a/include/linux/phy/omap_control_phy.h +++ b/include/linux/phy/omap_control_phy.h @@ -23,6 +23,7 @@ enum omap_control_phy_type { OMAP_CTRL_TYPE_OTGHS = 1, /* Mailbox OTGHS_CONTROL */ OMAP_CTRL_TYPE_USB2, /* USB2_PHY, power down in CONTROL_DEV_CONF */ OMAP_CTRL_TYPE_PIPE3, /* PIPE3 PHY, DPLL & seperate Rx/Tx power */ + OMAP_CTRL_TYPE_PCIE, /* RX TX control of ACSPCIE */ OMAP_CTRL_TYPE_DRA7USB2, /* USB2 PHY, power and power_aux e.g. DRA7 */ OMAP_CTRL_TYPE_AM437USB2, /* USB2 PHY, power e.g. AM437x */ }; @@ -33,6 +34,7 @@ struct omap_control_phy { u32 __iomem *otghs_control; u32 __iomem *power; u32 __iomem *power_aux; + u32 __iomem *pcie_pcs; struct clk *sys_clk; @@ -63,6 +65,9 @@ enum omap_control_usb_mode { #define OMAP_CTRL_PIPE3_PHY_TX_RX_POWERON 0x3 #define OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF 0x0 +#define OMAP_CTRL_PCIE_PCS_MASK 0xff +#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 0x8 + #define OMAP_CTRL_USB2_PHY_PD BIT(28) #define AM437X_CTRL_USB2_PHY_PD BIT(0) @@ -74,6 +79,7 @@ enum omap_control_usb_mode { void omap_control_phy_power(struct device *dev, int on); void omap_control_usb_set_mode(struct device *dev, enum omap_control_usb_mode mode); +void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay); #else static inline void omap_control_phy_power(struct device *dev, int on) @@ -84,6 +90,10 @@ static inline void omap_control_usb_set_mode(struct device *dev, enum omap_control_usb_mode mode) { } + +static inline void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay) +{ +} #endif #endif /* __OMAP_CONTROL_PHY_H__ */ diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index 2760744cb2a7..8cb6f815475b 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -18,6 +18,7 @@ #include <linux/of.h> #include <linux/device.h> #include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> struct phy; @@ -65,6 +66,7 @@ struct phy { int init_count; int power_count; struct phy_attrs attrs; + struct regulator *pwr; }; /** @@ -156,9 +158,10 @@ void devm_phy_put(struct device *dev, struct phy *phy); struct phy *of_phy_get(struct device_node *np, const char *con_id); struct phy *of_phy_simple_xlate(struct device *dev, struct of_phandle_args *args); -struct phy *phy_create(struct device *dev, const struct phy_ops *ops, - struct phy_init_data *init_data); -struct phy *devm_phy_create(struct device *dev, +struct phy *phy_create(struct device *dev, struct device_node *node, + const struct phy_ops *ops, + struct phy_init_data *init_data); +struct phy *devm_phy_create(struct device *dev, struct device_node *node, const struct phy_ops *ops, struct phy_init_data *init_data); void phy_destroy(struct phy *phy); void devm_phy_destroy(struct device *dev, struct phy *phy); @@ -297,13 +300,17 @@ static inline struct phy *of_phy_simple_xlate(struct device *dev, } static inline struct phy *phy_create(struct device *dev, - const struct phy_ops *ops, struct phy_init_data *init_data) + struct device_node *node, + const struct phy_ops *ops, + struct phy_init_data *init_data) { return ERR_PTR(-ENOSYS); } static inline struct phy *devm_phy_create(struct device *dev, - const struct phy_ops *ops, struct phy_init_data *init_data) + struct device_node 
*node, + const struct phy_ops *ops, + struct phy_init_data *init_data) { return ERR_PTR(-ENOSYS); } diff --git a/include/linux/platform_data/ad7291.h b/include/linux/platform_data/ad7291.h new file mode 100644 index 000000000000..bbd89fa51188 --- /dev/null +++ b/include/linux/platform_data/ad7291.h @@ -0,0 +1,12 @@ +#ifndef __IIO_AD7291_H__ +#define __IIO_AD7291_H__ + +/** + * struct ad7291_platform_data - AD7291 platform data + * @use_external_ref: Whether to use an external or internal reference voltage + */ +struct ad7291_platform_data { + bool use_external_ref; +}; + +#endif diff --git a/include/linux/platform_data/asoc-s3c.h b/include/linux/platform_data/asoc-s3c.h index 709c6f7e2f8c..a6591c693ebb 100644 --- a/include/linux/platform_data/asoc-s3c.h +++ b/include/linux/platform_data/asoc-s3c.h @@ -15,15 +15,6 @@ #define S3C64XX_AC97_GPE 1 extern void s3c64xx_ac97_setup_gpio(int); -/* - * The machine init code calls s5p*_spdif_setup_gpio with - * one of these defines in order to select appropriate bank - * of GPIO for S/PDIF pins - */ -#define S5PC100_SPDIF_GPD 0 -#define S5PC100_SPDIF_GPG3 1 -extern void s5pc100_spdif_setup_gpio(int); - struct samsung_i2s { /* If the Primary DAI has 5.1 Channels */ #define QUIRK_PRI_6CHAN (1 << 0) diff --git a/include/linux/platform_data/ata-samsung_cf.h b/include/linux/platform_data/ata-samsung_cf.h index c2049e3d7444..748e71642c4a 100644 --- a/include/linux/platform_data/ata-samsung_cf.h +++ b/include/linux/platform_data/ata-samsung_cf.h @@ -29,7 +29,6 @@ extern void s3c_ide_set_platdata(struct s3c_ide_platdata *pdata); /* architecture-specific IDE configuration */ extern void s3c64xx_ide_setup_gpio(void); -extern void s5pc100_ide_setup_gpio(void); extern void s5pv210_ide_setup_gpio(void); #endif /*__ATA_SAMSUNG_CF_H */ diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index bcbc6c3c14c0..d05542aafa3e 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h @@ -50,6 +50,7 @@ enum imx_dma_prio { struct imx_dma_data { int dma_request; /* DMA request line */ + int dma_request2; /* secondary DMA request line */ enum sdma_peripheral_type peripheral_type; int priority; }; diff --git a/include/linux/platform_data/dsp-omap.h b/include/linux/platform_data/dsp-omap.h deleted file mode 100644 index 5927709b1908..000000000000 --- a/include/linux/platform_data/dsp-omap.h +++ /dev/null @@ -1,34 +0,0 @@ -#ifndef __OMAP_DSP_H__ -#define __OMAP_DSP_H__ - -#include <linux/types.h> - -struct omap_dsp_platform_data { - void (*dsp_set_min_opp) (u8 opp_id); - u8 (*dsp_get_opp) (void); - void (*cpu_set_freq) (unsigned long f); - unsigned long (*cpu_get_freq) (void); - unsigned long mpu_speed[6]; - - /* functions to write and read PRCM registers */ - void (*dsp_prm_write)(u32, s16 , u16); - u32 (*dsp_prm_read)(s16 , u16); - u32 (*dsp_prm_rmw_bits)(u32, u32, s16, s16); - void (*dsp_cm_write)(u32, s16 , u16); - u32 (*dsp_cm_read)(s16 , u16); - u32 (*dsp_cm_rmw_bits)(u32, u32, s16, s16); - - void (*set_bootaddr)(u32); - void (*set_bootmode)(u8); - - phys_addr_t phys_mempool_base; - phys_addr_t phys_mempool_size; -}; - -#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE) -extern void omap_dsp_reserve_sdram_memblock(void); -#else -static inline void omap_dsp_reserve_sdram_memblock(void) { } -#endif - -#endif diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h index 5b429c43a297..54a0a9582fad 100644 --- 
a/include/linux/platform_data/iommu-omap.h +++ b/include/linux/platform_data/iommu-omap.h @@ -31,14 +31,10 @@ struct omap_iommu_arch_data { /** * struct omap_mmu_dev_attr - OMAP mmu device attributes for omap_hwmod - * @da_start: device address where the va space starts. - * @da_end: device address where the va space ends. * @nr_tlb_entries: number of entries supported by the translation * look-aside buffer (TLB). */ struct omap_mmu_dev_attr { - u32 da_start; - u32 da_end; int nr_tlb_entries; }; @@ -46,8 +42,6 @@ struct iommu_platform_data { const char *name; const char *reset_name; int nr_tlb_entries; - u32 da_start; - u32 da_end; int (*assert_reset)(struct platform_device *pdev, const char *name); int (*deassert_reset)(struct platform_device *pdev, const char *name); diff --git a/include/linux/platform_data/ntc_thermistor.h b/include/linux/platform_data/ntc_thermistor.h index c7285b575462..0a6de4ca4930 100644 --- a/include/linux/platform_data/ntc_thermistor.h +++ b/include/linux/platform_data/ntc_thermistor.h @@ -26,6 +26,7 @@ struct iio_channel; enum ntc_thermistor_type { TYPE_NCPXXWB473, TYPE_NCPXXWL333, + TYPE_B57330V2103, }; struct ntc_thermistor_platform_data { diff --git a/include/linux/platform_data/st21nfcb.h b/include/linux/platform_data/st21nfcb.h new file mode 100644 index 000000000000..2d11f1f5efab --- /dev/null +++ b/include/linux/platform_data/st21nfcb.h @@ -0,0 +1,32 @@ +/* + * Driver include for the ST21NFCB NFC chip. + * + * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. 
+ */ + +#ifndef _ST21NFCB_NCI_H_ +#define _ST21NFCB_NCI_H_ + +#include <linux/i2c.h> + +#define ST21NFCB_NCI_DRIVER_NAME "st21nfcb_nci" + +struct st21nfcb_nfc_platform_data { + unsigned int gpio_irq; + unsigned int gpio_reset; + unsigned int irq_polarity; +}; + +#endif /* _ST21NFCA_HCI_H_ */ diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index 16f6654082dd..153d303af7eb 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h @@ -28,6 +28,7 @@ struct platform_device { struct resource *resource; const struct platform_device_id *id_entry; + char *driver_override; /* Driver name to force a match */ /* MFD cell pointer */ struct mfd_cell *mfd_cell; diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 43fd6716f662..367f49b9a1c9 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -24,11 +24,20 @@ #define RPM_AUTO 0x08 /* Use autosuspend_delay */ #ifdef CONFIG_PM +extern struct workqueue_struct *pm_wq; + +static inline bool queue_pm_work(struct work_struct *work) +{ + return queue_work(pm_wq, work); +} + extern int pm_generic_runtime_suspend(struct device *dev); extern int pm_generic_runtime_resume(struct device *dev); extern int pm_runtime_force_suspend(struct device *dev); extern int pm_runtime_force_resume(struct device *dev); #else +static inline bool queue_pm_work(struct work_struct *work) { return false; } + static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } static inline int pm_runtime_force_suspend(struct device *dev) { return 0; } @@ -37,8 +46,6 @@ static inline int pm_runtime_force_resume(struct device *dev) { return 0; } #ifdef CONFIG_PM_RUNTIME -extern struct workqueue_struct *pm_wq; - extern int __pm_runtime_idle(struct device *dev, int rpmflags); extern int __pm_runtime_suspend(struct device *dev, int rpmflags); extern int __pm_runtime_resume(struct device *dev, int rpmflags); diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index f2b76aeaf4e4..f3dea41dbcd2 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -120,6 +120,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_CONSTANT_CHARGE_VOLTAGE_MAX, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT, POWER_SUPPLY_PROP_CHARGE_CONTROL_LIMIT_MAX, + POWER_SUPPLY_PROP_INPUT_CURRENT_LIMIT, POWER_SUPPLY_PROP_ENERGY_FULL_DESIGN, POWER_SUPPLY_PROP_ENERGY_EMPTY_DESIGN, POWER_SUPPLY_PROP_ENERGY_FULL, @@ -131,6 +132,8 @@ enum power_supply_property { POWER_SUPPLY_PROP_CAPACITY_ALERT_MAX, /* in percents! 
*/ POWER_SUPPLY_PROP_CAPACITY_LEVEL, POWER_SUPPLY_PROP_TEMP, + POWER_SUPPLY_PROP_TEMP_MAX, + POWER_SUPPLY_PROP_TEMP_MIN, POWER_SUPPLY_PROP_TEMP_ALERT_MIN, POWER_SUPPLY_PROP_TEMP_ALERT_MAX, POWER_SUPPLY_PROP_TEMP_AMBIENT, @@ -142,6 +145,7 @@ enum power_supply_property { POWER_SUPPLY_PROP_TIME_TO_FULL_AVG, POWER_SUPPLY_PROP_TYPE, /* use power_supply.type instead */ POWER_SUPPLY_PROP_SCOPE, + POWER_SUPPLY_PROP_CHARGE_TERM_CURRENT, /* Properties of type `const char *' */ POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, diff --git a/include/linux/ptp_classify.h b/include/linux/ptp_classify.h index 7dfed71d76a6..159c987b1853 100644 --- a/include/linux/ptp_classify.h +++ b/include/linux/ptp_classify.h @@ -33,8 +33,8 @@ #define PTP_CLASS_IPV4 0x10 /* event in an IPV4 UDP packet */ #define PTP_CLASS_IPV6 0x20 /* event in an IPV6 UDP packet */ #define PTP_CLASS_L2 0x30 /* event in a L2 packet */ -#define PTP_CLASS_VLAN 0x40 /* event in a VLAN tagged L2 packet */ -#define PTP_CLASS_PMASK 0xf0 /* mask for the packet type field */ +#define PTP_CLASS_PMASK 0x30 /* mask for the packet type field */ +#define PTP_CLASS_VLAN 0x40 /* event in a VLAN tagged packet */ #define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4) #define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */ @@ -54,7 +54,6 @@ #define IP6_HLEN 40 #define UDP_HLEN 8 #define OFF_IHL 14 -#define OFF_PTP6 (ETH_HLEN + IP6_HLEN + UDP_HLEN) #define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2) #if defined(CONFIG_NET_PTP_CLASSIFY) diff --git a/include/linux/ras.h b/include/linux/ras.h new file mode 100644 index 000000000000..2aceeafd6fe5 --- /dev/null +++ b/include/linux/ras.h @@ -0,0 +1,14 @@ +#ifndef __RAS_H__ +#define __RAS_H__ + +#ifdef CONFIG_DEBUG_FS +int ras_userspace_consumers(void); +void ras_debugfs_init(void); +int ras_add_daemon_trace(void); +#else +static inline int ras_userspace_consumers(void) { return 0; } +static inline void ras_debugfs_init(void) { return; } +static inline int ras_add_daemon_trace(void) { return 0; } +#endif + +#endif diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 6a94cc8b1ca0..d231aa17b1d7 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -826,15 +826,14 @@ static inline void rcu_preempt_sleep_check(void) * read-side critical section that would block in a !PREEMPT kernel. * But if you want the full story, read on! * - * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it - * is illegal to block while in an RCU read-side critical section. In - * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU) - * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may - * be preempted, but explicit blocking is illegal. Finally, in preemptible - * RCU implementations in real-time (with -rt patchset) kernel builds, - * RCU read-side critical sections may be preempted and they may also - * block, but only when acquiring spinlocks that are subject to priority - * inheritance. + * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), + * it is illegal to block while in an RCU read-side critical section. + * In preemptible RCU implementations (TREE_PREEMPT_RCU) in CONFIG_PREEMPT + * kernel builds, RCU read-side critical sections may be preempted, + * but explicit blocking is illegal. 
Finally, in preemptible RCU + * implementations in real-time (with -rt patchset) kernel builds, RCU + * read-side critical sections may be preempted and they may also block, but + * only when acquiring spinlocks that are subject to priority inheritance. */ static inline void rcu_read_lock(void) { @@ -858,6 +857,34 @@ static inline void rcu_read_lock(void) /** * rcu_read_unlock() - marks the end of an RCU read-side critical section. * + * In most situations, rcu_read_unlock() is immune from deadlock. + * However, in kernels built with CONFIG_RCU_BOOST, rcu_read_unlock() + * is responsible for deboosting, which it does via rt_mutex_unlock(). + * Unfortunately, this function acquires the scheduler's runqueue and + * priority-inheritance spinlocks. This means that deadlock could result + * if the caller of rcu_read_unlock() already holds one of these locks or + * any lock that is ever acquired while holding them. + * + * That said, RCU readers are never priority boosted unless they were + * preempted. Therefore, one way to avoid deadlock is to make sure + * that preemption never happens within any RCU read-side critical + * section whose outermost rcu_read_unlock() is called with one of + * rt_mutex_unlock()'s locks held. Such preemption can be avoided in + * a number of ways, for example, by invoking preempt_disable() before + * critical section's outermost rcu_read_lock(). + * + * Given that the set of locks acquired by rt_mutex_unlock() might change + * at any time, a somewhat more future-proofed approach is to make sure + * that that preemption never happens within any RCU read-side critical + * section whose outermost rcu_read_unlock() is called with irqs disabled. + * This approach relies on the fact that rt_mutex_unlock() currently only + * acquires irq-disabled locks. + * + * The second of these two approaches is best in most situations, + * however, the first approach can also be useful, at least to those + * developers willing to keep abreast of the set of locks acquired by + * rt_mutex_unlock(). + * * See rcu_read_lock() for more information. 
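For reference alongside the expanded rcu_read_lock()/rcu_read_unlock() documentation above, the canonical reader/updater pairing looks like this; gp, my_data and gp_lock are invented for the sketch:

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

struct my_data {
	int a;
};

static struct my_data __rcu *gp;
static DEFINE_SPINLOCK(gp_lock);

static int my_read(void)
{
	struct my_data *p;
	int ret = -1;

	rcu_read_lock();
	p = rcu_dereference(gp);
	if (p)
		ret = p->a;
	rcu_read_unlock();
	return ret;
}

static void my_update(struct my_data *newp)
{
	struct my_data *old;

	spin_lock(&gp_lock);
	old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
	rcu_assign_pointer(gp, newp);
	spin_unlock(&gp_lock);
	if (old) {
		synchronize_rcu();	/* wait for pre-existing readers */
		kfree(old);
	}
}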
*/ static inline void rcu_read_unlock(void) diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 7b0e4b425cdf..c5ed83f49c4e 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -396,6 +396,7 @@ void regmap_exit(struct regmap *map); int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config); struct regmap *dev_get_regmap(struct device *dev, const char *name); +struct device *regmap_get_device(struct regmap *map); int regmap_write(struct regmap *map, unsigned int reg, unsigned int val); int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val); int regmap_raw_write(struct regmap *map, unsigned int reg, @@ -729,6 +730,12 @@ static inline struct regmap *dev_get_regmap(struct device *dev, return NULL; } +static inline struct device *regmap_get_device(struct regmap *map) +{ + WARN_ONCE(1, "regmap API is disabled"); + return NULL; +} + #endif #endif diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h index 75307447cef9..d8ecefaf63ca 100644 --- a/include/linux/regulator/ab8500.h +++ b/include/linux/regulator/ab8500.h @@ -322,18 +322,4 @@ struct ab8500_regulator_platform_data { struct regulator_init_data *ext_regulator; }; -#ifdef CONFIG_REGULATOR_AB8500_DEBUG -int ab8500_regulator_debug_init(struct platform_device *pdev); -int ab8500_regulator_debug_exit(struct platform_device *pdev); -#else -static inline int ab8500_regulator_debug_init(struct platform_device *pdev) -{ - return 0; -} -static inline int ab8500_regulator_debug_exit(struct platform_device *pdev) -{ - return 0; -} -#endif - #endif diff --git a/include/linux/regulator/act8865.h b/include/linux/regulator/act8865.h index 49206c1b4905..b6c4909b33af 100644 --- a/include/linux/regulator/act8865.h +++ b/include/linux/regulator/act8865.h @@ -1,5 +1,5 @@ /* - * act8865.h -- Voltage regulation for the active-semi act8865 + * act8865.h -- Voltage regulation for active-semi act88xx PMUs * * Copyright (C) 2013 Atmel Corporation. * @@ -29,6 +29,27 @@ enum { ACT8865_REG_NUM, }; +enum { + ACT8846_ID_REG1, + ACT8846_ID_REG2, + ACT8846_ID_REG3, + ACT8846_ID_REG4, + ACT8846_ID_REG5, + ACT8846_ID_REG6, + ACT8846_ID_REG7, + ACT8846_ID_REG8, + ACT8846_ID_REG9, + ACT8846_ID_REG10, + ACT8846_ID_REG11, + ACT8846_ID_REG12, + ACT8846_REG_NUM, +}; + +enum { + ACT8865, + ACT8846, +}; + /** * act8865_regulator_data - regulator data * @id: regulator id diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h index 14ec18d5e18b..f8a8733068a7 100644 --- a/include/linux/regulator/consumer.h +++ b/include/linux/regulator/consumer.h @@ -37,6 +37,7 @@ struct device; struct notifier_block; +struct regmap; /* * Regulator operating modes. 
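The regmap_get_device() helper added in the regmap.h hunk above lets code that only holds a regmap handle reach the owning struct device, e.g. for diagnostics; a minimal sketch (register 0x00 is arbitrary):

#include <linux/regmap.h>
#include <linux/device.h>

static int my_check(struct regmap *map)
{
	struct device *dev = regmap_get_device(map);
	unsigned int val;
	int ret;

	ret = regmap_read(map, 0x00, &val);
	if (ret)
		dev_err(dev, "register read failed: %d\n", ret);
	return ret;
}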
@@ -215,6 +216,13 @@ int regulator_set_optimum_mode(struct regulator *regulator, int load_uA); int regulator_allow_bypass(struct regulator *regulator, bool allow); +struct regmap *regulator_get_regmap(struct regulator *regulator); +int regulator_get_hardware_vsel_register(struct regulator *regulator, + unsigned *vsel_reg, + unsigned *vsel_mask); +int regulator_list_hardware_vsel(struct regulator *regulator, + unsigned selector); + /* regulator notifier block */ int regulator_register_notifier(struct regulator *regulator, struct notifier_block *nb); @@ -457,6 +465,24 @@ static inline int regulator_allow_bypass(struct regulator *regulator, return 0; } +static inline struct regmap *regulator_get_regmap(struct regulator *regulator) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int regulator_get_hardware_vsel_register(struct regulator *regulator, + unsigned *vsel_reg, + unsigned *vsel_mask) +{ + return -EOPNOTSUPP; +} + +static inline int regulator_list_hardware_vsel(struct regulator *regulator, + unsigned selector) +{ + return -EOPNOTSUPP; +} + static inline int regulator_register_notifier(struct regulator *regulator, struct notifier_block *nb) { diff --git a/include/linux/regulator/da9211.h b/include/linux/regulator/da9211.h new file mode 100644 index 000000000000..0981ce0e72cc --- /dev/null +++ b/include/linux/regulator/da9211.h @@ -0,0 +1,32 @@ +/* + * da9211.h - Regulator device driver for DA9211 + * Copyright (C) 2014 Dialog Semiconductor Ltd. + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. 
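A hedged sketch of a consumer using the regulator_get_hardware_vsel_register()/regulator_list_hardware_vsel() helpers added in the consumer.h hunk above, e.g. when handing voltage selection over to a hardware engine; the selector bound and message are invented:

#include <linux/regulator/consumer.h>
#include <linux/printk.h>

static int my_dump_hw_vsel(struct regulator *reg)
{
	unsigned int vsel_reg, vsel_mask, sel;
	int ret, code;

	ret = regulator_get_hardware_vsel_register(reg, &vsel_reg, &vsel_mask);
	if (ret)
		return ret;	/* no simple voltage-selection register */

	for (sel = 0; sel < 8; sel++) {	/* 8 is an arbitrary bound */
		code = regulator_list_hardware_vsel(reg, sel);
		if (code < 0)
			continue;
		pr_info("selector %u -> vsel value 0x%x in reg 0x%x (mask 0x%x)\n",
			sel, code, vsel_reg, vsel_mask);
	}
	return 0;
}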
+ */ + +#ifndef __LINUX_REGULATOR_DA9211_H +#define __LINUX_REGULATOR_DA9211_H + +#include <linux/regulator/machine.h> + +#define DA9211_MAX_REGULATORS 2 + +struct da9211_pdata { + /* + * Number of buck + * 1 : 4 phase 1 buck + * 2 : 2 phase 2 buck + */ + int num_buck; + struct regulator_init_data *init_data; +}; +#endif diff --git a/include/linux/reservation.h b/include/linux/reservation.h index 813dae960ebd..5a0b64cf68b4 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h @@ -6,7 +6,7 @@ * Copyright (C) 2012 Texas Instruments * * Authors: - * Rob Clark <rob.clark@linaro.org> + * Rob Clark <robdclark@gmail.com> * Maarten Lankhorst <maarten.lankhorst@canonical.com> * Thomas Hellstrom <thellstrom-at-vmware-dot-com> * @@ -40,23 +40,103 @@ #define _LINUX_RESERVATION_H #include <linux/ww_mutex.h> +#include <linux/fence.h> +#include <linux/slab.h> +#include <linux/seqlock.h> +#include <linux/rcupdate.h> extern struct ww_class reservation_ww_class; +extern struct lock_class_key reservation_seqcount_class; +extern const char reservation_seqcount_string[]; + +struct reservation_object_list { + struct rcu_head rcu; + u32 shared_count, shared_max; + struct fence __rcu *shared[]; +}; struct reservation_object { struct ww_mutex lock; + seqcount_t seq; + + struct fence __rcu *fence_excl; + struct reservation_object_list __rcu *fence; + struct reservation_object_list *staged; }; +#define reservation_object_held(obj) lockdep_is_held(&(obj)->lock.base) +#define reservation_object_assert_held(obj) \ + lockdep_assert_held(&(obj)->lock.base) + static inline void reservation_object_init(struct reservation_object *obj) { ww_mutex_init(&obj->lock, &reservation_ww_class); + + __seqcount_init(&obj->seq, reservation_seqcount_string, &reservation_seqcount_class); + RCU_INIT_POINTER(obj->fence, NULL); + RCU_INIT_POINTER(obj->fence_excl, NULL); + obj->staged = NULL; } static inline void reservation_object_fini(struct reservation_object *obj) { + int i; + struct reservation_object_list *fobj; + struct fence *excl; + + /* + * This object should be dead and all references must have + * been released to it, so no need to be protected with rcu. 
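A hedged sketch of how a buffer object might use the RCU-enabled reservation_object interface in this hunk (the fence helpers appear just below); my_bo, the fence source and the timeout are invented:

#include <linux/reservation.h>
#include <linux/fence.h>

struct my_bo {
	struct reservation_object resv;	/* reservation_object_init() at create */
};

static int my_bo_add_read_fence(struct my_bo *bo, struct fence *fence)
{
	int ret;

	ww_mutex_lock(&bo->resv.lock, NULL);
	ret = reservation_object_reserve_shared(&bo->resv);
	if (!ret)
		reservation_object_add_shared_fence(&bo->resv, fence);
	ww_mutex_unlock(&bo->resv.lock);
	return ret;
}

static long my_bo_wait_idle(struct my_bo *bo)
{
	/* Lockless wait on all fences, interruptible, ~1s timeout. */
	return reservation_object_wait_timeout_rcu(&bo->resv, true, true, HZ);
}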
+ */ + excl = rcu_dereference_protected(obj->fence_excl, 1); + if (excl) + fence_put(excl); + + fobj = rcu_dereference_protected(obj->fence, 1); + if (fobj) { + for (i = 0; i < fobj->shared_count; ++i) + fence_put(rcu_dereference_protected(fobj->shared[i], 1)); + + kfree(fobj); + } + kfree(obj->staged); + ww_mutex_destroy(&obj->lock); } +static inline struct reservation_object_list * +reservation_object_get_list(struct reservation_object *obj) +{ + return rcu_dereference_protected(obj->fence, + reservation_object_held(obj)); +} + +static inline struct fence * +reservation_object_get_excl(struct reservation_object *obj) +{ + return rcu_dereference_protected(obj->fence_excl, + reservation_object_held(obj)); +} + +int reservation_object_reserve_shared(struct reservation_object *obj); +void reservation_object_add_shared_fence(struct reservation_object *obj, + struct fence *fence); + +void reservation_object_add_excl_fence(struct reservation_object *obj, + struct fence *fence); + +int reservation_object_get_fences_rcu(struct reservation_object *obj, + struct fence **pfence_excl, + unsigned *pshared_count, + struct fence ***pshared); + +long reservation_object_wait_timeout_rcu(struct reservation_object *obj, + bool wait_all, bool intr, + unsigned long timeout); + +bool reservation_object_test_signaled_rcu(struct reservation_object *obj, + bool test_all); + #endif /* _LINUX_RESERVATION_H */ diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h new file mode 100644 index 000000000000..9cda293c867d --- /dev/null +++ b/include/linux/rhashtable.h @@ -0,0 +1,213 @@ +/* + * Resizable, Scalable, Concurrent Hash Table + * + * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch> + * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net> + * + * Based on the following paper by Josh Triplett, Paul E. McKenney + * and Jonathan Walpole: + * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf + * + * Code partially derived from nft_hash + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#ifndef _LINUX_RHASHTABLE_H +#define _LINUX_RHASHTABLE_H + +#include <linux/rculist.h> + +struct rhash_head { + struct rhash_head *next; +}; + +#define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL) + +struct bucket_table { + size_t size; + struct rhash_head __rcu *buckets[]; +}; + +typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed); +typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed); + +struct rhashtable; + +/** + * struct rhashtable_params - Hash table construction parameters + * @nelem_hint: Hint on number of elements, should be 75% of desired size + * @key_len: Length of key + * @key_offset: Offset of key in struct to be hashed + * @head_offset: Offset of rhash_head in struct to be hashed + * @hash_rnd: Seed to use while hashing + * @max_shift: Maximum number of shifts while expanding + * @hashfn: Function to hash key + * @obj_hashfn: Function to hash object + * @grow_decision: If defined, may return true if table should expand + * @shrink_decision: If defined, may return true if table should shrink + * @mutex_is_held: Must return true if protecting mutex is held + */ +struct rhashtable_params { + size_t nelem_hint; + size_t key_len; + size_t key_offset; + size_t head_offset; + u32 hash_rnd; + size_t max_shift; + rht_hashfn_t hashfn; + rht_obj_hashfn_t obj_hashfn; + bool (*grow_decision)(const struct rhashtable *ht, + size_t new_size); + bool (*shrink_decision)(const struct rhashtable *ht, + size_t new_size); + int (*mutex_is_held)(void); +}; + +/** + * struct rhashtable - Hash table handle + * @tbl: Bucket table + * @nelems: Number of elements in table + * @shift: Current size (1 << shift) + * @p: Configuration parameters + */ +struct rhashtable { + struct bucket_table __rcu *tbl; + size_t nelems; + size_t shift; + struct rhashtable_params p; +}; + +#ifdef CONFIG_PROVE_LOCKING +int lockdep_rht_mutex_is_held(const struct rhashtable *ht); +#else +static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht) +{ + return 1; +} +#endif /* CONFIG_PROVE_LOCKING */ + +int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params); + +u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len); +u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr); + +void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t); +bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t); +void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj, + struct rhash_head **pprev, gfp_t flags); + +bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size); +bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size); + +int rhashtable_expand(struct rhashtable *ht, gfp_t flags); +int rhashtable_shrink(struct rhashtable *ht, gfp_t flags); + +void *rhashtable_lookup(const struct rhashtable *ht, const void *key); +void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash, + bool (*compare)(void *, void *), void *arg); + +void rhashtable_destroy(const struct rhashtable *ht); + +#define rht_dereference(p, ht) \ + rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht)) + +#define rht_dereference_rcu(p, ht) \ + rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht)) + +/* Internal, use rht_obj() instead */ +#define rht_entry(ptr, type, member) container_of(ptr, type, member) +#define rht_entry_safe(ptr, type, member) \ +({ \ + typeof(ptr) __ptr = (ptr); \ + __ptr ? 
rht_entry(__ptr, type, member) : NULL; \ +}) +#define rht_entry_safe_rcu(ptr, type, member) \ +({ \ + typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \ + __ptr ? container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member) : NULL; \ +}) + +#define rht_next_entry_safe(pos, ht, member) \ +({ \ + pos ? rht_entry_safe(rht_dereference((pos)->member.next, ht), \ + typeof(*(pos)), member) : NULL; \ +}) + +/** + * rht_for_each - iterate over hash chain + * @pos: &struct rhash_head to use as a loop cursor. + * @head: head of the hash chain (struct rhash_head *) + * @ht: pointer to your struct rhashtable + */ +#define rht_for_each(pos, head, ht) \ + for (pos = rht_dereference(head, ht); \ + pos; \ + pos = rht_dereference((pos)->next, ht)) + +/** + * rht_for_each_entry - iterate over hash chain of given type + * @pos: type * to use as a loop cursor. + * @head: head of the hash chain (struct rhash_head *) + * @ht: pointer to your struct rhashtable + * @member: name of the rhash_head within the hashable struct. + */ +#define rht_for_each_entry(pos, head, ht, member) \ + for (pos = rht_entry_safe(rht_dereference(head, ht), \ + typeof(*(pos)), member); \ + pos; \ + pos = rht_next_entry_safe(pos, ht, member)) + +/** + * rht_for_each_entry_safe - safely iterate over hash chain of given type + * @pos: type * to use as a loop cursor. + * @n: type * to use for temporary next object storage + * @head: head of the hash chain (struct rhash_head *) + * @ht: pointer to your struct rhashtable + * @member: name of the rhash_head within the hashable struct. + * + * This hash chain list-traversal primitive allows for the looped code to + * remove the loop cursor from the list. + */ +#define rht_for_each_entry_safe(pos, n, head, ht, member) \ + for (pos = rht_entry_safe(rht_dereference(head, ht), \ + typeof(*(pos)), member), \ + n = rht_next_entry_safe(pos, ht, member); \ + pos; \ + pos = n, \ + n = rht_next_entry_safe(pos, ht, member)) + +/** + * rht_for_each_rcu - iterate over rcu hash chain + * @pos: &struct rhash_head to use as a loop cursor. + * @head: head of the hash chain (struct rhash_head *) + * @ht: pointer to your struct rhashtable + * + * This hash chain list-traversal primitive may safely run concurrently with + * the _rcu fkht mutation primitives such as rht_insert() as long as the + * traversal is guarded by rcu_read_lock(). + */ +#define rht_for_each_rcu(pos, head, ht) \ + for (pos = rht_dereference_rcu(head, ht); \ + pos; \ + pos = rht_dereference_rcu((pos)->next, ht)) + +/** + * rht_for_each_entry_rcu - iterate over rcu hash chain of given type + * @pos: type * to use as a loop cursor. + * @head: head of the hash chain (struct rhash_head *) + * @member: name of the rhash_head within the hashable struct. + * + * This hash chain list-traversal primitive may safely run concurrently with + * the _rcu fkht mutation primitives such as rht_insert() as long as the + * traversal is guarded by rcu_read_lock(). 
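The parameters above describe an rhash_head embedded in the caller's object, addressed by key_offset/head_offset. A minimal sketch of wiring that up follows; the object layout, the example mutex and the trivial mutex_is_held callback are invented for illustration and are not taken from this patch.

/*
 * Illustrative rhashtable user. Assumes <linux/rhashtable.h>,
 * <linux/jhash.h> and <linux/mutex.h>.
 */
struct example_obj {
	int			key;
	struct rhash_head	node;
};

static DEFINE_MUTEX(example_mutex);

static int example_mutex_is_held(void)
{
	return 1;	/* updates are serialized by example_mutex in this sketch */
}

static int example_table_init(struct rhashtable *ht)
{
	struct rhashtable_params params = {
		.nelem_hint	= 8,
		.key_len	= sizeof(int),
		.key_offset	= offsetof(struct example_obj, key),
		.head_offset	= offsetof(struct example_obj, node),
		.hashfn		= jhash,
		.mutex_is_held	= example_mutex_is_held,
	};

	return rhashtable_init(ht, &params);
}

static void example_insert(struct rhashtable *ht, struct example_obj *obj)
{
	mutex_lock(&example_mutex);
	rhashtable_insert(ht, &obj->node, GFP_KERNEL);
	mutex_unlock(&example_mutex);
}

/* Caller must hold rcu_read_lock() or example_mutex. */
static struct example_obj *example_find(struct rhashtable *ht, int key)
{
	return rhashtable_lookup(ht, &key);
}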
+ */ +#define rht_for_each_entry_rcu(pos, head, member) \ + for (pos = rht_entry_safe_rcu(head, typeof(*(pos)), member); \ + pos; \ + pos = rht_entry_safe_rcu((pos)->member.next, \ + typeof(*(pos)), member)) + +#endif /* _LINUX_RHASHTABLE_H */ diff --git a/include/linux/rndis.h b/include/linux/rndis.h index 0c8dc7195cdb..93c0a64aefa6 100644 --- a/include/linux/rndis.h +++ b/include/linux/rndis.h @@ -65,6 +65,7 @@ #define RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION 0x40010012 #define RNDIS_STATUS_WW_INDICATION RDIA_SPECIFIC_INDICATION #define RNDIS_STATUS_LINK_SPEED_CHANGE 0x40010013L +#define RNDIS_STATUS_NETWORK_CHANGE 0x40010018 #define RNDIS_STATUS_NOT_RESETTABLE 0x80010001 #define RNDIS_STATUS_SOFT_ERRORS 0x80010003 diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index 3aed8d737e1a..1abba5ce2a2f 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h @@ -90,11 +90,9 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name); extern void rt_mutex_destroy(struct rt_mutex *lock); extern void rt_mutex_lock(struct rt_mutex *lock); -extern int rt_mutex_lock_interruptible(struct rt_mutex *lock, - int detect_deadlock); +extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); extern int rt_mutex_timed_lock(struct rt_mutex *lock, - struct hrtimer_sleeper *timeout, - int detect_deadlock); + struct hrtimer_sleeper *timeout); extern int rt_mutex_trylock(struct rt_mutex *lock); diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 953937ea5233..167bae7bdfa4 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h @@ -78,6 +78,7 @@ extern void __rtnl_unlock(void); extern int ndo_dflt_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb, struct net_device *dev, + struct net_device *filter_dev, int idx); extern int ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index a964f7285600..f4ec8bbcb372 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -229,10 +229,10 @@ void sg_init_one(struct scatterlist *, const void *, unsigned int); typedef struct scatterlist *(sg_alloc_fn)(unsigned int, gfp_t); typedef void (sg_free_fn)(struct scatterlist *, unsigned int); -void __sg_free_table(struct sg_table *, unsigned int, sg_free_fn *); +void __sg_free_table(struct sg_table *, unsigned int, bool, sg_free_fn *); void sg_free_table(struct sg_table *); -int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, gfp_t, - sg_alloc_fn *); +int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, + struct scatterlist *, gfp_t, sg_alloc_fn *); int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, diff --git a/include/linux/sched.h b/include/linux/sched.h index 0376b054a0d0..84729f7c472c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -168,8 +168,7 @@ extern int nr_processes(void); extern unsigned long nr_running(void); extern unsigned long nr_iowait(void); extern unsigned long nr_iowait_cpu(int cpu); -extern unsigned long this_cpu_load(void); - +extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load); extern void calc_global_load(unsigned long ticks); extern void update_cpu_load_nohz(void); @@ -813,7 +812,7 @@ struct task_delay_info { * associated with the operation is added to XXX_delay. * XXX_delay contains the accumulated delay time in nanoseconds. 
*/ - struct timespec blkio_start, blkio_end; /* Shared by blkio, swapin */ + u64 blkio_start; /* Shared by blkio, swapin */ u64 blkio_delay; /* wait for sync block io completion */ u64 swapin_delay; /* wait for swapin block io completion */ u32 blkio_count; /* total count of the number of sync block */ @@ -821,7 +820,7 @@ struct task_delay_info { u32 swapin_count; /* total count of the number of swapin block */ /* io operations performed */ - struct timespec freepages_start, freepages_end; + u64 freepages_start; u64 freepages_delay; /* wait for memory reclaim */ u32 freepages_count; /* total count of memory reclaim */ }; @@ -1270,9 +1269,6 @@ struct task_struct { #ifdef CONFIG_TREE_PREEMPT_RCU struct rcu_node *rcu_blocked_node; #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ -#ifdef CONFIG_RCU_BOOST - struct rt_mutex *rcu_boost_mutex; -#endif /* #ifdef CONFIG_RCU_BOOST */ #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) struct sched_info sched_info; @@ -1307,13 +1303,12 @@ struct task_struct { * execve */ unsigned in_iowait:1; - /* task may not gain privileges */ - unsigned no_new_privs:1; - /* Revert to default priority/policy when forking */ unsigned sched_reset_on_fork:1; unsigned sched_contributes_to_load:1; + unsigned long atomic_flags; /* Flags needing atomic access. */ + pid_t pid; pid_t tgid; @@ -1367,8 +1362,8 @@ struct task_struct { } vtime_snap_whence; #endif unsigned long nvcsw, nivcsw; /* context switch counts */ - struct timespec start_time; /* monotonic time */ - struct timespec real_start_time; /* boot based time */ + u64 start_time; /* monotonic time in nsec */ + u64 real_start_time; /* boot based time in nsec */ /* mm fault and swap info: this can arguably be seen as either mm-specific or thread-specific */ unsigned long min_flt, maj_flt; @@ -1440,8 +1435,6 @@ struct task_struct { struct rb_node *pi_waiters_leftmost; /* Deadlock detection and priority inheritance handling */ struct rt_mutex_waiter *pi_blocked_on; - /* Top pi_waiters task */ - struct task_struct *pi_top_task; #endif #ifdef CONFIG_DEBUG_MUTEXES @@ -1967,6 +1960,19 @@ static inline void memalloc_noio_restore(unsigned int flags) current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags; } +/* Per-process atomic flags. */ +#define PFA_NO_NEW_PRIVS 0x00000001 /* May not gain new privileges. */ + +static inline bool task_no_new_privs(struct task_struct *p) +{ + return test_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); +} + +static inline void task_set_no_new_privs(struct task_struct *p) +{ + set_bit(PFA_NO_NEW_PRIVS, &p->atomic_flags); +} + /* * task->jobctl flags */ @@ -2009,9 +2015,6 @@ static inline void rcu_copy_process(struct task_struct *p) #ifdef CONFIG_TREE_PREEMPT_RCU p->rcu_blocked_node = NULL; #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ -#ifdef CONFIG_RCU_BOOST - p->rcu_boost_mutex = NULL; -#endif /* #ifdef CONFIG_RCU_BOOST */ INIT_LIST_HEAD(&p->rcu_node_entry); } @@ -2788,7 +2791,7 @@ static inline bool __must_check current_set_polling_and_test(void) /* * Polling state must be visible before we test NEED_RESCHED, - * paired by resched_task() + * paired by resched_curr() */ smp_mb__after_atomic(); @@ -2806,7 +2809,7 @@ static inline bool __must_check current_clr_polling_and_test(void) /* * Polling state must be visible before we test NEED_RESCHED, - * paired by resched_task() + * paired by resched_curr() */ smp_mb__after_atomic(); @@ -2838,7 +2841,7 @@ static inline void current_clr_polling(void) * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also * fold. 
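Moving no_new_privs out of the packed bitfield into a separate atomic_flags word lets it be set on another task without racing against unrelated bitfield updates. Roughly how callers such as prctl(PR_SET_NO_NEW_PRIVS) and the seccomp filter path are expected to use the new accessors (a simplified sketch, not the actual call sites):

/* Sketch of callers of the new PFA_NO_NEW_PRIVS accessors. */
static int sketch_prctl_set_no_new_privs(void)
{
	task_set_no_new_privs(current);	/* one-way switch, never cleared */
	return 0;
}

static int sketch_may_attach_seccomp_filter(void)
{
	/*
	 * Without no_new_privs, installing a filter requires privilege,
	 * otherwise a confined task could misdirect a setuid child.
	 * capable() here is a simplification of the real check.
	 */
	if (!task_no_new_privs(current) && !capable(CAP_SYS_ADMIN))
		return -EACCES;
	return 0;
}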
*/ - smp_mb(); /* paired with resched_task() */ + smp_mb(); /* paired with resched_curr() */ preempt_fold_need_resched(); } diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 4054b0994071..5d586a45a319 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h @@ -3,6 +3,8 @@ #include <uapi/linux/seccomp.h> +#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC) + #ifdef CONFIG_SECCOMP #include <linux/thread_info.h> @@ -14,11 +16,11 @@ struct seccomp_filter; * * @mode: indicates one of the valid values above for controlled * system calls available to a process. - * @filter: The metadata and ruleset for determining what system calls - * are allowed for a task. + * @filter: must always point to a valid seccomp-filter or NULL as it is + * accessed without locking during system call entry. * * @filter must only be accessed from the context of current as there - * is no locking. + * is no read locking. */ struct seccomp { int mode; diff --git a/include/linux/security.h b/include/linux/security.h index 9c6b9722ff48..623f90e5f38d 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -702,6 +702,15 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) * @inode points to the inode to use as a reference. * The current task must be the one that nominated @inode. * Return 0 if successful. + * @kernel_fw_from_file: + * Load firmware from userspace (not called for built-in firmware). + * @file contains the file structure pointing to the file containing + * the firmware to load. This argument will be NULL if the firmware + * was loaded via the uevent-triggered blob-based interface exposed + * by CONFIG_FW_LOADER_USER_HELPER. + * @buf pointer to buffer containing firmware contents. + * @size length of the firmware contents. + * Return 0 if permission is granted. * @kernel_module_request: * Ability to trigger the kernel to automatically upcall to userspace for * userspace to load a kernel module with the given name. 
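SECCOMP_FILTER_FLAG_MASK collects every flag the new seccomp(2) operation understands (currently only SECCOMP_FILTER_FLAG_TSYNC), so the syscall entry point can reject unknown bits up front. A plausible validation sketch; the helper name is hypothetical and the real logic lives in kernel/seccomp.c:

/* Hypothetical flag validation using the mask defined above. */
static long sketch_seccomp_set_mode_filter(unsigned int flags,
					   const char __user *filter)
{
	if (flags & ~SECCOMP_FILTER_FLAG_MASK)
		return -EINVAL;		/* unknown flag bits */

	/* ... attach the filter, honouring SECCOMP_FILTER_FLAG_TSYNC ... */
	return 0;
}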
@@ -1565,6 +1574,7 @@ struct security_operations { void (*cred_transfer)(struct cred *new, const struct cred *old); int (*kernel_act_as)(struct cred *new, u32 secid); int (*kernel_create_files_as)(struct cred *new, struct inode *inode); + int (*kernel_fw_from_file)(struct file *file, char *buf, size_t size); int (*kernel_module_request)(char *kmod_name); int (*kernel_module_from_file)(struct file *file); int (*task_fix_setuid) (struct cred *new, const struct cred *old, @@ -1837,6 +1847,7 @@ int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp); void security_transfer_creds(struct cred *new, const struct cred *old); int security_kernel_act_as(struct cred *new, u32 secid); int security_kernel_create_files_as(struct cred *new, struct inode *inode); +int security_kernel_fw_from_file(struct file *file, char *buf, size_t size); int security_kernel_module_request(char *kmod_name); int security_kernel_module_from_file(struct file *file); int security_task_fix_setuid(struct cred *new, const struct cred *old, @@ -2363,6 +2374,12 @@ static inline int security_kernel_create_files_as(struct cred *cred, return 0; } +static inline int security_kernel_fw_from_file(struct file *file, + char *buf, size_t size) +{ + return 0; +} + static inline int security_kernel_module_request(char *kmod_name) { return 0; diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 535f158977b9..cc359636cfa3 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -117,6 +117,22 @@ repeat: } /** + * raw_read_seqcount - Read the raw seqcount + * @s: pointer to seqcount_t + * Returns: count to be passed to read_seqcount_retry + * + * raw_read_seqcount opens a read critical section of the given + * seqcount without any lockdep checking and without checking or + * masking the LSB. Calling code is responsible for handling that. + */ +static inline unsigned raw_read_seqcount(const seqcount_t *s) +{ + unsigned ret = ACCESS_ONCE(s->sequence); + smp_rmb(); + return ret; +} + +/** * raw_read_seqcount_begin - start seq-read critical section w/o lockdep * @s: pointer to seqcount_t * Returns: count to be passed to read_seqcount_retry @@ -164,8 +180,6 @@ static inline unsigned read_seqcount_begin(const seqcount_t *s) static inline unsigned raw_seqcount_begin(const seqcount_t *s) { unsigned ret = ACCESS_ONCE(s->sequence); - - seqcount_lockdep_reader_access(s); smp_rmb(); return ret & ~1; } @@ -220,6 +234,17 @@ static inline void raw_write_seqcount_end(seqcount_t *s) } /* + * raw_write_seqcount_latch - redirect readers to even/odd copy + * @s: pointer to seqcount_t + */ +static inline void raw_write_seqcount_latch(seqcount_t *s) +{ + smp_wmb(); /* prior stores before incrementing "sequence" */ + s->sequence++; + smp_wmb(); /* increment "sequence" before following stores */ +} + +/* * Sequence counter only version assumes that callers are using their * own mutexing. */ diff --git a/include/linux/seqno-fence.h b/include/linux/seqno-fence.h new file mode 100644 index 000000000000..3d6003de4b0d --- /dev/null +++ b/include/linux/seqno-fence.h @@ -0,0 +1,116 @@ +/* + * seqno-fence, using a dma-buf to synchronize fencing + * + * Copyright (C) 2012 Texas Instruments + * Copyright (C) 2012 Canonical Ltd + * Authors: + * Rob Clark <robdclark@gmail.com> + * Maarten Lankhorst <maarten.lankhorst@canonical.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published by + * the Free Software Foundation. 
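raw_write_seqcount_latch and raw_read_seqcount (which deliberately leaves the LSB unmasked) support the latch pattern: the writer keeps two copies of the data and flips the sequence LSB around each update, readers pick the copy that is not currently being modified. The NMI-safe ktime_get_mono_fast_ns() accessor declared later in this series appears to rely on exactly this idiom. A sketch of the pattern; struct layout and field names are invented:

/* Illustrative latch user; assumes <linux/seqlock.h>. */
struct sketch_data {
	unsigned int	val;
};

struct sketch_latch {
	seqcount_t		seq;
	struct sketch_data	data[2];
};

static void sketch_latch_modify(struct sketch_latch *latch, unsigned int val)
{
	raw_write_seqcount_latch(&latch->seq);
	latch->data[0].val = val;		/* readers now use data[1] */
	raw_write_seqcount_latch(&latch->seq);
	latch->data[1].val = val;		/* readers now use data[0] */
}

static unsigned int sketch_latch_query(struct sketch_latch *latch)
{
	unsigned int seq, idx, val;

	do {
		seq = raw_read_seqcount(&latch->seq);
		idx = seq & 0x01;		/* LSB selects the stable copy */
		val = latch->data[idx].val;
	} while (read_seqcount_retry(&latch->seq, seq));

	return val;
}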
+ * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ + +#ifndef __LINUX_SEQNO_FENCE_H +#define __LINUX_SEQNO_FENCE_H + +#include <linux/fence.h> +#include <linux/dma-buf.h> + +enum seqno_fence_condition { + SEQNO_FENCE_WAIT_GEQUAL, + SEQNO_FENCE_WAIT_NONZERO +}; + +struct seqno_fence { + struct fence base; + + const struct fence_ops *ops; + struct dma_buf *sync_buf; + uint32_t seqno_ofs; + enum seqno_fence_condition condition; +}; + +extern const struct fence_ops seqno_fence_ops; + +/** + * to_seqno_fence - cast a fence to a seqno_fence + * @fence: fence to cast to a seqno_fence + * + * Returns NULL if the fence is not a seqno_fence, + * or the seqno_fence otherwise. + */ +static inline struct seqno_fence * +to_seqno_fence(struct fence *fence) +{ + if (fence->ops != &seqno_fence_ops) + return NULL; + return container_of(fence, struct seqno_fence, base); +} + +/** + * seqno_fence_init - initialize a seqno fence + * @fence: seqno_fence to initialize + * @lock: pointer to spinlock to use for fence + * @sync_buf: buffer containing the memory location to signal on + * @context: the execution context this fence is a part of + * @seqno_ofs: the offset within @sync_buf + * @seqno: the sequence # to signal on + * @ops: the fence_ops for operations on this seqno fence + * + * This function initializes a struct seqno_fence with passed parameters, + * and takes a reference on sync_buf which is released on fence destruction. + * + * A seqno_fence is a dma_fence which can complete in software when + * enable_signaling is called, but it also completes when + * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true + * + * The seqno_fence will take a refcount on the sync_buf until it's + * destroyed, but actual lifetime of sync_buf may be longer if one of the + * callers take a reference to it. + * + * Certain hardware have instructions to insert this type of wait condition + * in the command stream, so no intervention from software would be needed. + * This type of fence can be destroyed before completed, however a reference + * on the sync_buf dma-buf can be taken. It is encouraged to re-use the same + * dma-buf for sync_buf, since mapping or unmapping the sync_buf to the + * device's vm can be expensive. + * + * It is recommended for creators of seqno_fence to call fence_signal + * before destruction. This will prevent possible issues from wraparound at + * time of issue vs time of check, since users can check fence_is_signaled + * before submitting instructions for the hardware to wait on the fence. + * However, when ops.enable_signaling is not called, it doesn't have to be + * done as soon as possible, just before there's any real danger of seqno + * wraparound. 
+ */ +static inline void +seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock, + struct dma_buf *sync_buf, uint32_t context, + uint32_t seqno_ofs, uint32_t seqno, + enum seqno_fence_condition cond, + const struct fence_ops *ops) +{ + BUG_ON(!fence || !sync_buf || !ops); + BUG_ON(!ops->wait || !ops->enable_signaling || + !ops->get_driver_name || !ops->get_timeline_name); + + /* + * ops is used in fence_init for get_driver_name, so needs to be + * initialized first + */ + fence->ops = ops; + fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno); + get_dma_buf(sync_buf); + fence->sync_buf = sync_buf; + fence->seqno_ofs = seqno_ofs; + fence->condition = cond; +} + +#endif /* __LINUX_SEQNO_FENCE_H */ diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index af47a8af6024..f93649e22c43 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h @@ -74,8 +74,10 @@ struct uart_8250_port { struct list_head list; /* ports on this IRQ */ unsigned short capabilities; /* port capabilities */ unsigned short bugs; /* port bugs */ + bool fifo_bug; /* min RX trigger if enabled */ unsigned int tx_loadsz; /* transmit fifo load size */ unsigned char acr; + unsigned char fcr; unsigned char ier; unsigned char lcr; unsigned char mcr; @@ -100,6 +102,11 @@ struct uart_8250_port { void (*dl_write)(struct uart_8250_port *, int); }; +static inline struct uart_8250_port *up_to_u8250p(struct uart_port *up) +{ + return container_of(up, struct uart_8250_port, port); +} + int serial8250_register_8250_port(struct uart_8250_port *); void serial8250_unregister_port(int line); void serial8250_suspend_port(int line); diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 5bbb809ee197..cf3a1e789bf5 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -199,6 +199,8 @@ struct uart_port { unsigned char suspended; unsigned char irq_wake; unsigned char unused[2]; + struct attribute_group *attr_group; /* port specific attributes */ + const struct attribute_group **tty_groups; /* all attributes (serial core use only) */ void *private_data; /* generic platform data pointer */ }; diff --git a/include/linux/sfi_acpi.h b/include/linux/sfi_acpi.h index 4723bbfa1c26..a6e555cbe05c 100644 --- a/include/linux/sfi_acpi.h +++ b/include/linux/sfi_acpi.h @@ -63,8 +63,6 @@ #include <linux/sfi.h> #ifdef CONFIG_SFI -#include <acpi/acpi.h> /* FIXME: inclusion should be removed */ - extern int sfi_acpi_table_parse(char *signature, char *oem_id, char *oem_table_id, int (*handler)(struct acpi_table_header *)); @@ -78,7 +76,6 @@ static inline int __init acpi_sfi_table_parse(char *signature, return sfi_acpi_table_parse(signature, NULL, NULL, handler); } #else /* !CONFIG_SFI */ - static inline int sfi_acpi_table_parse(char *signature, char *oem_id, char *oem_table_id, int (*handler)(struct acpi_table_header *)) diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h index 8e1e036d6d45..64638b058076 100644 --- a/include/linux/sh_timer.h +++ b/include/linux/sh_timer.h @@ -2,11 +2,6 @@ #define __SH_TIMER_H__ struct sh_timer_config { - char *name; - long channel_offset; - int timer_bit; - unsigned long clockevent_rating; - unsigned long clocksource_rating; unsigned int channels_mask; }; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index ec89301ada41..11c270551d25 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -112,8 +112,7 @@ #define CHECKSUM_COMPLETE 2 #define CHECKSUM_PARTIAL 3 -#define SKB_DATA_ALIGN(X) (((X) + 
(SMP_CACHE_BYTES - 1)) & \ - ~(SMP_CACHE_BYTES - 1)) +#define SKB_DATA_ALIGN(X) ALIGN(X, SMP_CACHE_BYTES) #define SKB_WITH_OVERHEAD(X) \ ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define SKB_MAX_ORDER(X, ORDER) \ @@ -211,18 +210,9 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) * struct skb_shared_hwtstamps - hardware time stamps * @hwtstamp: hardware time stamp transformed into duration * since arbitrary point in time - * @syststamp: hwtstamp transformed to system time base * * Software time stamps generated by ktime_get_real() are stored in - * skb->tstamp. The relation between the different kinds of time - * stamps is as follows: - * - * syststamp and tstamp can be compared against each other in - * arbitrary combinations. The accuracy of a - * syststamp/tstamp/"syststamp from other device" comparison is - * limited by the accuracy of the transformation into system time - * base. This depends on the device driver and its underlying - * hardware. + * skb->tstamp. * * hwtstamps can only be compared against other hwtstamps from * the same device. @@ -232,7 +222,6 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) */ struct skb_shared_hwtstamps { ktime_t hwtstamp; - ktime_t syststamp; }; /* Definitions for tx_flags in struct skb_shared_info */ @@ -240,7 +229,7 @@ enum { /* generate hardware time stamp */ SKBTX_HW_TSTAMP = 1 << 0, - /* generate software time stamp */ + /* generate software time stamp when queueing packet to NIC */ SKBTX_SW_TSTAMP = 1 << 1, /* device driver is going to provide hardware time stamp */ @@ -258,8 +247,19 @@ enum { * all frags to avoid possible bad checksum */ SKBTX_SHARED_FRAG = 1 << 5, + + /* generate software time stamp when entering packet scheduling */ + SKBTX_SCHED_TSTAMP = 1 << 6, + + /* generate software timestamp on peer data acknowledgment */ + SKBTX_ACK_TSTAMP = 1 << 7, }; +#define SKBTX_ANY_SW_TSTAMP (SKBTX_SW_TSTAMP | \ + SKBTX_SCHED_TSTAMP | \ + SKBTX_ACK_TSTAMP) +#define SKBTX_ANY_TSTAMP (SKBTX_HW_TSTAMP | SKBTX_ANY_SW_TSTAMP) + /* * The callback notifies userspace to release buffers when skb DMA is done in * lower device, the skb last reference should be 0 when calling this. @@ -286,6 +286,7 @@ struct skb_shared_info { unsigned short gso_type; struct sk_buff *frag_list; struct skb_shared_hwtstamps hwtstamps; + u32 tskey; __be32 ip6_frag_id; /* @@ -455,6 +456,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1, * @ooo_okay: allow the mapping of a socket to a queue to be changed * @l4_hash: indicate hash is a canonical 4-tuple hash over transport * ports. 
+ * @sw_hash: indicates hash was computed in software stack * @wifi_acked_valid: wifi_acked was set * @wifi_acked: whether frame was acked on wifi or not * @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS @@ -562,6 +564,7 @@ struct sk_buff { __u8 pfmemalloc:1; __u8 ooo_okay:1; __u8 l4_hash:1; + __u8 sw_hash:1; __u8 wifi_acked_valid:1; __u8 wifi_acked:1; __u8 no_fcs:1; @@ -575,7 +578,7 @@ struct sk_buff { __u8 encap_hdr_csum:1; __u8 csum_valid:1; __u8 csum_complete_sw:1; - /* 3/5 bit hole (depending on ndisc_nodetype presence) */ + /* 2/4 bit hole (depending on ndisc_nodetype presence) */ kmemcheck_bitfield_end(flags2); #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL @@ -830,13 +833,14 @@ static inline void skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type) { skb->l4_hash = (type == PKT_HASH_TYPE_L4); + skb->sw_hash = 0; skb->hash = hash; } void __skb_get_hash(struct sk_buff *skb); static inline __u32 skb_get_hash(struct sk_buff *skb) { - if (!skb->l4_hash) + if (!skb->l4_hash && !skb->sw_hash) __skb_get_hash(skb); return skb->hash; @@ -850,6 +854,7 @@ static inline __u32 skb_get_hash_raw(const struct sk_buff *skb) static inline void skb_clear_hash(struct sk_buff *skb) { skb->hash = 0; + skb->sw_hash = 0; skb->l4_hash = 0; } @@ -862,6 +867,7 @@ static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb) static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from) { to->hash = from->hash; + to->sw_hash = from->sw_hash; to->l4_hash = from->l4_hash; }; @@ -2697,6 +2703,10 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb) void skb_complete_tx_timestamp(struct sk_buff *skb, struct skb_shared_hwtstamps *hwtstamps); +void __skb_tstamp_tx(struct sk_buff *orig_skb, + struct skb_shared_hwtstamps *hwtstamps, + struct sock *sk, int tstype); + /** * skb_tstamp_tx - queue clone of skb with send time stamps * @orig_skb: the original outgoing packet @@ -3005,7 +3015,7 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) return skb->queue_mapping != 0; } -u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb, +u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb, unsigned int num_tx_queues); static inline struct sec_path *skb_sec_path(struct sk_buff *skb) diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h new file mode 100644 index 000000000000..85b8ee67e937 --- /dev/null +++ b/include/linux/spi/cc2520.h @@ -0,0 +1,26 @@ +/* Header file for cc2520 radio driver + * + * Copyright (C) 2014 Varka Bhadram <varkab@cdac.in> + * Md.Jamal Mohiuddin <mjmohiuddin@cdac.in> + * P Sowjanya <sowjanyap@cdac.in> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
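The new sw_hash bit distinguishes hashes supplied by hardware or a driver via skb_set_hash() from ones the stack computed itself (presumably __skb_get_hash() now marks its result with sw_hash), so skb_get_hash() stops recomputing software hashes on every call while still not treating them as canonical L4 hashes. Driver-side usage is unchanged; an illustrative sketch with invented function names:

/* Hypothetical RX path of a NIC driver that receives a flow hash from HW. */
static void sketch_rx_set_hash(struct sk_buff *skb, u32 hw_hash, bool is_l4)
{
	skb_set_hash(skb, hw_hash,
		     is_l4 ? PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}

/* Later consumers, e.g. queue selection, no longer recompute the hash. */
static u32 sketch_pick_flow(struct sk_buff *skb)
{
	return skb_get_hash(skb);
}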
+ * + */ + +#ifndef __CC2520_H +#define __CC2520_H + +struct cc2520_platform_data { + int fifo; + int fifop; + int cca; + int sfd; + int reset; + int vreg; +}; + +#endif diff --git a/include/linux/spi/sh_msiof.h b/include/linux/spi/sh_msiof.h index 2e8db3d2d2e5..88a14d81c49e 100644 --- a/include/linux/spi/sh_msiof.h +++ b/include/linux/spi/sh_msiof.h @@ -5,6 +5,8 @@ struct sh_msiof_spi_info { int tx_fifo_override; int rx_fifo_override; u16 num_chipselect; + unsigned int dma_tx_id; + unsigned int dma_rx_id; }; #endif /* __SPI_SH_MSIOF_H__ */ diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index f9f931c89e3e..f7b9100686c3 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h @@ -345,6 +345,43 @@ #define SSB_SPROM4_TXPID5GH2_SHIFT 0 #define SSB_SPROM4_TXPID5GH3 0xFF00 #define SSB_SPROM4_TXPID5GH3_SHIFT 8 + +/* There are 4 blocks with power info sharing the same layout */ +#define SSB_SPROM4_PWR_INFO_CORE0 0x0080 +#define SSB_SPROM4_PWR_INFO_CORE1 0x00AE +#define SSB_SPROM4_PWR_INFO_CORE2 0x00DC +#define SSB_SPROM4_PWR_INFO_CORE3 0x010A + +#define SSB_SPROM4_2G_MAXP_ITSSI 0x00 /* 2 GHz ITSSI and 2 GHz Max Power */ +#define SSB_SPROM4_2G_MAXP 0x00FF +#define SSB_SPROM4_2G_ITSSI 0xFF00 +#define SSB_SPROM4_2G_ITSSI_SHIFT 8 +#define SSB_SPROM4_2G_PA_0 0x02 /* 2 GHz power amp */ +#define SSB_SPROM4_2G_PA_1 0x04 +#define SSB_SPROM4_2G_PA_2 0x06 +#define SSB_SPROM4_2G_PA_3 0x08 +#define SSB_SPROM4_5G_MAXP_ITSSI 0x0A /* 5 GHz ITSSI and 5.3 GHz Max Power */ +#define SSB_SPROM4_5G_MAXP 0x00FF +#define SSB_SPROM4_5G_ITSSI 0xFF00 +#define SSB_SPROM4_5G_ITSSI_SHIFT 8 +#define SSB_SPROM4_5GHL_MAXP 0x0C /* 5.2 GHz and 5.8 GHz Max Power */ +#define SSB_SPROM4_5GH_MAXP 0x00FF +#define SSB_SPROM4_5GL_MAXP 0xFF00 +#define SSB_SPROM4_5GL_MAXP_SHIFT 8 +#define SSB_SPROM4_5G_PA_0 0x0E /* 5.3 GHz power amp */ +#define SSB_SPROM4_5G_PA_1 0x10 +#define SSB_SPROM4_5G_PA_2 0x12 +#define SSB_SPROM4_5G_PA_3 0x14 +#define SSB_SPROM4_5GL_PA_0 0x16 /* 5.2 GHz power amp */ +#define SSB_SPROM4_5GL_PA_1 0x18 +#define SSB_SPROM4_5GL_PA_2 0x1A +#define SSB_SPROM4_5GL_PA_3 0x1C +#define SSB_SPROM4_5GH_PA_0 0x1E /* 5.8 GHz power amp */ +#define SSB_SPROM4_5GH_PA_1 0x20 +#define SSB_SPROM4_5GH_PA_2 0x22 +#define SSB_SPROM4_5GH_PA_3 0x24 + +/* TODO: Make it deprecated */ #define SSB_SPROM4_MAXP_BG 0x0080 /* Max Power BG in path 1 */ #define SSB_SPROM4_MAXP_BG_MASK 0x00FF /* Mask for Max Power BG */ #define SSB_SPROM4_ITSSI_BG 0xFF00 /* Mask for path 1 itssi_bg */ diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 6f27d4f957bd..cd63851b57f2 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -112,6 +112,8 @@ struct plat_stmmacenet_data { int riwt_off; int max_speed; int maxmtu; + int multicast_filter_bins; + int unicast_filter_entries; void (*fix_mac_speed)(void *priv, unsigned int speed); void (*bus_setup)(void __iomem *ioaddr); void *(*setup)(struct platform_device *pdev); diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index ad7dbe2cfecd..1a8959944c5f 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -236,7 +236,7 @@ void * rpc_malloc(struct rpc_task *, size_t); void rpc_free(void *); int rpciod_up(void); void rpciod_down(void); -int __rpc_wait_for_completion_task(struct rpc_task *task, int (*)(void *)); +int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *); #ifdef RPC_DEBUG struct net; void rpc_show_tasks(struct net *); diff --git a/include/linux/syscalls.h 
b/include/linux/syscalls.h index b0881a0ed322..701daff5d899 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -866,4 +866,9 @@ asmlinkage long sys_process_vm_writev(pid_t pid, asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type, unsigned long idx1, unsigned long idx2); asmlinkage long sys_finit_module(int fd, const char __user *uargs, int flags); +asmlinkage long sys_seccomp(unsigned int op, unsigned int flags, + const char __user *uargs); +asmlinkage long sys_getrandom(char __user *buf, size_t count, + unsigned int flags); + #endif diff --git a/include/linux/tcp.h b/include/linux/tcp.h index a0513210798f..fa5258f322e7 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h @@ -111,10 +111,7 @@ struct tcp_request_sock_ops; struct tcp_request_sock { struct inet_request_sock req; -#ifdef CONFIG_TCP_MD5SIG - /* Only used by TCP MD5 Signature so far. */ const struct tcp_request_sock_ops *af_specific; -#endif struct sock *listener; /* needed for TFO */ u32 rcv_isn; u32 snt_isn; diff --git a/include/linux/tick.h b/include/linux/tick.h index b84773cb9f4c..059052306831 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h @@ -12,6 +12,7 @@ #include <linux/hrtimer.h> #include <linux/context_tracking_state.h> #include <linux/cpumask.h> +#include <linux/sched.h> #ifdef CONFIG_GENERIC_CLOCKEVENTS @@ -162,6 +163,7 @@ static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; } #ifdef CONFIG_NO_HZ_FULL extern bool tick_nohz_full_running; extern cpumask_var_t tick_nohz_full_mask; +extern cpumask_var_t housekeeping_mask; static inline bool tick_nohz_full_enabled(void) { @@ -181,7 +183,13 @@ static inline bool tick_nohz_full_cpu(int cpu) extern void tick_nohz_init(void); extern void __tick_nohz_full_check(void); -extern void tick_nohz_full_kick(void); +extern void tick_nohz_full_kick_cpu(int cpu); + +static inline void tick_nohz_full_kick(void) +{ + tick_nohz_full_kick_cpu(smp_processor_id()); +} + extern void tick_nohz_full_kick_all(void); extern void __tick_nohz_task_switch(struct task_struct *tsk); #else @@ -189,11 +197,30 @@ static inline void tick_nohz_init(void) { } static inline bool tick_nohz_full_enabled(void) { return false; } static inline bool tick_nohz_full_cpu(int cpu) { return false; } static inline void __tick_nohz_full_check(void) { } +static inline void tick_nohz_full_kick_cpu(int cpu) { } static inline void tick_nohz_full_kick(void) { } static inline void tick_nohz_full_kick_all(void) { } static inline void __tick_nohz_task_switch(struct task_struct *tsk) { } #endif +static inline bool is_housekeeping_cpu(int cpu) +{ +#ifdef CONFIG_NO_HZ_FULL + if (tick_nohz_full_enabled()) + return cpumask_test_cpu(cpu, housekeeping_mask); +#endif + return true; +} + +static inline void housekeeping_affine(struct task_struct *t) +{ +#ifdef CONFIG_NO_HZ_FULL + if (tick_nohz_full_enabled()) + set_cpus_allowed_ptr(t, housekeeping_mask); + +#endif +} + static inline void tick_nohz_full_check(void) { if (tick_nohz_full_enabled()) diff --git a/include/linux/time.h b/include/linux/time.h index d5d229b2e5af..8c42cf8d2444 100644 --- a/include/linux/time.h +++ b/include/linux/time.h @@ -4,19 +4,10 @@ # include <linux/cache.h> # include <linux/seqlock.h> # include <linux/math64.h> -#include <uapi/linux/time.h> +# include <linux/time64.h> extern struct timezone sys_tz; -/* Parameters used to convert the timespec values: */ -#define MSEC_PER_SEC 1000L -#define USEC_PER_MSEC 1000L -#define NSEC_PER_USEC 1000L -#define NSEC_PER_MSEC 1000000L -#define USEC_PER_SEC 
1000000L -#define NSEC_PER_SEC 1000000000L -#define FSEC_PER_SEC 1000000000000000LL - #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) static inline int timespec_equal(const struct timespec *a, @@ -84,13 +75,6 @@ static inline struct timespec timespec_sub(struct timespec lhs, return ts_delta; } -#define KTIME_MAX ((s64)~((u64)1 << 63)) -#if (BITS_PER_LONG == 64) -# define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) -#else -# define KTIME_SEC_MAX LONG_MAX -#endif - /* * Returns true if the timespec is norm, false if denorm: */ @@ -115,27 +99,7 @@ static inline bool timespec_valid_strict(const struct timespec *ts) return true; } -extern bool persistent_clock_exist; - -static inline bool has_persistent_clock(void) -{ - return persistent_clock_exist; -} - -extern void read_persistent_clock(struct timespec *ts); -extern void read_boot_clock(struct timespec *ts); -extern int persistent_clock_is_local; -extern int update_persistent_clock(struct timespec now); -void timekeeping_init(void); -extern int timekeeping_suspended; - -unsigned long get_seconds(void); -struct timespec current_kernel_time(void); -struct timespec __current_kernel_time(void); /* does not take xtime_lock */ -struct timespec get_monotonic_coarse(void); -void get_xtime_and_monotonic_and_sleep_offset(struct timespec *xtim, - struct timespec *wtom, struct timespec *sleep); -void timekeeping_inject_sleeptime(struct timespec *delta); +extern struct timespec timespec_trunc(struct timespec t, unsigned gran); #define CURRENT_TIME (current_kernel_time()) #define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 }) @@ -153,33 +117,14 @@ void timekeeping_inject_sleeptime(struct timespec *delta); extern u32 (*arch_gettimeoffset)(void); #endif -extern void do_gettimeofday(struct timeval *tv); -extern int do_settimeofday(const struct timespec *tv); -extern int do_sys_settimeofday(const struct timespec *tv, - const struct timezone *tz); -#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) -extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); struct itimerval; extern int do_setitimer(int which, struct itimerval *value, struct itimerval *ovalue); -extern unsigned int alarm_setitimer(unsigned int seconds); extern int do_getitimer(int which, struct itimerval *value); -extern int __getnstimeofday(struct timespec *tv); -extern void getnstimeofday(struct timespec *tv); -extern void getrawmonotonic(struct timespec *ts); -extern void getnstime_raw_and_real(struct timespec *ts_raw, - struct timespec *ts_real); -extern void getboottime(struct timespec *ts); -extern void monotonic_to_bootbased(struct timespec *ts); -extern void get_monotonic_boottime(struct timespec *ts); -extern struct timespec timespec_trunc(struct timespec t, unsigned gran); -extern int timekeeping_valid_for_hres(void); -extern u64 timekeeping_max_deferment(void); -extern int timekeeping_inject_offset(struct timespec *ts); -extern s32 timekeeping_get_tai_offset(void); -extern void timekeeping_set_tai_offset(s32 tai_offset); -extern void timekeeping_clocktai(struct timespec *ts); +extern unsigned int alarm_setitimer(unsigned int seconds); + +extern long do_utimes(int dfd, const char __user *filename, struct timespec *times, int flags); struct tms; extern void do_sys_times(struct tms *); diff --git a/include/linux/time64.h b/include/linux/time64.h new file mode 100644 index 000000000000..a3831478d9cf --- /dev/null +++ b/include/linux/time64.h @@ -0,0 +1,190 @@ +#ifndef _LINUX_TIME64_H +#define _LINUX_TIME64_H + 
+#include <uapi/linux/time.h> + +typedef __s64 time64_t; + +/* + * This wants to go into uapi/linux/time.h once we agreed about the + * userspace interfaces. + */ +#if __BITS_PER_LONG == 64 +# define timespec64 timespec +#else +struct timespec64 { + time64_t tv_sec; /* seconds */ + long tv_nsec; /* nanoseconds */ +}; +#endif + +/* Parameters used to convert the timespec values: */ +#define MSEC_PER_SEC 1000L +#define USEC_PER_MSEC 1000L +#define NSEC_PER_USEC 1000L +#define NSEC_PER_MSEC 1000000L +#define USEC_PER_SEC 1000000L +#define NSEC_PER_SEC 1000000000L +#define FSEC_PER_SEC 1000000000000000LL + +/* Located here for timespec[64]_valid_strict */ +#define KTIME_MAX ((s64)~((u64)1 << 63)) +#define KTIME_SEC_MAX (KTIME_MAX / NSEC_PER_SEC) + +#if __BITS_PER_LONG == 64 + +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + return ts64; +} + +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + return ts; +} + +# define timespec64_equal timespec_equal +# define timespec64_compare timespec_compare +# define set_normalized_timespec64 set_normalized_timespec +# define timespec64_add_safe timespec_add_safe +# define timespec64_add timespec_add +# define timespec64_sub timespec_sub +# define timespec64_valid timespec_valid +# define timespec64_valid_strict timespec_valid_strict +# define timespec64_to_ns timespec_to_ns +# define ns_to_timespec64 ns_to_timespec +# define timespec64_add_ns timespec_add_ns + +#else + +static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) +{ + struct timespec ret; + + ret.tv_sec = (time_t)ts64.tv_sec; + ret.tv_nsec = ts64.tv_nsec; + return ret; +} + +static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) +{ + struct timespec64 ret; + + ret.tv_sec = ts.tv_sec; + ret.tv_nsec = ts.tv_nsec; + return ret; +} + +static inline int timespec64_equal(const struct timespec64 *a, + const struct timespec64 *b) +{ + return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec); +} + +/* + * lhs < rhs: return <0 + * lhs == rhs: return 0 + * lhs > rhs: return >0 + */ +static inline int timespec64_compare(const struct timespec64 *lhs, const struct timespec64 *rhs) +{ + if (lhs->tv_sec < rhs->tv_sec) + return -1; + if (lhs->tv_sec > rhs->tv_sec) + return 1; + return lhs->tv_nsec - rhs->tv_nsec; +} + +extern void set_normalized_timespec64(struct timespec64 *ts, time64_t sec, s64 nsec); + +/* + * timespec64_add_safe assumes both values are positive and checks for + * overflow. It will return TIME_T_MAX if the returned value would be + * smaller then either of the arguments. 
+ */ +extern struct timespec64 timespec64_add_safe(const struct timespec64 lhs, + const struct timespec64 rhs); + + +static inline struct timespec64 timespec64_add(struct timespec64 lhs, + struct timespec64 rhs) +{ + struct timespec64 ts_delta; + set_normalized_timespec64(&ts_delta, lhs.tv_sec + rhs.tv_sec, + lhs.tv_nsec + rhs.tv_nsec); + return ts_delta; +} + +/* + * sub = lhs - rhs, in normalized form + */ +static inline struct timespec64 timespec64_sub(struct timespec64 lhs, + struct timespec64 rhs) +{ + struct timespec64 ts_delta; + set_normalized_timespec64(&ts_delta, lhs.tv_sec - rhs.tv_sec, + lhs.tv_nsec - rhs.tv_nsec); + return ts_delta; +} + +/* + * Returns true if the timespec64 is norm, false if denorm: + */ +static inline bool timespec64_valid(const struct timespec64 *ts) +{ + /* Dates before 1970 are bogus */ + if (ts->tv_sec < 0) + return false; + /* Can't have more nanoseconds then a second */ + if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC) + return false; + return true; +} + +static inline bool timespec64_valid_strict(const struct timespec64 *ts) +{ + if (!timespec64_valid(ts)) + return false; + /* Disallow values that could overflow ktime_t */ + if ((unsigned long long)ts->tv_sec >= KTIME_SEC_MAX) + return false; + return true; +} + +/** + * timespec64_to_ns - Convert timespec64 to nanoseconds + * @ts: pointer to the timespec64 variable to be converted + * + * Returns the scalar nanosecond representation of the timespec64 + * parameter. + */ +static inline s64 timespec64_to_ns(const struct timespec64 *ts) +{ + return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec; +} + +/** + * ns_to_timespec64 - Convert nanoseconds to timespec64 + * @nsec: the nanoseconds value to be converted + * + * Returns the timespec64 representation of the nsec parameter. + */ +extern struct timespec64 ns_to_timespec64(const s64 nsec); + +/** + * timespec64_add_ns - Adds nanoseconds to a timespec64 + * @a: pointer to timespec64 to be incremented + * @ns: unsigned nanoseconds value to be added + * + * This must always be inlined because its used from the x86-64 vdso, + * which cannot call other kernel functions. + */ +static __always_inline void timespec64_add_ns(struct timespec64 *a, u64 ns) +{ + a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns); + a->tv_nsec = ns; +} + +#endif + +#endif /* _LINUX_TIME64_H */ diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index c1825eb436ed..95640dcd1899 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -10,77 +10,100 @@ #include <linux/jiffies.h> #include <linux/time.h> -/* Structure holding internal timekeeping values. */ -struct timekeeper { - /* Current clocksource used for timekeeping. */ +/** + * struct tk_read_base - base structure for timekeeping readout + * @clock: Current clocksource used for timekeeping. + * @read: Read function of @clock + * @mask: Bitmask for two's complement subtraction of non 64bit clocks + * @cycle_last: @clock cycle value at last update + * @mult: NTP adjusted multiplier for scaled math conversion + * @shift: Shift value for scaled math conversion + * @xtime_nsec: Shifted (fractional) nano seconds offset for readout + * @base_mono: ktime_t (nanoseconds) base time for readout + * + * This struct has size 56 byte on 64 bit. Together with a seqcount it + * occupies a single 64byte cache line. + * + * The struct is separate from struct timekeeper as it is also used + * for a fast NMI safe accessor to clock monotonic. 
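On 64-bit builds the timespec64 helpers above are plain aliases of the timespec ones, so callers can be converted mechanically. An illustrative pair of helpers (not part of the header) built only on the functions defined above:

/* Illustrative timespec64 usage. */
static s64 sketch_elapsed_ns(const struct timespec64 *start,
			     const struct timespec64 *end)
{
	struct timespec64 delta = timespec64_sub(*end, *start);

	return timespec64_to_ns(&delta);
}

static struct timespec sketch_to_legacy(struct timespec64 ts64)
{
	/* Truncates tv_sec on 32-bit; acceptable for pre-2038 dates. */
	return timespec64_to_timespec(ts64);
}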
+ */ +struct tk_read_base { struct clocksource *clock; - /* NTP adjusted clock multiplier */ + cycle_t (*read)(struct clocksource *cs); + cycle_t mask; + cycle_t cycle_last; u32 mult; - /* The shift value of the current clocksource. */ u32 shift; - /* Number of clock cycles in one NTP interval. */ + u64 xtime_nsec; + ktime_t base_mono; +}; + +/** + * struct timekeeper - Structure holding internal timekeeping values. + * @tkr: The readout base structure + * @xtime_sec: Current CLOCK_REALTIME time in seconds + * @wall_to_monotonic: CLOCK_REALTIME to CLOCK_MONOTONIC offset + * @offs_real: Offset clock monotonic -> clock realtime + * @offs_boot: Offset clock monotonic -> clock boottime + * @offs_tai: Offset clock monotonic -> clock tai + * @tai_offset: The current UTC to TAI offset in seconds + * @base_raw: Monotonic raw base time in ktime_t format + * @raw_time: Monotonic raw base time in timespec64 format + * @cycle_interval: Number of clock cycles in one NTP interval + * @xtime_interval: Number of clock shifted nano seconds in one NTP + * interval. + * @xtime_remainder: Shifted nano seconds left over when rounding + * @cycle_interval + * @raw_interval: Raw nano seconds accumulated per NTP interval. + * @ntp_error: Difference between accumulated time and NTP time in ntp + * shifted nano seconds. + * @ntp_error_shift: Shift conversion between clock shifted nano seconds and + * ntp shifted nano seconds. + * + * Note: For timespec(64) based interfaces wall_to_monotonic is what + * we need to add to xtime (or xtime corrected for sub jiffie times) + * to get to monotonic time. Monotonic is pegged at zero at system + * boot time, so wall_to_monotonic will be negative, however, we will + * ALWAYS keep the tv_nsec part positive so we can use the usual + * normalization. + * + * wall_to_monotonic is moved after resume from suspend for the + * monotonic time not to jump. We need to add total_sleep_time to + * wall_to_monotonic to get the real boot based time offset. + * + * wall_to_monotonic is no longer the boot time, getboottime must be + * used instead. + */ +struct timekeeper { + struct tk_read_base tkr; + u64 xtime_sec; + struct timespec64 wall_to_monotonic; + ktime_t offs_real; + ktime_t offs_boot; + ktime_t offs_tai; + s32 tai_offset; + ktime_t base_raw; + struct timespec64 raw_time; + + /* The following members are for timekeeping internal use */ cycle_t cycle_interval; - /* Last cycle value (also stored in clock->cycle_last) */ - cycle_t cycle_last; - /* Number of clock shifted nano seconds in one NTP interval. */ u64 xtime_interval; - /* shifted nano seconds left over when rounding cycle_interval */ s64 xtime_remainder; - /* Raw nano seconds accumulated per NTP interval. */ u32 raw_interval; - - /* Current CLOCK_REALTIME time in seconds */ - u64 xtime_sec; - /* Clock shifted nano seconds */ - u64 xtime_nsec; - + /* The ntp_tick_length() value currently being used. + * This cached copy ensures we consistently apply the tick + * length for an entire tick, as ntp_tick_length may change + * mid-tick, and we don't want to apply that new value to + * the tick in progress. + */ + u64 ntp_tick; /* Difference between accumulated time and NTP time in ntp * shifted nano seconds. */ s64 ntp_error; - /* Shift conversion between clock shifted nano seconds and - * ntp shifted nano seconds. */ u32 ntp_error_shift; - - /* - * wall_to_monotonic is what we need to add to xtime (or xtime corrected - * for sub jiffie times) to get to monotonic time. 
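Grouping the readout state into tk_read_base keeps everything a clock read needs next to the protecting seqcount in a single cache line. Roughly, a monotonic readout reduces to the conversion below; this is a simplified sketch based on the field descriptions above, ignoring the real code's overflow and accumulation helpers:

/* Simplified readout sketch using tk_read_base; not the actual kernel code. */
static ktime_t sketch_read_mono(struct tk_read_base *tkr)
{
	cycle_t now, delta;
	u64 nsec;

	now = tkr->read(tkr->clock);
	delta = (now - tkr->cycle_last) & tkr->mask;	/* two's complement wrap */
	nsec = (delta * tkr->mult + tkr->xtime_nsec) >> tkr->shift;

	return ktime_add_ns(tkr->base_mono, nsec);
}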
Monotonic is pegged - * at zero at system boot time, so wall_to_monotonic will be negative, - * however, we will ALWAYS keep the tv_nsec part positive so we can use - * the usual normalization. - * - * wall_to_monotonic is moved after resume from suspend for the - * monotonic time not to jump. We need to add total_sleep_time to - * wall_to_monotonic to get the real boot based time offset. - * - * - wall_to_monotonic is no longer the boot time, getboottime must be - * used instead. - */ - struct timespec wall_to_monotonic; - /* Offset clock monotonic -> clock realtime */ - ktime_t offs_real; - /* time spent in suspend */ - struct timespec total_sleep_time; - /* Offset clock monotonic -> clock boottime */ - ktime_t offs_boot; - /* The raw monotonic time for the CLOCK_MONOTONIC_RAW posix clock. */ - struct timespec raw_time; - /* The current UTC to TAI offset in seconds */ - s32 tai_offset; - /* Offset clock monotonic -> clock tai */ - ktime_t offs_tai; - + u32 ntp_err_mult; }; -static inline struct timespec tk_xtime(struct timekeeper *tk) -{ - struct timespec ts; - - ts.tv_sec = tk->xtime_sec; - ts.tv_nsec = (long)(tk->xtime_nsec >> tk->shift); - return ts; -} - - #ifdef CONFIG_GENERIC_TIME_VSYSCALL extern void update_vsyscall(struct timekeeper *tk); @@ -89,17 +112,10 @@ extern void update_vsyscall_tz(void); #elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD) extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm, - struct clocksource *c, u32 mult); + struct clocksource *c, u32 mult, + cycle_t cycle_last); extern void update_vsyscall_tz(void); -static inline void update_vsyscall(struct timekeeper *tk) -{ - struct timespec xt; - - xt = tk_xtime(tk); - update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult); -} - #else static inline void update_vsyscall(struct timekeeper *tk) diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h new file mode 100644 index 000000000000..1caa6b04fdc5 --- /dev/null +++ b/include/linux/timekeeping.h @@ -0,0 +1,209 @@ +#ifndef _LINUX_TIMEKEEPING_H +#define _LINUX_TIMEKEEPING_H + +/* Included from linux/ktime.h */ + +void timekeeping_init(void); +extern int timekeeping_suspended; + +/* + * Get and set timeofday + */ +extern void do_gettimeofday(struct timeval *tv); +extern int do_settimeofday(const struct timespec *tv); +extern int do_sys_settimeofday(const struct timespec *tv, + const struct timezone *tz); + +/* + * Kernel time accessors + */ +unsigned long get_seconds(void); +struct timespec current_kernel_time(void); +/* does not take xtime_lock */ +struct timespec __current_kernel_time(void); + +/* + * timespec based interfaces + */ +struct timespec get_monotonic_coarse(void); +extern void getrawmonotonic(struct timespec *ts); +extern void ktime_get_ts64(struct timespec64 *ts); + +extern int __getnstimeofday64(struct timespec64 *tv); +extern void getnstimeofday64(struct timespec64 *tv); + +#if BITS_PER_LONG == 64 +static inline int __getnstimeofday(struct timespec *ts) +{ + return __getnstimeofday64(ts); +} + +static inline void getnstimeofday(struct timespec *ts) +{ + getnstimeofday64(ts); +} + +static inline void ktime_get_ts(struct timespec *ts) +{ + ktime_get_ts64(ts); +} + +static inline void ktime_get_real_ts(struct timespec *ts) +{ + getnstimeofday64(ts); +} + +#else +static inline int __getnstimeofday(struct timespec *ts) +{ + struct timespec64 ts64; + int ret = __getnstimeofday64(&ts64); + + *ts = timespec64_to_timespec(ts64); + return ret; +} + +static inline void getnstimeofday(struct timespec *ts) +{ + 
struct timespec64 ts64; + + getnstimeofday64(&ts64); + *ts = timespec64_to_timespec(ts64); +} + +static inline void ktime_get_ts(struct timespec *ts) +{ + struct timespec64 ts64; + + ktime_get_ts64(&ts64); + *ts = timespec64_to_timespec(ts64); +} + +static inline void ktime_get_real_ts(struct timespec *ts) +{ + struct timespec64 ts64; + + getnstimeofday64(&ts64); + *ts = timespec64_to_timespec(ts64); +} +#endif + +extern void getboottime(struct timespec *ts); + +#define do_posix_clock_monotonic_gettime(ts) ktime_get_ts(ts) +#define ktime_get_real_ts64(ts) getnstimeofday64(ts) + +/* + * ktime_t based interfaces + */ + +enum tk_offsets { + TK_OFFS_REAL, + TK_OFFS_BOOT, + TK_OFFS_TAI, + TK_OFFS_MAX, +}; + +extern ktime_t ktime_get(void); +extern ktime_t ktime_get_with_offset(enum tk_offsets offs); +extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs); +extern ktime_t ktime_get_raw(void); + +/** + * ktime_get_real - get the real (wall-) time in ktime_t format + */ +static inline ktime_t ktime_get_real(void) +{ + return ktime_get_with_offset(TK_OFFS_REAL); +} + +/** + * ktime_get_boottime - Returns monotonic time since boot in ktime_t format + * + * This is similar to CLOCK_MONTONIC/ktime_get, but also includes the + * time spent in suspend. + */ +static inline ktime_t ktime_get_boottime(void) +{ + return ktime_get_with_offset(TK_OFFS_BOOT); +} + +/** + * ktime_get_clocktai - Returns the TAI time of day in ktime_t format + */ +static inline ktime_t ktime_get_clocktai(void) +{ + return ktime_get_with_offset(TK_OFFS_TAI); +} + +/** + * ktime_mono_to_real - Convert monotonic time to clock realtime + */ +static inline ktime_t ktime_mono_to_real(ktime_t mono) +{ + return ktime_mono_to_any(mono, TK_OFFS_REAL); +} + +static inline u64 ktime_get_ns(void) +{ + return ktime_to_ns(ktime_get()); +} + +static inline u64 ktime_get_real_ns(void) +{ + return ktime_to_ns(ktime_get_real()); +} + +static inline u64 ktime_get_boot_ns(void) +{ + return ktime_to_ns(ktime_get_boottime()); +} + +static inline u64 ktime_get_raw_ns(void) +{ + return ktime_to_ns(ktime_get_raw()); +} + +extern u64 ktime_get_mono_fast_ns(void); + +/* + * Timespec interfaces utilizing the ktime based ones + */ +static inline void get_monotonic_boottime(struct timespec *ts) +{ + *ts = ktime_to_timespec(ktime_get_boottime()); +} + +static inline void timekeeping_clocktai(struct timespec *ts) +{ + *ts = ktime_to_timespec(ktime_get_clocktai()); +} + +/* + * RTC specific + */ +extern void timekeeping_inject_sleeptime(struct timespec *delta); + +/* + * PPS accessor + */ +extern void getnstime_raw_and_real(struct timespec *ts_raw, + struct timespec *ts_real); + +/* + * Persistent clock related interfaces + */ +extern bool persistent_clock_exist; +extern int persistent_clock_is_local; + +static inline bool has_persistent_clock(void) +{ + return persistent_clock_exist; +} + +extern void read_persistent_clock(struct timespec *ts); +extern void read_boot_clock(struct timespec *ts); +extern int update_persistent_clock(struct timespec now); + + +#endif diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h index d3b57fa12225..bd36ce431e32 100644 --- a/include/linux/timerfd.h +++ b/include/linux/timerfd.h @@ -11,6 +11,9 @@ /* For O_CLOEXEC and O_NONBLOCK */ #include <linux/fcntl.h> +/* For _IO helpers */ +#include <linux/ioctl.h> + /* * CAREFUL: Check include/asm-generic/fcntl.h when defining * new flags, since they might collide with O_* ones. We want @@ -29,4 +32,6 @@ /* Flags for timerfd_settime. 
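The ktime_t based accessors turn the common "how many nanoseconds elapsed" pattern into a plain u64 subtraction instead of a timespec round trip, while the timespec wrappers keep legacy callers working. An illustrative use of the accessors declared above:

/* Measure how long an operation took, including time spent in suspend. */
static u64 sketch_measure_boot_ns(void (*op)(void))
{
	u64 start = ktime_get_boot_ns();

	op();
	return ktime_get_boot_ns() - start;
}

/* Old-style timespec consumers keep working via the wrappers. */
static void sketch_legacy(struct timespec *ts)
{
	get_monotonic_boottime(ts);	/* now built on ktime_get_boottime() */
}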
*/ #define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET) +#define TFD_IOC_SET_TICKS _IOW('T', 0, u64) + #endif /* _LINUX_TIMERFD_H */ diff --git a/include/linux/tpm.h b/include/linux/tpm.h index fff1d0976f80..8350c538b486 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h @@ -39,6 +39,9 @@ struct tpm_class_ops { int (*send) (struct tpm_chip *chip, u8 *buf, size_t len); void (*cancel) (struct tpm_chip *chip); u8 (*status) (struct tpm_chip *chip); + bool (*update_timeouts)(struct tpm_chip *chip, + unsigned long *timeout_cap); + }; #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h index 136116924d8d..ea6c9dea79e3 100644 --- a/include/linux/trace_seq.h +++ b/include/linux/trace_seq.h @@ -25,6 +25,21 @@ trace_seq_init(struct trace_seq *s) s->full = 0; } +/** + * trace_seq_buffer_ptr - return pointer to next location in buffer + * @s: trace sequence descriptor + * + * Returns the pointer to the buffer where the next write to + * the buffer will happen. This is useful to save the location + * that is about to be written to and then return the result + * of that write. + */ +static inline unsigned char * +trace_seq_buffer_ptr(struct trace_seq *s) +{ + return s->buffer + s->len; +} + /* * Currently only defined when tracing is enabled. */ @@ -36,14 +51,13 @@ int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); extern int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); -extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, - size_t cnt); +extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, + int cnt); extern int trace_seq_puts(struct trace_seq *s, const char *str); extern int trace_seq_putc(struct trace_seq *s, unsigned char c); -extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len); +extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, - size_t len); -extern void *trace_seq_reserve(struct trace_seq *s, size_t len); + unsigned int len); extern int trace_seq_path(struct trace_seq *s, const struct path *path); extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, @@ -71,8 +85,8 @@ static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) { return 0; } -static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, - size_t cnt) +static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, + int cnt) { return 0; } @@ -85,19 +99,15 @@ static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) return 0; } static inline int -trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len) +trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) { return 0; } static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, - size_t len) + unsigned int len) { return 0; } -static inline void *trace_seq_reserve(struct trace_seq *s, size_t len) -{ - return NULL; -} static inline int trace_seq_path(struct trace_seq *s, const struct path *path) { return 0; diff --git a/include/linux/tty.h b/include/linux/tty.h index 1c3316a47d7e..84132942902a 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h @@ -477,13 +477,11 @@ extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file, unsigned int cmd, unsigned 
long arg); extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg); extern void tty_default_fops(struct file_operations *fops); -extern struct tty_struct *alloc_tty_struct(void); +extern struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx); extern int tty_alloc_file(struct file *file); extern void tty_add_file(struct tty_struct *tty, struct file *file); extern void tty_free_file(struct file *file); extern void free_tty_struct(struct tty_struct *tty); -extern void initialize_tty_struct(struct tty_struct *tty, - struct tty_driver *driver, int idx); extern void deinitialize_tty_struct(struct tty_struct *tty); extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx); extern int tty_release(struct inode *inode, struct file *filp); diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index 756a60989294..e48c608a8fa8 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h @@ -35,14 +35,14 @@ * This routine is mandatory; if this routine is not filled in, * the attempted open will fail with ENODEV. * - * Required method. - * + * Required method. Called with tty lock held. + * * void (*close)(struct tty_struct * tty, struct file * filp); * * This routine is called when a particular tty device is closed. * Note: called even if the corresponding open() failed. * - * Required method. + * Required method. Called with tty lock held. * * void (*shutdown)(struct tty_struct * tty); * @@ -172,6 +172,8 @@ * * Optional: * + * Called with tty lock held. + * * int (*break_ctl)(struct tty_struct *tty, int state); * * This optional routine requests the tty driver to turn on or diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 7373203140e7..c330f5ef42cf 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h @@ -386,6 +386,21 @@ struct usb_composite_driver { extern int usb_composite_probe(struct usb_composite_driver *driver); extern void usb_composite_unregister(struct usb_composite_driver *driver); + +/** + * module_usb_composite_driver() - Helper macro for registering a USB gadget + * composite driver + * @__usb_composite_driver: usb_composite_driver struct + * + * Helper macro for USB gadget composite drivers which do not do anything + * special in module init/exit. This eliminates a lot of boilerplate. Each + * module may only use this macro once, and calling it replaces module_init() + * and module_exit() + */ +#define module_usb_composite_driver(__usb_composite_driver) \ + module_driver(__usb_composite_driver, usb_composite_probe, \ + usb_composite_unregister) + extern void usb_composite_setup_continue(struct usb_composite_dev *cdev); extern int composite_dev_prepare(struct usb_composite_driver *composite, struct usb_composite_dev *cdev); diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h index 52f944dfe2fd..55a17b188daa 100644 --- a/include/linux/usb/quirks.h +++ b/include/linux/usb/quirks.h @@ -30,4 +30,15 @@ descriptor */ #define USB_QUIRK_DELAY_INIT 0x00000040 +/* + * For high speed and super speed interupt endpoints, the USB 2.0 and + * USB 3.0 spec require the interval in microframes + * (1 microframe = 125 microseconds) to be calculated as + * interval = 2 ^ (bInterval-1). + * + * Devices with this quirk report their bInterval as the result of this + * calculation instead of the exponent variable used in the calculation. 
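The USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL comment above rests on the spec formula interval = 2^(bInterval-1) microframes; a quirky device reports the interval itself, so the host has to map it back to the exponent. The helper below is a hypothetical sketch of that mapping, not the USB core's actual quirk handling:

/* Hypothetical illustration of undoing the quirk described above: map a
 * linear bInterval (in microframes) back to the exponent encoding that
 * the USB 2.0/3.0 specs require.  Not the in-tree implementation.
 */
#include <linux/log2.h>
#include <linux/types.h>

static u8 example_linear_to_exponent_binterval(u8 binterval_uframes)
{
	if (!binterval_uframes)
		return 1;
	/* e.g. a device reporting 8 microframes (1 ms) maps back to an
	 * exponent of order_base_2(8) + 1 = 4, since 2^(4-1) = 8.
	 */
	return order_base_2(binterval_uframes) + 1;
}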
+ */ +#define USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL 0x00000080 + #endif /* __LINUX_USB_QUIRKS_H */ diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index e452ba6ec6bd..d5952bb66752 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h @@ -153,6 +153,9 @@ struct renesas_usbhs_driver_param { */ int pio_dma_border; /* default is 64byte */ + u32 type; + u32 enable_gpio; + /* * option: */ @@ -160,6 +163,9 @@ struct renesas_usbhs_driver_param { u32 has_sudmac:1; /* for SUDMAC */ }; +#define USBHS_TYPE_R8A7790 1 +#define USBHS_TYPE_R8A7791 2 + /* * option: * diff --git a/include/linux/usb/usb338x.h b/include/linux/usb/usb338x.h new file mode 100644 index 000000000000..f92eb635b9d3 --- /dev/null +++ b/include/linux/usb/usb338x.h @@ -0,0 +1,199 @@ +/* + * USB 338x super/high/full speed USB device controller. + * Unlike many such controllers, this one talks PCI. + * + * Copyright (C) 2002 NetChip Technology, Inc. (http://www.netchip.com) + * Copyright (C) 2003 David Brownell + * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __LINUX_USB_USB338X_H +#define __LINUX_USB_USB338X_H + +#include <linux/usb/net2280.h> + +/* + * Extra defined bits for net2280 registers + */ +#define SCRATCH 0x0b + +#define DEFECT7374_FSM_FIELD 28 +#define SUPER_SPEED 8 +#define DMA_REQUEST_OUTSTANDING 5 +#define DMA_PAUSE_DONE_INTERRUPT 26 +#define SET_ISOCHRONOUS_DELAY 24 +#define SET_SEL 22 +#define SUPER_SPEED_MODE 8 + +/*ep_cfg*/ +#define MAX_BURST_SIZE 24 +#define EP_FIFO_BYTE_COUNT 16 +#define IN_ENDPOINT_ENABLE 14 +#define IN_ENDPOINT_TYPE 12 +#define OUT_ENDPOINT_ENABLE 10 +#define OUT_ENDPOINT_TYPE 8 + +struct usb338x_usb_ext_regs { + u32 usbclass; +#define DEVICE_PROTOCOL 16 +#define DEVICE_SUB_CLASS 8 +#define DEVICE_CLASS 0 + u32 ss_sel; +#define U2_SYSTEM_EXIT_LATENCY 8 +#define U1_SYSTEM_EXIT_LATENCY 0 + u32 ss_del; +#define U2_DEVICE_EXIT_LATENCY 8 +#define U1_DEVICE_EXIT_LATENCY 0 + u32 usb2lpm; +#define USB_L1_LPM_HIRD 2 +#define USB_L1_LPM_REMOTE_WAKE 1 +#define USB_L1_LPM_SUPPORT 0 + u32 usb3belt; +#define BELT_MULTIPLIER 10 +#define BEST_EFFORT_LATENCY_TOLERANCE 0 + u32 usbctl2; +#define LTM_ENABLE 7 +#define U2_ENABLE 6 +#define U1_ENABLE 5 +#define FUNCTION_SUSPEND 4 +#define USB3_CORE_ENABLE 3 +#define USB2_CORE_ENABLE 2 +#define SERIAL_NUMBER_STRING_ENABLE 0 + u32 in_timeout; +#define GPEP3_TIMEOUT 19 +#define GPEP2_TIMEOUT 18 +#define GPEP1_TIMEOUT 17 +#define GPEP0_TIMEOUT 16 +#define GPEP3_TIMEOUT_VALUE 13 +#define GPEP3_TIMEOUT_ENABLE 12 +#define GPEP2_TIMEOUT_VALUE 9 +#define GPEP2_TIMEOUT_ENABLE 8 +#define GPEP1_TIMEOUT_VALUE 5 +#define GPEP1_TIMEOUT_ENABLE 4 +#define GPEP0_TIMEOUT_VALUE 1 +#define GPEP0_TIMEOUT_ENABLE 0 + u32 isodelay; +#define ISOCHRONOUS_DELAY 0 +} __packed; + +struct usb338x_fifo_regs { + /* offset 0x0500, 0x0520, 0x0540, 0x0560, 0x0580 */ + u32 ep_fifo_size_base; +#define IN_FIFO_BASE_ADDRESS 22 +#define IN_FIFO_SIZE 16 +#define OUT_FIFO_BASE_ADDRESS 6 +#define OUT_FIFO_SIZE 0 + 
u32 ep_fifo_out_wrptr; + u32 ep_fifo_out_rdptr; + u32 ep_fifo_in_wrptr; + u32 ep_fifo_in_rdptr; + u32 unused[3]; +} __packed; + + +/* Link layer */ +struct usb338x_ll_regs { + /* offset 0x700 */ + u32 ll_ltssm_ctrl1; + u32 ll_ltssm_ctrl2; + u32 ll_ltssm_ctrl3; + u32 unused[2]; + u32 ll_general_ctrl0; + u32 ll_general_ctrl1; +#define PM_U3_AUTO_EXIT 29 +#define PM_U2_AUTO_EXIT 28 +#define PM_U1_AUTO_EXIT 27 +#define PM_FORCE_U2_ENTRY 26 +#define PM_FORCE_U1_ENTRY 25 +#define PM_LGO_COLLISION_SEND_LAU 24 +#define PM_DIR_LINK_REJECT 23 +#define PM_FORCE_LINK_ACCEPT 22 +#define PM_DIR_ENTRY_U3 20 +#define PM_DIR_ENTRY_U2 19 +#define PM_DIR_ENTRY_U1 18 +#define PM_U2_ENABLE 17 +#define PM_U1_ENABLE 16 +#define SKP_THRESHOLD_ADJUST_FMW 8 +#define RESEND_DPP_ON_LRTY_FMW 7 +#define DL_BIT_VALUE_FMW 6 +#define FORCE_DL_BIT 5 + u32 ll_general_ctrl2; +#define SELECT_INVERT_LANE_POLARITY 7 +#define FORCE_INVERT_LANE_POLARITY 6 + u32 ll_general_ctrl3; + u32 ll_general_ctrl4; + u32 ll_error_gen; +} __packed; + +struct usb338x_ll_lfps_regs { + /* offset 0x748 */ + u32 ll_lfps_5; +#define TIMER_LFPS_6US 16 + u32 ll_lfps_6; +#define TIMER_LFPS_80US 0 +} __packed; + +struct usb338x_ll_tsn_regs { + /* offset 0x77C */ + u32 ll_tsn_counters_2; +#define HOT_TX_NORESET_TS2 24 + u32 ll_tsn_counters_3; +#define HOT_RX_RESET_TS2 0 +} __packed; + +struct usb338x_ll_chi_regs { + /* offset 0x79C */ + u32 ll_tsn_chicken_bit; +#define RECOVERY_IDLE_TO_RECOVER_FMW 3 +} __packed; + +/* protocol layer */ +struct usb338x_pl_regs { + /* offset 0x800 */ + u32 pl_reg_1; + u32 pl_reg_2; + u32 pl_reg_3; + u32 pl_reg_4; + u32 pl_ep_ctrl; + /* Protocol Layer Endpoint Control*/ +#define PL_EP_CTRL 0x810 +#define ENDPOINT_SELECT 0 + /* [4:0] */ +#define EP_INITIALIZED 16 +#define SEQUENCE_NUMBER_RESET 17 +#define CLEAR_ACK_ERROR_CODE 20 + u32 pl_reg_6; + u32 pl_reg_7; + u32 pl_reg_8; + u32 pl_ep_status_1; + /* Protocol Layer Endpoint Status 1*/ +#define PL_EP_STATUS_1 0x820 +#define STATE 16 +#define ACK_GOOD_NORMAL 0x11 +#define ACK_GOOD_MORE_ACKS_TO_COME 0x16 + u32 pl_ep_status_2; + u32 pl_ep_status_3; + /* Protocol Layer Endpoint Status 3*/ +#define PL_EP_STATUS_3 0x828 +#define SEQUENCE_NUMBER 0 + u32 pl_ep_status_4; + /* Protocol Layer Endpoint Status 4*/ +#define PL_EP_STATUS_4 0x82c + u32 pl_ep_cfg_4; + /* Protocol Layer Endpoint Configuration 4*/ +#define PL_EP_CFG_4 0x830 +#define NON_CTRL_IN_TOLERATE_BAD_DIR 6 +} __packed; + +#endif /* __LINUX_USB_USB338X_H */ diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h index 0662e98fef72..26088feb6608 100644 --- a/include/linux/usb/usbnet.h +++ b/include/linux/usb/usbnet.h @@ -148,6 +148,9 @@ struct driver_info { struct sk_buff *(*tx_fixup)(struct usbnet *dev, struct sk_buff *skb, gfp_t flags); + /* recover from timeout */ + void (*recover)(struct usbnet *dev); + /* early initialization code, can sleep. This is for minidrivers * having 'subminidrivers' that need to do extra initialization * right after minidriver have initialized hardware. */ diff --git a/include/linux/usb/xhci_pdriver.h b/include/linux/usb/xhci_pdriver.h new file mode 100644 index 000000000000..376654b5b0f7 --- /dev/null +++ b/include/linux/usb/xhci_pdriver.h @@ -0,0 +1,27 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY + * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * for more details. + * + */ + +#ifndef __USB_CORE_XHCI_PDRIVER_H +#define __USB_CORE_XHCI_PDRIVER_H + +/** + * struct usb_xhci_pdata - platform_data for generic xhci platform driver + * + * @usb3_lpm_capable: determines if this xhci platform supports USB3 + * LPM capability + * + */ +struct usb_xhci_pdata { + unsigned usb3_lpm_capable:1; +}; + +#endif /* __USB_CORE_XHCI_PDRIVER_H */ diff --git a/include/linux/verify_pefile.h b/include/linux/verify_pefile.h new file mode 100644 index 000000000000..ac34819214f9 --- /dev/null +++ b/include/linux/verify_pefile.h @@ -0,0 +1,18 @@ +/* Signed PE file verification + * + * Copyright (C) 2014 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#ifndef _LINUX_VERIFY_PEFILE_H +#define _LINUX_VERIFY_PEFILE_H + +extern int verify_pefile_signature(const void *pebuf, unsigned pelen, + struct key *trusted_keyring, bool *_trusted); + +#endif /* _LINUX_VERIFY_PEFILE_H */ diff --git a/include/linux/wait.h b/include/linux/wait.h index bd68819f0815..6fb1ba5f9b2f 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -25,6 +25,7 @@ struct wait_bit_key { void *flags; int bit_nr; #define WAIT_ATOMIC_T_BIT_NR -1 + unsigned long private; }; struct wait_bit_queue { @@ -141,18 +142,19 @@ __remove_wait_queue(wait_queue_head_t *head, wait_queue_t *old) list_del(&old->task_list); } +typedef int wait_bit_action_f(struct wait_bit_key *); void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key); void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key); void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr, void *key); void __wake_up_locked(wait_queue_head_t *q, unsigned int mode, int nr); void __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr); void __wake_up_bit(wait_queue_head_t *, void *, int); -int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); -int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, int (*)(void *), unsigned); +int __wait_on_bit(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned); +int __wait_on_bit_lock(wait_queue_head_t *, struct wait_bit_queue *, wait_bit_action_f *, unsigned); void wake_up_bit(void *, int); void wake_up_atomic_t(atomic_t *); -int out_of_line_wait_on_bit(void *, int, int (*)(void *), unsigned); -int out_of_line_wait_on_bit_lock(void *, int, int (*)(void *), unsigned); +int out_of_line_wait_on_bit(void *, int, wait_bit_action_f *, unsigned); +int out_of_line_wait_on_bit_lock(void *, int, wait_bit_action_f *, unsigned); int out_of_line_wait_on_atomic_t(atomic_t *, int (*)(atomic_t *), unsigned); wait_queue_head_t *bit_waitqueue(void *, int); @@ -854,11 +856,14 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); (wait)->flags = 0; \ } while (0) + +extern int bit_wait(struct wait_bit_key *); +extern int bit_wait_io(struct wait_bit_key *); + /** * wait_on_bit - wait for a bit to be cleared * @word: the word being waited on, a 
kernel virtual address * @bit: the bit of the word being waited on - * @action: the function used to sleep, which may take special actions * @mode: the task state to sleep in * * There is a standard hashed waitqueue table for generic use. This @@ -867,9 +872,62 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); * call wait_on_bit() in threads waiting for the bit to clear. * One uses wait_on_bit() where one is waiting for the bit to clear, * but has no intention of setting it. + * Returned value will be zero if the bit was cleared, or non-zero + * if the process received a signal and the mode permitted wakeup + * on that signal. + */ +static inline int +wait_on_bit(void *word, int bit, unsigned mode) +{ + if (!test_bit(bit, word)) + return 0; + return out_of_line_wait_on_bit(word, bit, + bit_wait, + mode); +} + +/** + * wait_on_bit_io - wait for a bit to be cleared + * @word: the word being waited on, a kernel virtual address + * @bit: the bit of the word being waited on + * @mode: the task state to sleep in + * + * Use the standard hashed waitqueue table to wait for a bit + * to be cleared. This is similar to wait_on_bit(), but calls + * io_schedule() instead of schedule() for the actual waiting. + * + * Returned value will be zero if the bit was cleared, or non-zero + * if the process received a signal and the mode permitted wakeup + * on that signal. + */ +static inline int +wait_on_bit_io(void *word, int bit, unsigned mode) +{ + if (!test_bit(bit, word)) + return 0; + return out_of_line_wait_on_bit(word, bit, + bit_wait_io, + mode); +} + +/** + * wait_on_bit_action - wait for a bit to be cleared + * @word: the word being waited on, a kernel virtual address + * @bit: the bit of the word being waited on + * @action: the function used to sleep, which may take special actions + * @mode: the task state to sleep in + * + * Use the standard hashed waitqueue table to wait for a bit + * to be cleared, and allow the waiting action to be specified. + * This is like wait_on_bit() but allows fine control of how the waiting + * is done. + * + * Returned value will be zero if the bit was cleared, or non-zero + * if the process received a signal and the mode permitted wakeup + * on that signal. */ static inline int -wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode) +wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) { if (!test_bit(bit, word)) return 0; @@ -880,7 +938,6 @@ wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode) * wait_on_bit_lock - wait for a bit to be cleared, when wanting to set it * @word: the word being waited on, a kernel virtual address * @bit: the bit of the word being waited on - * @action: the function used to sleep, which may take special actions * @mode: the task state to sleep in * * There is a standard hashed waitqueue table for generic use. This @@ -891,9 +948,61 @@ wait_on_bit(void *word, int bit, int (*action)(void *), unsigned mode) * wait_on_bit() in threads waiting to be able to set the bit. * One uses wait_on_bit_lock() where one is waiting for the bit to * clear with the intention of setting it, and when done, clearing it. + * + * Returns zero if the bit was (eventually) found to be clear and was + * set. Returns non-zero if a signal was delivered to the process and + * the @mode allows that signal to wake the process. 
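With the reworked wait_on_bit() above, the common case no longer passes a sleep callback; bit_wait() (or bit_wait_io() via wait_on_bit_io()) is supplied internally. A minimal usage sketch, with the structure and bit number invented for illustration:

/* Minimal sketch of the new wait_on_bit()/wake_up_bit() pairing; the
 * struct and EXAMPLE_BUSY_BIT below are invented, not from this patch.
 */
#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait.h>

#define EXAMPLE_BUSY_BIT	0

struct example_dev {
	unsigned long flags;
};

static int example_wait_idle(struct example_dev *dev)
{
	/* Sleeps via bit_wait() until EXAMPLE_BUSY_BIT is cleared; returns
	 * non-zero if a signal arrived and the mode allows wakeup on it.
	 */
	return wait_on_bit(&dev->flags, EXAMPLE_BUSY_BIT, TASK_INTERRUPTIBLE);
}

static void example_mark_idle(struct example_dev *dev)
{
	clear_bit(EXAMPLE_BUSY_BIT, &dev->flags);
	/* clear_bit() does not imply a barrier; order it before the wakeup. */
	smp_mb__after_atomic();
	wake_up_bit(&dev->flags, EXAMPLE_BUSY_BIT);
}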
+ */ +static inline int +wait_on_bit_lock(void *word, int bit, unsigned mode) +{ + if (!test_and_set_bit(bit, word)) + return 0; + return out_of_line_wait_on_bit_lock(word, bit, bit_wait, mode); +} + +/** + * wait_on_bit_lock_io - wait for a bit to be cleared, when wanting to set it + * @word: the word being waited on, a kernel virtual address + * @bit: the bit of the word being waited on + * @mode: the task state to sleep in + * + * Use the standard hashed waitqueue table to wait for a bit + * to be cleared and then to atomically set it. This is similar + * to wait_on_bit(), but calls io_schedule() instead of schedule() + * for the actual waiting. + * + * Returns zero if the bit was (eventually) found to be clear and was + * set. Returns non-zero if a signal was delivered to the process and + * the @mode allows that signal to wake the process. + */ +static inline int +wait_on_bit_lock_io(void *word, int bit, unsigned mode) +{ + if (!test_and_set_bit(bit, word)) + return 0; + return out_of_line_wait_on_bit_lock(word, bit, bit_wait_io, mode); +} + +/** + * wait_on_bit_lock_action - wait for a bit to be cleared, when wanting to set it + * @word: the word being waited on, a kernel virtual address + * @bit: the bit of the word being waited on + * @action: the function used to sleep, which may take special actions + * @mode: the task state to sleep in + * + * Use the standard hashed waitqueue table to wait for a bit + * to be cleared and then to set it, and allow the waiting action + * to be specified. + * This is like wait_on_bit() but allows fine control of how the waiting + * is done. + * + * Returns zero if the bit was (eventually) found to be clear and was + * set. Returns non-zero if a signal was delivered to the process and + * the @mode allows that signal to wake the process. */ static inline int -wait_on_bit_lock(void *word, int bit, int (*action)(void *), unsigned mode) +wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode) { if (!test_and_set_bit(bit, word)) return 0; diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 5777c13849ba..a219be961c0a 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -90,7 +90,6 @@ struct writeback_control { * fs/fs-writeback.c */ struct bdi_writeback; -int inode_wait(void *); void writeback_inodes_sb(struct super_block *, enum wb_reason reason); void writeback_inodes_sb_nr(struct super_block *, unsigned long nr, enum wb_reason reason); @@ -105,7 +104,7 @@ void inode_wait_for_writeback(struct inode *inode); static inline void wait_on_inode(struct inode *inode) { might_sleep(); - wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE); + wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE); } /* |
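Callers that genuinely need a different sleep behaviour keep the old flexibility through the _action variants, whose callback is now typed as wait_bit_action_f and receives the struct wait_bit_key. A hedged sketch with invented names (the freezable action is only an example of such a callback, not something this patch adds):

/* Sketch of supplying a custom action via wait_on_bit_action(); everything
 * named "example_" is invented for illustration.
 */
#include <linux/errno.h>
#include <linux/freezer.h>
#include <linux/sched.h>
#include <linux/wait.h>

static int example_freezable_wait(struct wait_bit_key *key)
{
	if (signal_pending(current))
		return -ERESTARTSYS;
	/* Like bit_wait(), but lets the freezer act while we sleep. */
	freezable_schedule();
	return 0;
}

static int example_wait_flag(unsigned long *flags, int bit)
{
	return wait_on_bit_action(flags, bit, example_freezable_wait,
				  TASK_INTERRUPTIBLE);
}

The wait_on_inode() hunk just above shows the caller-side effect of the default path: the old inode_wait action argument simply disappears.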