Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/aer.h | 1
-rw-r--r--  include/linux/bcm963xx_nvram.h | 112
-rw-r--r--  include/linux/bcm963xx_tag.h | 102
-rw-r--r--  include/linux/bio.h | 32
-rw-r--r--  include/linux/blk-iopoll.h | 46
-rw-r--r--  include/linux/blk-mq.h | 8
-rw-r--r--  include/linux/blk_types.h | 2
-rw-r--r--  include/linux/blkdev.h | 3
-rw-r--r--  include/linux/ceph/ceph_frag.h | 37
-rw-r--r--  include/linux/ceph/messenger.h | 2
-rw-r--r--  include/linux/cleancache.h | 16
-rw-r--r--  include/linux/clk/mmp.h | 17
-rw-r--r--  include/linux/clk/ti.h | 1
-rw-r--r--  include/linux/clksrc-dbx500-prcmu.h | 20
-rw-r--r--  include/linux/configfs.h | 21
-rw-r--r--  include/linux/cpumask.h | 55
-rw-r--r--  include/linux/dax.h | 18
-rw-r--r--  include/linux/devfreq.h | 2
-rw-r--r--  include/linux/device.h | 2
-rw-r--r--  include/linux/dma-attrs.h | 10
-rw-r--r--  include/linux/dma-mapping.h | 411
-rw-r--r--  include/linux/drbd.h | 26
-rw-r--r--  include/linux/drbd_genl.h | 149
-rw-r--r--  include/linux/fs.h | 37
-rw-r--r--  include/linux/ftrace.h | 1
-rw-r--r--  include/linux/hrtimer.h | 34
-rw-r--r--  include/linux/huge_mm.h | 16
-rw-r--r--  include/linux/idr.h | 14
-rw-r--r--  include/linux/interrupt.h | 2
-rw-r--r--  include/linux/io.h | 1
-rw-r--r--  include/linux/iommu.h | 16
-rw-r--r--  include/linux/irq_poll.h | 25
-rw-r--r--  include/linux/irqdomain.h | 1
-rw-r--r--  include/linux/kexec.h | 62
-rw-r--r--  include/linux/lightnvm.h | 238
-rw-r--r--  include/linux/list_lru.h | 4
-rw-r--r--  include/linux/lru_cache.h | 2
-rw-r--r--  include/linux/lz4.h | 4
-rw-r--r--  include/linux/memcontrol.h | 86
-rw-r--r--  include/linux/mlx4/cmd.h | 3
-rw-r--r--  include/linux/mlx4/device.h | 15
-rw-r--r--  include/linux/mlx4/qp.h | 15
-rw-r--r--  include/linux/mlx5/device.h | 40
-rw-r--r--  include/linux/mlx5/driver.h | 20
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 48
-rw-r--r--  include/linux/mlx5/qp.h | 46
-rw-r--r--  include/linux/mlx5/transobj.h | 78
-rw-r--r--  include/linux/mlx5/vport.h | 8
-rw-r--r--  include/linux/mm.h | 9
-rw-r--r--  include/linux/mm_types.h | 6
-rw-r--r--  include/linux/mmzone.h | 6
-rw-r--r--  include/linux/msi.h | 4
-rw-r--r--  include/linux/netdevice.h | 11
-rw-r--r--  include/linux/nvme.h | 27
-rw-r--r--  include/linux/of.h | 2
-rw-r--r--  include/linux/of_pci.h | 7
-rw-r--r--  include/linux/pagemap.h | 3
-rw-r--r--  include/linux/pci.h | 2
-rw-r--r--  include/linux/pci_ids.h | 5
-rw-r--r--  include/linux/perf_event.h | 9
-rw-r--r--  include/linux/pfn_t.h | 4
-rw-r--r--  include/linux/pipe_fs_i.h | 4
-rw-r--r--  include/linux/platform_data/iommu-omap.h | 9
-rw-r--r--  include/linux/platform_data/pwm_omap_dmtimer.h | 69
-rw-r--r--  include/linux/platform_data/sdhci-pic32.h | 22
-rw-r--r--  include/linux/platform_data/touchscreen-s3c2410.h | 1
-rw-r--r--  include/linux/pm.h | 1
-rw-r--r--  include/linux/pm_domain.h | 3
-rw-r--r--  include/linux/pmem.h | 22
-rw-r--r--  include/linux/ptrace.h | 24
-rw-r--r--  include/linux/radix-tree.h | 27
-rw-r--r--  include/linux/rbtree.h | 2
-rw-r--r--  include/linux/reset.h | 17
-rw-r--r--  include/linux/sched.h | 8
-rw-r--r--  include/linux/sh_clk.h | 4
-rw-r--r--  include/linux/shm.h | 6
-rw-r--r--  include/linux/shmem_fs.h | 5
-rw-r--r--  include/linux/slab.h | 2
-rw-r--r--  include/linux/slab_def.h | 3
-rw-r--r--  include/linux/slub_def.h | 2
-rw-r--r--  include/linux/soc/dove/pmu.h | 19
-rw-r--r--  include/linux/soc/qcom/smem_state.h | 18
-rw-r--r--  include/linux/sunrpc/svc_rdma.h | 39
-rw-r--r--  include/linux/swap.h | 76
-rw-r--r--  include/linux/swiotlb.h | 3
-rw-r--r--  include/linux/thermal.h | 5
-rw-r--r--  include/linux/tick.h | 2
-rw-r--r--  include/linux/tty.h | 1
-rw-r--r--  include/linux/uaccess.h | 7
-rw-r--r--  include/linux/wkup_m3_ipc.h | 55
90 files changed, 1959 insertions, 501 deletions
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 744b997d6a94..164049357e5c 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -7,6 +7,7 @@
#ifndef _AER_H_
#define _AER_H_
+#include <linux/errno.h>
#include <linux/types.h>
#define AER_NONFATAL 0
diff --git a/include/linux/bcm963xx_nvram.h b/include/linux/bcm963xx_nvram.h
new file mode 100644
index 000000000000..290c231b8cf1
--- /dev/null
+++ b/include/linux/bcm963xx_nvram.h
@@ -0,0 +1,112 @@
+#ifndef __LINUX_BCM963XX_NVRAM_H__
+#define __LINUX_BCM963XX_NVRAM_H__
+
+#include <linux/crc32.h>
+#include <linux/if_ether.h>
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+/*
+ * Broadcom BCM963xx SoC board nvram data structure.
+ *
+ * The nvram structure varies in size depending on the SoC board version. Use
+ * the appropriate minimum BCM963XX_NVRAM_*_SIZE define for the information
+ * you need instead of sizeof(struct bcm963xx_nvram) as this may change.
+ */
+
+#define BCM963XX_NVRAM_V4_SIZE 300
+#define BCM963XX_NVRAM_V5_SIZE (1 * SZ_1K)
+
+#define BCM963XX_DEFAULT_PSI_SIZE 64
+
+enum bcm963xx_nvram_nand_part {
+ BCM963XX_NVRAM_NAND_PART_BOOT = 0,
+ BCM963XX_NVRAM_NAND_PART_ROOTFS_1,
+ BCM963XX_NVRAM_NAND_PART_ROOTFS_2,
+ BCM963XX_NVRAM_NAND_PART_DATA,
+ BCM963XX_NVRAM_NAND_PART_BBT,
+
+ __BCM963XX_NVRAM_NAND_NR_PARTS
+};
+
+struct bcm963xx_nvram {
+ u32 version;
+ char bootline[256];
+ char name[16];
+ u32 main_tp_number;
+ u32 psi_size;
+ u32 mac_addr_count;
+ u8 mac_addr_base[ETH_ALEN];
+ u8 __reserved1[2];
+ u32 checksum_v4;
+
+ u8 __reserved2[292];
+ u32 nand_part_offset[__BCM963XX_NVRAM_NAND_NR_PARTS];
+ u32 nand_part_size[__BCM963XX_NVRAM_NAND_NR_PARTS];
+ u8 __reserved3[388];
+ u32 checksum_v5;
+};
+
+#define BCM963XX_NVRAM_NAND_PART_OFFSET(nvram, part) \
+ bcm963xx_nvram_nand_part_offset(nvram, BCM963XX_NVRAM_NAND_PART_ ##part)
+
+static inline u64 __pure bcm963xx_nvram_nand_part_offset(
+ const struct bcm963xx_nvram *nvram,
+ enum bcm963xx_nvram_nand_part part)
+{
+ return nvram->nand_part_offset[part] * SZ_1K;
+}
+
+#define BCM963XX_NVRAM_NAND_PART_SIZE(nvram, part) \
+ bcm963xx_nvram_nand_part_size(nvram, BCM963XX_NVRAM_NAND_PART_ ##part)
+
+static inline u64 __pure bcm963xx_nvram_nand_part_size(
+ const struct bcm963xx_nvram *nvram,
+ enum bcm963xx_nvram_nand_part part)
+{
+ return nvram->nand_part_size[part] * SZ_1K;
+}
+
+/*
+ * bcm963xx_nvram_checksum - Verify nvram checksum
+ *
+ * @nvram: pointer to full size nvram data structure
+ * @expected_out: optional pointer to store expected checksum value
+ * @actual_out: optional pointer to store actual checksum value
+ *
+ * Return: 0 if the checksum is valid, otherwise -EINVAL
+ */
+static int __maybe_unused bcm963xx_nvram_checksum(
+ const struct bcm963xx_nvram *nvram,
+ u32 *expected_out, u32 *actual_out)
+{
+ u32 expected, actual;
+ size_t len;
+
+ if (nvram->version <= 4) {
+ expected = nvram->checksum_v4;
+ len = BCM963XX_NVRAM_V4_SIZE - sizeof(u32);
+ } else {
+ expected = nvram->checksum_v5;
+ len = BCM963XX_NVRAM_V5_SIZE - sizeof(u32);
+ }
+
+ /*
+ * Calculate the CRC32 value for the nvram with a checksum value
+ * of 0 without modifying or copying the nvram by combining:
+ * - The CRC32 of the nvram without the checksum value
+ * - The CRC32 of a zero checksum value (which is also 0)
+ */
+ actual = crc32_le_combine(
+ crc32_le(~0, (u8 *)nvram, len), 0, sizeof(u32));
+
+ if (expected_out)
+ *expected_out = expected;
+
+ if (actual_out)
+ *actual_out = actual;
+
+ return expected == actual ? 0 : -EINVAL;
+}
+
+#endif /* __LINUX_BCM963XX_NVRAM_H__ */
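
A usage sketch for the new header (hypothetical caller, not part of this patch; pr_* logging assumed): validate the nvram block read from flash, then resolve a NAND partition via the accessor macros defined above.

	static int probe_nvram(const struct bcm963xx_nvram *nvram)
	{
		u32 expected, actual;
		u64 off, size;

		if (bcm963xx_nvram_checksum(nvram, &expected, &actual)) {
			pr_err("bad nvram checksum: expected %08x, got %08x\n",
			       expected, actual);
			return -EINVAL;
		}

		/* offsets/sizes are stored in KiB; the helpers scale by SZ_1K */
		off = BCM963XX_NVRAM_NAND_PART_OFFSET(nvram, ROOTFS_1);
		size = BCM963XX_NVRAM_NAND_PART_SIZE(nvram, ROOTFS_1);
		pr_info("rootfs_1: offset %llu, size %llu\n",
			(unsigned long long)off, (unsigned long long)size);
		return 0;
	}
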
diff --git a/include/linux/bcm963xx_tag.h b/include/linux/bcm963xx_tag.h
new file mode 100644
index 000000000000..161c7b37a77b
--- /dev/null
+++ b/include/linux/bcm963xx_tag.h
@@ -0,0 +1,102 @@
+#ifndef __LINUX_BCM963XX_TAG_H__
+#define __LINUX_BCM963XX_TAG_H__
+
+#include <linux/types.h>
+
+#define TAGVER_LEN 4 /* Length of Tag Version */
+#define TAGLAYOUT_LEN 4 /* Length of FlashLayoutVer */
+#define SIG1_LEN 20 /* Company Signature 1 Length */
+#define SIG2_LEN 14 /* Company Signature 2 Length */
+#define BOARDID_LEN 16 /* Length of BoardId */
+#define ENDIANFLAG_LEN 2 /* Endian Flag Length */
+#define CHIPID_LEN 6 /* Chip Id Length */
+#define IMAGE_LEN 10 /* Length of Length Field */
+#define ADDRESS_LEN 12 /* Length of Address field */
+#define IMAGE_SEQUENCE_LEN 4 /* Image sequence Length */
+#define RSASIG_LEN 20 /* Length of RSA Signature in tag */
+#define TAGINFO1_LEN 30 /* Length of vendor information field1 in tag */
+#define FLASHLAYOUTVER_LEN 4 /* Length of Flash Layout Version String tag */
+#define TAGINFO2_LEN 16 /* Length of vendor information field2 in tag */
+#define ALTTAGINFO_LEN 54 /* Alternate length for vendor information; Pirelli */
+
+#define NUM_PIRELLI 2
+#define IMAGETAG_CRC_START 0xFFFFFFFF
+
+#define PIRELLI_BOARDS { \
+ "AGPF-S0", \
+ "DWV-S0", \
+}
+
+/* Extended flash address, needs to be subtracted
+ * from bcm_tag flash image offsets.
+ */
+#define BCM963XX_EXTENDED_SIZE 0xBFC00000
+
+/*
+ * The Broadcom firmware assumes the rootfs starts the image, and
+ * therefore uses the rootfs start (flash_image_start) to determine
+ * where to flash the image. Since we have the kernel first, we have
+ * to give it the kernel address instead; but the CRC uses the length
+ * associated with this address (root_length), which is added to the
+ * kernel length (kernel_length) to determine the length of image to
+ * flash, and thus needs to be rootfs + deadc0de (the jffs2 EOF marker).
+ */
+
+struct bcm_tag {
+ /* 0-3: Version of the image tag */
+ char tag_version[TAGVER_LEN];
+ /* 4-23: Company Line 1 */
+ char sig_1[SIG1_LEN];
+ /* 24-37: Company Line 2 */
+ char sig_2[SIG2_LEN];
+ /* 38-43: Chip this image is for */
+ char chip_id[CHIPID_LEN];
+ /* 44-59: Board name */
+ char board_id[BOARDID_LEN];
+ /* 60-61: Map endianness -- 1 BE 0 LE */
+ char big_endian[ENDIANFLAG_LEN];
+ /* 62-71: Total length of image */
+ char total_length[IMAGE_LEN];
+ /* 72-83: Address in memory of CFE */
+ char cfe__address[ADDRESS_LEN];
+ /* 84-93: Size of CFE */
+ char cfe_length[IMAGE_LEN];
+ /* 94-105: Address in memory of image start
+ * (kernel for OpenWRT, rootfs for stock firmware)
+ */
+ char flash_image_start[ADDRESS_LEN];
+ /* 106-115: Size of rootfs */
+ char root_length[IMAGE_LEN];
+ /* 116-127: Address in memory of kernel */
+ char kernel_address[ADDRESS_LEN];
+ /* 128-137: Size of kernel */
+ char kernel_length[IMAGE_LEN];
+ /* 138-141: Image sequence number
+ * (to be incremented when flashed with a new image)
+ */
+ char image_sequence[IMAGE_SEQUENCE_LEN];
+ /* 142-161: RSA Signature (not used; some vendors may use this) */
+ char rsa_signature[RSASIG_LEN];
+ /* 162-191: Compilation and related information (not used in OpenWrt) */
+ char information1[TAGINFO1_LEN];
+ /* 192-195: Version flash layout */
+ char flash_layout_ver[FLASHLAYOUTVER_LEN];
+ /* 196-199: kernel+rootfs CRC32 */
+ __u32 fskernel_crc;
+ /* 200-215: Unused except on Alice Gate, where it holds information */
+ char information2[TAGINFO2_LEN];
+ /* 216-219: CRC32 of image less imagetag (kernel for Alice Gate) */
+ __u32 image_crc;
+ /* 220-223: CRC32 of rootfs partition */
+ __u32 rootfs_crc;
+ /* 224-227: CRC32 of kernel partition */
+ __u32 kernel_crc;
+ /* 228-235: Unused at present */
+ char reserved1[8];
+ /* 236-239: CRC32 of header excluding last 20 bytes */
+ __u32 header_crc;
+ /* 240-255: Unused at present */
+ char reserved2[16];
+};
+
+#endif /* __LINUX_BCM963XX_TAG_H__ */
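
The length and address fields above are ASCII decimal strings, not binary integers, which is why the comment about flashing talks about adding lengths. A minimal parsing sketch (helper name illustrative, modeled on how MTD partition parsers consume the tag; assumes the fields are NUL-padded):

	static int bcm_tag_parse_len(const char *field, u32 *len)
	{
		char buf[IMAGE_LEN + 1];

		/* fields are not guaranteed to be NUL-terminated */
		memcpy(buf, field, IMAGE_LEN);
		buf[IMAGE_LEN] = '\0';
		return kstrtou32(buf, 10, len);
	}
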
diff --git a/include/linux/bio.h b/include/linux/bio.h
index b9b6e046b52e..5349e6816cbb 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -318,16 +318,6 @@ enum bip_flags {
BIP_IP_CHECKSUM = 1 << 4, /* IP checksum */
};
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-
-static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
-{
- if (bio->bi_rw & REQ_INTEGRITY)
- return bio->bi_integrity;
-
- return NULL;
-}
-
/*
* bio integrity payload
*/
@@ -349,6 +339,16 @@ struct bio_integrity_payload {
struct bio_vec bip_inline_vecs[0];/* embedded bvec array */
};
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+
+static inline struct bio_integrity_payload *bio_integrity(struct bio *bio)
+{
+ if (bio->bi_rw & REQ_INTEGRITY)
+ return bio->bi_integrity;
+
+ return NULL;
+}
+
static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
{
struct bio_integrity_payload *bip = bio_integrity(bio);
@@ -795,6 +795,18 @@ static inline bool bio_integrity_flagged(struct bio *bio, enum bip_flags flag)
return false;
}
+static inline void *bio_integrity_alloc(struct bio *bio, gfp_t gfp,
+ unsigned int nr)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int bio_integrity_add_page(struct bio *bio, struct page *page,
+ unsigned int len, unsigned int offset)
+{
+ return 0;
+}
+
#endif /* CONFIG_BLK_DEV_INTEGRITY */
#endif /* CONFIG_BLOCK */
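
The reordering above moves struct bio_integrity_payload out of the CONFIG_BLK_DEV_INTEGRITY block, and the new stubs let callers drop their own ifdefs. A sketch of the calling pattern this enables (caller shape illustrative, not part of this patch):

	bip = bio_integrity_alloc(bio, GFP_NOIO, 1);
	if (IS_ERR(bip))
		return PTR_ERR(bip);	/* stub yields -EINVAL when integrity is off */
	bio_integrity_add_page(bio, page, len, offset);
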
diff --git a/include/linux/blk-iopoll.h b/include/linux/blk-iopoll.h
deleted file mode 100644
index 77ae77c0b704..000000000000
--- a/include/linux/blk-iopoll.h
+++ /dev/null
@@ -1,46 +0,0 @@
-#ifndef BLK_IOPOLL_H
-#define BLK_IOPOLL_H
-
-struct blk_iopoll;
-typedef int (blk_iopoll_fn)(struct blk_iopoll *, int);
-
-struct blk_iopoll {
- struct list_head list;
- unsigned long state;
- unsigned long data;
- int weight;
- int max;
- blk_iopoll_fn *poll;
-};
-
-enum {
- IOPOLL_F_SCHED = 0,
- IOPOLL_F_DISABLE = 1,
-};
-
-/*
- * Returns 0 if we successfully set the IOPOLL_F_SCHED bit, indicating
- * that we were the first to acquire this iop for scheduling. If this iop
- * is currently disabled, return "failure".
- */
-static inline int blk_iopoll_sched_prep(struct blk_iopoll *iop)
-{
- if (!test_bit(IOPOLL_F_DISABLE, &iop->state))
- return test_and_set_bit(IOPOLL_F_SCHED, &iop->state);
-
- return 1;
-}
-
-static inline int blk_iopoll_disable_pending(struct blk_iopoll *iop)
-{
- return test_bit(IOPOLL_F_DISABLE, &iop->state);
-}
-
-extern void blk_iopoll_sched(struct blk_iopoll *);
-extern void blk_iopoll_init(struct blk_iopoll *, int, blk_iopoll_fn *);
-extern void blk_iopoll_complete(struct blk_iopoll *);
-extern void __blk_iopoll_complete(struct blk_iopoll *);
-extern void blk_iopoll_enable(struct blk_iopoll *);
-extern void blk_iopoll_disable(struct blk_iopoll *);
-
-#endif
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index daf17d70aeca..7fc9296b5742 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -188,8 +188,14 @@ void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_free_request(struct request *rq);
void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
+
+enum {
+ BLK_MQ_REQ_NOWAIT = (1 << 0), /* return when out of requests */
+ BLK_MQ_REQ_RESERVED = (1 << 1), /* allocate from reserved pool */
+};
+
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
- gfp_t gfp, bool reserved);
+ unsigned int flags);
struct request *blk_mq_tag_to_rq(struct blk_mq_tags *tags, unsigned int tag);
struct cpumask *blk_mq_tags_cpumask(struct blk_mq_tags *tags);
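
The old gfp_t/bool argument pair collapses into a single flags word. A sketch of a converted call site (queue and data direction illustrative):

	rq = blk_mq_alloc_request(q, WRITE,
				  BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_RESERVED);
	if (IS_ERR(rq))
		return PTR_ERR(rq);	/* e.g. -EWOULDBLOCK when NOWAIT and no tags left */
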
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 0fb65843ec1e..86a38ea1823f 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -188,7 +188,6 @@ enum rq_flag_bits {
__REQ_PM, /* runtime pm request */
__REQ_HASHED, /* on IO scheduler merge hash */
__REQ_MQ_INFLIGHT, /* track inflight for MQ */
- __REQ_NO_TIMEOUT, /* requests may never expire */
__REQ_NR_BITS, /* stops here */
};
@@ -242,7 +241,6 @@ enum rq_flag_bits {
#define REQ_PM (1ULL << __REQ_PM)
#define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
-#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT)
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index bfb64d672e19..29189aeace19 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -409,6 +409,7 @@ struct request_queue {
unsigned int rq_timeout;
struct timer_list timeout;
+ struct work_struct timeout_work;
struct list_head timeout_list;
struct list_head icq_list;
@@ -795,7 +796,7 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
-extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+extern int blk_queue_enter(struct request_queue *q, bool nowait);
extern void blk_queue_exit(struct request_queue *q);
extern void blk_start_queue(struct request_queue *q);
extern void blk_start_queue_async(struct request_queue *q);
diff --git a/include/linux/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h
index 5babb8e95352..b827e066e55a 100644
--- a/include/linux/ceph/ceph_frag.h
+++ b/include/linux/ceph/ceph_frag.h
@@ -40,46 +40,11 @@ static inline __u32 ceph_frag_mask_shift(__u32 f)
return 24 - ceph_frag_bits(f);
}
-static inline int ceph_frag_contains_value(__u32 f, __u32 v)
+static inline bool ceph_frag_contains_value(__u32 f, __u32 v)
{
return (v & ceph_frag_mask(f)) == ceph_frag_value(f);
}
-static inline int ceph_frag_contains_frag(__u32 f, __u32 sub)
-{
- /* is sub as specific as us, and contained by us? */
- return ceph_frag_bits(sub) >= ceph_frag_bits(f) &&
- (ceph_frag_value(sub) & ceph_frag_mask(f)) == ceph_frag_value(f);
-}
-static inline __u32 ceph_frag_parent(__u32 f)
-{
- return ceph_frag_make(ceph_frag_bits(f) - 1,
- ceph_frag_value(f) & (ceph_frag_mask(f) << 1));
-}
-static inline int ceph_frag_is_left_child(__u32 f)
-{
- return ceph_frag_bits(f) > 0 &&
- (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 0;
-}
-static inline int ceph_frag_is_right_child(__u32 f)
-{
- return ceph_frag_bits(f) > 0 &&
- (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 1;
-}
-static inline __u32 ceph_frag_sibling(__u32 f)
-{
- return ceph_frag_make(ceph_frag_bits(f),
- ceph_frag_value(f) ^ (0x1000000 >> ceph_frag_bits(f)));
-}
-static inline __u32 ceph_frag_left_child(__u32 f)
-{
- return ceph_frag_make(ceph_frag_bits(f)+1, ceph_frag_value(f));
-}
-static inline __u32 ceph_frag_right_child(__u32 f)
-{
- return ceph_frag_make(ceph_frag_bits(f)+1,
- ceph_frag_value(f) | (0x1000000 >> (1+ceph_frag_bits(f))));
-}
static inline __u32 ceph_frag_make_child(__u32 f, int by, int i)
{
int newbits = ceph_frag_bits(f) + by;
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 71b1d6cdcb5d..8dbd7879fdc6 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -220,6 +220,7 @@ struct ceph_connection {
struct ceph_entity_addr actual_peer_addr;
/* message out temps */
+ struct ceph_msg_header out_hdr;
struct ceph_msg *out_msg; /* sending message (== tail of
out_sent) */
bool out_msg_done;
@@ -229,7 +230,6 @@ struct ceph_connection {
int out_kvec_left; /* kvec's left in out_kvec */
int out_skip; /* skip this many bytes */
int out_kvec_bytes; /* total bytes left */
- bool out_kvec_is_msg; /* kvec refers to out_msg */
int out_more; /* there is more data after the kvecs */
__le64 out_temp_ack; /* for writing an ack */
struct ceph_timespec out_temp_keepalive2; /* for writing keepalive2
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
index bda5ec0b4b4d..fccf7f44139d 100644
--- a/include/linux/cleancache.h
+++ b/include/linux/cleancache.h
@@ -37,7 +37,7 @@ struct cleancache_ops {
void (*invalidate_fs)(int);
};
-extern int cleancache_register_ops(struct cleancache_ops *ops);
+extern int cleancache_register_ops(const struct cleancache_ops *ops);
extern void __cleancache_init_fs(struct super_block *);
extern void __cleancache_init_shared_fs(struct super_block *);
extern int __cleancache_get_page(struct page *);
@@ -48,14 +48,14 @@ extern void __cleancache_invalidate_fs(struct super_block *);
#ifdef CONFIG_CLEANCACHE
#define cleancache_enabled (1)
-static inline bool cleancache_fs_enabled(struct page *page)
-{
- return page->mapping->host->i_sb->cleancache_poolid >= 0;
-}
static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping)
{
return mapping->host->i_sb->cleancache_poolid >= 0;
}
+static inline bool cleancache_fs_enabled(struct page *page)
+{
+ return cleancache_fs_enabled_mapping(page->mapping);
+}
#else
#define cleancache_enabled (0)
#define cleancache_fs_enabled(_page) (0)
@@ -89,11 +89,9 @@ static inline void cleancache_init_shared_fs(struct super_block *sb)
static inline int cleancache_get_page(struct page *page)
{
- int ret = -1;
-
if (cleancache_enabled && cleancache_fs_enabled(page))
- ret = __cleancache_get_page(page);
- return ret;
+ return __cleancache_get_page(page);
+ return -1;
}
static inline void cleancache_put_page(struct page *page)
diff --git a/include/linux/clk/mmp.h b/include/linux/clk/mmp.h
new file mode 100644
index 000000000000..607321fa2c2b
--- /dev/null
+++ b/include/linux/clk/mmp.h
@@ -0,0 +1,17 @@
+#ifndef __CLK_MMP_H
+#define __CLK_MMP_H
+
+#include <linux/types.h>
+
+extern void pxa168_clk_init(phys_addr_t mpmu_phys,
+ phys_addr_t apmu_phys,
+ phys_addr_t apbc_phys);
+extern void pxa910_clk_init(phys_addr_t mpmu_phys,
+ phys_addr_t apmu_phys,
+ phys_addr_t apbc_phys,
+ phys_addr_t apbcp_phys);
+extern void mmp2_clk_init(phys_addr_t mpmu_phys,
+ phys_addr_t apmu_phys,
+ phys_addr_t apbc_phys);
+
+#endif
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 75205df29b9c..9a638601cb09 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -195,6 +195,7 @@ enum {
TI_CLKM_PRM,
TI_CLKM_SCRM,
TI_CLKM_CTRL,
+ TI_CLKM_PLLSS,
CLK_MAX_MEMMAPS
};
diff --git a/include/linux/clksrc-dbx500-prcmu.h b/include/linux/clksrc-dbx500-prcmu.h
deleted file mode 100644
index 4fb8119c49e4..000000000000
--- a/include/linux/clksrc-dbx500-prcmu.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * Copyright (C) ST-Ericsson SA 2011
- *
- * License Terms: GNU General Public License v2
- * Author: Mattias Wallin <mattias.wallin@stericsson.com>
- *
- */
-#ifndef __CLKSRC_DBX500_PRCMU_H
-#define __CLKSRC_DBX500_PRCMU_H
-
-#include <linux/init.h>
-#include <linux/io.h>
-
-#ifdef CONFIG_CLKSRC_DBX500_PRCMU
-void __init clksrc_dbx500_prcmu_init(void __iomem *base);
-#else
-static inline void __init clksrc_dbx500_prcmu_init(void __iomem *base) {}
-#endif
-
-#endif
diff --git a/include/linux/configfs.h b/include/linux/configfs.h
index f7300d023dbe..f8165c129ccb 100644
--- a/include/linux/configfs.h
+++ b/include/linux/configfs.h
@@ -259,7 +259,24 @@ void configfs_unregister_default_group(struct config_group *group);
/* These functions can sleep and can alloc with GFP_KERNEL */
/* WARNING: These cannot be called underneath configfs callbacks!! */
-int configfs_depend_item(struct configfs_subsystem *subsys, struct config_item *target);
-void configfs_undepend_item(struct configfs_subsystem *subsys, struct config_item *target);
+int configfs_depend_item(struct configfs_subsystem *subsys,
+ struct config_item *target);
+void configfs_undepend_item(struct config_item *target);
+
+/*
+ * These functions can sleep and can alloc with GFP_KERNEL
+ * NOTE: These should be called only underneath configfs callbacks.
+ * NOTE: First parameter is a caller's subsystem, not target's.
+ * WARNING: These cannot be called on a newly created item
+ * (in make_group()/make_item() callback)
+ */
+int configfs_depend_item_unlocked(struct configfs_subsystem *caller_subsys,
+ struct config_item *target);
+
+
+static inline void configfs_undepend_item_unlocked(struct config_item *target)
+{
+ configfs_undepend_item(target);
+}
#endif /* _CONFIGFS_H_ */
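
A sketch of the intended use of the _unlocked variants from inside a configfs callback, per the NOTEs above (subsystem and item names illustrative):

	ret = configfs_depend_item_unlocked(my_subsys, target);
	if (ret)
		return ret;
	/* ... and on the teardown path: */
	configfs_undepend_item_unlocked(target);
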
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 59915ea5373c..fc14275ff34e 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -85,10 +85,14 @@ extern int nr_cpu_ids;
* only one CPU.
*/
-extern const struct cpumask *const cpu_possible_mask;
-extern const struct cpumask *const cpu_online_mask;
-extern const struct cpumask *const cpu_present_mask;
-extern const struct cpumask *const cpu_active_mask;
+extern struct cpumask __cpu_possible_mask;
+extern struct cpumask __cpu_online_mask;
+extern struct cpumask __cpu_present_mask;
+extern struct cpumask __cpu_active_mask;
+#define cpu_possible_mask ((const struct cpumask *)&__cpu_possible_mask)
+#define cpu_online_mask ((const struct cpumask *)&__cpu_online_mask)
+#define cpu_present_mask ((const struct cpumask *)&__cpu_present_mask)
+#define cpu_active_mask ((const struct cpumask *)&__cpu_active_mask)
#if NR_CPUS > 1
#define num_online_cpus() cpumask_weight(cpu_online_mask)
@@ -716,14 +720,49 @@ extern const DECLARE_BITMAP(cpu_all_bits, NR_CPUS);
#define for_each_present_cpu(cpu) for_each_cpu((cpu), cpu_present_mask)
/* Wrappers for arch boot code to manipulate normally-constant masks */
-void set_cpu_possible(unsigned int cpu, bool possible);
-void set_cpu_present(unsigned int cpu, bool present);
-void set_cpu_online(unsigned int cpu, bool online);
-void set_cpu_active(unsigned int cpu, bool active);
void init_cpu_present(const struct cpumask *src);
void init_cpu_possible(const struct cpumask *src);
void init_cpu_online(const struct cpumask *src);
+static inline void
+set_cpu_possible(unsigned int cpu, bool possible)
+{
+ if (possible)
+ cpumask_set_cpu(cpu, &__cpu_possible_mask);
+ else
+ cpumask_clear_cpu(cpu, &__cpu_possible_mask);
+}
+
+static inline void
+set_cpu_present(unsigned int cpu, bool present)
+{
+ if (present)
+ cpumask_set_cpu(cpu, &__cpu_present_mask);
+ else
+ cpumask_clear_cpu(cpu, &__cpu_present_mask);
+}
+
+static inline void
+set_cpu_online(unsigned int cpu, bool online)
+{
+ if (online) {
+ cpumask_set_cpu(cpu, &__cpu_online_mask);
+ cpumask_set_cpu(cpu, &__cpu_active_mask);
+ } else {
+ cpumask_clear_cpu(cpu, &__cpu_online_mask);
+ }
+}
+
+static inline void
+set_cpu_active(unsigned int cpu, bool active)
+{
+ if (active)
+ cpumask_set_cpu(cpu, &__cpu_active_mask);
+ else
+ cpumask_clear_cpu(cpu, &__cpu_active_mask);
+}
+
+
/**
* to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
* @bitmap: the bitmap
diff --git a/include/linux/dax.h b/include/linux/dax.h
index b415e521528d..818e45078929 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -14,6 +14,17 @@ int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
dax_iodone_t);
int __dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
dax_iodone_t);
+
+#ifdef CONFIG_FS_DAX
+struct page *read_dax_sector(struct block_device *bdev, sector_t n);
+#else
+static inline struct page *read_dax_sector(struct block_device *bdev,
+ sector_t n)
+{
+ return ERR_PTR(-ENXIO);
+}
+#endif
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int dax_pmd_fault(struct vm_area_struct *, unsigned long addr, pmd_t *,
unsigned int flags, get_block_t, dax_iodone_t);
@@ -36,4 +47,11 @@ static inline bool vma_is_dax(struct vm_area_struct *vma)
{
return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
}
+
+static inline bool dax_mapping(struct address_space *mapping)
+{
+ return mapping->host && IS_DAX(mapping->host);
+}
+int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
+ loff_t end);
#endif
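
A sketch of where the new helpers slot into a filesystem fsync path (caller shape assumed, not taken from this patch): DAX mappings keep dirty state in radix-tree entries rather than dirty pagecache pages, so writeback goes through the new call.

	if (dax_mapping(mapping))
		return dax_writeback_mapping_range(mapping, start, end);
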
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 68030e22af35..6fa02a20eb63 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -89,7 +89,7 @@ struct devfreq_dev_profile {
int (*get_cur_freq)(struct device *dev, unsigned long *freq);
void (*exit)(struct device *dev);
- unsigned int *freq_table;
+ unsigned long *freq_table;
unsigned int max_state;
};
diff --git a/include/linux/device.h b/include/linux/device.h
index f627ba20a46c..6d6f1fec092f 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -1044,6 +1044,8 @@ extern int __must_check driver_attach(struct device_driver *drv);
extern void device_initial_probe(struct device *dev);
extern int __must_check device_reprobe(struct device *dev);
+extern bool device_is_bound(struct device *dev);
+
/*
* Easy functions for dynamically creating devices on the fly
*/
diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h
index c8e1831d7572..99c0be00b47c 100644
--- a/include/linux/dma-attrs.h
+++ b/include/linux/dma-attrs.h
@@ -41,7 +41,6 @@ static inline void init_dma_attrs(struct dma_attrs *attrs)
bitmap_zero(attrs->flags, __DMA_ATTRS_LONGS);
}
-#ifdef CONFIG_HAVE_DMA_ATTRS
/**
* dma_set_attr - set a specific attribute
* @attr: attribute to set
@@ -67,14 +66,5 @@ static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
BUG_ON(attr >= DMA_ATTR_MAX);
return test_bit(attr, attrs->flags);
}
-#else /* !CONFIG_HAVE_DMA_ATTRS */
-static inline void dma_set_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
-}
-static inline int dma_get_attr(enum dma_attr attr, struct dma_attrs *attrs)
-{
- return 0;
-}
-#endif /* CONFIG_HAVE_DMA_ATTRS */
#endif /* _DMA_ATTR_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 2e551e2d2d03..75857cda38e9 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -6,8 +6,11 @@
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
+#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
+#include <linux/kmemcheck.h>
+#include <linux/bug.h>
/*
* A dma_addr_t can hold any valid DMA or bus address for the platform.
@@ -83,10 +86,383 @@ static inline int is_device_dma_capable(struct device *dev)
return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+/*
+ * These three functions are only for dma allocator.
+ * Don't use them in device drivers.
+ */
+int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+ dma_addr_t *dma_handle, void **ret);
+int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, size_t size, int *ret);
+#else
+#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
+#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
+
#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
-#include <asm-generic/dma-mapping-broken.h>
+/*
+ * Define the dma api to allow compilation but not linking of
+ * dma dependent code. Code that depends on the dma-mapping
+ * API needs to set 'depends on HAS_DMA' in its Kconfig
+ */
+extern struct dma_map_ops bad_dma_ops;
+static inline struct dma_map_ops *get_dma_ops(struct device *dev)
+{
+ return &bad_dma_ops;
+}
+#endif
+
+static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(ptr, size);
+ BUG_ON(!valid_dma_direction(dir));
+ addr = ops->map_page(dev, virt_to_page(ptr),
+ offset_in_page(ptr), size,
+ dir, attrs);
+ debug_dma_map_page(dev, virt_to_page(ptr),
+ offset_in_page(ptr), size,
+ dir, addr, true);
+ return addr;
+}
+
+static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->unmap_page)
+ ops->unmap_page(dev, addr, size, dir, attrs);
+ debug_dma_unmap_page(dev, addr, size, dir, true);
+}
+
+/*
+ * dma_map_sg_attrs returns 0 on error and > 0 on success.
+ * It should never return a value < 0.
+ */
+static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ int i, ents;
+ struct scatterlist *s;
+
+ for_each_sg(sg, s, nents, i)
+ kmemcheck_mark_initialized(sg_virt(s), s->length);
+ BUG_ON(!valid_dma_direction(dir));
+ ents = ops->map_sg(dev, sg, nents, dir, attrs);
+ BUG_ON(ents < 0);
+ debug_dma_map_sg(dev, sg, nents, ents, dir);
+
+ return ents;
+}
+
+static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ debug_dma_unmap_sg(dev, sg, nents, dir);
+ if (ops->unmap_sg)
+ ops->unmap_sg(dev, sg, nents, dir, attrs);
+}
+
+static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
+ size_t offset, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ dma_addr_t addr;
+
+ kmemcheck_mark_initialized(page_address(page) + offset, size);
+ BUG_ON(!valid_dma_direction(dir));
+ addr = ops->map_page(dev, page, offset, size, dir, NULL);
+ debug_dma_map_page(dev, page, offset, size, dir, addr, false);
+
+ return addr;
+}
+
+static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->unmap_page)
+ ops->unmap_page(dev, addr, size, dir, NULL);
+ debug_dma_unmap_page(dev, addr, size, dir, false);
+}
+
+static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_cpu)
+ ops->sync_single_for_cpu(dev, addr, size, dir);
+ debug_dma_sync_single_for_cpu(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_for_device(struct device *dev,
+ dma_addr_t addr, size_t size,
+ enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_device)
+ ops->sync_single_for_device(dev, addr, size, dir);
+ debug_dma_sync_single_for_device(dev, addr, size, dir);
+}
+
+static inline void dma_sync_single_range_for_cpu(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_cpu)
+ ops->sync_single_for_cpu(dev, addr + offset, size, dir);
+ debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
+}
+
+static inline void dma_sync_single_range_for_device(struct device *dev,
+ dma_addr_t addr,
+ unsigned long offset,
+ size_t size,
+ enum dma_data_direction dir)
+{
+ const struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_single_for_device)
+ ops->sync_single_for_device(dev, addr + offset, size, dir);
+ debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
+}
+
+static inline void
+dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_sg_for_cpu)
+ ops->sync_sg_for_cpu(dev, sg, nelems, dir);
+ debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
+}
+
+static inline void
+dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
+ int nelems, enum dma_data_direction dir)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!valid_dma_direction(dir));
+ if (ops->sync_sg_for_device)
+ ops->sync_sg_for_device(dev, sg, nelems, dir);
+ debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
+
+}
+
+#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
+#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
+#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
+#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
+
+extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+ unsigned long vm_flags,
+ pgprot_t prot, const void *caller);
+
+void *dma_common_pages_remap(struct page **pages, size_t size,
+ unsigned long vm_flags, pgprot_t prot,
+ const void *caller);
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);
+
+/**
+ * dma_mmap_attrs - map a coherent DMA allocation into user space
+ * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
+ * @vma: vm_area_struct describing requested user mapping
+ * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
+ * @handle: device-view address returned from dma_alloc_attrs
+ * @size: size of memory originally requested in dma_alloc_attrs
+ * @attrs: attributes of mapping properties requested in dma_alloc_attrs
+ *
+ * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
+ * into user space. The coherent DMA buffer must not be freed by the
+ * driver until the user space mapping has been released.
+ */
+static inline int
+dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops->mmap)
+ return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
+ return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
+}
+
+#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
+
+int
+dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size);
+
+static inline int
+dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
+ dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ BUG_ON(!ops);
+ if (ops->get_sgtable)
+ return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
+ attrs);
+ return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
+}
+
+#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
+
+#ifndef arch_dma_alloc_attrs
+#define arch_dma_alloc_attrs(dev, flag) (true)
+#endif
+
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+ void *cpu_addr;
+
+ BUG_ON(!ops);
+
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
+ return cpu_addr;
+
+ if (!arch_dma_alloc_attrs(&dev, &flag))
+ return NULL;
+ if (!ops->alloc)
+ return NULL;
+
+ cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+ debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
+ return cpu_addr;
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle,
+ struct dma_attrs *attrs)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ BUG_ON(!ops);
+ WARN_ON(irqs_disabled());
+
+ if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+ return;
+
+ if (!ops->free)
+ return;
+
+ debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
+ ops->free(dev, size, cpu_addr, dma_handle, attrs);
+}
+
+static inline void *dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag)
+{
+ return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
+}
+
+static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
+}
+
+static inline void dma_free_noncoherent(struct device *dev, size_t size,
+ void *cpu_addr, dma_addr_t dma_handle)
+{
+ DEFINE_DMA_ATTRS(attrs);
+
+ dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
+ dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
+}
+
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+ debug_dma_mapping_error(dev, dma_addr);
+
+ if (get_dma_ops(dev)->mapping_error)
+ return get_dma_ops(dev)->mapping_error(dev, dma_addr);
+
+#ifdef DMA_ERROR_CODE
+ return dma_addr == DMA_ERROR_CODE;
+#else
+ return 0;
+#endif
+}
+
+#ifndef HAVE_ARCH_DMA_SUPPORTED
+static inline int dma_supported(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (!ops)
+ return 0;
+ if (!ops->dma_supported)
+ return 1;
+ return ops->dma_supported(dev, mask);
+}
+#endif
+
+#ifndef HAVE_ARCH_DMA_SET_MASK
+static inline int dma_set_mask(struct device *dev, u64 mask)
+{
+ struct dma_map_ops *ops = get_dma_ops(dev);
+
+ if (ops->set_dma_mask)
+ return ops->set_dma_mask(dev, mask);
+
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+ *dev->dma_mask = mask;
+ return 0;
+}
#endif
static inline u64 dma_get_mask(struct device *dev)
@@ -208,7 +584,13 @@ static inline int dma_get_cache_alignment(void)
#define DMA_MEMORY_INCLUDES_CHILDREN 0x04
#define DMA_MEMORY_EXCLUSIVE 0x08
-#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size, int flags);
+void dma_release_declared_memory(struct device *dev);
+void *dma_mark_declared_memory_occupied(struct device *dev,
+ dma_addr_t device_addr, size_t size);
+#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size, int flags)
@@ -227,7 +609,7 @@ dma_mark_declared_memory_occupied(struct device *dev,
{
return ERR_PTR(-EBUSY);
}
-#endif
+#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
/*
* Managed DMA API
@@ -240,13 +622,13 @@ extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle);
-#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size,
int flags);
extern void dmam_release_declared_memory(struct device *dev);
-#else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
+#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
phys_addr_t phys_addr, dma_addr_t device_addr,
size_t size, gfp_t gfp)
@@ -257,24 +639,8 @@ static inline int dmam_declare_coherent_memory(struct device *dev,
static inline void dmam_release_declared_memory(struct device *dev)
{
}
-#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
-
-#ifndef CONFIG_HAVE_DMA_ATTRS
-struct dma_attrs;
-
-#define dma_map_single_attrs(dev, cpu_addr, size, dir, attrs) \
- dma_map_single(dev, cpu_addr, size, dir)
+#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
-#define dma_unmap_single_attrs(dev, dma_addr, size, dir, attrs) \
- dma_unmap_single(dev, dma_addr, size, dir)
-
-#define dma_map_sg_attrs(dev, sgl, nents, dir, attrs) \
- dma_map_sg(dev, sgl, nents, dir)
-
-#define dma_unmap_sg_attrs(dev, sgl, nents, dir, attrs) \
- dma_unmap_sg(dev, sgl, nents, dir)
-
-#else
static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
dma_addr_t *dma_addr, gfp_t gfp)
{
@@ -300,7 +666,6 @@ static inline int dma_mmap_writecombine(struct device *dev,
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
-#endif /* CONFIG_HAVE_DMA_ATTRS */
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME
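
These inlines replace the per-arch (and asm-generic) copies of the same boilerplate; the driver-facing pattern they implement is unchanged. A minimal sketch (dev/buf/len are illustrative):

	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;
	/* ... device accesses the buffer ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
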
diff --git a/include/linux/drbd.h b/include/linux/drbd.h
index 8723f2a99e15..d6b3c9943a2c 100644
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -25,7 +25,6 @@
*/
#ifndef DRBD_H
#define DRBD_H
-#include <linux/connector.h>
#include <asm/types.h>
#ifdef __KERNEL__
@@ -52,7 +51,7 @@
#endif
extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.4.5"
+#define REL_VERSION "8.4.6"
#define API_VERSION 1
#define PRO_VERSION_MIN 86
#define PRO_VERSION_MAX 101
@@ -339,6 +338,8 @@ enum drbd_state_rv {
#define MDF_AL_CLEAN (1 << 7)
#define MDF_AL_DISABLED (1 << 8)
+#define MAX_PEERS 32
+
enum drbd_uuid_index {
UI_CURRENT,
UI_BITMAP,
@@ -349,14 +350,35 @@ enum drbd_uuid_index {
UI_EXTENDED_SIZE /* Everything. */
};
+#define HISTORY_UUIDS MAX_PEERS
+
enum drbd_timeout_flag {
UT_DEFAULT = 0,
UT_DEGRADED = 1,
UT_PEER_OUTDATED = 2,
};
+enum drbd_notification_type {
+ NOTIFY_EXISTS,
+ NOTIFY_CREATE,
+ NOTIFY_CHANGE,
+ NOTIFY_DESTROY,
+ NOTIFY_CALL,
+ NOTIFY_RESPONSE,
+
+ NOTIFY_CONTINUES = 0x8000,
+ NOTIFY_FLAGS = NOTIFY_CONTINUES,
+};
+
#define UUID_JUST_CREATED ((__u64)4)
+enum write_ordering_e {
+ WO_NONE,
+ WO_DRAIN_IO,
+ WO_BDEV_FLUSH,
+ WO_BIO_BARRIER
+};
+
/* magic numbers used in meta data and network packets */
#define DRBD_MAGIC 0x83740267
#define DRBD_MAGIC_BIG 0x835a
diff --git a/include/linux/drbd_genl.h b/include/linux/drbd_genl.h
index 7b131ed8f9c6..2d0e5ad5de9d 100644
--- a/include/linux/drbd_genl.h
+++ b/include/linux/drbd_genl.h
@@ -250,6 +250,76 @@ GENL_struct(DRBD_NLA_DETACH_PARMS, 13, detach_parms,
__flg_field(1, DRBD_GENLA_F_MANDATORY, force_detach)
)
+GENL_struct(DRBD_NLA_RESOURCE_INFO, 15, resource_info,
+ __u32_field(1, 0, res_role)
+ __flg_field(2, 0, res_susp)
+ __flg_field(3, 0, res_susp_nod)
+ __flg_field(4, 0, res_susp_fen)
+ /* __flg_field(5, 0, res_weak) */
+)
+
+GENL_struct(DRBD_NLA_DEVICE_INFO, 16, device_info,
+ __u32_field(1, 0, dev_disk_state)
+)
+
+GENL_struct(DRBD_NLA_CONNECTION_INFO, 17, connection_info,
+ __u32_field(1, 0, conn_connection_state)
+ __u32_field(2, 0, conn_role)
+)
+
+GENL_struct(DRBD_NLA_PEER_DEVICE_INFO, 18, peer_device_info,
+ __u32_field(1, 0, peer_repl_state)
+ __u32_field(2, 0, peer_disk_state)
+ __u32_field(3, 0, peer_resync_susp_user)
+ __u32_field(4, 0, peer_resync_susp_peer)
+ __u32_field(5, 0, peer_resync_susp_dependency)
+)
+
+GENL_struct(DRBD_NLA_RESOURCE_STATISTICS, 19, resource_statistics,
+ __u32_field(1, 0, res_stat_write_ordering)
+)
+
+GENL_struct(DRBD_NLA_DEVICE_STATISTICS, 20, device_statistics,
+ __u64_field(1, 0, dev_size) /* (sectors) */
+ __u64_field(2, 0, dev_read) /* (sectors) */
+ __u64_field(3, 0, dev_write) /* (sectors) */
+ __u64_field(4, 0, dev_al_writes) /* activity log writes (count) */
+ __u64_field(5, 0, dev_bm_writes) /* bitmap writes (count) */
+ __u32_field(6, 0, dev_upper_pending) /* application requests in progress */
+ __u32_field(7, 0, dev_lower_pending) /* backing device requests in progress */
+ __flg_field(8, 0, dev_upper_blocked)
+ __flg_field(9, 0, dev_lower_blocked)
+ __flg_field(10, 0, dev_al_suspended) /* activity log suspended */
+ __u64_field(11, 0, dev_exposed_data_uuid)
+ __u64_field(12, 0, dev_current_uuid)
+ __u32_field(13, 0, dev_disk_flags)
+ __bin_field(14, 0, history_uuids, HISTORY_UUIDS * sizeof(__u64))
+)
+
+GENL_struct(DRBD_NLA_CONNECTION_STATISTICS, 21, connection_statistics,
+ __flg_field(1, 0, conn_congested)
+)
+
+GENL_struct(DRBD_NLA_PEER_DEVICE_STATISTICS, 22, peer_device_statistics,
+ __u64_field(1, 0, peer_dev_received) /* sectors */
+ __u64_field(2, 0, peer_dev_sent) /* sectors */
+ __u32_field(3, 0, peer_dev_pending) /* number of requests */
+ __u32_field(4, 0, peer_dev_unacked) /* number of requests */
+ __u64_field(5, 0, peer_dev_out_of_sync) /* sectors */
+ __u64_field(6, 0, peer_dev_resync_failed) /* sectors */
+ __u64_field(7, 0, peer_dev_bitmap_uuid)
+ __u32_field(9, 0, peer_dev_flags)
+)
+
+GENL_struct(DRBD_NLA_NOTIFICATION_HEADER, 23, drbd_notification_header,
+ __u32_field(1, DRBD_GENLA_F_MANDATORY, nh_type)
+)
+
+GENL_struct(DRBD_NLA_HELPER, 24, drbd_helper_info,
+ __str_field(1, DRBD_GENLA_F_MANDATORY, helper_name, 32)
+ __u32_field(2, DRBD_GENLA_F_MANDATORY, helper_status)
+)
+
/*
* Notifications and commands (genlmsghdr->cmd)
*/
@@ -382,3 +452,82 @@ GENL_op(DRBD_ADM_GET_TIMEOUT_TYPE, 26, GENL_doit(drbd_adm_get_timeout_type),
GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
GENL_op(DRBD_ADM_DOWN, 27, GENL_doit(drbd_adm_down),
GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED))
+
+GENL_op(DRBD_ADM_GET_RESOURCES, 30,
+ GENL_op_init(
+ .dumpit = drbd_adm_dump_resources,
+ ),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_RESOURCE_INFO, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_RESOURCE_STATISTICS, DRBD_GENLA_F_MANDATORY))
+
+GENL_op(DRBD_ADM_GET_DEVICES, 31,
+ GENL_op_init(
+ .dumpit = drbd_adm_dump_devices,
+ .done = drbd_adm_dump_devices_done,
+ ),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_DEVICE_INFO, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_DEVICE_STATISTICS, DRBD_GENLA_F_MANDATORY))
+
+GENL_op(DRBD_ADM_GET_CONNECTIONS, 32,
+ GENL_op_init(
+ .dumpit = drbd_adm_dump_connections,
+ .done = drbd_adm_dump_connections_done,
+ ),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_CONNECTION_INFO, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_CONNECTION_STATISTICS, DRBD_GENLA_F_MANDATORY))
+
+GENL_op(DRBD_ADM_GET_PEER_DEVICES, 33,
+ GENL_op_init(
+ .dumpit = drbd_adm_dump_peer_devices,
+ .done = drbd_adm_dump_peer_devices_done,
+ ),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_PEER_DEVICE_INFO, DRBD_GENLA_F_MANDATORY)
+ GENL_tla_expected(DRBD_NLA_PEER_DEVICE_STATISTICS, DRBD_GENLA_F_MANDATORY))
+
+GENL_notification(
+ DRBD_RESOURCE_STATE, 34, events,
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_NOTIFICATION_HEADER, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_RESOURCE_INFO, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_RESOURCE_STATISTICS, DRBD_F_REQUIRED))
+
+GENL_notification(
+ DRBD_DEVICE_STATE, 35, events,
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_NOTIFICATION_HEADER, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_DEVICE_INFO, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_DEVICE_STATISTICS, DRBD_F_REQUIRED))
+
+GENL_notification(
+ DRBD_CONNECTION_STATE, 36, events,
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_NOTIFICATION_HEADER, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_CONNECTION_INFO, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_CONNECTION_STATISTICS, DRBD_F_REQUIRED))
+
+GENL_notification(
+ DRBD_PEER_DEVICE_STATE, 37, events,
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_NOTIFICATION_HEADER, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_PEER_DEVICE_INFO, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_PEER_DEVICE_STATISTICS, DRBD_F_REQUIRED))
+
+GENL_op(
+ DRBD_ADM_GET_INITIAL_STATE, 38,
+ GENL_op_init(
+ .dumpit = drbd_adm_get_initial_state,
+ ),
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_GENLA_F_MANDATORY))
+
+GENL_notification(
+ DRBD_HELPER, 40, events,
+ GENL_tla_expected(DRBD_NLA_CFG_CONTEXT, DRBD_F_REQUIRED)
+ GENL_tla_expected(DRBD_NLA_HELPER, DRBD_F_REQUIRED))
+
+GENL_notification(
+ DRBD_INITIAL_STATE_DONE, 41, events,
+ GENL_tla_expected(DRBD_NLA_NOTIFICATION_HEADER, DRBD_F_REQUIRED))
diff --git a/include/linux/fs.h b/include/linux/fs.h
index eb73d74ed992..ae681002100a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -433,7 +433,8 @@ struct address_space {
struct rw_semaphore i_mmap_rwsem; /* protect tree, count, list */
/* Protected by tree_lock together with the radix tree */
unsigned long nrpages; /* number of total pages */
- unsigned long nrshadows; /* number of shadow entries */
+ /* number of shadow or DAX exceptional entries */
+ unsigned long nrexceptional;
pgoff_t writeback_index;/* writeback starts here */
const struct address_space_operations *a_ops; /* methods */
unsigned long flags; /* error bits/gfp mask */
@@ -483,9 +484,6 @@ struct block_device {
int bd_fsfreeze_count;
/* Mutex for freeze */
struct mutex bd_fsfreeze_mutex;
-#ifdef CONFIG_FS_DAX
- int bd_map_count;
-#endif
};
/*
@@ -714,6 +712,31 @@ enum inode_i_mutex_lock_class
I_MUTEX_PARENT2,
};
+static inline void inode_lock(struct inode *inode)
+{
+ mutex_lock(&inode->i_mutex);
+}
+
+static inline void inode_unlock(struct inode *inode)
+{
+ mutex_unlock(&inode->i_mutex);
+}
+
+static inline int inode_trylock(struct inode *inode)
+{
+ return mutex_trylock(&inode->i_mutex);
+}
+
+static inline int inode_is_locked(struct inode *inode)
+{
+ return mutex_is_locked(&inode->i_mutex);
+}
+
+static inline void inode_lock_nested(struct inode *inode, unsigned subclass)
+{
+ mutex_lock_nested(&inode->i_mutex, subclass);
+}
+
void lock_two_nondirectories(struct inode *, struct inode*);
void unlock_two_nondirectories(struct inode *, struct inode*);
@@ -2881,7 +2904,7 @@ extern void replace_mount_options(struct super_block *sb, char *options);
static inline bool io_is_direct(struct file *filp)
{
- return (filp->f_flags & O_DIRECT) || IS_DAX(file_inode(filp));
+ return (filp->f_flags & O_DIRECT) || IS_DAX(filp->f_mapping->host);
}
static inline int iocb_flags(struct file *file)
@@ -3047,8 +3070,8 @@ static inline bool dir_emit_dots(struct file *file, struct dir_context *ctx)
}
static inline bool dir_relax(struct inode *inode)
{
- mutex_unlock(&inode->i_mutex);
- mutex_lock(&inode->i_mutex);
+ inode_unlock(inode);
+ inode_lock(inode);
return !IS_DEADDIR(inode);
}
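
The wrappers make the tree-wide i_mutex conversion mechanical, as the dir_relax() hunk above shows. The substitution pattern (do_something() is illustrative):

	inode_lock(inode);		/* was: mutex_lock(&inode->i_mutex) */
	err = do_something(inode);
	inode_unlock(inode);		/* was: mutex_unlock(&inode->i_mutex) */
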
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 0639dcc98195..81de7123959d 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -165,7 +165,6 @@ struct ftrace_ops {
ftrace_func_t saved_func;
int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
- int nr_trampolines;
struct ftrace_ops_hash local_hash;
struct ftrace_ops_hash *func_hash;
struct ftrace_ops_hash old_hash;
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 76dd4f0da5ca..2ead22dd74a0 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -87,7 +87,8 @@ enum hrtimer_restart {
* @function: timer expiry callback function
* @base: pointer to the timer base (per cpu and per clock)
* @state: state information (See bit values above)
- * @start_pid: timer statistics field to store the pid of the task which
+ * @is_rel: Set if the timer was armed relative
+ * @start_pid: timer statistics field to store the pid of the task which
* started the timer
* @start_site: timer statistics field to store the site where the timer
* was started
@@ -101,7 +102,8 @@ struct hrtimer {
ktime_t _softexpires;
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
- unsigned long state;
+ u8 state;
+ u8 is_rel;
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
@@ -321,6 +323,27 @@ static inline void clock_was_set_delayed(void) { }
#endif
+static inline ktime_t
+__hrtimer_expires_remaining_adjusted(const struct hrtimer *timer, ktime_t now)
+{
+ ktime_t rem = ktime_sub(timer->node.expires, now);
+
+ /*
+ * Adjust relative timers for the extra we added in
+ * hrtimer_start_range_ns() to prevent short timeouts.
+ */
+ if (IS_ENABLED(CONFIG_TIME_LOW_RES) && timer->is_rel)
+ rem.tv64 -= hrtimer_resolution;
+ return rem;
+}
+
+static inline ktime_t
+hrtimer_expires_remaining_adjusted(const struct hrtimer *timer)
+{
+ return __hrtimer_expires_remaining_adjusted(timer,
+ timer->base->get_time());
+}
+
extern void clock_was_set(void);
#ifdef CONFIG_TIMERFD
extern void timerfd_clock_was_set(void);
@@ -390,7 +413,12 @@ static inline void hrtimer_restart(struct hrtimer *timer)
}
/* Query timers: */
-extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
+extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust);
+
+static inline ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
+{
+ return __hrtimer_get_remaining(timer, false);
+}
extern u64 hrtimer_get_next_event(void);
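
A sketch of the two query flavours (timer name illustrative): the raw value still includes the slack added for CONFIG_TIME_LOW_RES, while the adjusted one subtracts it.

	ktime_t raw = hrtimer_get_remaining(&timer);		/* unadjusted */
	ktime_t adj = hrtimer_expires_remaining_adjusted(&timer); /* low-res padding removed */
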
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cfe81e10bd54..459fd25b378e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -120,15 +120,15 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next);
-extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
- spinlock_t **ptl);
+extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
+ struct vm_area_struct *vma);
/* mmap_sem must be held on entry */
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
- spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+ struct vm_area_struct *vma)
{
VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
- return __pmd_trans_huge_lock(pmd, vma, ptl);
+ return __pmd_trans_huge_lock(pmd, vma);
else
return false;
}
@@ -190,10 +190,10 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
long adjust_next)
{
}
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
- spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+ struct vm_area_struct *vma)
{
- return false;
+ return NULL;
}
static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
diff --git a/include/linux/idr.h b/include/linux/idr.h
index 013fd9bc4cb6..083d61e92706 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -135,6 +135,20 @@ static inline void *idr_find(struct idr *idr, int id)
#define idr_for_each_entry(idp, entry, id) \
for (id = 0; ((entry) = idr_get_next(idp, &(id))) != NULL; ++id)
+/**
+ * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type
+ * @idp: idr handle
+ * @entry: the type * to use as cursor
+ * @id: id entry's key
+ *
+ * Continue to iterate over list of given type, continuing after
+ * the current position.
+ */
+#define idr_for_each_entry_continue(idp, entry, id) \
+ for ((entry) = idr_get_next((idp), &(id)); \
+ entry; \
+ ++id, (entry) = idr_get_next((idp), &(id)))
+
/*
* IDA - IDR based id allocator, use when translation from id to
* pointer isn't necessary.
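
A hedged sketch of the new macro resuming a walk where an earlier one stopped; my_idr, struct my_obj, last_id and process() are hypothetical names assumed to exist in the caller:

	struct my_obj *obj;
	int id = last_id;		/* resume point from an earlier walk */

	idr_for_each_entry_continue(&my_idr, obj, id) {
		if (process(obj))	/* hypothetical per-entry work */
			break;		/* 'id' now marks where to resume */
	}
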
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index cb30edbfe9fc..0e95fcc75b2a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -413,7 +413,7 @@ enum
NET_TX_SOFTIRQ,
NET_RX_SOFTIRQ,
BLOCK_SOFTIRQ,
- BLOCK_IOPOLL_SOFTIRQ,
+ IRQ_POLL_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
diff --git a/include/linux/io.h b/include/linux/io.h
index fffd88d7f426..32403b5716e5 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -29,6 +29,7 @@ struct device;
struct resource;
__visible void __iowrite32_copy(void __iomem *to, const void *from, size_t count);
+void __ioread32_copy(void *to, const void __iomem *from, size_t count);
void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
#ifdef CONFIG_MMU
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index f28dff313b07..a5c539fa5d2b 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -133,8 +133,9 @@ struct iommu_dm_region {
/**
* struct iommu_ops - iommu ops and capabilities
- * @domain_init: init iommu domain
- * @domain_destroy: destroy iommu domain
+ * @capable: check capability
+ * @domain_alloc: allocate iommu domain
+ * @domain_free: free iommu domain
* @attach_dev: attach device to an iommu domain
* @detach_dev: detach device from an iommu domain
* @map: map a physically contiguous memory region to an iommu domain
@@ -144,8 +145,15 @@ struct iommu_dm_region {
* @iova_to_phys: translate iova to physical address
* @add_device: add device to iommu grouping
* @remove_device: remove device from iommu grouping
+ * @device_group: find iommu group for a particular device
* @domain_get_attr: Query domain attributes
* @domain_set_attr: Change domain attributes
+ * @get_dm_regions: Request list of direct mapping requirements for a device
+ * @put_dm_regions: Free list of direct mapping requirements for a device
+ * @domain_window_enable: Configure and enable a particular window for a domain
+ * @domain_window_disable: Disable a particular window for a domain
+ * @domain_set_windows: Set the number of windows for a domain
+ * @domain_get_windows: Return the number of windows for a domain
* @of_xlate: add OF master IDs to iommu grouping
* @pgsize_bitmap: bitmap of supported page sizes
* @priv: per-instance data private to the iommu driver
@@ -182,9 +190,9 @@ struct iommu_ops {
int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t paddr, u64 size, int prot);
void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);
- /* Set the numer of window per domain */
+ /* Set the number of windows per domain */
int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count);
- /* Get the numer of window per domain */
+ /* Get the number of windows per domain */
u32 (*domain_get_windows)(struct iommu_domain *domain);
#ifdef CONFIG_OF_IOMMU
diff --git a/include/linux/irq_poll.h b/include/linux/irq_poll.h
new file mode 100644
index 000000000000..3e8c1b8fb9be
--- /dev/null
+++ b/include/linux/irq_poll.h
@@ -0,0 +1,25 @@
+#ifndef IRQ_POLL_H
+#define IRQ_POLL_H
+
+struct irq_poll;
+typedef int (irq_poll_fn)(struct irq_poll *, int);
+
+struct irq_poll {
+ struct list_head list;
+ unsigned long state;
+ int weight;
+ irq_poll_fn *poll;
+};
+
+enum {
+ IRQ_POLL_F_SCHED = 0,
+ IRQ_POLL_F_DISABLE = 1,
+};
+
+extern void irq_poll_sched(struct irq_poll *);
+extern void irq_poll_init(struct irq_poll *, int, irq_poll_fn *);
+extern void irq_poll_complete(struct irq_poll *);
+extern void irq_poll_enable(struct irq_poll *);
+extern void irq_poll_disable(struct irq_poll *);
+
+#endif
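
The API mirrors NAPI: the interrupt handler schedules polling, and the poll function reports how much of its budget it used, completing itself when it runs dry. A hedged driver sketch, assuming a hypothetical ctx with an embedded irq_poll and a process_completions() helper:

	static int my_poll(struct irq_poll *iop, int budget)
	{
		int done = process_completions(budget);	/* hypothetical */

		if (done < budget)
			irq_poll_complete(iop);	/* no more work: finish this round */
		return done;
	}

	/* setup: weight of 32 completions per poll round */
	irq_poll_init(&ctx->iop, 32, my_poll);

	/* from the hard interrupt handler */
	irq_poll_sched(&ctx->iop);
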
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index f64622ad02c1..04579d9fbce4 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -70,6 +70,7 @@ struct irq_fwspec {
*/
enum irq_domain_bus_token {
DOMAIN_BUS_ANY = 0,
+ DOMAIN_BUS_WIRED,
DOMAIN_BUS_PCI_MSI,
DOMAIN_BUS_PLATFORM_MSI,
DOMAIN_BUS_NEXUS,
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 7b68d2788a56..2cc643c6e870 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -109,11 +109,7 @@ struct compat_kexec_segment {
};
#endif
-struct kexec_sha_region {
- unsigned long start;
- unsigned long len;
-};
-
+#ifdef CONFIG_KEXEC_FILE
struct purgatory_info {
/* Pointer to elf header of read only purgatory */
Elf_Ehdr *ehdr;
@@ -130,6 +126,28 @@ struct purgatory_info {
unsigned long purgatory_load_addr;
};
+typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size);
+typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
+ unsigned long kernel_len, char *initrd,
+ unsigned long initrd_len, char *cmdline,
+ unsigned long cmdline_len);
+typedef int (kexec_cleanup_t)(void *loader_data);
+
+#ifdef CONFIG_KEXEC_VERIFY_SIG
+typedef int (kexec_verify_sig_t)(const char *kernel_buf,
+ unsigned long kernel_len);
+#endif
+
+struct kexec_file_ops {
+ kexec_probe_t *probe;
+ kexec_load_t *load;
+ kexec_cleanup_t *cleanup;
+#ifdef CONFIG_KEXEC_VERIFY_SIG
+ kexec_verify_sig_t *verify_sig;
+#endif
+};
+#endif
+
struct kimage {
kimage_entry_t head;
kimage_entry_t *entry;
@@ -161,6 +179,7 @@ struct kimage {
struct kimage_arch arch;
#endif
+#ifdef CONFIG_KEXEC_FILE
/* Additional fields for file based kexec syscall */
void *kernel_buf;
unsigned long kernel_buf_len;
@@ -179,38 +198,7 @@ struct kimage {
/* Information for loading purgatory */
struct purgatory_info purgatory_info;
-};
-
-/*
- * Keeps track of buffer parameters as provided by caller for requesting
- * memory placement of buffer.
- */
-struct kexec_buf {
- struct kimage *image;
- char *buffer;
- unsigned long bufsz;
- unsigned long mem;
- unsigned long memsz;
- unsigned long buf_align;
- unsigned long buf_min;
- unsigned long buf_max;
- bool top_down; /* allocate from top of memory hole */
-};
-
-typedef int (kexec_probe_t)(const char *kernel_buf, unsigned long kernel_size);
-typedef void *(kexec_load_t)(struct kimage *image, char *kernel_buf,
- unsigned long kernel_len, char *initrd,
- unsigned long initrd_len, char *cmdline,
- unsigned long cmdline_len);
-typedef int (kexec_cleanup_t)(void *loader_data);
-typedef int (kexec_verify_sig_t)(const char *kernel_buf,
- unsigned long kernel_len);
-
-struct kexec_file_ops {
- kexec_probe_t *probe;
- kexec_load_t *load;
- kexec_cleanup_t *cleanup;
- kexec_verify_sig_t *verify_sig;
+#endif
};
/* kexec interface functions */
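
With the loader types now under CONFIG_KEXEC_FILE, an image loader still supplies its callbacks the same way; a hedged outline with hypothetical callback names:

	static struct kexec_file_ops my_image_ops = {
		.probe   = my_probe,	/* recognize/validate the kernel image */
		.load    = my_load,	/* build segments, return loader data  */
		.cleanup = my_cleanup,	/* free the loader data                */
	#ifdef CONFIG_KEXEC_VERIFY_SIG
		.verify_sig = my_verify_sig,
	#endif
	};
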
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index 034117b3be5f..d6750111e48e 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -1,6 +1,8 @@
#ifndef NVM_H
#define NVM_H
+#include <linux/types.h>
+
enum {
NVM_IO_OK = 0,
NVM_IO_REQUEUE = 1,
@@ -11,12 +13,74 @@ enum {
NVM_IOTYPE_GC = 1,
};
+#define NVM_BLK_BITS (16)
+#define NVM_PG_BITS (16)
+#define NVM_SEC_BITS (8)
+#define NVM_PL_BITS (8)
+#define NVM_LUN_BITS (8)
+#define NVM_CH_BITS (8)
+
+struct ppa_addr {
+ /* Generic structure for all addresses */
+ union {
+ struct {
+ u64 blk : NVM_BLK_BITS;
+ u64 pg : NVM_PG_BITS;
+ u64 sec : NVM_SEC_BITS;
+ u64 pl : NVM_PL_BITS;
+ u64 lun : NVM_LUN_BITS;
+ u64 ch : NVM_CH_BITS;
+ } g;
+
+ u64 ppa;
+ };
+};
+
+struct nvm_rq;
+struct nvm_id;
+struct nvm_dev;
+
+typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
+typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
+typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
+typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
+ nvm_l2p_update_fn *, void *);
+typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
+ nvm_bb_update_fn *, void *);
+typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
+typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
+typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
+typedef void (nvm_destroy_dma_pool_fn)(void *);
+typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
+ dma_addr_t *);
+typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
+
+struct nvm_dev_ops {
+ nvm_id_fn *identity;
+ nvm_get_l2p_tbl_fn *get_l2p_tbl;
+ nvm_op_bb_tbl_fn *get_bb_tbl;
+ nvm_op_set_bb_fn *set_bb_tbl;
+
+ nvm_submit_io_fn *submit_io;
+ nvm_erase_blk_fn *erase_block;
+
+ nvm_create_dma_pool_fn *create_dma_pool;
+ nvm_destroy_dma_pool_fn *destroy_dma_pool;
+ nvm_dev_dma_alloc_fn *dev_dma_alloc;
+ nvm_dev_dma_free_fn *dev_dma_free;
+
+ unsigned int max_phys_sect;
+};
+
+
+
#ifdef CONFIG_NVM
#include <linux/blkdev.h>
-#include <linux/types.h>
#include <linux/file.h>
#include <linux/dmapool.h>
+#include <uapi/linux/lightnvm.h>
enum {
/* HW Responsibilities */
@@ -58,8 +122,29 @@ enum {
/* Block Types */
NVM_BLK_T_FREE = 0x0,
NVM_BLK_T_BAD = 0x1,
- NVM_BLK_T_DEV = 0x2,
- NVM_BLK_T_HOST = 0x4,
+ NVM_BLK_T_GRWN_BAD = 0x2,
+ NVM_BLK_T_DEV = 0x4,
+ NVM_BLK_T_HOST = 0x8,
+
+ /* Memory capabilities */
+ NVM_ID_CAP_SLC = 0x1,
+ NVM_ID_CAP_CMD_SUSPEND = 0x2,
+ NVM_ID_CAP_SCRAMBLE = 0x4,
+ NVM_ID_CAP_ENCRYPT = 0x8,
+
+ /* Memory types */
+ NVM_ID_FMTYPE_SLC = 0,
+ NVM_ID_FMTYPE_MLC = 1,
+};
+
+struct nvm_id_lp_mlc {
+ u16 num_pairs;
+ u8 pairs[886];
+};
+
+struct nvm_id_lp_tbl {
+ __u8 id[8];
+ struct nvm_id_lp_mlc mlc;
};
struct nvm_id_group {
@@ -82,6 +167,8 @@ struct nvm_id_group {
u32 mpos;
u32 mccap;
u16 cpar;
+
+ struct nvm_id_lp_tbl lptbl;
};
struct nvm_addr_format {
@@ -125,28 +212,8 @@ struct nvm_tgt_instance {
#define NVM_VERSION_MINOR 0
#define NVM_VERSION_PATCH 0
-#define NVM_BLK_BITS (16)
-#define NVM_PG_BITS (16)
-#define NVM_SEC_BITS (8)
-#define NVM_PL_BITS (8)
-#define NVM_LUN_BITS (8)
-#define NVM_CH_BITS (8)
-
-struct ppa_addr {
- /* Generic structure for all addresses */
- union {
- struct {
- u64 blk : NVM_BLK_BITS;
- u64 pg : NVM_PG_BITS;
- u64 sec : NVM_SEC_BITS;
- u64 pl : NVM_PL_BITS;
- u64 lun : NVM_LUN_BITS;
- u64 ch : NVM_CH_BITS;
- } g;
-
- u64 ppa;
- };
-};
+struct nvm_rq;
+typedef void (nvm_end_io_fn)(struct nvm_rq *);
struct nvm_rq {
struct nvm_tgt_instance *ins;
@@ -164,9 +231,14 @@ struct nvm_rq {
void *metadata;
dma_addr_t dma_metadata;
+ struct completion *wait;
+ nvm_end_io_fn *end_io;
+
uint8_t opcode;
uint16_t nr_pages;
uint16_t flags;
+
+ int error;
};
static inline struct nvm_rq *nvm_rq_from_pdu(void *pdu)
@@ -181,51 +253,31 @@ static inline void *nvm_rq_to_pdu(struct nvm_rq *rqdata)
struct nvm_block;
-typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *);
-typedef int (nvm_bb_update_fn)(struct ppa_addr, int, u8 *, void *);
-typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *);
-typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
- nvm_l2p_update_fn *, void *);
-typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, int,
- nvm_bb_update_fn *, void *);
-typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct nvm_rq *, int);
-typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
-typedef void (nvm_destroy_dma_pool_fn)(void *);
-typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
- dma_addr_t *);
-typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t);
-
-struct nvm_dev_ops {
- nvm_id_fn *identity;
- nvm_get_l2p_tbl_fn *get_l2p_tbl;
- nvm_op_bb_tbl_fn *get_bb_tbl;
- nvm_op_set_bb_fn *set_bb_tbl;
-
- nvm_submit_io_fn *submit_io;
- nvm_erase_blk_fn *erase_block;
-
- nvm_create_dma_pool_fn *create_dma_pool;
- nvm_destroy_dma_pool_fn *destroy_dma_pool;
- nvm_dev_dma_alloc_fn *dev_dma_alloc;
- nvm_dev_dma_free_fn *dev_dma_free;
-
- unsigned int max_phys_sect;
-};
-
struct nvm_lun {
int id;
int lun_id;
int chnl_id;
- unsigned int nr_inuse_blocks; /* Number of used blocks */
+ /* It is up to the target to mark blocks as closed. If the target does
+ * not do it, all blocks are marked as open, and nr_open_blocks
+ * represents the number of blocks in use
+ */
+ unsigned int nr_open_blocks; /* Number of used, writable blocks */
+ unsigned int nr_closed_blocks; /* Number of used, read-only blocks */
unsigned int nr_free_blocks; /* Number of unused blocks */
unsigned int nr_bad_blocks; /* Number of bad blocks */
- struct nvm_block *blocks;
spinlock_t lock;
+
+ struct nvm_block *blocks;
+};
+
+enum {
+ NVM_BLK_ST_FREE = 0x1, /* Free block */
+ NVM_BLK_ST_OPEN = 0x2, /* Open block - read-write */
+ NVM_BLK_ST_CLOSED = 0x4, /* Closed block - read-only */
+ NVM_BLK_ST_BAD = 0x8, /* Bad block */
};
struct nvm_block {
@@ -234,7 +286,16 @@ struct nvm_block {
unsigned long id;
void *priv;
- int type;
+ int state;
+};
+
+/* system block cpu representation */
+struct nvm_sb_info {
+ unsigned long seqnr;
+ unsigned long erase_cnt;
+ unsigned int version;
+ char mmtype[NVM_MMTYPE_LEN];
+ struct ppa_addr fs_ppa;
};
struct nvm_dev {
@@ -247,6 +308,9 @@ struct nvm_dev {
struct nvmm_type *mt;
void *mp;
+ /* System blocks */
+ struct nvm_sb_info sb;
+
/* Device information */
int nr_chnls;
int nr_planes;
@@ -256,6 +320,7 @@ struct nvm_dev {
int blks_per_lun;
int sec_size;
int oob_size;
+ int mccap;
struct nvm_addr_format ppaf;
/* Calculated/Cached values. These do not reflect the actual usable
@@ -268,6 +333,10 @@ struct nvm_dev {
int sec_per_blk;
int sec_per_lun;
+ /* lower page table */
+ int lps_per_blk;
+ int *lptbl;
+
unsigned long total_pages;
unsigned long total_blocks;
int nr_luns;
@@ -280,6 +349,8 @@ struct nvm_dev {
/* Backend device */
struct request_queue *q;
char name[DISK_NAME_LEN];
+
+ struct mutex mlock;
};
static inline struct ppa_addr generic_to_dev_addr(struct nvm_dev *dev,
@@ -345,9 +416,13 @@ static inline struct ppa_addr block_to_ppa(struct nvm_dev *dev,
return ppa;
}
+static inline int ppa_to_slc(struct nvm_dev *dev, int slc_pg)
+{
+ return dev->lptbl[slc_pg];
+}
+
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef int (nvm_tgt_end_io_fn)(struct nvm_rq *, int);
typedef void *(nvm_tgt_init_fn)(struct nvm_dev *, struct gendisk *, int, int);
typedef void (nvm_tgt_exit_fn)(void *);
@@ -358,7 +433,7 @@ struct nvm_tgt_type {
/* target entry points */
nvm_tgt_make_rq_fn *make_rq;
nvm_tgt_capacity_fn *capacity;
- nvm_tgt_end_io_fn *end_io;
+ nvm_end_io_fn *end_io;
/* module-specific init/teardown */
nvm_tgt_init_fn *init;
@@ -383,7 +458,6 @@ typedef int (nvmm_open_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_close_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef void (nvmm_flush_blk_fn)(struct nvm_dev *, struct nvm_block *);
typedef int (nvmm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef int (nvmm_end_io_fn)(struct nvm_rq *, int);
typedef int (nvmm_erase_blk_fn)(struct nvm_dev *, struct nvm_block *,
unsigned long);
typedef struct nvm_lun *(nvmm_get_lun_fn)(struct nvm_dev *, int);
@@ -397,6 +471,8 @@ struct nvmm_type {
nvmm_unregister_fn *unregister_mgr;
/* Block administration callbacks */
+ nvmm_get_blk_fn *get_blk_unlocked;
+ nvmm_put_blk_fn *put_blk_unlocked;
nvmm_get_blk_fn *get_blk;
nvmm_put_blk_fn *put_blk;
nvmm_open_blk_fn *open_blk;
@@ -404,7 +480,6 @@ struct nvmm_type {
nvmm_flush_blk_fn *flush_blk;
nvmm_submit_io_fn *submit_io;
- nvmm_end_io_fn *end_io;
nvmm_erase_blk_fn *erase_blk;
/* Configuration management */
@@ -418,6 +493,10 @@ struct nvmm_type {
extern int nvm_register_mgr(struct nvmm_type *);
extern void nvm_unregister_mgr(struct nvmm_type *);
+extern struct nvm_block *nvm_get_blk_unlocked(struct nvm_dev *,
+ struct nvm_lun *, unsigned long);
+extern void nvm_put_blk_unlocked(struct nvm_dev *, struct nvm_block *);
+
extern struct nvm_block *nvm_get_blk(struct nvm_dev *, struct nvm_lun *,
unsigned long);
extern void nvm_put_blk(struct nvm_dev *, struct nvm_block *);
@@ -427,7 +506,36 @@ extern int nvm_register(struct request_queue *, char *,
extern void nvm_unregister(char *);
extern int nvm_submit_io(struct nvm_dev *, struct nvm_rq *);
+extern void nvm_generic_to_addr_mode(struct nvm_dev *, struct nvm_rq *);
+extern void nvm_addr_to_generic_mode(struct nvm_dev *, struct nvm_rq *);
+extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
+ struct ppa_addr *, int);
+extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
+extern int nvm_erase_ppa(struct nvm_dev *, struct ppa_addr *, int);
extern int nvm_erase_blk(struct nvm_dev *, struct nvm_block *);
+extern void nvm_end_io(struct nvm_rq *, int);
+extern int nvm_submit_ppa(struct nvm_dev *, struct ppa_addr *, int, int, int,
+ void *, int);
+
+/* sysblk.c */
+#define NVM_SYSBLK_MAGIC 0x4E564D53 /* "NVMS" */
+
+/* system block on disk representation */
+struct nvm_system_block {
+ __be32 magic; /* magic signature */
+ __be32 seqnr; /* sequence number */
+ __be32 erase_cnt; /* erase count */
+ __be16 version; /* version number */
+ u8 mmtype[NVM_MMTYPE_LEN]; /* media manager name */
+ __be64 fs_ppa; /* PPA for media manager
+ * superblock */
+};
+
+extern int nvm_get_sysblock(struct nvm_dev *, struct nvm_sb_info *);
+extern int nvm_update_sysblock(struct nvm_dev *, struct nvm_sb_info *);
+extern int nvm_init_sysblock(struct nvm_dev *, struct nvm_sb_info *);
+
+extern int nvm_dev_factory(struct nvm_dev *, int flags);
#else /* CONFIG_NVM */
struct nvm_dev_ops;
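
With ppa_addr now visible outside CONFIG_NVM, a caller can pack a generic address field by field and hand the device the 64-bit value; a small sketch with arbitrary coordinates:

	struct ppa_addr ppa = { .ppa = 0 };

	ppa.g.ch  = 1;		/* channel            */
	ppa.g.lun = 0;		/* LUN within channel */
	ppa.g.blk = 42;		/* block              */
	ppa.g.pg  = 3;		/* page               */
	/* ppa.ppa now carries the packed generic-format address */
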
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 2a6b9947aaa3..cb0ba9f2a9a2 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -40,7 +40,7 @@ struct list_lru_node {
spinlock_t lock;
/* global list, used for the root cgroup in cgroup aware lrus */
struct list_lru_one lru;
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
struct list_lru_memcg *memcg_lrus;
#endif
@@ -48,7 +48,7 @@ struct list_lru_node {
struct list_lru {
struct list_lru_node *node;
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
struct list_head list;
#endif
};
diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h
index 46262284de47..04fc6e6c7ff0 100644
--- a/include/linux/lru_cache.h
+++ b/include/linux/lru_cache.h
@@ -264,7 +264,7 @@ extern unsigned int lc_put(struct lru_cache *lc, struct lc_element *e);
extern void lc_committed(struct lru_cache *lc);
struct seq_file;
-extern size_t lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc);
+extern void lc_seq_printf_stats(struct seq_file *seq, struct lru_cache *lc);
extern void lc_seq_dump_details(struct seq_file *seq, struct lru_cache *lc, char *utext,
void (*detail) (struct seq_file *, struct lc_element *));
diff --git a/include/linux/lz4.h b/include/linux/lz4.h
index 4356686b0a39..6b784c59f321 100644
--- a/include/linux/lz4.h
+++ b/include/linux/lz4.h
@@ -9,8 +9,8 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#define LZ4_MEM_COMPRESS (4096 * sizeof(unsigned char *))
-#define LZ4HC_MEM_COMPRESS (65538 * sizeof(unsigned char *))
+#define LZ4_MEM_COMPRESS (16384)
+#define LZ4HC_MEM_COMPRESS (262144 + (2 * sizeof(unsigned char *)))
/*
* lz4_compressbound()
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 189f04d4d2ec..792c8981e633 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -50,6 +50,9 @@ enum mem_cgroup_stat_index {
MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
MEM_CGROUP_STAT_NSTATS,
+ /* default hierarchy stats */
+ MEMCG_SOCK = MEM_CGROUP_STAT_NSTATS,
+ MEMCG_NR_STAT,
};
struct mem_cgroup_reclaim_cookie {
@@ -85,15 +88,9 @@ enum mem_cgroup_events_target {
MEM_CGROUP_NTARGETS,
};
-struct cg_proto {
- struct page_counter memory_allocated; /* Current allocated memory. */
- int memory_pressure;
- bool active;
-};
-
#ifdef CONFIG_MEMCG
struct mem_cgroup_stat_cpu {
- long count[MEM_CGROUP_STAT_NSTATS];
+ long count[MEMCG_NR_STAT];
unsigned long events[MEMCG_NR_EVENTS];
unsigned long nr_page_events;
unsigned long targets[MEM_CGROUP_NTARGETS];
@@ -152,6 +149,12 @@ struct mem_cgroup_thresholds {
struct mem_cgroup_threshold_ary *spare;
};
+enum memcg_kmem_state {
+ KMEM_NONE,
+ KMEM_ALLOCATED,
+ KMEM_ONLINE,
+};
+
/*
* The memory controller data structure. The memory controller controls both
* page cache and RSS per cgroup. We would eventually like to provide
@@ -163,8 +166,12 @@ struct mem_cgroup {
/* Accounted resources */
struct page_counter memory;
+ struct page_counter swap;
+
+ /* Legacy consumer-oriented counters */
struct page_counter memsw;
struct page_counter kmem;
+ struct page_counter tcpmem;
/* Normal memory consumption range */
unsigned long low;
@@ -178,9 +185,6 @@ struct mem_cgroup {
/* vmpressure notifications */
struct vmpressure vmpressure;
- /* css_online() has been completed */
- int initialized;
-
/*
* Should the accounting and control be hierarchical, per subtree?
*/
@@ -227,14 +231,16 @@ struct mem_cgroup {
*/
struct mem_cgroup_stat_cpu __percpu *stat;
-#if defined(CONFIG_MEMCG_KMEM) && defined(CONFIG_INET)
- struct cg_proto tcp_mem;
-#endif
-#if defined(CONFIG_MEMCG_KMEM)
+ unsigned long socket_pressure;
+
+ /* Legacy tcp memory accounting */
+ bool tcpmem_active;
+ int tcpmem_pressure;
+
+#ifndef CONFIG_SLOB
/* Index in the kmem_cache->memcg_params.memcg_caches array */
int kmemcg_id;
- bool kmem_acct_activated;
- bool kmem_acct_active;
+ enum memcg_kmem_state kmem_state;
#endif
int last_scanned_node;
@@ -249,10 +255,6 @@ struct mem_cgroup {
struct wb_domain cgwb_domain;
#endif
-#ifdef CONFIG_INET
- unsigned long socket_pressure;
-#endif
-
/* List of events which userspace want to receive */
struct list_head event_list;
spinlock_t event_list_lock;
@@ -356,6 +358,13 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
+static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
+{
+ if (mem_cgroup_disabled())
+ return true;
+ return !!(memcg->css.flags & CSS_ONLINE);
+}
+
/*
* For memory reclaim.
*/
@@ -364,20 +373,6 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
int nr_pages);
-static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
-{
- struct mem_cgroup_per_zone *mz;
- struct mem_cgroup *memcg;
-
- if (mem_cgroup_disabled())
- return true;
-
- mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
- memcg = mz->memcg;
-
- return !!(memcg->css.flags & CSS_ONLINE);
-}
-
static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
@@ -590,13 +585,13 @@ static inline bool mem_cgroup_disabled(void)
return true;
}
-static inline bool
-mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
+static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
{
return true;
}
-static inline bool mem_cgroup_lruvec_online(struct lruvec *lruvec)
+static inline bool
+mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
{
return true;
}
@@ -707,15 +702,13 @@ void sock_update_memcg(struct sock *sk);
void sock_release_memcg(struct sock *sk);
bool mem_cgroup_charge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
void mem_cgroup_uncharge_skmem(struct mem_cgroup *memcg, unsigned int nr_pages);
-#if defined(CONFIG_MEMCG) && defined(CONFIG_INET)
+#ifdef CONFIG_MEMCG
extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
-#ifdef CONFIG_MEMCG_KMEM
- if (memcg->tcp_mem.memory_pressure)
+ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys) && memcg->tcpmem_pressure)
return true;
-#endif
do {
if (time_before(jiffies, memcg->socket_pressure))
return true;
@@ -730,7 +723,7 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
}
#endif
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
extern struct static_key_false memcg_kmem_enabled_key;
extern int memcg_nr_cache_ids;
@@ -750,9 +743,9 @@ static inline bool memcg_kmem_enabled(void)
return static_branch_unlikely(&memcg_kmem_enabled_key);
}
-static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+static inline bool memcg_kmem_online(struct mem_cgroup *memcg)
{
- return memcg->kmem_acct_active;
+ return memcg->kmem_state == KMEM_ONLINE;
}
/*
@@ -850,7 +843,7 @@ static inline bool memcg_kmem_enabled(void)
return false;
}
-static inline bool memcg_kmem_is_active(struct mem_cgroup *memcg)
+static inline bool memcg_kmem_online(struct mem_cgroup *memcg)
{
return false;
}
@@ -886,5 +879,6 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
static inline void memcg_kmem_put_cache(struct kmem_cache *cachep)
{
}
-#endif /* CONFIG_MEMCG_KMEM */
+#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
+
#endif /* _LINUX_MEMCONTROL_H */
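
Callers that used to reach the memcg through the lruvec now test the group directly, and kmem sites test the tri-state instead of the old bool; a hedged sketch (my_scan() is hypothetical):

	static void my_scan(struct mem_cgroup *memcg)
	{
		if (!mem_cgroup_online(memcg))
			return;		/* replaces mem_cgroup_lruvec_online() */
		if (!memcg_kmem_online(memcg))
			return;		/* replaces memcg_kmem_is_active() */
		/* ... cgroup-aware work ... */
	}
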
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h
index 58391f2e0414..116b284bc4ce 100644
--- a/include/linux/mlx4/cmd.h
+++ b/include/linux/mlx4/cmd.h
@@ -206,7 +206,8 @@ enum {
MLX4_SET_PORT_GID_TABLE = 0x5,
MLX4_SET_PORT_PRIO2TC = 0x8,
MLX4_SET_PORT_SCHEDULER = 0x9,
- MLX4_SET_PORT_VXLAN = 0xB
+ MLX4_SET_PORT_VXLAN = 0xB,
+ MLX4_SET_PORT_ROCE_ADDR = 0xD
};
enum {
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index d3133be12d92..430a929f048b 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -216,6 +216,7 @@ enum {
MLX4_DEV_CAP_FLAG2_SKIP_OUTER_VLAN = 1LL << 30,
MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB = 1ULL << 31,
MLX4_DEV_CAP_FLAG2_LB_SRC_CHK = 1ULL << 32,
+ MLX4_DEV_CAP_FLAG2_ROCE_V1_V2 = 1ULL << 33,
};
enum {
@@ -267,12 +268,14 @@ enum {
MLX4_BMME_FLAG_TYPE_2_WIN = 1 << 9,
MLX4_BMME_FLAG_RESERVED_LKEY = 1 << 10,
MLX4_BMME_FLAG_FAST_REG_WR = 1 << 11,
+ MLX4_BMME_FLAG_ROCE_V1_V2 = 1 << 19,
MLX4_BMME_FLAG_PORT_REMAP = 1 << 24,
MLX4_BMME_FLAG_VSD_INIT2RTR = 1 << 28,
};
enum {
- MLX4_FLAG_PORT_REMAP = MLX4_BMME_FLAG_PORT_REMAP
+ MLX4_FLAG_PORT_REMAP = MLX4_BMME_FLAG_PORT_REMAP,
+ MLX4_FLAG_ROCE_V1_V2 = MLX4_BMME_FLAG_ROCE_V1_V2
};
enum mlx4_event {
@@ -979,14 +982,11 @@ struct mlx4_mad_ifc {
for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
if ((type) == (dev)->caps.port_mask[(port)])
-#define mlx4_foreach_non_ib_transport_port(port, dev) \
- for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
- if (((dev)->caps.port_mask[port] != MLX4_PORT_TYPE_IB))
-
#define mlx4_foreach_ib_transport_port(port, dev) \
- for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
+ for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
- ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
+ ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) || \
+ ((dev)->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2))
#define MLX4_INVALID_SLAVE_ID 0xFF
#define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1)
@@ -1457,6 +1457,7 @@ int mlx4_get_base_gid_ix(struct mlx4_dev *dev, int slave, int port);
int mlx4_config_vxlan_port(struct mlx4_dev *dev, __be16 udp_port);
int mlx4_disable_rx_port_check(struct mlx4_dev *dev, bool dis);
+int mlx4_config_roce_v2_port(struct mlx4_dev *dev, u16 udp_port);
int mlx4_virt2phy_port_map(struct mlx4_dev *dev, u32 port1, u32 port2);
int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index fe052e234906..587cdf943b52 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -194,7 +194,7 @@ struct mlx4_qp_context {
u8 mtu_msgmax;
u8 rq_size_stride;
u8 sq_size_stride;
- u8 rlkey;
+ u8 rlkey_roce_mode;
__be32 usr_page;
__be32 local_qpn;
__be32 remote_qpn;
@@ -204,7 +204,8 @@ struct mlx4_qp_context {
u32 reserved1;
__be32 next_send_psn;
__be32 cqn_send;
- u32 reserved2[2];
+ __be16 roce_entropy;
+ __be16 reserved2[3];
__be32 last_acked_psn;
__be32 ssn;
__be32 params2;
@@ -487,4 +488,14 @@ static inline struct mlx4_qp *__mlx4_qp_lookup(struct mlx4_dev *dev, u32 qpn)
void mlx4_qp_remove(struct mlx4_dev *dev, struct mlx4_qp *qp);
+static inline u16 folded_qp(u32 q)
+{
+ u16 res;
+
+ res = ((q & 0xff) ^ ((q & 0xff0000) >> 16)) | (q & 0xff00);
+ return res;
+}
+
+u16 mlx4_qp_roce_entropy(struct mlx4_dev *dev, u32 qpn);
+
#endif /* MLX4_QP_H */
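
folded_qp() folds the 24-bit QP number into 16 bits by XOR-ing the top byte into the bottom one while keeping the middle byte, which mlx4_qp_roce_entropy() can then feed into the RoCE v2 UDP source port; a worked value:

	u16 e = folded_qp(0x123456);
	/* (0x56 ^ 0x12) | 0x3400 == 0x44 | 0x3400 == 0x3444 */
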
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 7be845e30689..987764afa65c 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -223,6 +223,14 @@ enum {
#define MLX5_UMR_MTT_MASK (MLX5_UMR_MTT_ALIGNMENT - 1)
#define MLX5_UMR_MTT_MIN_CHUNK_SIZE MLX5_UMR_MTT_ALIGNMENT
+#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)
+
+enum {
+ MLX5_EVENT_QUEUE_TYPE_QP = 0,
+ MLX5_EVENT_QUEUE_TYPE_RQ = 1,
+ MLX5_EVENT_QUEUE_TYPE_SQ = 2,
+};
+
enum mlx5_event {
MLX5_EVENT_TYPE_COMP = 0x0,
@@ -280,6 +288,26 @@ enum {
};
enum {
+ MLX5_ROCE_VERSION_1 = 0,
+ MLX5_ROCE_VERSION_2 = 2,
+};
+
+enum {
+ MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1,
+ MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2,
+};
+
+enum {
+ MLX5_ROCE_L3_TYPE_IPV4 = 0,
+ MLX5_ROCE_L3_TYPE_IPV6 = 1,
+};
+
+enum {
+ MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1,
+ MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2,
+};
+
+enum {
MLX5_OPCODE_NOP = 0x00,
MLX5_OPCODE_SEND_INVAL = 0x01,
MLX5_OPCODE_RDMA_WRITE = 0x08,
@@ -446,7 +474,7 @@ struct mlx5_init_seg {
__be32 rsvd2[880];
__be32 internal_timer_h;
__be32 internal_timer_l;
- __be32 rsrv3[2];
+ __be32 rsvd3[2];
__be32 health_counter;
__be32 rsvd4[1019];
__be64 ieee1588_clk;
@@ -460,7 +488,9 @@ struct mlx5_eqe_comp {
};
struct mlx5_eqe_qp_srq {
- __be32 reserved[6];
+ __be32 reserved1[5];
+ u8 type;
+ u8 reserved2[3];
__be32 qp_srq_n;
};
@@ -651,6 +681,12 @@ enum {
};
enum {
+ MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
+ MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
+ MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,
+};
+
+enum {
CQE_L2_OK = 1 << 0,
CQE_L3_OK = 1 << 1,
CQE_L4_OK = 1 << 2,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 5162f3533042..1e3006dcf35d 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -115,6 +115,11 @@ enum {
MLX5_REG_HOST_ENDIANNESS = 0x7004,
};
+enum {
+ MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0,
+ MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1,
+};
+
enum mlx5_page_fault_resume_flags {
MLX5_PAGE_FAULT_RESUME_REQUESTOR = 1 << 0,
MLX5_PAGE_FAULT_RESUME_WRITE = 1 << 1,
@@ -341,9 +346,11 @@ struct mlx5_core_mr {
};
enum mlx5_res_type {
- MLX5_RES_QP,
- MLX5_RES_SRQ,
- MLX5_RES_XSRQ,
+ MLX5_RES_QP = MLX5_EVENT_QUEUE_TYPE_QP,
+ MLX5_RES_RQ = MLX5_EVENT_QUEUE_TYPE_RQ,
+ MLX5_RES_SQ = MLX5_EVENT_QUEUE_TYPE_SQ,
+ MLX5_RES_SRQ = 3,
+ MLX5_RES_XSRQ = 4,
};
struct mlx5_core_rsc_common {
@@ -651,13 +658,6 @@ extern struct workqueue_struct *mlx5_core_wq;
.struct_offset_bytes = offsetof(struct ib_unpacked_ ## header, field), \
.struct_size_bytes = sizeof((struct ib_unpacked_ ## header *)0)->field
-struct ib_field {
- size_t struct_offset_bytes;
- size_t struct_size_bytes;
- int offset_bits;
- int size_bits;
-};
-
static inline struct mlx5_core_dev *pci2mlx5_core_dev(struct pci_dev *pdev)
{
return pci_get_drvdata(pdev);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 68d73f82e009..231ab6bcea76 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -67,6 +67,11 @@ enum {
};
enum {
+ MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0,
+ MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3,
+};
+
+enum {
MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
MLX5_CMD_OP_INIT_HCA = 0x102,
@@ -573,21 +578,24 @@ enum {
struct mlx5_ifc_atomic_caps_bits {
u8 reserved_0[0x40];
- u8 atomic_req_endianness[0x1];
- u8 reserved_1[0x1f];
+ u8 atomic_req_8B_endianess_mode[0x2];
+ u8 reserved_1[0x4];
+ u8 supported_atomic_req_8B_endianess_mode_1[0x1];
- u8 reserved_2[0x20];
+ u8 reserved_2[0x19];
- u8 reserved_3[0x10];
- u8 atomic_operations[0x10];
+ u8 reserved_3[0x20];
u8 reserved_4[0x10];
- u8 atomic_size_qp[0x10];
+ u8 atomic_operations[0x10];
u8 reserved_5[0x10];
+ u8 atomic_size_qp[0x10];
+
+ u8 reserved_6[0x10];
u8 atomic_size_dc[0x10];
- u8 reserved_6[0x720];
+ u8 reserved_7[0x720];
};
struct mlx5_ifc_odp_cap_bits {
@@ -850,7 +858,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_66[0x8];
u8 log_uar_page_sz[0x10];
- u8 reserved_67[0x40];
+ u8 reserved_67[0x20];
+ u8 device_frequency_mhz[0x20];
u8 device_frequency_khz[0x20];
u8 reserved_68[0x5f];
u8 cqe_zip[0x1];
@@ -2215,19 +2224,25 @@ struct mlx5_ifc_nic_vport_context_bits {
u8 mtu[0x10];
- u8 reserved_3[0x640];
+ u8 system_image_guid[0x40];
+ u8 port_guid[0x40];
+ u8 node_guid[0x40];
+
+ u8 reserved_3[0x140];
+ u8 qkey_violation_counter[0x10];
+ u8 reserved_4[0x430];
u8 promisc_uc[0x1];
u8 promisc_mc[0x1];
u8 promisc_all[0x1];
- u8 reserved_4[0x2];
+ u8 reserved_5[0x2];
u8 allowed_list_type[0x3];
- u8 reserved_5[0xc];
+ u8 reserved_6[0xc];
u8 allowed_list_size[0xc];
struct mlx5_ifc_mac_address_layout_bits permanent_address;
- u8 reserved_6[0x20];
+ u8 reserved_7[0x20];
u8 current_uc_mac_address[0][0x40];
};
@@ -4199,6 +4214,13 @@ struct mlx5_ifc_modify_tis_out_bits {
u8 reserved_1[0x40];
};
+struct mlx5_ifc_modify_tis_bitmask_bits {
+ u8 reserved_0[0x20];
+
+ u8 reserved_1[0x1f];
+ u8 prio[0x1];
+};
+
struct mlx5_ifc_modify_tis_in_bits {
u8 opcode[0x10];
u8 reserved_0[0x10];
@@ -4211,7 +4233,7 @@ struct mlx5_ifc_modify_tis_in_bits {
u8 reserved_3[0x20];
- u8 modify_bitmask[0x40];
+ struct mlx5_ifc_modify_tis_bitmask_bits bitmask;
u8 reserved_4[0x40];
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index f079fb1a31f7..5b8c89ffaa58 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -85,7 +85,16 @@ enum mlx5_qp_state {
MLX5_QP_STATE_ERR = 6,
MLX5_QP_STATE_SQ_DRAINING = 7,
MLX5_QP_STATE_SUSPENDED = 9,
- MLX5_QP_NUM_STATE
+ MLX5_QP_NUM_STATE,
+ MLX5_QP_STATE,
+ MLX5_QP_STATE_BAD,
+};
+
+enum {
+ MLX5_SQ_STATE_NA = MLX5_SQC_STATE_ERR + 1,
+ MLX5_SQ_NUM_STATE = MLX5_SQ_STATE_NA + 1,
+ MLX5_RQ_STATE_NA = MLX5_RQC_STATE_ERR + 1,
+ MLX5_RQ_NUM_STATE = MLX5_RQ_STATE_NA + 1,
};
enum {
@@ -130,6 +139,9 @@ enum {
MLX5_QP_BIT_RWE = 1 << 14,
MLX5_QP_BIT_RAE = 1 << 13,
MLX5_QP_BIT_RIC = 1 << 4,
+ MLX5_QP_BIT_CC_SLAVE_RECV = 1 << 2,
+ MLX5_QP_BIT_CC_SLAVE_SEND = 1 << 1,
+ MLX5_QP_BIT_CC_MASTER = 1 << 0
};
enum {
@@ -248,8 +260,12 @@ struct mlx5_av {
__be32 dqp_dct;
u8 stat_rate_sl;
u8 fl_mlid;
- __be16 rlid;
- u8 reserved0[10];
+ union {
+ __be16 rlid;
+ __be16 udp_sport;
+ };
+ u8 reserved0[4];
+ u8 rmac[6];
u8 tclass;
u8 hop_limit;
__be32 grh_gid_fl;
@@ -456,11 +472,16 @@ struct mlx5_qp_path {
u8 static_rate;
u8 hop_limit;
__be32 tclass_flowlabel;
- u8 rgid[16];
- u8 rsvd1[4];
- u8 sl;
+ union {
+ u8 rgid[16];
+ u8 rip[16];
+ };
+ u8 f_dscp_ecn_prio;
+ u8 ecn_dscp;
+ __be16 udp_sport;
+ u8 dci_cfi_prio_sl;
u8 port;
- u8 rsvd2[6];
+ u8 rmac[6];
};
struct mlx5_qp_context {
@@ -620,8 +641,7 @@ int mlx5_core_create_qp(struct mlx5_core_dev *dev,
struct mlx5_core_qp *qp,
struct mlx5_create_qp_mbox_in *in,
int inlen);
-int mlx5_core_qp_modify(struct mlx5_core_dev *dev, enum mlx5_qp_state cur_state,
- enum mlx5_qp_state new_state,
+int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 operation,
struct mlx5_modify_qp_mbox_in *in, int sqd_event,
struct mlx5_core_qp *qp);
int mlx5_core_destroy_qp(struct mlx5_core_dev *dev,
@@ -639,6 +659,14 @@ void mlx5_debug_qp_remove(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp);
int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 qpn,
u8 context, int error);
#endif
+int mlx5_core_create_rq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *rq);
+void mlx5_core_destroy_rq_tracked(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *rq);
+int mlx5_core_create_sq_tracked(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ struct mlx5_core_qp *sq);
+void mlx5_core_destroy_sq_tracked(struct mlx5_core_dev *dev,
+ struct mlx5_core_qp *sq);
static inline const char *mlx5_qp_type_str(int type)
{
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h
new file mode 100644
index 000000000000..88441f5ece25
--- /dev/null
+++ b/include/linux/mlx5/transobj.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __TRANSOBJ_H__
+#define __TRANSOBJ_H__
+
+#include <linux/mlx5/driver.h>
+
+int mlx5_core_alloc_transport_domain(struct mlx5_core_dev *dev, u32 *tdn);
+void mlx5_core_dealloc_transport_domain(struct mlx5_core_dev *dev, u32 tdn);
+int mlx5_core_create_rq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqn);
+int mlx5_core_modify_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *in, int inlen);
+void mlx5_core_destroy_rq(struct mlx5_core_dev *dev, u32 rqn);
+int mlx5_core_query_rq(struct mlx5_core_dev *dev, u32 rqn, u32 *out);
+int mlx5_core_create_sq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *sqn);
+int mlx5_core_modify_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *in, int inlen);
+void mlx5_core_destroy_sq(struct mlx5_core_dev *dev, u32 sqn);
+int mlx5_core_query_sq(struct mlx5_core_dev *dev, u32 sqn, u32 *out);
+int mlx5_core_create_tir(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tirn);
+int mlx5_core_modify_tir(struct mlx5_core_dev *dev, u32 tirn, u32 *in,
+ int inlen);
+void mlx5_core_destroy_tir(struct mlx5_core_dev *dev, u32 tirn);
+int mlx5_core_create_tis(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *tisn);
+int mlx5_core_modify_tis(struct mlx5_core_dev *dev, u32 tisn, u32 *in,
+ int inlen);
+void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn);
+int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn);
+int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen);
+int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rmpn);
+int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 rmpn);
+int mlx5_core_query_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u32 *out);
+int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm);
+
+int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen,
+ u32 *rqtn);
+int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in,
+ int inlen);
+void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn);
+
+#endif /* __TRANSOBJ_H__ */
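
These entry points take raw mailbox buffers built with the mlx5_ifc accessors (MLX5_ST_SZ_DW, MLX5_ADDR_OF, MLX5_SET). A hedged TIS-creation sketch modeled on typical driver usage; mdev, tdn and tc are assumed to come from the caller, and the tc << 1 priority encoding is an assumption borrowed from the Ethernet driver:

	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
	u32 tisn;
	int err;

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, transport_domain, tdn);

	err = mlx5_core_create_tis(mdev, in, sizeof(in), &tisn);
	if (err)
		return err;
	/* ... teardown path ... */
	mlx5_core_destroy_tis(mdev, tisn);
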
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 638f2ca7a527..123771003e68 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,11 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
u16 vport, u8 *addr);
+int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
+ u64 *system_image_guid);
+int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
+int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev,
+ u16 *qkey_viol_cntr);
int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport,
u8 port_num, u16 vf_num, u16 gid_index,
union ib_gid *gid);
@@ -85,4 +90,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev,
u16 vlans[],
int list_size);
+int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev);
+int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev);
+
#endif /* __MLX5_VPORT_H__ */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f1cd22f2df1a..516e14944339 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -201,11 +201,13 @@ extern unsigned int kobjsize(const void *objp);
#endif
#ifdef CONFIG_STACK_GROWSUP
-#define VM_STACK_FLAGS (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK VM_GROWSUP
#else
-#define VM_STACK_FLAGS (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+#define VM_STACK VM_GROWSDOWN
#endif
+#define VM_STACK_FLAGS (VM_STACK | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
+
/*
* Special vmas that are non-mergable, non-mlock()able.
* Note: mm/huge_memory.c VM_NO_THP depends on this definition.
@@ -1341,8 +1343,7 @@ static inline int stack_guard_page_end(struct vm_area_struct *vma,
!vma_growsup(vma->vm_next, addr);
}
-extern struct task_struct *task_of_stack(struct task_struct *task,
- struct vm_area_struct *vma, bool in_group);
+int vma_is_stack_for_task(struct vm_area_struct *vma, struct task_struct *t);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index d3ebb9d21a53..624b78b848b8 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -424,9 +424,9 @@ struct mm_struct {
unsigned long total_vm; /* Total pages mapped */
unsigned long locked_vm; /* Pages that have PG_mlocked set */
unsigned long pinned_vm; /* Refcount permanently increased */
- unsigned long data_vm; /* VM_WRITE & ~VM_SHARED/GROWSDOWN */
- unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE */
- unsigned long stack_vm; /* VM_GROWSUP/DOWN */
+ unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+ unsigned long stack_vm; /* VM_STACK */
unsigned long def_flags;
unsigned long start_code, end_code, start_data, end_data;
unsigned long start_brk, brk, start_stack;
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 33bb1b19273e..7b6c2cfee390 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -682,6 +682,12 @@ typedef struct pglist_data {
*/
unsigned long first_deferred_pfn;
#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ spinlock_t split_queue_lock;
+ struct list_head split_queue;
+ unsigned long split_queue_len;
+#endif
} pg_data_t;
#define node_present_pages(nid) (NODE_DATA(nid)->node_present_pages)
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 1c6342ab8c0e..a2a0068a8387 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -187,7 +187,7 @@ struct msi_domain_info;
 * @msi_free: Domain specific function to free a MSI interrupt
* @msi_check: Callback for verification of the domain/info/dev data
* @msi_prepare: Prepare the allocation of the interrupts in the domain
- * @msi_finish: Optional callbacl to finalize the allocation
+ * @msi_finish: Optional callback to finalize the allocation
* @set_desc: Set the msi descriptor for an interrupt
* @handle_error: Optional error handler if the allocation fails
*
@@ -195,7 +195,7 @@ struct msi_domain_info;
* msi_create_irq_domain() and related interfaces
*
* @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
- * are callbacks used by msi_irq_domain_alloc_irqs() and related
+ * are callbacks used by msi_domain_alloc_irqs() and related
* interfaces which are based on msi_desc.
*/
struct msi_domain_ops {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 5ac140dcb789..289c2314d766 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -512,7 +512,6 @@ static inline void napi_enable(struct napi_struct *n)
clear_bit(NAPI_STATE_NPSVC, &n->state);
}
-#ifdef CONFIG_SMP
/**
* napi_synchronize - wait until NAPI is not running
* @n: napi context
@@ -523,12 +522,12 @@ static inline void napi_enable(struct napi_struct *n)
*/
static inline void napi_synchronize(const struct napi_struct *n)
{
- while (test_bit(NAPI_STATE_SCHED, &n->state))
- msleep(1);
+ if (IS_ENABLED(CONFIG_SMP))
+ while (test_bit(NAPI_STATE_SCHED, &n->state))
+ msleep(1);
+ else
+ barrier();
}
-#else
-# define napi_synchronize(n) barrier()
-#endif
enum netdev_queue_state_t {
__QUEUE_STATE_DRV_XOFF,
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 3af5f454c04a..a55986f6fe38 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -17,20 +17,19 @@
#include <linux/types.h>
-struct nvme_bar {
- __u64 cap; /* Controller Capabilities */
- __u32 vs; /* Version */
- __u32 intms; /* Interrupt Mask Set */
- __u32 intmc; /* Interrupt Mask Clear */
- __u32 cc; /* Controller Configuration */
- __u32 rsvd1; /* Reserved */
- __u32 csts; /* Controller Status */
- __u32 nssr; /* Subsystem Reset */
- __u32 aqa; /* Admin Queue Attributes */
- __u64 asq; /* Admin SQ Base Address */
- __u64 acq; /* Admin CQ Base Address */
- __u32 cmbloc; /* Controller Memory Buffer Location */
- __u32 cmbsz; /* Controller Memory Buffer Size */
+enum {
+ NVME_REG_CAP = 0x0000, /* Controller Capabilities */
+ NVME_REG_VS = 0x0008, /* Version */
+ NVME_REG_INTMS = 0x000c, /* Interrupt Mask Set */
+	NVME_REG_INTMC	= 0x0010,	/* Interrupt Mask Clear */
+ NVME_REG_CC = 0x0014, /* Controller Configuration */
+ NVME_REG_CSTS = 0x001c, /* Controller Status */
+ NVME_REG_NSSR = 0x0020, /* NVM Subsystem Reset */
+ NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
+ NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
+	NVME_REG_ACQ	= 0x0030,	/* Admin CQ Base Address */
+ NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
+ NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
};
#define NVME_CAP_MQES(cap) ((cap) & 0xffff)
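
Register access becomes a plain MMIO read at a named offset instead of a struct-field dereference; a hedged sketch, assuming bar is the ioremap'ed BAR0 and lo_hi_readq from <linux/io-64-nonatomic-lo-hi.h> is available:

	void __iomem *bar = dev->bar;		/* hypothetical mapped BAR0 */
	u32 csts = readl(bar + NVME_REG_CSTS);
	u64 cap  = lo_hi_readq(bar + NVME_REG_CAP);	/* 64-bit CAP via two 32-bit reads */
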
diff --git a/include/linux/of.h b/include/linux/of.h
index dd10626a615f..dc6e39696b64 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -929,7 +929,7 @@ static inline int of_get_available_child_count(const struct device_node *np)
return num;
}
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && !defined(MODULE)
#define _OF_DECLARE(table, name, compat, fn, fn_type) \
static const struct of_device_id __of_table_##name \
__used __section(__##table##_of_table) \
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 2c51ee78b1c0..f6e9e85164e8 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -59,6 +59,13 @@ static inline void of_pci_check_probe_only(void) { }
int of_pci_get_host_bridge_resources(struct device_node *dev,
unsigned char busno, unsigned char bus_max,
struct list_head *resources, resource_size_t *io_base);
+#else
+static inline int of_pci_get_host_bridge_resources(struct device_node *dev,
+ unsigned char busno, unsigned char bus_max,
+ struct list_head *resources, resource_size_t *io_base)
+{
+ return -EINVAL;
+}
#endif
#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 4d08b6c33557..92395a0a7dc5 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -361,6 +361,9 @@ unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t start,
unsigned int nr_pages, struct page **pages);
unsigned find_get_pages_tag(struct address_space *mapping, pgoff_t *index,
int tag, unsigned int nr_pages, struct page **pages);
+unsigned find_get_entries_tag(struct address_space *mapping, pgoff_t start,
+ int tag, unsigned int nr_entries,
+ struct page **entries, pgoff_t *indices);
struct page *grab_cache_page_write_begin(struct address_space *mapping,
pgoff_t index, unsigned flags);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index d86378c226fb..27df4a6585da 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1257,8 +1257,6 @@ struct msix_entry {
u16 entry; /* driver uses to specify entry, OS writes */
};
-void pci_msi_setup_pci_dev(struct pci_dev *dev);
-
#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);
void pci_msi_shutdown(struct pci_dev *dev);
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 1acbefc4bbda..37f05cb1dfd6 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2496,6 +2496,11 @@
#define PCI_DEVICE_ID_KORENIX_JETCARDF3 0x17ff
#define PCI_VENDOR_ID_NETRONOME 0x19ee
+#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200
+#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240
+#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000
+#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000
+#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003
#define PCI_VENDOR_ID_QMI 0x1a32
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f9828a48f16a..b35a61a481fa 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -634,9 +634,6 @@ struct perf_event_context {
int nr_cgroups; /* cgroup evts */
void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
-
- struct delayed_work orphans_remove;
- bool orphans_remove_sched;
};
/*
@@ -729,7 +726,7 @@ extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
-extern struct perf_event *perf_event_get(unsigned int fd);
+extern struct file *perf_event_get(unsigned int fd);
extern const struct perf_event_attr *perf_event_attrs(struct perf_event *event);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
@@ -1044,7 +1041,7 @@ extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
-extern int __perf_event_disable(void *info);
+extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_task_tick(void);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
@@ -1070,7 +1067,7 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
static inline void perf_event_exit_task(struct task_struct *child) { }
static inline void perf_event_free_task(struct task_struct *task) { }
static inline void perf_event_delayed_put(struct task_struct *task) { }
-static inline struct perf_event *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
+static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); }
static inline const struct perf_event_attr *perf_event_attrs(struct perf_event *event)
{
return ERR_PTR(-EINVAL);
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 0703b5360d31..37448ab5fb5c 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -29,7 +29,7 @@ static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
return __pfn_to_pfn_t(pfn, 0);
}
-extern pfn_t phys_to_pfn_t(dma_addr_t addr, unsigned long flags);
+extern pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
static inline bool pfn_t_has_page(pfn_t pfn)
{
@@ -48,7 +48,7 @@ static inline struct page *pfn_t_to_page(pfn_t pfn)
return NULL;
}
-static inline dma_addr_t pfn_t_to_phys(pfn_t pfn)
+static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
{
return PFN_PHYS(pfn_t_to_pfn(pfn));
}
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index eb8b8ac6df3c..24f5470d3944 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -42,6 +42,7 @@ struct pipe_buffer {
* @fasync_readers: reader side fasync
* @fasync_writers: writer side fasync
* @bufs: the circular array of pipe buffers
+ * @user: the user who created this pipe
**/
struct pipe_inode_info {
struct mutex mutex;
@@ -57,6 +58,7 @@ struct pipe_inode_info {
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
struct pipe_buffer *bufs;
+ struct user_struct *user;
};
/*
@@ -123,6 +125,8 @@ void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
extern unsigned int pipe_max_size, pipe_min_size;
+extern unsigned long pipe_user_pages_hard;
+extern unsigned long pipe_user_pages_soft;
int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h
index 54a0a9582fad..0496d171700a 100644
--- a/include/linux/platform_data/iommu-omap.h
+++ b/include/linux/platform_data/iommu-omap.h
@@ -29,15 +29,6 @@ struct omap_iommu_arch_data {
struct omap_iommu *iommu_dev;
};
-/**
- * struct omap_mmu_dev_attr - OMAP mmu device attributes for omap_hwmod
- * @nr_tlb_entries: number of entries supported by the translation
- * look-aside buffer (TLB).
- */
-struct omap_mmu_dev_attr {
- int nr_tlb_entries;
-};
-
struct iommu_platform_data {
const char *name;
const char *reset_name;
diff --git a/include/linux/platform_data/pwm_omap_dmtimer.h b/include/linux/platform_data/pwm_omap_dmtimer.h
new file mode 100644
index 000000000000..59384217208f
--- /dev/null
+++ b/include/linux/platform_data/pwm_omap_dmtimer.h
@@ -0,0 +1,71 @@
+/*
+ * include/linux/platform_data/pwm_omap_dmtimer.h
+ *
+ * OMAP Dual-Mode Timer PWM platform data
+ *
+ * Copyright (C) 2010 Texas Instruments Incorporated - http://www.ti.com/
+ * Tarun Kanti DebBarma <tarun.kanti@ti.com>
+ * Thara Gopinath <thara@ti.com>
+ *
+ * Platform device conversion and hwmod support.
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Lauri Leukkunen <lauri.leukkunen@nokia.com>
+ * PWM and clock framework support by Timo Teras.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+ * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __PWM_OMAP_DMTIMER_PDATA_H
+#define __PWM_OMAP_DMTIMER_PDATA_H
+
+/* trigger types */
+#define PWM_OMAP_DMTIMER_TRIGGER_NONE 0x00
+#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW 0x01
+#define PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE 0x02
+
+struct omap_dm_timer;
+typedef struct omap_dm_timer pwm_omap_dmtimer;
+
+struct pwm_omap_dmtimer_pdata {
+ pwm_omap_dmtimer *(*request_by_node)(struct device_node *np);
+ int (*free)(pwm_omap_dmtimer *timer);
+
+ void (*enable)(pwm_omap_dmtimer *timer);
+ void (*disable)(pwm_omap_dmtimer *timer);
+
+ struct clk *(*get_fclk)(pwm_omap_dmtimer *timer);
+
+ int (*start)(pwm_omap_dmtimer *timer);
+ int (*stop)(pwm_omap_dmtimer *timer);
+
+ int (*set_load)(pwm_omap_dmtimer *timer, int autoreload,
+ unsigned int value);
+ int (*set_match)(pwm_omap_dmtimer *timer, int enable,
+ unsigned int match);
+ int (*set_pwm)(pwm_omap_dmtimer *timer, int def_on,
+ int toggle, int trigger);
+ int (*set_prescaler)(pwm_omap_dmtimer *timer, int prescaler);
+
+ int (*write_counter)(pwm_omap_dmtimer *timer, unsigned int value);
+};
+
+#endif /* __PWM_OMAP_DMTIMER_PDATA_H */
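The pdata above is a function-pointer shim over the OMAP dual-mode timer API, so a generic PWM driver can stay ignorant of the arch code. A hedged sketch of how a consumer might drive it; the device-tree node and the load/match values are illustrative only:

static int example_pwm_setup(struct pwm_omap_dmtimer_pdata *pdata,
			     struct device_node *timer_np)
{
	pwm_omap_dmtimer *timer;

	timer = pdata->request_by_node(timer_np);
	if (!timer)
		return -EPROBE_DEFER;	/* dmtimer not probed yet */

	/* autoreload from the load value, toggle the output on match */
	pdata->set_load(timer, 1, 0xfffffff0);
	pdata->set_match(timer, 1, 0xfffffff8);
	pdata->set_pwm(timer, 0, 1,
		       PWM_OMAP_DMTIMER_TRIGGER_OVERFLOW_AND_COMPARE);

	return pdata->start(timer);
}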
diff --git a/include/linux/platform_data/sdhci-pic32.h b/include/linux/platform_data/sdhci-pic32.h
new file mode 100644
index 000000000000..7e0efe64c8c5
--- /dev/null
+++ b/include/linux/platform_data/sdhci-pic32.h
@@ -0,0 +1,22 @@
+/*
+ * Purna Chandra Mandal, purna.mandal@microchip.com
+ * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
+ *
+ * This program is free software; you can distribute it and/or modify it
+ * under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * for more details.
+ */
+#ifndef __PIC32_SDHCI_PDATA_H__
+#define __PIC32_SDHCI_PDATA_H__
+
+struct pic32_sdhci_platform_data {
+ /* read & write fifo threshold */
+ int (*setup_dma)(u32 rfifo, u32 wfifo);
+};
+
+#endif
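The single callback exists so SoC/board code can program the controller's read/write FIFO thresholds before DMA is used. A hypothetical provider, with the threshold handling left as a stub:

static int example_pic32_setup_dma(u32 rfifo, u32 wfifo)
{
	/* program FIFO watermarks into SoC-specific registers here */
	return 0;
}

static struct pic32_sdhci_platform_data example_pic32_sdhci_pdata = {
	.setup_dma = example_pic32_setup_dma,
};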
diff --git a/include/linux/platform_data/touchscreen-s3c2410.h b/include/linux/platform_data/touchscreen-s3c2410.h
index 58dc7c5ae63b..71eccaa9835d 100644
--- a/include/linux/platform_data/touchscreen-s3c2410.h
+++ b/include/linux/platform_data/touchscreen-s3c2410.h
@@ -17,6 +17,7 @@ struct s3c2410_ts_mach_info {
};
extern void s3c24xx_ts_set_platdata(struct s3c2410_ts_mach_info *);
+extern void s3c64xx_ts_set_platdata(struct s3c2410_ts_mach_info *);
/* defined by architecture to configure gpio */
extern void s3c24xx_ts_cfg_gpio(struct platform_device *dev);
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 528be6787796..6a5d654f4447 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -573,6 +573,7 @@ struct dev_pm_info {
struct wakeup_source *wakeup;
bool wakeup_path:1;
bool syscore:1;
+ bool no_pm_callbacks:1; /* Owned by the PM core */
#else
unsigned int should_wakeup:1;
#endif
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index ba4ced38efae..db21d3995f7e 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -240,12 +240,15 @@ static inline int of_genpd_add_provider_onecell(struct device_node *np,
#ifdef CONFIG_PM
extern int dev_pm_domain_attach(struct device *dev, bool power_on);
extern void dev_pm_domain_detach(struct device *dev, bool power_off);
+extern void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
#else
static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
{
return -ENODEV;
}
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
+static inline void dev_pm_domain_set(struct device *dev,
+ struct dev_pm_domain *pd) {}
#endif
#endif /* _LINUX_PM_DOMAIN_H */
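dev_pm_domain_set() gives buses and drivers a sanctioned way to install a PM domain instead of poking dev->pm_domain directly, letting the PM core serialize the update. A minimal sketch, assuming generic runtime-PM callbacks suffice:

static struct dev_pm_domain example_pm_domain = {
	.ops = {
		SET_RUNTIME_PM_OPS(pm_generic_runtime_suspend,
				   pm_generic_runtime_resume, NULL)
	},
};

static void example_bus_add_device(struct device *dev)
{
	/* replaces a bare "dev->pm_domain = &example_pm_domain" */
	dev_pm_domain_set(dev, &example_pm_domain);
}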
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index acfea8ce4a07..7c3d11a6b4ad 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -53,12 +53,18 @@ static inline void arch_clear_pmem(void __pmem *addr, size_t size)
{
BUG();
}
+
+static inline void arch_wb_cache_pmem(void __pmem *addr, size_t size)
+{
+ BUG();
+}
#endif
/*
* Architectures that define ARCH_HAS_PMEM_API must provide
* implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
- * arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
+ * arch_copy_from_iter_pmem(), arch_clear_pmem(), arch_wb_cache_pmem()
+ * and arch_has_wmb_pmem().
*/
static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
{
@@ -178,4 +184,18 @@ static inline void clear_pmem(void __pmem *addr, size_t size)
else
default_clear_pmem(addr, size);
}
+
+/**
+ * wb_cache_pmem - write back processor cache for PMEM memory range
+ * @addr: virtual start address
+ * @size: number of bytes to write back
+ *
+ * Write back the processor cache range starting at 'addr' for 'size' bytes.
+ * This function requires explicit ordering with a wmb_pmem() call.
+ */
+static inline void wb_cache_pmem(void __pmem *addr, size_t size)
+{
+ if (arch_has_pmem_api())
+ arch_wb_cache_pmem(addr, size);
+}
#endif /* __PMEM_H__ */
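wb_cache_pmem() covers the case where data reached persistent memory through a cached mapping (e.g. a DAX mmap) rather than via memcpy_to_pmem(). Per the kernel-doc above, it flushes but does not order; a sketch of the intended pairing:

static void example_dax_publish(void __pmem *addr, size_t size)
{
	/* stores already landed through a cached (DAX) mapping */
	wb_cache_pmem(addr, size);	/* write dirty lines back to media */
	wmb_pmem();			/* then order/drain for durability */
}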
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 061265f92876..504c98a278d4 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -57,7 +57,29 @@ extern void exit_ptrace(struct task_struct *tracer, struct list_head *dead);
#define PTRACE_MODE_READ 0x01
#define PTRACE_MODE_ATTACH 0x02
#define PTRACE_MODE_NOAUDIT 0x04
-/* Returns true on success, false on denial. */
+#define PTRACE_MODE_FSCREDS 0x08
+#define PTRACE_MODE_REALCREDS 0x10
+
+/* shorthands for READ/ATTACH and FSCREDS/REALCREDS combinations */
+#define PTRACE_MODE_READ_FSCREDS (PTRACE_MODE_READ | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_READ_REALCREDS (PTRACE_MODE_READ | PTRACE_MODE_REALCREDS)
+#define PTRACE_MODE_ATTACH_FSCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_FSCREDS)
+#define PTRACE_MODE_ATTACH_REALCREDS (PTRACE_MODE_ATTACH | PTRACE_MODE_REALCREDS)
+
+/**
+ * ptrace_may_access - check whether the caller is permitted to access
+ * a target task.
+ * @task: target task
+ * @mode: selects type of access and caller credentials
+ *
+ * Returns true on success, false on denial.
+ *
+ * One of the flags PTRACE_MODE_FSCREDS and PTRACE_MODE_REALCREDS must
+ * be set in @mode to specify whether the access was requested through
+ * a filesystem syscall (should use effective capabilities and fsuid
+ * of the caller) or through an explicit syscall such as
+ * process_vm_writev or ptrace (and should use the real credentials).
+ */
extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
static inline int ptrace_reparented(struct task_struct *child)
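The split matters because a ptrace-style check reached from a filesystem entry point (procfs and friends) should judge the caller by fsuid and effective capabilities, while direct syscalls keep using real credentials. A sketch of the filesystem-side usage:

static int example_proc_open_check(struct task_struct *task)
{
	/* access arrived via a filesystem syscall: use FSCREDS */
	if (!ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS))
		return -EACCES;
	return 0;
}

A caller implementing something like process_vm_readv would pass PTRACE_MODE_ATTACH_REALCREDS instead.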
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 33170dbd9db4..00b17c526c1f 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -51,6 +51,15 @@
#define RADIX_TREE_EXCEPTIONAL_ENTRY 2
#define RADIX_TREE_EXCEPTIONAL_SHIFT 2
+#define RADIX_DAX_MASK 0xf
+#define RADIX_DAX_SHIFT 4
+#define RADIX_DAX_PTE (0x4 | RADIX_TREE_EXCEPTIONAL_ENTRY)
+#define RADIX_DAX_PMD (0x8 | RADIX_TREE_EXCEPTIONAL_ENTRY)
+#define RADIX_DAX_TYPE(entry) ((unsigned long)entry & RADIX_DAX_MASK)
+#define RADIX_DAX_SECTOR(entry) (((unsigned long)entry >> RADIX_DAX_SHIFT))
+#define RADIX_DAX_ENTRY(sector, pmd) ((void *)((unsigned long)sector << \
+ RADIX_DAX_SHIFT | (pmd ? RADIX_DAX_PMD : RADIX_DAX_PTE)))
+
static inline int radix_tree_is_indirect_ptr(void *ptr)
{
return (int)((unsigned long)ptr & RADIX_TREE_INDIRECT_PTR);
@@ -154,7 +163,7 @@ do { \
* radix_tree_gang_lookup_tag_slot
* radix_tree_tagged
*
- * The first 7 functions are able to be called locklessly, using RCU. The
+ * The first 8 functions are able to be called locklessly, using RCU. The
* caller must ensure calls to these functions are made within rcu_read_lock()
* regions. Other readers (lock-free or otherwise) and modifications may be
* running concurrently.
@@ -370,6 +379,22 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
struct radix_tree_iter *iter, unsigned flags);
/**
+ * radix_tree_iter_retry - retry this chunk of the iteration
+ * @iter: iterator state
+ *
+ * If we iterate over a tree protected only by the RCU lock, a race
+ * against deletion or creation may result in seeing a slot for which
+ * radix_tree_deref_retry() returns true. If so, call this function
+ * and continue the iteration.
+ */
+static inline __must_check
+void **radix_tree_iter_retry(struct radix_tree_iter *iter)
+{
+ iter->next_index = iter->index;
+ return NULL;
+}
+
+/**
* radix_tree_chunk_size - get current chunk size
*
* @iter: pointer to radix tree iterator
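The retry helper slots into the standard RCU iteration pattern: on a stale slot, rewind the current chunk instead of dereferencing a moved entry. A minimal sketch:

static void example_walk(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	void **slot;

	rcu_read_lock();
	radix_tree_for_each_slot(slot, root, &iter, 0) {
		void *entry = radix_tree_deref_slot(slot);

		if (radix_tree_deref_retry(entry)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}
		/* ... use entry ... */
	}
	rcu_read_unlock();
}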
diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h
index a5aa7ae671f4..b6900099ea81 100644
--- a/include/linux/rbtree.h
+++ b/include/linux/rbtree.h
@@ -50,7 +50,7 @@ struct rb_root {
#define RB_ROOT (struct rb_root) { NULL, }
#define rb_entry(ptr, type, member) container_of(ptr, type, member)
-#define RB_EMPTY_ROOT(root) ((root)->rb_node == NULL)
+#define RB_EMPTY_ROOT(root) (READ_ONCE((root)->rb_node) == NULL)
/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node) \
diff --git a/include/linux/reset.h b/include/linux/reset.h
index 7f65f9cff951..c4c097de0ba9 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -38,6 +38,9 @@ static inline struct reset_control *devm_reset_control_get_optional(
struct reset_control *of_reset_control_get(struct device_node *node,
const char *id);
+struct reset_control *of_reset_control_get_by_index(
+ struct device_node *node, int index);
+
#else
static inline int reset_control_reset(struct reset_control *rstc)
@@ -71,7 +74,7 @@ static inline void reset_control_put(struct reset_control *rstc)
static inline int device_reset_optional(struct device *dev)
{
- return -ENOSYS;
+ return -ENOTSUPP;
}
static inline struct reset_control *__must_check reset_control_get(
@@ -91,19 +94,25 @@ static inline struct reset_control *__must_check devm_reset_control_get(
static inline struct reset_control *reset_control_get_optional(
struct device *dev, const char *id)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENOTSUPP);
}
static inline struct reset_control *devm_reset_control_get_optional(
struct device *dev, const char *id)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENOTSUPP);
}
static inline struct reset_control *of_reset_control_get(
struct device_node *node, const char *id)
{
- return ERR_PTR(-ENOSYS);
+ return ERR_PTR(-ENOTSUPP);
+}
+
+static inline struct reset_control *of_reset_control_get_by_index(
+ struct device_node *node, int index)
+{
+ return ERR_PTR(-ENOTSUPP);
}
#endif /* CONFIG_RESET_CONTROLLER */
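Note the stubs now return -ENOTSUPP rather than -ENOSYS, so callers can tell "reset controller support compiled out" apart from a missing syscall. A sketch of the new by-index lookup, useful for bindings that have no reset-names property:

static int example_second_reset(struct device_node *np)
{
	struct reset_control *rstc = of_reset_control_get_by_index(np, 1);
	int ret;

	if (IS_ERR(rstc))
		return PTR_ERR(rstc);	/* -ENOTSUPP if the subsystem is off */

	ret = reset_control_reset(rstc);
	reset_control_put(rstc);
	return ret;
}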
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 61aa9bbea871..a10494a94cc3 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -835,6 +835,7 @@ struct user_struct {
#endif
unsigned long locked_shm; /* How many pages of mlocked shm ? */
unsigned long unix_inflight; /* How many files in flight in unix sockets */
+ atomic_long_t pipe_bufs; /* how many pages are allocated in pipe buffers */
#ifdef CONFIG_KEYS
struct key *uid_keyring; /* UID specific keyring */
@@ -1476,10 +1477,10 @@ struct task_struct {
unsigned in_iowait:1;
#ifdef CONFIG_MEMCG
unsigned memcg_may_oom:1;
-#endif
-#ifdef CONFIG_MEMCG_KMEM
+#ifndef CONFIG_SLOB
unsigned memcg_kmem_skip_account:1;
#endif
+#endif
#ifdef CONFIG_COMPAT_BRK
unsigned brk_randomized:1;
#endif
@@ -1643,6 +1644,9 @@ struct task_struct {
struct held_lock held_locks[MAX_LOCK_DEPTH];
gfp_t lockdep_reclaim_gfp;
#endif
+#ifdef CONFIG_UBSAN
+ unsigned int in_ubsan;
+#endif
/* journalling filesystem info */
void *journal_info;
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h
index 1f208b2a1ed6..645896b81244 100644
--- a/include/linux/sh_clk.h
+++ b/include/linux/sh_clk.h
@@ -113,10 +113,6 @@ long clk_rate_div_range_round(struct clk *clk, unsigned int div_min,
long clk_rate_mult_range_round(struct clk *clk, unsigned int mult_min,
unsigned int mult_max, unsigned long rate);
-long clk_round_parent(struct clk *clk, unsigned long target,
- unsigned long *best_freq, unsigned long *parent_freq,
- unsigned int div_min, unsigned int div_max);
-
#define SH_CLK_MSTP(_parent, _enable_reg, _enable_bit, _status_reg, _flags) \
{ \
.parent = _parent, \
diff --git a/include/linux/shm.h b/include/linux/shm.h
index 6fb801686ad6..04e881829625 100644
--- a/include/linux/shm.h
+++ b/include/linux/shm.h
@@ -52,7 +52,7 @@ struct sysv_shm {
long do_shmat(int shmid, char __user *shmaddr, int shmflg, unsigned long *addr,
unsigned long shmlba);
-int is_file_shm_hugepages(struct file *file);
+bool is_file_shm_hugepages(struct file *file);
void exit_shm(struct task_struct *task);
#define shm_init_task(task) INIT_LIST_HEAD(&(task)->sysvshm.shm_clist)
#else
@@ -66,9 +66,9 @@ static inline long do_shmat(int shmid, char __user *shmaddr,
{
return -ENOSYS;
}
-static inline int is_file_shm_hugepages(struct file *file)
+static inline bool is_file_shm_hugepages(struct file *file)
{
- return 0;
+ return false;
}
static inline void exit_shm(struct task_struct *task)
{
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index a43f41cb3c43..4d4780c00d34 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -15,10 +15,7 @@ struct shmem_inode_info {
unsigned int seals; /* shmem seals */
unsigned long flags;
unsigned long alloced; /* data pages alloced to file */
- union {
- unsigned long swapped; /* subtotal assigned to swap */
- char *symlink; /* unswappable short symlink */
- };
+ unsigned long swapped; /* subtotal assigned to swap */
struct shared_policy policy; /* NUMA memory alloc policy */
struct list_head swaplist; /* chain of maybes on swap */
struct simple_xattrs xattrs; /* list of xattrs */
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3ffee7422012..3627d5c1bc47 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -86,7 +86,7 @@
#else
# define SLAB_FAILSLAB 0x00000000UL
#endif
-#ifdef CONFIG_MEMCG_KMEM
+#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
# define SLAB_ACCOUNT 0x04000000UL /* Account to memcg */
#else
# define SLAB_ACCOUNT 0x00000000UL
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 33d049066c3d..cf139d3fa513 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -69,7 +69,8 @@ struct kmem_cache {
*/
int obj_offset;
#endif /* CONFIG_DEBUG_SLAB */
-#ifdef CONFIG_MEMCG_KMEM
+
+#ifdef CONFIG_MEMCG
struct memcg_cache_params memcg_params;
#endif
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 33885118523c..b7e57927f521 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -84,7 +84,7 @@ struct kmem_cache {
#ifdef CONFIG_SYSFS
struct kobject kobj; /* For sysfs */
#endif
-#ifdef CONFIG_MEMCG_KMEM
+#ifdef CONFIG_MEMCG
struct memcg_cache_params memcg_params;
int max_attr_size; /* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
diff --git a/include/linux/soc/dove/pmu.h b/include/linux/soc/dove/pmu.h
index 9c99f84bcc0e..765386972b55 100644
--- a/include/linux/soc/dove/pmu.h
+++ b/include/linux/soc/dove/pmu.h
@@ -1,6 +1,25 @@
#ifndef LINUX_SOC_DOVE_PMU_H
#define LINUX_SOC_DOVE_PMU_H
+#include <linux/types.h>
+
+struct dove_pmu_domain_initdata {
+ u32 pwr_mask;
+ u32 rst_mask;
+ u32 iso_mask;
+ const char *name;
+};
+
+struct dove_pmu_initdata {
+ void __iomem *pmc_base;
+ void __iomem *pmu_base;
+ int irq;
+ int irq_domain_start;
+ const struct dove_pmu_domain_initdata *domains;
+};
+
+int dove_init_pmu_legacy(const struct dove_pmu_initdata *);
+
int dove_init_pmu(void);
#endif
diff --git a/include/linux/soc/qcom/smem_state.h b/include/linux/soc/qcom/smem_state.h
new file mode 100644
index 000000000000..f35e1512fcaa
--- /dev/null
+++ b/include/linux/soc/qcom/smem_state.h
@@ -0,0 +1,18 @@
+#ifndef __QCOM_SMEM_STATE__
+#define __QCOM_SMEM_STATE__
+
+struct qcom_smem_state;
+
+struct qcom_smem_state_ops {
+ int (*update_bits)(void *, u32, u32);
+};
+
+struct qcom_smem_state *qcom_smem_state_get(struct device *dev, const char *con_id, unsigned *bit);
+void qcom_smem_state_put(struct qcom_smem_state *);
+
+int qcom_smem_state_update_bits(struct qcom_smem_state *state, u32 mask, u32 value);
+
+struct qcom_smem_state *qcom_smem_state_register(struct device_node *of_node, const struct qcom_smem_state_ops *ops, void *data);
+void qcom_smem_state_unregister(struct qcom_smem_state *state);
+
+#endif
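A consumer sketch for the new smem-state API: look the state up by consumer id, then drive one outbound bit. The "wake" con_id is an assumption about the binding, not something this header defines:

static int example_kick_remote(struct device *dev)
{
	struct qcom_smem_state *state;
	unsigned bit;

	state = qcom_smem_state_get(dev, "wake", &bit);
	if (IS_ERR(state))
		return PTR_ERR(state);

	return qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
}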
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index f869807a0d0e..5322fea6fe4c 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -51,6 +51,7 @@
/* RPC/RDMA parameters and stats */
extern unsigned int svcrdma_ord;
extern unsigned int svcrdma_max_requests;
+extern unsigned int svcrdma_max_bc_requests;
extern unsigned int svcrdma_max_req_size;
extern atomic_t rdma_stat_recv;
@@ -69,6 +70,7 @@ extern atomic_t rdma_stat_sq_prod;
* completes.
*/
struct svc_rdma_op_ctxt {
+ struct list_head free;
struct svc_rdma_op_ctxt *read_hdr;
struct svc_rdma_fastreg_mr *frmr;
int hdr_count;
@@ -112,6 +114,7 @@ struct svc_rdma_fastreg_mr {
struct list_head frmr_list;
};
struct svc_rdma_req_map {
+ struct list_head free;
unsigned long count;
union {
struct kvec sge[RPCSVC_MAXPAGES];
@@ -132,28 +135,32 @@ struct svcxprt_rdma {
int sc_max_sge;
int sc_max_sge_rd; /* max sge for read target */
- int sc_sq_depth; /* Depth of SQ */
atomic_t sc_sq_count; /* Number of SQ WR on queue */
-
- int sc_max_requests; /* Depth of RQ */
+ unsigned int sc_sq_depth; /* Depth of SQ */
+ unsigned int sc_rq_depth; /* Depth of RQ */
+ u32 sc_max_requests; /* Forward credits */
+ u32 sc_max_bc_requests;/* Backward credits */
int sc_max_req_size; /* Size of each RQ WR buf */
struct ib_pd *sc_pd;
atomic_t sc_dma_used;
- atomic_t sc_ctxt_used;
+ spinlock_t sc_ctxt_lock;
+ struct list_head sc_ctxts;
+ int sc_ctxt_used;
+ spinlock_t sc_map_lock;
+ struct list_head sc_maps;
+
struct list_head sc_rq_dto_q;
spinlock_t sc_rq_dto_lock;
struct ib_qp *sc_qp;
struct ib_cq *sc_rq_cq;
struct ib_cq *sc_sq_cq;
- struct ib_mr *sc_phys_mr; /* MR for server memory */
int (*sc_reader)(struct svcxprt_rdma *,
struct svc_rqst *,
struct svc_rdma_op_ctxt *,
int *, u32 *, u32, u32, u64, bool);
u32 sc_dev_caps; /* distilled device caps */
- u32 sc_dma_lkey; /* local dma key */
unsigned int sc_frmr_pg_list_len;
struct list_head sc_frmr_q;
spinlock_t sc_frmr_q_lock;
@@ -179,8 +186,18 @@ struct svcxprt_rdma {
#define RPCRDMA_MAX_REQUESTS 32
#define RPCRDMA_MAX_REQ_SIZE 4096
+/* Typical ULP usage of BC requests is NFSv4.1 backchannel. Our
+ * current NFSv4.1 implementation supports one backchannel slot.
+ */
+#define RPCRDMA_MAX_BC_REQUESTS 2
+
#define RPCSVC_MAXPAYLOAD_RDMA RPCSVC_MAXPAYLOAD
+/* svc_rdma_backchannel.c */
+extern int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt,
+ struct rpcrdma_msg *rmsgp,
+ struct xdr_buf *rcvbuf);
+
/* svc_rdma_marshal.c */
extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
@@ -206,6 +223,8 @@ extern int rdma_read_chunk_frmr(struct svcxprt_rdma *, struct svc_rqst *,
u32, u32, u64, bool);
/* svc_rdma_sendto.c */
+extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
+ struct svc_rdma_req_map *);
extern int svc_rdma_sendto(struct svc_rqst *);
extern struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *);
@@ -214,13 +233,14 @@ extern struct rpcrdma_read_chunk *
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
enum rpcrdma_errcode);
-extern int svc_rdma_post_recv(struct svcxprt_rdma *);
+extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
extern void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt);
-extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
-extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
+extern struct svc_rdma_req_map *svc_rdma_get_req_map(struct svcxprt_rdma *);
+extern void svc_rdma_put_req_map(struct svcxprt_rdma *,
+ struct svc_rdma_req_map *);
extern struct svc_rdma_fastreg_mr *svc_rdma_get_frmr(struct svcxprt_rdma *);
extern void svc_rdma_put_frmr(struct svcxprt_rdma *,
struct svc_rdma_fastreg_mr *);
@@ -234,6 +254,7 @@ extern struct svc_xprt_class svc_rdma_bc_class;
#endif
/* svc_rdma.c */
+extern struct workqueue_struct *svc_rdma_wq;
extern int svc_rdma_init(void);
extern void svc_rdma_cleanup(void);
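The op contexts and request maps move from one-off allocations to per-transport free lists, which is why the get/put helpers now take the svcxprt_rdma. A hedged sketch of a send-side caller under the new signatures:

static int example_send_reply(struct svcxprt_rdma *rdma, struct xdr_buf *xdr)
{
	struct svc_rdma_req_map *vec = svc_rdma_get_req_map(rdma);
	int ret;

	ret = svc_rdma_map_xdr(rdma, xdr, vec);
	/* ... build and post the send WR ... */
	svc_rdma_put_req_map(rdma, vec);
	return ret;
}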
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 414e101cd061..d18b65c53dbb 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -350,33 +350,7 @@ extern void check_move_unevictable_pages(struct page **, int nr_pages);
extern int kswapd_run(int nid);
extern void kswapd_stop(int nid);
-#ifdef CONFIG_MEMCG
-static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
-{
- /* root ? */
- if (mem_cgroup_disabled() || !memcg->css.parent)
- return vm_swappiness;
-
- return memcg->swappiness;
-}
-#else
-static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
-{
- return vm_swappiness;
-}
-#endif
-#ifdef CONFIG_MEMCG_SWAP
-extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
-extern void mem_cgroup_uncharge_swap(swp_entry_t entry);
-#else
-static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
-{
-}
-static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
-{
-}
-#endif
#ifdef CONFIG_SWAP
/* linux/mm/page_io.c */
extern int swap_readpage(struct page *);
@@ -555,5 +529,55 @@ static inline swp_entry_t get_swap_page(void)
}
#endif /* CONFIG_SWAP */
+
+#ifdef CONFIG_MEMCG
+static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
+{
+ /* root ? */
+ if (mem_cgroup_disabled() || !memcg->css.parent)
+ return vm_swappiness;
+
+ return memcg->swappiness;
+}
+
+#else
+static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
+{
+ return vm_swappiness;
+}
+#endif
+
+#ifdef CONFIG_MEMCG_SWAP
+extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
+extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
+extern void mem_cgroup_uncharge_swap(swp_entry_t entry);
+extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
+extern bool mem_cgroup_swap_full(struct page *page);
+#else
+static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+{
+}
+
+static inline int mem_cgroup_try_charge_swap(struct page *page,
+ swp_entry_t entry)
+{
+ return 0;
+}
+
+static inline void mem_cgroup_uncharge_swap(swp_entry_t entry)
+{
+}
+
+static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
+{
+ return get_nr_swap_pages();
+}
+
+static inline bool mem_cgroup_swap_full(struct page *page)
+{
+ return vm_swap_full();
+}
+#endif
+
#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */
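Beyond relocating the helpers below the CONFIG_SWAP block, the hunk adds per-memcg swap accounting entry points. A sketch of how a swap-out path might pair the charge with the eventual uncharge (error handling condensed):

static int example_add_to_swap(struct page *page, swp_entry_t entry)
{
	if (mem_cgroup_try_charge_swap(page, entry))
		return -ENOMEM;		/* this memcg's swap limit is hit */

	/*
	 * ... add the page to the swap cache; if that fails,
	 * undo with mem_cgroup_uncharge_swap(entry).
	 */
	return 0;
}

With CONFIG_MEMCG_SWAP disabled, mem_cgroup_get_nr_swap_pages() and mem_cgroup_swap_full() simply fall back to the global counters.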
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index e7a018eaf3a2..017fced60242 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -1,10 +1,13 @@
#ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H
+#include <linux/dma-direction.h>
+#include <linux/init.h>
#include <linux/types.h>
struct device;
struct dma_attrs;
+struct page;
struct scatterlist;
extern int swiotlb_force;
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 613c29bd6baf..e13a1ace50e9 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -43,6 +43,9 @@
/* Default weight of a bound cooling device */
#define THERMAL_WEIGHT_DEFAULT 0
+/* A reading below 0 K marks an invalid/uninitialized temperature */
+#define THERMAL_TEMP_INVALID -274000
+
/* Unit conversion macros */
#define DECI_KELVIN_TO_CELSIUS(t) ({ \
long _t = (t); \
@@ -167,6 +170,7 @@ struct thermal_attr {
* @forced_passive: If > 0, temperature at which to switch on all ACPI
* processor cooling devices. Currently only used by the
* step-wise governor.
+ * @need_update: if set, thermal_zone_device_update() needs to be invoked.
* @ops: operations this &thermal_zone_device supports
* @tzp: thermal zone parameters
* @governor: pointer to the governor for this thermal zone
@@ -194,6 +198,7 @@ struct thermal_zone_device {
int emul_temperature;
int passive;
unsigned int forced_passive;
+ atomic_t need_update;
struct thermal_zone_device_ops *ops;
struct thermal_zone_params *tzp;
struct thermal_governor *governor;
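THERMAL_TEMP_INVALID gives sensor drivers a sentinel for not-yet-valid readings; -274000 millicelsius sits just below absolute zero, so it cannot collide with a real value. A hypothetical .get_temp using it (read_board_sensor() is invented for illustration):

static int example_get_temp(struct thermal_zone_device *tz, int *temp)
{
	*temp = read_board_sensor(tz->devdata);
	return *temp == THERMAL_TEMP_INVALID ? -EAGAIN : 0;
}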
diff --git a/include/linux/tick.h b/include/linux/tick.h
index e312219ff823..97fd4e543846 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -98,6 +98,7 @@ static inline void tick_broadcast_exit(void)
}
#ifdef CONFIG_NO_HZ_COMMON
+extern int tick_nohz_enabled;
extern int tick_nohz_tick_stopped(void);
extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
@@ -106,6 +107,7 @@ extern ktime_t tick_nohz_get_sleep_length(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
+#define tick_nohz_enabled (0)
static inline int tick_nohz_tick_stopped(void) { return 0; }
static inline void tick_nohz_idle_enter(void) { }
static inline void tick_nohz_idle_exit(void) { }
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 2fd8708ea888..d9fb4b043f56 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -649,6 +649,7 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
/* tty_mutex.c */
/* functions for preparation of BKL removal */
extern void __lockfunc tty_lock(struct tty_struct *tty);
+extern int tty_lock_interruptible(struct tty_struct *tty);
extern void __lockfunc tty_unlock(struct tty_struct *tty);
extern void __lockfunc tty_lock_slave(struct tty_struct *tty);
extern void __lockfunc tty_unlock_slave(struct tty_struct *tty);
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 558129af828a..349557825428 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -111,4 +111,11 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
#define probe_kernel_address(addr, retval) \
probe_kernel_read(&retval, addr, sizeof(retval))
+#ifndef user_access_begin
+#define user_access_begin() do { } while (0)
+#define user_access_end() do { } while (0)
+#define unsafe_get_user(x, ptr) __get_user(x, ptr)
+#define unsafe_put_user(x, ptr) __put_user(x, ptr)
+#endif
+
#endif /* __LINUX_UACCESS_H__ */
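The begin/end pair lets an architecture open the user-access window once (STAC/CLAC on x86) around a run of unsafe accessors; the generic fallbacks above are no-ops. A sketch of the intended bracketing, after a single access_ok() check:

static long example_read_words(u32 __user *uptr, u32 *dst, int n)
{
	int i;

	if (!access_ok(VERIFY_READ, uptr, n * sizeof(u32)))
		return -EFAULT;

	user_access_begin();
	for (i = 0; i < n; i++)
		unsafe_get_user(dst[i], &uptr[i]);	/* generic fallback: __get_user() */
	user_access_end();
	return 0;
}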
diff --git a/include/linux/wkup_m3_ipc.h b/include/linux/wkup_m3_ipc.h
new file mode 100644
index 000000000000..d6ba7d39a62f
--- /dev/null
+++ b/include/linux/wkup_m3_ipc.h
@@ -0,0 +1,55 @@
+/*
+ * TI Wakeup M3 for AMx3 SoCs Power Management Routines
+ *
+ * Copyright (C) 2015 Texas Instruments Incorporated - http://www.ti.com/
+ * Dave Gerlach <d-gerlach@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed "as is" WITHOUT ANY WARRANTY of any
+ * kind, whether express or implied; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _LINUX_WKUP_M3_IPC_H
+#define _LINUX_WKUP_M3_IPC_H
+
+#define WKUP_M3_DEEPSLEEP 1
+#define WKUP_M3_STANDBY 2
+#define WKUP_M3_IDLE 3
+
+#include <linux/mailbox_client.h>
+
+struct wkup_m3_ipc_ops;
+
+struct wkup_m3_ipc {
+ struct rproc *rproc;
+
+ void __iomem *ipc_mem_base;
+ struct device *dev;
+
+ int mem_type;
+ unsigned long resume_addr;
+ int state;
+
+ struct completion sync_complete;
+ struct mbox_client mbox_client;
+ struct mbox_chan *mbox;
+
+ struct wkup_m3_ipc_ops *ops;
+};
+
+struct wkup_m3_ipc_ops {
+ void (*set_mem_type)(struct wkup_m3_ipc *m3_ipc, int mem_type);
+ void (*set_resume_address)(struct wkup_m3_ipc *m3_ipc, void *addr);
+ int (*prepare_low_power)(struct wkup_m3_ipc *m3_ipc, int state);
+ int (*finish_low_power)(struct wkup_m3_ipc *m3_ipc);
+ int (*request_pm_status)(struct wkup_m3_ipc *m3_ipc);
+};
+
+struct wkup_m3_ipc *wkup_m3_ipc_get(void);
+void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc);
+#endif /* _LINUX_WKUP_M3_IPC_H */
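A consumer sketch for the IPC handle: grab it, ask the wakeup Cortex-M3 to prepare a low-power state, and release it afterwards. Error paths are trimmed and the surrounding suspend machinery is assumed:

static int example_enter_deepsleep(void)
{
	struct wkup_m3_ipc *m3_ipc = wkup_m3_ipc_get();
	int ret;

	if (!m3_ipc)
		return -EPROBE_DEFER;	/* firmware not ready yet */

	ret = m3_ipc->ops->prepare_low_power(m3_ipc, WKUP_M3_DEEPSLEEP);
	if (!ret) {
		/* ... execute WFI / enter the low-power state ... */
		ret = m3_ipc->ops->finish_low_power(m3_ipc);
	}

	wkup_m3_ipc_put(m3_ipc);
	return ret;
}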