author     David S. Miller <davem@davemloft.net>    2017-11-02 08:59:52 +0300
committer  David S. Miller <davem@davemloft.net>    2017-11-02 09:23:39 +0300
commit     ed29668d1aa2c6f01e61dd616df13b5241cee7e0 (patch)
tree       a086cf6311ed8623b292d3ea8d73c03f53207be0 /include
parent     65c959a39b7e9ad6b443b74904486b4a75b0232f (diff)
parent     3a99df9a3d14cd866b5516f8cba515a3bfd554ab (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Smooth Cong Wang's bug fix into 'net-next'. Basically put the bulk of the tcf_block_put() logic from 'net' into tcf_block_put_ext(), but after the offload unbind.

Signed-off-by: David S. Miller <davem@davemloft.net>
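A rough sketch of the structure that resolution describes (the signatures and bodies below are illustrative assumptions, not the code in net/sched/cls_api.c): tcf_block_put() stays a thin wrapper, and the teardown it used to carry runs in tcf_block_put_ext() only after the offload unbind.

/* Illustrative sketch only; the real definitions live in net/sched/cls_api.c. */
void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
		       struct tcf_block_ext_info *ei)
{
	tcf_block_offload_unbind(block, q, ei);	/* unbind the offload first */
	/* ... then the bulk of the old tcf_block_put() teardown from 'net'
	 * (walk and destroy the filter chains, release the block) ... */
}

void tcf_block_put(struct tcf_block *block)
{
	struct tcf_block_ext_info ei = {0, };

	if (!block)
		return;
	tcf_block_put_ext(block, block->q, &ei);
}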
Diffstat (limited to 'include')
-rw-r--r--  include/linux/pm_qos.h    5
-rw-r--r--  include/net/tcp.h         6
-rw-r--r--  include/uapi/linux/bpf.h  1
3 files changed, 5 insertions, 7 deletions
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 6737a8c9e8c6..032b55909145 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -27,17 +27,16 @@ enum pm_qos_flags_status {
PM_QOS_FLAGS_ALL,
};
-#define PM_QOS_DEFAULT_VALUE (-1)
-#define PM_QOS_LATENCY_ANY S32_MAX
+#define PM_QOS_DEFAULT_VALUE -1
#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
-#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
+#define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1))
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
#define PM_QOS_FLAG_REMOTE_WAKEUP (1 << 1)
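The open-coded PM_QOS_LATENCY_ANY keeps the old meaning: ~(__u32)0 is 0xffffffff, and shifting it right by one gives 0x7fffffff, i.e. S32_MAX. A standalone check (hypothetical userspace test program, not part of the patch, with stand-ins for the kernel types):

#include <stdint.h>
#include <stdio.h>

typedef int32_t s32;	/* stand-in for the kernel s32 */
typedef uint32_t __u32;	/* stand-in for the kernel __u32 */

#define PM_QOS_LATENCY_ANY ((s32)(~(__u32)0 >> 1))

int main(void)
{
	/* Prints 2147483647, the same value as the old S32_MAX definition. */
	printf("PM_QOS_LATENCY_ANY = %d\n", PM_QOS_LATENCY_ANY);
	return 0;
}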
diff --git a/include/net/tcp.h b/include/net/tcp.h
index a2510cdef4b5..c2bf2a822b10 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -1738,12 +1738,12 @@ static inline void tcp_highest_sack_reset(struct sock *sk)
tcp_sk(sk)->highest_sack = skb ?: tcp_send_head(sk);
}
-/* Called when old skb is about to be deleted (to be combined with new skb) */
-static inline void tcp_highest_sack_combine(struct sock *sk,
+/* Called when old skb is about to be deleted and replaced by new skb */
+static inline void tcp_highest_sack_replace(struct sock *sk,
struct sk_buff *old,
struct sk_buff *new)
{
- if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
+ if (old == tcp_highest_sack(sk))
tcp_sk(sk)->highest_sack = new;
}
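The rename matters at the call sites: the helper now moves the highest-SACKed marker whenever the old skb is the current highest SACK, without also requiring sacked_out to be non-zero. A hypothetical caller (the real users are in net/ipv4/tcp_output.c; this function name and body are illustrative only):

/* 'new' is about to take over for 'old' on the queue, so the
 * highest-SACKed marker has to follow it before 'old' is freed. */
static void example_replace_skb(struct sock *sk, struct sk_buff *old,
				struct sk_buff *new)
{
	tcp_highest_sack_replace(sk, old, new);
	/* ... move payload and flags from 'old' to 'new', then unlink
	 * and free 'old' ... */
}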
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 0b7b54d898bd..7cebba491011 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -889,7 +889,6 @@ struct xdp_md {
enum sk_action {
SK_DROP = 0,
SK_PASS,
- SK_REDIRECT,
};
#define BPF_TAG_SIZE 8
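With SK_REDIRECT gone from the UAPI enum, a sockmap verdict program returns only SK_PASS or SK_DROP and requests a redirect through the bpf_sk_redirect_map() helper, whose return value is itself one of those two actions. A minimal sketch in the style of the samples of that period (map name, sizing, section name, includes and the exact helper prototype are assumptions here):

#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"	/* samples/bpf-style helper declarations */

/* Hypothetical SOCKMAP; name and sizing are illustrative only. */
struct bpf_map_def SEC("maps") sock_map = {
	.type		= BPF_MAP_TYPE_SOCKMAP,
	.key_size	= sizeof(int),
	.value_size	= sizeof(int),
	.max_entries	= 2,
};

SEC("sk_skb2")
int bpf_prog_verdict(struct __sk_buff *skb)
{
	/* SK_REDIRECT no longer exists as a return code: the redirect is
	 * requested via the helper, which returns SK_PASS on success and
	 * SK_DROP on failure, so its result is passed straight up. */
	return bpf_sk_redirect_map(skb, &sock_map, 0, 0);
}

char _license[] SEC("license") = "GPL";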