author     Linus Torvalds <torvalds@linux-foundation.org>   2017-09-07 00:45:08 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-09-07 00:45:08 +0300
commit     aae3dbb4776e7916b6cd442d00159bea27a695c1 (patch)
tree       d074c5d783a81e7e2e084b1eba77f57459da7e37 /drivers/s390/net
parent     ec3604c7a5aae8953545b0d05495357009a960e5 (diff)
parent     66bed8465a808400eb14562510e26c8818082cb8 (diff)
download   linux-aae3dbb4776e7916b6cd442d00159bea27a695c1.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next
Pull networking updates from David Miller:
1) Support ipv6 checksum offload in sunvnet driver, from Shannon
Nelson.
2) Move to RB-tree instead of custom AVL code in inetpeer, from Eric
Dumazet.
3) Allow generic XDP to work on virtual devices, from John Fastabend.
4) Add bpf device maps and XDP_REDIRECT, which can be used to build
    arbitrary switching frameworks using XDP. From John Fastabend. (A
    minimal program sketch follows this list.)
5) Remove UFO offloads from the tree; they gave us little other than bugs.
6) Remove the IPSEC flow cache, from Florian Westphal.
7) Support ipv6 route offload in mlxsw driver.
8) Support VF representors in bnxt_en, from Sathya Perla.
9) Add support for forward error correction modes to ethtool, from
Vidya Sagar Ravipati.
10) Add time filter for packet scheduler action dumping, from Jamal Hadi
Salim.
11) Extend the zerocopy sendmsg() used by virtio and tap to regular
    sockets via MSG_ZEROCOPY. From Willem de Bruijn. (A userspace usage
    sketch follows this list.)
12) Significantly rework value tracking in the BPF verifier, from Edward
Cree.
13) Add new jump instructions to eBPF, from Daniel Borkmann.
14) Rework rtnetlink plumbing so that operations can be run without
taking the RTNL semaphore. From Florian Westphal.
15) Support XDP in tap driver, from Jason Wang.
16) Add 32-bit eBPF JIT for ARM, from Shubham Bansal.
17) Add Huawei hinic ethernet driver.
18) Allow reporting of MD5 keys in TCP inet_diag dumps, from Ivan
    Delalande.
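
For item 4, here is a minimal sketch of what a devmap-based XDP_REDIRECT
program can look like. It is not taken from this merge (the map name
tx_port, the section names, and the samples/bpf-style "bpf_helpers.h"
header are assumptions of the sketch); it only illustrates the
BPF_MAP_TYPE_DEVMAP + bpf_redirect_map() combination the pull text refers to.

```c
/* Hypothetical sample: forward every frame out of the interface whose
 * ifindex user space stored in slot 0 of a device map.
 * Compile with clang -O2 -target bpf; "bpf_helpers.h" is the helper
 * header shipped under samples/bpf in this era. */
#include <linux/bpf.h>
#include "bpf_helpers.h"

struct bpf_map_def SEC("maps") tx_port = {
	.type        = BPF_MAP_TYPE_DEVMAP,	/* new map type in this merge */
	.key_size    = sizeof(int),
	.value_size  = sizeof(int),		/* value = egress ifindex */
	.max_entries = 1,
};

SEC("xdp_redirect_map")
int xdp_redirect_map_prog(struct xdp_md *ctx)
{
	int key = 0;

	/* Returns XDP_REDIRECT when slot 0 holds a valid ifindex,
	 * XDP_ABORTED otherwise. */
	return bpf_redirect_map(&tx_port, key, 0);
}

char _license[] SEC("license") = "GPL";
```

User space loads the object, writes the egress ifindex into tx_port[0],
and attaches the program to the ingress device; the redirect itself is
then resolved entirely inside the XDP fast path.

For item 11, a rough userspace sketch of the MSG_ZEROCOPY flow on a TCP
socket follows. Error handling and the usual poll() loop are trimmed, and
the constants are defined defensively in case the installed uapi headers
predate this release; treat it as an illustration of the interface, not a
complete program.

```c
/* Hypothetical MSG_ZEROCOPY sketch: opt in, send, then reap the
 * completion notification from the socket error queue. */
#include <sys/socket.h>
#include <linux/errqueue.h>

#ifndef SO_ZEROCOPY
#define SO_ZEROCOPY		60
#endif
#ifndef MSG_ZEROCOPY
#define MSG_ZEROCOPY		0x4000000
#endif
#ifndef SO_EE_ORIGIN_ZEROCOPY
#define SO_EE_ORIGIN_ZEROCOPY	5
#endif

static void send_zerocopy(int fd, const void *buf, size_t len)
{
	int one = 1;
	char control[128];
	struct msghdr msg = { .msg_control = control,
			      .msg_controllen = sizeof(control) };
	struct cmsghdr *cm;

	/* opt in once per socket, then request zerocopy per call */
	setsockopt(fd, SOL_SOCKET, SO_ZEROCOPY, &one, sizeof(one));
	send(fd, buf, len, MSG_ZEROCOPY);

	/* buf stays pinned until the completion arrives on the error
	 * queue (normally waited for with poll() on POLLERR) */
	if (recvmsg(fd, &msg, MSG_ERRQUEUE) < 0)
		return;

	for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm)) {
		struct sock_extended_err *serr = (void *)CMSG_DATA(cm);

		if (serr->ee_errno == 0 &&
		    serr->ee_origin == SO_EE_ORIGIN_ZEROCOPY) {
			/* ee_info..ee_data is the range of completed
			 * zerocopy sends (a 32-bit counter), not bytes */
		}
	}
}
```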
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next: (1780 commits)
i40e: point wb_desc at the nvm_wb_desc during i40e_read_nvm_aq
i40e: avoid NVM acquire deadlock during NVM update
drivers: net: xgene: Remove return statement from void function
drivers: net: xgene: Configure tx/rx delay for ACPI
drivers: net: xgene: Read tx/rx delay for ACPI
rocker: fix kcalloc parameter order
rds: Fix non-atomic operation on shared flag variable
net: sched: don't use GFP_KERNEL under spin lock
vhost_net: correctly check tx avail during rx busy polling
net: mdio-mux: add mdio_mux parameter to mdio_mux_init()
rxrpc: Make service connection lookup always check for retry
net: stmmac: Delete dead code for MDIO registration
gianfar: Fix Tx flow control deactivation
cxgb4: Ignore MPS_TX_INT_CAUSE[Bubble] for T6
cxgb4: Fix pause frame count in t4_get_port_stats
cxgb4: fix memory leak
tun: rename generic_xdp to skb_xdp
tun: reserve extra headroom only when XDP is set
net: dsa: bcm_sf2: Configure IMP port TC2QOS mapping
net: dsa: bcm_sf2: Advertise number of egress queues
...
Diffstat (limited to 'drivers/s390/net')
-rw-r--r--  drivers/s390/net/ctcm_main.c      |   2
-rw-r--r--  drivers/s390/net/lcs.c            |  28
-rw-r--r--  drivers/s390/net/netiucv.c        |   4
-rw-r--r--  drivers/s390/net/qeth_core.h      |  17
-rw-r--r--  drivers/s390/net/qeth_core_main.c | 205
-rw-r--r--  drivers/s390/net/qeth_core_sys.c  |   2
-rw-r--r--  drivers/s390/net/qeth_l2_main.c   | 343
-rw-r--r--  drivers/s390/net/qeth_l3_main.c   |  67
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c    |  17
9 files changed, 356 insertions(+), 329 deletions(-)
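
Much of the qeth rework in the diff below centers on a new core helper that
the layer-2 transmit path uses to place the qeth_hdr. Because the diff is
hard to follow in this flattened form, the helper is reproduced here from
the qeth_core_main.c hunk (reformatted only): a return value of len means
the header was pushed into existing skb headroom, 0 means it was allocated
from qeth_core_header_cache and needs its own buffer element, and a
negative value is an error.

```c
/* Added by this merge in drivers/s390/net/qeth_core_main.c. */
int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len)
{
	/* Push inline only if the header fits in the headroom and does
	 * not cross a page boundary (one SBAL element). */
	if (skb_headroom(skb) >= len &&
	    qeth_get_elements_for_range((addr_t)skb->data - len,
					(addr_t)skb->data) == 1) {
		*hdr = skb_push(skb, len);
		return len;
	}
	/* fall back: allocate a dedicated header from the cache */
	*hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
	if (!*hdr)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_push_hdr);
```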
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 2ade6131a89f..26363e0816fe 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -305,7 +305,7 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb) * ch The channel, the sense code belongs to. * sense The sense code to inspect. */ -static inline void ccw_unit_check(struct channel *ch, __u8 sense) +static void ccw_unit_check(struct channel *ch, __u8 sense) { CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, "%s(%s): %02x", diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 619da81dca70..d01b5c2a7760 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -327,8 +327,7 @@ lcs_set_allowed_threads(struct lcs_card *card, unsigned long threads) spin_unlock_irqrestore(&card->mask_lock, flags); wake_up(&card->wait_q); } -static inline int -lcs_threads_running(struct lcs_card *card, unsigned long threads) +static int lcs_threads_running(struct lcs_card *card, unsigned long threads) { unsigned long flags; int rc = 0; @@ -346,8 +345,7 @@ lcs_wait_for_threads(struct lcs_card *card, unsigned long threads) lcs_threads_running(card, threads) == 0); } -static inline int -lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread) +static int lcs_set_thread_start_bit(struct lcs_card *card, unsigned long thread) { unsigned long flags; @@ -373,8 +371,7 @@ lcs_clear_thread_running_bit(struct lcs_card *card, unsigned long thread) wake_up(&card->wait_q); } -static inline int -__lcs_do_run_thread(struct lcs_card *card, unsigned long thread) +static int __lcs_do_run_thread(struct lcs_card *card, unsigned long thread) { unsigned long flags; int rc = 0; @@ -444,8 +441,7 @@ lcs_setup_card(struct lcs_card *card) INIT_LIST_HEAD(&card->lancmd_waiters); } -static inline void -lcs_clear_multicast_list(struct lcs_card *card) +static void lcs_clear_multicast_list(struct lcs_card *card) { #ifdef CONFIG_IP_MULTICAST struct lcs_ipm_list *ipm; @@ -656,8 +652,7 @@ __lcs_resume_channel(struct lcs_channel *channel) /** * Make a buffer ready for processing. 
*/ -static inline void -__lcs_ready_buffer_bits(struct lcs_channel *channel, int index) +static void __lcs_ready_buffer_bits(struct lcs_channel *channel, int index) { int prev, next; @@ -1169,8 +1164,8 @@ lcs_get_mac_for_ipm(__be32 ipm, char *mac, struct net_device *dev) /** * function called by net device to handle multicast address relevant things */ -static inline void -lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) +static void lcs_remove_mc_addresses(struct lcs_card *card, + struct in_device *in4_dev) { struct ip_mc_list *im4; struct list_head *l; @@ -1196,8 +1191,9 @@ lcs_remove_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) spin_unlock_irqrestore(&card->ipm_lock, flags); } -static inline struct lcs_ipm_list * -lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf) +static struct lcs_ipm_list *lcs_check_addr_entry(struct lcs_card *card, + struct ip_mc_list *im4, + char *buf) { struct lcs_ipm_list *tmp, *ipm = NULL; struct list_head *l; @@ -1218,8 +1214,8 @@ lcs_check_addr_entry(struct lcs_card *card, struct ip_mc_list *im4, char *buf) return ipm; } -static inline void -lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) +static void lcs_set_mc_addresses(struct lcs_card *card, + struct in_device *in4_dev) { struct ip_mc_list *im4; diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 7e0e6a4019f3..b9c7c1e61da2 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c @@ -249,14 +249,14 @@ struct ll_header { * Compatibility macros for busy handling * of network devices. */ -static inline void netiucv_clear_busy(struct net_device *dev) +static void netiucv_clear_busy(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); clear_bit(0, &priv->tbusy); netif_wake_queue(dev); } -static inline int netiucv_test_and_set_busy(struct net_device *dev) +static int netiucv_test_and_set_busy(struct net_device *dev) { struct netiucv_priv *priv = netdev_priv(dev); netif_stop_queue(dev); diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 7a0ffc71b25d..59e09854c4f7 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -857,11 +857,6 @@ static inline int qeth_get_ip_version(struct sk_buff *skb) } } -static inline int qeth_get_ip_protocol(struct sk_buff *skb) -{ - return ip_hdr(skb)->protocol; -} - static inline void qeth_put_buffer_pool_entry(struct qeth_card *card, struct qeth_buffer_pool_entry *entry) { @@ -951,10 +946,13 @@ int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); int qeth_get_elements_no(struct qeth_card *card, struct sk_buff *skb, int extra_elems, int data_offset); int qeth_get_elements_for_frags(struct sk_buff *); -int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, - struct sk_buff *, struct qeth_hdr *, int, int); -int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, - struct sk_buff *, struct qeth_hdr *, int); +int qeth_do_send_packet_fast(struct qeth_card *card, + struct qeth_qdio_out_q *queue, struct sk_buff *skb, + struct qeth_hdr *hdr, unsigned int offset, + unsigned int hd_len); +int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int hd_len, unsigned int offset, int elements); int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); int qeth_core_get_sset_count(struct net_device *, int); void qeth_core_get_ethtool_stats(struct 
net_device *, @@ -987,6 +985,7 @@ int qeth_set_features(struct net_device *, netdev_features_t); int qeth_recover_features(struct net_device *); netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t); int qeth_vm_request_mac(struct qeth_card *card); +int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len); /* exports for OSN */ int qeth_osn_assist(struct net_device *, void *, int); diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 4792cabb862e..bae7440abc01 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -101,7 +101,7 @@ void qeth_close_dev(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_close_dev); -static inline const char *qeth_get_cardname(struct qeth_card *card) +static const char *qeth_get_cardname(struct qeth_card *card) { if (card->info.guestlan) { switch (card->info.type) { @@ -330,7 +330,7 @@ static struct qeth_qdio_q *qeth_alloc_qdio_queue(void) return q; } -static inline int qeth_cq_init(struct qeth_card *card) +static int qeth_cq_init(struct qeth_card *card) { int rc; @@ -352,7 +352,7 @@ out: return rc; } -static inline int qeth_alloc_cq(struct qeth_card *card) +static int qeth_alloc_cq(struct qeth_card *card) { int rc; @@ -397,7 +397,7 @@ kmsg_out: goto out; } -static inline void qeth_free_cq(struct qeth_card *card) +static void qeth_free_cq(struct qeth_card *card) { if (card->qdio.c_q) { --card->qdio.no_in_queues; @@ -408,8 +408,9 @@ static inline void qeth_free_cq(struct qeth_card *card) card->qdio.out_bufstates = NULL; } -static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, - int delayed) { +static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, + int delayed) +{ enum iucv_tx_notify n; switch (sbalf15) { @@ -432,8 +433,8 @@ static inline enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15, return n; } -static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, - int bidx, int forced_cleanup) +static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx, + int forced_cleanup) { if (q->card->options.cq != QETH_CQ_ENABLED) return; @@ -475,8 +476,9 @@ static inline void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, } -static inline void qeth_qdio_handle_aob(struct qeth_card *card, - unsigned long phys_aob_addr) { +static void qeth_qdio_handle_aob(struct qeth_card *card, + unsigned long phys_aob_addr) +{ struct qaob *aob; struct qeth_qdio_out_buffer *buffer; enum iucv_tx_notify notification; @@ -2228,7 +2230,7 @@ static int qeth_cm_setup(struct qeth_card *card) } -static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card) +static int qeth_get_initial_mtu_for_card(struct qeth_card *card) { switch (card->info.type) { case QETH_CARD_TYPE_UNKNOWN: @@ -2251,7 +2253,7 @@ static inline int qeth_get_initial_mtu_for_card(struct qeth_card *card) } } -static inline int qeth_get_mtu_outof_framesize(int framesize) +static int qeth_get_mtu_outof_framesize(int framesize) { switch (framesize) { case 0x4000: @@ -2267,7 +2269,7 @@ static inline int qeth_get_mtu_outof_framesize(int framesize) } } -static inline int qeth_mtu_is_valid(struct qeth_card *card, int mtu) +static int qeth_mtu_is_valid(struct qeth_card *card, int mtu) { switch (card->info.type) { case QETH_CARD_TYPE_OSD: @@ -2738,8 +2740,8 @@ static void qeth_initialize_working_pool_list(struct qeth_card *card) } } -static inline struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( - struct qeth_card 
*card) +static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry( + struct qeth_card *card) { struct list_head *plh; struct qeth_buffer_pool_entry *entry; @@ -2870,7 +2872,7 @@ int qeth_init_qdio_queues(struct qeth_card *card) } EXPORT_SYMBOL_GPL(qeth_init_qdio_queues); -static inline __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type) +static __u8 qeth_get_ipa_adp_type(enum qeth_link_types link_type) { switch (link_type) { case QETH_LINK_TYPE_HSTR: @@ -3888,27 +3890,45 @@ int qeth_hdr_chk_and_bounce(struct sk_buff *skb, struct qeth_hdr **hdr, int len) } EXPORT_SYMBOL_GPL(qeth_hdr_chk_and_bounce); -static inline void __qeth_fill_buffer(struct sk_buff *skb, - struct qdio_buffer *buffer, int is_tso, int *next_element_to_fill, - int offset) +/** + * qeth_push_hdr() - push a qeth_hdr onto an skb. + * @skb: skb that the qeth_hdr should be pushed onto. + * @hdr: double pointer to a qeth_hdr. When returning with >= 0, + * it contains a valid pointer to a qeth_hdr. + * @len: length of the hdr that needs to be pushed on. + * + * Returns the pushed length. If the header can't be pushed on + * (eg. because it would cross a page boundary), it is allocated from + * the cache instead and 0 is returned. + * Error to create the hdr is indicated by returning with < 0. + */ +int qeth_push_hdr(struct sk_buff *skb, struct qeth_hdr **hdr, unsigned int len) { - int length = skb_headlen(skb); - int length_here; - int element; - char *data; - int first_lap, cnt; - struct skb_frag_struct *frag; - - element = *next_element_to_fill; - data = skb->data; - first_lap = (is_tso == 0 ? 1 : 0); - - if (offset >= 0) { - data = skb->data + offset; - length -= offset; - first_lap = 0; + if (skb_headroom(skb) >= len && + qeth_get_elements_for_range((addr_t)skb->data - len, + (addr_t)skb->data) == 1) { + *hdr = skb_push(skb, len); + return len; } + /* fall back */ + *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); + if (!*hdr) + return -ENOMEM; + return 0; +} +EXPORT_SYMBOL_GPL(qeth_push_hdr); + +static void __qeth_fill_buffer(struct sk_buff *skb, + struct qeth_qdio_out_buffer *buf, + bool is_first_elem, unsigned int offset) +{ + struct qdio_buffer *buffer = buf->buffer; + int element = buf->next_element_to_fill; + int length = skb_headlen(skb) - offset; + char *data = skb->data + offset; + int length_here, cnt; + /* map linear part into buffer element(s) */ while (length > 0) { /* length_here is the remaining amount of data in this page */ length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE); @@ -3918,34 +3938,28 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, buffer->element[element].addr = data; buffer->element[element].length = length_here; length -= length_here; - if (!length) { - if (first_lap) - if (skb_shinfo(skb)->nr_frags) - buffer->element[element].eflags = - SBAL_EFLAGS_FIRST_FRAG; - else - buffer->element[element].eflags = 0; - else + if (is_first_elem) { + is_first_elem = false; + if (length || skb_is_nonlinear(skb)) + /* skb needs additional elements */ buffer->element[element].eflags = - SBAL_EFLAGS_MIDDLE_FRAG; - } else { - if (first_lap) - buffer->element[element].eflags = - SBAL_EFLAGS_FIRST_FRAG; + SBAL_EFLAGS_FIRST_FRAG; else - buffer->element[element].eflags = - SBAL_EFLAGS_MIDDLE_FRAG; + buffer->element[element].eflags = 0; + } else { + buffer->element[element].eflags = + SBAL_EFLAGS_MIDDLE_FRAG; } data += length_here; element++; - first_lap = 0; } + /* map page frags into buffer element(s) */ for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { - 
frag = &skb_shinfo(skb)->frags[cnt]; - data = (char *)page_to_phys(skb_frag_page(frag)) + - frag->page_offset; - length = frag->size; + skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt]; + + data = skb_frag_address(frag); + length = skb_frag_size(frag); while (length > 0) { length_here = PAGE_SIZE - ((unsigned long) data % PAGE_SIZE); @@ -3964,48 +3978,45 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, if (buffer->element[element - 1].eflags) buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG; - *next_element_to_fill = element; + buf->next_element_to_fill = element; } -static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, - struct qeth_qdio_out_buffer *buf, struct sk_buff *skb, - struct qeth_hdr *hdr, int offset, int hd_len) +/** + * qeth_fill_buffer() - map skb into an output buffer + * @queue: QDIO queue to submit the buffer on + * @buf: buffer to transport the skb + * @skb: skb to map into the buffer + * @hdr: qeth_hdr for this skb. Either at skb->data, or allocated + * from qeth_core_header_cache. + * @offset: when mapping the skb, start at skb->data + offset + * @hd_len: if > 0, build a dedicated header element of this size + */ +static int qeth_fill_buffer(struct qeth_qdio_out_q *queue, + struct qeth_qdio_out_buffer *buf, + struct sk_buff *skb, struct qeth_hdr *hdr, + unsigned int offset, unsigned int hd_len) { - struct qdio_buffer *buffer; - int flush_cnt = 0, hdr_len, large_send = 0; + struct qdio_buffer *buffer = buf->buffer; + bool is_first_elem = true; + int flush_cnt = 0; - buffer = buf->buffer; refcount_inc(&skb->users); skb_queue_tail(&buf->skb_list, skb); - /*check first on TSO ....*/ - if (hdr->hdr.l3.id == QETH_HEADER_TYPE_TSO) { + /* build dedicated header element */ + if (hd_len) { int element = buf->next_element_to_fill; + is_first_elem = false; - hdr_len = sizeof(struct qeth_hdr_tso) + - ((struct qeth_hdr_tso *)hdr)->ext.dg_hdr_len; - /*fill first buffer entry only with header information */ - buffer->element[element].addr = skb->data; - buffer->element[element].length = hdr_len; - buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; - buf->next_element_to_fill++; - skb->data += hdr_len; - skb->len -= hdr_len; - large_send = 1; - } - - if (offset >= 0) { - int element = buf->next_element_to_fill; buffer->element[element].addr = hdr; - buffer->element[element].length = sizeof(struct qeth_hdr) + - hd_len; + buffer->element[element].length = hd_len; buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG; - buf->is_header[element] = 1; + /* remember to free cache-allocated qeth_hdr: */ + buf->is_header[element] = ((void *)hdr != skb->data); buf->next_element_to_fill++; } - __qeth_fill_buffer(skb, buffer, large_send, - (int *)&buf->next_element_to_fill, offset); + __qeth_fill_buffer(skb, buf, is_first_elem, offset); if (!queue->do_pack) { QETH_CARD_TEXT(queue->card, 6, "fillbfnp"); @@ -4030,8 +4041,9 @@ static inline int qeth_fill_buffer(struct qeth_qdio_out_q *queue, } int qeth_do_send_packet_fast(struct qeth_card *card, - struct qeth_qdio_out_q *queue, struct sk_buff *skb, - struct qeth_hdr *hdr, int offset, int hd_len) + struct qeth_qdio_out_q *queue, struct sk_buff *skb, + struct qeth_hdr *hdr, unsigned int offset, + unsigned int hd_len) { struct qeth_qdio_out_buffer *buffer; int index; @@ -4061,8 +4073,9 @@ out: EXPORT_SYMBOL_GPL(qeth_do_send_packet_fast); int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, - struct sk_buff *skb, struct qeth_hdr *hdr, - int elements_needed) + struct sk_buff *skb, struct 
qeth_hdr *hdr, + unsigned int offset, unsigned int hd_len, + int elements_needed) { struct qeth_qdio_out_buffer *buffer; int start_index; @@ -4111,7 +4124,7 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, } } } - tmp = qeth_fill_buffer(queue, buffer, skb, hdr, -1, 0); + tmp = qeth_fill_buffer(queue, buffer, skb, hdr, offset, hd_len); queue->next_buf_to_fill = (queue->next_buf_to_fill + tmp) % QDIO_MAX_BUFFERS_PER_Q; flush_count += tmp; @@ -4834,7 +4847,7 @@ out: } EXPORT_SYMBOL_GPL(qeth_vm_request_mac); -static inline int qeth_get_qdio_q_format(struct qeth_card *card) +static int qeth_get_qdio_q_format(struct qeth_card *card) { if (card->info.type == QETH_CARD_TYPE_IQD) return QDIO_IQDIO_QFMT; @@ -4899,9 +4912,12 @@ out: return; } -static inline void qeth_qdio_establish_cq(struct qeth_card *card, - struct qdio_buffer **in_sbal_ptrs, - void (**queue_start_poll) (struct ccw_device *, int, unsigned long)) { +static void qeth_qdio_establish_cq(struct qeth_card *card, + struct qdio_buffer **in_sbal_ptrs, + void (**queue_start_poll) + (struct ccw_device *, int, + unsigned long)) +{ int i; if (card->options.cq == QETH_CQ_ENABLED) { @@ -5193,9 +5209,10 @@ out: } EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); -static inline int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer, - struct qdio_buffer_element *element, - struct sk_buff **pskb, int offset, int *pfrag, int data_len) +static int qeth_create_skb_frag(struct qeth_qdio_buffer *qethbuffer, + struct qdio_buffer_element *element, + struct sk_buff **pskb, int offset, int *pfrag, + int data_len) { struct page *page = virt_to_page(element->addr); if (*pskb == NULL) { diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c index 6d255c22656d..d1ee9e30c68b 100644 --- a/drivers/s390/net/qeth_core_sys.c +++ b/drivers/s390/net/qeth_core_sys.c @@ -78,7 +78,7 @@ static ssize_t qeth_dev_card_type_show(struct device *dev, static DEVICE_ATTR(card_type, 0444, qeth_dev_card_type_show, NULL); -static inline const char *qeth_get_bufsize_str(struct qeth_card *card) +static const char *qeth_get_bufsize_str(struct qeth_card *card) { if (card->qdio.in_buf_size == 16384) return "16k"; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index ad110abfdd47..760b023eae95 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -231,13 +231,7 @@ static void qeth_l2_del_all_macs(struct qeth_card *card) spin_unlock_bh(&card->mclock); } -static inline u32 qeth_l2_mac_hash(const u8 *addr) -{ - return get_unaligned((u32 *)(&addr[2])); -} - -static inline int qeth_l2_get_cast_type(struct qeth_card *card, - struct sk_buff *skb) +static int qeth_l2_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { if (card->info.type == QETH_CARD_TYPE_OSN) return RTN_UNSPEC; @@ -248,8 +242,8 @@ static inline int qeth_l2_get_cast_type(struct qeth_card *card, return RTN_UNSPEC; } -static inline void qeth_l2_hdr_csum(struct qeth_card *card, - struct qeth_hdr *hdr, struct sk_buff *skb) +static void qeth_l2_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr, + struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); @@ -265,13 +259,14 @@ static inline void qeth_l2_hdr_csum(struct qeth_card *card, card->perf_stats.tx_csum++; } -static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, - struct sk_buff *skb, int cast_type) +static void qeth_l2_fill_header(struct qeth_hdr *hdr, struct sk_buff *skb, + int cast_type, unsigned int data_len) { struct 
vlan_ethhdr *veth = (struct vlan_ethhdr *)skb_mac_header(skb); memset(hdr, 0, sizeof(struct qeth_hdr)); hdr->hdr.l2.id = QETH_HEADER_TYPE_LAYER2; + hdr->hdr.l2.pkt_length = data_len; /* set byte byte 3 to casting flags */ if (cast_type == RTN_MULTICAST) @@ -281,7 +276,6 @@ static void qeth_l2_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, else hdr->hdr.l2.flags[2] |= QETH_LAYER2_FLAG_UNICAST; - hdr->hdr.l2.pkt_length = skb->len - sizeof(struct qeth_hdr); /* VSWITCH relies on the VLAN * information to be present in * the QDIO header */ @@ -519,15 +513,6 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) /* fall back to alternative mechanism: */ } - if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) { - rc = qeth_query_setadapterparms(card); - if (rc) { - QETH_DBF_MESSAGE(2, "could not query adapter " - "parameters on device %s: x%x\n", - CARD_BUS_ID(card), rc); - } - } - if (card->info.type == QETH_CARD_TYPE_IQD || card->info.type == QETH_CARD_TYPE_OSM || card->info.type == QETH_CARD_TYPE_OSX || @@ -615,13 +600,13 @@ static void qeth_promisc_to_bridge(struct qeth_card *card) * only if there is not in the hash table storage already * */ -static void -qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc) +static void qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, + u8 is_uc) { + u32 mac_hash = get_unaligned((u32 *)(&ha->addr[2])); struct qeth_mac *mac; - hash_for_each_possible(card->mac_htable, mac, hnode, - qeth_l2_mac_hash(ha->addr)) { + hash_for_each_possible(card->mac_htable, mac, hnode, mac_hash) { if (is_uc == mac->is_uc && !memcmp(ha->addr, mac->mac_addr, OSA_ADDR_LEN)) { mac->disp_flag = QETH_DISP_ADDR_DO_NOTHING; @@ -638,9 +623,7 @@ qeth_l2_add_mac(struct qeth_card *card, struct netdev_hw_addr *ha, u8 is_uc) mac->is_uc = is_uc; mac->disp_flag = QETH_DISP_ADDR_ADD; - hash_add(card->mac_htable, &mac->hnode, - qeth_l2_mac_hash(mac->mac_addr)); - + hash_add(card->mac_htable, &mac->hnode, mac_hash); } static void qeth_l2_set_rx_mode(struct net_device *dev) @@ -693,21 +676,127 @@ static void qeth_l2_set_rx_mode(struct net_device *dev) qeth_promisc_to_bridge(card); } -static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, - struct net_device *dev) +static int qeth_l2_xmit_iqd(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int cast_type) { + unsigned int data_offset = ETH_HLEN; + struct qeth_hdr *hdr; int rc; + + hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); + if (!hdr) + return -ENOMEM; + qeth_l2_fill_header(hdr, skb, cast_type, skb->len); + skb_copy_from_linear_data(skb, ((char *)hdr) + sizeof(*hdr), + data_offset); + + if (!qeth_get_elements_no(card, skb, 1, data_offset)) { + rc = -E2BIG; + goto out; + } + rc = qeth_do_send_packet_fast(card, queue, skb, hdr, data_offset, + sizeof(*hdr) + data_offset); +out: + if (rc) + kmem_cache_free(qeth_core_header_cache, hdr); + return rc; +} + +static int qeth_l2_xmit_osa(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue, int cast_type) +{ + int push_len = sizeof(struct qeth_hdr); + unsigned int elements, nr_frags; + unsigned int hdr_elements = 0; struct qeth_hdr *hdr = NULL; - int elements = 0; + unsigned int hd_len = 0; + int rc; + + /* fix hardware limitation: as long as we do not have sbal + * chaining we can not send long frag lists + */ + if (!qeth_get_elements_no(card, skb, 0, 0)) { + rc = skb_linearize(skb); + + if (card->options.performance_stats) { + if (rc) + card->perf_stats.tx_linfail++; + 
else + card->perf_stats.tx_lin++; + } + if (rc) + return rc; + } + nr_frags = skb_shinfo(skb)->nr_frags; + + rc = skb_cow_head(skb, push_len); + if (rc) + return rc; + push_len = qeth_push_hdr(skb, &hdr, push_len); + if (push_len < 0) + return push_len; + if (!push_len) { + /* hdr was allocated from cache */ + hd_len = sizeof(*hdr); + hdr_elements = 1; + } + qeth_l2_fill_header(hdr, skb, cast_type, skb->len - push_len); + if (skb->ip_summed == CHECKSUM_PARTIAL) + qeth_l2_hdr_csum(card, hdr, skb); + + elements = qeth_get_elements_no(card, skb, hdr_elements, 0); + if (!elements) { + rc = -E2BIG; + goto out; + } + elements += hdr_elements; + + /* TODO: remove the skb_orphan() once TX completion is fast enough */ + skb_orphan(skb); + rc = qeth_do_send_packet(card, queue, skb, hdr, 0, hd_len, elements); +out: + if (!rc) { + if (card->options.performance_stats && nr_frags) { + card->perf_stats.sg_skbs_sent++; + /* nr_frags + skb->data */ + card->perf_stats.sg_frags_sent += nr_frags + 1; + } + } else { + if (hd_len) + kmem_cache_free(qeth_core_header_cache, hdr); + if (rc == -EBUSY) + /* roll back to ETH header */ + skb_pull(skb, push_len); + } + return rc; +} + +static int qeth_l2_xmit_osn(struct qeth_card *card, struct sk_buff *skb, + struct qeth_qdio_out_q *queue) +{ + unsigned int elements; + struct qeth_hdr *hdr; + + if (skb->protocol == htons(ETH_P_IPV6)) + return -EPROTONOSUPPORT; + + hdr = (struct qeth_hdr *)skb->data; + elements = qeth_get_elements_no(card, skb, 0, 0); + if (!elements) + return -E2BIG; + if (qeth_hdr_chk_and_bounce(skb, &hdr, sizeof(*hdr))) + return -EINVAL; + return qeth_do_send_packet(card, queue, skb, hdr, 0, 0, elements); +} + +static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ struct qeth_card *card = dev->ml_priv; - struct sk_buff *new_skb = skb; int cast_type = qeth_l2_get_cast_type(card, skb); struct qeth_qdio_out_q *queue; int tx_bytes = skb->len; - int data_offset = -1; - int elements_needed = 0; - int hd_len = 0; - int nr_frags; + int rc; if (card->qdio.do_prio_queueing || (cast_type && card->info.is_multicast_different)) @@ -721,118 +810,38 @@ static netdev_tx_t qeth_l2_hard_start_xmit(struct sk_buff *skb, goto tx_drop; } - if ((card->info.type == QETH_CARD_TYPE_OSN) && - (skb->protocol == htons(ETH_P_IPV6))) - goto tx_drop; - if (card->options.performance_stats) { card->perf_stats.outbound_cnt++; card->perf_stats.outbound_start_time = qeth_get_micros(); } netif_stop_queue(dev); - /* fix hardware limitation: as long as we do not have sbal - * chaining we can not send long frag lists - */ - if ((card->info.type != QETH_CARD_TYPE_IQD) && - !qeth_get_elements_no(card, new_skb, 0, 0)) { - int lin_rc = skb_linearize(new_skb); - - if (card->options.performance_stats) { - if (lin_rc) - card->perf_stats.tx_linfail++; - else - card->perf_stats.tx_lin++; - } - if (lin_rc) - goto tx_drop; - } - - if (card->info.type == QETH_CARD_TYPE_OSN) - hdr = (struct qeth_hdr *)skb->data; - else { - if (card->info.type == QETH_CARD_TYPE_IQD) { - new_skb = skb; - data_offset = ETH_HLEN; - hd_len = ETH_HLEN; - hdr = kmem_cache_alloc(qeth_core_header_cache, - GFP_ATOMIC); - if (!hdr) - goto tx_drop; - elements_needed++; - skb_reset_mac_header(new_skb); - qeth_l2_fill_header(card, hdr, new_skb, cast_type); - hdr->hdr.l2.pkt_length = new_skb->len; - memcpy(((char *)hdr) + sizeof(struct qeth_hdr), - skb_mac_header(new_skb), ETH_HLEN); - } else { - /* create a clone with writeable headroom */ - new_skb = skb_realloc_headroom(skb, - sizeof(struct 
qeth_hdr)); - if (!new_skb) - goto tx_drop; - hdr = skb_push(new_skb, sizeof(struct qeth_hdr)); - skb_set_mac_header(new_skb, sizeof(struct qeth_hdr)); - qeth_l2_fill_header(card, hdr, new_skb, cast_type); - if (new_skb->ip_summed == CHECKSUM_PARTIAL) - qeth_l2_hdr_csum(card, hdr, new_skb); - } - } - - elements = qeth_get_elements_no(card, new_skb, elements_needed, - (data_offset > 0) ? data_offset : 0); - if (!elements) { - if (data_offset >= 0) - kmem_cache_free(qeth_core_header_cache, hdr); - goto tx_drop; + switch (card->info.type) { + case QETH_CARD_TYPE_OSN: + rc = qeth_l2_xmit_osn(card, skb, queue); + break; + case QETH_CARD_TYPE_IQD: + rc = qeth_l2_xmit_iqd(card, skb, queue, cast_type); + break; + default: + rc = qeth_l2_xmit_osa(card, skb, queue, cast_type); } - if (card->info.type != QETH_CARD_TYPE_IQD) { - if (qeth_hdr_chk_and_bounce(new_skb, &hdr, - sizeof(struct qeth_hdr_layer2))) - goto tx_drop; - rc = qeth_do_send_packet(card, queue, new_skb, hdr, - elements); - } else - rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, - data_offset, hd_len); if (!rc) { card->stats.tx_packets++; card->stats.tx_bytes += tx_bytes; - if (card->options.performance_stats) { - nr_frags = skb_shinfo(new_skb)->nr_frags; - if (nr_frags) { - card->perf_stats.sg_skbs_sent++; - /* nr_frags + skb->data */ - card->perf_stats.sg_frags_sent += nr_frags + 1; - } - } - if (new_skb != skb) - dev_kfree_skb_any(skb); - rc = NETDEV_TX_OK; - } else { - if (data_offset >= 0) - kmem_cache_free(qeth_core_header_cache, hdr); - - if (rc == -EBUSY) { - if (new_skb != skb) - dev_kfree_skb_any(new_skb); - return NETDEV_TX_BUSY; - } else - goto tx_drop; - } - - netif_wake_queue(dev); - if (card->options.performance_stats) - card->perf_stats.outbound_time += qeth_get_micros() - - card->perf_stats.outbound_start_time; - return rc; + if (card->options.performance_stats) + card->perf_stats.outbound_time += qeth_get_micros() - + card->perf_stats.outbound_start_time; + netif_wake_queue(dev); + return NETDEV_TX_OK; + } else if (rc == -EBUSY) { + return NETDEV_TX_BUSY; + } /* else fall through */ tx_drop: card->stats.tx_dropped++; card->stats.tx_errors++; - if ((new_skb != skb) && new_skb) - dev_kfree_skb_any(new_skb); dev_kfree_skb_any(skb); netif_wake_queue(dev); return NETDEV_TX_OK; @@ -1010,6 +1019,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card) card->dev->vlan_features |= NETIF_F_RXCSUM; } } + if (card->info.type != QETH_CARD_TYPE_OSN && + card->info.type != QETH_CARD_TYPE_IQD) { + card->dev->priv_flags &= ~IFF_TX_SKB_SHARING; + card->dev->needed_headroom = sizeof(struct qeth_hdr); + } + card->info.broadcast_capable = 1; qeth_l2_request_initial_mac(card); card->dev->gso_max_size = (QETH_MAX_BUFFER_ELEMENTS(card) - 1) * @@ -1744,11 +1759,26 @@ static int qeth_bridgeport_makerc(struct qeth_card *card, return rc; } -static inline int ipa_cmd_sbp(struct qeth_card *card) +static struct qeth_cmd_buffer *qeth_sbp_build_cmd(struct qeth_card *card, + enum qeth_ipa_sbp_cmd sbp_cmd, + unsigned int cmd_length) { - return (card->info.type == QETH_CARD_TYPE_IQD) ? - IPA_CMD_SETBRIDGEPORT_IQD : - IPA_CMD_SETBRIDGEPORT_OSA; + enum qeth_ipa_cmds ipa_cmd = (card->info.type == QETH_CARD_TYPE_IQD) ? 
+ IPA_CMD_SETBRIDGEPORT_IQD : + IPA_CMD_SETBRIDGEPORT_OSA; + struct qeth_cmd_buffer *iob; + struct qeth_ipa_cmd *cmd; + + iob = qeth_get_ipacmd_buffer(card, ipa_cmd, 0); + if (!iob) + return iob; + cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); + cmd->data.sbp.hdr.cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + + cmd_length; + cmd->data.sbp.hdr.command_code = sbp_cmd; + cmd->data.sbp.hdr.used_total = 1; + cmd->data.sbp.hdr.seq_no = 1; + return iob; } static int qeth_bridgeport_query_support_cb(struct qeth_card *card, @@ -1778,21 +1808,13 @@ static int qeth_bridgeport_query_support_cb(struct qeth_card *card, static void qeth_bridgeport_query_support(struct qeth_card *card) { struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; struct _qeth_sbp_cbctl cbctl; QETH_CARD_TEXT(card, 2, "brqsuppo"); - iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0); + iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_COMMANDS_SUPPORTED, + sizeof(struct qeth_sbp_query_cmds_supp)); if (!iob) return; - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.sbp.hdr.cmdlength = - sizeof(struct qeth_ipacmd_sbp_hdr) + - sizeof(struct qeth_sbp_query_cmds_supp); - cmd->data.sbp.hdr.command_code = - IPA_SBP_QUERY_COMMANDS_SUPPORTED; - cmd->data.sbp.hdr.used_total = 1; - cmd->data.sbp.hdr.seq_no = 1; if (qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_support_cb, (void *)&cbctl) || qeth_bridgeport_makerc(card, &cbctl, @@ -1846,7 +1868,6 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, { int rc = 0; struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; struct _qeth_sbp_cbctl cbctl = { .data = { .qports = { @@ -1859,16 +1880,9 @@ int qeth_bridgeport_query_ports(struct qeth_card *card, QETH_CARD_TEXT(card, 2, "brqports"); if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS)) return -EOPNOTSUPP; - iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0); + iob = qeth_sbp_build_cmd(card, IPA_SBP_QUERY_BRIDGE_PORTS, 0); if (!iob) return -ENOMEM; - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.sbp.hdr.cmdlength = - sizeof(struct qeth_ipacmd_sbp_hdr); - cmd->data.sbp.hdr.command_code = - IPA_SBP_QUERY_BRIDGE_PORTS; - cmd->data.sbp.hdr.used_total = 1; - cmd->data.sbp.hdr.seq_no = 1; rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_query_ports_cb, (void *)&cbctl); if (rc < 0) @@ -1900,7 +1914,6 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) int rc = 0; int cmdlength; struct qeth_cmd_buffer *iob; - struct qeth_ipa_cmd *cmd; struct _qeth_sbp_cbctl cbctl; enum qeth_ipa_sbp_cmd setcmd; @@ -1908,32 +1921,24 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role) switch (role) { case QETH_SBP_ROLE_NONE: setcmd = IPA_SBP_RESET_BRIDGE_PORT_ROLE; - cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + - sizeof(struct qeth_sbp_reset_role); + cmdlength = sizeof(struct qeth_sbp_reset_role); break; case QETH_SBP_ROLE_PRIMARY: setcmd = IPA_SBP_SET_PRIMARY_BRIDGE_PORT; - cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + - sizeof(struct qeth_sbp_set_primary); + cmdlength = sizeof(struct qeth_sbp_set_primary); break; case QETH_SBP_ROLE_SECONDARY: setcmd = IPA_SBP_SET_SECONDARY_BRIDGE_PORT; - cmdlength = sizeof(struct qeth_ipacmd_sbp_hdr) + - sizeof(struct qeth_sbp_set_secondary); + cmdlength = sizeof(struct qeth_sbp_set_secondary); break; default: return -EINVAL; } if (!(card->options.sbp.supported_funcs & setcmd)) return -EOPNOTSUPP; - iob = qeth_get_ipacmd_buffer(card, ipa_cmd_sbp(card), 0); + 
iob = qeth_sbp_build_cmd(card, setcmd, cmdlength); if (!iob) return -ENOMEM; - cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE); - cmd->data.sbp.hdr.cmdlength = cmdlength; - cmd->data.sbp.hdr.command_code = setcmd; - cmd->data.sbp.hdr.used_total = 1; - cmd->data.sbp.hdr.seq_no = 1; rc = qeth_send_ipa_cmd(card, iob, qeth_bridgeport_set_cb, (void *)&cbctl); if (rc < 0) diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index d42e758518ed..ab661a431f7c 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -247,7 +247,8 @@ int qeth_l3_delete_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) return -ENOENT; addr->ref_counter--; - if (addr->type == QETH_IP_TYPE_NORMAL && addr->ref_counter > 0) + if (addr->ref_counter > 0 && (addr->type == QETH_IP_TYPE_NORMAL || + addr->type == QETH_IP_TYPE_RXIP)) return rc; if (addr->in_progress) return -EINPROGRESS; @@ -329,8 +330,9 @@ int qeth_l3_add_ip(struct qeth_card *card, struct qeth_ipaddr *tmp_addr) kfree(addr); } } else { - if (addr->type == QETH_IP_TYPE_NORMAL) - addr->ref_counter++; + if (addr->type == QETH_IP_TYPE_NORMAL || + addr->type == QETH_IP_TYPE_RXIP) + addr->ref_counter++; } return rc; @@ -784,11 +786,11 @@ void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions proto, ipaddr = qeth_l3_get_addr_buffer(proto); if (ipaddr) { if (proto == QETH_PROT_IPV4) { - QETH_CARD_TEXT(card, 2, "addrxip4"); + QETH_CARD_TEXT(card, 2, "delrxip4"); memcpy(&ipaddr->u.a4.addr, addr, 4); ipaddr->u.a4.mask = 0; } else if (proto == QETH_PROT_IPV6) { - QETH_CARD_TEXT(card, 2, "addrxip6"); + QETH_CARD_TEXT(card, 2, "delrxip6"); memcpy(&ipaddr->u.a6.addr, addr, 16); ipaddr->u.a6.pfxlen = 0; } @@ -867,7 +869,7 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *card, return rc; } -static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type) +static u8 qeth_l3_get_qeth_hdr_flags4(int cast_type) { if (cast_type == RTN_MULTICAST) return QETH_CAST_MULTICAST; @@ -876,7 +878,7 @@ static inline u8 qeth_l3_get_qeth_hdr_flags4(int cast_type) return QETH_CAST_UNICAST; } -static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type) +static u8 qeth_l3_get_qeth_hdr_flags6(int cast_type) { u8 ct = QETH_HDR_PASSTHRU | QETH_HDR_IPV6; if (cast_type == RTN_MULTICAST) @@ -890,22 +892,10 @@ static inline u8 qeth_l3_get_qeth_hdr_flags6(int cast_type) static int qeth_l3_setadapter_parms(struct qeth_card *card) { - int rc; + int rc = 0; QETH_DBF_TEXT(SETUP, 2, "setadprm"); - if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { - dev_info(&card->gdev->dev, - "set adapter parameters not supported.\n"); - QETH_DBF_TEXT(SETUP, 2, " notsupp"); - return 0; - } - rc = qeth_query_setadapterparms(card); - if (rc) { - QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: " - "0x%x\n", dev_name(&card->gdev->dev), rc); - return rc; - } if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { rc = qeth_setadpparms_change_macaddr(card); if (rc) @@ -1656,9 +1646,8 @@ static int qeth_l3_vlan_rx_kill_vid(struct net_device *dev, return 0; } -static inline int qeth_l3_rebuild_skb(struct qeth_card *card, - struct sk_buff *skb, struct qeth_hdr *hdr, - unsigned short *vlan_id) +static int qeth_l3_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, + struct qeth_hdr *hdr, unsigned short *vlan_id) { __u16 prot; struct iphdr *ip_hdr; @@ -2408,7 +2397,7 @@ static int qeth_l3_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return rc; } -inline int qeth_l3_get_cast_type(struct qeth_card *card, 
struct sk_buff *skb) +static int qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) { int cast_type = RTN_UNSPEC; struct neighbour *n = NULL; @@ -2546,8 +2535,8 @@ static void qeth_l3_fill_header(struct qeth_card *card, struct qeth_hdr *hdr, rcu_read_unlock(); } -static inline void qeth_l3_hdr_csum(struct qeth_card *card, - struct qeth_hdr *hdr, struct sk_buff *skb) +static void qeth_l3_hdr_csum(struct qeth_card *card, struct qeth_hdr *hdr, + struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); @@ -2582,7 +2571,7 @@ static void qeth_tso_fill_header(struct qeth_card *card, hdr->ext.hdr_len = 28; /*insert non-fix values */ hdr->ext.mss = skb_shinfo(skb)->gso_size; - hdr->ext.dg_hdr_len = (__u16)(iph->ihl*4 + tcph->doff*4); + hdr->ext.dg_hdr_len = (__u16)(ip_hdrlen(skb) + tcp_hdrlen(skb)); hdr->ext.payload_len = (__u16)(skb->len - hdr->ext.dg_hdr_len - sizeof(struct qeth_hdr_tso)); tcph->check = 0; @@ -2648,9 +2637,10 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, qeth_get_priority_queue(card, skb, ipv, cast_type) : card->qdio.default_out_queue]; int tx_bytes = skb->len; + unsigned int hd_len = 0; bool use_tso; int data_offset = -1; - int nr_frags; + unsigned int nr_frags; if (((card->info.type == QETH_CARD_TYPE_IQD) && (((card->options.cq != QETH_CQ_ENABLED) && !ipv) || @@ -2675,11 +2665,12 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, /* Ignore segment size from skb_is_gso(), 1 page is always used. */ use_tso = skb_is_gso(skb) && - (qeth_get_ip_protocol(skb) == IPPROTO_TCP) && (ipv == 4); + (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4); if (card->info.type == QETH_CARD_TYPE_IQD) { new_skb = skb; data_offset = ETH_HLEN; + hd_len = sizeof(*hdr); hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC); if (!hdr) goto tx_drop; @@ -2727,6 +2718,7 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, if (lin_rc) goto tx_drop; } + nr_frags = skb_shinfo(new_skb)->nr_frags; if (use_tso) { hdr = skb_push(new_skb, sizeof(struct qeth_hdr_tso)); @@ -2766,19 +2758,21 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, if (card->info.type != QETH_CARD_TYPE_IQD) { int len; - if (use_tso) - len = ((unsigned long)tcp_hdr(new_skb) + - tcp_hdrlen(new_skb)) - - (unsigned long)new_skb->data; - else + if (use_tso) { + hd_len = sizeof(struct qeth_hdr_tso) + + ip_hdrlen(new_skb) + tcp_hdrlen(new_skb); + len = hd_len; + } else { len = sizeof(struct qeth_hdr_layer3); + } if (qeth_hdr_chk_and_bounce(new_skb, &hdr, len)) goto tx_drop; - rc = qeth_do_send_packet(card, queue, new_skb, hdr, elements); + rc = qeth_do_send_packet(card, queue, new_skb, hdr, hd_len, + hd_len, elements); } else rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, - data_offset, 0); + data_offset, hd_len); if (!rc) { card->stats.tx_packets++; @@ -2786,7 +2780,6 @@ static netdev_tx_t qeth_l3_hard_start_xmit(struct sk_buff *skb, if (new_skb != skb) dev_kfree_skb_any(skb); if (card->options.performance_stats) { - nr_frags = skb_shinfo(new_skb)->nr_frags; if (use_tso) { card->perf_stats.large_send_bytes += tx_bytes; card->perf_stats.large_send_cnt++; diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 1a80ce41425e..e8bcc314cc5f 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c @@ -895,9 +895,26 @@ static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev, static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto, u8 *addr) { + __be32 ipv4_addr; + struct in6_addr ipv6_addr; 
+ if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { return -EINVAL; } + if (proto == QETH_PROT_IPV4) { + memcpy(&ipv4_addr, addr, sizeof(ipv4_addr)); + if (ipv4_is_multicast(ipv4_addr)) { + QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n"); + return -EINVAL; + } + } else if (proto == QETH_PROT_IPV6) { + memcpy(&ipv6_addr, addr, sizeof(ipv6_addr)); + if (ipv6_addr_is_multicast(&ipv6_addr)) { + QETH_DBF_MESSAGE(2, "multicast rxip not supported.\n"); + return -EINVAL; + } + } + return 0; } |