| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-07-15 17:36:50 +0300 |
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-07-15 17:36:50 +0300 |
| commit | ca6e4405779ed56ebac941570615abd667c72c02 (patch) | |
| tree | 1b4fef494bc8d64ed889d52c7aabb4e09e4ffef3 /drivers/net/ethernet/brocade/bna/bnad.c | |
| parent | 8f539a83efa7dceb7c2257ca96e2dfc846bd12f6 (diff) | |
| parent | ccfb8b2ed4d4e12c3c35de3db5fbbbaa11277736 (diff) | |
| download | linux-ca6e4405779ed56ebac941570615abd667c72c02.tar.xz | |
Merge tag 'drm-intel-fixes-2015-07-15' into drm-intel-next-queued
Backmerge fixes since it's getting out of hand again with the massive
split due to atomic between -next and 4.2-rc. All the bugfixes in
4.2-rc are addressed already (by converting more towards atomic
instead of minimal duct-tape) so just always pick the version in next
for the conflicts in modeset code.
All the other conflicts are just adjacent lines changed.
Conflicts:
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_ringbuffer.h
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'drivers/net/ethernet/brocade/bna/bnad.c')
-rw-r--r-- | drivers/net/ethernet/brocade/bna/bnad.c | 123 |
1 file changed, 50 insertions, 73 deletions
```diff
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 37072a83f9d6..0612b19f6313 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -57,7 +57,8 @@ static u32 bnad_rxqs_per_cq = 2;
 static u32 bna_id;
 static struct mutex bnad_list_mutex;
 static LIST_HEAD(bnad_list);
-static const u8 bnad_bcast_addr[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+static const u8 bnad_bcast_addr[] __aligned(2) =
+        { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
 
 /*
  * Local MACROS
@@ -308,7 +309,7 @@ bnad_rxq_alloc_init(struct bnad *bnad, struct bna_rcb *rcb)
                 }
         }
 
-        BUG_ON(((PAGE_SIZE << order) % unmap_q->map_size));
+        BUG_ON((PAGE_SIZE << order) % unmap_q->map_size);
 
         return 0;
 }
@@ -724,7 +725,6 @@ next:
                         cmpl->valid = 0;
                         BNA_QE_INDX_INC(ccb->producer_index, ccb->q_depth);
                 }
-                cmpl = &cq[ccb->producer_index];
         }
 
         napi_gro_flush(&rx_ctrl->napi, false);
@@ -757,7 +757,7 @@ bnad_msix_rx(int irq, void *data)
         struct bna_ccb *ccb = (struct bna_ccb *)data;
 
         if (ccb) {
-                ((struct bnad_rx_ctrl *)(ccb->ctrl))->rx_intr_ctr++;
+                ((struct bnad_rx_ctrl *)ccb->ctrl)->rx_intr_ctr++;
                 bnad_netif_rx_schedule_poll(ccb->bnad, ccb);
         }
 
@@ -875,9 +875,9 @@ bnad_set_netdev_perm_addr(struct bnad *bnad)
 {
         struct net_device *netdev = bnad->netdev;
 
-        memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+        ether_addr_copy(netdev->perm_addr, bnad->perm_addr);
         if (is_zero_ether_addr(netdev->dev_addr))
-                memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
+                ether_addr_copy(netdev->dev_addr, bnad->perm_addr);
 }
 
 /* Control Path Handlers */
@@ -946,8 +946,7 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
         if (link_up) {
                 if (!netif_carrier_ok(bnad->netdev)) {
                         uint tx_id, tcb_id;
-                        printk(KERN_WARNING "bna: %s link up\n",
-                                bnad->netdev->name);
+                        netdev_info(bnad->netdev, "link up\n");
                         netif_carrier_on(bnad->netdev);
                         BNAD_UPDATE_CTR(bnad, link_toggle);
                         for (tx_id = 0; tx_id < bnad->num_tx; tx_id++) {
@@ -966,10 +965,6 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
                                                 /*
                                                  * Force an immediate
                                                  * Transmit Schedule */
-                                                printk(KERN_INFO "bna: %s %d "
-                                                      "TXQ_STARTED\n",
-                                                       bnad->netdev->name,
-                                                       txq_id);
                                                 netif_wake_subqueue(
                                                                 bnad->netdev,
                                                                 txq_id);
@@ -987,8 +982,7 @@ bnad_cb_ethport_link_status(struct bnad *bnad,
                 }
         } else {
                 if (netif_carrier_ok(bnad->netdev)) {
-                        printk(KERN_WARNING "bna: %s link down\n",
-                                bnad->netdev->name);
+                        netdev_info(bnad->netdev, "link down\n");
                         netif_carrier_off(bnad->netdev);
                         BNAD_UPDATE_CTR(bnad, link_toggle);
                 }
@@ -1058,8 +1052,6 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tx *tx)
                 txq_id = tcb->id;
                 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
                 netif_stop_subqueue(bnad->netdev, txq_id);
-                printk(KERN_INFO "bna: %s %d TXQ_STOPPED\n",
-                        bnad->netdev->name, txq_id);
         }
 }
 
@@ -1082,8 +1074,6 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
                 BUG_ON(*(tcb->hw_consumer_index) != 0);
 
                 if (netif_carrier_ok(bnad->netdev)) {
-                        printk(KERN_INFO "bna: %s %d TXQ_STARTED\n",
-                                bnad->netdev->name, txq_id);
                         netif_wake_subqueue(bnad->netdev, txq_id);
                         BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
                 }
@@ -1094,8 +1084,8 @@ bnad_cb_tx_resume(struct bnad *bnad, struct bna_tx *tx)
          * get a 0 MAC address. We try to get the MAC address
          * again here.
          */
-        if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
-                bna_enet_perm_mac_get(&bnad->bna.enet, &bnad->perm_addr);
+        if (is_zero_ether_addr(bnad->perm_addr)) {
+                bna_enet_perm_mac_get(&bnad->bna.enet, bnad->perm_addr);
                 bnad_set_netdev_perm_addr(bnad);
         }
 }
@@ -1703,7 +1693,7 @@ bnad_ioc_timeout(unsigned long data)
         unsigned long flags;
 
         spin_lock_irqsave(&bnad->bna_lock, flags);
-        bfa_nw_ioc_timeout((void *) &bnad->bna.ioceth.ioc);
+        bfa_nw_ioc_timeout(&bnad->bna.ioceth.ioc);
         spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1714,7 +1704,7 @@ bnad_ioc_hb_check(unsigned long data)
         unsigned long flags;
 
         spin_lock_irqsave(&bnad->bna_lock, flags);
-        bfa_nw_ioc_hb_check((void *) &bnad->bna.ioceth.ioc);
+        bfa_nw_ioc_hb_check(&bnad->bna.ioceth.ioc);
         spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1725,7 +1715,7 @@ bnad_iocpf_timeout(unsigned long data)
         unsigned long flags;
 
         spin_lock_irqsave(&bnad->bna_lock, flags);
-        bfa_nw_iocpf_timeout((void *) &bnad->bna.ioceth.ioc);
+        bfa_nw_iocpf_timeout(&bnad->bna.ioceth.ioc);
         spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1736,7 +1726,7 @@ bnad_iocpf_sem_timeout(unsigned long data)
         unsigned long flags;
 
         spin_lock_irqsave(&bnad->bna_lock, flags);
-        bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.ioceth.ioc);
+        bfa_nw_iocpf_sem_timeout(&bnad->bna.ioceth.ioc);
         spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
@@ -1862,8 +1852,7 @@ bnad_netdev_mc_list_get(struct net_device *netdev, u8 *mc_list)
         struct netdev_hw_addr *mc_addr;
 
         netdev_for_each_mc_addr(mc_addr, netdev) {
-                memcpy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0],
-                        ETH_ALEN);
+                ether_addr_copy(&mc_list[i * ETH_ALEN], &mc_addr->addr[0]);
                 i++;
         }
 }
@@ -2137,7 +2126,7 @@ bnad_reinit_rx(struct bnad *bnad)
                 current_err = bnad_setup_rx(bnad, rx_id);
                 if (current_err && !err) {
                         err = current_err;
-                        pr_err("RXQ:%u setup failed\n", rx_id);
+                        netdev_err(netdev, "RXQ:%u setup failed\n", rx_id);
                 }
         }
 
@@ -2338,7 +2327,7 @@ bnad_rx_coalescing_timeo_set(struct bnad *bnad)
  * Called with bnad->bna_lock held
  */
 int
-bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
+bnad_mac_addr_set_locked(struct bnad *bnad, const u8 *mac_addr)
 {
         int ret;
 
@@ -2349,7 +2338,7 @@ bnad_mac_addr_set_locked(struct bnad *bnad, u8 *mac_addr)
         if (!bnad->rx_info[0].rx)
                 return 0;
 
-        ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr, NULL);
+        ret = bna_rx_ucast_set(bnad->rx_info[0].rx, mac_addr);
         if (ret != BNA_CB_SUCCESS)
                 return -EADDRNOTAVAIL;
 
@@ -2367,8 +2356,8 @@ bnad_enable_default_bcast(struct bnad *bnad)
         init_completion(&bnad->bnad_completions.mcast_comp);
 
         spin_lock_irqsave(&bnad->bna_lock, flags);
-        ret = bna_rx_mcast_add(rx_info->rx, (u8 *)bnad_bcast_addr,
-                                bnad_cb_rx_mcast_add);
+        ret = bna_rx_mcast_add(rx_info->rx, bnad_bcast_addr,
+                               bnad_cb_rx_mcast_add);
         spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
         if (ret == BNA_CB_SUCCESS)
@@ -2673,8 +2662,9 @@ bnad_enable_msix(struct bnad *bnad)
         if (ret < 0) {
                 goto intx_mode;
         } else if (ret < bnad->msix_num) {
-                pr_warn("BNA: %d MSI-X vectors allocated < %d requested\n",
-                        ret, bnad->msix_num);
+                dev_warn(&bnad->pcidev->dev,
+                         "%d MSI-X vectors allocated < %d requested\n",
+                         ret, bnad->msix_num);
 
                 spin_lock_irqsave(&bnad->bna_lock, flags);
                 /* ret = #of vectors that we got */
@@ -2696,7 +2686,8 @@ bnad_enable_msix(struct bnad *bnad)
         return;
 
 intx_mode:
-        pr_warn("BNA: MSI-X enable failed - operating in INTx mode\n");
+        dev_warn(&bnad->pcidev->dev,
+                 "MSI-X enable failed - operating in INTx mode\n");
 
         kfree(bnad->msix_table);
         bnad->msix_table = NULL;
@@ -2754,7 +2745,7 @@ bnad_open(struct net_device *netdev)
         spin_lock_irqsave(&bnad->bna_lock, flags);
         bna_enet_mtu_set(&bnad->bna.enet,
                          BNAD_FRAME_SIZE(bnad->netdev->mtu), NULL);
-        bna_enet_pause_config(&bnad->bna.enet, &pause_config, NULL);
+        bna_enet_pause_config(&bnad->bna.enet, &pause_config);
         bna_enet_enable(&bnad->bna.enet);
         spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
@@ -3128,7 +3119,7 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
         int entry;
 
         if (netdev_uc_empty(bnad->netdev)) {
-                bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+                bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
                 return;
         }
 
@@ -3141,13 +3132,11 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
 
         entry = 0;
         netdev_for_each_uc_addr(ha, netdev) {
-                memcpy(&mac_list[entry * ETH_ALEN],
-                       &ha->addr[0], ETH_ALEN);
+                ether_addr_copy(&mac_list[entry * ETH_ALEN], &ha->addr[0]);
                 entry++;
         }
 
-        ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry,
-                                   mac_list, NULL);
+        ret = bna_rx_ucast_listset(bnad->rx_info[0].rx, entry, mac_list);
         kfree(mac_list);
 
         if (ret != BNA_CB_SUCCESS)
@@ -3158,7 +3147,7 @@ bnad_set_rx_ucast_fltr(struct bnad *bnad)
         /* ucast packets not in UCAM are routed to default function */
 mode_default:
         bnad->cfg_flags |= BNAD_CF_DEFAULT;
-        bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL, NULL);
+        bna_rx_ucast_listset(bnad->rx_info[0].rx, 0, NULL);
 }
 
 static void
@@ -3183,12 +3172,11 @@ bnad_set_rx_mcast_fltr(struct bnad *bnad)
         if (mac_list == NULL)
                 goto mode_allmulti;
 
-        memcpy(&mac_list[0], &bnad_bcast_addr[0], ETH_ALEN);
+        ether_addr_copy(&mac_list[0], &bnad_bcast_addr[0]);
 
         /* copy rest of the MCAST addresses */
         bnad_netdev_mc_list_get(netdev, mac_list);
-        ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1,
-                                   mac_list, NULL);
+        ret = bna_rx_mcast_listset(bnad->rx_info[0].rx, mc_count + 1, mac_list);
         kfree(mac_list);
 
         if (ret != BNA_CB_SUCCESS)
@@ -3198,7 +3186,7 @@ bnad_set_rx_mcast_fltr(struct bnad *bnad)
 
 mode_allmulti:
         bnad->cfg_flags |= BNAD_CF_ALLMULTI;
-        bna_rx_mcast_delall(bnad->rx_info[0].rx, NULL);
+        bna_rx_mcast_delall(bnad->rx_info[0].rx);
 }
 
 void
@@ -3237,7 +3225,7 @@ bnad_set_rx_mode(struct net_device *netdev)
 
         mode_mask = BNA_RXMODE_PROMISC | BNA_RXMODE_DEFAULT |
                         BNA_RXMODE_ALLMULTI;
-        bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
+        bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask);
 
         spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
@@ -3248,19 +3236,18 @@ bnad_set_rx_mode(struct net_device *netdev)
  * in a non-blocking context.
  */
 static int
-bnad_set_mac_address(struct net_device *netdev, void *mac_addr)
+bnad_set_mac_address(struct net_device *netdev, void *addr)
 {
         int err;
         struct bnad *bnad = netdev_priv(netdev);
-        struct sockaddr *sa = (struct sockaddr *)mac_addr;
+        struct sockaddr *sa = (struct sockaddr *)addr;
         unsigned long flags;
 
         spin_lock_irqsave(&bnad->bna_lock, flags);
 
         err = bnad_mac_addr_set_locked(bnad, sa->sa_data);
 
-        if (!err)
-                memcpy(netdev->dev_addr, sa->sa_data, netdev->addr_len);
+        ether_addr_copy(netdev->dev_addr, sa->sa_data);
 
         spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
@@ -3487,8 +3474,8 @@ bnad_init(struct bnad *bnad,
                 dev_err(&pdev->dev, "ioremap for bar0 failed\n");
                 return -ENOMEM;
         }
-        pr_info("bar0 mapped to %p, len %llu\n", bnad->bar0,
-               (unsigned long long) bnad->mmio_len);
+        dev_info(&pdev->dev, "bar0 mapped to %p, len %llu\n", bnad->bar0,
+                 (unsigned long long) bnad->mmio_len);
 
         spin_lock_irqsave(&bnad->bna_lock, flags);
         if (!bnad_msix_disable)
@@ -3609,13 +3596,10 @@ bnad_pci_probe(struct pci_dev *pdev,
         struct bfa_pcidev pcidev_info;
         unsigned long flags;
 
-        pr_info("bnad_pci_probe : (0x%p, 0x%p) PCI Func : (%d)\n",
-               pdev, pcidev_id, PCI_FUNC(pdev->devfn));
-
         mutex_lock(&bnad_fwimg_mutex);
         if (!cna_get_firmware_buf(pdev)) {
                 mutex_unlock(&bnad_fwimg_mutex);
-                pr_warn("Failed to load Firmware Image!\n");
+                dev_err(&pdev->dev, "failed to load firmware image!\n");
                 return -ENODEV;
         }
         mutex_unlock(&bnad_fwimg_mutex);
@@ -3693,17 +3677,13 @@ bnad_pci_probe(struct pci_dev *pdev,
 
         /* Set up timers */
         setup_timer(&bnad->bna.ioceth.ioc.ioc_timer, bnad_ioc_timeout,
-                                ((unsigned long)bnad));
+                    (unsigned long)bnad);
         setup_timer(&bnad->bna.ioceth.ioc.hb_timer, bnad_ioc_hb_check,
-                                ((unsigned long)bnad));
+                    (unsigned long)bnad);
         setup_timer(&bnad->bna.ioceth.ioc.iocpf_timer, bnad_iocpf_timeout,
-                                ((unsigned long)bnad));
+                    (unsigned long)bnad);
         setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
-                                ((unsigned long)bnad));
-
-        /* Now start the timer before calling IOC */
-        mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
-                  jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
+                    (unsigned long)bnad);
 
         /*
          * Start the chip
@@ -3712,8 +3692,7 @@ bnad_pci_probe(struct pci_dev *pdev,
          */
         err = bnad_ioceth_enable(bnad);
         if (err) {
-                pr_err("BNA: Initialization failed err=%d\n",
-                       err);
+                dev_err(&pdev->dev, "initialization failed err=%d\n", err);
                 goto probe_success;
         }
 
@@ -3746,7 +3725,7 @@ bnad_pci_probe(struct pci_dev *pdev,
 
         /* Get the burnt-in mac */
         spin_lock_irqsave(&bnad->bna_lock, flags);
-        bna_enet_perm_mac_get(&bna->enet, &bnad->perm_addr);
+        bna_enet_perm_mac_get(&bna->enet, bnad->perm_addr);
         bnad_set_netdev_perm_addr(bnad);
         spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
@@ -3755,7 +3734,7 @@ bnad_pci_probe(struct pci_dev *pdev,
         /* Finally, reguister with net_device layer */
         err = register_netdev(netdev);
         if (err) {
-                pr_err("BNA : Registering with netdev failed\n");
+                dev_err(&pdev->dev, "registering net device failed\n");
                 goto probe_uninit;
         }
         set_bit(BNAD_RF_NETDEV_REGISTERED, &bnad->run_flags);
@@ -3807,7 +3786,6 @@ bnad_pci_remove(struct pci_dev *pdev)
         if (!netdev)
                 return;
 
-        pr_info("%s bnad_pci_remove\n", netdev->name);
         bnad = netdev_priv(netdev);
         bna = &bnad->bna;
 
@@ -3868,15 +3846,14 @@ bnad_module_init(void)
 {
         int err;
 
-        pr_info("QLogic BR-series 10G Ethernet driver - version: %s\n",
-                BNAD_VERSION);
+        pr_info("bna: QLogic BR-series 10G Ethernet driver - version: %s\n",
+                BNAD_VERSION);
 
         bfa_nw_ioc_auto_recover(bnad_ioc_auto_recover);
 
         err = pci_register_driver(&bnad_pci_driver);
         if (err < 0) {
-                pr_err("bna : PCI registration failed in module init "
-                        "(%d)\n", err);
+                pr_err("bna: PCI driver registration failed err=%d\n", err);
                 return err;
         }
 
```
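The recurring change in this diff is replacing open-coded memcpy() of 6-byte MAC addresses with ether_addr_copy() and dropping redundant casts. A minimal sketch of that pattern follows; the identifiers example_bcast_addr and example_copy_bcast are illustrative, not part of bnad.c. ether_addr_copy() from <linux/etherdevice.h> copies the address as 16-bit words, so both operands must be 2-byte aligned, which is why the diff also marks bnad_bcast_addr as __aligned(2).

```c
/* Illustrative sketch only -- not part of the commit above. */
#include <linux/etherdevice.h>	/* ether_addr_copy(), ETH_ALEN */

/*
 * Like bnad_bcast_addr in the diff: __aligned(2) satisfies
 * ether_addr_copy()'s requirement that source and destination
 * are both u16-aligned.
 */
static const u8 example_bcast_addr[ETH_ALEN] __aligned(2) = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};

/*
 * Copy the broadcast address into a caller-supplied, 2-byte-aligned
 * buffer; the older style would have been
 * memcpy(dst, example_bcast_addr, ETH_ALEN).
 */
static void example_copy_bcast(u8 *dst)
{
	ether_addr_copy(dst, example_bcast_addr);
}
```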