Diffstat (limited to 'drivers')
74 files changed, 2401 insertions, 568 deletions
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c index a43fa1a57d57..1502c50273b5 100644 --- a/drivers/acpi/acpi_pad.c +++ b/drivers/acpi/acpi_pad.c @@ -36,6 +36,7 @@ #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator" #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80 static DEFINE_MUTEX(isolated_cpus_lock); +static DEFINE_MUTEX(round_robin_lock); static unsigned long power_saving_mwait_eax; @@ -107,7 +108,7 @@ static void round_robin_cpu(unsigned int tsk_index) if (!alloc_cpumask_var(&tmp, GFP_KERNEL)) return; - mutex_lock(&isolated_cpus_lock); + mutex_lock(&round_robin_lock); cpumask_clear(tmp); for_each_cpu(cpu, pad_busy_cpus) cpumask_or(tmp, tmp, topology_thread_cpumask(cpu)); @@ -116,7 +117,7 @@ static void round_robin_cpu(unsigned int tsk_index) if (cpumask_empty(tmp)) cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus); if (cpumask_empty(tmp)) { - mutex_unlock(&isolated_cpus_lock); + mutex_unlock(&round_robin_lock); return; } for_each_cpu(cpu, tmp) { @@ -131,7 +132,7 @@ static void round_robin_cpu(unsigned int tsk_index) tsk_in_cpu[tsk_index] = preferred_cpu; cpumask_set_cpu(preferred_cpu, pad_busy_cpus); cpu_weight[preferred_cpu]++; - mutex_unlock(&isolated_cpus_lock); + mutex_unlock(&round_robin_lock); set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu)); } diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index 5577762daee1..6686b1eaf13e 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx, u8 ins = entry->instruction; if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) - return acpi_os_map_generic_address(&entry->register_region); + return apei_map_generic_address(&entry->register_region); return 0; } @@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx, u8 ins = entry->instruction; if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER) - acpi_os_unmap_generic_address(&entry->register_region); + apei_unmap_generic_address(&entry->register_region); return 0; } @@ -606,6 +606,19 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr, return 0; } +int apei_map_generic_address(struct acpi_generic_address *reg) +{ + int rc; + u32 access_bit_width; + u64 address; + + rc = apei_check_gar(reg, &address, &access_bit_width); + if (rc) + return rc; + return acpi_os_map_generic_address(reg); +} +EXPORT_SYMBOL_GPL(apei_map_generic_address); + /* read GAR in interrupt (including NMI) or process context */ int apei_read(u64 *val, struct acpi_generic_address *reg) { diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h index cca240a33038..f220d642136e 100644 --- a/drivers/acpi/apei/apei-internal.h +++ b/drivers/acpi/apei/apei-internal.h @@ -7,6 +7,8 @@ #define APEI_INTERNAL_H #include <linux/cper.h> +#include <linux/acpi.h> +#include <linux/acpi_io.h> struct apei_exec_context; @@ -68,6 +70,13 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio /* IP has been set in instruction function */ #define APEI_EXEC_SET_IP 1 +int apei_map_generic_address(struct acpi_generic_address *reg); + +static inline void apei_unmap_generic_address(struct acpi_generic_address *reg) +{ + acpi_os_unmap_generic_address(reg); +} + int apei_read(u64 *val, struct acpi_generic_address *reg); int apei_write(u64 val, struct acpi_generic_address *reg); diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 
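The apei_map_generic_address() wrapper above validates the GAR descriptor with apei_check_gar() before handing it to acpi_os_map_generic_address(), so a malformed register is rejected instead of mapped. A minimal userspace sketch of the same validate-then-map shape; the types and the check/map stand-ins are invented, not the kernel API:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct generic_address {
    uint64_t address;
    uint8_t bit_width;
};

static int check_gar(const struct generic_address *reg,
                     uint64_t *paddr, uint32_t *bit_width)
{
    if (!reg->address || !reg->bit_width)
        return -EINVAL;        /* reject malformed descriptors early */
    *paddr = reg->address;
    *bit_width = reg->bit_width;
    return 0;
}

static int map_address(const struct generic_address *reg)
{
    printf("mapping %#llx\n", (unsigned long long)reg->address);
    return 0;
}

/* Validate first, map only on success, as apei_map_generic_address() does. */
static int map_generic_address(const struct generic_address *reg)
{
    uint64_t paddr;
    uint32_t bit_width;
    int rc = check_gar(reg, &paddr, &bit_width);

    if (rc)
        return rc;
    return map_address(reg);
}

int main(void)
{
    struct generic_address ok = { 0xfed40000, 32 }, bad = { 0, 0 };

    printf("ok:  %d\n", map_generic_address(&ok));
    printf("bad: %d\n", map_generic_address(&bad));
    return 0;
}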
9b3cac0abecc..1599566ed1fe 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -301,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic) if (!ghes) return ERR_PTR(-ENOMEM); ghes->generic = generic; - rc = acpi_os_map_generic_address(&generic->error_status_address); + rc = apei_map_generic_address(&generic->error_status_address); if (rc) goto err_free; error_block_length = generic->error_block_length; @@ -321,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic) return ghes; err_unmap: - acpi_os_unmap_generic_address(&generic->error_status_address); + apei_unmap_generic_address(&generic->error_status_address); err_free: kfree(ghes); return ERR_PTR(rc); @@ -330,7 +330,7 @@ err_free: static void ghes_fini(struct ghes *ghes) { kfree(ghes->estatus); - acpi_os_unmap_generic_address(&ghes->generic->error_status_address); + apei_unmap_generic_address(&ghes->generic->error_status_address); } enum { diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index f3decb30223f..47a8caa89dbe 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -224,6 +224,7 @@ static void lapic_timer_state_broadcast(struct acpi_processor *pr, /* * Suspend / resume control */ +static int acpi_idle_suspend; static u32 saved_bm_rld; static void acpi_idle_bm_rld_save(void) @@ -242,13 +243,21 @@ static void acpi_idle_bm_rld_restore(void) int acpi_processor_suspend(struct acpi_device * device, pm_message_t state) { + if (acpi_idle_suspend == 1) + return 0; + acpi_idle_bm_rld_save(); + acpi_idle_suspend = 1; return 0; } int acpi_processor_resume(struct acpi_device * device) { + if (acpi_idle_suspend == 0) + return 0; + acpi_idle_bm_rld_restore(); + acpi_idle_suspend = 0; return 0; } @@ -754,6 +763,12 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev, local_irq_disable(); + if (acpi_idle_suspend) { + local_irq_enable(); + cpu_relax(); + return -EBUSY; + } + lapic_timer_state_broadcast(pr, cx, 1); kt1 = ktime_get_real(); acpi_idle_do_entry(cx); @@ -823,6 +838,12 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev, local_irq_disable(); + if (acpi_idle_suspend) { + local_irq_enable(); + cpu_relax(); + return -EBUSY; + } + if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* @@ -907,14 +928,21 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev, drv, drv->safe_state_index); } else { local_irq_disable(); - acpi_safe_halt(); + if (!acpi_idle_suspend) + acpi_safe_halt(); local_irq_enable(); - return -EINVAL; + return -EBUSY; } } local_irq_disable(); + if (acpi_idle_suspend) { + local_irq_enable(); + cpu_relax(); + return -EBUSY; + } + if (cx->entry_method != ACPI_CSTATE_FFH) { current_thread_info()->status &= ~TS_POLLING; /* diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c index 9f66181c814e..240a24400976 100644 --- a/drivers/acpi/sysfs.c +++ b/drivers/acpi/sysfs.c @@ -173,7 +173,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp) { int result = 0; - if (!strncmp(val, "enable", strlen("enable") - 1)) { + if (!strncmp(val, "enable", strlen("enable"))) { result = acpi_debug_trace(trace_method_name, trace_debug_level, trace_debug_layer, 0); if (result) @@ -181,7 +181,7 @@ static int param_set_trace_state(const char *val, struct kernel_param *kp) goto exit; } - if (!strncmp(val, "disable", strlen("disable") - 1)) { + if (!strncmp(val, "disable", strlen("disable"))) { int name = 0; result = acpi_debug_trace((char *)&name, 
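The sysfs hunk above drops the "- 1" from the strlen() argument: with the shortened length only the first five bytes were compared, so any string sharing that prefix was accepted as "enable". A runnable demonstration, with a hypothetical garbage input:

#include <stdio.h>
#include <string.h>

int main(void)
{
    const char *val = "enablx";    /* invented, almost-right input */

    /* Old comparison: only strlen("enable") - 1 == 5 bytes checked. */
    printf("buggy: %s\n",
           !strncmp(val, "enable", strlen("enable") - 1) ? "match" : "no match");

    /* Fixed comparison: all 6 bytes of "enable" must match. */
    printf("fixed: %s\n",
           !strncmp(val, "enable", strlen("enable")) ? "match" : "no match");
    return 0;
}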
trace_debug_level, trace_debug_layer, 0); diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index a576575617d7..1e0a9e17c31d 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -558,6 +558,8 @@ acpi_video_bus_DOS(struct acpi_video_bus *video, int bios_flag, int lcd_flag) union acpi_object arg0 = { ACPI_TYPE_INTEGER }; struct acpi_object_list args = { 1, &arg0 }; + if (!video->cap._DOS) + return 0; if (bios_flag < 0 || bios_flag > 3 || lcd_flag < 0 || lcd_flag > 1) return -EINVAL; diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index e0fb5b0435a3..9cb845e49334 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1031,7 +1031,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) dpm_wait_for_children(dev, async); if (async_error) - return 0; + goto Complete; pm_runtime_get_noresume(dev); if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) @@ -1040,7 +1040,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (pm_wakeup_pending()) { pm_runtime_put_sync(dev); async_error = -EBUSY; - return 0; + goto Complete; } device_lock(dev); @@ -1097,6 +1097,8 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) } device_unlock(dev); + + Complete: complete_all(&dev->power.completion); if (error) { diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c index b5c5ff53cb57..fcb956bb4b4c 100644 --- a/drivers/block/drbd/drbd_bitmap.c +++ b/drivers/block/drbd/drbd_bitmap.c @@ -1475,10 +1475,17 @@ void _drbd_bm_set_bits(struct drbd_conf *mdev, const unsigned long s, const unsi first_word = 0; spin_lock_irq(&b->bm_lock); } - /* last page (respectively only page, for first page == last page) */ last_word = MLPP(el >> LN2_BPL); - bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word); + + /* consider bitmap->bm_bits = 32768, bitmap->bm_number_of_pages = 1. (or multiples). + * ==> e = 32767, el = 32768, last_page = 2, + * and now last_word = 0. + * We do not want to touch last_page in this case, + * as we did not allocate it, it is not present in bitmap->bm_pages. + */ + if (last_word) + bm_set_full_words_within_one_page(mdev->bitmap, last_page, first_word, last_word); /* possibly trailing bits. * example: (e & 63) == 63, el will be e+1. diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 9c5c84946b05..8e93a6ac9bb6 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -472,12 +472,17 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, req->rq_state |= RQ_LOCAL_COMPLETED; req->rq_state &= ~RQ_LOCAL_PENDING; - D_ASSERT(!(req->rq_state & RQ_NET_MASK)); + if (req->rq_state & RQ_LOCAL_ABORTED) { + _req_may_be_done(req, m); + break; + } __drbd_chk_io_error(mdev, false); goto_queue_for_net_read: + D_ASSERT(!(req->rq_state & RQ_NET_MASK)); + /* no point in retrying if there is no good remote data, * or we have no connection. */ if (mdev->state.pdsk != D_UP_TO_DATE) { @@ -765,6 +770,40 @@ static int drbd_may_do_local_read(struct drbd_conf *mdev, sector_t sector, int s return 0 == drbd_bm_count_bits(mdev, sbnr, ebnr); } +static void maybe_pull_ahead(struct drbd_conf *mdev) +{ + int congested = 0; + + /* If I don't even have good local storage, we can not reasonably try + * to pull ahead of the peer. We also need the local reference to make + * sure mdev->act_log is there. + * Note: caller has to make sure that net_conf is there. 
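The __device_suspend() change replaces two early "return 0" statements with "goto Complete", so complete_all() now runs on every exit path; without it, an async waiter blocked on dev->power.completion would never be released. A compilable sketch of that single-exit pattern, with invented fields and inputs:

#include <errno.h>
#include <stdio.h>

struct dev { int completed; };

static void complete_all(struct dev *d)
{
    d->completed = 1;
}

/* Every exit path funnels through the Complete label; the old early
 * returns skipped complete_all() entirely. */
static int device_suspend(struct dev *d, int async_error, int wakeup_pending)
{
    int error = 0;

    if (async_error)
        goto Complete;

    if (wakeup_pending) {
        error = -EBUSY;
        goto Complete;
    }

    /* ... the real suspend callbacks would run here ... */

Complete:
    complete_all(d);    /* waiters are released no matter how we exited */
    return error;
}

int main(void)
{
    struct dev d = { 0 };
    int err = device_suspend(&d, 1, 0);

    printf("error=%d completed=%d\n", err, d.completed);
    return 0;
}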
+ */ + if (!get_ldev_if_state(mdev, D_UP_TO_DATE)) + return; + + if (mdev->net_conf->cong_fill && + atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) { + dev_info(DEV, "Congestion-fill threshold reached\n"); + congested = 1; + } + + if (mdev->act_log->used >= mdev->net_conf->cong_extents) { + dev_info(DEV, "Congestion-extents threshold reached\n"); + congested = 1; + } + + if (congested) { + queue_barrier(mdev); /* last barrier, after mirrored writes */ + + if (mdev->net_conf->on_congestion == OC_PULL_AHEAD) + _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL); + else /*mdev->net_conf->on_congestion == OC_DISCONNECT */ + _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL); + } + put_ldev(mdev); +} + static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio, unsigned long start_time) { const int rw = bio_rw(bio); @@ -972,29 +1011,8 @@ allocate_barrier: _req_mod(req, queue_for_send_oos); if (remote && - mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) { - int congested = 0; - - if (mdev->net_conf->cong_fill && - atomic_read(&mdev->ap_in_flight) >= mdev->net_conf->cong_fill) { - dev_info(DEV, "Congestion-fill threshold reached\n"); - congested = 1; - } - - if (mdev->act_log->used >= mdev->net_conf->cong_extents) { - dev_info(DEV, "Congestion-extents threshold reached\n"); - congested = 1; - } - - if (congested) { - queue_barrier(mdev); /* last barrier, after mirrored writes */ - - if (mdev->net_conf->on_congestion == OC_PULL_AHEAD) - _drbd_set_state(_NS(mdev, conn, C_AHEAD), 0, NULL); - else /*mdev->net_conf->on_congestion == OC_DISCONNECT */ - _drbd_set_state(_NS(mdev, conn, C_DISCONNECTING), 0, NULL); - } - } + mdev->net_conf->on_congestion != OC_BLOCK && mdev->agreed_pro_version >= 96) + maybe_pull_ahead(mdev); spin_unlock_irq(&mdev->req_lock); kfree(b); /* if someone else has beaten us to it... */ diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c index cce7df367b79..553f43a90953 100644 --- a/drivers/block/floppy.c +++ b/drivers/block/floppy.c @@ -671,6 +671,7 @@ static void __reschedule_timeout(int drive, const char *message) if (drive == current_reqD) drive = current_drive; + __cancel_delayed_work(&fd_timeout); if (drive < 0 || drive >= N_DRIVE) { delay = 20UL * HZ; diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 264bc77dcb91..a8fddeb3d638 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -37,6 +37,7 @@ #include <linux/kthread.h> #include <../drivers/ata/ahci.h> #include <linux/export.h> +#include <linux/debugfs.h> #include "mtip32xx.h" #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32) @@ -85,6 +86,7 @@ static int instance; * allocated in mtip_init(). */ static int mtip_major; +static struct dentry *dfs_parent; static DEFINE_SPINLOCK(rssd_index_lock); static DEFINE_IDA(rssd_index_ida); @@ -2546,7 +2548,7 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd, } /* - * Sysfs register/status dump. + * Sysfs status dump. * * @dev Pointer to the device structure, passed by the kernrel. * @attr Pointer to the device_attribute structure passed by the kernel. @@ -2555,45 +2557,68 @@ static struct scatterlist *mtip_hw_get_scatterlist(struct driver_data *dd, * return value * The size, in bytes, of the data copied into buf. 
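maybe_pull_ahead() above folds the two DRBD congestion tests into one helper that ORs the conditions together and then acts once. A small standalone model of that decision logic; the threshold values and field names are made up:

#include <stdio.h>

enum { OC_BLOCK, OC_PULL_AHEAD, OC_DISCONNECT };

struct conf { int cong_fill, cong_extents, on_congestion; };

static const char *check_congestion(const struct conf *nc,
                                    int ap_in_flight, int al_used)
{
    int congested = 0;

    if (nc->cong_fill && ap_in_flight >= nc->cong_fill)
        congested = 1;    /* congestion-fill threshold reached */
    if (al_used >= nc->cong_extents)
        congested = 1;    /* congestion-extents threshold reached */

    if (!congested)
        return "ok";
    return nc->on_congestion == OC_PULL_AHEAD ? "pull-ahead" : "disconnect";
}

int main(void)
{
    struct conf nc = { .cong_fill = 128, .cong_extents = 64,
                       .on_congestion = OC_PULL_AHEAD };

    printf("%s\n", check_congestion(&nc, 200, 10));    /* fill exceeded */
    printf("%s\n", check_congestion(&nc, 10, 10));     /* below thresholds */
    return 0;
}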
*/ -static ssize_t mtip_hw_show_registers(struct device *dev, +static ssize_t mtip_hw_show_status(struct device *dev, struct device_attribute *attr, char *buf) { - u32 group_allocated; struct driver_data *dd = dev_to_disk(dev)->private_data; int size = 0; + + if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag)) + size += sprintf(buf, "%s", "thermal_shutdown\n"); + else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag)) + size += sprintf(buf, "%s", "write_protect\n"); + else + size += sprintf(buf, "%s", "online\n"); + + return size; +} + +static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); + +static ssize_t mtip_hw_read_registers(struct file *f, char __user *ubuf, + size_t len, loff_t *offset) +{ + struct driver_data *dd = (struct driver_data *)f->private_data; + char buf[MTIP_DFS_MAX_BUF_SIZE]; + u32 group_allocated; + int size = *offset; int n; - size += sprintf(&buf[size], "Hardware\n--------\n"); - size += sprintf(&buf[size], "S ACTive : [ 0x"); + if (!len || size) + return 0; + + if (size < 0) + return -EINVAL; + + size += sprintf(&buf[size], "H/ S ACTive : [ 0x"); for (n = dd->slot_groups-1; n >= 0; n--) size += sprintf(&buf[size], "%08X ", readl(dd->port->s_active[n])); size += sprintf(&buf[size], "]\n"); - size += sprintf(&buf[size], "Command Issue : [ 0x"); + size += sprintf(&buf[size], "H/ Command Issue : [ 0x"); for (n = dd->slot_groups-1; n >= 0; n--) size += sprintf(&buf[size], "%08X ", readl(dd->port->cmd_issue[n])); size += sprintf(&buf[size], "]\n"); - size += sprintf(&buf[size], "Completed : [ 0x"); + size += sprintf(&buf[size], "H/ Completed : [ 0x"); for (n = dd->slot_groups-1; n >= 0; n--) size += sprintf(&buf[size], "%08X ", readl(dd->port->completed[n])); size += sprintf(&buf[size], "]\n"); - size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n", + size += sprintf(&buf[size], "H/ PORT IRQ STAT : [ 0x%08X ]\n", readl(dd->port->mmio + PORT_IRQ_STAT)); - size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n", + size += sprintf(&buf[size], "H/ HOST IRQ STAT : [ 0x%08X ]\n", readl(dd->mmio + HOST_IRQ_STAT)); size += sprintf(&buf[size], "\n"); - size += sprintf(&buf[size], "Local\n-----\n"); - size += sprintf(&buf[size], "Allocated : [ 0x"); + size += sprintf(&buf[size], "L/ Allocated : [ 0x"); for (n = dd->slot_groups-1; n >= 0; n--) { if (sizeof(long) > sizeof(u32)) @@ -2605,7 +2630,7 @@ static ssize_t mtip_hw_show_registers(struct device *dev, } size += sprintf(&buf[size], "]\n"); - size += sprintf(&buf[size], "Commands in Q: [ 0x"); + size += sprintf(&buf[size], "L/ Commands in Q : [ 0x"); for (n = dd->slot_groups-1; n >= 0; n--) { if (sizeof(long) > sizeof(u32)) @@ -2617,44 +2642,53 @@ static ssize_t mtip_hw_show_registers(struct device *dev, } size += sprintf(&buf[size], "]\n"); - return size; + *offset = size <= len ? 
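Both mtip debugfs read handlers build their text in a fixed MTIP_DFS_MAX_BUF_SIZE buffer while advancing a running offset. The same size += sprintf(&buf[size], ...) accumulation, shown here with the bounds-enforcing snprintf() and invented register values:

#include <stdio.h>

#define BUF_CAP 1024    /* plays the role of MTIP_DFS_MAX_BUF_SIZE */

static int dump_sactive(char *buf, size_t cap, const unsigned *regs, int n)
{
    int size = 0;

    size += snprintf(buf + size, cap - size, "H/ S ACTive : [ 0x");
    for (int i = n - 1; i >= 0; i--)
        size += snprintf(buf + size, cap - size, "%08X ", regs[i]);
    size += snprintf(buf + size, cap - size, "]\n");
    return size;    /* running offset == bytes produced */
}

int main(void)
{
    char buf[BUF_CAP];
    unsigned regs[2] = { 0xDEADBEEF, 0x00000001 };
    int size = dump_sactive(buf, sizeof(buf), regs, 2);

    fwrite(buf, 1, (size_t)size, stdout);
    return 0;
}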
size : len; + size = copy_to_user(ubuf, buf, *offset); + if (size) + return -EFAULT; + + return *offset; } -static ssize_t mtip_hw_show_status(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t mtip_hw_read_flags(struct file *f, char __user *ubuf, + size_t len, loff_t *offset) { - struct driver_data *dd = dev_to_disk(dev)->private_data; - int size = 0; + struct driver_data *dd = (struct driver_data *)f->private_data; + char buf[MTIP_DFS_MAX_BUF_SIZE]; + int size = *offset; - if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag)) - size += sprintf(buf, "%s", "thermal_shutdown\n"); - else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag)) - size += sprintf(buf, "%s", "write_protect\n"); - else - size += sprintf(buf, "%s", "online\n"); - - return size; -} + if (!len || size) + return 0; -static ssize_t mtip_hw_show_flags(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct driver_data *dd = dev_to_disk(dev)->private_data; - int size = 0; + if (size < 0) + return -EINVAL; - size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n", + size += sprintf(&buf[size], "Flag-port : [ %08lX ]\n", dd->port->flags); - size += sprintf(&buf[size], "Flag in dd struct : [ %08lX ]\n", + size += sprintf(&buf[size], "Flag-dd : [ %08lX ]\n", dd->dd_flag); - return size; + *offset = size <= len ? size : len; + size = copy_to_user(ubuf, buf, *offset); + if (size) + return -EFAULT; + + return *offset; } -static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL); -static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); -static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL); +static const struct file_operations mtip_regs_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mtip_hw_read_registers, + .llseek = no_llseek, +}; + +static const struct file_operations mtip_flags_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = mtip_hw_read_flags, + .llseek = no_llseek, +}; /* * Create the sysfs related attributes. 
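For context, a minimal 3.5-era module sketch of the debugfs pattern the driver switches to: one directory, one read-only file, and the stock simple_open() helper as in mtip_flags_fops. It uses simple_read_from_buffer() for the offset handling the mtip handlers open-code; all "demo" names are invented:

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/module.h>

static struct dentry *demo_dir;

static ssize_t demo_read(struct file *f, char __user *ubuf,
                         size_t len, loff_t *off)
{
    char buf[32];
    int n = scnprintf(buf, sizeof(buf), "flags : [ %08lx ]\n", 0xdeadUL);

    return simple_read_from_buffer(ubuf, len, off, buf, n);
}

static const struct file_operations demo_fops = {
    .owner  = THIS_MODULE,
    .open   = simple_open,
    .read   = demo_read,
    .llseek = no_llseek,
};

static int __init demo_init(void)
{
    demo_dir = debugfs_create_dir("demo", NULL);
    debugfs_create_file("flags", S_IRUGO, demo_dir, NULL, &demo_fops);
    return 0;
}

static void __exit demo_exit(void)
{
    debugfs_remove_recursive(demo_dir);    /* removes the file too */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");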
@@ -2671,15 +2705,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj) if (!kobj || !dd) return -EINVAL; - if (sysfs_create_file(kobj, &dev_attr_registers.attr)) - dev_warn(&dd->pdev->dev, - "Error creating 'registers' sysfs entry\n"); if (sysfs_create_file(kobj, &dev_attr_status.attr)) dev_warn(&dd->pdev->dev, "Error creating 'status' sysfs entry\n"); - if (sysfs_create_file(kobj, &dev_attr_flags.attr)) - dev_warn(&dd->pdev->dev, - "Error creating 'flags' sysfs entry\n"); return 0; } @@ -2698,13 +2726,39 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj) if (!kobj || !dd) return -EINVAL; - sysfs_remove_file(kobj, &dev_attr_registers.attr); sysfs_remove_file(kobj, &dev_attr_status.attr); - sysfs_remove_file(kobj, &dev_attr_flags.attr); return 0; } +static int mtip_hw_debugfs_init(struct driver_data *dd) +{ + if (!dfs_parent) + return -1; + + dd->dfs_node = debugfs_create_dir(dd->disk->disk_name, dfs_parent); + if (IS_ERR_OR_NULL(dd->dfs_node)) { + dev_warn(&dd->pdev->dev, + "Error creating node %s under debugfs\n", + dd->disk->disk_name); + dd->dfs_node = NULL; + return -1; + } + + debugfs_create_file("flags", S_IRUGO, dd->dfs_node, dd, + &mtip_flags_fops); + debugfs_create_file("registers", S_IRUGO, dd->dfs_node, dd, + &mtip_regs_fops); + + return 0; +} + +static void mtip_hw_debugfs_exit(struct driver_data *dd) +{ + debugfs_remove_recursive(dd->dfs_node); +} + + /* * Perform any init/resume time hardware setup * @@ -3730,6 +3784,7 @@ skip_create_disk: mtip_hw_sysfs_init(dd, kobj); kobject_put(kobj); } + mtip_hw_debugfs_init(dd); if (dd->mtip_svc_handler) { set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag); @@ -3755,6 +3810,8 @@ start_service_thread: return rv; kthread_run_error: + mtip_hw_debugfs_exit(dd); + /* Delete our gendisk. This also removes the device from /dev */ del_gendisk(dd->disk); @@ -3805,6 +3862,7 @@ static int mtip_block_remove(struct driver_data *dd) kobject_put(kobj); } } + mtip_hw_debugfs_exit(dd); /* * Delete our gendisk structure. This also removes the device @@ -4152,10 +4210,20 @@ static int __init mtip_init(void) } mtip_major = error; + if (!dfs_parent) { + dfs_parent = debugfs_create_dir("rssd", NULL); + if (IS_ERR_OR_NULL(dfs_parent)) { + printk(KERN_WARNING "Error creating debugfs parent\n"); + dfs_parent = NULL; + } + } + /* Register our PCI operations. */ error = pci_register_driver(&mtip_pci_driver); - if (error) + if (error) { + debugfs_remove(dfs_parent); unregister_blkdev(mtip_major, MTIP_DRV_NAME); + } return error; } @@ -4172,6 +4240,8 @@ static int __init mtip_init(void) */ static void __exit mtip_exit(void) { + debugfs_remove_recursive(dfs_parent); + /* Release the allocated major block device number. */ unregister_blkdev(mtip_major, MTIP_DRV_NAME); diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index b2c88da26b2a..f51fc23d17bb 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -26,7 +26,6 @@ #include <linux/ata.h> #include <linux/interrupt.h> #include <linux/genhd.h> -#include <linux/version.h> /* Offset of Subsystem Device ID in pci confoguration space */ #define PCI_SUBSYSTEM_DEVICEID 0x2E @@ -111,6 +110,8 @@ #define dbg_printk(format, arg...) 
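mtip_init() above removes the debugfs parent again when pci_register_driver() fails, so a failed init leaves nothing behind. The unwind-in-reverse-order shape as a hedged kernel-style sketch, where register_something() is a stand-in for the step that can fail:

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/module.h>

int register_something(void);    /* stand-in for pci_register_driver() */

static struct dentry *dfs_parent;

static int __init demo_init(void)
{
    int error;

    dfs_parent = debugfs_create_dir("demo", NULL);
    if (IS_ERR_OR_NULL(dfs_parent))
        dfs_parent = NULL;    /* debugfs is best effort, keep going */

    error = register_something();
    if (error)
        debugfs_remove(dfs_parent);    /* unwind; NULL is a no-op */
    return error;
}

module_init(demo_init);
MODULE_LICENSE("GPL");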
#endif +#define MTIP_DFS_MAX_BUF_SIZE 1024 + #define __force_bit2int (unsigned int __force) enum { @@ -447,6 +448,8 @@ struct driver_data { unsigned long dd_flag; /* NOTE: use atomic bit operations on this */ struct task_struct *mtip_svc_handler; /* task_struct of svc thd */ + + struct dentry *dfs_node; }; #endif diff --git a/drivers/block/umem.c b/drivers/block/umem.c index aa2712060bfb..9a72277a31df 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c @@ -513,6 +513,44 @@ static void process_page(unsigned long data) } } +struct mm_plug_cb { + struct blk_plug_cb cb; + struct cardinfo *card; +}; + +static void mm_unplug(struct blk_plug_cb *cb) +{ + struct mm_plug_cb *mmcb = container_of(cb, struct mm_plug_cb, cb); + + spin_lock_irq(&mmcb->card->lock); + activate(mmcb->card); + spin_unlock_irq(&mmcb->card->lock); + kfree(mmcb); +} + +static int mm_check_plugged(struct cardinfo *card) +{ + struct blk_plug *plug = current->plug; + struct mm_plug_cb *mmcb; + + if (!plug) + return 0; + + list_for_each_entry(mmcb, &plug->cb_list, cb.list) { + if (mmcb->cb.callback == mm_unplug && mmcb->card == card) + return 1; + } + /* Not currently on the callback list */ + mmcb = kmalloc(sizeof(*mmcb), GFP_ATOMIC); + if (!mmcb) + return 0; + + mmcb->card = card; + mmcb->cb.callback = mm_unplug; + list_add(&mmcb->cb.list, &plug->cb_list); + return 1; +} + static void mm_make_request(struct request_queue *q, struct bio *bio) { struct cardinfo *card = q->queuedata; @@ -523,6 +561,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio) *card->biotail = bio; bio->bi_next = NULL; card->biotail = &bio->bi_next; + if (bio->bi_rw & REQ_SYNC || !mm_check_plugged(card)) + activate(card); spin_unlock_irq(&card->lock); return; diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h index 773cf27dc23f..9ad3b5ec1dc1 100644 --- a/drivers/block/xen-blkback/common.h +++ b/drivers/block/xen-blkback/common.h @@ -257,6 +257,7 @@ static inline void blkif_get_x86_32_req(struct blkif_request *dst, break; case BLKIF_OP_DISCARD: dst->u.discard.flag = src->u.discard.flag; + dst->u.discard.id = src->u.discard.id; dst->u.discard.sector_number = src->u.discard.sector_number; dst->u.discard.nr_sectors = src->u.discard.nr_sectors; break; @@ -287,6 +288,7 @@ static inline void blkif_get_x86_64_req(struct blkif_request *dst, break; case BLKIF_OP_DISCARD: dst->u.discard.flag = src->u.discard.flag; + dst->u.discard.id = src->u.discard.id; dst->u.discard.sector_number = src->u.discard.sector_number; dst->u.discard.nr_sectors = src->u.discard.nr_sectors; break; diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 60eed4bdd2e4..e4fb3374dcd2 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -141,14 +141,36 @@ static int get_id_from_freelist(struct blkfront_info *info) return free; } -static void add_id_to_freelist(struct blkfront_info *info, +static int add_id_to_freelist(struct blkfront_info *info, unsigned long id) { + if (info->shadow[id].req.u.rw.id != id) + return -EINVAL; + if (info->shadow[id].request == NULL) + return -EINVAL; info->shadow[id].req.u.rw.id = info->shadow_free; info->shadow[id].request = NULL; info->shadow_free = id; + return 0; } +static const char *op_name(int op) +{ + static const char *const names[] = { + [BLKIF_OP_READ] = "read", + [BLKIF_OP_WRITE] = "write", + [BLKIF_OP_WRITE_BARRIER] = "barrier", + [BLKIF_OP_FLUSH_DISKCACHE] = "flush", + [BLKIF_OP_DISCARD] = "discard" }; + + if (op < 0 || op >= 
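mm_check_plugged() above piggybacks a per-card callback on the task's current block plug, so the card is activated once at unplug time instead of per bio. A sketch of that pattern against the 3.5-era struct blk_plug_cb (open-coded cb_list, no helper API); the demo names and the void cookie are invented:

#include <linux/blkdev.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>

struct demo_cb {
    struct blk_plug_cb cb;
    void *dev;    /* per-device cookie */
};

static void demo_unplug(struct blk_plug_cb *cb)
{
    struct demo_cb *dcb = container_of(cb, struct demo_cb, cb);

    /* ... kick dcb->dev here, under its own lock ... */
    kfree(dcb);
}

static int demo_check_plugged(void *dev)
{
    struct blk_plug *plug = current->plug;
    struct demo_cb *dcb;

    if (!plug)
        return 0;    /* not plugged: caller must kick immediately */

    list_for_each_entry(dcb, &plug->cb_list, cb.list)
        if (dcb->cb.callback == demo_unplug && dcb->dev == dev)
            return 1;    /* already queued for this device */

    dcb = kmalloc(sizeof(*dcb), GFP_ATOMIC);
    if (!dcb)
        return 0;    /* allocation failed: fall back to immediate kick */
    dcb->dev = dev;
    dcb->cb.callback = demo_unplug;
    list_add(&dcb->cb.list, &plug->cb_list);
    return 1;
}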
ARRAY_SIZE(names)) + return "unknown"; + + if (!names[op]) + return "reserved"; + + return names[op]; +} static int xlbd_reserve_minors(unsigned int minor, unsigned int nr) { unsigned int end = minor + nr; @@ -746,20 +768,36 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) bret = RING_GET_RESPONSE(&info->ring, i); id = bret->id; + /* + * The backend has messed up and given us an id that we would + * never have given to it (we stamp it up to BLK_RING_SIZE - + * look in get_id_from_freelist. + */ + if (id >= BLK_RING_SIZE) { + WARN(1, "%s: response to %s has incorrect id (%ld)\n", + info->gd->disk_name, op_name(bret->operation), id); + /* We can't safely get the 'struct request' as + * the id is busted. */ + continue; + } req = info->shadow[id].request; if (bret->operation != BLKIF_OP_DISCARD) blkif_completion(&info->shadow[id]); - add_id_to_freelist(info, id); + if (add_id_to_freelist(info, id)) { + WARN(1, "%s: response to %s (id %ld) couldn't be recycled!\n", + info->gd->disk_name, op_name(bret->operation), id); + continue; + } error = (bret->status == BLKIF_RSP_OKAY) ? 0 : -EIO; switch (bret->operation) { case BLKIF_OP_DISCARD: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { struct request_queue *rq = info->rq; - printk(KERN_WARNING "blkfront: %s: discard op failed\n", - info->gd->disk_name); + printk(KERN_WARNING "blkfront: %s: %s op failed\n", + info->gd->disk_name, op_name(bret->operation)); error = -EOPNOTSUPP; info->feature_discard = 0; info->feature_secdiscard = 0; @@ -771,18 +809,14 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id) case BLKIF_OP_FLUSH_DISKCACHE: case BLKIF_OP_WRITE_BARRIER: if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) { - printk(KERN_WARNING "blkfront: %s: write %s op failed\n", - info->flush_op == BLKIF_OP_WRITE_BARRIER ? - "barrier" : "flush disk cache", - info->gd->disk_name); + printk(KERN_WARNING "blkfront: %s: %s op failed\n", + info->gd->disk_name, op_name(bret->operation)); error = -EOPNOTSUPP; } if (unlikely(bret->status == BLKIF_RSP_ERROR && info->shadow[id].req.u.rw.nr_segments == 0)) { - printk(KERN_WARNING "blkfront: %s: empty write %s op failed\n", - info->flush_op == BLKIF_OP_WRITE_BARRIER ? - "barrier" : "flush disk cache", - info->gd->disk_name); + printk(KERN_WARNING "blkfront: %s: empty %s op failed\n", + info->gd->disk_name, op_name(bret->operation)); error = -EOPNOTSUPP; } if (unlikely(error)) { diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index dcbe05616090..9a1eb0cfa95f 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1067,26 +1067,24 @@ static int __clk_set_parent(struct clk *clk, struct clk *parent) old_parent = clk->parent; - /* find index of new parent clock using cached parent ptrs */ - if (clk->parents) - for (i = 0; i < clk->num_parents; i++) - if (clk->parents[i] == parent) - break; - else + if (!clk->parents) clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents), GFP_KERNEL); /* - * find index of new parent clock using string name comparison - * also try to cache the parent to avoid future calls to __clk_lookup + * find index of new parent clock using cached parent ptrs, + * or if not yet cached, use string name comparison and cache + * them now to avoid future calls to __clk_lookup. 
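The op_name() helper guards both out-of-range opcodes and holes in the designated-initializer table before indexing. The same lookup as a runnable program; the opcode numbering mirrors the table above, with 4 left as a hole:

#include <stdio.h>

enum { OP_READ = 0, OP_WRITE = 1, OP_BARRIER = 2, OP_FLUSH = 3,
       OP_DISCARD = 5 };

static const char *op_name(int op)
{
    static const char *const names[] = {
        [OP_READ]    = "read",
        [OP_WRITE]   = "write",
        [OP_BARRIER] = "barrier",
        [OP_FLUSH]   = "flush",
        [OP_DISCARD] = "discard",
    };

    if (op < 0 || op >= (int)(sizeof(names) / sizeof(names[0])))
        return "unknown";     /* out of range entirely */
    if (!names[op])
        return "reserved";    /* hole in the table */
    return names[op];
}

int main(void)
{
    printf("%s %s %s\n", op_name(OP_DISCARD), op_name(4), op_name(99));
    return 0;
}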
*/ - if (i == clk->num_parents) - for (i = 0; i < clk->num_parents; i++) - if (!strcmp(clk->parent_names[i], parent->name)) { - if (clk->parents) - clk->parents[i] = __clk_lookup(parent->name); - break; - } + for (i = 0; i < clk->num_parents; i++) { + if (clk->parents && clk->parents[i] == parent) + break; + else if (!strcmp(clk->parent_names[i], parent->name)) { + if (clk->parents) + clk->parents[i] = __clk_lookup(parent->name); + break; + } + } if (i == clk->num_parents) { pr_debug("%s: clock %s is not a possible parent of clock %s\n", diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 5873e481e5d2..a8743c399e83 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -1039,6 +1039,24 @@ mode_in_range(const struct drm_display_mode *mode, struct edid *edid, return true; } +static bool valid_inferred_mode(const struct drm_connector *connector, + const struct drm_display_mode *mode) +{ + struct drm_display_mode *m; + bool ok = false; + + list_for_each_entry(m, &connector->probed_modes, head) { + if (mode->hdisplay == m->hdisplay && + mode->vdisplay == m->vdisplay && + drm_mode_vrefresh(mode) == drm_mode_vrefresh(m)) + return false; /* duplicated */ + if (mode->hdisplay <= m->hdisplay && + mode->vdisplay <= m->vdisplay) + ok = true; + } + return ok; +} + static int drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid, struct detailed_timing *timing) @@ -1048,7 +1066,8 @@ drm_dmt_modes_for_range(struct drm_connector *connector, struct edid *edid, struct drm_device *dev = connector->dev; for (i = 0; i < drm_num_dmt_modes; i++) { - if (mode_in_range(drm_dmt_modes + i, edid, timing)) { + if (mode_in_range(drm_dmt_modes + i, edid, timing) && + valid_inferred_mode(connector, drm_dmt_modes + i)) { newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]); if (newmode) { drm_mode_probed_add(connector, newmode); @@ -1088,7 +1107,8 @@ drm_gtf_modes_for_range(struct drm_connector *connector, struct edid *edid, return modes; fixup_mode_1366x768(newmode); - if (!mode_in_range(newmode, edid, timing)) { + if (!mode_in_range(newmode, edid, timing) || + !valid_inferred_mode(connector, newmode)) { drm_mode_destroy(dev, newmode); continue; } @@ -1116,7 +1136,8 @@ drm_cvt_modes_for_range(struct drm_connector *connector, struct edid *edid, return modes; fixup_mode_1366x768(newmode); - if (!mode_in_range(newmode, edid, timing)) { + if (!mode_in_range(newmode, edid, timing) || + !valid_inferred_mode(connector, newmode)) { drm_mode_destroy(dev, newmode); continue; } diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index f94792626b94..36822b924eb1 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -1401,6 +1401,27 @@ i915_mtrr_setup(struct drm_i915_private *dev_priv, unsigned long base, } } +static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv) +{ + struct apertures_struct *ap; + struct pci_dev *pdev = dev_priv->dev->pdev; + bool primary; + + ap = alloc_apertures(1); + if (!ap) + return; + + ap->ranges[0].base = dev_priv->dev->agp->base; + ap->ranges[0].size = + dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; + primary = + pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW; + + remove_conflicting_framebuffers(ap, "inteldrmfb", primary); + + kfree(ap); +} + /** * i915_driver_load - setup chip and create an initial config * @dev: DRM device @@ -1446,6 +1467,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto free_priv; } + 
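valid_inferred_mode() above only lets a range-inferred mode through if it is not an exact duplicate of an already-probed mode and fits inside at least one of them. The same filter over a plain array, with an invented mode list:

#include <stdbool.h>
#include <stdio.h>

struct mode { int h, v, refresh; };

static bool valid_inferred(const struct mode *probed, int n,
                           const struct mode *cand)
{
    bool ok = false;

    for (int i = 0; i < n; i++) {
        if (cand->h == probed[i].h && cand->v == probed[i].v &&
            cand->refresh == probed[i].refresh)
            return false;    /* duplicate of a probed mode */
        if (cand->h <= probed[i].h && cand->v <= probed[i].v)
            ok = true;       /* fits under at least one real mode */
    }
    return ok;
}

int main(void)
{
    struct mode probed[] = { { 1920, 1080, 60 } };
    struct mode dup = { 1920, 1080, 60 }, small = { 1280, 720, 60 },
                big = { 2560, 1440, 60 };

    printf("%d %d %d\n",                       /* prints: 0 1 0 */
           valid_inferred(probed, 1, &dup),
           valid_inferred(probed, 1, &small),
           valid_inferred(probed, 1, &big));
    return 0;
}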
dev_priv->mm.gtt = intel_gtt_get(); + if (!dev_priv->mm.gtt) { + DRM_ERROR("Failed to initialize GTT\n"); + ret = -ENODEV; + goto put_bridge; + } + + i915_kick_out_firmware_fb(dev_priv); + pci_set_master(dev->pdev); /* overlay on gen2 is broken and can't address above 1G */ @@ -1471,13 +1501,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) goto put_bridge; } - dev_priv->mm.gtt = intel_gtt_get(); - if (!dev_priv->mm.gtt) { - DRM_ERROR("Failed to initialize GTT\n"); - ret = -ENODEV; - goto out_rmmap; - } - aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT; dev_priv->mm.gtt_mapping = diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c index 59d44937dd9f..84b648a7ddd8 100644 --- a/drivers/gpu/drm/radeon/radeon_gart.c +++ b/drivers/gpu/drm/radeon/radeon_gart.c @@ -289,8 +289,9 @@ int radeon_vm_manager_init(struct radeon_device *rdev) rdev->vm_manager.enabled = false; /* mark first vm as always in use, it's the system one */ + /* allocate enough for 2 full VM pts */ r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager, - rdev->vm_manager.max_pfn * 8, + rdev->vm_manager.max_pfn * 8 * 2, RADEON_GEM_DOMAIN_VRAM); if (r) { dev_err(rdev->dev, "failed to allocate vm bo (%dKB)\n", @@ -633,7 +634,15 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm) mutex_init(&vm->mutex); INIT_LIST_HEAD(&vm->list); INIT_LIST_HEAD(&vm->va); - vm->last_pfn = 0; + /* SI requires equal sized PTs for all VMs, so always set + * last_pfn to max_pfn. cayman allows variable sized + * pts so we can grow then as needed. Once we switch + * to two level pts we can unify this again. + */ + if (rdev->family >= CHIP_TAHITI) + vm->last_pfn = rdev->vm_manager.max_pfn; + else + vm->last_pfn = 0; /* map the ib pool buffer at 0 in virtual address space, set * read only */ diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index f28bd4b7ef98..21ec9f5653ce 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -292,6 +292,7 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data, int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { + struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_busy *args = data; struct drm_gem_object *gobj; struct radeon_bo *robj; @@ -317,13 +318,14 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, break; } drm_gem_object_unreference_unlocked(gobj); - r = radeon_gem_handle_lockup(robj->rdev, r); + r = radeon_gem_handle_lockup(rdev, r); return r; } int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { + struct radeon_device *rdev = dev->dev_private; struct drm_radeon_gem_wait_idle *args = data; struct drm_gem_object *gobj; struct radeon_bo *robj; @@ -336,10 +338,10 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, robj = gem_to_radeon_bo(gobj); r = radeon_bo_wait(robj, NULL, false); /* callback hw specific functions if any */ - if (robj->rdev->asic->ioctl_wait_idle) - robj->rdev->asic->ioctl_wait_idle(robj->rdev, robj); + if (rdev->asic->ioctl_wait_idle) + robj->rdev->asic->ioctl_wait_idle(rdev, robj); drm_gem_object_unreference_unlocked(gobj); - r = radeon_gem_handle_lockup(robj->rdev, r); + r = radeon_gem_handle_lockup(rdev, r); return r; } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index c7b61f16ecfd..0b0279291a73 100644 --- a/drivers/gpu/drm/radeon/si.c +++ 
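The radeon_gem ioctl fixes cache rdev from dev->dev_private before drm_gem_object_unreference_unlocked() drops what may be the last reference; reading robj->rdev afterwards would be a use-after-free. A toy reproduction of the ordering rule, with invented types:

#include <stdio.h>
#include <stdlib.h>

struct dev { int id; };
struct obj { struct dev *dev; int refcount; };

static void obj_put(struct obj *o)
{
    if (--o->refcount == 0)
        free(o);    /* last reference: the object is gone */
}

int main(void)
{
    struct dev d = { 42 };
    struct obj *o = malloc(sizeof(*o));

    if (!o)
        return 1;
    o->dev = &d;
    o->refcount = 1;

    /* Cache what is still needed from a stable source *before* dropping
     * the reference; reading o->dev after obj_put() could touch freed
     * memory, which is the bug the radeon_gem hunks close. */
    struct dev *dev = o->dev;

    obj_put(o);
    printf("dev id: %d\n", dev->id);
    return 0;
}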
b/drivers/gpu/drm/radeon/si.c @@ -2365,12 +2365,12 @@ int si_pcie_gart_enable(struct radeon_device *rdev) WREG32(0x15DC, 0); /* empty context1-15 */ - /* FIXME start with 1G, once using 2 level pt switch to full + /* FIXME start with 4G, once using 2 level pt switch to full * vm size space */ /* set vm size, must be a multiple of 4 */ WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0); - WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, (1 << 30) / RADEON_GPU_PAGE_SIZE); + WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn); for (i = 1; i < 16; i++) { if (i < 8) WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2), diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 7f1feb2f467a..637c51c11b44 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -693,7 +693,7 @@ static void __cpuinit get_core_online(unsigned int cpu) * sensors. We check this bit only, all the early CPUs * without thermal sensors will be filtered out. */ - if (!cpu_has(c, X86_FEATURE_DTS)) + if (!cpu_has(c, X86_FEATURE_DTHERM)) return; if (!pdev) { @@ -794,7 +794,7 @@ static struct notifier_block coretemp_cpu_notifier __refdata = { }; static const struct x86_cpu_id coretemp_ids[] = { - { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTS }, + { X86_VENDOR_INTEL, X86_FAMILY_ANY, X86_MODEL_ANY, X86_FEATURE_DTHERM }, {} }; MODULE_DEVICE_TABLE(x86cpu, coretemp_ids); diff --git a/drivers/ieee802154/at86rf230.c b/drivers/ieee802154/at86rf230.c index 4d033d4c4ddc..902e38bb382f 100644 --- a/drivers/ieee802154/at86rf230.c +++ b/drivers/ieee802154/at86rf230.c @@ -585,7 +585,6 @@ err: static int at86rf230_rx(struct at86rf230_local *lp) { u8 len = 128, lqi = 0; - int rc; struct sk_buff *skb; skb = alloc_skb(len, GFP_KERNEL); @@ -607,7 +606,7 @@ static int at86rf230_rx(struct at86rf230_local *lp) ieee802154_rx_irqsafe(lp->dev, skb, lqi); - dev_dbg(&lp->spi->dev, "READ_FBUF: %d %d %x\n", rc, len, lqi); + dev_dbg(&lp->spi->dev, "READ_FBUF: %d %x\n", len, lqi); return 0; err: diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 740dcc065cf2..77b6b182778a 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c @@ -1374,7 +1374,7 @@ static int pass_accept_req(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) goto reject; } dst = &rt->dst; - l2t = t3_l2t_get(tdev, dst, NULL); + l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip); if (!l2t) { printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", __func__); @@ -1942,7 +1942,8 @@ int iwch_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) goto fail3; } ep->dst = &rt->dst; - ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL); + ep->l2t = t3_l2t_get(ep->com.tdev, ep->dst, NULL, + &cm_id->remote_addr.sin_addr.s_addr); if (!ep->l2t) { printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); err = -ENOMEM; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 3530c41fcd1f..8a3a2037b005 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -718,26 +718,53 @@ int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, return ret; } +struct mlx4_ib_steering { + struct list_head list; + u64 reg_id; + union ib_gid gid; +}; + static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { int err; struct mlx4_ib_dev *mdev = to_mdev(ibqp->device); struct mlx4_ib_qp *mqp = to_mqp(ibqp); + u64 reg_id; + struct mlx4_ib_steering *ib_steering = NULL; + + if 
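The coretemp hunks only rename the feature bit (X86_FEATURE_DTS became X86_FEATURE_DTHERM); the capability itself is the digital thermal sensor advertised in CPUID leaf 6, EAX bit 0. A quick userspace probe of that same bit, assuming an x86 host and GCC's cpuid.h:

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
    unsigned eax, ebx, ecx, edx;

    if (!__get_cpuid(6, &eax, &ebx, &ecx, &edx)) {
        puts("CPUID leaf 6 not supported");
        return 1;
    }
    /* CPUID.06H:EAX[0] == digital temperature sensor */
    printf("digital thermal sensor: %s\n", (eax & 1) ? "present" : "absent");
    return 0;
}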
(mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL); + if (!ib_steering) + return -ENOMEM; + } - err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, - !!(mqp->flags & MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), - MLX4_PROT_IB_IPV6); + err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port, + !!(mqp->flags & + MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK), + MLX4_PROT_IB_IPV6, ®_id); if (err) - return err; + goto err_malloc; err = add_gid_entry(ibqp, gid); if (err) goto err_add; + if (ib_steering) { + memcpy(ib_steering->gid.raw, gid->raw, 16); + ib_steering->reg_id = reg_id; + mutex_lock(&mqp->mutex); + list_add(&ib_steering->list, &mqp->steering_rules); + mutex_unlock(&mqp->mutex); + } return 0; err_add: - mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6); + mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, + MLX4_PROT_IB_IPV6, reg_id); +err_malloc: + kfree(ib_steering); + return err; } @@ -765,9 +792,30 @@ static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) u8 mac[6]; struct net_device *ndev; struct mlx4_ib_gid_entry *ge; + u64 reg_id = 0; + + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + struct mlx4_ib_steering *ib_steering; + + mutex_lock(&mqp->mutex); + list_for_each_entry(ib_steering, &mqp->steering_rules, list) { + if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) { + list_del(&ib_steering->list); + break; + } + } + mutex_unlock(&mqp->mutex); + if (&ib_steering->list == &mqp->steering_rules) { + pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n"); + return -EINVAL; + } + reg_id = ib_steering->reg_id; + kfree(ib_steering); + } - err = mlx4_multicast_detach(mdev->dev, - &mqp->mqp, gid->raw, MLX4_PROT_IB_IPV6); + err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw, + MLX4_PROT_IB_IPV6, reg_id); if (err) return err; diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index ff36655d23d3..42df4f7a6a5b 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -163,6 +163,7 @@ struct mlx4_ib_qp { u8 state; int mlx_type; struct list_head gid_list; + struct list_head steering_rules; }; struct mlx4_ib_srq { diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 8d4ed24aef93..6af19f6c2b11 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -495,6 +495,7 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, spin_lock_init(&qp->sq.lock); spin_lock_init(&qp->rq.lock); INIT_LIST_HEAD(&qp->gid_list); + INIT_LIST_HEAD(&qp->steering_rules); qp->state = IB_QPS_RESET; if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 3974c290b667..bbee4b2d7a13 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -715,7 +715,7 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) rcu_read_lock(); if (likely(skb_dst(skb))) { - n = dst_get_neighbour_noref(skb_dst(skb)); + n = dst_neigh_lookup_skb(skb_dst(skb), skb); if (!n) { ++dev->stats.tx_dropped; dev_kfree_skb_any(skb); @@ -797,6 +797,8 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) } } unlock: + if (n) + neigh_release(n); rcu_read_unlock(); return NETDEV_TX_OK; } diff --git 
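With device-managed steering, mlx4_ib_mcg_attach() must remember the reg_id returned for each gid so that detach can pass it back when releasing the rule. That bookkeeping reduced to a plain singly linked list, with invented types and values:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rule {
    struct rule *next;
    unsigned char gid[16];
    unsigned long long reg_id;
};

static struct rule *rules;

static int attach(const unsigned char gid[16], unsigned long long reg_id)
{
    struct rule *r = malloc(sizeof(*r));

    if (!r)
        return -1;
    memcpy(r->gid, gid, 16);    /* remember the id at attach time */
    r->reg_id = reg_id;
    r->next = rules;
    rules = r;
    return 0;
}

static int detach(const unsigned char gid[16], unsigned long long *reg_id)
{
    for (struct rule **p = &rules; *p; p = &(*p)->next) {
        if (!memcmp((*p)->gid, gid, 16)) {
            struct rule *r = *p;

            *reg_id = r->reg_id;    /* hand the id back for release */
            *p = r->next;
            free(r);
            return 0;
        }
    }
    return -1;    /* no rule recorded for this gid */
}

int main(void)
{
    unsigned char gid[16] = { 1 };
    unsigned long long id = 0;

    attach(gid, 0xabc);
    printf("detach: %d id=%llx\n", detach(gid, &id), id);
    printf("again:  %d\n", detach(gid, &id));
    return 0;
}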
a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c index 20ebc6fd1bb9..7cecb16d3d48 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c @@ -658,9 +658,15 @@ static int ipoib_mcast_leave(struct net_device *dev, struct ipoib_mcast *mcast) void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) { struct ipoib_dev_priv *priv = netdev_priv(dev); + struct dst_entry *dst = skb_dst(skb); struct ipoib_mcast *mcast; + struct neighbour *n; unsigned long flags; + n = NULL; + if (dst) + n = dst_neigh_lookup_skb(dst, skb); + spin_lock_irqsave(&priv->lock, flags); if (!test_bit(IPOIB_FLAG_OPER_UP, &priv->flags) || @@ -715,29 +721,28 @@ void ipoib_mcast_send(struct net_device *dev, void *mgid, struct sk_buff *skb) out: if (mcast && mcast->ah) { - struct dst_entry *dst = skb_dst(skb); - struct neighbour *n = NULL; - - rcu_read_lock(); - if (dst) - n = dst_get_neighbour_noref(dst); - if (n && !*to_ipoib_neigh(n)) { - struct ipoib_neigh *neigh = ipoib_neigh_alloc(n, - skb->dev); - - if (neigh) { - kref_get(&mcast->ah->ref); - neigh->ah = mcast->ah; - list_add_tail(&neigh->list, &mcast->neigh_list); + if (n) { + if (!*to_ipoib_neigh(n)) { + struct ipoib_neigh *neigh; + + neigh = ipoib_neigh_alloc(n, skb->dev); + if (neigh) { + kref_get(&mcast->ah->ref); + neigh->ah = mcast->ah; + list_add_tail(&neigh->list, + &mcast->neigh_list); + } } + neigh_release(n); } - rcu_read_unlock(); spin_unlock_irqrestore(&priv->lock, flags); ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN); return; } unlock: + if (n) + neigh_release(n); spin_unlock_irqrestore(&priv->lock, flags); } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 37fdaf81bd1f..ce59824fb414 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2292,6 +2292,13 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct if (r) return r; + r = dm_pool_commit_metadata(pool->pmd); + if (r) { + DMERR("%s: dm_pool_commit_metadata() failed, error = %d", + __func__, r); + return r; + } + r = dm_pool_reserve_metadata_snap(pool->pmd); if (r) DMWARN("reserve_metadata_snap message failed."); diff --git a/drivers/md/md.c b/drivers/md/md.c index 1c2f9048e1ae..a4c219e3c859 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -5784,8 +5784,7 @@ static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info) super_types[mddev->major_version]. validate_super(mddev, rdev); if ((info->state & (1<<MD_DISK_SYNC)) && - (!test_bit(In_sync, &rdev->flags) || - rdev->raid_disk != info->raid_disk)) { + rdev->raid_disk != info->raid_disk) { /* This was a hot-add request, but events doesn't * match, so reject it. */ @@ -6751,7 +6750,7 @@ struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev thread->tsk = kthread_run(md_thread, thread, "%s_%s", mdname(thread->mddev), - name ?: mddev->pers->name); + name); if (IS_ERR(thread->tsk)) { kfree(thread); return NULL; @@ -7298,6 +7297,7 @@ void md_do_sync(struct mddev *mddev) int skipped = 0; struct md_rdev *rdev; char *desc; + struct blk_plug plug; /* just incase thread restarts... 
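Unlike dst_get_neighbour_noref(), dst_neigh_lookup_skb() hands back a referenced neighbour, which is why the ipoib paths above now call neigh_release() on both the normal and the early-unlock exits. A counter-based model of that acquire/release balance, with invented names:

#include <stdio.h>
#include <stdlib.h>

struct neigh { int refcnt; };

static struct neigh *neigh_lookup(void)    /* returns a held reference */
{
    struct neigh *n = malloc(sizeof(*n));

    if (n)
        n->refcnt = 1;
    return n;
}

static void neigh_release(struct neigh *n)
{
    if (n && --n->refcnt == 0)
        free(n);
}

/* Both the success path and the early-exit path must drop the
 * reference taken by the lookup, mirroring the ipoib changes. */
static int send_packet(int oper_up)
{
    struct neigh *n = neigh_lookup();

    if (!n || !oper_up)
        goto unlock;

    /* ... address and transmit the packet using n ... */
    neigh_release(n);
    return 0;

unlock:
    neigh_release(n);    /* tolerates NULL */
    return -1;
}

int main(void)
{
    printf("%d %d\n", send_packet(1), send_packet(0));
    return 0;
}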
*/ if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) @@ -7447,6 +7447,7 @@ void md_do_sync(struct mddev *mddev) } mddev->curr_resync_completed = j; + blk_start_plug(&plug); while (j < max_sectors) { sector_t sectors; @@ -7552,6 +7553,7 @@ void md_do_sync(struct mddev *mddev) * this also signals 'finished resyncing' to md_stop */ out: + blk_finish_plug(&plug); wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); /* tell personality that we are finished */ diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 9339e67fcc79..61a1833ebaf3 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c @@ -474,7 +474,8 @@ static int multipath_run (struct mddev *mddev) } { - mddev->thread = md_register_thread(multipathd, mddev, NULL); + mddev->thread = md_register_thread(multipathd, mddev, + "multipath"); if (!mddev->thread) { printk(KERN_ERR "multipath: couldn't allocate thread" " for %s\n", mdname(mddev)); diff --git a/drivers/md/persistent-data/dm-space-map-checker.c b/drivers/md/persistent-data/dm-space-map-checker.c index 50ed53bf4aa2..fc90c11620ad 100644 --- a/drivers/md/persistent-data/dm-space-map-checker.c +++ b/drivers/md/persistent-data/dm-space-map-checker.c @@ -8,6 +8,7 @@ #include <linux/device-mapper.h> #include <linux/export.h> +#include <linux/vmalloc.h> #ifdef CONFIG_DM_DEBUG_SPACE_MAPS @@ -89,13 +90,23 @@ static int ca_create(struct count_array *ca, struct dm_space_map *sm) ca->nr = nr_blocks; ca->nr_free = nr_blocks; - ca->counts = kzalloc(sizeof(*ca->counts) * nr_blocks, GFP_KERNEL); - if (!ca->counts) - return -ENOMEM; + + if (!nr_blocks) + ca->counts = NULL; + else { + ca->counts = vzalloc(sizeof(*ca->counts) * nr_blocks); + if (!ca->counts) + return -ENOMEM; + } return 0; } +static void ca_destroy(struct count_array *ca) +{ + vfree(ca->counts); +} + static int ca_load(struct count_array *ca, struct dm_space_map *sm) { int r; @@ -126,12 +137,14 @@ static int ca_load(struct count_array *ca, struct dm_space_map *sm) static int ca_extend(struct count_array *ca, dm_block_t extra_blocks) { dm_block_t nr_blocks = ca->nr + extra_blocks; - uint32_t *counts = kzalloc(sizeof(*counts) * nr_blocks, GFP_KERNEL); + uint32_t *counts = vzalloc(sizeof(*counts) * nr_blocks); if (!counts) return -ENOMEM; - memcpy(counts, ca->counts, sizeof(*counts) * ca->nr); - kfree(ca->counts); + if (ca->counts) { + memcpy(counts, ca->counts, sizeof(*counts) * ca->nr); + ca_destroy(ca); + } ca->nr = nr_blocks; ca->nr_free += extra_blocks; ca->counts = counts; @@ -151,11 +164,6 @@ static int ca_commit(struct count_array *old, struct count_array *new) return 0; } -static void ca_destroy(struct count_array *ca) -{ - kfree(ca->counts); -} - /*----------------------------------------------------------------*/ struct sm_checker { @@ -343,25 +351,25 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm) int r; struct sm_checker *smc; - if (!sm) - return NULL; + if (IS_ERR_OR_NULL(sm)) + return ERR_PTR(-EINVAL); smc = kmalloc(sizeof(*smc), GFP_KERNEL); if (!smc) - return NULL; + return ERR_PTR(-ENOMEM); memcpy(&smc->sm, &ops_, sizeof(smc->sm)); r = ca_create(&smc->old_counts, sm); if (r) { kfree(smc); - return NULL; + return ERR_PTR(r); } r = ca_create(&smc->counts, sm); if (r) { ca_destroy(&smc->old_counts); kfree(smc); - return NULL; + return ERR_PTR(r); } smc->real_sm = sm; @@ -371,7 +379,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm) ca_destroy(&smc->counts); ca_destroy(&smc->old_counts); kfree(smc); - return NULL; + return ERR_PTR(r); } 
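The checker's count array holds one u32 per data block, so it grows with the device (a 16 GiB device at 4 KiB blocks already needs a 16 MiB array); the hunks therefore move it from kzalloc(), which wants physically contiguous memory, to vzalloc(), which only needs virtually contiguous pages. A hedged kernel-style sketch of that allocation choice, with nr_blocks as a stand-in parameter:

#include <linux/types.h>
#include <linux/vmalloc.h>

static u32 *alloc_counts(u64 nr_blocks)
{
    if (!nr_blocks)
        return NULL;    /* empty device: nothing to track, as in ca_create() */
    return vzalloc(sizeof(u32) * nr_blocks);
}

static void free_counts(u32 *counts)
{
    vfree(counts);    /* vfree(NULL) is a no-op */
}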
r = ca_commit(&smc->old_counts, &smc->counts); @@ -379,7 +387,7 @@ struct dm_space_map *dm_sm_checker_create(struct dm_space_map *sm) ca_destroy(&smc->counts); ca_destroy(&smc->old_counts); kfree(smc); - return NULL; + return ERR_PTR(r); } return &smc->sm; @@ -391,25 +399,25 @@ struct dm_space_map *dm_sm_checker_create_fresh(struct dm_space_map *sm) int r; struct sm_checker *smc; - if (!sm) - return NULL; + if (IS_ERR_OR_NULL(sm)) + return ERR_PTR(-EINVAL); smc = kmalloc(sizeof(*smc), GFP_KERNEL); if (!smc) - return NULL; + return ERR_PTR(-ENOMEM); memcpy(&smc->sm, &ops_, sizeof(smc->sm)); r = ca_create(&smc->old_counts, sm); if (r) { kfree(smc); - return NULL; + return ERR_PTR(r); } r = ca_create(&smc->counts, sm); if (r) { ca_destroy(&smc->old_counts); kfree(smc); - return NULL; + return ERR_PTR(r); } smc->real_sm = sm; diff --git a/drivers/md/persistent-data/dm-space-map-disk.c b/drivers/md/persistent-data/dm-space-map-disk.c index fc469ba9f627..3d0ed5332883 100644 --- a/drivers/md/persistent-data/dm-space-map-disk.c +++ b/drivers/md/persistent-data/dm-space-map-disk.c @@ -290,7 +290,16 @@ struct dm_space_map *dm_sm_disk_create(struct dm_transaction_manager *tm, dm_block_t nr_blocks) { struct dm_space_map *sm = dm_sm_disk_create_real(tm, nr_blocks); - return dm_sm_checker_create_fresh(sm); + struct dm_space_map *smc; + + if (IS_ERR_OR_NULL(sm)) + return sm; + + smc = dm_sm_checker_create_fresh(sm); + if (IS_ERR(smc)) + dm_sm_destroy(sm); + + return smc; } EXPORT_SYMBOL_GPL(dm_sm_disk_create); diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c index 400fe144c0cd..e5604b32d91f 100644 --- a/drivers/md/persistent-data/dm-transaction-manager.c +++ b/drivers/md/persistent-data/dm-transaction-manager.c @@ -138,6 +138,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone); void dm_tm_destroy(struct dm_transaction_manager *tm) { + if (!tm->is_clone) + wipe_shadow_table(tm); + kfree(tm); } EXPORT_SYMBOL_GPL(dm_tm_destroy); @@ -344,8 +347,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm, } *sm = dm_sm_checker_create(inner); - if (!*sm) + if (IS_ERR(*sm)) { + r = PTR_ERR(*sm); goto bad2; + } } else { r = dm_bm_write_lock(dm_tm_get_bm(*tm), sb_location, @@ -364,8 +369,10 @@ static int dm_tm_create_internal(struct dm_block_manager *bm, } *sm = dm_sm_checker_create(inner); - if (!*sm) + if (IS_ERR(*sm)) { + r = PTR_ERR(*sm); goto bad2; + } } return 0; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a9c7981ddd24..8c2754f835ef 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -517,8 +517,8 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect int bad_sectors; int disk = start_disk + i; - if (disk >= conf->raid_disks) - disk -= conf->raid_disks; + if (disk >= conf->raid_disks * 2) + disk -= conf->raid_disks * 2; rdev = rcu_dereference(conf->mirrors[disk].rdev); if (r1_bio->bios[disk] == IO_BLOCKED @@ -883,7 +883,6 @@ static void make_request(struct mddev *mddev, struct bio * bio) const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); struct md_rdev *blocked_rdev; - int plugged; int first_clone; int sectors_handled; int max_sectors; @@ -1034,7 +1033,6 @@ read_again: * the bad blocks. Each set of writes gets it's own r1bio * with a set of bios attached. 
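The creation paths above switch from returning bare NULL to ERR_PTR() codes, so callers can tell -ENOMEM from an invalid argument and check with IS_ERR(). The convention imitated in userspace (assuming an LP64 platform, as the kernel does on 64-bit):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)
{
    /* top 4095 addresses encode errnos, like the kernel's MAX_ERRNO */
    return (unsigned long)p >= (unsigned long)-4095;
}

static void *checker_create(int fail_alloc)
{
    if (fail_alloc)
        return ERR_PTR(-ENOMEM);    /* used to be an uninformative NULL */
    return malloc(16);
}

int main(void)
{
    void *sm = checker_create(1);

    if (IS_ERR(sm))
        printf("create failed: %ld\n", PTR_ERR(sm));    /* -12 */
    else
        free(sm);
    return 0;
}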
*/ - plugged = mddev_check_plugged(mddev); disks = conf->raid_disks * 2; retry_write: @@ -1191,6 +1189,8 @@ read_again: bio_list_add(&conf->pending_bio_list, mbio); conf->pending_count++; spin_unlock_irqrestore(&conf->device_lock, flags); + if (!mddev_check_plugged(mddev)) + md_wakeup_thread(mddev->thread); } /* Mustn't call r1_bio_write_done before this next test, * as it could result in the bio being freed. @@ -1213,9 +1213,6 @@ read_again: /* In case raid1d snuck in to freeze_array */ wake_up(&conf->wait_barrier); - - if (do_sync || !bitmap || !plugged) - md_wakeup_thread(mddev->thread); } static void status(struct seq_file *seq, struct mddev *mddev) @@ -2621,7 +2618,7 @@ static struct r1conf *setup_conf(struct mddev *mddev) goto abort; } err = -ENOMEM; - conf->thread = md_register_thread(raid1d, mddev, NULL); + conf->thread = md_register_thread(raid1d, mddev, "raid1"); if (!conf->thread) { printk(KERN_ERR "md/raid1:%s: couldn't allocate thread\n", diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 99ae6068e456..8da6282254c3 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c @@ -1039,7 +1039,6 @@ static void make_request(struct mddev *mddev, struct bio * bio) const unsigned long do_fua = (bio->bi_rw & REQ_FUA); unsigned long flags; struct md_rdev *blocked_rdev; - int plugged; int sectors_handled; int max_sectors; int sectors; @@ -1239,7 +1238,6 @@ read_again: * of r10_bios is recored in bio->bi_phys_segments just as with * the read case. */ - plugged = mddev_check_plugged(mddev); r10_bio->read_slot = -1; /* make sure repl_bio gets freed */ raid10_find_phys(conf, r10_bio); @@ -1396,6 +1394,8 @@ retry_write: bio_list_add(&conf->pending_bio_list, mbio); conf->pending_count++; spin_unlock_irqrestore(&conf->device_lock, flags); + if (!mddev_check_plugged(mddev)) + md_wakeup_thread(mddev->thread); if (!r10_bio->devs[i].repl_bio) continue; @@ -1423,6 +1423,8 @@ retry_write: bio_list_add(&conf->pending_bio_list, mbio); conf->pending_count++; spin_unlock_irqrestore(&conf->device_lock, flags); + if (!mddev_check_plugged(mddev)) + md_wakeup_thread(mddev->thread); } /* Don't remove the bias on 'remaining' (one_write_done) until @@ -1448,9 +1450,6 @@ retry_write: /* In case raid10d snuck in to freeze_array */ wake_up(&conf->wait_barrier); - - if (do_sync || !mddev->bitmap || !plugged) - md_wakeup_thread(mddev->thread); } static void status(struct seq_file *seq, struct mddev *mddev) @@ -2310,7 +2309,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 if (r10_sync_page_io(rdev, r10_bio->devs[sl].addr + sect, - s<<9, conf->tmppage, WRITE) + s, conf->tmppage, WRITE) == 0) { /* Well, this device is dead */ printk(KERN_NOTICE @@ -2349,7 +2348,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10 switch (r10_sync_page_io(rdev, r10_bio->devs[sl].addr + sect, - s<<9, conf->tmppage, + s, conf->tmppage, READ)) { case 0: /* Well, this device is dead */ @@ -2512,7 +2511,7 @@ read_more: slot = r10_bio->read_slot; printk_ratelimited( KERN_ERR - "md/raid10:%s: %s: redirecting" + "md/raid10:%s: %s: redirecting " "sector %llu to another mirror\n", mdname(mddev), bdevname(rdev->bdev, b), @@ -2661,7 +2660,8 @@ static void raid10d(struct mddev *mddev) blk_start_plug(&plug); for (;;) { - flush_pending_writes(conf); + if (atomic_read(&mddev->plug_cnt) == 0) + flush_pending_writes(conf); spin_lock_irqsave(&conf->device_lock, flags); if (list_empty(head)) { @@ -2890,6 +2890,12 @@ static sector_t sync_request(struct mddev *mddev, sector_t 
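The fix_read_error() hunks drop the s<<9 shift because r10_sync_page_io() already takes a length in sectors; shifting first converted sectors to bytes and passed a value 512 times too large. In numbers:

#include <stdio.h>

int main(void)
{
    unsigned s = 8;    /* intended transfer length, in sectors */

    printf("intended sectors: %u\n", s);
    /* the old code passed bytes where sectors were expected */
    printf("buggy argument:   %u\n", s << 9);
    return 0;
}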
sector_nr, /* want to reconstruct this device */ rb2 = r10_bio; sect = raid10_find_virt(conf, sector_nr, i); + if (sect >= mddev->resync_max_sectors) { + /* last stripe is not complete - don't + * try to recover this sector. + */ + continue; + } /* Unless we are doing a full sync, or a replacement * we only need to recover the block if it is set in * the bitmap @@ -3421,7 +3427,7 @@ static struct r10conf *setup_conf(struct mddev *mddev) spin_lock_init(&conf->resync_lock); init_waitqueue_head(&conf->wait_barrier); - conf->thread = md_register_thread(raid10d, mddev, NULL); + conf->thread = md_register_thread(raid10d, mddev, "raid10"); if (!conf->thread) goto out; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index d26767246d26..04348d76bb30 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -196,12 +196,14 @@ static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) BUG_ON(!list_empty(&sh->lru)); BUG_ON(atomic_read(&conf->active_stripes)==0); if (test_bit(STRIPE_HANDLE, &sh->state)) { - if (test_bit(STRIPE_DELAYED, &sh->state)) + if (test_bit(STRIPE_DELAYED, &sh->state) && + !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) list_add_tail(&sh->lru, &conf->delayed_list); else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && sh->bm_seq - conf->seq_write > 0) list_add_tail(&sh->lru, &conf->bitmap_list); else { + clear_bit(STRIPE_DELAYED, &sh->state); clear_bit(STRIPE_BIT_DELAY, &sh->state); list_add_tail(&sh->lru, &conf->handle_list); } @@ -606,6 +608,12 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) * a chance*/ md_check_recovery(conf->mddev); } + /* + * Because md_wait_for_blocked_rdev + * will dec nr_pending, we must + * increment it first. + */ + atomic_inc(&rdev->nr_pending); md_wait_for_blocked_rdev(rdev, conf->mddev); } else { /* Acknowledged bad block - skip the write */ @@ -1737,6 +1745,7 @@ static void raid5_end_read_request(struct bio * bi, int error) } else { const char *bdn = bdevname(rdev->bdev, b); int retry = 0; + int set_bad = 0; clear_bit(R5_UPTODATE, &sh->dev[i].flags); atomic_inc(&rdev->read_errors); @@ -1748,7 +1757,8 @@ static void raid5_end_read_request(struct bio * bi, int error) mdname(conf->mddev), (unsigned long long)s, bdn); - else if (conf->mddev->degraded >= conf->max_degraded) + else if (conf->mddev->degraded >= conf->max_degraded) { + set_bad = 1; printk_ratelimited( KERN_WARNING "md/raid:%s: read error not correctable " @@ -1756,8 +1766,9 @@ static void raid5_end_read_request(struct bio * bi, int error) mdname(conf->mddev), (unsigned long long)s, bdn); - else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) + } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { /* Oh, no!!! */ + set_bad = 1; printk_ratelimited( KERN_WARNING "md/raid:%s: read error NOT corrected!! 
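The ops_run_io() comment above states the rule: md_wait_for_blocked_rdev() decrements nr_pending on return, so the caller must take an extra reference first or lose its own. Modeled with a bare counter instead of the real atomic_t:

#include <stdio.h>

static int nr_pending;

static void wait_for_blocked(void)
{
    /* ... would sleep until the device is unblocked ... */
    nr_pending--;    /* the helper consumes one reference on return */
}

int main(void)
{
    nr_pending = 1;    /* reference already held by the caller */

    nr_pending++;      /* take the reference the helper will consume */
    wait_for_blocked();

    printf("nr_pending after wait: %d (caller's ref intact)\n", nr_pending);
    return 0;
}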
" @@ -1765,7 +1776,7 @@ static void raid5_end_read_request(struct bio * bi, int error) mdname(conf->mddev), (unsigned long long)s, bdn); - else if (atomic_read(&rdev->read_errors) + } else if (atomic_read(&rdev->read_errors) > conf->max_nr_stripes) printk(KERN_WARNING "md/raid:%s: Too many read errors, failing device %s.\n", @@ -1777,7 +1788,11 @@ static void raid5_end_read_request(struct bio * bi, int error) else { clear_bit(R5_ReadError, &sh->dev[i].flags); clear_bit(R5_ReWrite, &sh->dev[i].flags); - md_error(conf->mddev, rdev); + if (!(set_bad + && test_bit(In_sync, &rdev->flags) + && rdev_set_badblocks( + rdev, sh->sector, STRIPE_SECTORS, 0))) + md_error(conf->mddev, rdev); } } rdev_dec_pending(rdev, conf->mddev); @@ -3582,8 +3597,18 @@ static void handle_stripe(struct stripe_head *sh) finish: /* wait for this device to become unblocked */ - if (conf->mddev->external && unlikely(s.blocked_rdev)) - md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); + if (unlikely(s.blocked_rdev)) { + if (conf->mddev->external) + md_wait_for_blocked_rdev(s.blocked_rdev, + conf->mddev); + else + /* Internal metadata will immediately + * be written by raid5d, so we don't + * need to wait here. + */ + rdev_dec_pending(s.blocked_rdev, + conf->mddev); + } if (s.handle_bad_blocks) for (i = disks; i--; ) { @@ -3881,8 +3906,6 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) raid_bio->bi_next = (void*)rdev; align_bi->bi_bdev = rdev->bdev; align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); - /* No reshape active, so we can trust rdev->data_offset */ - align_bi->bi_sector += rdev->data_offset; if (!bio_fits_rdev(align_bi) || is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9, @@ -3893,6 +3916,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) return 0; } + /* No reshape active, so we can trust rdev->data_offset */ + align_bi->bi_sector += rdev->data_offset; + spin_lock_irq(&conf->device_lock); wait_event_lock_irq(conf->wait_for_stripe, conf->quiesce == 0, @@ -3971,7 +3997,6 @@ static void make_request(struct mddev *mddev, struct bio * bi) struct stripe_head *sh; const int rw = bio_data_dir(bi); int remaining; - int plugged; if (unlikely(bi->bi_rw & REQ_FLUSH)) { md_flush_request(mddev, bi); @@ -3990,7 +4015,6 @@ static void make_request(struct mddev *mddev, struct bio * bi) bi->bi_next = NULL; bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ - plugged = mddev_check_plugged(mddev); for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { DEFINE_WAIT(w); int previous; @@ -4092,6 +4116,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) if ((bi->bi_rw & REQ_SYNC) && !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) atomic_inc(&conf->preread_active_stripes); + mddev_check_plugged(mddev); release_stripe(sh); } else { /* cannot get stripe for read-ahead, just give-up */ @@ -4099,10 +4124,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) finish_wait(&conf->wait_for_overlap, &w); break; } - } - if (!plugged) - md_wakeup_thread(mddev->thread); spin_lock_irq(&conf->device_lock); remaining = raid5_dec_bi_phys_segments(bi); @@ -4823,6 +4845,7 @@ static struct r5conf *setup_conf(struct mddev *mddev) int raid_disk, memory, max_disks; struct md_rdev *rdev; struct disk_info *disk; + char pers_name[6]; if (mddev->new_level != 5 && mddev->new_level != 4 @@ -4946,7 +4969,8 @@ static struct r5conf *setup_conf(struct mddev *mddev) printk(KERN_INFO "md/raid:%s: allocated %dkB\n", mdname(mddev), memory); 
- conf->thread = md_register_thread(raid5d, mddev, NULL); + sprintf(pers_name, "raid%d", mddev->new_level); + conf->thread = md_register_thread(raid5d, mddev, pers_name); if (!conf->thread) { printk(KERN_ERR "md/raid:%s: couldn't allocate thread.\n", @@ -5465,10 +5489,9 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) if (rdev->saved_raid_disk >= 0 && rdev->saved_raid_disk >= first && conf->disks[rdev->saved_raid_disk].rdev == NULL) - disk = rdev->saved_raid_disk; - else - disk = first; - for ( ; disk <= last ; disk++) { + first = rdev->saved_raid_disk; + + for (disk = first; disk <= last; disk++) { p = conf->disks + disk; if (p->rdev == NULL) { clear_bit(In_sync, &rdev->flags); @@ -5477,8 +5500,11 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) if (rdev->saved_raid_disk != disk) conf->fullsync = 1; rcu_assign_pointer(p->rdev, rdev); - break; + goto out; } + } + for (disk = first; disk <= last; disk++) { + p = conf->disks + disk; if (test_bit(WantReplacement, &p->rdev->flags) && p->replacement == NULL) { clear_bit(In_sync, &rdev->flags); @@ -5490,6 +5516,7 @@ static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) break; } } +out: print_raid5_conf(conf); return err; } diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 9eb7624639b9..1901da153312 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -6250,7 +6250,7 @@ bnx2_enable_msix(struct bnx2 *bp, int msix_vecs) static int bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi) { - int cpus = num_online_cpus(); + int cpus = netif_get_num_default_rss_queues(); int msix_vecs; if (!bp->num_req_rx_rings) diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h index daa894bd772a..53659f321d51 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h @@ -822,7 +822,8 @@ static inline int bnx2x_calc_num_queues(struct bnx2x *bp) { return num_queues ? min_t(int, num_queues, BNX2X_MAX_QUEUES(bp)) : - min_t(int, num_online_cpus(), BNX2X_MAX_QUEUES(bp)); + min_t(int, netif_get_num_default_rss_queues(), + BNX2X_MAX_QUEUES(bp)); } static inline void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp) diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index e47ff8be1d7b..6cbab0369738 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -9908,7 +9908,7 @@ static bool tg3_enable_msix(struct tg3 *tp) int i, rc; struct msix_entry msix_ent[tp->irq_max]; - tp->irq_cnt = num_online_cpus(); + tp->irq_cnt = netif_get_num_default_rss_queues(); if (tp->irq_cnt > 1) { /* We want as many rx rings enabled as there are cpus. 
* In multiqueue MSI-X mode, the first MSI-X vector diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index abb6ce7c1b7e..9b0874957df5 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -3050,7 +3050,7 @@ static struct pci_error_handlers t3_err_handler = { static void set_nqsets(struct adapter *adap) { int i, j = 0; - int num_cpus = num_online_cpus(); + int num_cpus = netif_get_num_default_rss_queues(); int hwports = adap->params.nports; int nqsets = adap->msix_nvectors - 1; diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c index 55cf72af69ce..2dbbcbb450d3 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_offload.c @@ -62,7 +62,9 @@ static const unsigned int MAX_ATIDS = 64 * 1024; static const unsigned int ATID_BASE = 0x10000; static void cxgb_neigh_update(struct neighbour *neigh); -static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new); +static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh, + struct dst_entry *new, struct neighbour *new_neigh, + const void *daddr); static inline int offload_activated(struct t3cdev *tdev) { @@ -968,8 +970,10 @@ static int nb_callback(struct notifier_block *self, unsigned long event, } case (NETEVENT_REDIRECT):{ struct netevent_redirect *nr = ctx; - cxgb_redirect(nr->old, nr->new); - cxgb_neigh_update(dst_get_neighbour_noref(nr->new)); + cxgb_redirect(nr->old, nr->old_neigh, + nr->new, nr->new_neigh, + nr->daddr); + cxgb_neigh_update(nr->new_neigh); break; } default: @@ -1107,10 +1111,11 @@ static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e) tdev->send(tdev, skb); } -static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) +static void cxgb_redirect(struct dst_entry *old, struct neighbour *old_neigh, + struct dst_entry *new, struct neighbour *new_neigh, + const void *daddr) { struct net_device *olddev, *newdev; - struct neighbour *n; struct tid_info *ti; struct t3cdev *tdev; u32 tid; @@ -1118,15 +1123,8 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) struct l2t_entry *e; struct t3c_tid_entry *te; - n = dst_get_neighbour_noref(old); - if (!n) - return; - olddev = n->dev; - - n = dst_get_neighbour_noref(new); - if (!n) - return; - newdev = n->dev; + olddev = old_neigh->dev; + newdev = new_neigh->dev; if (!is_offloading(olddev)) return; @@ -1144,7 +1142,7 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new) } /* Add new L2T entry */ - e = t3_l2t_get(tdev, new, newdev); + e = t3_l2t_get(tdev, new, newdev, daddr); if (!e) { printk(KERN_ERR "%s: couldn't allocate new l2t entry!\n", __func__); diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.c b/drivers/net/ethernet/chelsio/cxgb3/l2t.c index 3fa3c8833ed7..8d53438638b2 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.c @@ -299,7 +299,7 @@ static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh) } struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, - struct net_device *dev) + struct net_device *dev, const void *daddr) { struct l2t_entry *e = NULL; struct neighbour *neigh; @@ -311,7 +311,7 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, int smt_idx; rcu_read_lock(); - neigh = dst_get_neighbour_noref(dst); + neigh = 
dst_neigh_lookup(dst, daddr); if (!neigh) goto done_rcu; @@ -360,6 +360,8 @@ struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, done_unlock: write_unlock_bh(&d->lock); done_rcu: + if (neigh) + neigh_release(neigh); rcu_read_unlock(); return e; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h index c4e864369751..8cffcdfd5678 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h +++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h @@ -110,7 +110,7 @@ static inline void set_arp_failure_handler(struct sk_buff *skb, void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e); void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh); struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst, - struct net_device *dev); + struct net_device *dev, const void *daddr); int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb, struct l2t_entry *e); void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index e1f96fbb48c1..5ed49af23d6a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3493,8 +3493,8 @@ static void __devinit cfg_queues(struct adapter *adap) */ if (n10g) q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g; - if (q10g > num_online_cpus()) - q10g = num_online_cpus(); + if (q10g > netif_get_num_default_rss_queues()) + q10g = netif_get_num_default_rss_queues(); for_each_port(adap, i) { struct port_info *pi = adap2pinfo(adap, i); diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index edce7af97c87..2141bd784751 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -2172,12 +2172,14 @@ static void be_msix_disable(struct be_adapter *adapter) static uint be_num_rss_want(struct be_adapter *adapter) { + u32 num = 0; if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) && !sriov_want(adapter) && be_physfn(adapter) && - !be_is_mc(adapter)) - return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; - else - return 0; + !be_is_mc(adapter)) { + num = (adapter->be3_native) ? 
BE3_MAX_RSS_QS : BE2_MAX_RSS_QS; + num = min_t(u32, num, (u32)netif_get_num_default_rss_queues()); + } + return num; } static void be_msix_enable(struct be_adapter *adapter) diff --git a/drivers/net/ethernet/intel/e1000e/defines.h b/drivers/net/ethernet/intel/e1000e/defines.h index 351a4097b2ba..76edbc1be33b 100644 --- a/drivers/net/ethernet/intel/e1000e/defines.h +++ b/drivers/net/ethernet/intel/e1000e/defines.h @@ -103,6 +103,7 @@ #define E1000_RXD_ERR_SEQ 0x04 /* Sequence Error */ #define E1000_RXD_ERR_CXE 0x10 /* Carrier Extension Error */ #define E1000_RXD_ERR_TCPE 0x20 /* TCP/UDP Checksum Error */ +#define E1000_RXD_ERR_IPE 0x40 /* IP Checksum Error */ #define E1000_RXD_ERR_RXE 0x80 /* Rx Data Error */ #define E1000_RXD_SPC_VLAN_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index ba86b3f8a404..a166efc2fead 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -496,7 +496,7 @@ static void e1000_receive_skb(struct e1000_adapter *adapter, * @sk_buff: socket buffer with received data **/ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, - __le16 csum, struct sk_buff *skb) + struct sk_buff *skb) { u16 status = (u16)status_err; u8 errors = (u8)(status_err >> 24); @@ -511,8 +511,8 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, if (status & E1000_RXD_STAT_IXSM) return; - /* TCP/UDP checksum error bit is set */ - if (errors & E1000_RXD_ERR_TCPE) { + /* TCP/UDP checksum error bit or IP checksum error bit is set */ + if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) { /* let the stack verify checksum errors */ adapter->hw_csum_err++; return; @@ -523,19 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err, return; /* It must be a TCP or UDP packet with a valid checksum */ - if (status & E1000_RXD_STAT_TCPCS) { - /* TCP checksum is good */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else { - /* - * IP fragment with UDP payload - * Hardware complements the payload checksum, so we undo it - * and then put the value in host order for further stack use. - */ - __sum16 sum = (__force __sum16)swab16((__force u16)csum); - skb->csum = csum_unfold(~sum); - skb->ip_summed = CHECKSUM_COMPLETE; - } + skb->ip_summed = CHECKSUM_UNNECESSARY; adapter->hw_csum_good++; } @@ -954,8 +942,7 @@ static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done, skb_put(skb, length); /* Receive Checksum Offload */ - e1000_rx_checksum(adapter, staterr, - rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); + e1000_rx_checksum(adapter, staterr, skb); e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); @@ -1341,8 +1328,7 @@ copydone: total_rx_bytes += skb->len; total_rx_packets++; - e1000_rx_checksum(adapter, staterr, - rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); + e1000_rx_checksum(adapter, staterr, skb); e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); @@ -1512,9 +1498,8 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done, } } - /* Receive Checksum Offload XXX recompute due to CRC strip? 
*/ - e1000_rx_checksum(adapter, staterr, - rx_desc->wb.lower.hi_dword.csum_ip.csum, skb); + /* Receive Checksum Offload */ + e1000_rx_checksum(adapter, staterr, skb); e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb); @@ -3098,19 +3083,10 @@ static void e1000_configure_rx(struct e1000_adapter *adapter) /* Enable Receive Checksum Offload for TCP and UDP */ rxcsum = er32(RXCSUM); - if (adapter->netdev->features & NETIF_F_RXCSUM) { + if (adapter->netdev->features & NETIF_F_RXCSUM) rxcsum |= E1000_RXCSUM_TUOFL; - - /* - * IPv4 payload checksum for UDP fragments must be - * used in conjunction with packet-split. - */ - if (adapter->rx_ps_pages) - rxcsum |= E1000_RXCSUM_IPPCSE; - } else { + else rxcsum &= ~E1000_RXCSUM_TUOFL; - /* no need to clear IPPCSE as it defaults to 0 */ - } ew32(RXCSUM, rxcsum); if (adapter->hw.mac.type == e1000_pch2lan) { @@ -5241,22 +5217,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu) int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; /* Jumbo frame support */ - if (max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) { - if (!(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { - e_err("Jumbo Frames not supported.\n"); - return -EINVAL; - } - - /* - * IP payload checksum (enabled with jumbos/packet-split when - * Rx checksum is enabled) and generation of RSS hash is - * mutually exclusive in the hardware. - */ - if ((netdev->features & NETIF_F_RXCSUM) && - (netdev->features & NETIF_F_RXHASH)) { - e_err("Jumbo frames cannot be enabled when both receive checksum offload and receive hashing are enabled. Disable one of the receive offload features before enabling jumbos.\n"); - return -EINVAL; - } + if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) && + !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) { + e_err("Jumbo Frames not supported.\n"); + return -EINVAL; } /* Supported frame sizes */ @@ -6030,17 +5994,6 @@ static int e1000_set_features(struct net_device *netdev, NETIF_F_RXALL))) return 0; - /* - * IP payload checksum (enabled with jumbos/packet-split when Rx - * checksum is enabled) and generation of RSS hash is mutually - * exclusive in the hardware. - */ - if (adapter->rx_ps_pages && - (features & NETIF_F_RXCSUM) && (features & NETIF_F_RXHASH)) { - e_err("Enabling both receive checksum offload and receive hashing is not possible with jumbo frames. 
Disable jumbos or enable only one of the receive offload features.\n"); - return -EINVAL; - } - if (changed & NETIF_F_RXFCS) { if (features & NETIF_F_RXFCS) { adapter->flags2 &= ~FLAG2_CRC_STRIPPING; diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index 8ce67064b9c5..90eef07943f4 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -357,21 +357,28 @@ static int igbvf_set_coalesce(struct net_device *netdev, struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; - if ((ec->rx_coalesce_usecs > IGBVF_MAX_ITR_USECS) || - ((ec->rx_coalesce_usecs > 3) && - (ec->rx_coalesce_usecs < IGBVF_MIN_ITR_USECS)) || - (ec->rx_coalesce_usecs == 2)) - return -EINVAL; - - /* convert to rate of irq's per second */ - if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { + if ((ec->rx_coalesce_usecs >= IGBVF_MIN_ITR_USECS) && + (ec->rx_coalesce_usecs <= IGBVF_MAX_ITR_USECS)) { + adapter->current_itr = ec->rx_coalesce_usecs << 2; + adapter->requested_itr = 1000000000 / + (adapter->current_itr * 256); + } else if ((ec->rx_coalesce_usecs == 3) || + (ec->rx_coalesce_usecs == 2)) { adapter->current_itr = IGBVF_START_ITR; adapter->requested_itr = ec->rx_coalesce_usecs; - } else { - adapter->current_itr = ec->rx_coalesce_usecs << 2; + } else if (ec->rx_coalesce_usecs == 0) { + /* + * The user's desire is to turn off interrupt throttling + * altogether, but due to HW limitations, we can't do that. + * Instead we set a very small value in EITR, which would + * allow ~967k interrupts per second, but allow the adapter's + * internal clocking to still function properly. + */ + adapter->current_itr = 4; adapter->requested_itr = 1000000000 / (adapter->current_itr * 256); - } + } else + return -EINVAL; writel(adapter->current_itr, hw->hw_addr + adapter->rx_ring->itr_register); diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c index 842c8ce9494e..7e94987d030c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c @@ -1080,6 +1080,25 @@ static struct mlx4_cmd_info cmd_info[] = { .verify = NULL, .wrapper = NULL }, + /* flow steering commands */ + { + .opcode = MLX4_QP_FLOW_STEERING_ATTACH, + .has_inbox = true, + .has_outbox = false, + .out_is_imm = true, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QP_FLOW_STEERING_ATTACH_wrapper + }, + { + .opcode = MLX4_QP_FLOW_STEERING_DETACH, + .has_inbox = false, + .has_outbox = false, + .out_is_imm = false, + .encode_slave_id = false, + .verify = NULL, + .wrapper = mlx4_QP_FLOW_STEERING_DETACH_wrapper + }, }; static int mlx4_master_process_vhcr(struct mlx4_dev *dev, int slave, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 72901ce2b088..dd6a77b21149 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -38,6 +38,10 @@ #include "mlx4_en.h" #include "en_port.h" +#define EN_ETHTOOL_QP_ATTACH (1ull << 63) +#define EN_ETHTOOL_MAC_MASK 0xffffffffffffULL +#define EN_ETHTOOL_SHORT_MASK cpu_to_be16(0xffff) +#define EN_ETHTOOL_WORD_MASK cpu_to_be32(0xffffffff) static void mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) @@ -599,16 +603,369 @@ static int mlx4_en_set_rxfh_indir(struct net_device *dev, return err; } +#define all_zeros_or_all_ones(field) \ + ((field) == 0 || (field) == 
(__force typeof(field))-1) + +static int mlx4_en_validate_flow(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_usrip4_spec *l3_mask; + struct ethtool_tcpip4_spec *l4_mask; + struct ethhdr *eth_mask; + u64 full_mac = ~0ull; + u64 zero_mac = 0; + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + switch (cmd->fs.flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + if (cmd->fs.m_u.tcp_ip4_spec.tos) + return -EINVAL; + l4_mask = &cmd->fs.m_u.tcp_ip4_spec; + /* don't allow mask which isn't all 0 or 1 */ + if (!all_zeros_or_all_ones(l4_mask->ip4src) || + !all_zeros_or_all_ones(l4_mask->ip4dst) || + !all_zeros_or_all_ones(l4_mask->psrc) || + !all_zeros_or_all_ones(l4_mask->pdst)) + return -EINVAL; + break; + case IP_USER_FLOW: + l3_mask = &cmd->fs.m_u.usr_ip4_spec; + if (l3_mask->l4_4_bytes || l3_mask->tos || l3_mask->proto || + cmd->fs.h_u.usr_ip4_spec.ip_ver != ETH_RX_NFC_IP4 || + (!l3_mask->ip4src && !l3_mask->ip4dst) || + !all_zeros_or_all_ones(l3_mask->ip4src) || + !all_zeros_or_all_ones(l3_mask->ip4dst)) + return -EINVAL; + break; + case ETHER_FLOW: + eth_mask = &cmd->fs.m_u.ether_spec; + /* source mac mask must not be set */ + if (memcmp(eth_mask->h_source, &zero_mac, ETH_ALEN)) + return -EINVAL; + + /* dest mac mask must be ff:ff:ff:ff:ff:ff */ + if (memcmp(eth_mask->h_dest, &full_mac, ETH_ALEN)) + return -EINVAL; + + if (!all_zeros_or_all_ones(eth_mask->h_proto)) + return -EINVAL; + break; + default: + return -EINVAL; + } + + if ((cmd->fs.flow_type & FLOW_EXT)) { + if (cmd->fs.m_ext.vlan_etype || + !(cmd->fs.m_ext.vlan_tci == 0 || + cmd->fs.m_ext.vlan_tci == cpu_to_be16(0xfff))) + return -EINVAL; + } + + return 0; +} + +static int add_ip_rule(struct mlx4_en_priv *priv, + struct ethtool_rxnfc *cmd, + struct list_head *list_h) +{ + struct mlx4_spec_list *spec_l3; + struct ethtool_usrip4_spec *l3_mask = &cmd->fs.m_u.usr_ip4_spec; + + spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL); + if (!spec_l3) { + en_err(priv, "Fail to alloc ethtool rule.\n"); + return -ENOMEM; + } + + spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4; + spec_l3->ipv4.src_ip = cmd->fs.h_u.usr_ip4_spec.ip4src; + if (l3_mask->ip4src) + spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK; + spec_l3->ipv4.dst_ip = cmd->fs.h_u.usr_ip4_spec.ip4dst; + if (l3_mask->ip4dst) + spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK; + list_add_tail(&spec_l3->list, list_h); + + return 0; +} + +static int add_tcp_udp_rule(struct mlx4_en_priv *priv, + struct ethtool_rxnfc *cmd, + struct list_head *list_h, int proto) +{ + struct mlx4_spec_list *spec_l3; + struct mlx4_spec_list *spec_l4; + struct ethtool_tcpip4_spec *l4_mask = &cmd->fs.m_u.tcp_ip4_spec; + + spec_l3 = kzalloc(sizeof *spec_l3, GFP_KERNEL); + spec_l4 = kzalloc(sizeof *spec_l4, GFP_KERNEL); + if (!spec_l4 || !spec_l3) { + en_err(priv, "Fail to alloc ethtool rule.\n"); + kfree(spec_l3); + kfree(spec_l4); + return -ENOMEM; + } + + spec_l3->id = MLX4_NET_TRANS_RULE_ID_IPV4; + + if (proto == TCP_V4_FLOW) { + spec_l4->id = MLX4_NET_TRANS_RULE_ID_TCP; + spec_l3->ipv4.src_ip = cmd->fs.h_u.tcp_ip4_spec.ip4src; + spec_l3->ipv4.dst_ip = cmd->fs.h_u.tcp_ip4_spec.ip4dst; + spec_l4->tcp_udp.src_port = cmd->fs.h_u.tcp_ip4_spec.psrc; + spec_l4->tcp_udp.dst_port = cmd->fs.h_u.tcp_ip4_spec.pdst; + } else { + spec_l4->id = MLX4_NET_TRANS_RULE_ID_UDP; + spec_l3->ipv4.src_ip = cmd->fs.h_u.udp_ip4_spec.ip4src; + spec_l3->ipv4.dst_ip = cmd->fs.h_u.udp_ip4_spec.ip4dst; + spec_l4->tcp_udp.src_port = cmd->fs.h_u.udp_ip4_spec.psrc; + spec_l4->tcp_udp.dst_port = 
cmd->fs.h_u.udp_ip4_spec.pdst; + } + + if (l4_mask->ip4src) + spec_l3->ipv4.src_ip_msk = EN_ETHTOOL_WORD_MASK; + if (l4_mask->ip4dst) + spec_l3->ipv4.dst_ip_msk = EN_ETHTOOL_WORD_MASK; + + if (l4_mask->psrc) + spec_l4->tcp_udp.src_port_msk = EN_ETHTOOL_SHORT_MASK; + if (l4_mask->pdst) + spec_l4->tcp_udp.dst_port_msk = EN_ETHTOOL_SHORT_MASK; + + list_add_tail(&spec_l3->list, list_h); + list_add_tail(&spec_l4->list, list_h); + + return 0; +} + +static int mlx4_en_ethtool_to_net_trans_rule(struct net_device *dev, + struct ethtool_rxnfc *cmd, + struct list_head *rule_list_h) +{ + int err; + u64 mac; + __be64 be_mac; + struct ethhdr *eth_spec; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_spec_list *spec_l2; + __be64 mac_msk = cpu_to_be64(EN_ETHTOOL_MAC_MASK << 16); + + err = mlx4_en_validate_flow(dev, cmd); + if (err) + return err; + + spec_l2 = kzalloc(sizeof *spec_l2, GFP_KERNEL); + if (!spec_l2) + return -ENOMEM; + + mac = priv->mac & EN_ETHTOOL_MAC_MASK; + be_mac = cpu_to_be64(mac << 16); + + spec_l2->id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec_l2->eth.dst_mac_msk, &mac_msk, ETH_ALEN); + if ((cmd->fs.flow_type & ~FLOW_EXT) != ETHER_FLOW) + memcpy(spec_l2->eth.dst_mac, &be_mac, ETH_ALEN); + + if ((cmd->fs.flow_type & FLOW_EXT) && cmd->fs.m_ext.vlan_tci) { + spec_l2->eth.vlan_id = cmd->fs.h_ext.vlan_tci; + spec_l2->eth.vlan_id_msk = cpu_to_be16(0xfff); + } + + list_add_tail(&spec_l2->list, rule_list_h); + + switch (cmd->fs.flow_type & ~FLOW_EXT) { + case ETHER_FLOW: + eth_spec = &cmd->fs.h_u.ether_spec; + memcpy(&spec_l2->eth.dst_mac, eth_spec->h_dest, ETH_ALEN); + spec_l2->eth.ether_type = eth_spec->h_proto; + if (eth_spec->h_proto) + spec_l2->eth.ether_type_enable = 1; + break; + case IP_USER_FLOW: + err = add_ip_rule(priv, cmd, rule_list_h); + break; + case TCP_V4_FLOW: + err = add_tcp_udp_rule(priv, cmd, rule_list_h, TCP_V4_FLOW); + break; + case UDP_V4_FLOW: + err = add_tcp_udp_rule(priv, cmd, rule_list_h, UDP_V4_FLOW); + break; + } + + return err; +} + +static int mlx4_en_flow_replace(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + int err; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct ethtool_flow_id *loc_rule; + struct mlx4_spec_list *spec, *tmp_spec; + u32 qpn; + u64 reg_id; + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_PROMISC_NONE, + }; + + rule.port = priv->port; + rule.priority = MLX4_DOMAIN_ETHTOOL | cmd->fs.location; + INIT_LIST_HEAD(&rule.list); + + /* Allow direct QP attaches if the EN_ETHTOOL_QP_ATTACH flag is set */ + if (cmd->fs.ring_cookie == RX_CLS_FLOW_DISC) + qpn = priv->drop_qp.qpn; + else if (cmd->fs.ring_cookie & EN_ETHTOOL_QP_ATTACH) { + qpn = cmd->fs.ring_cookie & (EN_ETHTOOL_QP_ATTACH - 1); + } else { + if (cmd->fs.ring_cookie >= priv->rx_ring_num) { + en_warn(priv, "rxnfc: RX ring (%llu) doesn't exist.\n", + cmd->fs.ring_cookie); + return -EINVAL; + } + qpn = priv->rss_map.qps[cmd->fs.ring_cookie].qpn; + if (!qpn) { + en_warn(priv, "rxnfc: RX ring (%llu) is inactive.\n", + cmd->fs.ring_cookie); + return -EINVAL; + } + } + rule.qpn = qpn; + err = mlx4_en_ethtool_to_net_trans_rule(dev, cmd, &rule.list); + if (err) + goto out_free_list; + + loc_rule = &priv->ethtool_rules[cmd->fs.location]; + if (loc_rule->id) { + err = mlx4_flow_detach(priv->mdev->dev, loc_rule->id); + if (err) { + en_err(priv, "Fail to detach network rule at location %d. 
registration id = %llx\n", + cmd->fs.location, loc_rule->id); + goto out_free_list; + } + loc_rule->id = 0; + memset(&loc_rule->flow_spec, 0, + sizeof(struct ethtool_rx_flow_spec)); + } + err = mlx4_flow_attach(priv->mdev->dev, &rule, &reg_id); + if (err) { + en_err(priv, "Fail to attach network rule at location %d.\n", + cmd->fs.location); + goto out_free_list; + } + loc_rule->id = reg_id; + memcpy(&loc_rule->flow_spec, &cmd->fs, + sizeof(struct ethtool_rx_flow_spec)); + +out_free_list: + list_for_each_entry_safe(spec, tmp_spec, &rule.list, list) { + list_del(&spec->list); + kfree(spec); + } + return err; +} + +static int mlx4_en_flow_detach(struct net_device *dev, + struct ethtool_rxnfc *cmd) +{ + int err = 0; + struct ethtool_flow_id *rule; + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (cmd->fs.location >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->ethtool_rules[cmd->fs.location]; + if (!rule->id) { + err = -ENOENT; + goto out; + } + + err = mlx4_flow_detach(priv->mdev->dev, rule->id); + if (err) { + en_err(priv, "Fail to detach network rule at location %d. registration id = 0x%llx\n", + cmd->fs.location, rule->id); + goto out; + } + rule->id = 0; + memset(&rule->flow_spec, 0, sizeof(struct ethtool_rx_flow_spec)); +out: + return err; + +} + +static int mlx4_en_get_flow(struct net_device *dev, struct ethtool_rxnfc *cmd, + int loc) +{ + int err = 0; + struct ethtool_flow_id *rule; + struct mlx4_en_priv *priv = netdev_priv(dev); + + if (loc < 0 || loc >= MAX_NUM_OF_FS_RULES) + return -EINVAL; + + rule = &priv->ethtool_rules[loc]; + if (rule->id) + memcpy(&cmd->fs, &rule->flow_spec, + sizeof(struct ethtool_rx_flow_spec)); + else + err = -ENOENT; + + return err; +} + +static int mlx4_en_get_num_flows(struct mlx4_en_priv *priv) +{ + + int i, res = 0; + for (i = 0; i < MAX_NUM_OF_FS_RULES; i++) { + if (priv->ethtool_rules[i].id) + res++; + } + return res; + +} + static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; int err = 0; + int i = 0, priority = 0; + + if ((cmd->cmd == ETHTOOL_GRXCLSRLCNT || + cmd->cmd == ETHTOOL_GRXCLSRULE || + cmd->cmd == ETHTOOL_GRXCLSRLALL) && + mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) + return -EINVAL; switch (cmd->cmd) { case ETHTOOL_GRXRINGS: cmd->data = priv->rx_ring_num; break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = mlx4_en_get_num_flows(priv); + break; + case ETHTOOL_GRXCLSRULE: + err = mlx4_en_get_flow(dev, cmd, cmd->fs.location); + break; + case ETHTOOL_GRXCLSRLALL: + while ((!err || err == -ENOENT) && priority < cmd->rule_cnt) { + err = mlx4_en_get_flow(dev, cmd, i); + if (!err) + rule_locs[priority++] = i; + i++; + } + err = 0; + break; default: err = -EOPNOTSUPP; break; @@ -617,6 +974,30 @@ static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, return err; } +static int mlx4_en_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + int err = 0; + struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_dev *mdev = priv->mdev; + + if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_DEVICE_MANAGED) + return -EINVAL; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = mlx4_en_flow_replace(dev, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + err = mlx4_en_flow_detach(dev, cmd); + break; + default: + en_warn(priv, "Unsupported ethtool command.
(%d)\n", cmd->cmd); + return -EINVAL; + } + + return err; +} + const struct ethtool_ops mlx4_en_ethtool_ops = { .get_drvinfo = mlx4_en_get_drvinfo, .get_settings = mlx4_en_get_settings, @@ -637,6 +1018,7 @@ const struct ethtool_ops mlx4_en_ethtool_ops = { .get_ringparam = mlx4_en_get_ringparam, .set_ringparam = mlx4_en_set_ringparam, .get_rxnfc = mlx4_en_get_rxnfc, + .set_rxnfc = mlx4_en_set_rxnfc, .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size, .get_rxfh_indir = mlx4_en_get_rxfh_indir, .set_rxfh_indir = mlx4_en_set_rxfh_indir, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 073b85b45fc5..94375a8c6d42 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -170,33 +170,81 @@ static void mlx4_en_do_set_mac(struct work_struct *work) static void mlx4_en_clear_list(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); + struct mlx4_en_mc_list *tmp, *mc_to_del; - kfree(priv->mc_addrs); - priv->mc_addrs = NULL; - priv->mc_addrs_cnt = 0; + list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { + list_del(&mc_to_del->list); + kfree(mc_to_del); + } } static void mlx4_en_cache_mclist(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct netdev_hw_addr *ha; - char *mc_addrs; - int mc_addrs_cnt = netdev_mc_count(dev); - int i; + struct mlx4_en_mc_list *tmp; - mc_addrs = kmalloc(mc_addrs_cnt * ETH_ALEN, GFP_ATOMIC); - if (!mc_addrs) { - en_err(priv, "failed to allocate multicast list\n"); - return; - } - i = 0; - netdev_for_each_mc_addr(ha, dev) - memcpy(mc_addrs + i++ * ETH_ALEN, ha->addr, ETH_ALEN); mlx4_en_clear_list(dev); - priv->mc_addrs = mc_addrs; - priv->mc_addrs_cnt = mc_addrs_cnt; + netdev_for_each_mc_addr(ha, dev) { + tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC); + if (!tmp) { + en_err(priv, "failed to allocate multicast list\n"); + mlx4_en_clear_list(dev); + return; + } + memcpy(tmp->addr, ha->addr, ETH_ALEN); + list_add_tail(&tmp->list, &priv->mc_list); + } } +static void update_mclist_flags(struct mlx4_en_priv *priv, + struct list_head *dst, + struct list_head *src) +{ + struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc; + bool found; + + /* Find all the entries that should be removed from dst, + * These are the entries that are not found in src + */ + list_for_each_entry(dst_tmp, dst, list) { + found = false; + list_for_each_entry(src_tmp, src, list) { + if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) { + found = true; + break; + } + } + if (!found) + dst_tmp->action = MCLIST_REM; + } + + /* Add entries that exist in src but not in dst + * mark them as need to add + */ + list_for_each_entry(src_tmp, src, list) { + found = false; + list_for_each_entry(dst_tmp, dst, list) { + if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) { + dst_tmp->action = MCLIST_NONE; + found = true; + break; + } + } + if (!found) { + new_mc = kmalloc(sizeof(struct mlx4_en_mc_list), + GFP_KERNEL); + if (!new_mc) { + en_err(priv, "Failed to allocate current multicast list\n"); + return; + } + memcpy(new_mc, src_tmp, + sizeof(struct mlx4_en_mc_list)); + new_mc->action = MCLIST_ADD; + list_add_tail(&new_mc->list, dst); + } + } +} static void mlx4_en_set_multicast(struct net_device *dev) { @@ -214,9 +262,10 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) mcast_task); struct mlx4_en_dev *mdev = priv->mdev; struct net_device *dev = priv->dev; + struct mlx4_en_mc_list *mclist, *tmp; u64 mcast_addr = 0; 
u8 mc_list[16] = {0}; - int err; + int err = 0; mutex_lock(&mdev->state_lock); if (!mdev->device_up) { @@ -251,16 +300,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) priv->flags |= MLX4_EN_FLAG_PROMISC; /* Enable promiscouos mode */ - if (!(mdev->dev->caps.flags & - MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) - err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, - priv->base_qpn, 1); - else - err = mlx4_unicast_promisc_add(mdev->dev, priv->base_qpn, + switch (mdev->dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + err = mlx4_flow_steer_promisc_add(mdev->dev, + priv->port, + priv->base_qpn, + MLX4_FS_PROMISC_UPLINK); + if (err) + en_err(priv, "Failed enabling promiscuous mode\n"); + priv->flags |= MLX4_EN_FLAG_MC_PROMISC; + break; + + case MLX4_STEERING_MODE_B0: + err = mlx4_unicast_promisc_add(mdev->dev, + priv->base_qpn, priv->port); - if (err) - en_err(priv, "Failed enabling " - "promiscuous mode\n"); + if (err) + en_err(priv, "Failed enabling unicast promiscuous mode\n"); + + /* Add the default qp number as multicast + * promisc + */ + if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { + err = mlx4_multicast_promisc_add(mdev->dev, + priv->base_qpn, + priv->port); + if (err) + en_err(priv, "Failed enabling multicast promiscuous mode\n"); + priv->flags |= MLX4_EN_FLAG_MC_PROMISC; + } + break; + + case MLX4_STEERING_MODE_A0: + err = mlx4_SET_PORT_qpn_calc(mdev->dev, + priv->port, + priv->base_qpn, + 1); + if (err) + en_err(priv, "Failed enabling promiscuous mode\n"); + break; + } /* Disable port multicast filter (unconditionally) */ err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, @@ -269,15 +348,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) en_err(priv, "Failed disabling " "multicast filter\n"); - /* Add the default qp number as multicast promisc */ - if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { - err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, - priv->port); - if (err) - en_err(priv, "Failed entering multicast promisc mode\n"); - priv->flags |= MLX4_EN_FLAG_MC_PROMISC; - } - /* Disable port VLAN filter */ err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); if (err) @@ -296,22 +366,40 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) priv->flags &= ~MLX4_EN_FLAG_PROMISC; /* Disable promiscouos mode */ - if (!(mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) - err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, - priv->base_qpn, 0); - else - err = mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, + switch (mdev->dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + err = mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_PROMISC_UPLINK); + if (err) + en_err(priv, "Failed disabling promiscuous mode\n"); + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + break; + + case MLX4_STEERING_MODE_B0: + err = mlx4_unicast_promisc_remove(mdev->dev, + priv->base_qpn, priv->port); - if (err) - en_err(priv, "Failed disabling promiscuous mode\n"); + if (err) + en_err(priv, "Failed disabling unicast promiscuous mode\n"); + /* Disable Multicast promisc */ + if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { + err = mlx4_multicast_promisc_remove(mdev->dev, + priv->base_qpn, + priv->port); + if (err) + en_err(priv, "Failed disabling multicast promiscuous mode\n"); + priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + } + break; - /* Disable Multicast promisc */ - if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { - err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, - priv->port); + case 
MLX4_STEERING_MODE_A0: + err = mlx4_SET_PORT_qpn_calc(mdev->dev, + priv->port, + priv->base_qpn, 0); if (err) - en_err(priv, "Failed disabling multicast promiscuous mode\n"); - priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; + en_err(priv, "Failed disabling promiscuous mode\n"); + break; } /* Enable port VLAN filter */ @@ -329,18 +417,46 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) /* Add the default qp number as multicast promisc */ if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { - err = mlx4_multicast_promisc_add(mdev->dev, priv->base_qpn, - priv->port); + switch (mdev->dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + err = mlx4_flow_steer_promisc_add(mdev->dev, + priv->port, + priv->base_qpn, + MLX4_FS_PROMISC_ALL_MULTI); + break; + + case MLX4_STEERING_MODE_B0: + err = mlx4_multicast_promisc_add(mdev->dev, + priv->base_qpn, + priv->port); + break; + + case MLX4_STEERING_MODE_A0: + break; + } if (err) en_err(priv, "Failed entering multicast promisc mode\n"); priv->flags |= MLX4_EN_FLAG_MC_PROMISC; } } else { - int i; /* Disable Multicast promisc */ if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { - err = mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, - priv->port); + switch (mdev->dev->caps.steering_mode) { + case MLX4_STEERING_MODE_DEVICE_MANAGED: + err = mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_PROMISC_ALL_MULTI); + break; + + case MLX4_STEERING_MODE_B0: + err = mlx4_multicast_promisc_remove(mdev->dev, + priv->base_qpn, + priv->port); + break; + + case MLX4_STEERING_MODE_A0: + break; + } if (err) en_err(priv, "Failed disabling multicast promiscuous mode\n"); priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; @@ -351,13 +467,6 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) if (err) en_err(priv, "Failed disabling multicast filter\n"); - /* Detach our qp from all the multicast addresses */ - for (i = 0; i < priv->mc_addrs_cnt; i++) { - memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); - mc_list[5] = priv->port; - mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, - mc_list, MLX4_PROT_ETH); - } /* Flush mcast filter and init it with broadcast address */ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 1, MLX4_MCAST_CONFIG); @@ -367,13 +476,8 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) netif_tx_lock_bh(dev); mlx4_en_cache_mclist(dev); netif_tx_unlock_bh(dev); - for (i = 0; i < priv->mc_addrs_cnt; i++) { - mcast_addr = - mlx4_en_mac_to_u64(priv->mc_addrs + i * ETH_ALEN); - memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); - mc_list[5] = priv->port; - mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, - mc_list, 0, MLX4_PROT_ETH); + list_for_each_entry(mclist, &priv->mc_list, list) { + mcast_addr = mlx4_en_mac_to_u64(mclist->addr); mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, mcast_addr, 0, MLX4_MCAST_CONFIG); } @@ -381,6 +485,42 @@ static void mlx4_en_do_set_multicast(struct work_struct *work) 0, MLX4_MCAST_ENABLE); if (err) en_err(priv, "Failed enabling multicast filter\n"); + + update_mclist_flags(priv, &priv->curr_list, &priv->mc_list); + list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { + if (mclist->action == MCLIST_REM) { + /* detach this address and delete from list */ + memcpy(&mc_list[10], mclist->addr, ETH_ALEN); + mc_list[5] = priv->port; + err = mlx4_multicast_detach(mdev->dev, + &priv->rss_map.indir_qp, + mc_list, + MLX4_PROT_ETH, + mclist->reg_id); + if (err) + en_err(priv, "Fail to detach multicast address\n"); + + /* 
remove from list */ + list_del(&mclist->list); + kfree(mclist); + } + + if (mclist->action == MCLIST_ADD) { + /* attach the address */ + memcpy(&mc_list[10], mclist->addr, ETH_ALEN); + /* needed for B0 steering support */ + mc_list[5] = priv->port; + err = mlx4_multicast_attach(mdev->dev, + &priv->rss_map.indir_qp, + mc_list, + priv->port, 0, + MLX4_PROT_ETH, + &mclist->reg_id); + if (err) + en_err(priv, "Fail to attach multicast address\n"); + + } + } } out: mutex_unlock(&mdev->state_lock); @@ -605,6 +745,9 @@ int mlx4_en_start_port(struct net_device *dev) return 0; } + INIT_LIST_HEAD(&priv->mc_list); + INIT_LIST_HEAD(&priv->curr_list); + /* Calculate Rx buf size */ dev->mtu = min(dev->mtu, priv->max_mtu); mlx4_en_calc_rx_buf(dev); @@ -653,6 +796,10 @@ int mlx4_en_start_port(struct net_device *dev) goto mac_err; } + err = mlx4_en_create_drop_qp(priv); + if (err) + goto rss_err; + /* Configure tx cq's and rings */ for (i = 0; i < priv->tx_ring_num; i++) { /* Configure cq */ @@ -720,13 +867,23 @@ int mlx4_en_start_port(struct net_device *dev) /* Attach rx QP to bradcast address */ memset(&mc_list[10], 0xff, ETH_ALEN); - mc_list[5] = priv->port; + mc_list[5] = priv->port; /* needed for B0 steering support */ if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, - 0, MLX4_PROT_ETH)) + priv->port, 0, MLX4_PROT_ETH, + &priv->broadcast_id)) mlx4_warn(mdev, "Failed Attaching Broadcast\n"); /* Must redo promiscuous mode setup. */ priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); + if (mdev->dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_PROMISC_UPLINK); + mlx4_flow_steer_promisc_remove(mdev->dev, + priv->port, + MLX4_FS_PROMISC_ALL_MULTI); + } /* Schedule multicast task to populate multicast list */ queue_work(mdev->workqueue, &priv->mcast_task); @@ -742,7 +899,8 @@ tx_err: mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[tx_index]); mlx4_en_deactivate_cq(priv, &priv->tx_cq[tx_index]); } - + mlx4_en_destroy_drop_qp(priv); +rss_err: mlx4_en_release_rss_steer(priv); mac_err: mlx4_put_eth_qp(mdev->dev, priv->port, priv->mac, priv->base_qpn); @@ -760,6 +918,7 @@ void mlx4_en_stop_port(struct net_device *dev) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; + struct mlx4_en_mc_list *mclist, *tmp; int i; u8 mc_list[16] = {0}; @@ -778,19 +937,26 @@ void mlx4_en_stop_port(struct net_device *dev) /* Detach All multicasts */ memset(&mc_list[10], 0xff, ETH_ALEN); - mc_list[5] = priv->port; + mc_list[5] = priv->port; /* needed for B0 steering support */ mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, - MLX4_PROT_ETH); - for (i = 0; i < priv->mc_addrs_cnt; i++) { - memcpy(&mc_list[10], priv->mc_addrs + i * ETH_ALEN, ETH_ALEN); + MLX4_PROT_ETH, priv->broadcast_id); + list_for_each_entry(mclist, &priv->curr_list, list) { + memcpy(&mc_list[10], mclist->addr, ETH_ALEN); mc_list[5] = priv->port; mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, - mc_list, MLX4_PROT_ETH); + mc_list, MLX4_PROT_ETH, mclist->reg_id); } mlx4_en_clear_list(dev); + list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { + list_del(&mclist->list); + kfree(mclist); + } + /* Flush multicast filter */ mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); + mlx4_en_destroy_drop_qp(priv); + /* Free TX Rings */ for (i = 0; i < priv->tx_ring_num; i++) { mlx4_en_deactivate_tx_ring(priv, &priv->tx_ring[i]); diff --git 
a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index d49a7ac3187d..a04cbf767eb5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -844,6 +844,36 @@ out: return err; } +int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv) +{ + int err; + u32 qpn; + + err = mlx4_qp_reserve_range(priv->mdev->dev, 1, 1, &qpn); + if (err) { + en_err(priv, "Failed reserving drop qpn\n"); + return err; + } + err = mlx4_qp_alloc(priv->mdev->dev, qpn, &priv->drop_qp); + if (err) { + en_err(priv, "Failed allocating drop qp\n"); + mlx4_qp_release_range(priv->mdev->dev, qpn, 1); + return err; + } + + return 0; +} + +void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv) +{ + u32 qpn; + + qpn = priv->drop_qp.qpn; + mlx4_qp_remove(priv->mdev->dev, &priv->drop_qp); + mlx4_qp_free(priv->mdev->dev, &priv->drop_qp); + mlx4_qp_release_range(priv->mdev->dev, qpn, 1); +} + /* Allocate rx qp's and configure them according to rss map */ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv) { diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 9c83bb8151ea..1d70657058a5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -123,7 +123,8 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) static const char * const fname[] = { [0] = "RSS support", [1] = "RSS Toeplitz Hash Function support", - [2] = "RSS XOR Hash Function support" + [2] = "RSS XOR Hash Function support", + [3] = "Device manage flow steering support" }; int i; @@ -391,6 +392,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) #define QUERY_DEV_CAP_RSVD_XRC_OFFSET 0x66 #define QUERY_DEV_CAP_MAX_XRC_OFFSET 0x67 #define QUERY_DEV_CAP_MAX_COUNTERS_OFFSET 0x68 +#define QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET 0x76 +#define QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET 0x77 #define QUERY_DEV_CAP_RDMARC_ENTRY_SZ_OFFSET 0x80 #define QUERY_DEV_CAP_QPC_ENTRY_SZ_OFFSET 0x82 #define QUERY_DEV_CAP_AUX_ENTRY_SZ_OFFSET 0x84 @@ -474,6 +477,12 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->num_ports = field & 0xf; MLX4_GET(field, outbox, QUERY_DEV_CAP_MAX_MSG_SZ_OFFSET); dev_cap->max_msg_sz = 1 << (field & 0x1f); + MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_RANGE_EN_OFFSET); + if (field & 0x80) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FS_EN; + dev_cap->fs_log_max_ucast_qp_range_size = field & 0x1f; + MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_MAX_QP_OFFSET); + dev_cap->fs_max_num_qp_per_entry = field; MLX4_GET(stat_rate, outbox, QUERY_DEV_CAP_RATE_SUPPORT_OFFSET); dev_cap->stat_rate_support = stat_rate; MLX4_GET(ext_flags, outbox, QUERY_DEV_CAP_EXT_FLAGS_OFFSET); @@ -1061,6 +1070,15 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) #define INIT_HCA_LOG_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) #define INIT_HCA_UC_STEERING_OFFSET (INIT_HCA_MCAST_OFFSET + 0x18) #define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) +#define INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN 0x6 +#define INIT_HCA_FS_PARAM_OFFSET 0x1d0 +#define INIT_HCA_FS_BASE_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x00) +#define INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x12) +#define INIT_HCA_FS_LOG_TABLE_SZ_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x1b) +#define INIT_HCA_FS_ETH_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x21) +#define INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET 
(INIT_HCA_FS_PARAM_OFFSET + 0x22) +#define INIT_HCA_FS_IB_BITS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x25) +#define INIT_HCA_FS_IB_NUM_ADDRS_OFFSET (INIT_HCA_FS_PARAM_OFFSET + 0x26) #define INIT_HCA_TPT_OFFSET 0x0f0 #define INIT_HCA_DMPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) #define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) @@ -1119,14 +1137,44 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param) MLX4_PUT(inbox, param->rdmarc_base, INIT_HCA_RDMARC_BASE_OFFSET); MLX4_PUT(inbox, param->log_rd_per_qp, INIT_HCA_LOG_RD_OFFSET); - /* multicast attributes */ - - MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); - MLX4_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); - MLX4_PUT(inbox, param->log_mc_hash_sz, INIT_HCA_LOG_MC_HASH_SZ_OFFSET); - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) - MLX4_PUT(inbox, (u8) (1 << 3), INIT_HCA_UC_STEERING_OFFSET); - MLX4_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + /* steering attributes */ + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= + cpu_to_be32(1 << + INIT_HCA_DEVICE_MANAGED_FLOW_STEERING_EN); + + MLX4_PUT(inbox, param->mc_base, INIT_HCA_FS_BASE_OFFSET); + MLX4_PUT(inbox, param->log_mc_entry_sz, + INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); + MLX4_PUT(inbox, param->log_mc_table_sz, + INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); + /* Enable Ethernet flow steering + * with udp unicast and tcp unicast + */ + MLX4_PUT(inbox, param->fs_hash_enable_bits, + INIT_HCA_FS_ETH_BITS_OFFSET); + MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, + INIT_HCA_FS_ETH_NUM_ADDRS_OFFSET); + /* Enable IPoIB flow steering + * with udp unicast and tcp unicast + */ + MLX4_PUT(inbox, param->fs_hash_enable_bits, + INIT_HCA_FS_IB_BITS_OFFSET); + MLX4_PUT(inbox, (u16) MLX4_FS_NUM_OF_L2_ADDR, + INIT_HCA_FS_IB_NUM_ADDRS_OFFSET); + } else { + MLX4_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); + MLX4_PUT(inbox, param->log_mc_entry_sz, + INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + MLX4_PUT(inbox, param->log_mc_hash_sz, + INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + MLX4_PUT(inbox, param->log_mc_table_sz, + INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0) + MLX4_PUT(inbox, (u8) (1 << 3), + INIT_HCA_UC_STEERING_OFFSET); + } /* TPT attributes */ @@ -1188,15 +1236,24 @@ int mlx4_QUERY_HCA(struct mlx4_dev *dev, MLX4_GET(param->rdmarc_base, outbox, INIT_HCA_RDMARC_BASE_OFFSET); MLX4_GET(param->log_rd_per_qp, outbox, INIT_HCA_LOG_RD_OFFSET); - /* multicast attributes */ + /* steering attributes */ + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { - MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); - MLX4_GET(param->log_mc_entry_sz, outbox, - INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); - MLX4_GET(param->log_mc_hash_sz, outbox, - INIT_HCA_LOG_MC_HASH_SZ_OFFSET); - MLX4_GET(param->log_mc_table_sz, outbox, - INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + MLX4_GET(param->mc_base, outbox, INIT_HCA_FS_BASE_OFFSET); + MLX4_GET(param->log_mc_entry_sz, outbox, + INIT_HCA_FS_LOG_ENTRY_SZ_OFFSET); + MLX4_GET(param->log_mc_table_sz, outbox, + INIT_HCA_FS_LOG_TABLE_SZ_OFFSET); + } else { + MLX4_GET(param->mc_base, outbox, INIT_HCA_MC_BASE_OFFSET); + MLX4_GET(param->log_mc_entry_sz, outbox, + INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + MLX4_GET(param->log_mc_hash_sz, outbox, + INIT_HCA_LOG_MC_HASH_SZ_OFFSET); + MLX4_GET(param->log_mc_table_sz, outbox, + INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + } /* TPT attributes */ diff --git 
a/drivers/net/ethernet/mellanox/mlx4/fw.h b/drivers/net/ethernet/mellanox/mlx4/fw.h index 64c0399e4b78..83fcbbf1b169 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.h +++ b/drivers/net/ethernet/mellanox/mlx4/fw.h @@ -78,6 +78,8 @@ struct mlx4_dev_cap { u16 wavelength[MLX4_MAX_PORTS + 1]; u64 trans_code[MLX4_MAX_PORTS + 1]; u16 stat_rate_support; + int fs_log_max_ucast_qp_range_size; + int fs_max_num_qp_per_entry; u64 flags; u64 flags2; int reserved_uars; @@ -165,6 +167,7 @@ struct mlx4_init_hca_param { u8 log_mpt_sz; u8 log_uar_sz; u8 uar_page_sz; /* log pg sz in 4k chunks */ + u8 fs_hash_enable_bits; }; struct mlx4_init_ib_param { diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index a0313de122de..42645166bae2 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -41,6 +41,7 @@ #include <linux/slab.h> #include <linux/io-mapping.h> #include <linux/delay.h> +#include <linux/netdevice.h> #include <linux/mlx4/device.h> #include <linux/mlx4/doorbell.h> @@ -90,7 +91,9 @@ module_param_named(log_num_mgm_entry_size, MODULE_PARM_DESC(log_num_mgm_entry_size, "log mgm size, that defines the num" " of qp per mcg, for example:" " 10 gives 248.range: 9<=" - " log_num_mgm_entry_size <= 12"); + " log_num_mgm_entry_size <= 12." + " Not in use with device managed" + " flow steering"); #define MLX4_VF (1 << 0) @@ -243,7 +246,6 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.reserved_srqs = dev_cap->reserved_srqs; dev->caps.max_sq_desc_sz = dev_cap->max_sq_desc_sz; dev->caps.max_rq_desc_sz = dev_cap->max_rq_desc_sz; - dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); /* * Subtract 1 from the limit because we need to allocate a * spare CQE so the HCA HW can tell the difference between an @@ -274,6 +276,28 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev->caps.max_gso_sz = dev_cap->max_gso_sz; dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz; + if (dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_FS_EN) { + dev->caps.steering_mode = MLX4_STEERING_MODE_DEVICE_MANAGED; + dev->caps.num_qp_per_mgm = dev_cap->fs_max_num_qp_per_entry; + dev->caps.fs_log_max_ucast_qp_range_size = + dev_cap->fs_log_max_ucast_qp_range_size; + } else { + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER && + dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) { + dev->caps.steering_mode = MLX4_STEERING_MODE_B0; + } else { + dev->caps.steering_mode = MLX4_STEERING_MODE_A0; + + if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER || + dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) + mlx4_warn(dev, "Must have UC_STEER and MC_STEER flags " + "set to use B0 steering. Falling back to A0 steering mode.\n"); + } + dev->caps.num_qp_per_mgm = mlx4_get_qp_per_mgm(dev); + } + mlx4_dbg(dev, "Steering mode is: %s\n", + mlx4_steering_mode_str(dev->caps.steering_mode)); + /* Sense port always allowed on supported devices for ConnectX1 and 2 */ if (dev->pdev->device != 0x1003) dev->caps.flags |= MLX4_DEV_CAP_FLAG_SENSE_SUPPORT; @@ -967,9 +991,11 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap, } /* - * It's not strictly required, but for simplicity just map the - * whole multicast group table now. The table isn't very big - * and it's a lot easier than trying to track ref counts. + * For flow steering device managed mode it is required to use + * mlx4_init_icm_table. 
For B0 steering mode it's not strictly + * required, but for simplicity just map the whole multicast + * group table now. The table isn't very big and it's a lot + * easier than trying to track ref counts. */ err = mlx4_init_icm_table(dev, &priv->mcg_table.table, init_hca->mc_base, @@ -1205,7 +1231,26 @@ static int mlx4_init_hca(struct mlx4_dev *dev) goto err_stop_fw; } + priv->fs_hash_mode = MLX4_FS_L2_HASH; + + switch (priv->fs_hash_mode) { + case MLX4_FS_L2_HASH: + init_hca.fs_hash_enable_bits = 0; + break; + + case MLX4_FS_L2_L3_L4_HASH: + /* Enable flow steering with + * udp unicast and tcp unicast + */ + init_hca.fs_hash_enable_bits = + MLX4_FS_UDP_UC_EN | MLX4_FS_TCP_UC_EN; + break; + } + profile = default_profile; + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) + profile.num_mcg = MLX4_FS_NUM_MCG; icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca); @@ -1539,8 +1584,8 @@ static void mlx4_enable_msi_x(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); struct msix_entry *entries; int nreq = min_t(int, dev->caps.num_ports * - min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT) - + MSIX_LEGACY_SZ, MAX_MSIX); + min_t(int, netif_get_num_default_rss_queues() + 1, + MAX_MSIX_P_PORT) + MSIX_LEGACY_SZ, MAX_MSIX); int err; int i; diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c index f4a8f98e402a..bc62f536ffae 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mcg.c +++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c @@ -41,6 +41,7 @@ #define MGM_QPN_MASK 0x00FFFFFF #define MGM_BLCK_LB_BIT 30 +#define MLX4_MAC_MASK 0xffffffffffffULL static const u8 zero_gid[16]; /* automatically initialized to 0 */ @@ -54,7 +55,12 @@ struct mlx4_mgm { int mlx4_get_mgm_entry_size(struct mlx4_dev *dev) { - return min((1 << mlx4_log_num_mgm_entry_size), MLX4_MAX_MGM_ENTRY_SIZE); + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) + return 1 << MLX4_FS_MGM_LOG_ENTRY_SIZE; + else + return min((1 << mlx4_log_num_mgm_entry_size), + MLX4_MAX_MGM_ENTRY_SIZE); } int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) @@ -62,6 +68,35 @@ int mlx4_get_qp_per_mgm(struct mlx4_dev *dev) return 4 * (mlx4_get_mgm_entry_size(dev) / 16 - 2); } +static int mlx4_QP_FLOW_STEERING_ATTACH(struct mlx4_dev *dev, + struct mlx4_cmd_mailbox *mailbox, + u32 size, + u64 *reg_id) +{ + u64 imm; + int err = 0; + + err = mlx4_cmd_imm(dev, mailbox->dma, &imm, size, 0, + MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + return err; + *reg_id = imm; + + return err; +} + +static int mlx4_QP_FLOW_STEERING_DETACH(struct mlx4_dev *dev, u64 regid) +{ + int err = 0; + + err = mlx4_cmd(dev, regid, 0, 0, + MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + return err; +} + static int mlx4_READ_ENTRY(struct mlx4_dev *dev, int index, struct mlx4_cmd_mailbox *mailbox) { @@ -614,6 +649,311 @@ static int find_entry(struct mlx4_dev *dev, u8 port, return err; } +struct mlx4_net_trans_rule_hw_ctrl { + __be32 ctrl; + __be32 vf_vep_port; + __be32 qpn; + __be32 reserved; +}; + +static void trans_rule_ctrl_to_hw(struct mlx4_net_trans_rule *ctrl, + struct mlx4_net_trans_rule_hw_ctrl *hw) +{ + static const u8 __promisc_mode[] = { + [MLX4_FS_PROMISC_NONE] = 0x0, + [MLX4_FS_PROMISC_UPLINK] = 0x1, + [MLX4_FS_PROMISC_FUNCTION_PORT] = 0x2, + [MLX4_FS_PROMISC_ALL_MULTI] = 0x3, + }; + + u32 dw = 0; + + dw = ctrl->queue_mode == MLX4_NET_TRANS_Q_LIFO ? 1 : 0; + dw |= ctrl->exclusive ? 
(1 << 2) : 0; + dw |= ctrl->allow_loopback ? (1 << 3) : 0; + dw |= __promisc_mode[ctrl->promisc_mode] << 8; + dw |= ctrl->priority << 16; + + hw->ctrl = cpu_to_be32(dw); + hw->vf_vep_port = cpu_to_be32(ctrl->port); + hw->qpn = cpu_to_be32(ctrl->qpn); +} + +struct mlx4_net_trans_rule_hw_ib { + u8 size; + u8 rsvd1; + __be16 id; + u32 rsvd2; + __be32 qpn; + __be32 qpn_mask; + u8 dst_gid[16]; + u8 dst_gid_msk[16]; +} __packed; + +struct mlx4_net_trans_rule_hw_eth { + u8 size; + u8 rsvd; + __be16 id; + u8 rsvd1[6]; + u8 dst_mac[6]; + u16 rsvd2; + u8 dst_mac_msk[6]; + u16 rsvd3; + u8 src_mac[6]; + u16 rsvd4; + u8 src_mac_msk[6]; + u8 rsvd5; + u8 ether_type_enable; + __be16 ether_type; + __be16 vlan_id_msk; + __be16 vlan_id; +} __packed; + +struct mlx4_net_trans_rule_hw_tcp_udp { + u8 size; + u8 rsvd; + __be16 id; + __be16 rsvd1[3]; + __be16 dst_port; + __be16 rsvd2; + __be16 dst_port_msk; + __be16 rsvd3; + __be16 src_port; + __be16 rsvd4; + __be16 src_port_msk; +} __packed; + +struct mlx4_net_trans_rule_hw_ipv4 { + u8 size; + u8 rsvd; + __be16 id; + __be32 rsvd1; + __be32 dst_ip; + __be32 dst_ip_msk; + __be32 src_ip; + __be32 src_ip_msk; +} __packed; + +struct _rule_hw { + union { + struct { + u8 size; + u8 rsvd; + __be16 id; + }; + struct mlx4_net_trans_rule_hw_eth eth; + struct mlx4_net_trans_rule_hw_ib ib; + struct mlx4_net_trans_rule_hw_ipv4 ipv4; + struct mlx4_net_trans_rule_hw_tcp_udp tcp_udp; + }; +}; + +static int parse_trans_rule(struct mlx4_dev *dev, struct mlx4_spec_list *spec, + struct _rule_hw *rule_hw) +{ + static const u16 __sw_id_hw[] = { + [MLX4_NET_TRANS_RULE_ID_ETH] = 0xE001, + [MLX4_NET_TRANS_RULE_ID_IB] = 0xE005, + [MLX4_NET_TRANS_RULE_ID_IPV6] = 0xE003, + [MLX4_NET_TRANS_RULE_ID_IPV4] = 0xE002, + [MLX4_NET_TRANS_RULE_ID_TCP] = 0xE004, + [MLX4_NET_TRANS_RULE_ID_UDP] = 0xE006 + }; + + static const size_t __rule_hw_sz[] = { + [MLX4_NET_TRANS_RULE_ID_ETH] = + sizeof(struct mlx4_net_trans_rule_hw_eth), + [MLX4_NET_TRANS_RULE_ID_IB] = + sizeof(struct mlx4_net_trans_rule_hw_ib), + [MLX4_NET_TRANS_RULE_ID_IPV6] = 0, + [MLX4_NET_TRANS_RULE_ID_IPV4] = + sizeof(struct mlx4_net_trans_rule_hw_ipv4), + [MLX4_NET_TRANS_RULE_ID_TCP] = + sizeof(struct mlx4_net_trans_rule_hw_tcp_udp), + [MLX4_NET_TRANS_RULE_ID_UDP] = + sizeof(struct mlx4_net_trans_rule_hw_tcp_udp) + }; + if (spec->id >= MLX4_NET_TRANS_RULE_NUM) { + mlx4_err(dev, "Invalid network rule id. 
id = %d\n", spec->id); + return -EINVAL; + } + memset(rule_hw, 0, __rule_hw_sz[spec->id]); + rule_hw->id = cpu_to_be16(__sw_id_hw[spec->id]); + rule_hw->size = __rule_hw_sz[spec->id] >> 2; + + switch (spec->id) { + case MLX4_NET_TRANS_RULE_ID_ETH: + memcpy(rule_hw->eth.dst_mac, spec->eth.dst_mac, ETH_ALEN); + memcpy(rule_hw->eth.dst_mac_msk, spec->eth.dst_mac_msk, + ETH_ALEN); + memcpy(rule_hw->eth.src_mac, spec->eth.src_mac, ETH_ALEN); + memcpy(rule_hw->eth.src_mac_msk, spec->eth.src_mac_msk, + ETH_ALEN); + if (spec->eth.ether_type_enable) { + rule_hw->eth.ether_type_enable = 1; + rule_hw->eth.ether_type = spec->eth.ether_type; + } + rule_hw->eth.vlan_id = spec->eth.vlan_id; + rule_hw->eth.vlan_id_msk = spec->eth.vlan_id_msk; + break; + + case MLX4_NET_TRANS_RULE_ID_IB: + rule_hw->ib.qpn = spec->ib.r_qpn; + rule_hw->ib.qpn_mask = spec->ib.qpn_msk; + memcpy(&rule_hw->ib.dst_gid, &spec->ib.dst_gid, 16); + memcpy(&rule_hw->ib.dst_gid_msk, &spec->ib.dst_gid_msk, 16); + break; + + case MLX4_NET_TRANS_RULE_ID_IPV6: + return -EOPNOTSUPP; + + case MLX4_NET_TRANS_RULE_ID_IPV4: + rule_hw->ipv4.src_ip = spec->ipv4.src_ip; + rule_hw->ipv4.src_ip_msk = spec->ipv4.src_ip_msk; + rule_hw->ipv4.dst_ip = spec->ipv4.dst_ip; + rule_hw->ipv4.dst_ip_msk = spec->ipv4.dst_ip_msk; + break; + + case MLX4_NET_TRANS_RULE_ID_TCP: + case MLX4_NET_TRANS_RULE_ID_UDP: + rule_hw->tcp_udp.dst_port = spec->tcp_udp.dst_port; + rule_hw->tcp_udp.dst_port_msk = spec->tcp_udp.dst_port_msk; + rule_hw->tcp_udp.src_port = spec->tcp_udp.src_port; + rule_hw->tcp_udp.src_port_msk = spec->tcp_udp.src_port_msk; + break; + + default: + return -EINVAL; + } + + return __rule_hw_sz[spec->id]; +} + +static void mlx4_err_rule(struct mlx4_dev *dev, char *str, + struct mlx4_net_trans_rule *rule) +{ +#define BUF_SIZE 256 + struct mlx4_spec_list *cur; + char buf[BUF_SIZE]; + int len = 0; + + mlx4_err(dev, "%s", str); + len += snprintf(buf + len, BUF_SIZE - len, + "port = %d prio = 0x%x qp = 0x%x ", + rule->port, rule->priority, rule->qpn); + + list_for_each_entry(cur, &rule->list, list) { + switch (cur->id) { + case MLX4_NET_TRANS_RULE_ID_ETH: + len += snprintf(buf + len, BUF_SIZE - len, + "dmac = %pM ", &cur->eth.dst_mac); + if (cur->eth.ether_type) + len += snprintf(buf + len, BUF_SIZE - len, + "ethertype = 0x%x ", + be16_to_cpu(cur->eth.ether_type)); + if (cur->eth.vlan_id) + len += snprintf(buf + len, BUF_SIZE - len, + "vlan-id = %d ", + be16_to_cpu(cur->eth.vlan_id)); + break; + + case MLX4_NET_TRANS_RULE_ID_IPV4: + if (cur->ipv4.src_ip) + len += snprintf(buf + len, BUF_SIZE - len, + "src-ip = %pI4 ", + &cur->ipv4.src_ip); + if (cur->ipv4.dst_ip) + len += snprintf(buf + len, BUF_SIZE - len, + "dst-ip = %pI4 ", + &cur->ipv4.dst_ip); + break; + + case MLX4_NET_TRANS_RULE_ID_TCP: + case MLX4_NET_TRANS_RULE_ID_UDP: + if (cur->tcp_udp.src_port) + len += snprintf(buf + len, BUF_SIZE - len, + "src-port = %d ", + be16_to_cpu(cur->tcp_udp.src_port)); + if (cur->tcp_udp.dst_port) + len += snprintf(buf + len, BUF_SIZE - len, + "dst-port = %d ", + be16_to_cpu(cur->tcp_udp.dst_port)); + break; + + case MLX4_NET_TRANS_RULE_ID_IB: + len += snprintf(buf + len, BUF_SIZE - len, + "dst-gid = %pI6\n", cur->ib.dst_gid); + len += snprintf(buf + len, BUF_SIZE - len, + "dst-gid-mask = %pI6\n", + cur->ib.dst_gid_msk); + break; + + case MLX4_NET_TRANS_RULE_ID_IPV6: + break; + + default: + break; + } + } + len += snprintf(buf + len, BUF_SIZE - len, "\n"); + mlx4_err(dev, "%s", buf); + + if (len >= BUF_SIZE) + mlx4_err(dev, "Network rule error message was truncated, 
print buffer is too small.\n"); +} + +int mlx4_flow_attach(struct mlx4_dev *dev, + struct mlx4_net_trans_rule *rule, u64 *reg_id) +{ + struct mlx4_cmd_mailbox *mailbox; + struct mlx4_spec_list *cur; + u32 size = 0; + int ret; + + mailbox = mlx4_alloc_cmd_mailbox(dev); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + memset(mailbox->buf, 0, sizeof(struct mlx4_net_trans_rule_hw_ctrl)); + trans_rule_ctrl_to_hw(rule, mailbox->buf); + + size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); + + list_for_each_entry(cur, &rule->list, list) { + ret = parse_trans_rule(dev, cur, mailbox->buf + size); + if (ret < 0) { + mlx4_free_cmd_mailbox(dev, mailbox); + return -EINVAL; + } + size += ret; + } + + ret = mlx4_QP_FLOW_STEERING_ATTACH(dev, mailbox, size >> 2, reg_id); + if (ret == -ENOMEM) + mlx4_err_rule(dev, + "mcg table is full. Fail to register network rule.\n", + rule); + else if (ret) + mlx4_err_rule(dev, "Fail to register network rule.\n", rule); + + mlx4_free_cmd_mailbox(dev, mailbox); + + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_flow_attach); + +int mlx4_flow_detach(struct mlx4_dev *dev, u64 reg_id) +{ + int err; + + err = mlx4_QP_FLOW_STEERING_DETACH(dev, reg_id); + if (err) + mlx4_err(dev, "Fail to detach network rule. registration id = 0x%llx\n", + reg_id); + return err; +} +EXPORT_SYMBOL_GPL(mlx4_flow_detach); + int mlx4_qp_attach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot, enum mlx4_steer_type steer) @@ -866,49 +1206,159 @@ static int mlx4_QP_ATTACH(struct mlx4_dev *dev, struct mlx4_qp *qp, } int mlx4_multicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], - int block_mcast_loopback, enum mlx4_protocol prot) + u8 port, int block_mcast_loopback, + enum mlx4_protocol prot, u64 *reg_id) { - if (prot == MLX4_PROT_ETH && - !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) - return 0; - if (prot == MLX4_PROT_ETH) - gid[7] |= (MLX4_MC_STEER << 1); + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_A0: + if (prot == MLX4_PROT_ETH) + return 0; + + case MLX4_STEERING_MODE_B0: + if (prot == MLX4_PROT_ETH) + gid[7] |= (MLX4_MC_STEER << 1); + + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 1, + block_mcast_loopback, prot); + return mlx4_qp_attach_common(dev, qp, gid, + block_mcast_loopback, prot, + MLX4_MC_STEER); + + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + struct mlx4_spec_list spec = { {NULL} }; + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .promisc_mode = MLX4_FS_PROMISC_NONE, + .priority = MLX4_DOMAIN_NIC, + }; + + rule.allow_loopback = !block_mcast_loopback; + rule.port = port; + rule.qpn = qp->qpn; + INIT_LIST_HEAD(&rule.list); + + switch (prot) { + case MLX4_PROT_ETH: + spec.id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec.eth.dst_mac, &gid[10], ETH_ALEN); + memcpy(spec.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + break; - if (mlx4_is_mfunc(dev)) - return mlx4_QP_ATTACH(dev, qp, gid, 1, - block_mcast_loopback, prot); + case MLX4_PROT_IB_IPV6: + spec.id = MLX4_NET_TRANS_RULE_ID_IB; + memcpy(spec.ib.dst_gid, gid, 16); + memset(&spec.ib.dst_gid_msk, 0xff, 16); + break; + default: + return -EINVAL; + } + list_add_tail(&spec.list, &rule.list); - return mlx4_qp_attach_common(dev, qp, gid, block_mcast_loopback, - prot, MLX4_MC_STEER); + return mlx4_flow_attach(dev, &rule, reg_id); + } + + default: + return -EINVAL; + } } EXPORT_SYMBOL_GPL(mlx4_multicast_attach); int 
mlx4_multicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], - enum mlx4_protocol prot) + enum mlx4_protocol prot, u64 reg_id) { - if (prot == MLX4_PROT_ETH && - !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) - return 0; + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_A0: + if (prot == MLX4_PROT_ETH) + return 0; - if (prot == MLX4_PROT_ETH) - gid[7] |= (MLX4_MC_STEER << 1); + case MLX4_STEERING_MODE_B0: + if (prot == MLX4_PROT_ETH) + gid[7] |= (MLX4_MC_STEER << 1); - if (mlx4_is_mfunc(dev)) - return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); + if (mlx4_is_mfunc(dev)) + return mlx4_QP_ATTACH(dev, qp, gid, 0, 0, prot); + + return mlx4_qp_detach_common(dev, qp, gid, prot, + MLX4_MC_STEER); + + case MLX4_STEERING_MODE_DEVICE_MANAGED: + return mlx4_flow_detach(dev, reg_id); - return mlx4_qp_detach_common(dev, qp, gid, prot, MLX4_MC_STEER); + default: + return -EINVAL; + } } EXPORT_SYMBOL_GPL(mlx4_multicast_detach); +int mlx4_flow_steer_promisc_add(struct mlx4_dev *dev, u8 port, + u32 qpn, enum mlx4_net_trans_promisc_mode mode) +{ + struct mlx4_net_trans_rule rule; + u64 *regid_p; + + switch (mode) { + case MLX4_FS_PROMISC_UPLINK: + case MLX4_FS_PROMISC_FUNCTION_PORT: + regid_p = &dev->regid_promisc_array[port]; + break; + case MLX4_FS_PROMISC_ALL_MULTI: + regid_p = &dev->regid_allmulti_array[port]; + break; + default: + return -1; + } + + if (*regid_p != 0) + return -1; + + rule.promisc_mode = mode; + rule.port = port; + rule.qpn = qpn; + INIT_LIST_HEAD(&rule.list); + mlx4_err(dev, "going promisc on %x\n", port); + + return mlx4_flow_attach(dev, &rule, regid_p); +} +EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_add); + +int mlx4_flow_steer_promisc_remove(struct mlx4_dev *dev, u8 port, + enum mlx4_net_trans_promisc_mode mode) +{ + int ret; + u64 *regid_p; + + switch (mode) { + case MLX4_FS_PROMISC_UPLINK: + case MLX4_FS_PROMISC_FUNCTION_PORT: + regid_p = &dev->regid_promisc_array[port]; + break; + case MLX4_FS_PROMISC_ALL_MULTI: + regid_p = &dev->regid_allmulti_array[port]; + break; + default: + return -1; + } + + if (*regid_p == 0) + return -1; + + ret = mlx4_flow_detach(dev, *regid_p); + if (ret == 0) + *regid_p = 0; + + return ret; +} +EXPORT_SYMBOL_GPL(mlx4_flow_steer_promisc_remove); + int mlx4_unicast_attach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], int block_mcast_loopback, enum mlx4_protocol prot) { - if (prot == MLX4_PROT_ETH && - !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) - return 0; - if (prot == MLX4_PROT_ETH) gid[7] |= (MLX4_UC_STEER << 1); @@ -924,10 +1374,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_attach); int mlx4_unicast_detach(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16], enum mlx4_protocol prot) { - if (prot == MLX4_PROT_ETH && - !(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) - return 0; - if (prot == MLX4_PROT_ETH) gid[7] |= (MLX4_UC_STEER << 1); @@ -968,9 +1414,6 @@ static int mlx4_PROMISC(struct mlx4_dev *dev, u32 qpn, int mlx4_multicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) { - if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) - return 0; - if (mlx4_is_mfunc(dev)) return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 1, port); @@ -980,9 +1423,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_add); int mlx4_multicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) { - if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER)) - return 0; - if (mlx4_is_mfunc(dev)) return mlx4_PROMISC(dev, qpn, MLX4_MC_STEER, 0, port); @@ -992,9 +1432,6 @@ EXPORT_SYMBOL_GPL(mlx4_multicast_promisc_remove); int 
mlx4_unicast_promisc_add(struct mlx4_dev *dev, u32 qpn, u8 port) { - if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) - return 0; - if (mlx4_is_mfunc(dev)) return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 1, port); @@ -1004,9 +1441,6 @@ EXPORT_SYMBOL_GPL(mlx4_unicast_promisc_add); int mlx4_unicast_promisc_remove(struct mlx4_dev *dev, u32 qpn, u8 port) { - if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) - return 0; - if (mlx4_is_mfunc(dev)) return mlx4_PROMISC(dev, qpn, MLX4_UC_STEER, 0, port); @@ -1019,6 +1453,10 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev) struct mlx4_priv *priv = mlx4_priv(dev); int err; + /* No need for mcg_table when fw managed the mcg table*/ + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) + return 0; err = mlx4_bitmap_init(&priv->mcg_table.bitmap, dev->caps.num_amgms, dev->caps.num_amgms - 1, 0, 0); if (err) @@ -1031,5 +1469,7 @@ int mlx4_init_mcg_table(struct mlx4_dev *dev) void mlx4_cleanup_mcg_table(struct mlx4_dev *dev) { - mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); + if (dev->caps.steering_mode != + MLX4_STEERING_MODE_DEVICE_MANAGED) + mlx4_bitmap_cleanup(&mlx4_priv(dev)->mcg_table.bitmap); } diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h index e5d20220762c..d2c436b10fbf 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h @@ -39,6 +39,7 @@ #include <linux/mutex.h> #include <linux/radix-tree.h> +#include <linux/rbtree.h> #include <linux/timer.h> #include <linux/semaphore.h> #include <linux/workqueue.h> @@ -53,6 +54,17 @@ #define DRV_VERSION "1.1" #define DRV_RELDATE "Dec, 2011" +#define MLX4_FS_UDP_UC_EN (1 << 1) +#define MLX4_FS_TCP_UC_EN (1 << 2) +#define MLX4_FS_NUM_OF_L2_ADDR 8 +#define MLX4_FS_MGM_LOG_ENTRY_SIZE 7 +#define MLX4_FS_NUM_MCG (1 << 17) + +enum { + MLX4_FS_L2_HASH = 0, + MLX4_FS_L2_L3_L4_HASH, +}; + #define MLX4_NUM_UP 8 #define MLX4_NUM_TC 8 #define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */ @@ -137,6 +149,7 @@ enum mlx4_resource { RES_VLAN, RES_EQ, RES_COUNTER, + RES_FS_RULE, MLX4_NUM_OF_RESOURCE_TYPE }; @@ -509,7 +522,7 @@ struct slave_list { struct mlx4_resource_tracker { spinlock_t lock; /* tree for each resources */ - struct radix_tree_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE]; + struct rb_root res_tree[MLX4_NUM_OF_RESOURCE_TYPE]; /* num_of_slave's lists, one per slave */ struct slave_list *slave_list; }; @@ -703,6 +716,7 @@ struct mlx4_set_port_rqp_calc_context { struct mlx4_mac_entry { u64 mac; + u64 reg_id; }; struct mlx4_port_info { @@ -776,6 +790,7 @@ struct mlx4_priv { struct mutex bf_mutex; struct io_mapping *bf_mapping; int reserved_mtts; + int fs_hash_mode; }; static inline struct mlx4_priv *mlx4_priv(struct mlx4_dev *dev) @@ -1032,7 +1047,7 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port); /* resource tracker functions*/ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, enum mlx4_resource resource_type, - int resource_id, int *slave); + u64 resource_id, int *slave); void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave_id); int mlx4_init_resource_tracker(struct mlx4_dev *dev); @@ -1117,6 +1132,16 @@ int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave, struct mlx4_cmd_mailbox *inbox, struct mlx4_cmd_mailbox *outbox, struct mlx4_cmd_info *cmd); +int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); +int 
mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd); int mlx4_get_mgm_entry_size(struct mlx4_dev *dev); int mlx4_get_qp_per_mgm(struct mlx4_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index 225c20d47900..a12632150b34 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -75,6 +75,7 @@ #define STAMP_SHIFT 31 #define STAMP_VAL 0x7fffffff #define STATS_DELAY (HZ / 4) +#define MAX_NUM_OF_FS_RULES 256 /* Typical TSO descriptor with 16 gather entries is 352 bytes... */ #define MAX_DESC_SIZE 512 @@ -404,6 +405,19 @@ struct mlx4_en_perf_stats { #define NUM_PERF_COUNTERS 6 }; +enum mlx4_en_mclist_act { + MCLIST_NONE, + MCLIST_REM, + MCLIST_ADD, +}; + +struct mlx4_en_mc_list { + struct list_head list; + enum mlx4_en_mclist_act action; + u8 addr[ETH_ALEN]; + u64 reg_id; +}; + struct mlx4_en_frag_info { u16 frag_size; u16 frag_prefix_size; @@ -422,6 +436,11 @@ struct mlx4_en_frag_info { #endif +struct ethtool_flow_id { + struct ethtool_rx_flow_spec flow_spec; + u64 id; +}; + struct mlx4_en_priv { struct mlx4_en_dev *mdev; struct mlx4_en_port_profile *prof; @@ -431,6 +450,7 @@ struct mlx4_en_priv { struct net_device_stats ret_stats; struct mlx4_en_port_state port_state; spinlock_t stats_lock; + struct ethtool_flow_id ethtool_rules[MAX_NUM_OF_FS_RULES]; unsigned long last_moder_packets[MAX_RX_RINGS]; unsigned long last_moder_tx_packets; @@ -480,6 +500,7 @@ struct mlx4_en_priv { struct mlx4_en_rx_ring rx_ring[MAX_RX_RINGS]; struct mlx4_en_cq *tx_cq; struct mlx4_en_cq rx_cq[MAX_RX_RINGS]; + struct mlx4_qp drop_qp; struct work_struct mcast_task; struct work_struct mac_task; struct work_struct watchdog_task; @@ -489,8 +510,9 @@ struct mlx4_en_priv { struct mlx4_en_pkt_stats pkstats; struct mlx4_en_port_stats port_stats; u64 stats_bitmap; - char *mc_addrs; - int mc_addrs_cnt; + struct list_head mc_list; + struct list_head curr_list; + u64 broadcast_id; struct mlx4_en_stat_out_mbox hw_stats; int vids[128]; bool wol; @@ -565,6 +587,8 @@ void mlx4_en_unmap_buffer(struct mlx4_buf *buf); void mlx4_en_calc_rx_buf(struct net_device *dev); int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv); void mlx4_en_release_rss_steer(struct mlx4_en_priv *priv); +int mlx4_en_create_drop_qp(struct mlx4_en_priv *priv); +void mlx4_en_destroy_drop_qp(struct mlx4_en_priv *priv); int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring); void mlx4_en_rx_irq(struct mlx4_cq *mcq); diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c index a8fb52992c64..a51d1b9bf1d1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/port.c +++ b/drivers/net/ethernet/mellanox/mlx4/port.c @@ -75,21 +75,54 @@ void mlx4_init_vlan_table(struct mlx4_dev *dev, struct mlx4_vlan_table *table) table->total = 0; } -static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) +static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, + u64 mac, int *qpn, u64 *reg_id) { - struct mlx4_qp qp; - u8 gid[16] = {0}; __be64 be_mac; int err; - qp.qpn = *qpn; - - mac &= 0xffffffffffffULL; + mac &= MLX4_MAC_MASK; be_mac = cpu_to_be64(mac << 16); - memcpy(&gid[10], &be_mac, ETH_ALEN); - gid[5] = port; - err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); + switch (dev->caps.steering_mode) { + case 
MLX4_STEERING_MODE_B0: { + struct mlx4_qp qp; + u8 gid[16] = {0}; + + qp.qpn = *qpn; + memcpy(&gid[10], &be_mac, ETH_ALEN); + gid[5] = port; + + err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); + break; + } + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + struct mlx4_spec_list spec_eth = { {NULL} }; + __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); + + struct mlx4_net_trans_rule rule = { + .queue_mode = MLX4_NET_TRANS_Q_FIFO, + .exclusive = 0, + .allow_loopback = 1, + .promisc_mode = MLX4_FS_PROMISC_NONE, + .priority = MLX4_DOMAIN_NIC, + }; + + rule.port = port; + rule.qpn = *qpn; + INIT_LIST_HEAD(&rule.list); + + spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; + memcpy(spec_eth.eth.dst_mac, &be_mac, ETH_ALEN); + memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); + list_add_tail(&spec_eth.list, &rule.list); + + err = mlx4_flow_attach(dev, &rule, reg_id); + break; + } + default: + return -EINVAL; + } if (err) mlx4_warn(dev, "Failed Attaching Unicast\n"); @@ -97,19 +130,30 @@ static int mlx4_uc_steer_add(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) } static void mlx4_uc_steer_release(struct mlx4_dev *dev, u8 port, - u64 mac, int qpn) + u64 mac, int qpn, u64 reg_id) { - struct mlx4_qp qp; - u8 gid[16] = {0}; - __be64 be_mac; + switch (dev->caps.steering_mode) { + case MLX4_STEERING_MODE_B0: { + struct mlx4_qp qp; + u8 gid[16] = {0}; + __be64 be_mac; - qp.qpn = qpn; - mac &= 0xffffffffffffULL; - be_mac = cpu_to_be64(mac << 16); - memcpy(&gid[10], &be_mac, ETH_ALEN); - gid[5] = port; + qp.qpn = qpn; + mac &= MLX4_MAC_MASK; + be_mac = cpu_to_be64(mac << 16); + memcpy(&gid[10], &be_mac, ETH_ALEN); + gid[5] = port; - mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); + mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); + break; + } + case MLX4_STEERING_MODE_DEVICE_MANAGED: { + mlx4_flow_detach(dev, reg_id); + break; + } + default: + mlx4_err(dev, "Invalid steering mode.\n"); + } } static int validate_index(struct mlx4_dev *dev, @@ -144,6 +188,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) struct mlx4_mac_entry *entry; int index = 0; int err = 0; + u64 reg_id; mlx4_dbg(dev, "Registering MAC: 0x%llx for adding\n", (unsigned long long) mac); @@ -155,7 +200,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) return err; } - if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER)) { + if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { *qpn = info->base_qpn + index; return 0; } @@ -167,7 +212,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) goto qp_err; } - err = mlx4_uc_steer_add(dev, port, mac, qpn); + err = mlx4_uc_steer_add(dev, port, mac, qpn, &reg_id); if (err) goto steer_err; @@ -177,6 +222,7 @@ int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn) goto alloc_err; } entry->mac = mac; + entry->reg_id = reg_id; err = radix_tree_insert(&info->mac_tree, *qpn, entry); if (err) goto insert_err; @@ -186,7 +232,7 @@ insert_err: kfree(entry); alloc_err: - mlx4_uc_steer_release(dev, port, mac, *qpn); + mlx4_uc_steer_release(dev, port, mac, *qpn, reg_id); steer_err: mlx4_qp_release_range(dev, *qpn, 1); @@ -206,13 +252,14 @@ void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn) (unsigned long long) mac); mlx4_unregister_mac(dev, port, mac); - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { + if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { entry = radix_tree_lookup(&info->mac_tree, qpn); if (entry) { mlx4_dbg(dev, "Releasing qp: port %d, mac 0x%llx," " qpn %d\n", port, 
(unsigned long long) mac, qpn); - mlx4_uc_steer_release(dev, port, entry->mac, qpn); + mlx4_uc_steer_release(dev, port, entry->mac, + qpn, entry->reg_id); mlx4_qp_release_range(dev, qpn, 1); radix_tree_delete(&info->mac_tree, qpn); kfree(entry); @@ -359,15 +406,18 @@ int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac) int index = qpn - info->base_qpn; int err = 0; - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) { + if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) { entry = radix_tree_lookup(&info->mac_tree, qpn); if (!entry) return -EINVAL; - mlx4_uc_steer_release(dev, port, entry->mac, qpn); + mlx4_uc_steer_release(dev, port, entry->mac, + qpn, entry->reg_id); mlx4_unregister_mac(dev, port, entry->mac); entry->mac = new_mac; + entry->reg_id = 0; mlx4_register_mac(dev, port, new_mac); - err = mlx4_uc_steer_add(dev, port, entry->mac, &qpn); + err = mlx4_uc_steer_add(dev, port, entry->mac, + &qpn, &entry->reg_id); return err; } @@ -803,8 +853,7 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn, u32 m_promisc = (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER) ? MCAST_DIRECT : MCAST_DEFAULT; - if (dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_MC_STEER && - dev->caps.flags & MLX4_DEV_CAP_FLAG_VEP_UC_STEER) + if (dev->caps.steering_mode != MLX4_STEERING_MODE_A0) return 0; mailbox = mlx4_alloc_cmd_mailbox(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c index b83bc928d52a..9ee4725363d5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/profile.c +++ b/drivers/net/ethernet/mellanox/mlx4/profile.c @@ -237,13 +237,19 @@ u64 mlx4_make_profile(struct mlx4_dev *dev, init_hca->mtt_base = profile[i].start; break; case MLX4_RES_MCG: - dev->caps.num_mgms = profile[i].num >> 1; - dev->caps.num_amgms = profile[i].num >> 1; init_hca->mc_base = profile[i].start; init_hca->log_mc_entry_sz = ilog2(mlx4_get_mgm_entry_size(dev)); init_hca->log_mc_table_sz = profile[i].log_num; - init_hca->log_mc_hash_sz = profile[i].log_num - 1; + if (dev->caps.steering_mode == + MLX4_STEERING_MODE_DEVICE_MANAGED) { + dev->caps.num_mgms = profile[i].num; + } else { + init_hca->log_mc_hash_sz = + profile[i].log_num - 1; + dev->caps.num_mgms = profile[i].num >> 1; + dev->caps.num_amgms = profile[i].num >> 1; + } break; default: break; diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 766b8c5a235e..c3fa91986190 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -57,7 +57,8 @@ struct mac_res { struct res_common { struct list_head list; - u32 res_id; + struct rb_node node; + u64 res_id; int owner; int state; int from_state; @@ -189,6 +190,58 @@ struct res_xrcdn { int port; }; +enum res_fs_rule_states { + RES_FS_RULE_BUSY = RES_ANY_BUSY, + RES_FS_RULE_ALLOCATED, +}; + +struct res_fs_rule { + struct res_common com; +}; + +static void *res_tracker_lookup(struct rb_root *root, u64 res_id) +{ + struct rb_node *node = root->rb_node; + + while (node) { + struct res_common *res = container_of(node, struct res_common, + node); + + if (res_id < res->res_id) + node = node->rb_left; + else if (res_id > res->res_id) + node = node->rb_right; + else + return res; + } + return NULL; +} + +static int res_tracker_insert(struct rb_root *root, struct res_common *res) +{ + struct rb_node **new = &(root->rb_node), *parent = NULL; + + /* Figure out where to put new node */ + while (*new) { + 
struct res_common *this = container_of(*new, struct res_common, + node); + + parent = *new; + if (res->res_id < this->res_id) + new = &((*new)->rb_left); + else if (res->res_id > this->res_id) + new = &((*new)->rb_right); + else + return -EEXIST; + } + + /* Add new node and rebalance tree. */ + rb_link_node(&res->node, parent, new); + rb_insert_color(&res->node, root); + + return 0; +} + /* For Debug uses */ static const char *ResourceType(enum mlx4_resource rt) { @@ -201,6 +254,7 @@ static const char *ResourceType(enum mlx4_resource rt) case RES_MAC: return "RES_MAC"; case RES_EQ: return "RES_EQ"; case RES_COUNTER: return "RES_COUNTER"; + case RES_FS_RULE: return "RES_FS_RULE"; case RES_XRCD: return "RES_XRCD"; default: return "Unknown resource type !!!"; }; @@ -228,8 +282,7 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev) mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n", dev->num_slaves); for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) - INIT_RADIX_TREE(&priv->mfunc.master.res_tracker.res_tree[i], - GFP_ATOMIC|__GFP_NOWARN); + priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT; spin_lock_init(&priv->mfunc.master.res_tracker.lock); return 0 ; @@ -277,11 +330,11 @@ static void *find_res(struct mlx4_dev *dev, int res_id, { struct mlx4_priv *priv = mlx4_priv(dev); - return radix_tree_lookup(&priv->mfunc.master.res_tracker.res_tree[type], - res_id); + return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type], + res_id); } -static int get_res(struct mlx4_dev *dev, int slave, int res_id, +static int get_res(struct mlx4_dev *dev, int slave, u64 res_id, enum mlx4_resource type, void *res) { @@ -307,7 +360,7 @@ static int get_res(struct mlx4_dev *dev, int slave, int res_id, r->from_state = r->state; r->state = RES_ANY_BUSY; - mlx4_dbg(dev, "res %s id 0x%x to busy\n", + mlx4_dbg(dev, "res %s id 0x%llx to busy\n", ResourceType(type), r->res_id); if (res) @@ -320,7 +373,7 @@ exit: int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, enum mlx4_resource type, - int res_id, int *slave) + u64 res_id, int *slave) { struct res_common *r; @@ -341,7 +394,7 @@ int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev, return err; } -static void put_res(struct mlx4_dev *dev, int slave, int res_id, +static void put_res(struct mlx4_dev *dev, int slave, u64 res_id, enum mlx4_resource type) { struct res_common *r; @@ -473,7 +526,21 @@ static struct res_common *alloc_xrcdn_tr(int id) return &ret->com; } -static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave, +static struct res_common *alloc_fs_rule_tr(u64 id) +{ + struct res_fs_rule *ret; + + ret = kzalloc(sizeof *ret, GFP_KERNEL); + if (!ret) + return NULL; + + ret->com.res_id = id; + ret->com.state = RES_FS_RULE_ALLOCATED; + + return &ret->com; +} + +static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave, int extra) { struct res_common *ret; @@ -506,6 +573,9 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave, case RES_XRCD: ret = alloc_xrcdn_tr(id); break; + case RES_FS_RULE: + ret = alloc_fs_rule_tr(id); + break; default: return NULL; } @@ -515,7 +585,7 @@ static struct res_common *alloc_tr(int id, enum mlx4_resource type, int slave, return ret; } -static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count, +static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, enum mlx4_resource type, int extra) { int i; @@ -523,7 +593,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count, 
struct mlx4_priv *priv = mlx4_priv(dev); struct res_common **res_arr; struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; - struct radix_tree_root *root = &tracker->res_tree[type]; + struct rb_root *root = &tracker->res_tree[type]; res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL); if (!res_arr) @@ -546,7 +616,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count, err = -EEXIST; goto undo; } - err = radix_tree_insert(root, base + i, res_arr[i]); + err = res_tracker_insert(root, res_arr[i]); if (err) goto undo; list_add_tail(&res_arr[i]->list, @@ -559,7 +629,7 @@ static int add_res_range(struct mlx4_dev *dev, int slave, int base, int count, undo: for (--i; i >= base; --i) - radix_tree_delete(&tracker->res_tree[type], i); + rb_erase(&res_arr[i]->node, root); spin_unlock_irq(mlx4_tlock(dev)); @@ -638,6 +708,16 @@ static int remove_xrcdn_ok(struct res_xrcdn *res) return 0; } +static int remove_fs_rule_ok(struct res_fs_rule *res) +{ + if (res->com.state == RES_FS_RULE_BUSY) + return -EBUSY; + else if (res->com.state != RES_FS_RULE_ALLOCATED) + return -EPERM; + + return 0; +} + static int remove_cq_ok(struct res_cq *res) { if (res->com.state == RES_CQ_BUSY) @@ -679,15 +759,17 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra) return remove_counter_ok((struct res_counter *)res); case RES_XRCD: return remove_xrcdn_ok((struct res_xrcdn *)res); + case RES_FS_RULE: + return remove_fs_rule_ok((struct res_fs_rule *)res); default: return -EINVAL; } } -static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count, +static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count, enum mlx4_resource type, int extra) { - int i; + u64 i; int err; struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker; @@ -695,7 +777,7 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count, spin_lock_irq(mlx4_tlock(dev)); for (i = base; i < base + count; ++i) { - r = radix_tree_lookup(&tracker->res_tree[type], i); + r = res_tracker_lookup(&tracker->res_tree[type], i); if (!r) { err = -ENOENT; goto out; @@ -710,8 +792,8 @@ static int rem_res_range(struct mlx4_dev *dev, int slave, int base, int count, } for (i = base; i < base + count; ++i) { - r = radix_tree_lookup(&tracker->res_tree[type], i); - radix_tree_delete(&tracker->res_tree[type], i); + r = res_tracker_lookup(&tracker->res_tree[type], i); + rb_erase(&r->node, &tracker->res_tree[type]); list_del(&r->list); kfree(r); } @@ -733,7 +815,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, int err = 0; spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[RES_QP], qpn); + r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn); if (!r) err = -ENOENT; else if (r->com.owner != slave) @@ -741,7 +823,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, else { switch (state) { case RES_QP_BUSY: - mlx4_dbg(dev, "%s: failed RES_QP, 0x%x\n", + mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n", __func__, r->com.res_id); err = -EBUSY; break; @@ -750,7 +832,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, if (r->com.state == RES_QP_MAPPED && !alloc) break; - mlx4_dbg(dev, "failed RES_QP, 0x%x\n", r->com.res_id); + mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id); err = -EINVAL; break; @@ -759,7 +841,7 @@ static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn, 
r->com.state == RES_QP_HW) break; else { - mlx4_dbg(dev, "failed RES_QP, 0x%x\n", + mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id); err = -EINVAL; } @@ -797,7 +879,7 @@ static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index, int err = 0; spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[RES_MPT], index); + r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index); if (!r) err = -ENOENT; else if (r->com.owner != slave) @@ -850,7 +932,7 @@ static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, int err = 0; spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[RES_EQ], index); + r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index); if (!r) err = -ENOENT; else if (r->com.owner != slave) @@ -898,7 +980,7 @@ static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn, int err; spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[RES_CQ], cqn); + r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn); if (!r) err = -ENOENT; else if (r->com.owner != slave) @@ -952,7 +1034,7 @@ static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index, int err = 0; spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[RES_SRQ], index); + r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index); if (!r) err = -ENOENT; else if (r->com.owner != slave) @@ -1001,7 +1083,7 @@ static void res_abort_move(struct mlx4_dev *dev, int slave, struct res_common *r; spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[type], id); + r = res_tracker_lookup(&tracker->res_tree[type], id); if (r && (r->owner == slave)) r->state = r->from_state; spin_unlock_irq(mlx4_tlock(dev)); @@ -1015,7 +1097,7 @@ static void res_end_move(struct mlx4_dev *dev, int slave, struct res_common *r; spin_lock_irq(mlx4_tlock(dev)); - r = radix_tree_lookup(&tracker->res_tree[type], id); + r = res_tracker_lookup(&tracker->res_tree[type], id); if (r && (r->owner == slave)) r->state = r->to_state; spin_unlock_irq(mlx4_tlock(dev)); @@ -2695,6 +2777,60 @@ ex_put: return err; } +int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + + if (dev->caps.steering_mode != + MLX4_STEERING_MODE_DEVICE_MANAGED) + return -EOPNOTSUPP; + + err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param, + vhcr->in_modifier, 0, + MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + if (err) + return err; + + err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, 0); + if (err) { + mlx4_err(dev, "Fail to add flow steering resources.\n "); + /* detach rule*/ + mlx4_cmd(dev, vhcr->out_param, 0, 0, + MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + } + return err; +} + +int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave, + struct mlx4_vhcr *vhcr, + struct mlx4_cmd_mailbox *inbox, + struct mlx4_cmd_mailbox *outbox, + struct mlx4_cmd_info *cmd) +{ + int err; + + if (dev->caps.steering_mode != + MLX4_STEERING_MODE_DEVICE_MANAGED) + return -EOPNOTSUPP; + + err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0); + if (err) { + mlx4_err(dev, "Fail to remove flow steering resources.\n "); + return err; + } + + err = mlx4_cmd(dev, vhcr->in_param, 0, 0, + MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + return err; +} + enum { 
BUSY_MAX_RETRIES = 10 }; @@ -2751,7 +2887,7 @@ static int _move_all_busy(struct mlx4_dev *dev, int slave, if (r->state == RES_ANY_BUSY) { if (print) mlx4_dbg(dev, - "%s id 0x%x is busy\n", + "%s id 0x%llx is busy\n", ResourceType(type), r->res_id); ++busy; @@ -2817,8 +2953,8 @@ static void rem_slave_qps(struct mlx4_dev *dev, int slave) switch (state) { case RES_QP_RESERVED: spin_lock_irq(mlx4_tlock(dev)); - radix_tree_delete(&tracker->res_tree[RES_QP], - qp->com.res_id); + rb_erase(&qp->com.node, + &tracker->res_tree[RES_QP]); list_del(&qp->com.list); spin_unlock_irq(mlx4_tlock(dev)); kfree(qp); @@ -2888,8 +3024,8 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave) case RES_SRQ_ALLOCATED: __mlx4_srq_free_icm(dev, srqn); spin_lock_irq(mlx4_tlock(dev)); - radix_tree_delete(&tracker->res_tree[RES_SRQ], - srqn); + rb_erase(&srq->com.node, + &tracker->res_tree[RES_SRQ]); list_del(&srq->com.list); spin_unlock_irq(mlx4_tlock(dev)); kfree(srq); @@ -2954,8 +3090,8 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave) case RES_CQ_ALLOCATED: __mlx4_cq_free_icm(dev, cqn); spin_lock_irq(mlx4_tlock(dev)); - radix_tree_delete(&tracker->res_tree[RES_CQ], - cqn); + rb_erase(&cq->com.node, + &tracker->res_tree[RES_CQ]); list_del(&cq->com.list); spin_unlock_irq(mlx4_tlock(dev)); kfree(cq); @@ -3017,8 +3153,8 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave) case RES_MPT_RESERVED: __mlx4_mr_release(dev, mpt->key); spin_lock_irq(mlx4_tlock(dev)); - radix_tree_delete(&tracker->res_tree[RES_MPT], - mptn); + rb_erase(&mpt->com.node, + &tracker->res_tree[RES_MPT]); list_del(&mpt->com.list); spin_unlock_irq(mlx4_tlock(dev)); kfree(mpt); @@ -3086,8 +3222,8 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave) __mlx4_free_mtt_range(dev, base, mtt->order); spin_lock_irq(mlx4_tlock(dev)); - radix_tree_delete(&tracker->res_tree[RES_MTT], - base); + rb_erase(&mtt->com.node, + &tracker->res_tree[RES_MTT]); list_del(&mtt->com.list); spin_unlock_irq(mlx4_tlock(dev)); kfree(mtt); @@ -3104,6 +3240,58 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave) spin_unlock_irq(mlx4_tlock(dev)); } +static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave) +{ + struct mlx4_priv *priv = mlx4_priv(dev); + struct mlx4_resource_tracker *tracker = + &priv->mfunc.master.res_tracker; + struct list_head *fs_rule_list = + &tracker->slave_list[slave].res_list[RES_FS_RULE]; + struct res_fs_rule *fs_rule; + struct res_fs_rule *tmp; + int state; + u64 base; + int err; + + err = move_all_busy(dev, slave, RES_FS_RULE); + if (err) + mlx4_warn(dev, "rem_slave_fs_rule: Could not move all mtts to busy for slave %d\n", + slave); + + spin_lock_irq(mlx4_tlock(dev)); + list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) { + spin_unlock_irq(mlx4_tlock(dev)); + if (fs_rule->com.owner == slave) { + base = fs_rule->com.res_id; + state = fs_rule->com.from_state; + while (state != 0) { + switch (state) { + case RES_FS_RULE_ALLOCATED: + /* detach rule */ + err = mlx4_cmd(dev, base, 0, 0, + MLX4_QP_FLOW_STEERING_DETACH, + MLX4_CMD_TIME_CLASS_A, + MLX4_CMD_NATIVE); + + spin_lock_irq(mlx4_tlock(dev)); + rb_erase(&fs_rule->com.node, + &tracker->res_tree[RES_FS_RULE]); + list_del(&fs_rule->com.list); + spin_unlock_irq(mlx4_tlock(dev)); + kfree(fs_rule); + state = 0; + break; + + default: + state = 0; + } + } + } + spin_lock_irq(mlx4_tlock(dev)); + } + spin_unlock_irq(mlx4_tlock(dev)); +} + static void rem_slave_eqs(struct mlx4_dev *dev, int slave) { struct mlx4_priv *priv = mlx4_priv(dev); @@ -3133,8 
+3321,8 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave) switch (state) { case RES_EQ_RESERVED: spin_lock_irq(mlx4_tlock(dev)); - radix_tree_delete(&tracker->res_tree[RES_EQ], - eqn); + rb_erase(&eq->com.node, + &tracker->res_tree[RES_EQ]); list_del(&eq->com.list); spin_unlock_irq(mlx4_tlock(dev)); kfree(eq); @@ -3191,7 +3379,8 @@ static void rem_slave_counters(struct mlx4_dev *dev, int slave) list_for_each_entry_safe(counter, tmp, counter_list, com.list) { if (counter->com.owner == slave) { index = counter->com.res_id; - radix_tree_delete(&tracker->res_tree[RES_COUNTER], index); + rb_erase(&counter->com.node, + &tracker->res_tree[RES_COUNTER]); list_del(&counter->com.list); kfree(counter); __mlx4_counter_free(dev, index); @@ -3220,7 +3409,7 @@ static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave) list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) { if (xrcd->com.owner == slave) { xrcdn = xrcd->com.res_id; - radix_tree_delete(&tracker->res_tree[RES_XRCD], xrcdn); + rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]); list_del(&xrcd->com.list); kfree(xrcd); __mlx4_xrcd_free(dev, xrcdn); @@ -3244,5 +3433,6 @@ void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave) rem_slave_mtts(dev, slave); rem_slave_counters(dev, slave); rem_slave_xrcdns(dev, slave); + rem_slave_fs_rule(dev, slave); mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex); } diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 90153fc983cb..fa85cf1353fd 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -3775,7 +3775,7 @@ static void myri10ge_probe_slices(struct myri10ge_priv *mgp) mgp->num_slices = 1; msix_cap = pci_find_capability(pdev, PCI_CAP_ID_MSIX); - ncpus = num_online_cpus(); + ncpus = netif_get_num_default_rss_queues(); if (myri10ge_max_slices == 1 || msix_cap == 0 || (myri10ge_max_slices == -1 && ncpus < 2)) diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 2578eb1f025d..2fd1edbc5e0e 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3687,7 +3687,8 @@ static int __devinit vxge_config_vpaths( return 0; if (!driver_config->g_no_cpus) - driver_config->g_no_cpus = num_online_cpus(); + driver_config->g_no_cpus = + netif_get_num_default_rss_queues(); driver_config->vpath_per_dev = driver_config->g_no_cpus >> 1; if (!driver_config->vpath_per_dev) diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 09d8d33171df..3c3499d928b9 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4649,7 +4649,7 @@ static int __devinit qlge_probe(struct pci_dev *pdev, int err = 0; ndev = alloc_etherdev_mq(sizeof(struct ql_adapter), - min(MAX_CPUS, (int)num_online_cpus())); + min(MAX_CPUS, netif_get_num_default_rss_queues())); if (!ndev) return -ENOMEM; diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index d1827e887f4e..9acc026f62e8 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -1256,7 +1256,6 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev) kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); netif_napi_del(&lp->napi); - pci_set_drvdata(pdev, NULL); pci_iounmap(pdev, lp->base); pci_release_regions(pdev); 
free_netdev(dev); diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c index 3ae80eccd0ef..6564c32d3af0 100644 --- a/drivers/net/usb/asix.c +++ b/drivers/net/usb/asix.c @@ -358,14 +358,30 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, padlen = ((skb->len + 4) & (dev->maxpacket - 1)) ? 0 : 4; - if ((!skb_cloned(skb)) && - ((headroom + tailroom) >= (4 + padlen))) { - if ((headroom < 4) || (tailroom < padlen)) { + /* We need to push 4 bytes in front of frame (packet_len) + * and maybe add 4 bytes after the end (if padlen is 4) + * + * Avoid skb_copy_expand() expensive call, using following rules : + * - We are allowed to push 4 bytes in headroom if skb_header_cloned() + * is false (and if we have 4 bytes of headroom) + * - We are allowed to put 4 bytes at tail if skb_cloned() + * is false (and if we have 4 bytes of tailroom) + * + * TCP packets for example are cloned, but skb_header_release() + * was called in tcp stack, allowing us to use headroom for our needs. + */ + if (!skb_header_cloned(skb) && + !(padlen && skb_cloned(skb)) && + headroom + tailroom >= 4 + padlen) { + /* following should not happen, but better be safe */ + if (headroom < 4 || + tailroom < padlen) { skb->data = memmove(skb->head + 4, skb->data, skb->len); skb_set_tail_pointer(skb, skb->len); } } else { struct sk_buff *skb2; + skb2 = skb_copy_expand(skb, 4, padlen, flags); dev_kfree_skb_any(skb); skb = skb2; @@ -373,8 +389,8 @@ static struct sk_buff *asix_tx_fixup(struct usbnet *dev, struct sk_buff *skb, return NULL; } + packet_len = ((skb->len ^ 0x0000ffff) << 16) + skb->len; skb_push(skb, 4); - packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4); cpu_to_le32s(&packet_len); skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len)); @@ -880,6 +896,8 @@ static int ax88172_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->netdev_ops = &ax88172_netdev_ops; dev->net->ethtool_ops = &ax88172_ethtool_ops; + dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */ + dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */ asix_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET); asix_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE, @@ -1075,6 +1093,8 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->netdev_ops = &ax88772_netdev_ops; dev->net->ethtool_ops = &ax88772_ethtool_ops; + dev->net->needed_headroom = 4; /* cf asix_tx_fixup() */ + dev->net->needed_tailroom = 4; /* cf asix_tx_fixup() */ embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 
1 : 0); diff --git a/drivers/oprofile/oprofile_perf.c b/drivers/oprofile/oprofile_perf.c index da14432806c6..efc4b7f308cf 100644 --- a/drivers/oprofile/oprofile_perf.c +++ b/drivers/oprofile/oprofile_perf.c @@ -25,7 +25,7 @@ static int oprofile_perf_enabled; static DEFINE_MUTEX(oprofile_perf_mutex); static struct op_counter_config *counter_config; -static struct perf_event **perf_events[nr_cpumask_bits]; +static struct perf_event **perf_events[NR_CPUS]; static int num_counters; /* diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 7be5e9775691..73ac63d901c9 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2700,10 +2700,11 @@ int inline qeth_l3_get_cast_type(struct qeth_card *card, struct sk_buff *skb) rcu_read_lock(); dst = skb_dst(skb); if (dst) - n = dst_get_neighbour_noref(dst); + n = dst_neigh_lookup_skb(dst, skb); if (n) { cast_type = n->type; rcu_read_unlock(); + neigh_release(n); if ((cast_type == RTN_BROADCAST) || (cast_type == RTN_MULTICAST) || (cast_type == RTN_ANYCAST)) diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 36739da8bc15..49692a1ac44a 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -966,7 +966,8 @@ static int init_act_open(struct cxgbi_sock *csk) csk->saddr.sin_addr.s_addr = chba->ipv4addr; csk->rss_qid = 0; - csk->l2t = t3_l2t_get(t3dev, dst, ndev); + csk->l2t = t3_l2t_get(t3dev, dst, ndev, + &csk->daddr.sin_addr.s_addr); if (!csk->l2t) { pr_err("NO l2t available.\n"); return -EINVAL; diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 5a4a3bfc60cf..cc9a06897f34 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -1142,7 +1142,7 @@ static int init_act_open(struct cxgbi_sock *csk) cxgbi_sock_set_flag(csk, CTPF_HAS_ATID); cxgbi_sock_get(csk); - n = dst_get_neighbour_noref(csk->dst); + n = dst_neigh_lookup(csk->dst, &csk->daddr.sin_addr.s_addr); if (!n) { pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name); goto rel_resource; @@ -1182,9 +1182,12 @@ static int init_act_open(struct cxgbi_sock *csk) cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN); send_act_open_req(csk, skb, csk->l2t); + neigh_release(n); return 0; rel_resource: + if (n) + neigh_release(n); if (skb) __kfree_skb(skb); return -EINVAL; diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index d9253db1d0e2..b44c1cff3114 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -494,7 +494,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) goto err_out; } dst = &rt->dst; - n = dst_get_neighbour_noref(dst); + n = dst_neigh_lookup(dst, &daddr->sin_addr.s_addr); if (!n) { err = -ENODEV; goto rel_rt; @@ -506,7 +506,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) &daddr->sin_addr.s_addr, ntohs(daddr->sin_port), ndev->name); err = -ENETUNREACH; - goto rel_rt; + goto rel_neigh; } if (ndev->flags & IFF_LOOPBACK) { @@ -521,7 +521,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) pr_info("dst %pI4, %s, NOT cxgbi device.\n", &daddr->sin_addr.s_addr, ndev->name); err = -ENETUNREACH; - goto rel_rt; + goto rel_neigh; } log_debug(1 << CXGBI_DBG_SOCK, "route to %pI4 :%u, ndev p#%d,%s, cdev 0x%p.\n", @@ -531,7 +531,7 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) csk = cxgbi_sock_create(cdev); if (!csk) { err = -ENOMEM; - goto rel_rt; + goto 
rel_neigh; } csk->cdev = cdev; csk->port_id = port; @@ -541,9 +541,13 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr) csk->daddr.sin_port = daddr->sin_port; csk->daddr.sin_family = daddr->sin_family; csk->saddr.sin_addr.s_addr = fl4.saddr; + neigh_release(n); return csk; +rel_neigh: + neigh_release(n); + rel_rt: ip_rt_put(rt); if (csk)
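A minimal sketch (not part of the series) of how a caller drives the device-managed flow steering API exported by the mcg.c hunks above. It mirrors the L2 rule that mlx4_uc_steer_add() builds in port.c; the helper name example_steer_mac_to_qp is hypothetical, everything else comes from the definitions added above.

static int example_steer_mac_to_qp(struct mlx4_dev *dev, u8 port,
				   const u8 *mac, u32 qpn, u64 *reg_id)
{
	struct mlx4_spec_list spec = { {NULL} };
	struct mlx4_net_trans_rule rule = {
		.queue_mode	= MLX4_NET_TRANS_Q_FIFO,
		.exclusive	= 0,
		.allow_loopback	= 1,
		.promisc_mode	= MLX4_FS_PROMISC_NONE,
		.priority	= MLX4_DOMAIN_NIC,
		.port		= port,
		.qpn		= qpn,
	};

	INIT_LIST_HEAD(&rule.list);

	/* One spec per header to match; here a single exact-match L2 spec */
	spec.id = MLX4_NET_TRANS_RULE_ID_ETH;
	memcpy(spec.eth.dst_mac, mac, ETH_ALEN);
	memset(spec.eth.dst_mac_msk, 0xff, ETH_ALEN);
	list_add_tail(&spec.list, &rule.list);

	/* Firmware hands back a 64 bit registration id on success; the
	 * rule is removed later with mlx4_flow_detach(dev, *reg_id).
	 */
	return mlx4_flow_attach(dev, &rule, reg_id);
}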
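Several drivers above (mlx4_en, myri10ge, vxge, qlge) stop sizing their default queue/MSI-X counts by num_online_cpus() and use netif_get_num_default_rss_queues() instead. The point is policy rather than mechanism: on large machines one queue per CPU wastes interrupts and memory for little RSS gain, so the default is capped. A rough paraphrase of the helper's behavior, with the cap value (8 when the helper was introduced) treated as an assumption:

/* EXAMPLE_MAX_DEFAULT_RSS_QUEUES stands in for the kernel's cap constant */
#define EXAMPLE_MAX_DEFAULT_RSS_QUEUES	8

static inline int example_default_rss_queues(int online_cpus)
{
	/* cap the default instead of scaling with every online CPU */
	return online_cpus < EXAMPLE_MAX_DEFAULT_RSS_QUEUES ?
	       online_cpus : EXAMPLE_MAX_DEFAULT_RSS_QUEUES;
}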
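The asix_tx_fixup() change above moves the packet_len computation before skb_push(skb, 4), so skb->len is still the unpadded frame length when the 4-byte header is built: the low 16 bits carry the length, the high 16 bits its bitwise complement as a sanity check for the device. A standalone arithmetic check (plain userspace C, not driver code):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t len = 60;	/* example frame length, i.e. skb->len */
	uint32_t packet_len = ((len ^ 0x0000ffff) << 16) + len;

	/* 60 = 0x003c, 0x003c ^ 0xffff = 0xffc3 -> header 0xffc3003c,
	 * stored little-endian on the wire by cpu_to_le32s() in the driver
	 */
	printf("header = 0x%08x\n", packet_len);
	return 0;
}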