Diffstat (limited to 'drivers')
439 files changed, 8643 insertions, 3828 deletions
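Most of the churn in this pull is the mechanical conversion of kernel timers from the old void (*fn)(unsigned long data) callback form to void (*fn)(struct timer_list *t): setup_timer()/init_timer() calls become timer_setup(), the .data cookie goes away, and callbacks recover their containing structure with from_timer(). The sketch below is not taken from any driver in this series; struct foo_device, foo_poll() and foo_device_start() are invented purely to show the pattern in isolation.

/*
 * Hypothetical driver illustrating the timer_setup()/from_timer() pattern
 * used throughout this diff.  struct foo_device and its fields are made up.
 */
#include <linux/timer.h>
#include <linux/jiffies.h>

struct foo_device {
        struct timer_list poll_timer;
        bool keep_polling;
};

/* New-style callback: receives the timer, not an unsigned long cookie. */
static void foo_poll(struct timer_list *t)
{
        struct foo_device *foo = from_timer(foo, t, poll_timer);

        /* ... poll hardware ... */

        if (foo->keep_polling)
                mod_timer(&foo->poll_timer, jiffies + HZ);
}

static void foo_device_start(struct foo_device *foo)
{
        /* Old code: setup_timer(&foo->poll_timer, foo_poll, (unsigned long)foo); */
        timer_setup(&foo->poll_timer, foo_poll, 0);     /* or TIMER_DEFERRABLE */
        foo->keep_polling = true;
        mod_timer(&foo->poll_timer, jiffies + HZ);
}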
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c index 3c3a37b8503b..ebaa51ba8a22 100644 --- a/drivers/acpi/apei/ghes.c +++ b/drivers/acpi/apei/ghes.c @@ -774,9 +774,9 @@ static void ghes_add_timer(struct ghes *ghes) add_timer(&ghes->timer); } -static void ghes_poll_func(unsigned long data) +static void ghes_poll_func(struct timer_list *t) { - struct ghes *ghes = (void *)data; + struct ghes *ghes = from_timer(ghes, t, timer); ghes_proc(ghes); if (!(ghes->flags & GHES_EXITING)) @@ -1147,8 +1147,7 @@ static int ghes_probe(struct platform_device *ghes_dev) switch (generic->notify.type) { case ACPI_HEST_NOTIFY_POLLED: - setup_deferrable_timer(&ghes->timer, ghes_poll_func, - (unsigned long)ghes); + timer_setup(&ghes->timer, ghes_poll_func, TIMER_DEFERRABLE); ghes_add_timer(ghes); break; case ACPI_HEST_NOTIFY_EXTERNAL: diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h index 8b61123d2c3c..749fd94441b0 100644 --- a/drivers/ata/ahci.h +++ b/drivers/ata/ahci.h @@ -303,6 +303,7 @@ struct ahci_em_priv { unsigned long saved_activity; unsigned long activity; unsigned long led_state; + struct ata_link *link; }; struct ahci_port_priv { diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 3e286d86ab42..a0de7a38430c 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -968,12 +968,12 @@ static void ahci_sw_activity(struct ata_link *link) mod_timer(&emp->timer, jiffies + msecs_to_jiffies(10)); } -static void ahci_sw_activity_blink(unsigned long arg) +static void ahci_sw_activity_blink(struct timer_list *t) { - struct ata_link *link = (struct ata_link *)arg; + struct ahci_em_priv *emp = from_timer(emp, t, timer); + struct ata_link *link = emp->link; struct ata_port *ap = link->ap; - struct ahci_port_priv *pp = ap->private_data; - struct ahci_em_priv *emp = &pp->em_priv[link->pmp]; + unsigned long led_message = emp->led_state; u32 activity_led_state; unsigned long flags; @@ -1020,7 +1020,8 @@ static void ahci_init_sw_activity(struct ata_link *link) /* init activity stats, setup timer */ emp->saved_activity = emp->activity = 0; - setup_timer(&emp->timer, ahci_sw_activity_blink, (unsigned long)link); + emp->link = link; + timer_setup(&emp->timer, ahci_sw_activity_blink, 0); /* check our blink policy and set flag for link if it's enabled */ if (emp->blink_policy) diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index ee4c1ec9dca0..b8ac4902d312 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -5979,9 +5979,8 @@ struct ata_port *ata_port_alloc(struct ata_host *host) INIT_LIST_HEAD(&ap->eh_done_q); init_waitqueue_head(&ap->eh_wait_q); init_completion(&ap->park_req_pending); - setup_deferrable_timer(&ap->fastdrain_timer, - ata_eh_fastdrain_timerfn, - (unsigned long)ap); + timer_setup(&ap->fastdrain_timer, ata_eh_fastdrain_timerfn, + TIMER_DEFERRABLE); ap->cbl = ATA_CBL_NONE; diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index e4effef0c83f..ece6fd91a947 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -879,9 +879,9 @@ static int ata_eh_nr_in_flight(struct ata_port *ap) return nr; } -void ata_eh_fastdrain_timerfn(unsigned long arg) +void ata_eh_fastdrain_timerfn(struct timer_list *t) { - struct ata_port *ap = (void *)arg; + struct ata_port *ap = from_timer(ap, t, fastdrain_timer); unsigned long flags; int cnt; diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h index 839d487394b7..08a245b76417 100644 --- a/drivers/ata/libata.h +++ b/drivers/ata/libata.h @@ -154,7 +154,7 @@ extern void 
ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd); extern void ata_eh_acquire(struct ata_port *ap); extern void ata_eh_release(struct ata_port *ap); extern void ata_scsi_error(struct Scsi_Host *host); -extern void ata_eh_fastdrain_timerfn(unsigned long arg); +extern void ata_eh_fastdrain_timerfn(struct timer_list *t); extern void ata_qc_schedule_eh(struct ata_queued_cmd *qc); extern void ata_dev_disable(struct ata_device *dev); extern void ata_eh_detach_dev(struct ata_device *dev); diff --git a/drivers/atm/idt77105.c b/drivers/atm/idt77105.c index 082aa02abc57..57af9fd198e4 100644 --- a/drivers/atm/idt77105.c +++ b/drivers/atm/idt77105.c @@ -49,8 +49,8 @@ static void idt77105_stats_timer_func(unsigned long); static void idt77105_restart_timer_func(unsigned long); -static DEFINE_TIMER(stats_timer, idt77105_stats_timer_func, 0, 0); -static DEFINE_TIMER(restart_timer, idt77105_restart_timer_func, 0, 0); +static DEFINE_TIMER(stats_timer, idt77105_stats_timer_func); +static DEFINE_TIMER(restart_timer, idt77105_restart_timer_func); static int start_timer = 1; static struct idt77105_priv *idt77105_all = NULL; diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c index fc72b763fdd7..ad6b582c268e 100644 --- a/drivers/atm/iphase.c +++ b/drivers/atm/iphase.c @@ -76,7 +76,7 @@ static IADEV *ia_dev[8]; static struct atm_dev *_ia_dev[8]; static int iadev_count; static void ia_led_timer(unsigned long arg); -static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0); +static DEFINE_TIMER(ia_timer, ia_led_timer); static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ; static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ; static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER diff --git a/drivers/auxdisplay/img-ascii-lcd.c b/drivers/auxdisplay/img-ascii-lcd.c index a9020f82eea7..db040b378224 100644 --- a/drivers/auxdisplay/img-ascii-lcd.c +++ b/drivers/auxdisplay/img-ascii-lcd.c @@ -229,9 +229,9 @@ MODULE_DEVICE_TABLE(of, img_ascii_lcd_matches); * Scroll the current message along the LCD by one character, rearming the * timer if required. 
*/ -static void img_ascii_lcd_scroll(unsigned long arg) +static void img_ascii_lcd_scroll(struct timer_list *t) { - struct img_ascii_lcd_ctx *ctx = (struct img_ascii_lcd_ctx *)arg; + struct img_ascii_lcd_ctx *ctx = from_timer(ctx, t, timer); unsigned int i, ch = ctx->scroll_pos; unsigned int num_chars = ctx->cfg->num_chars; @@ -299,7 +299,7 @@ static int img_ascii_lcd_display(struct img_ascii_lcd_ctx *ctx, ctx->scroll_pos = 0; /* update the LCD */ - img_ascii_lcd_scroll((unsigned long)ctx); + img_ascii_lcd_scroll(&ctx->timer); return 0; } @@ -395,9 +395,7 @@ static int img_ascii_lcd_probe(struct platform_device *pdev) ctx->scroll_rate = HZ / 2; /* initialise a timer for scrolling the message */ - init_timer(&ctx->timer); - ctx->timer.function = img_ascii_lcd_scroll; - ctx->timer.data = (unsigned long)ctx; + timer_setup(&ctx->timer, img_ascii_lcd_scroll, 0); platform_set_drvdata(pdev, ctx); diff --git a/drivers/auxdisplay/panel.c b/drivers/auxdisplay/panel.c index 6911acd896d9..ea7869c0d7f9 100644 --- a/drivers/auxdisplay/panel.c +++ b/drivers/auxdisplay/panel.c @@ -1396,7 +1396,7 @@ static void panel_process_inputs(void) } } -static void panel_scan_timer(void) +static void panel_scan_timer(struct timer_list *unused) { if (keypad.enabled && keypad_initialized) { if (spin_trylock_irq(&pprt_lock)) { @@ -1421,7 +1421,7 @@ static void init_scan_timer(void) if (scan_timer.function) return; /* already started */ - setup_timer(&scan_timer, (void *)&panel_scan_timer, 0); + timer_setup(&scan_timer, panel_scan_timer, 0); scan_timer.expires = jiffies + INPUT_POLL_TIME; add_timer(&scan_timer); } diff --git a/drivers/base/core.c b/drivers/base/core.c index 12ebd055724c..4b8ba2a75a4d 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -668,7 +668,7 @@ const char *dev_driver_string(const struct device *dev) * so be careful about accessing it. dev->bus and dev->class should * never change once they are set, so they don't need special care. */ - drv = ACCESS_ONCE(dev->driver); + drv = READ_ONCE(dev->driver); return drv ? drv->name : (dev->bus ? dev->bus->name : (dev->class ? dev->class->name : "")); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 321cd7b4d817..a73ab95558f5 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -18,6 +18,7 @@ #include <linux/cpufeature.h> #include <linux/tick.h> #include <linux/pm_qos.h> +#include <linux/sched/isolation.h> #include "base.h" @@ -271,8 +272,16 @@ static ssize_t print_cpus_isolated(struct device *dev, struct device_attribute *attr, char *buf) { int n = 0, len = PAGE_SIZE-2; + cpumask_var_t isolated; - n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(cpu_isolated_map)); + if (!alloc_cpumask_var(&isolated, GFP_KERNEL)) + return -ENOMEM; + + cpumask_andnot(isolated, cpu_possible_mask, + housekeeping_cpumask(HK_FLAG_DOMAIN)); + n = scnprintf(buf, len, "%*pbl\n", cpumask_pr_args(isolated)); + + free_cpumask_var(isolated); return n; } diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 770b1539a083..ae47b2ec84b4 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -478,9 +478,9 @@ struct dpm_watchdog { * There's not much we can do here to recover so panic() to * capture a crash-dump in pstore. 
*/ -static void dpm_watchdog_handler(unsigned long data) +static void dpm_watchdog_handler(struct timer_list *t) { - struct dpm_watchdog *wd = (void *)data; + struct dpm_watchdog *wd = from_timer(wd, t, timer); dev_emerg(wd->dev, "**** DPM device timeout ****\n"); show_stack(wd->tsk, NULL); @@ -500,11 +500,9 @@ static void dpm_watchdog_set(struct dpm_watchdog *wd, struct device *dev) wd->dev = dev; wd->tsk = current; - init_timer_on_stack(timer); + timer_setup_on_stack(timer, dpm_watchdog_handler, 0); /* use same timeout value for both suspend and resume */ timer->expires = jiffies + HZ * CONFIG_DPM_WATCHDOG_TIMEOUT; - timer->function = dpm_watchdog_handler; - timer->data = (unsigned long)wd; add_timer(timer); } diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 7bcf80fa9ada..41d7c2b99f69 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -134,11 +134,11 @@ unsigned long pm_runtime_autosuspend_expiration(struct device *dev) if (!dev->power.use_autosuspend) goto out; - autosuspend_delay = ACCESS_ONCE(dev->power.autosuspend_delay); + autosuspend_delay = READ_ONCE(dev->power.autosuspend_delay); if (autosuspend_delay < 0) goto out; - last_busy = ACCESS_ONCE(dev->power.last_busy); + last_busy = READ_ONCE(dev->power.last_busy); elapsed = jiffies - last_busy; if (elapsed < 0) goto out; /* jiffies has wrapped around. */ diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig index 0368fd7b3a41..3a1535d812d8 100644 --- a/drivers/base/regmap/Kconfig +++ b/drivers/base/regmap/Kconfig @@ -6,6 +6,7 @@ config REGMAP default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_W1 || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ) select IRQ_DOMAIN if REGMAP_IRQ + select REGMAP_HWSPINLOCK if HWSPINLOCK=y bool config REGCACHE_COMPRESSED @@ -37,3 +38,6 @@ config REGMAP_MMIO config REGMAP_IRQ bool + +config REGMAP_HWSPINLOCK + bool diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 2a4435d76028..8641183cac2f 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -157,6 +157,8 @@ struct regmap { struct rb_root range_tree; void *selector_work_buf; /* Scratch buffer used for selector */ + + struct hwspinlock *hwlock; }; struct regcache_ops { diff --git a/drivers/base/regmap/regmap-spi.c b/drivers/base/regmap/regmap-spi.c index edd9a839d004..c7150dd264d5 100644 --- a/drivers/base/regmap/regmap-spi.c +++ b/drivers/base/regmap/regmap-spi.c @@ -102,7 +102,7 @@ static int regmap_spi_read(void *context, return spi_write_then_read(spi, reg, reg_size, val, val_size); } -static struct regmap_bus regmap_spi = { +static const struct regmap_bus regmap_spi = { .write = regmap_spi_write, .gather_write = regmap_spi_gather_write, .async_write = regmap_spi_async_write, diff --git a/drivers/base/regmap/regmap-spmi.c b/drivers/base/regmap/regmap-spmi.c index 4a36e415e938..0bfb8ed244d5 100644 --- a/drivers/base/regmap/regmap-spmi.c +++ b/drivers/base/regmap/regmap-spmi.c @@ -83,7 +83,7 @@ static int regmap_spmi_base_write(void *context, const void *data, count - 1); } -static struct regmap_bus regmap_spmi_base = { +static const struct regmap_bus regmap_spmi_base = { .read = regmap_spmi_base_read, .write = regmap_spmi_base_write, .gather_write = regmap_spmi_base_gather_write, @@ -203,7 +203,7 @@ static int regmap_spmi_ext_write(void *context, const void *data, count - 2); } -static struct regmap_bus regmap_spmi_ext = { +static const struct regmap_bus regmap_spmi_ext = { .read = regmap_spmi_ext_read, 
.write = regmap_spmi_ext_write, .gather_write = regmap_spmi_ext_gather_write, diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index b9a779a4a739..8d516a9bfc01 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -20,6 +20,7 @@ #include <linux/sched.h> #include <linux/delay.h> #include <linux/log2.h> +#include <linux/hwspinlock.h> #define CREATE_TRACE_POINTS #include "trace.h" @@ -413,6 +414,51 @@ static unsigned int regmap_parse_64_native(const void *buf) } #endif +#ifdef REGMAP_HWSPINLOCK +static void regmap_lock_hwlock(void *__map) +{ + struct regmap *map = __map; + + hwspin_lock_timeout(map->hwlock, UINT_MAX); +} + +static void regmap_lock_hwlock_irq(void *__map) +{ + struct regmap *map = __map; + + hwspin_lock_timeout_irq(map->hwlock, UINT_MAX); +} + +static void regmap_lock_hwlock_irqsave(void *__map) +{ + struct regmap *map = __map; + + hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX, + &map->spinlock_flags); +} + +static void regmap_unlock_hwlock(void *__map) +{ + struct regmap *map = __map; + + hwspin_unlock(map->hwlock); +} + +static void regmap_unlock_hwlock_irq(void *__map) +{ + struct regmap *map = __map; + + hwspin_unlock_irq(map->hwlock); +} + +static void regmap_unlock_hwlock_irqrestore(void *__map) +{ + struct regmap *map = __map; + + hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags); +} +#endif + static void regmap_lock_mutex(void *__map) { struct regmap *map = __map; @@ -627,6 +673,34 @@ struct regmap *__regmap_init(struct device *dev, map->lock = config->lock; map->unlock = config->unlock; map->lock_arg = config->lock_arg; + } else if (config->hwlock_id) { +#ifdef REGMAP_HWSPINLOCK + map->hwlock = hwspin_lock_request_specific(config->hwlock_id); + if (!map->hwlock) { + ret = -ENXIO; + goto err_map; + } + + switch (config->hwlock_mode) { + case HWLOCK_IRQSTATE: + map->lock = regmap_lock_hwlock_irqsave; + map->unlock = regmap_unlock_hwlock_irqrestore; + break; + case HWLOCK_IRQ: + map->lock = regmap_lock_hwlock_irq; + map->unlock = regmap_unlock_hwlock_irq; + break; + default: + map->lock = regmap_lock_hwlock; + map->unlock = regmap_unlock_hwlock; + break; + } + + map->lock_arg = map; +#else + ret = -EINVAL; + goto err_map; +#endif } else { if ((bus && bus->fast_io) || config->fast_io) { @@ -729,7 +803,7 @@ struct regmap *__regmap_init(struct device *dev, map->format.format_write = regmap_format_2_6_write; break; default: - goto err_map; + goto err_hwlock; } break; @@ -739,7 +813,7 @@ struct regmap *__regmap_init(struct device *dev, map->format.format_write = regmap_format_4_12_write; break; default: - goto err_map; + goto err_hwlock; } break; @@ -749,7 +823,7 @@ struct regmap *__regmap_init(struct device *dev, map->format.format_write = regmap_format_7_9_write; break; default: - goto err_map; + goto err_hwlock; } break; @@ -759,7 +833,7 @@ struct regmap *__regmap_init(struct device *dev, map->format.format_write = regmap_format_10_14_write; break; default: - goto err_map; + goto err_hwlock; } break; @@ -779,13 +853,13 @@ struct regmap *__regmap_init(struct device *dev, map->format.format_reg = regmap_format_16_native; break; default: - goto err_map; + goto err_hwlock; } break; case 24: if (reg_endian != REGMAP_ENDIAN_BIG) - goto err_map; + goto err_hwlock; map->format.format_reg = regmap_format_24; break; @@ -801,7 +875,7 @@ struct regmap *__regmap_init(struct device *dev, map->format.format_reg = regmap_format_32_native; break; default: - goto err_map; + goto err_hwlock; } break; @@ -818,13 +892,13 @@ struct 
regmap *__regmap_init(struct device *dev, map->format.format_reg = regmap_format_64_native; break; default: - goto err_map; + goto err_hwlock; } break; #endif default: - goto err_map; + goto err_hwlock; } if (val_endian == REGMAP_ENDIAN_NATIVE) @@ -853,12 +927,12 @@ struct regmap *__regmap_init(struct device *dev, map->format.parse_val = regmap_parse_16_native; break; default: - goto err_map; + goto err_hwlock; } break; case 24: if (val_endian != REGMAP_ENDIAN_BIG) - goto err_map; + goto err_hwlock; map->format.format_val = regmap_format_24; map->format.parse_val = regmap_parse_24; break; @@ -879,7 +953,7 @@ struct regmap *__regmap_init(struct device *dev, map->format.parse_val = regmap_parse_32_native; break; default: - goto err_map; + goto err_hwlock; } break; #ifdef CONFIG_64BIT @@ -900,7 +974,7 @@ struct regmap *__regmap_init(struct device *dev, map->format.parse_val = regmap_parse_64_native; break; default: - goto err_map; + goto err_hwlock; } break; #endif @@ -909,18 +983,18 @@ struct regmap *__regmap_init(struct device *dev, if (map->format.format_write) { if ((reg_endian != REGMAP_ENDIAN_BIG) || (val_endian != REGMAP_ENDIAN_BIG)) - goto err_map; + goto err_hwlock; map->use_single_write = true; } if (!map->format.format_write && !(map->format.format_reg && map->format.format_val)) - goto err_map; + goto err_hwlock; map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL); if (map->work_buf == NULL) { ret = -ENOMEM; - goto err_map; + goto err_hwlock; } if (map->format.format_write) { @@ -1041,6 +1115,9 @@ err_regcache: err_range: regmap_range_exit(map); kfree(map->work_buf); +err_hwlock: + if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock) + hwspin_lock_free(map->hwlock); err_map: kfree(map); err: @@ -1228,6 +1305,8 @@ void regmap_exit(struct regmap *map) kfree(async->work_buf); kfree(async); } + if (IS_ENABLED(REGMAP_HWSPINLOCK) && map->hwlock) + hwspin_lock_free(map->hwlock); kfree(map); } EXPORT_SYMBOL_GPL(regmap_exit); diff --git a/drivers/block/amiflop.c b/drivers/block/amiflop.c index 49908c74bfcb..4e3fb9f104af 100644 --- a/drivers/block/amiflop.c +++ b/drivers/block/amiflop.c @@ -323,7 +323,7 @@ static void fd_deselect (int drive) } -static void motor_on_callback(unsigned long nr) +static void motor_on_callback(unsigned long ignored) { if (!(ciaa.pra & DSKRDY) || --on_attempts == 0) { complete_all(&motor_on_completion); @@ -344,7 +344,6 @@ static int fd_motor_on(int nr) fd_select(nr); reinit_completion(&motor_on_completion); - motor_on_timer.data = nr; mod_timer(&motor_on_timer, jiffies + HZ/2); on_attempts = 10; diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c index 4b987c2fefbe..251482066977 100644 --- a/drivers/block/aoe/aoemain.c +++ b/drivers/block/aoe/aoemain.c @@ -15,49 +15,19 @@ MODULE_AUTHOR("Sam Hopkins <sah@coraid.com>"); MODULE_DESCRIPTION("AoE block/char driver for 2.6.2 and newer 2.6 kernels"); MODULE_VERSION(VERSION); -enum { TINIT, TRUN, TKILL }; +static struct timer_list timer; -static void -discover_timer(ulong vp) +static void discover_timer(struct timer_list *t) { - static struct timer_list t; - static volatile ulong die; - static spinlock_t lock; - ulong flags; - enum { DTIMERTICK = HZ * 60 }; /* one minute */ - - switch (vp) { - case TINIT: - init_timer(&t); - spin_lock_init(&lock); - t.data = TRUN; - t.function = discover_timer; - die = 0; - case TRUN: - spin_lock_irqsave(&lock, flags); - if (!die) { - t.expires = jiffies + DTIMERTICK; - add_timer(&t); - } - spin_unlock_irqrestore(&lock, flags); - - aoecmd_cfg(0xffff, 0xff); - 
return; - case TKILL: - spin_lock_irqsave(&lock, flags); - die = 1; - spin_unlock_irqrestore(&lock, flags); + mod_timer(t, jiffies + HZ * 60); /* one minute */ - del_timer_sync(&t); - default: - return; - } + aoecmd_cfg(0xffff, 0xff); } static void aoe_exit(void) { - discover_timer(TKILL); + del_timer_sync(&timer); aoenet_exit(); unregister_blkdev(AOE_MAJOR, DEVICE_NAME); @@ -93,7 +63,9 @@ aoe_init(void) goto blkreg_fail; } printk(KERN_INFO "aoe: AoE v%s initialised.\n", VERSION); - discover_timer(TINIT); + + timer_setup(&timer, discover_timer, 0); + discover_timer(&timer); return 0; blkreg_fail: aoecmd_exit(); diff --git a/drivers/block/ataflop.c b/drivers/block/ataflop.c index 92da886180aa..ae596e55bcb6 100644 --- a/drivers/block/ataflop.c +++ b/drivers/block/ataflop.c @@ -373,10 +373,10 @@ static void floppy_release(struct gendisk *disk, fmode_t mode); /************************* End of Prototypes **************************/ -static DEFINE_TIMER(motor_off_timer, fd_motor_off_timer, 0, 0); -static DEFINE_TIMER(readtrack_timer, fd_readtrack_check, 0, 0); -static DEFINE_TIMER(timeout_timer, fd_times_out, 0, 0); -static DEFINE_TIMER(fd_timer, check_change, 0, 0); +static DEFINE_TIMER(motor_off_timer, fd_motor_off_timer); +static DEFINE_TIMER(readtrack_timer, fd_readtrack_check); +static DEFINE_TIMER(timeout_timer, fd_times_out); +static DEFINE_TIMER(fd_timer, check_change); static void fd_end_request_cur(blk_status_t err) { diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 7e8589ce631c..06ecee1b528e 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h @@ -1551,8 +1551,8 @@ extern int w_restart_disk_io(struct drbd_work *, int); extern int w_send_out_of_sync(struct drbd_work *, int); extern int w_start_resync(struct drbd_work *, int); -extern void resync_timer_fn(unsigned long data); -extern void start_resync_timer_fn(unsigned long data); +extern void resync_timer_fn(struct timer_list *t); +extern void start_resync_timer_fn(struct timer_list *t); extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req); diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index 8cb3791898ae..4b4697a1f963 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -64,7 +64,7 @@ static DEFINE_MUTEX(drbd_main_mutex); static int drbd_open(struct block_device *bdev, fmode_t mode); static void drbd_release(struct gendisk *gd, fmode_t mode); -static void md_sync_timer_fn(unsigned long data); +static void md_sync_timer_fn(struct timer_list *t); static int w_bitmap_io(struct drbd_work *w, int unused); MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, " @@ -2023,14 +2023,10 @@ void drbd_init_set_defaults(struct drbd_device *device) device->unplug_work.cb = w_send_write_hint; device->bm_io_work.w.cb = w_bitmap_io; - setup_timer(&device->resync_timer, resync_timer_fn, - (unsigned long)device); - setup_timer(&device->md_sync_timer, md_sync_timer_fn, - (unsigned long)device); - setup_timer(&device->start_resync_timer, start_resync_timer_fn, - (unsigned long)device); - setup_timer(&device->request_timer, request_timer_fn, - (unsigned long)device); + timer_setup(&device->resync_timer, resync_timer_fn, 0); + timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0); + timer_setup(&device->start_resync_timer, start_resync_timer_fn, 0); + timer_setup(&device->request_timer, request_timer_fn, 0); init_waitqueue_head(&device->misc_wait); init_waitqueue_head(&device->state_wait); @@ -3721,9 +3717,9 @@ int 
drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag) return (bdev->md.flags & flag) != 0; } -static void md_sync_timer_fn(unsigned long data) +static void md_sync_timer_fn(struct timer_list *t) { - struct drbd_device *device = (struct drbd_device *) data; + struct drbd_device *device = from_timer(device, t, md_sync_timer); drbd_device_post_work(device, MD_SYNC); } diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 796eaf347dc0..cb2fa63f6bc0 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -5056,7 +5056,7 @@ static int drbd_disconnected(struct drbd_peer_device *peer_device) wake_up(&device->misc_wait); del_timer_sync(&device->resync_timer); - resync_timer_fn((unsigned long)device); + resync_timer_fn(&device->resync_timer); /* wait for all w_e_end_data_req, w_e_end_rsdata_req, w_send_barrier, * w_make_resync_request etc. which may still be on the worker queue diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index de8566e55334..a500e738d929 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -1714,9 +1714,9 @@ static bool net_timeout_reached(struct drbd_request *net_req, * to expire twice (worst case) to become effective. Good enough. */ -void request_timer_fn(unsigned long data) +void request_timer_fn(struct timer_list *t) { - struct drbd_device *device = (struct drbd_device *) data; + struct drbd_device *device = from_timer(device, t, request_timer); struct drbd_connection *connection = first_peer_device(device)->connection; struct drbd_request *req_read, *req_write, *req_peer; /* oldest request */ struct net_conf *nc; diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h index a2254f825601..cb97b3b30962 100644 --- a/drivers/block/drbd/drbd_req.h +++ b/drivers/block/drbd/drbd_req.h @@ -294,7 +294,7 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what, struct bio_and_error *m); extern void complete_master_bio(struct drbd_device *device, struct bio_and_error *m); -extern void request_timer_fn(unsigned long data); +extern void request_timer_fn(struct timer_list *t); extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what); extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what); extern void tl_abort_disk_io(struct drbd_device *device); diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c index 03471b3fce86..1476cb3439f4 100644 --- a/drivers/block/drbd/drbd_worker.c +++ b/drivers/block/drbd/drbd_worker.c @@ -457,9 +457,9 @@ int w_resync_timer(struct drbd_work *w, int cancel) return 0; } -void resync_timer_fn(unsigned long data) +void resync_timer_fn(struct timer_list *t) { - struct drbd_device *device = (struct drbd_device *) data; + struct drbd_device *device = from_timer(device, t, resync_timer); drbd_queue_work_if_unqueued( &first_peer_device(device)->connection->sender_work, @@ -1705,9 +1705,9 @@ void drbd_rs_controller_reset(struct drbd_device *device) rcu_read_unlock(); } -void start_resync_timer_fn(unsigned long data) +void start_resync_timer_fn(struct timer_list *t) { - struct drbd_device *device = (struct drbd_device *) data; + struct drbd_device *device = from_timer(device, t, start_resync_timer); drbd_device_post_work(device, RS_START); } diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c index 58471394beb9..1a0385ed6417 100644 --- a/drivers/char/dtlk.c +++ b/drivers/char/dtlk.c @@ -84,7 +84,7 @@ static int 
dtlk_has_indexing; static unsigned int dtlk_portlist[] = {0x25e, 0x29e, 0x2de, 0x31e, 0x35e, 0x39e, 0}; static wait_queue_head_t dtlk_process_list; -static DEFINE_TIMER(dtlk_timer, dtlk_timer_tick, 0, 0); +static DEFINE_TIMER(dtlk_timer, dtlk_timer_tick); /* prototypes for file_operations struct */ static ssize_t dtlk_read(struct file *, char __user *, diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c index 5406b90bf626..5b8db2ed844d 100644 --- a/drivers/char/hangcheck-timer.c +++ b/drivers/char/hangcheck-timer.c @@ -124,7 +124,7 @@ static unsigned long long hangcheck_tsc, hangcheck_tsc_margin; static void hangcheck_fire(unsigned long); -static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire, 0, 0); +static DEFINE_TIMER(hangcheck_ticktock, hangcheck_fire); static void hangcheck_fire(unsigned long data) { diff --git a/drivers/char/hw_random/xgene-rng.c b/drivers/char/hw_random/xgene-rng.c index 3c77645405e5..71755790c32b 100644 --- a/drivers/char/hw_random/xgene-rng.c +++ b/drivers/char/hw_random/xgene-rng.c @@ -100,9 +100,9 @@ struct xgene_rng_dev { struct clk *clk; }; -static void xgene_rng_expired_timer(unsigned long arg) +static void xgene_rng_expired_timer(struct timer_list *t) { - struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) arg; + struct xgene_rng_dev *ctx = from_timer(ctx, t, failure_timer); /* Clear failure counter as timer expired */ disable_irq(ctx->irq); @@ -113,8 +113,6 @@ static void xgene_rng_expired_timer(unsigned long arg) static void xgene_rng_start_timer(struct xgene_rng_dev *ctx) { - ctx->failure_timer.data = (unsigned long) ctx; - ctx->failure_timer.function = xgene_rng_expired_timer; ctx->failure_timer.expires = jiffies + 120 * HZ; add_timer(&ctx->failure_timer); } @@ -292,7 +290,7 @@ static int xgene_rng_init(struct hwrng *rng) struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; ctx->failure_cnt = 0; - init_timer(&ctx->failure_timer); + timer_setup(&ctx->failure_timer, xgene_rng_expired_timer, 0); ctx->revision = readl(ctx->csr_base + RNG_EIP_REV); diff --git a/drivers/char/nwbutton.c b/drivers/char/nwbutton.c index e6d0d271c58c..44006ed9558f 100644 --- a/drivers/char/nwbutton.c +++ b/drivers/char/nwbutton.c @@ -27,7 +27,7 @@ static void button_sequence_finished (unsigned long parameters); static int button_press_count; /* The count of button presses */ /* Times for the end of a sequence */ -static DEFINE_TIMER(button_timer, button_sequence_finished, 0, 0); +static DEFINE_TIMER(button_timer, button_sequence_finished); static DECLARE_WAIT_QUEUE_HEAD(button_wait_queue); /* Used for blocking read */ static char button_output_buffer[32]; /* Stores data to write out of device */ static int bcount; /* The number of bytes in the buffer */ diff --git a/drivers/char/random.c b/drivers/char/random.c index 8ad92707e45f..6c7ccac2679e 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -641,7 +641,7 @@ static void credit_entropy_bits(struct entropy_store *r, int nbits) return; retry: - entropy_count = orig = ACCESS_ONCE(r->entropy_count); + entropy_count = orig = READ_ONCE(r->entropy_count); if (nfrac < 0) { /* Debit */ entropy_count += nfrac; @@ -1265,7 +1265,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min, /* Can we pull enough? 
*/ retry: - entropy_count = orig = ACCESS_ONCE(r->entropy_count); + entropy_count = orig = READ_ONCE(r->entropy_count); ibytes = nbytes; /* never pull more than available */ have_bytes = entropy_count >> (ENTROPY_SHIFT + 3); diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index 974d48927b07..616871e68e09 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -137,7 +137,7 @@ static DECLARE_WAIT_QUEUE_HEAD(rtc_wait); #ifdef RTC_IRQ static void rtc_dropped_irq(unsigned long data); -static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq, 0, 0); +static DEFINE_TIMER(rtc_irq_timer, rtc_dropped_irq); #endif static ssize_t rtc_read(struct file *file, char __user *buf, diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c index 6210bff46341..8eeb4190207d 100644 --- a/drivers/char/tlclk.c +++ b/drivers/char/tlclk.c @@ -184,9 +184,8 @@ static unsigned int telclk_interrupt; static int int_events; /* Event that generate a interrupt */ static int got_event; /* if events processing have been done */ -static void switchover_timeout(unsigned long data); -static struct timer_list switchover_timer = - TIMER_INITIALIZER(switchover_timeout , 0, 0); +static void switchover_timeout(struct timer_list *t); +static struct timer_list switchover_timer; static unsigned long tlclk_timer_data; static struct tlclk_alarms *alarm_events; @@ -805,7 +804,7 @@ static int __init tlclk_init(void) goto out3; } - init_timer(&switchover_timer); + timer_setup(&switchover_timer, switchover_timeout, 0); ret = misc_register(&tlclk_miscdev); if (ret < 0) { @@ -855,9 +854,9 @@ static void __exit tlclk_cleanup(void) } -static void switchover_timeout(unsigned long data) +static void switchover_timeout(struct timer_list *unused) { - unsigned long flags = *(unsigned long *) data; + unsigned long flags = tlclk_timer_data; if ((flags & 1)) { if ((inb(TLCLK_REG1) & 0x08) != (flags & 0x08)) @@ -922,7 +921,6 @@ static irqreturn_t tlclk_interrupt(int irq, void *dev_id) /* TIMEOUT in ~10ms */ switchover_timer.expires = jiffies + msecs_to_jiffies(10); tlclk_timer_data = inb(TLCLK_REG1); - switchover_timer.data = (unsigned long) &tlclk_timer_data; mod_timer(&switchover_timer, switchover_timer.expires); } else { got_event = 1; diff --git a/drivers/char/tpm/tpm-dev-common.c b/drivers/char/tpm/tpm-dev-common.c index 610638a80383..461bf0b8a094 100644 --- a/drivers/char/tpm/tpm-dev-common.c +++ b/drivers/char/tpm/tpm-dev-common.c @@ -110,6 +110,12 @@ ssize_t tpm_common_write(struct file *file, const char __user *buf, return -EFAULT; } + if (in_size < 6 || + in_size < be32_to_cpu(*((__be32 *) (priv->data_buffer + 2)))) { + mutex_unlock(&priv->buffer_mutex); + return -EINVAL; + } + /* atomic tpm command send and result receive. We only hold the ops * lock during this period so that the tpm can be unregistered even if * the char dev is held open. 
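The tpm-dev-common.c hunk above rejects malformed commands before they reach the TPM: the buffer must hold at least the 2-byte tag plus the 4-byte big-endian length field, and that declared length must not exceed the number of bytes userspace actually wrote. A standalone sketch of the same check, assuming a hypothetical helper tpm_cmd_is_sane():

/*
 * Sketch of the length check added to tpm_common_write() above.
 * tpm_cmd_is_sane() is an invented helper name, not part of the patch.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static bool tpm_cmd_is_sane(const u8 *buf, size_t in_size)
{
        u32 claimed;

        if (in_size < 6)        /* need tag (2 bytes) + length field (4 bytes) */
                return false;

        claimed = be32_to_cpu(*(const __be32 *)(buf + 2));
        return in_size >= claimed;      /* may not claim more than was written */
}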
diff --git a/drivers/char/tpm/tpm-sysfs.c b/drivers/char/tpm/tpm-sysfs.c index 86f38d239476..83a77a445538 100644 --- a/drivers/char/tpm/tpm-sysfs.c +++ b/drivers/char/tpm/tpm-sysfs.c @@ -20,44 +20,48 @@ #include <linux/device.h> #include "tpm.h" -#define READ_PUBEK_RESULT_SIZE 314 +struct tpm_readpubek_out { + u8 algorithm[4]; + u8 encscheme[2]; + u8 sigscheme[2]; + __be32 paramsize; + u8 parameters[12]; + __be32 keysize; + u8 modulus[256]; + u8 checksum[20]; +} __packed; + #define READ_PUBEK_RESULT_MIN_BODY_SIZE (28 + 256) #define TPM_ORD_READPUBEK 124 -static const struct tpm_input_header tpm_readpubek_header = { - .tag = cpu_to_be16(TPM_TAG_RQU_COMMAND), - .length = cpu_to_be32(30), - .ordinal = cpu_to_be32(TPM_ORD_READPUBEK) -}; + static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, char *buf) { - u8 *data; - struct tpm_cmd_t tpm_cmd; - ssize_t err; - int i, rc; + struct tpm_buf tpm_buf; + struct tpm_readpubek_out *out; + ssize_t rc; + int i; char *str = buf; struct tpm_chip *chip = to_tpm_chip(dev); + char anti_replay[20]; - memset(&tpm_cmd, 0, sizeof(tpm_cmd)); - - tpm_cmd.header.in = tpm_readpubek_header; - err = tpm_transmit_cmd(chip, NULL, &tpm_cmd, READ_PUBEK_RESULT_SIZE, - READ_PUBEK_RESULT_MIN_BODY_SIZE, 0, - "attempting to read the PUBEK"); - if (err) - goto out; - - /* - ignore header 10 bytes - algorithm 32 bits (1 == RSA ) - encscheme 16 bits - sigscheme 16 bits - parameters (RSA 12->bytes: keybit, #primes, expbit) - keylenbytes 32 bits - 256 byte modulus - ignore checksum 20 bytes - */ - data = tpm_cmd.params.readpubek_out_buffer; + memset(&anti_replay, 0, sizeof(anti_replay)); + + rc = tpm_buf_init(&tpm_buf, TPM_TAG_RQU_COMMAND, TPM_ORD_READPUBEK); + if (rc) + return rc; + + tpm_buf_append(&tpm_buf, anti_replay, sizeof(anti_replay)); + + rc = tpm_transmit_cmd(chip, NULL, tpm_buf.data, PAGE_SIZE, + READ_PUBEK_RESULT_MIN_BODY_SIZE, 0, + "attempting to read the PUBEK"); + if (rc) { + tpm_buf_destroy(&tpm_buf); + return 0; + } + + out = (struct tpm_readpubek_out *)&tpm_buf.data[10]; str += sprintf(str, "Algorithm: %02X %02X %02X %02X\n" @@ -68,21 +72,26 @@ static ssize_t pubek_show(struct device *dev, struct device_attribute *attr, "%02X %02X %02X %02X\n" "Modulus length: %d\n" "Modulus:\n", - data[0], data[1], data[2], data[3], - data[4], data[5], - data[6], data[7], - data[12], data[13], data[14], data[15], - data[16], data[17], data[18], data[19], - data[20], data[21], data[22], data[23], - be32_to_cpu(*((__be32 *) (data + 24)))); + out->algorithm[0], out->algorithm[1], out->algorithm[2], + out->algorithm[3], + out->encscheme[0], out->encscheme[1], + out->sigscheme[0], out->sigscheme[1], + out->parameters[0], out->parameters[1], + out->parameters[2], out->parameters[3], + out->parameters[4], out->parameters[5], + out->parameters[6], out->parameters[7], + out->parameters[8], out->parameters[9], + out->parameters[10], out->parameters[11], + be32_to_cpu(out->keysize)); for (i = 0; i < 256; i++) { - str += sprintf(str, "%02X ", data[i + 28]); + str += sprintf(str, "%02X ", out->modulus[i]); if ((i + 1) % 16 == 0) str += sprintf(str, "\n"); } -out: + rc = str - buf; + tpm_buf_destroy(&tpm_buf); return rc; } static DEVICE_ATTR_RO(pubek); diff --git a/drivers/char/tpm/tpm.h b/drivers/char/tpm/tpm.h index 2d5466a72e40..528cffbd49d3 100644 --- a/drivers/char/tpm/tpm.h +++ b/drivers/char/tpm/tpm.h @@ -345,17 +345,6 @@ enum tpm_sub_capabilities { TPM_CAP_PROP_TIS_DURATION = 0x120, }; -struct tpm_readpubek_params_out { - u8 algorithm[4]; - u8 encscheme[2]; - u8 
sigscheme[2]; - __be32 paramsize; - u8 parameters[12]; /*assuming RSA*/ - __be32 keysize; - u8 modulus[256]; - u8 checksum[20]; -} __packed; - typedef union { struct tpm_input_header in; struct tpm_output_header out; @@ -385,8 +374,6 @@ struct tpm_getrandom_in { } __packed; typedef union { - struct tpm_readpubek_params_out readpubek_out; - u8 readpubek_out_buffer[sizeof(struct tpm_readpubek_params_out)]; struct tpm_pcrread_in pcrread_in; struct tpm_pcrread_out pcrread_out; struct tpm_getrandom_in getrandom_in; @@ -557,7 +544,7 @@ static inline void tpm_add_ppi(struct tpm_chip *chip) } #endif -static inline inline u32 tpm2_rc_value(u32 rc) +static inline u32 tpm2_rc_value(u32 rc) { return (rc & BIT(7)) ? rc & 0xff : rc; } diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c index e1a41b788f08..f40d20671a78 100644 --- a/drivers/char/tpm/tpm2-cmd.c +++ b/drivers/char/tpm/tpm2-cmd.c @@ -834,72 +834,43 @@ static const struct tpm_input_header tpm2_selftest_header = { }; /** - * tpm2_continue_selftest() - start a self test - * - * @chip: TPM chip to use - * @full: test all commands instead of testing only those that were not - * previously tested. - * - * Return: Same as with tpm_transmit_cmd with exception of RC_TESTING. - */ -static int tpm2_start_selftest(struct tpm_chip *chip, bool full) -{ - int rc; - struct tpm2_cmd cmd; - - cmd.header.in = tpm2_selftest_header; - cmd.params.selftest_in.full_test = full; - - rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE, 0, 0, - "continue selftest"); - - /* At least some prototype chips seem to give RC_TESTING error - * immediately. This is a workaround for that. - */ - if (rc == TPM2_RC_TESTING) { - dev_warn(&chip->dev, "Got RC_TESTING, ignoring\n"); - rc = 0; - } - - return rc; -} - -/** - * tpm2_do_selftest() - run a full self test + * tpm2_do_selftest() - ensure that all self tests have passed * * @chip: TPM chip to use * * Return: Same as with tpm_transmit_cmd. * - * During the self test TPM2 commands return with the error code RC_TESTING. - * Waiting is done by issuing PCR read until it executes successfully. + * The TPM can either run all self tests synchronously and then return + * RC_SUCCESS once all tests were successful. Or it can choose to run the tests + * asynchronously and return RC_TESTING immediately while the self tests still + * execute in the background. This function handles both cases and waits until + * all tests have completed. 
*/ static int tpm2_do_selftest(struct tpm_chip *chip) { int rc; - unsigned int loops; - unsigned int delay_msec = 100; - unsigned long duration; - int i; - - duration = tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST); + unsigned int delay_msec = 20; + long duration; + struct tpm2_cmd cmd; - loops = jiffies_to_msecs(duration) / delay_msec; + duration = jiffies_to_msecs( + tpm2_calc_ordinal_duration(chip, TPM2_CC_SELF_TEST)); - rc = tpm2_start_selftest(chip, true); - if (rc) - return rc; + while (duration > 0) { + cmd.header.in = tpm2_selftest_header; + cmd.params.selftest_in.full_test = 0; - for (i = 0; i < loops; i++) { - /* Attempt to read a PCR value */ - rc = tpm2_pcr_read(chip, 0, NULL); - if (rc < 0) - break; + rc = tpm_transmit_cmd(chip, NULL, &cmd, TPM2_SELF_TEST_IN_SIZE, + 0, 0, "continue selftest"); if (rc != TPM2_RC_TESTING) break; tpm_msleep(delay_msec); + duration -= delay_msec; + + /* wait longer the next round */ + delay_msec *= 2; } return rc; @@ -1009,7 +980,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip) { struct tpm_buf buf; u32 nr_commands; - u32 *attrs; + __be32 *attrs; u32 cc; int i; int rc; @@ -1049,7 +1020,7 @@ static int tpm2_get_cc_attrs_tbl(struct tpm_chip *chip) chip->nr_commands = nr_commands; - attrs = (u32 *)&buf.data[TPM_HEADER_SIZE + 9]; + attrs = (__be32 *)&buf.data[TPM_HEADER_SIZE + 9]; for (i = 0; i < nr_commands; i++, attrs++) { chip->cc_attrs_tbl[i] = be32_to_cpup(attrs); cc = chip->cc_attrs_tbl[i] & 0xFFFF; diff --git a/drivers/char/tpm/tpm2-space.c b/drivers/char/tpm/tpm2-space.c index e2e059d8ffec..4e4014eabdb9 100644 --- a/drivers/char/tpm/tpm2-space.c +++ b/drivers/char/tpm/tpm2-space.c @@ -242,7 +242,7 @@ static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd) struct tpm_space *space = &chip->work_space; unsigned int nr_handles; u32 attrs; - u32 *handle; + __be32 *handle; int i; i = tpm2_find_cc(chip, cc); @@ -252,7 +252,7 @@ static int tpm2_map_command(struct tpm_chip *chip, u32 cc, u8 *cmd) attrs = chip->cc_attrs_tbl[i]; nr_handles = (attrs >> TPM2_CC_ATTR_CHANDLES) & GENMASK(2, 0); - handle = (u32 *)&cmd[TPM_HEADER_SIZE]; + handle = (__be32 *)&cmd[TPM_HEADER_SIZE]; for (i = 0; i < nr_handles; i++, handle++) { if ((be32_to_cpu(*handle) & 0xFF000000) == TPM2_HT_TRANSIENT) { if (!tpm2_map_to_phandle(space, handle)) diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c index 8f0a98dea327..7b3c2a8aa9de 100644 --- a/drivers/char/tpm/tpm_crb.c +++ b/drivers/char/tpm/tpm_crb.c @@ -92,14 +92,9 @@ enum crb_status { CRB_DRV_STS_COMPLETE = BIT(0), }; -enum crb_flags { - CRB_FL_ACPI_START = BIT(0), - CRB_FL_CRB_START = BIT(1), - CRB_FL_CRB_SMC_START = BIT(2), -}; - struct crb_priv { - unsigned int flags; + u32 sm; + const char *hid; void __iomem *iobase; struct crb_regs_head __iomem *regs_h; struct crb_regs_tail __iomem *regs_t; @@ -128,14 +123,16 @@ struct tpm2_crb_smc { * Anyhow, we do not wait here as a consequent CMD_READY request * will be handled correctly even if idle was not completed. * - * The function does nothing for devices with ACPI-start method. + * The function does nothing for devices with ACPI-start method + * or SMC-start method. 
* * Return: 0 always */ static int __maybe_unused crb_go_idle(struct device *dev, struct crb_priv *priv) { - if ((priv->flags & CRB_FL_ACPI_START) || - (priv->flags & CRB_FL_CRB_SMC_START)) + if ((priv->sm == ACPI_TPM2_START_METHOD) || + (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) || + (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC)) return 0; iowrite32(CRB_CTRL_REQ_GO_IDLE, &priv->regs_t->ctrl_req); @@ -174,14 +171,16 @@ static bool crb_wait_for_reg_32(u32 __iomem *reg, u32 mask, u32 value, * The device should respond within TIMEOUT_C. * * The function does nothing for devices with ACPI-start method + * or SMC-start method. * * Return: 0 on success -ETIME on timeout; */ static int __maybe_unused crb_cmd_ready(struct device *dev, struct crb_priv *priv) { - if ((priv->flags & CRB_FL_ACPI_START) || - (priv->flags & CRB_FL_CRB_SMC_START)) + if ((priv->sm == ACPI_TPM2_START_METHOD) || + (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) || + (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC)) return 0; iowrite32(CRB_CTRL_REQ_CMD_READY, &priv->regs_t->ctrl_req); @@ -325,13 +324,20 @@ static int crb_send(struct tpm_chip *chip, u8 *buf, size_t len) /* Make sure that cmd is populated before issuing start. */ wmb(); - if (priv->flags & CRB_FL_CRB_START) + /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs + * report only ACPI start but in practice seems to require both + * CRB start, hence invoking CRB start method if hid == MSFT0101. + */ + if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) || + (priv->sm == ACPI_TPM2_MEMORY_MAPPED) || + (!strcmp(priv->hid, "MSFT0101"))) iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start); - if (priv->flags & CRB_FL_ACPI_START) + if ((priv->sm == ACPI_TPM2_START_METHOD) || + (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) rc = crb_do_acpi_start(chip); - if (priv->flags & CRB_FL_CRB_SMC_START) { + if (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) { iowrite32(CRB_START_INVOKE, &priv->regs_t->ctrl_start); rc = tpm_crb_smc_start(&chip->dev, priv->smc_func_id); } @@ -345,7 +351,9 @@ static void crb_cancel(struct tpm_chip *chip) iowrite32(CRB_CANCEL_INVOKE, &priv->regs_t->ctrl_cancel); - if ((priv->flags & CRB_FL_ACPI_START) && crb_do_acpi_start(chip)) + if (((priv->sm == ACPI_TPM2_START_METHOD) || + (priv->sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD)) && + crb_do_acpi_start(chip)) dev_err(&chip->dev, "ACPI Start failed\n"); } @@ -458,7 +466,8 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv, * the control area, as one nice sane region except for some older * stuff that puts the control area outside the ACPI IO region. */ - if (!(priv->flags & CRB_FL_ACPI_START)) { + if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) || + (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) { if (buf->control_address == io_res.start + sizeof(*priv->regs_h)) priv->regs_h = priv->iobase; @@ -552,18 +561,6 @@ static int crb_acpi_add(struct acpi_device *device) if (!priv) return -ENOMEM; - /* The reason for the extra quirk is that the PTT in 4th Gen Core CPUs - * report only ACPI start but in practice seems to require both - * ACPI start and CRB start. 
- */ - if (sm == ACPI_TPM2_COMMAND_BUFFER || sm == ACPI_TPM2_MEMORY_MAPPED || - !strcmp(acpi_device_hid(device), "MSFT0101")) - priv->flags |= CRB_FL_CRB_START; - - if (sm == ACPI_TPM2_START_METHOD || - sm == ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD) - priv->flags |= CRB_FL_ACPI_START; - if (sm == ACPI_TPM2_COMMAND_BUFFER_WITH_ARM_SMC) { if (buf->header.length < (sizeof(*buf) + sizeof(*crb_smc))) { dev_err(dev, @@ -574,9 +571,11 @@ static int crb_acpi_add(struct acpi_device *device) } crb_smc = ACPI_ADD_PTR(struct tpm2_crb_smc, buf, sizeof(*buf)); priv->smc_func_id = crb_smc->smc_func_id; - priv->flags |= CRB_FL_CRB_SMC_START; } + priv->sm = sm; + priv->hid = acpi_device_hid(device); + rc = crb_map_io(device, priv, buf); if (rc) return rc; diff --git a/drivers/char/tpm/tpm_tis.c b/drivers/char/tpm/tpm_tis.c index 7e55aa9ce680..e2d1055fb814 100644 --- a/drivers/char/tpm/tpm_tis.c +++ b/drivers/char/tpm/tpm_tis.c @@ -30,6 +30,7 @@ #include <linux/freezer.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/kernel.h> #include "tpm.h" #include "tpm_tis_core.h" @@ -223,7 +224,7 @@ static int tpm_tcg_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, } static int tpm_tcg_write_bytes(struct tpm_tis_data *data, u32 addr, u16 len, - u8 *value) + const u8 *value) { struct tpm_tis_tcg_phy *phy = to_tpm_tis_tcg_phy(data); @@ -365,7 +366,7 @@ static struct pnp_driver tis_pnp_driver = { }, }; -#define TIS_HID_USR_IDX sizeof(tpm_pnp_tbl)/sizeof(struct pnp_device_id) -2 +#define TIS_HID_USR_IDX (ARRAY_SIZE(tpm_pnp_tbl) - 2) module_param_string(hid, tpm_pnp_tbl[TIS_HID_USR_IDX].id, sizeof(tpm_pnp_tbl[TIS_HID_USR_IDX].id), 0444); MODULE_PARM_DESC(hid, "Set additional specific HID for this driver to probe"); diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 63bc6c3b949e..fdde971bc810 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -252,7 +252,7 @@ out: * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ -static int tpm_tis_send_data(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_tis_send_data(struct tpm_chip *chip, const u8 *buf, size_t len) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int rc, status, burstcnt; @@ -343,7 +343,7 @@ static void disable_interrupts(struct tpm_chip *chip) * tpm.c can skip polling for the data to be available as the interrupt is * waited for here */ -static int tpm_tis_send_main(struct tpm_chip *chip, u8 *buf, size_t len) +static int tpm_tis_send_main(struct tpm_chip *chip, const u8 *buf, size_t len) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int rc; @@ -445,7 +445,7 @@ static int probe_itpm(struct tpm_chip *chip) { struct tpm_tis_data *priv = dev_get_drvdata(&chip->dev); int rc = 0; - u8 cmd_getticks[] = { + static const u8 cmd_getticks[] = { 0x00, 0xc1, 0x00, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x00, 0xf1 }; diff --git a/drivers/char/tpm/tpm_tis_core.h b/drivers/char/tpm/tpm_tis_core.h index e2212f021a02..6bbac319ff3b 100644 --- a/drivers/char/tpm/tpm_tis_core.h +++ b/drivers/char/tpm/tpm_tis_core.h @@ -98,7 +98,7 @@ struct tpm_tis_phy_ops { int (*read_bytes)(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result); int (*write_bytes)(struct tpm_tis_data *data, u32 addr, u16 len, - u8 *value); + const u8 *value); int (*read16)(struct tpm_tis_data *data, u32 addr, u16 *result); int (*read32)(struct tpm_tis_data *data, u32 addr, u32 *result); int (*write32)(struct tpm_tis_data *data, u32 addr, u32 src); 
@@ -128,7 +128,7 @@ static inline int tpm_tis_read32(struct tpm_tis_data *data, u32 addr, } static inline int tpm_tis_write_bytes(struct tpm_tis_data *data, u32 addr, - u16 len, u8 *value) + u16 len, const u8 *value) { return data->phy_ops->write_bytes(data, addr, len, value); } diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi.c index 88fe72ae967f..424ff2fde1f2 100644 --- a/drivers/char/tpm/tpm_tis_spi.c +++ b/drivers/char/tpm/tpm_tis_spi.c @@ -46,9 +46,7 @@ struct tpm_tis_spi_phy { struct tpm_tis_data priv; struct spi_device *spi_device; - - u8 tx_buf[4]; - u8 rx_buf[4]; + u8 *iobuf; }; static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *data) @@ -57,7 +55,7 @@ static inline struct tpm_tis_spi_phy *to_tpm_tis_spi_phy(struct tpm_tis_data *da } static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, - u8 *buffer, u8 direction) + u8 *in, const u8 *out) { struct tpm_tis_spi_phy *phy = to_tpm_tis_spi_phy(data); int ret = 0; @@ -71,14 +69,14 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, while (len) { transfer_len = min_t(u16, len, MAX_SPI_FRAMESIZE); - phy->tx_buf[0] = direction | (transfer_len - 1); - phy->tx_buf[1] = 0xd4; - phy->tx_buf[2] = addr >> 8; - phy->tx_buf[3] = addr; + phy->iobuf[0] = (in ? 0x80 : 0) | (transfer_len - 1); + phy->iobuf[1] = 0xd4; + phy->iobuf[2] = addr >> 8; + phy->iobuf[3] = addr; memset(&spi_xfer, 0, sizeof(spi_xfer)); - spi_xfer.tx_buf = phy->tx_buf; - spi_xfer.rx_buf = phy->rx_buf; + spi_xfer.tx_buf = phy->iobuf; + spi_xfer.rx_buf = phy->iobuf; spi_xfer.len = 4; spi_xfer.cs_change = 1; @@ -88,9 +86,9 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, if (ret < 0) goto exit; - if ((phy->rx_buf[3] & 0x01) == 0) { + if ((phy->iobuf[3] & 0x01) == 0) { // handle SPI wait states - phy->tx_buf[0] = 0; + phy->iobuf[0] = 0; for (i = 0; i < TPM_RETRY; i++) { spi_xfer.len = 1; @@ -99,7 +97,7 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, ret = spi_sync_locked(phy->spi_device, &m); if (ret < 0) goto exit; - if (phy->rx_buf[0] & 0x01) + if (phy->iobuf[0] & 0x01) break; } @@ -113,12 +111,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, spi_xfer.len = transfer_len; spi_xfer.delay_usecs = 5; - if (direction) { + if (in) { spi_xfer.tx_buf = NULL; - spi_xfer.rx_buf = buffer; - } else { - spi_xfer.tx_buf = buffer; + } else if (out) { spi_xfer.rx_buf = NULL; + memcpy(phy->iobuf, out, transfer_len); + out += transfer_len; } spi_message_init(&m); @@ -127,8 +125,12 @@ static int tpm_tis_spi_transfer(struct tpm_tis_data *data, u32 addr, u16 len, if (ret < 0) goto exit; + if (in) { + memcpy(in, phy->iobuf, transfer_len); + in += transfer_len; + } + len -= transfer_len; - buffer += transfer_len; } exit: @@ -139,40 +141,51 @@ exit: static int tpm_tis_spi_read_bytes(struct tpm_tis_data *data, u32 addr, u16 len, u8 *result) { - return tpm_tis_spi_transfer(data, addr, len, result, 0x80); + return tpm_tis_spi_transfer(data, addr, len, result, NULL); } static int tpm_tis_spi_write_bytes(struct tpm_tis_data *data, u32 addr, - u16 len, u8 *value) + u16 len, const u8 *value) { - return tpm_tis_spi_transfer(data, addr, len, value, 0); + return tpm_tis_spi_transfer(data, addr, len, NULL, value); } static int tpm_tis_spi_read16(struct tpm_tis_data *data, u32 addr, u16 *result) { + __le16 result_le; int rc; - rc = data->phy_ops->read_bytes(data, addr, sizeof(u16), (u8 *)result); + rc = 
data->phy_ops->read_bytes(data, addr, sizeof(u16), + (u8 *)&result_le); if (!rc) - *result = le16_to_cpu(*result); + *result = le16_to_cpu(result_le); + return rc; } static int tpm_tis_spi_read32(struct tpm_tis_data *data, u32 addr, u32 *result) { + __le32 result_le; int rc; - rc = data->phy_ops->read_bytes(data, addr, sizeof(u32), (u8 *)result); + rc = data->phy_ops->read_bytes(data, addr, sizeof(u32), + (u8 *)&result_le); if (!rc) - *result = le32_to_cpu(*result); + *result = le32_to_cpu(result_le); + return rc; } static int tpm_tis_spi_write32(struct tpm_tis_data *data, u32 addr, u32 value) { - value = cpu_to_le32(value); - return data->phy_ops->write_bytes(data, addr, sizeof(u32), - (u8 *)&value); + __le32 value_le; + int rc; + + value_le = cpu_to_le32(value); + rc = data->phy_ops->write_bytes(data, addr, sizeof(u32), + (u8 *)&value_le); + + return rc; } static const struct tpm_tis_phy_ops tpm_spi_phy_ops = { @@ -194,6 +207,10 @@ static int tpm_tis_spi_probe(struct spi_device *dev) phy->spi_device = dev; + phy->iobuf = devm_kmalloc(&dev->dev, MAX_SPI_FRAMESIZE, GFP_KERNEL); + if (!phy->iobuf) + return -ENOMEM; + return tpm_tis_core_init(&dev->dev, &phy->priv, -1, &tpm_spi_phy_ops, NULL); } diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index cc6062049170..c729a88007d0 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -1,9 +1,8 @@ menu "Clock Source drivers" - depends on !ARCH_USES_GETTIMEOFFSET + depends on GENERIC_CLOCKEVENTS config TIMER_OF bool - depends on GENERIC_CLOCKEVENTS select TIMER_PROBE config TIMER_ACPI @@ -30,21 +29,18 @@ config CLKSRC_MMIO config BCM2835_TIMER bool "BCM2835 timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO help Enables the support for the BCM2835 timer driver. config BCM_KONA_TIMER bool "BCM mobile timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO help Enables the support for the BCM Kona mobile timer driver. config DIGICOLOR_TIMER bool "Digicolor timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO depends on HAS_IOMEM help @@ -52,7 +48,6 @@ config DIGICOLOR_TIMER config DW_APB_TIMER bool "DW APB timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS help Enables the support for the dw_apb timer. @@ -63,7 +58,6 @@ config DW_APB_TIMER_OF config FTTMR010_TIMER bool "Faraday Technology timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS depends on HAS_IOMEM select CLKSRC_MMIO select TIMER_OF @@ -90,7 +84,6 @@ config ARMADA_370_XP_TIMER config MESON6_TIMER bool "Meson6 timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO help Enables the support for the Meson6 timer driver. @@ -105,14 +98,12 @@ config ORION_TIMER config OWL_TIMER bool "Owl timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO help Enables the support for the Actions Semi Owl timer driver. config SUN4I_TIMER bool "Sun4i timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS depends on HAS_IOMEM select CLKSRC_MMIO select TIMER_OF @@ -135,7 +126,6 @@ config TEGRA_TIMER config VT8500_TIMER bool "VT8500 timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS depends on HAS_IOMEM help Enables support for the VT8500 driver. 
@@ -148,7 +138,6 @@ config CADENCE_TTC_TIMER config ASM9260_TIMER bool "ASM9260 timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO select TIMER_OF help @@ -171,28 +160,24 @@ config CLKSRC_NOMADIK_MTU_SCHED_CLOCK config CLKSRC_DBX500_PRCMU bool "Clocksource PRCMU Timer" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS depends on HAS_IOMEM help Use the always on PRCMU Timer as clocksource config CLPS711X_TIMER bool "Cirrus logic timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO help Enables support for the Cirrus Logic PS711 timer. config ATLAS7_TIMER bool "Atlas7 timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO help Enables support for the Atlas7 timer. config MXS_TIMER bool "Mxs timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO select STMP_DEVICE help @@ -200,14 +185,12 @@ config MXS_TIMER config PRIMA2_TIMER bool "Prima2 timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO help Enables support for the Prima2 timer. config U300_TIMER bool "U300 timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS depends on ARM select CLKSRC_MMIO help @@ -215,14 +198,12 @@ config U300_TIMER config NSPIRE_TIMER bool "NSpire timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO help Enables support for the Nspire timer. config KEYSTONE_TIMER bool "Keystone timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS depends on ARM || ARM64 select CLKSRC_MMIO help @@ -230,7 +211,6 @@ config KEYSTONE_TIMER config INTEGRATOR_AP_TIMER bool "Integrator-ap timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO help Enables support for the Integrator-ap timer. @@ -253,7 +233,7 @@ config CLKSRC_EFM32 config CLKSRC_LPC32XX bool "Clocksource for LPC32XX" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM + depends on HAS_IOMEM depends on ARM select CLKSRC_MMIO select TIMER_OF @@ -262,7 +242,7 @@ config CLKSRC_LPC32XX config CLKSRC_PISTACHIO bool "Clocksource for Pistachio SoC" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM + depends on HAS_IOMEM select TIMER_OF help Enables the clocksource for the Pistachio SoC. 
@@ -298,7 +278,6 @@ config CLKSRC_MPS2 config ARC_TIMERS bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select TIMER_OF help These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores @@ -307,7 +286,6 @@ config ARC_TIMERS config ARC_TIMERS_64BIT bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS depends on ARC_TIMERS select TIMER_OF help @@ -407,7 +385,6 @@ config ATMEL_PIT config ATMEL_ST bool "Atmel ST timer support" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select TIMER_OF select MFD_SYSCON help @@ -426,7 +403,6 @@ config CLKSRC_EXYNOS_MCT config CLKSRC_SAMSUNG_PWM bool "PWM timer driver for Samsung S3C, S5P" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS depends on HAS_IOMEM help This is a new clocksource driver for the PWM timer found in @@ -436,7 +412,6 @@ config CLKSRC_SAMSUNG_PWM config FSL_FTM_TIMER bool "Freescale FlexTimer Module driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS depends on HAS_IOMEM select CLKSRC_MMIO help @@ -450,7 +425,6 @@ config VF_PIT_TIMER config OXNAS_RPS_TIMER bool "Oxford Semiconductor OXNAS RPS Timers driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select TIMER_OF select CLKSRC_MMIO help @@ -461,7 +435,7 @@ config SYS_SUPPORTS_SH_CMT config MTK_TIMER bool "Mediatek timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM + depends on HAS_IOMEM select TIMER_OF select CLKSRC_MMIO help @@ -479,7 +453,6 @@ config SYS_SUPPORTS_EM_STI config CLKSRC_JCORE_PIT bool "J-Core PIT timer driver" if COMPILE_TEST depends on OF - depends on GENERIC_CLOCKEVENTS depends on HAS_IOMEM select CLKSRC_MMIO help @@ -488,7 +461,6 @@ config CLKSRC_JCORE_PIT config SH_TIMER_CMT bool "Renesas CMT timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS depends on HAS_IOMEM default SYS_SUPPORTS_SH_CMT help @@ -498,7 +470,6 @@ config SH_TIMER_CMT config SH_TIMER_MTU2 bool "Renesas MTU2 timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS depends on HAS_IOMEM default SYS_SUPPORTS_SH_MTU2 help @@ -508,14 +479,12 @@ config SH_TIMER_MTU2 config RENESAS_OSTM bool "Renesas OSTM timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS select CLKSRC_MMIO help Enables the support for the Renesas OSTM. config SH_TIMER_TMU bool "Renesas TMU timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS depends on HAS_IOMEM default SYS_SUPPORTS_SH_TMU help @@ -525,7 +494,7 @@ config SH_TIMER_TMU config EM_TIMER_STI bool "Renesas STI timer driver" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM + depends on HAS_IOMEM default SYS_SUPPORTS_EM_STI help This enables build of a clocksource and clockevent driver for @@ -566,7 +535,6 @@ config CLKSRC_TANGO_XTAL config CLKSRC_PXA bool "Clocksource for PXA or SA-11x0 platform" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS depends on HAS_IOMEM select CLKSRC_MMIO help @@ -575,20 +543,20 @@ config CLKSRC_PXA config H8300_TMR8 bool "Clockevent timer for the H8300 platform" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM + depends on HAS_IOMEM help This enables the 8 bits timer for the H8300 platform. config H8300_TMR16 bool "Clockevent timer for the H83069 platform" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM + depends on HAS_IOMEM help This enables the 16 bits timer for the H8300 platform with the H83069 cpu. 
config H8300_TPU bool "Clocksource for the H8300 platform" if COMPILE_TEST - depends on GENERIC_CLOCKEVENTS && HAS_IOMEM + depends on HAS_IOMEM help This enables the clocksource for the H8300 platform with the H8S2678 cpu. @@ -600,7 +568,7 @@ config CLKSRC_IMX_GPT config CLKSRC_IMX_TPM bool "Clocksource using i.MX TPM" if COMPILE_TEST - depends on ARM && CLKDEV_LOOKUP && GENERIC_CLOCKEVENTS + depends on ARM && CLKDEV_LOOKUP select CLKSRC_MMIO help Enable this option to use IMX Timer/PWM Module (TPM) timer as diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index fd4b7f684bd0..0ecf5beb56ec 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -299,8 +299,7 @@ static u64 notrace arm64_858921_read_cntvct_el0(void) #endif #ifdef CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND -DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, - timer_unstable_counter_workaround); +DEFINE_PER_CPU(const struct arch_timer_erratum_workaround *, timer_unstable_counter_workaround); EXPORT_SYMBOL_GPL(timer_unstable_counter_workaround); DEFINE_STATIC_KEY_FALSE(arch_timer_read_ool_enabled); @@ -1268,10 +1267,6 @@ arch_timer_mem_find_best_frame(struct arch_timer_mem *timer_mem) iounmap(cntctlbase); - if (!best_frame) - pr_err("Unable to find a suitable frame in timer @ %pa\n", - &timer_mem->cntctlbase); - return best_frame; } @@ -1372,6 +1367,8 @@ static int __init arch_timer_mem_of_init(struct device_node *np) frame = arch_timer_mem_find_best_frame(timer_mem); if (!frame) { + pr_err("Unable to find a suitable frame in timer @ %pa\n", + &timer_mem->cntctlbase); ret = -EINVAL; goto out; } @@ -1420,7 +1417,7 @@ arch_timer_mem_verify_cntfrq(struct arch_timer_mem *timer_mem) static int __init arch_timer_mem_acpi_init(int platform_timer_count) { struct arch_timer_mem *timers, *timer; - struct arch_timer_mem_frame *frame; + struct arch_timer_mem_frame *frame, *best_frame = NULL; int timer_count, i, ret = 0; timers = kcalloc(platform_timer_count, sizeof(*timers), @@ -1432,14 +1429,6 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count) if (ret || !timer_count) goto out; - for (i = 0; i < timer_count; i++) { - ret = arch_timer_mem_verify_cntfrq(&timers[i]); - if (ret) { - pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); - goto out; - } - } - /* * While unlikely, it's theoretically possible that none of the frames * in a timer expose the combination of feature we want. @@ -1448,12 +1437,26 @@ static int __init arch_timer_mem_acpi_init(int platform_timer_count) timer = &timers[i]; frame = arch_timer_mem_find_best_frame(timer); - if (frame) - break; + if (!best_frame) + best_frame = frame; + + ret = arch_timer_mem_verify_cntfrq(timer); + if (ret) { + pr_err("Disabling MMIO timers due to CNTFRQ mismatch\n"); + goto out; + } + + if (!best_frame) /* implies !frame */ + /* + * Only complain about missing suitable frames if we + * haven't already found one in a previous iteration. 
+ */ + pr_err("Unable to find a suitable frame in timer @ %pa\n", + &timer->cntctlbase); } - if (frame) - ret = arch_timer_mem_frame_register(frame); + if (best_frame) + ret = arch_timer_mem_frame_register(best_frame); out: kfree(timers); return ret; diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c index 39e489a96ad7..60da2537bef9 100644 --- a/drivers/clocksource/bcm2835_timer.c +++ b/drivers/clocksource/bcm2835_timer.c @@ -71,7 +71,7 @@ static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id) if (readl_relaxed(timer->control) & timer->match_mask) { writel_relaxed(timer->match_mask, timer->control); - event_handler = ACCESS_ONCE(timer->evt.event_handler); + event_handler = READ_ONCE(timer->evt.event_handler); if (event_handler) event_handler(&timer->evt); return IRQ_HANDLED; diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index ae3167c28b12..a04808a21d4e 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -39,16 +39,18 @@ static u64 notrace gic_read_count(void) static int gic_next_event(unsigned long delta, struct clock_event_device *evt) { - unsigned long flags; + int cpu = cpumask_first(evt->cpumask); u64 cnt; int res; cnt = gic_read_count(); cnt += (u64)delta; - local_irq_save(flags); - write_gic_vl_other(mips_cm_vp_id(cpumask_first(evt->cpumask))); - write_gic_vo_compare(cnt); - local_irq_restore(flags); + if (cpu == raw_smp_processor_id()) { + write_gic_vl_compare(cnt); + } else { + write_gic_vl_other(mips_cm_vp_id(cpu)); + write_gic_vo_compare(cnt); + } res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0; return res; } diff --git a/drivers/clocksource/owl-timer.c b/drivers/clocksource/owl-timer.c index d19c53c11094..c68630565079 100644 --- a/drivers/clocksource/owl-timer.c +++ b/drivers/clocksource/owl-timer.c @@ -125,7 +125,7 @@ static int __init owl_timer_init(struct device_node *node) owl_timer_base = of_io_request_and_map(node, 0, "owl-timer"); if (IS_ERR(owl_timer_base)) { - pr_err("Can't map timer registers"); + pr_err("Can't map timer registers\n"); return PTR_ERR(owl_timer_base); } @@ -134,7 +134,7 @@ static int __init owl_timer_init(struct device_node *node) timer1_irq = of_irq_get_byname(node, "timer1"); if (timer1_irq <= 0) { - pr_err("Can't parse timer1 IRQ"); + pr_err("Can't parse timer1 IRQ\n"); return -EINVAL; } diff --git a/drivers/clocksource/rockchip_timer.c b/drivers/clocksource/rockchip_timer.c index c27f4c850d83..33f370dbd0d6 100644 --- a/drivers/clocksource/rockchip_timer.c +++ b/drivers/clocksource/rockchip_timer.c @@ -274,7 +274,7 @@ static int __init rk_clksrc_init(struct device_node *np) TIMER_NAME, rk_clksrc->freq, 250, 32, clocksource_mmio_readl_down); if (ret) { - pr_err("Failed to register clocksource"); + pr_err("Failed to register clocksource\n"); goto out_clocksource; } diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index e09e8bf0bb9b..70b3cf8e23d0 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -25,6 +25,7 @@ #include <linux/irq.h> #include <linux/module.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> @@ -39,16 +40,16 @@ struct sh_cmt_device; * SoC but also on the particular instance. The following table lists the main * characteristics of those flavours. 
* - * 16B 32B 32B-F 48B 48B-2 + * 16B 32B 32B-F 48B R-Car Gen2 * ----------------------------------------------------------------------------- * Channels 2 1/4 1 6 2/8 * Control Width 16 16 16 16 32 * Counter Width 16 32 32 32/48 32/48 * Shared Start/Stop Y Y Y Y N * - * The 48-bit gen2 version has a per-channel start/stop register located in the - * channel registers block. All other versions have a shared start/stop register - * located in the global space. + * The r8a73a4 / R-Car Gen2 version has a per-channel start/stop register + * located in the channel registers block. All other versions have a shared + * start/stop register located in the global space. * * Channels are indexed from 0 to N-1 in the documentation. The channel index * infers the start/stop bit position in the control register and the channel @@ -66,14 +67,16 @@ struct sh_cmt_device; enum sh_cmt_model { SH_CMT_16BIT, SH_CMT_32BIT, - SH_CMT_32BIT_FAST, SH_CMT_48BIT, - SH_CMT_48BIT_GEN2, + SH_CMT0_RCAR_GEN2, + SH_CMT1_RCAR_GEN2, }; struct sh_cmt_info { enum sh_cmt_model model; + unsigned int channels_mask; + unsigned long width; /* 16 or 32 bit version of hardware block */ unsigned long overflow_bit; unsigned long clear_bits; @@ -200,18 +203,20 @@ static const struct sh_cmt_info sh_cmt_info[] = { .read_count = sh_cmt_read32, .write_count = sh_cmt_write32, }, - [SH_CMT_32BIT_FAST] = { - .model = SH_CMT_32BIT_FAST, + [SH_CMT_48BIT] = { + .model = SH_CMT_48BIT, + .channels_mask = 0x3f, .width = 32, .overflow_bit = SH_CMT32_CMCSR_CMF, .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), - .read_control = sh_cmt_read16, - .write_control = sh_cmt_write16, + .read_control = sh_cmt_read32, + .write_control = sh_cmt_write32, .read_count = sh_cmt_read32, .write_count = sh_cmt_write32, }, - [SH_CMT_48BIT] = { - .model = SH_CMT_48BIT, + [SH_CMT0_RCAR_GEN2] = { + .model = SH_CMT0_RCAR_GEN2, + .channels_mask = 0x60, .width = 32, .overflow_bit = SH_CMT32_CMCSR_CMF, .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), @@ -220,8 +225,9 @@ static const struct sh_cmt_info sh_cmt_info[] = { .read_count = sh_cmt_read32, .write_count = sh_cmt_write32, }, - [SH_CMT_48BIT_GEN2] = { - .model = SH_CMT_48BIT_GEN2, + [SH_CMT1_RCAR_GEN2] = { + .model = SH_CMT1_RCAR_GEN2, + .channels_mask = 0xff, .width = 32, .overflow_bit = SH_CMT32_CMCSR_CMF, .clear_bits = ~(SH_CMT32_CMCSR_CMF | SH_CMT32_CMCSR_OVF), @@ -859,6 +865,7 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, ch->cmt = cmt; ch->index = index; ch->hwidx = hwidx; + ch->timer_bit = hwidx; /* * Compute the address of the channel control register block. For the @@ -873,16 +880,11 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, case SH_CMT_48BIT: ch->ioctrl = cmt->mapbase + 0x10 + ch->hwidx * 0x10; break; - case SH_CMT_32BIT_FAST: - /* - * The 32-bit "fast" timer has a single channel at hwidx 5 but - * is located at offset 0x40 instead of 0x60 for some reason. - */ - ch->ioctrl = cmt->mapbase + 0x40; - break; - case SH_CMT_48BIT_GEN2: + case SH_CMT0_RCAR_GEN2: + case SH_CMT1_RCAR_GEN2: ch->iostart = cmt->mapbase + ch->hwidx * 0x100; ch->ioctrl = ch->iostart + 0x10; + ch->timer_bit = 0; break; } @@ -894,8 +896,6 @@ static int sh_cmt_setup_channel(struct sh_cmt_channel *ch, unsigned int index, ch->match_value = ch->max_match_value; raw_spin_lock_init(&ch->lock); - ch->timer_bit = cmt->info->model == SH_CMT_48BIT_GEN2 ? 
0 : ch->hwidx; - ret = sh_cmt_register(ch, dev_name(&cmt->pdev->dev), clockevent, clocksource); if (ret) { @@ -935,22 +935,18 @@ static const struct platform_device_id sh_cmt_id_table[] = { MODULE_DEVICE_TABLE(platform, sh_cmt_id_table); static const struct of_device_id sh_cmt_of_table[] __maybe_unused = { - { .compatible = "renesas,cmt-32", .data = &sh_cmt_info[SH_CMT_32BIT] }, - { .compatible = "renesas,cmt-32-fast", .data = &sh_cmt_info[SH_CMT_32BIT_FAST] }, { .compatible = "renesas,cmt-48", .data = &sh_cmt_info[SH_CMT_48BIT] }, - { .compatible = "renesas,cmt-48-gen2", .data = &sh_cmt_info[SH_CMT_48BIT_GEN2] }, + { + /* deprecated, preserved for backward compatibility */ + .compatible = "renesas,cmt-48-gen2", + .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2] + }, + { .compatible = "renesas,rcar-gen2-cmt0", .data = &sh_cmt_info[SH_CMT0_RCAR_GEN2] }, + { .compatible = "renesas,rcar-gen2-cmt1", .data = &sh_cmt_info[SH_CMT1_RCAR_GEN2] }, { } }; MODULE_DEVICE_TABLE(of, sh_cmt_of_table); -static int sh_cmt_parse_dt(struct sh_cmt_device *cmt) -{ - struct device_node *np = cmt->pdev->dev.of_node; - - return of_property_read_u32(np, "renesas,channels-mask", - &cmt->hw_channels); -} - static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) { unsigned int mask; @@ -961,14 +957,8 @@ static int sh_cmt_setup(struct sh_cmt_device *cmt, struct platform_device *pdev) raw_spin_lock_init(&cmt->lock); if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) { - const struct of_device_id *id; - - id = of_match_node(sh_cmt_of_table, pdev->dev.of_node); - cmt->info = id->data; - - ret = sh_cmt_parse_dt(cmt); - if (ret < 0) - return ret; + cmt->info = of_device_get_match_data(&pdev->dev); + cmt->hw_channels = cmt->info->channels_mask; } else if (pdev->dev.platform_data) { struct sh_timer_config *cfg = pdev->dev.platform_data; const struct platform_device_id *id = pdev->id_entry; diff --git a/drivers/clocksource/timer-fttmr010.c b/drivers/clocksource/timer-fttmr010.c index cdf23b628688..c020038ebfab 100644 --- a/drivers/clocksource/timer-fttmr010.c +++ b/drivers/clocksource/timer-fttmr010.c @@ -264,14 +264,14 @@ static int __init fttmr010_common_init(struct device_node *np, bool is_aspeed) fttmr010->base = of_iomap(np, 0); if (!fttmr010->base) { - pr_err("Can't remap registers"); + pr_err("Can't remap registers\n"); ret = -ENXIO; goto out_free; } /* IRQ for timer 1 */ irq = irq_of_parse_and_map(np, 0); if (irq <= 0) { - pr_err("Can't parse IRQ"); + pr_err("Can't parse IRQ\n"); ret = -EINVAL; goto out_unmap; } diff --git a/drivers/clocksource/timer-of.c b/drivers/clocksource/timer-of.c index c79122d8e10d..7c64a5c1bfc1 100644 --- a/drivers/clocksource/timer-of.c +++ b/drivers/clocksource/timer-of.c @@ -176,3 +176,15 @@ out_fail: timer_base_exit(&to->of_base); return ret; } + +void timer_of_exit(struct timer_of *to) +{ + if (to->flags & TIMER_OF_IRQ) + timer_irq_exit(&to->of_irq); + + if (to->flags & TIMER_OF_CLOCK) + timer_clk_exit(&to->of_clk); + + if (to->flags & TIMER_OF_BASE) + timer_base_exit(&to->of_base); +} diff --git a/drivers/clocksource/timer-of.h b/drivers/clocksource/timer-of.h index c6d995ab93d5..43f5ba3f8979 100644 --- a/drivers/clocksource/timer-of.h +++ b/drivers/clocksource/timer-of.h @@ -67,4 +67,7 @@ static inline unsigned long timer_of_period(struct timer_of *to) extern int __init timer_of_init(struct device_node *np, struct timer_of *to); + +extern void timer_of_exit(struct timer_of *to); + #endif diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c 
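The sh_cmt rework above replaces the of_match_node() lookup plus the "renesas,channels-mask" DT property with per-compatible match data: each compatible string now carries its channels_mask, fetched in one call with of_device_get_match_data(). A condensed sketch of that idiom (simplified structure and table, not the full driver):

    #include <linux/of_device.h>
    #include <linux/platform_device.h>

    struct my_timer_info {
            unsigned int channels_mask;     /* per-SoC data, formerly a DT property */
    };

    static const struct my_timer_info rcar_gen2_cmt0_info = { .channels_mask = 0x60 };

    static const struct of_device_id my_timer_of_table[] = {
            { .compatible = "renesas,rcar-gen2-cmt0", .data = &rcar_gen2_cmt0_info },
            { /* sentinel */ }
    };

    static int my_timer_probe(struct platform_device *pdev)
    {
            const struct my_timer_info *info;

            /* Returns the .data pointer of the matching of_device_id entry. */
            info = of_device_get_match_data(&pdev->dev);
            if (!info)
                    return -ENODEV;

            /* ... use info->channels_mask instead of parsing a property ... */
            return 0;
    }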
index 3ff5160451b4..b6d7c4c98d0a 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -90,6 +90,7 @@ struct global_pstate_info { int last_gpstate_idx; spinlock_t gpstate_lock; struct timer_list timer; + struct cpufreq_policy *policy; }; static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; @@ -625,10 +626,10 @@ static inline void queue_gpstate_timer(struct global_pstate_info *gpstates) * according quadratic equation. Queues a new timer if it is still not equal * to local pstate */ -void gpstate_timer_handler(unsigned long data) +void gpstate_timer_handler(struct timer_list *t) { - struct cpufreq_policy *policy = (struct cpufreq_policy *)data; - struct global_pstate_info *gpstates = policy->driver_data; + struct global_pstate_info *gpstates = from_timer(gpstates, t, timer); + struct cpufreq_policy *policy = gpstates->policy; int gpstate_idx, lpstate_idx; unsigned long val; unsigned int time_diff = jiffies_to_msecs(jiffies) @@ -800,9 +801,9 @@ static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy) policy->driver_data = gpstates; /* initialize timer */ - init_timer_pinned_deferrable(&gpstates->timer); - gpstates->timer.data = (unsigned long)policy; - gpstates->timer.function = gpstate_timer_handler; + gpstates->policy = policy; + timer_setup(&gpstates->timer, gpstate_timer_handler, + TIMER_PINNED | TIMER_DEFERRABLE); gpstates->timer.expires = jiffies + msecs_to_jiffies(GPSTATE_TIMER_INTERVAL); spin_lock_init(&gpstates->gpstate_lock); diff --git a/drivers/crypto/axis/artpec6_crypto.c b/drivers/crypto/axis/artpec6_crypto.c index 0f9754e07719..456278440863 100644 --- a/drivers/crypto/axis/artpec6_crypto.c +++ b/drivers/crypto/axis/artpec6_crypto.c @@ -2072,9 +2072,9 @@ static void artpec6_crypto_process_queue(struct artpec6_crypto *ac) del_timer(&ac->timer); } -static void artpec6_crypto_timeout(unsigned long data) +static void artpec6_crypto_timeout(struct timer_list *t) { - struct artpec6_crypto *ac = (struct artpec6_crypto *) data; + struct artpec6_crypto *ac = from_timer(ac, t, timer); dev_info_ratelimited(artpec6_crypto_dev, "timeout\n"); @@ -3063,7 +3063,7 @@ static int artpec6_crypto_probe(struct platform_device *pdev) spin_lock_init(&ac->queue_lock); INIT_LIST_HEAD(&ac->queue); INIT_LIST_HEAD(&ac->pending); - setup_timer(&ac->timer, artpec6_crypto_timeout, (unsigned long) ac); + timer_setup(&ac->timer, artpec6_crypto_timeout, 0); ac->base = base; diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c index d258953ff488..f4f258075b89 100644 --- a/drivers/crypto/caam/jr.c +++ b/drivers/crypto/caam/jr.c @@ -172,7 +172,7 @@ static void caam_jr_dequeue(unsigned long devarg) while (rd_reg32(&jrp->rregs->outring_used)) { - head = ACCESS_ONCE(jrp->head); + head = READ_ONCE(jrp->head); spin_lock(&jrp->outlock); @@ -341,7 +341,7 @@ int caam_jr_enqueue(struct device *dev, u32 *desc, spin_lock_bh(&jrp->inplock); head = jrp->head; - tail = ACCESS_ONCE(jrp->tail); + tail = READ_ONCE(jrp->tail); if (!rd_reg32(&jrp->rregs->inpring_avail) || CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c index bf25f415eea6..0eb2706f23c8 100644 --- a/drivers/crypto/mv_cesa.c +++ b/drivers/crypto/mv_cesa.c @@ -149,7 +149,7 @@ struct mv_req_hash_ctx { int count_add; }; -static void mv_completion_timer_callback(unsigned long unused) +static void mv_completion_timer_callback(struct timer_list *unused) { int active = readl(cpg->reg + SEC_ACCEL_CMD) & SEC_CMD_EN_SEC_ACCL0; @@ -167,7 
+167,7 @@ static void mv_completion_timer_callback(unsigned long unused) static void mv_setup_timer(void) { - setup_timer(&cpg->completion_timer, &mv_completion_timer_callback, 0); + timer_setup(&cpg->completion_timer, mv_completion_timer_callback, 0); mod_timer(&cpg->completion_timer, jiffies + msecs_to_jiffies(MV_CESA_EXPIRE)); } diff --git a/drivers/crypto/nx/nx-842-powernv.c b/drivers/crypto/nx/nx-842-powernv.c index 874ddf5e9087..0f20f5ec9617 100644 --- a/drivers/crypto/nx/nx-842-powernv.c +++ b/drivers/crypto/nx/nx-842-powernv.c @@ -193,7 +193,7 @@ static int wait_for_csb(struct nx842_workmem *wmem, ktime_t start = wmem->start, now = ktime_get(); ktime_t timeout = ktime_add_ms(start, CSB_WAIT_MAX); - while (!(ACCESS_ONCE(csb->flags) & CSB_V)) { + while (!(READ_ONCE(csb->flags) & CSB_V)) { cpu_relax(); now = ktime_get(); if (ktime_after(now, timeout)) diff --git a/drivers/crypto/picoxcell_crypto.c b/drivers/crypto/picoxcell_crypto.c index b6f14844702e..5a6dc53b2b9d 100644 --- a/drivers/crypto/picoxcell_crypto.c +++ b/drivers/crypto/picoxcell_crypto.c @@ -1125,9 +1125,9 @@ static irqreturn_t spacc_spacc_irq(int irq, void *dev) return IRQ_HANDLED; } -static void spacc_packet_timeout(unsigned long data) +static void spacc_packet_timeout(struct timer_list *t) { - struct spacc_engine *engine = (struct spacc_engine *)data; + struct spacc_engine *engine = from_timer(engine, t, packet_timeout); spacc_process_done(engine); } @@ -1714,8 +1714,7 @@ static int spacc_probe(struct platform_device *pdev) writel(SPA_IRQ_EN_STAT_EN | SPA_IRQ_EN_GLBL_EN, engine->regs + SPA_IRQ_EN_REG_OFFSET); - setup_timer(&engine->packet_timeout, spacc_packet_timeout, - (unsigned long)engine); + timer_setup(&engine->packet_timeout, spacc_packet_timeout, 0); INIT_LIST_HEAD(&engine->pending); INIT_LIST_HEAD(&engine->completed); diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 346c4987b284..11d6419788c2 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -175,11 +175,11 @@ static ssize_t altr_sdr_mc_err_inject_write(struct file *file, /* * To trigger the error, we need to read the data back * (the data was written with errors above). - * The ACCESS_ONCE macros and printk are used to prevent the + * The READ_ONCE macros and printk are used to prevent the * the compiler optimizing these reads out. */ - reg = ACCESS_ONCE(ptemp[0]); - read_reg = ACCESS_ONCE(ptemp[1]); + reg = READ_ONCE(ptemp[0]); + read_reg = READ_ONCE(ptemp[1]); /* Force Read */ rmb(); @@ -618,7 +618,7 @@ static ssize_t altr_edac_device_trig(struct file *file, for (i = 0; i < (priv->trig_alloc_sz / sizeof(*ptemp)); i++) { /* Read data so we're in the correct state */ rmb(); - if (ACCESS_ONCE(ptemp[i])) + if (READ_ONCE(ptemp[i])) result = -1; /* Toggle Error bit (it is latched), leave ECC enabled */ writel(error_mask, (drvdata->base + priv->set_err_ofst)); @@ -635,7 +635,7 @@ static ssize_t altr_edac_device_trig(struct file *file, /* Read out written data. 
ECC error caused here */ for (i = 0; i < ALTR_TRIGGER_READ_WRD_CNT; i++) - if (ACCESS_ONCE(ptemp[i]) != i) + if (READ_ONCE(ptemp[i]) != i) edac_printk(KERN_ERR, EDAC_DEVICE, "Read doesn't match written data\n"); diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index ac2f30295efe..8b16ec595fa7 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -3434,9 +3434,14 @@ MODULE_DEVICE_TABLE(x86cpu, amd64_cpuids); static int __init amd64_edac_init(void) { + const char *owner; int err = -ENODEV; int i; + owner = edac_get_owner(); + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR))) + return -EBUSY; + if (!x86_match_cpu(amd64_cpuids)) return -ENODEV; diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 480072139b7a..48193f5f3b56 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c @@ -53,7 +53,7 @@ static LIST_HEAD(mc_devices); * Used to lock EDAC MC to just one module, avoiding two drivers e. g. * apei/ghes and i7core_edac to be used at the same time. */ -static void const *edac_mc_owner; +static const char *edac_mc_owner; static struct bus_type mc_bus[EDAC_MAX_MCS]; @@ -701,6 +701,11 @@ unlock: } EXPORT_SYMBOL(edac_mc_find); +const char *edac_get_owner(void) +{ + return edac_mc_owner; +} +EXPORT_SYMBOL_GPL(edac_get_owner); /* FIXME - should a warning be printed if no error detection? correction? */ int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci, diff --git a/drivers/edac/edac_mc.h b/drivers/edac/edac_mc.h index 5357800e418d..4165e15995ad 100644 --- a/drivers/edac/edac_mc.h +++ b/drivers/edac/edac_mc.h @@ -128,6 +128,14 @@ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, unsigned sz_pvt); /** + * edac_get_owner - Return the owner's mod_name of EDAC MC + * + * Returns: + * Pointer to mod_name string when EDAC MC is owned. NULL otherwise. + */ +extern const char *edac_get_owner(void); + +/* * edac_mc_add_mc_with_groups() - Insert the @mci structure into the mci * global list and create sysfs entries associated with @mci structure. * diff --git a/drivers/edac/ghes_edac.c b/drivers/edac/ghes_edac.c index 6f80eb65c26c..68b6ee18bea6 100644 --- a/drivers/edac/ghes_edac.c +++ b/drivers/edac/ghes_edac.c @@ -28,10 +28,19 @@ struct ghes_edac_pvt { char msg[80]; }; -static LIST_HEAD(ghes_reglist); -static DEFINE_MUTEX(ghes_edac_lock); -static int ghes_edac_mc_num; +static atomic_t ghes_init = ATOMIC_INIT(0); +static struct ghes_edac_pvt *ghes_pvt; +/* + * Sync with other, potentially concurrent callers of + * ghes_edac_report_mem_error(). We don't know what the + * "inventive" firmware would do. + */ +static DEFINE_SPINLOCK(ghes_lock); + +/* "ghes_edac.force_load=1" skips the platform check */ +static bool __read_mostly force_load; +module_param(force_load, bool, 0); /* Memory Device - Type 17 of SMBIOS spec */ struct memdev_dmi_entry { @@ -169,18 +178,26 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev, enum hw_event_mc_err_type type; struct edac_raw_error_desc *e; struct mem_ctl_info *mci; - struct ghes_edac_pvt *pvt = NULL; + struct ghes_edac_pvt *pvt = ghes_pvt; + unsigned long flags; char *p; u8 grain_bits; - list_for_each_entry(pvt, &ghes_reglist, list) { - if (ghes == pvt->ghes) - break; - } if (!pvt) { pr_err("Internal error: Can't find EDAC structure\n"); return; } + + /* + * We can do the locking below because GHES defers error processing + * from NMI to IRQ context. Whenever that changes, we'd at least + * know. 
+ */ + if (WARN_ON_ONCE(in_nmi())) + return; + + spin_lock_irqsave(&ghes_lock, flags); + mci = pvt->mci; e = &mci->error_desc; @@ -398,10 +415,17 @@ void ghes_edac_report_mem_error(struct ghes *ghes, int sev, (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page, grain_bits, e->syndrome, pvt->detail_location); - /* Report the error via EDAC API */ edac_raw_mc_handle_error(type, mci, e); + spin_unlock_irqrestore(&ghes_lock, flags); } -EXPORT_SYMBOL_GPL(ghes_edac_report_mem_error); + +/* + * Known systems that are safe to enable this module. + */ +static struct acpi_platform_list plat_list[] = { + {"HPE ", "Server ", 0, ACPI_SIG_FADT, all_versions}, + { } /* End */ +}; int ghes_edac_register(struct ghes *ghes, struct device *dev) { @@ -409,8 +433,19 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev) int rc, num_dimm = 0; struct mem_ctl_info *mci; struct edac_mc_layer layers[1]; - struct ghes_edac_pvt *pvt; struct ghes_edac_dimm_fill dimm_fill; + int idx; + + /* Check if safe to enable on this system */ + idx = acpi_match_platform_list(plat_list); + if (!force_load && idx < 0) + return 0; + + /* + * We have only one logical memory controller to which all DIMMs belong. + */ + if (atomic_inc_return(&ghes_init) > 1) + return 0; /* Get the number of DIMMs */ dmi_walk(ghes_edac_count_dimms, &num_dimm); @@ -425,26 +460,17 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev) layers[0].size = num_dimm; layers[0].is_virt_csrow = true; - /* - * We need to serialize edac_mc_alloc() and edac_mc_add_mc(), - * to avoid duplicated memory controller numbers - */ - mutex_lock(&ghes_edac_lock); - mci = edac_mc_alloc(ghes_edac_mc_num, ARRAY_SIZE(layers), layers, - sizeof(*pvt)); + mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(struct ghes_edac_pvt)); if (!mci) { pr_info("Can't allocate memory for EDAC data\n"); - mutex_unlock(&ghes_edac_lock); return -ENOMEM; } - pvt = mci->pvt_info; - memset(pvt, 0, sizeof(*pvt)); - list_add_tail(&pvt->list, &ghes_reglist); - pvt->ghes = ghes; - pvt->mci = mci; - mci->pdev = dev; + ghes_pvt = mci->pvt_info; + ghes_pvt->ghes = ghes; + ghes_pvt->mci = mci; + mci->pdev = dev; mci->mtype_cap = MEM_FLAG_EMPTY; mci->edac_ctl_cap = EDAC_FLAG_NONE; mci->edac_cap = EDAC_FLAG_NONE; @@ -452,36 +478,23 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev) mci->ctl_name = "ghes_edac"; mci->dev_name = "ghes"; - if (!ghes_edac_mc_num) { - if (!fake) { - pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n"); - pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n"); - pr_info("So, the end result of using this driver varies from vendor to vendor.\n"); - pr_info("If you find incorrect reports, please contact your hardware vendor\n"); - pr_info("to correct its BIOS.\n"); - pr_info("This system has %d DIMM sockets.\n", - num_dimm); - } else { - pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n"); - pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n"); - pr_info("work on such system. Use this driver with caution\n"); - } + if (fake) { + pr_info("This system has a very crappy BIOS: It doesn't even list the DIMMS.\n"); + pr_info("Its SMBIOS info is wrong. It is doubtful that the error report would\n"); + pr_info("work on such system. 
Use this driver with caution\n"); + } else if (idx < 0) { + pr_info("This EDAC driver relies on BIOS to enumerate memory and get error reports.\n"); + pr_info("Unfortunately, not all BIOSes reflect the memory layout correctly.\n"); + pr_info("So, the end result of using this driver varies from vendor to vendor.\n"); + pr_info("If you find incorrect reports, please contact your hardware vendor\n"); + pr_info("to correct its BIOS.\n"); + pr_info("This system has %d DIMM sockets.\n", num_dimm); } if (!fake) { - /* - * Fill DIMM info from DMI for the memory controller #0 - * - * Keep it in blank for the other memory controllers, as - * there's no reliable way to properly credit each DIMM to - * the memory controller, as different BIOSes fill the - * DMI bank location fields on different ways - */ - if (!ghes_edac_mc_num) { - dimm_fill.count = 0; - dimm_fill.mci = mci; - dmi_walk(ghes_edac_dmidecode, &dimm_fill); - } + dimm_fill.count = 0; + dimm_fill.mci = mci; + dmi_walk(ghes_edac_dmidecode, &dimm_fill); } else { struct dimm_info *dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers, 0, 0, 0); @@ -497,28 +510,16 @@ int ghes_edac_register(struct ghes *ghes, struct device *dev) if (rc < 0) { pr_info("Can't register at EDAC core\n"); edac_mc_free(mci); - mutex_unlock(&ghes_edac_lock); return -ENODEV; } - - ghes_edac_mc_num++; - mutex_unlock(&ghes_edac_lock); return 0; } -EXPORT_SYMBOL_GPL(ghes_edac_register); void ghes_edac_unregister(struct ghes *ghes) { struct mem_ctl_info *mci; - struct ghes_edac_pvt *pvt, *tmp; - - list_for_each_entry_safe(pvt, tmp, &ghes_reglist, list) { - if (ghes == pvt->ghes) { - mci = pvt->mci; - edac_mc_del_mc(mci->pdev); - edac_mc_free(mci); - list_del(&pvt->list); - } - } + + mci = ghes_pvt->mci; + edac_mc_del_mc(mci->pdev); + edac_mc_free(mci); } -EXPORT_SYMBOL_GPL(ghes_edac_unregister); diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index c16c3b931b3d..8c5540160a23 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c @@ -2159,8 +2159,13 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev) mci->edac_ctl_cap = EDAC_FLAG_NONE; mci->edac_cap = EDAC_FLAG_NONE; mci->mod_name = "i7core_edac.c"; - mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d", - i7core_dev->socket); + + mci->ctl_name = kasprintf(GFP_KERNEL, "i7 core #%d", i7core_dev->socket); + if (!mci->ctl_name) { + rc = -ENOMEM; + goto fail1; + } + mci->dev_name = pci_name(i7core_dev->pdev[0]); mci->ctl_page_to_phys = NULL; @@ -2214,6 +2219,8 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev) fail0: kfree(mci->ctl_name); + +fail1: edac_mc_free(mci); i7core_dev->mci = NULL; return rc; diff --git a/drivers/edac/pnd2_edac.c b/drivers/edac/pnd2_edac.c index 4395c84cdcbf..df28b65358d2 100644 --- a/drivers/edac/pnd2_edac.c +++ b/drivers/edac/pnd2_edac.c @@ -45,6 +45,8 @@ #include "edac_module.h" #include "pnd2_edac.h" +#define EDAC_MOD_STR "pnd2_edac" + #define APL_NUM_CHANNELS 4 #define DNV_NUM_CHANNELS 2 #define DNV_MAX_DIMMS 2 /* Max DIMMs per channel */ @@ -1355,7 +1357,7 @@ static int pnd2_register_mci(struct mem_ctl_info **ppmci) pvt = mci->pvt_info; memset(pvt, 0, sizeof(*pvt)); - mci->mod_name = "pnd2_edac.c"; + mci->mod_name = EDAC_MOD_STR; mci->dev_name = ops->name; mci->ctl_name = "Pondicherry2"; @@ -1547,10 +1549,15 @@ MODULE_DEVICE_TABLE(x86cpu, pnd2_cpuids); static int __init pnd2_init(void) { const struct x86_cpu_id *id; + const char *owner; int rc; edac_dbg(2, "\n"); + owner = edac_get_owner(); + if (owner && strncmp(owner, 
EDAC_MOD_STR, sizeof(EDAC_MOD_STR))) + return -EBUSY; + id = x86_match_cpu(pnd2_cpuids); if (!id) return -ENODEV; diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c index dc0591654011..f34430f99fd8 100644 --- a/drivers/edac/sb_edac.c +++ b/drivers/edac/sb_edac.c @@ -36,7 +36,7 @@ static LIST_HEAD(sbridge_edac_list); * Alter this version for the module when modifications are made */ #define SBRIDGE_REVISION " Ver: 1.1.2 " -#define EDAC_MOD_STR "sbridge_edac" +#define EDAC_MOD_STR "sb_edac" /* * Debug macros @@ -462,6 +462,7 @@ static const struct pci_id_table pci_dev_descr_sbridge_table[] = { static const struct pci_id_descr pci_dev_descr_ibridge[] = { /* Processor Home Agent */ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0, 0, IMC0) }, + { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) }, /* Memory controller */ { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA, 0, IMC0) }, @@ -472,7 +473,6 @@ static const struct pci_id_descr pci_dev_descr_ibridge[] = { { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TAD3, 0, IMC0) }, /* Optional, mode 2HA */ - { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1, 1, IMC1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA, 1, IMC1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS, 1, IMC1) }, { PCI_DESCR(PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TAD0, 1, IMC1) }, @@ -1318,9 +1318,7 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes) int cur_reg_start; int mc; int channel; - int way; int participants[KNL_MAX_CHANNELS]; - int participant_count = 0; for (i = 0; i < KNL_MAX_CHANNELS; i++) mc_sizes[i] = 0; @@ -1495,21 +1493,14 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes) * this channel mapped to the given target? */ for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) { - for (way = 0; way < intrlv_ways; way++) { - int target; - int cha; - - if (KNL_MOD3(dram_rule)) - target = way; - else - target = 0x7 & sad_pkg( - pvt->info.interleave_pkg, interleave_reg, way); + int target; + int cha; + for (target = 0; target < KNL_MAX_CHANNELS; target++) { for (cha = 0; cha < KNL_MAX_CHAS; cha++) { if (knl_get_mc_route(target, mc_route_reg[cha]) == channel && !participants[channel]) { - participant_count++; participants[channel] = 1; break; } @@ -1517,10 +1508,6 @@ static int knl_get_dimm_capacity(struct sbridge_pvt *pvt, u64 *mc_sizes) } } - if (participant_count != intrlv_ways) - edac_dbg(0, "participant_count (%d) != interleave_ways (%d): DIMM size may be incorrect\n", - participant_count, intrlv_ways); - for (channel = 0; channel < KNL_MAX_CHANNELS; channel++) { mc = knl_channel_mc(channel); if (participants[channel]) { @@ -2291,6 +2278,13 @@ static int sbridge_get_onedevice(struct pci_dev **prev, next_imc: sbridge_dev = get_sbridge_dev(bus, dev_descr->dom, multi_bus, sbridge_dev); if (!sbridge_dev) { + /* If the HA1 wasn't found, don't create EDAC second memory controller */ + if (dev_descr->dom == IMC1 && devno != 1) { + edac_dbg(0, "Skip IMC1: %04x:%04x (since HA1 was absent)\n", + PCI_VENDOR_ID_INTEL, dev_descr->dev_id); + pci_dev_put(pdev); + return 0; + } if (dev_descr->dom == SOCK) goto out_imc; @@ -2491,6 +2485,7 @@ static int ibridge_mci_bind_devs(struct mem_ctl_info *mci, case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_TA: case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_TA: pvt->pci_ta = pdev; + break; case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA0_RAS: case PCI_DEVICE_ID_INTEL_IBRIDGE_IMC_HA1_RAS: pvt->pci_ras = pdev; @@ -3155,7 +3150,7 @@ static int sbridge_register_mci(struct sbridge_dev 
*sbridge_dev, enum type type) MEM_FLAG_DDR4 : MEM_FLAG_DDR3; mci->edac_ctl_cap = EDAC_FLAG_NONE; mci->edac_cap = EDAC_FLAG_NONE; - mci->mod_name = "sb_edac.c"; + mci->mod_name = EDAC_MOD_STR; mci->dev_name = pci_name(pdev); mci->ctl_page_to_phys = NULL; @@ -3287,6 +3282,11 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev, enum type type) break; } + if (!mci->ctl_name) { + rc = -ENOMEM; + goto fail0; + } + /* Get dimm basic config and the memory layout */ rc = get_dimm_config(mci); if (rc < 0) { @@ -3402,10 +3402,15 @@ static void sbridge_remove(void) static int __init sbridge_init(void) { const struct x86_cpu_id *id; + const char *owner; int rc; edac_dbg(2, "\n"); + owner = edac_get_owner(); + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR))) + return -EBUSY; + id = x86_match_cpu(sbridge_cpuids); if (!id) return -ENODEV; diff --git a/drivers/edac/skx_edac.c b/drivers/edac/skx_edac.c index 16dea97568a1..912c4930c9ef 100644 --- a/drivers/edac/skx_edac.c +++ b/drivers/edac/skx_edac.c @@ -31,6 +31,8 @@ #include "edac_module.h" +#define EDAC_MOD_STR "skx_edac" + /* * Debug macros */ @@ -65,6 +67,7 @@ static u64 skx_tolm, skx_tohm; struct skx_dev { struct list_head list; u8 bus[4]; + int seg; struct pci_dev *sad_all; struct pci_dev *util_all; u32 mcroute; @@ -110,12 +113,12 @@ struct decoded_addr { int bank_group; }; -static struct skx_dev *get_skx_dev(u8 bus, u8 idx) +static struct skx_dev *get_skx_dev(struct pci_bus *bus, u8 idx) { struct skx_dev *d; list_for_each_entry(d, &skx_edac_list, list) { - if (d->bus[idx] == bus) + if (d->seg == pci_domain_nr(bus) && d->bus[idx] == bus->number) return d; } @@ -172,6 +175,7 @@ static int get_all_bus_mappings(void) pci_dev_put(pdev); return -ENOMEM; } + d->seg = pci_domain_nr(pdev->bus); pci_read_config_dword(pdev, 0xCC, &reg); d->bus[0] = GET_BITFIELD(reg, 0, 7); d->bus[1] = GET_BITFIELD(reg, 8, 15); @@ -207,7 +211,7 @@ static int get_all_munits(const struct munit *m) if (i == NUM_IMC) goto fail; } - d = get_skx_dev(pdev->bus->number, m->busidx); + d = get_skx_dev(pdev->bus, m->busidx); if (!d) goto fail; @@ -299,7 +303,7 @@ static int get_dimm_attr(u32 reg, int lobit, int hibit, int add, int minval, #define IS_DIMM_PRESENT(mtr) GET_BITFIELD((mtr), 15, 15) -#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 1, 2, "ranks") +#define numrank(reg) get_dimm_attr((reg), 12, 13, 0, 0, 2, "ranks") #define numrow(reg) get_dimm_attr((reg), 2, 4, 12, 1, 6, "rows") #define numcol(reg) get_dimm_attr((reg), 0, 1, 10, 0, 2, "cols") @@ -360,7 +364,7 @@ static int get_dimm_info(u32 mtr, u32 amap, struct dimm_info *dimm, edac_dbg(0, "mc#%d: channel %d, dimm %d, %lld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n", imc->mc, chan, dimmno, size, npages, - banks, ranks, rows, cols); + banks, 1 << ranks, rows, cols); imc->chan[chan].dimms[dimmno].close_pg = GET_BITFIELD(mtr, 0, 0); imc->chan[chan].dimms[dimmno].bank_xor_enable = GET_BITFIELD(mtr, 9, 9); @@ -464,12 +468,16 @@ static int skx_register_mci(struct skx_imc *imc) pvt = mci->pvt_info; pvt->imc = imc; - mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d", - imc->node_id, imc->lmc); + mci->ctl_name = kasprintf(GFP_KERNEL, "Skylake Socket#%d IMC#%d", imc->node_id, imc->lmc); + if (!mci->ctl_name) { + rc = -ENOMEM; + goto fail0; + } + mci->mtype_cap = MEM_FLAG_DDR4; mci->edac_ctl_cap = EDAC_FLAG_NONE; mci->edac_cap = EDAC_FLAG_NONE; - mci->mod_name = "skx_edac.c"; + mci->mod_name = EDAC_MOD_STR; mci->dev_name = pci_name(imc->chan[0].cdev); mci->ctl_page_to_phys =
NULL; @@ -491,6 +499,7 @@ static int skx_register_mci(struct skx_imc *imc) fail: kfree(mci->ctl_name); +fail0: edac_mc_free(mci); imc->mci = NULL; return rc; @@ -1039,12 +1048,17 @@ static int __init skx_init(void) { const struct x86_cpu_id *id; const struct munit *m; + const char *owner; int rc = 0, i; u8 mc = 0, src_id, node_id; struct skx_dev *d; edac_dbg(2, "\n"); + owner = edac_get_owner(); + if (owner && strncmp(owner, EDAC_MOD_STR, sizeof(EDAC_MOD_STR))) + return -EBUSY; + id = x86_match_cpu(skx_cpuids); if (!id) return -ENODEV; diff --git a/drivers/edac/thunderx_edac.c b/drivers/edac/thunderx_edac.c index f35d87519a3e..4803c6468bab 100644 --- a/drivers/edac/thunderx_edac.c +++ b/drivers/edac/thunderx_edac.c @@ -639,27 +639,6 @@ err_free: return ret; } -#ifdef CONFIG_PM -static int thunderx_lmc_suspend(struct pci_dev *pdev, pm_message_t state) -{ - pci_save_state(pdev); - pci_disable_device(pdev); - - pci_set_power_state(pdev, pci_choose_state(pdev, state)); - - return 0; -} - -static int thunderx_lmc_resume(struct pci_dev *pdev) -{ - pci_set_power_state(pdev, PCI_D0); - pci_enable_wake(pdev, PCI_D0, 0); - pci_restore_state(pdev); - - return 0; -} -#endif - static const struct pci_device_id thunderx_lmc_pci_tbl[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_LMC) }, { 0, }, @@ -834,10 +813,6 @@ static struct pci_driver thunderx_lmc_driver = { .name = "thunderx_lmc_edac", .probe = thunderx_lmc_probe, .remove = thunderx_lmc_remove, -#ifdef CONFIG_PM - .suspend = thunderx_lmc_suspend, - .resume = thunderx_lmc_resume, -#endif .id_table = thunderx_lmc_pci_tbl, }; diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index d6a09b9cd8cc..4372f9e4b0da 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -137,9 +137,9 @@ int fw_cancel_transaction(struct fw_card *card, } EXPORT_SYMBOL(fw_cancel_transaction); -static void split_transaction_timeout_callback(unsigned long data) +static void split_transaction_timeout_callback(struct timer_list *timer) { - struct fw_transaction *t = (struct fw_transaction *)data; + struct fw_transaction *t = from_timer(t, timer, split_timeout_timer); struct fw_card *card = t->card; unsigned long flags; @@ -373,8 +373,8 @@ void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, t->tlabel = tlabel; t->card = card; t->is_split_transaction = false; - setup_timer(&t->split_timeout_timer, - split_transaction_timeout_callback, (unsigned long)t); + timer_setup(&t->split_timeout_timer, + split_transaction_timeout_callback, 0); t->callback = callback; t->callback_data = callback_data; @@ -423,7 +423,7 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id, struct transaction_callback_data d; struct fw_transaction t; - init_timer_on_stack(&t.split_timeout_timer); + timer_setup_on_stack(&t.split_timeout_timer, NULL, 0); init_completion(&d.done); d.payload = payload; fw_send_request(card, &t, tcode, destination_id, generation, speed, diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 8bf89267dc25..ccf52368a073 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -734,7 +734,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, __le16 res_count, next_res_count; i = ar_first_buffer_index(ctx); - res_count = ACCESS_ONCE(ctx->descriptors[i].res_count); + res_count = READ_ONCE(ctx->descriptors[i].res_count); /* A buffer that is not yet completely filled must be the last one. 
*/ while (i != last && res_count == 0) { @@ -742,8 +742,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, /* Peek at the next descriptor. */ next_i = ar_next_buffer_index(i); rmb(); /* read descriptors in order */ - next_res_count = ACCESS_ONCE( - ctx->descriptors[next_i].res_count); + next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); /* * If the next descriptor is still empty, we must stop at this * descriptor. @@ -759,8 +758,7 @@ static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) { next_i = ar_next_buffer_index(next_i); rmb(); - next_res_count = ACCESS_ONCE( - ctx->descriptors[next_i].res_count); + next_res_count = READ_ONCE(ctx->descriptors[next_i].res_count); if (next_res_count != cpu_to_le16(PAGE_SIZE)) goto next_buffer_is_active; } @@ -2812,7 +2810,7 @@ static int handle_ir_buffer_fill(struct context *context, u32 buffer_dma; req_count = le16_to_cpu(last->req_count); - res_count = le16_to_cpu(ACCESS_ONCE(last->res_count)); + res_count = le16_to_cpu(READ_ONCE(last->res_count)); completed = req_count - res_count; buffer_dma = le32_to_cpu(last->data_address); diff --git a/drivers/firmware/tegra/ivc.c b/drivers/firmware/tegra/ivc.c index a01461d63f68..00de793e6423 100644 --- a/drivers/firmware/tegra/ivc.c +++ b/drivers/firmware/tegra/ivc.c @@ -99,11 +99,11 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, { /* * This function performs multiple checks on the same values with - * security implications, so create snapshots with ACCESS_ONCE() to + * security implications, so create snapshots with READ_ONCE() to * ensure that these checks use the same values. */ - u32 tx = ACCESS_ONCE(header->tx.count); - u32 rx = ACCESS_ONCE(header->rx.count); + u32 tx = READ_ONCE(header->tx.count); + u32 rx = READ_ONCE(header->rx.count); /* * Perform an over-full check to prevent denial of service attacks @@ -124,8 +124,8 @@ static inline bool tegra_ivc_empty(struct tegra_ivc *ivc, static inline bool tegra_ivc_full(struct tegra_ivc *ivc, struct tegra_ivc_header *header) { - u32 tx = ACCESS_ONCE(header->tx.count); - u32 rx = ACCESS_ONCE(header->rx.count); + u32 tx = READ_ONCE(header->tx.count); + u32 rx = READ_ONCE(header->rx.count); /* * Invalid cases where the counters indicate that the queue is over @@ -137,8 +137,8 @@ static inline bool tegra_ivc_full(struct tegra_ivc *ivc, static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, struct tegra_ivc_header *header) { - u32 tx = ACCESS_ONCE(header->tx.count); - u32 rx = ACCESS_ONCE(header->rx.count); + u32 tx = READ_ONCE(header->tx.count); + u32 rx = READ_ONCE(header->rx.count); /* * This function isn't expected to be used in scenarios where an @@ -151,8 +151,8 @@ static inline u32 tegra_ivc_available(struct tegra_ivc *ivc, static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc) { - ACCESS_ONCE(ivc->tx.channel->tx.count) = - ACCESS_ONCE(ivc->tx.channel->tx.count) + 1; + WRITE_ONCE(ivc->tx.channel->tx.count, + READ_ONCE(ivc->tx.channel->tx.count) + 1); if (ivc->tx.position == ivc->num_frames - 1) ivc->tx.position = 0; @@ -162,8 +162,8 @@ static inline void tegra_ivc_advance_tx(struct tegra_ivc *ivc) static inline void tegra_ivc_advance_rx(struct tegra_ivc *ivc) { - ACCESS_ONCE(ivc->rx.channel->rx.count) = - ACCESS_ONCE(ivc->rx.channel->rx.count) + 1; + WRITE_ONCE(ivc->rx.channel->rx.count, + READ_ONCE(ivc->rx.channel->rx.count) + 1); if (ivc->rx.position == ivc->num_frames - 1) ivc->rx.position = 0; @@ -428,7 +428,7 @@ int 
tegra_ivc_notified(struct tegra_ivc *ivc) /* Copy the receiver's state out of shared memory. */ tegra_ivc_invalidate(ivc, ivc->rx.phys + offset); - state = ACCESS_ONCE(ivc->rx.channel->tx.state); + state = READ_ONCE(ivc->rx.channel->tx.state); if (state == TEGRA_IVC_STATE_SYNC) { offset = offsetof(struct tegra_ivc_header, tx.count); diff --git a/drivers/gpio/gpio-xgene-sb.c b/drivers/gpio/gpio-xgene-sb.c index 033258634b8c..b5843fe6a44d 100644 --- a/drivers/gpio/gpio-xgene-sb.c +++ b/drivers/gpio/gpio-xgene-sb.c @@ -140,8 +140,9 @@ static int xgene_gpio_sb_to_irq(struct gpio_chip *gc, u32 gpio) return irq_create_fwspec_mapping(&fwspec); } -static void xgene_gpio_sb_domain_activate(struct irq_domain *d, - struct irq_data *irq_data) +static int xgene_gpio_sb_domain_activate(struct irq_domain *d, + struct irq_data *irq_data, + bool early) { struct xgene_gpio_sb *priv = d->host_data; u32 gpio = HWIRQ_TO_GPIO(priv, irq_data->hwirq); @@ -150,11 +151,12 @@ static void xgene_gpio_sb_domain_activate(struct irq_domain *d, dev_err(priv->gc.parent, "Unable to configure XGene GPIO standby pin %d as IRQ\n", gpio); - return; + return -ENOSPC; } xgene_gpio_set_bit(&priv->gc, priv->regs + MPA_GPIO_SEL_LO, gpio * 2, 1); + return 0; } static void xgene_gpio_sb_domain_deactivate(struct irq_domain *d, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c index 333bad749067..303b5e099a98 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c @@ -260,7 +260,7 @@ static void amdgpu_fence_fallback(unsigned long arg) */ int amdgpu_fence_wait_empty(struct amdgpu_ring *ring) { - uint64_t seq = ACCESS_ONCE(ring->fence_drv.sync_seq); + uint64_t seq = READ_ONCE(ring->fence_drv.sync_seq); struct dma_fence *fence, **ptr; int r; @@ -300,7 +300,7 @@ unsigned amdgpu_fence_count_emitted(struct amdgpu_ring *ring) amdgpu_fence_process(ring); emitted = 0x100000000ull; emitted -= atomic_read(&ring->fence_drv.last_seq); - emitted += ACCESS_ONCE(ring->fence_drv.sync_seq); + emitted += READ_ONCE(ring->fence_drv.sync_seq); return lower_32_bits(emitted); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 7171968f261e..6149a47fe63d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -788,11 +788,11 @@ static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data) seq_printf(m, "\t0x%08x: %12ld byte %s", id, amdgpu_bo_size(bo), placement); - offset = ACCESS_ONCE(bo->tbo.mem.start); + offset = READ_ONCE(bo->tbo.mem.start); if (offset != AMDGPU_BO_INVALID_OFFSET) seq_printf(m, " @ 0x%010Lx", offset); - pin_count = ACCESS_ONCE(bo->pin_count); + pin_count = READ_ONCE(bo->pin_count); if (pin_count) seq_printf(m, " pin count %d", pin_count); seq_printf(m, "\n"); diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c index 38cea6fb25a8..a25f6c72f219 100644 --- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c +++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c @@ -187,7 +187,7 @@ static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity) if (kfifo_is_empty(&entity->job_queue)) return false; - if (ACCESS_ONCE(entity->dependency)) + if (READ_ONCE(entity->dependency)) return false; return true; diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c index fc9a6a83dfc7..4b152e0d31a6 100644 --- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c +++ 
b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c @@ -975,9 +975,9 @@ static void hangcheck_timer_reset(struct etnaviv_gpu *gpu) round_jiffies_up(jiffies + DRM_ETNAVIV_HANGCHECK_JIFFIES)); } -static void hangcheck_handler(unsigned long data) +static void hangcheck_handler(struct timer_list *t) { - struct etnaviv_gpu *gpu = (struct etnaviv_gpu *)data; + struct etnaviv_gpu *gpu = from_timer(gpu, t, hangcheck_timer); u32 fence = gpu->completed_fence; bool progress = false; @@ -1648,8 +1648,7 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master, INIT_WORK(&gpu->recover_work, recover_worker); init_waitqueue_head(&gpu->fence_event); - setup_deferrable_timer(&gpu->hangcheck_timer, hangcheck_handler, - (unsigned long)gpu); + timer_setup(&gpu->hangcheck_timer, hangcheck_handler, TIMER_DEFERRABLE); priv->gpu[priv->num_gpus++] = gpu; diff --git a/drivers/gpu/drm/gma500/psb_lid.c b/drivers/gpu/drm/gma500/psb_lid.c index 1d2ebb5e530f..be6dda58fcae 100644 --- a/drivers/gpu/drm/gma500/psb_lid.c +++ b/drivers/gpu/drm/gma500/psb_lid.c @@ -23,9 +23,9 @@ #include "psb_intel_reg.h" #include <linux/spinlock.h> -static void psb_lid_timer_func(unsigned long data) +static void psb_lid_timer_func(struct timer_list *t) { - struct drm_psb_private * dev_priv = (struct drm_psb_private *)data; + struct drm_psb_private *dev_priv = from_timer(dev_priv, t, lid_timer); struct drm_device *dev = (struct drm_device *)dev_priv->dev; struct timer_list *lid_timer = &dev_priv->lid_timer; unsigned long irq_flags; @@ -77,10 +77,8 @@ void psb_lid_timer_init(struct drm_psb_private *dev_priv) spin_lock_init(&dev_priv->lid_lock); spin_lock_irqsave(&dev_priv->lid_lock, irq_flags); - init_timer(lid_timer); + timer_setup(lid_timer, psb_lid_timer_func, 0); - lid_timer->data = (unsigned long)dev_priv; - lid_timer->function = psb_lid_timer_func; lid_timer->expires = jiffies + PSB_LID_DELAY; add_timer(lid_timer); diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index 3386452bd2f0..cf3deb283da5 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -451,7 +451,7 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data, else r = 0; - cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); + cur_placement = READ_ONCE(robj->tbo.mem.mem_type); args->domain = radeon_mem_type_to_domain(cur_placement); drm_gem_object_put_unlocked(gobj); return r; @@ -481,7 +481,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data, r = ret; /* Flush HDP cache via MMIO if necessary */ - cur_placement = ACCESS_ONCE(robj->tbo.mem.mem_type); + cur_placement = READ_ONCE(robj->tbo.mem.mem_type); if (rdev->asic->mmio_hdp_flush && radeon_mem_type_to_domain(cur_placement) == RADEON_GEM_DOMAIN_VRAM) robj->rdev->asic->mmio_hdp_flush(rdev); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index a552e4ea5440..6ac094ee8983 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -904,7 +904,7 @@ vmw_surface_handle_reference(struct vmw_private *dev_priv, if (unlikely(drm_is_render_client(file_priv))) require_exist = true; - if (ACCESS_ONCE(vmw_fpriv(file_priv)->locked_master)) { + if (READ_ONCE(vmw_fpriv(file_priv)->locked_master)) { DRM_ERROR("Locked master refused legacy " "surface reference.\n"); return -EACCES; diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index 93d28c0ec8bf..9b167bc6eee4 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ 
b/drivers/hsi/clients/ssi_protocol.c @@ -464,10 +464,10 @@ static void ssip_error(struct hsi_client *cl) hsi_async_read(cl, msg); } -static void ssip_keep_alive(unsigned long data) +static void ssip_keep_alive(struct timer_list *t) { - struct hsi_client *cl = (struct hsi_client *)data; - struct ssi_protocol *ssi = hsi_client_drvdata(cl); + struct ssi_protocol *ssi = from_timer(ssi, t, keep_alive); + struct hsi_client *cl = ssi->cl; dev_dbg(&cl->device, "Keep alive kick in: m(%d) r(%d) s(%d)\n", ssi->main_state, ssi->recv_state, ssi->send_state); @@ -490,9 +490,19 @@ static void ssip_keep_alive(unsigned long data) spin_unlock(&ssi->lock); } -static void ssip_wd(unsigned long data) +static void ssip_rx_wd(struct timer_list *t) +{ + struct ssi_protocol *ssi = from_timer(ssi, t, rx_wd); + struct hsi_client *cl = ssi->cl; + + dev_err(&cl->device, "Watchdog trigerred\n"); + ssip_error(cl); +} + +static void ssip_tx_wd(struct timer_list *t) { - struct hsi_client *cl = (struct hsi_client *)data; + struct ssi_protocol *ssi = from_timer(ssi, t, tx_wd); + struct hsi_client *cl = ssi->cl; dev_err(&cl->device, "Watchdog trigerred\n"); ssip_error(cl); @@ -1084,15 +1094,9 @@ static int ssi_protocol_probe(struct device *dev) } spin_lock_init(&ssi->lock); - init_timer_deferrable(&ssi->rx_wd); - init_timer_deferrable(&ssi->tx_wd); - init_timer(&ssi->keep_alive); - ssi->rx_wd.data = (unsigned long)cl; - ssi->rx_wd.function = ssip_wd; - ssi->tx_wd.data = (unsigned long)cl; - ssi->tx_wd.function = ssip_wd; - ssi->keep_alive.data = (unsigned long)cl; - ssi->keep_alive.function = ssip_keep_alive; + timer_setup(&ssi->rx_wd, ssip_rx_wd, TIMER_DEFERRABLE); + timer_setup(&ssi->tx_wd, ssip_tx_wd, TIMER_DEFERRABLE); + timer_setup(&ssi->keep_alive, ssip_keep_alive, 0); INIT_LIST_HEAD(&ssi->txqueue); INIT_LIST_HEAD(&ssi->cmdqueue); atomic_set(&ssi->tx_usecnt, 0); diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c index 937801ac2fe0..2cd134dd94d2 100644 --- a/drivers/hv/vmbus_drv.c +++ b/drivers/hv/vmbus_drv.c @@ -1534,7 +1534,7 @@ static int __init hv_acpi_init(void) { int ret, t; - if (x86_hyper != &x86_hyper_ms_hyperv) + if (x86_hyper_type != X86_HYPER_MS_HYPERV) return -ENODEV; init_completion(&probe_event); diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig index d65431417b17..7ad017690e3a 100644 --- a/drivers/hwmon/Kconfig +++ b/drivers/hwmon/Kconfig @@ -552,6 +552,7 @@ config SENSORS_G762 config SENSORS_GPIO_FAN tristate "GPIO fan" + depends on OF_GPIO depends on GPIOLIB || COMPILE_TEST depends on THERMAL || THERMAL=n help @@ -862,6 +863,20 @@ tristate "MAX31722 temperature sensor" This driver can also be built as a module. If so, the module will be called max31722. +config SENSORS_MAX6621 + tristate "Maxim MAX6621 sensor chip" + depends on I2C + select REGMAP_I2C + help + If you say yes here you get support for MAX6621 sensor chip. + MAX6621 is a PECI-to-I2C translator provides an efficient, + low-cost solution for PECI-to-SMBus/I2C protocol conversion. + It allows reading the temperature from the PECI-compliant + host directly from up to four PECI-enabled CPUs. + + This driver can also be built as a module. If so, the module + will be called max6621. 
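Most of the timer conversions in this series (powernv-cpufreq, artpec6, picoxcell, firewire, etnaviv, gma500, and the ssi_protocol hunk above) follow the same recipe: the callback now takes a struct timer_list *, from_timer() recovers the structure the timer is embedded in, and any context that used to be smuggled through the old unsigned long data argument becomes a field stored next to the timer (gpstates->policy, ssi->cl). A generic sketch of the converted shape, using a made-up driver structure rather than any of the files above:

    #include <linux/device.h>
    #include <linux/jiffies.h>
    #include <linux/timer.h>

    struct my_dev {
            struct timer_list timer;
            struct device *dev;     /* replaces the old 'unsigned long data' cookie */
    };

    static void my_dev_timeout(struct timer_list *t)
    {
            /* Recover the container from the timer embedded in it. */
            struct my_dev *md = from_timer(md, t, timer);

            dev_warn(md->dev, "request timed out\n");
    }

    static void my_dev_start(struct my_dev *md, struct device *dev)
    {
            md->dev = dev;
            timer_setup(&md->timer, my_dev_timeout, TIMER_DEFERRABLE);
            mod_timer(&md->timer, jiffies + msecs_to_jiffies(100));
    }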
+ config SENSORS_MAX6639 tristate "Maxim MAX6639 sensor chip" depends on I2C diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile index 23e195a5a2f3..0fe489fab663 100644 --- a/drivers/hwmon/Makefile +++ b/drivers/hwmon/Makefile @@ -118,6 +118,7 @@ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o obj-$(CONFIG_SENSORS_MAX1668) += max1668.o obj-$(CONFIG_SENSORS_MAX197) += max197.o obj-$(CONFIG_SENSORS_MAX31722) += max31722.o +obj-$(CONFIG_SENSORS_MAX6621) += max6621.o obj-$(CONFIG_SENSORS_MAX6639) += max6639.o obj-$(CONFIG_SENSORS_MAX6642) += max6642.o obj-$(CONFIG_SENSORS_MAX6650) += max6650.o diff --git a/drivers/hwmon/asc7621.c b/drivers/hwmon/asc7621.c index 4875e99b59c9..6d34c05a4f83 100644 --- a/drivers/hwmon/asc7621.c +++ b/drivers/hwmon/asc7621.c @@ -579,7 +579,6 @@ static ssize_t show_pwm_enable(struct device *dev, mutex_unlock(&data->update_lock); val = config | (altbit << 3); - newval = 0; if (val == 3 || val >= 10) newval = 255; diff --git a/drivers/hwmon/aspeed-pwm-tacho.c b/drivers/hwmon/aspeed-pwm-tacho.c index 69b97d45e3cb..63a95e23ca81 100644 --- a/drivers/hwmon/aspeed-pwm-tacho.c +++ b/drivers/hwmon/aspeed-pwm-tacho.c @@ -7,19 +7,19 @@ */ #include <linux/clk.h> +#include <linux/delay.h> #include <linux/errno.h> #include <linux/gpio/consumer.h> -#include <linux/delay.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> -#include <linux/of_platform.h> #include <linux/of_device.h> +#include <linux/of_platform.h> #include <linux/platform_device.h> -#include <linux/sysfs.h> #include <linux/regmap.h> +#include <linux/sysfs.h> #include <linux/thermal.h> /* ASPEED PWM & FAN Tach Register Definition */ @@ -161,7 +161,7 @@ * 11: reserved. */ #define M_TACH_MODE 0x02 /* 10b */ -#define M_TACH_UNIT 0x00c0 +#define M_TACH_UNIT 0x0210 #define INIT_FAN_CTRL 0xFF /* How long we sleep in us while waiting for an RPM result. */ diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c index 9c355b9d31c5..5c9a52599cf6 100644 --- a/drivers/hwmon/gpio-fan.c +++ b/drivers/hwmon/gpio-fan.c @@ -29,21 +29,24 @@ #include <linux/err.h> #include <linux/mutex.h> #include <linux/hwmon.h> -#include <linux/gpio.h> -#include <linux/gpio-fan.h> +#include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/of_platform.h> -#include <linux/of_gpio.h> #include <linux/thermal.h> +struct gpio_fan_speed { + int rpm; + int ctrl_val; +}; + struct gpio_fan_data { - struct platform_device *pdev; + struct device *dev; struct device *hwmon_dev; /* Cooling device if any */ struct thermal_cooling_device *cdev; struct mutex lock; /* lock GPIOs operations. 
*/ - int num_ctrl; - unsigned *ctrl; + int num_gpios; + struct gpio_desc **gpios; int num_speed; struct gpio_fan_speed *speed; int speed_index; @@ -51,7 +54,7 @@ struct gpio_fan_data { int resume_speed; #endif bool pwm_enable; - struct gpio_fan_alarm *alarm; + struct gpio_desc *alarm_gpio; struct work_struct alarm_work; }; @@ -64,8 +67,8 @@ static void fan_alarm_notify(struct work_struct *ws) struct gpio_fan_data *fan_data = container_of(ws, struct gpio_fan_data, alarm_work); - sysfs_notify(&fan_data->pdev->dev.kobj, NULL, "fan1_alarm"); - kobject_uevent(&fan_data->pdev->dev.kobj, KOBJ_CHANGE); + sysfs_notify(&fan_data->dev->kobj, NULL, "fan1_alarm"); + kobject_uevent(&fan_data->dev->kobj, KOBJ_CHANGE); } static irqreturn_t fan_alarm_irq_handler(int irq, void *dev_id) @@ -81,47 +84,30 @@ static ssize_t fan1_alarm_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gpio_fan_data *fan_data = dev_get_drvdata(dev); - struct gpio_fan_alarm *alarm = fan_data->alarm; - int value = gpio_get_value_cansleep(alarm->gpio); - if (alarm->active_low) - value = !value; - - return sprintf(buf, "%d\n", value); + return sprintf(buf, "%d\n", + gpiod_get_value_cansleep(fan_data->alarm_gpio)); } static DEVICE_ATTR_RO(fan1_alarm); -static int fan_alarm_init(struct gpio_fan_data *fan_data, - struct gpio_fan_alarm *alarm) +static int fan_alarm_init(struct gpio_fan_data *fan_data) { - int err; int alarm_irq; - struct platform_device *pdev = fan_data->pdev; - - fan_data->alarm = alarm; - - err = devm_gpio_request(&pdev->dev, alarm->gpio, "GPIO fan alarm"); - if (err) - return err; - - err = gpio_direction_input(alarm->gpio); - if (err) - return err; + struct device *dev = fan_data->dev; /* * If the alarm GPIO don't support interrupts, just leave * without initializing the fail notification support. 
*/ - alarm_irq = gpio_to_irq(alarm->gpio); - if (alarm_irq < 0) + alarm_irq = gpiod_to_irq(fan_data->alarm_gpio); + if (alarm_irq <= 0) return 0; INIT_WORK(&fan_data->alarm_work, fan_alarm_notify); irq_set_irq_type(alarm_irq, IRQ_TYPE_EDGE_BOTH); - err = devm_request_irq(&pdev->dev, alarm_irq, fan_alarm_irq_handler, - IRQF_SHARED, "GPIO fan alarm", fan_data); - return err; + return devm_request_irq(dev, alarm_irq, fan_alarm_irq_handler, + IRQF_SHARED, "GPIO fan alarm", fan_data); } /* @@ -133,8 +119,9 @@ static void __set_fan_ctrl(struct gpio_fan_data *fan_data, int ctrl_val) { int i; - for (i = 0; i < fan_data->num_ctrl; i++) - gpio_set_value_cansleep(fan_data->ctrl[i], (ctrl_val >> i) & 1); + for (i = 0; i < fan_data->num_gpios; i++) + gpiod_set_value_cansleep(fan_data->gpios[i], + (ctrl_val >> i) & 1); } static int __get_fan_ctrl(struct gpio_fan_data *fan_data) @@ -142,10 +129,10 @@ static int __get_fan_ctrl(struct gpio_fan_data *fan_data) int i; int ctrl_val = 0; - for (i = 0; i < fan_data->num_ctrl; i++) { + for (i = 0; i < fan_data->num_gpios; i++) { int value; - value = gpio_get_value_cansleep(fan_data->ctrl[i]); + value = gpiod_get_value_cansleep(fan_data->gpios[i]); ctrl_val |= (value << i); } return ctrl_val; @@ -170,7 +157,7 @@ static int get_fan_speed_index(struct gpio_fan_data *fan_data) if (fan_data->speed[i].ctrl_val == ctrl_val) return i; - dev_warn(&fan_data->pdev->dev, + dev_warn(fan_data->dev, "missing speed array entry for GPIO value 0x%x\n", ctrl_val); return -ENODEV; @@ -328,9 +315,9 @@ static umode_t gpio_fan_is_visible(struct kobject *kobj, struct device *dev = container_of(kobj, struct device, kobj); struct gpio_fan_data *data = dev_get_drvdata(dev); - if (index == 0 && !data->alarm) + if (index == 0 && !data->alarm_gpio) return 0; - if (index > 0 && !data->ctrl) + if (index > 0 && !data->gpios) return 0; return attr->mode; @@ -358,30 +345,25 @@ static const struct attribute_group *gpio_fan_groups[] = { NULL }; -static int fan_ctrl_init(struct gpio_fan_data *fan_data, - struct gpio_fan_platform_data *pdata) +static int fan_ctrl_init(struct gpio_fan_data *fan_data) { - struct platform_device *pdev = fan_data->pdev; - int num_ctrl = pdata->num_ctrl; - unsigned *ctrl = pdata->ctrl; + int num_gpios = fan_data->num_gpios; + struct gpio_desc **gpios = fan_data->gpios; int i, err; - for (i = 0; i < num_ctrl; i++) { - err = devm_gpio_request(&pdev->dev, ctrl[i], - "GPIO fan control"); - if (err) - return err; - - err = gpio_direction_output(ctrl[i], - gpio_get_value_cansleep(ctrl[i])); + for (i = 0; i < num_gpios; i++) { + /* + * The GPIO descriptors were retrieved with GPIOD_ASIS so here + * we set the GPIO into output mode, carefully preserving the + * current value by setting it to whatever it is already set + * (no surprise changes in default fan speed). + */ + err = gpiod_direction_output(gpios[i], + gpiod_get_value_cansleep(gpios[i])); if (err) return err; } - fan_data->num_ctrl = num_ctrl; - fan_data->ctrl = ctrl; - fan_data->num_speed = pdata->num_speed; - fan_data->speed = pdata->speed; fan_data->pwm_enable = true; /* Enable manual fan speed control. 
*/ fan_data->speed_index = get_fan_speed_index(fan_data); if (fan_data->speed_index < 0) @@ -432,67 +414,47 @@ static const struct thermal_cooling_device_ops gpio_fan_cool_ops = { .set_cur_state = gpio_fan_set_cur_state, }; -#ifdef CONFIG_OF_GPIO /* * Translate OpenFirmware node properties into platform_data */ -static int gpio_fan_get_of_pdata(struct device *dev, - struct gpio_fan_platform_data *pdata) +static int gpio_fan_get_of_data(struct gpio_fan_data *fan_data) { - struct device_node *node; struct gpio_fan_speed *speed; - unsigned *ctrl; + struct device *dev = fan_data->dev; + struct device_node *np = dev->of_node; + struct gpio_desc **gpios; unsigned i; u32 u; struct property *prop; const __be32 *p; - node = dev->of_node; - /* Alarm GPIO if one exists */ - if (of_gpio_named_count(node, "alarm-gpios") > 0) { - struct gpio_fan_alarm *alarm; - int val; - enum of_gpio_flags flags; - - alarm = devm_kzalloc(dev, sizeof(struct gpio_fan_alarm), - GFP_KERNEL); - if (!alarm) - return -ENOMEM; - - val = of_get_named_gpio_flags(node, "alarm-gpios", 0, &flags); - if (val < 0) - return val; - alarm->gpio = val; - alarm->active_low = flags & OF_GPIO_ACTIVE_LOW; - - pdata->alarm = alarm; - } + fan_data->alarm_gpio = devm_gpiod_get_optional(dev, "alarm", GPIOD_IN); + if (IS_ERR(fan_data->alarm_gpio)) + return PTR_ERR(fan_data->alarm_gpio); /* Fill GPIO pin array */ - pdata->num_ctrl = of_gpio_count(node); - if (pdata->num_ctrl <= 0) { - if (pdata->alarm) + fan_data->num_gpios = gpiod_count(dev, NULL); + if (fan_data->num_gpios <= 0) { + if (fan_data->alarm_gpio) return 0; dev_err(dev, "DT properties empty / missing"); return -ENODEV; } - ctrl = devm_kzalloc(dev, pdata->num_ctrl * sizeof(unsigned), - GFP_KERNEL); - if (!ctrl) + gpios = devm_kzalloc(dev, + fan_data->num_gpios * sizeof(struct gpio_desc *), + GFP_KERNEL); + if (!gpios) return -ENOMEM; - for (i = 0; i < pdata->num_ctrl; i++) { - int val; - - val = of_get_gpio(node, i); - if (val < 0) - return val; - ctrl[i] = val; + for (i = 0; i < fan_data->num_gpios; i++) { + gpios[i] = devm_gpiod_get_index(dev, NULL, i, GPIOD_ASIS); + if (IS_ERR(gpios[i])) + return PTR_ERR(gpios[i]); } - pdata->ctrl = ctrl; + fan_data->gpios = gpios; /* Get number of RPM/ctrl_val pairs in speed map */ - prop = of_find_property(node, "gpio-fan,speed-map", &i); + prop = of_find_property(np, "gpio-fan,speed-map", &i); if (!prop) { dev_err(dev, "gpio-fan,speed-map DT property missing"); return -ENODEV; @@ -502,7 +464,7 @@ static int gpio_fan_get_of_pdata(struct device *dev, dev_err(dev, "gpio-fan,speed-map contains zero/odd number of entries"); return -ENODEV; } - pdata->num_speed = i / 2; + fan_data->num_speed = i / 2; /* * Populate speed map @@ -510,12 +472,12 @@ static int gpio_fan_get_of_pdata(struct device *dev, * this needs splitting into pairs to create gpio_fan_speed structs */ speed = devm_kzalloc(dev, - pdata->num_speed * sizeof(struct gpio_fan_speed), + fan_data->num_speed * sizeof(struct gpio_fan_speed), GFP_KERNEL); if (!speed) return -ENOMEM; p = NULL; - for (i = 0; i < pdata->num_speed; i++) { + for (i = 0; i < fan_data->num_speed; i++) { p = of_prop_next_u32(prop, p, &u); if (!p) return -ENODEV; @@ -525,7 +487,7 @@ static int gpio_fan_get_of_pdata(struct device *dev, return -ENODEV; speed[i].ctrl_val = u; } - pdata->speed = speed; + fan_data->speed = speed; return 0; } @@ -535,76 +497,58 @@ static const struct of_device_id of_gpio_fan_match[] = { {}, }; MODULE_DEVICE_TABLE(of, of_gpio_fan_match); -#endif /* CONFIG_OF_GPIO */ static int 
gpio_fan_probe(struct platform_device *pdev) { int err; struct gpio_fan_data *fan_data; - struct gpio_fan_platform_data *pdata = dev_get_platdata(&pdev->dev); + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; - fan_data = devm_kzalloc(&pdev->dev, sizeof(struct gpio_fan_data), + fan_data = devm_kzalloc(dev, sizeof(struct gpio_fan_data), GFP_KERNEL); if (!fan_data) return -ENOMEM; -#ifdef CONFIG_OF_GPIO - if (!pdata) { - pdata = devm_kzalloc(&pdev->dev, - sizeof(struct gpio_fan_platform_data), - GFP_KERNEL); - if (!pdata) - return -ENOMEM; - - err = gpio_fan_get_of_pdata(&pdev->dev, pdata); - if (err) - return err; - } -#else /* CONFIG_OF_GPIO */ - if (!pdata) - return -EINVAL; -#endif /* CONFIG_OF_GPIO */ + fan_data->dev = dev; + err = gpio_fan_get_of_data(fan_data); + if (err) + return err; - fan_data->pdev = pdev; platform_set_drvdata(pdev, fan_data); mutex_init(&fan_data->lock); /* Configure alarm GPIO if available. */ - if (pdata->alarm) { - err = fan_alarm_init(fan_data, pdata->alarm); + if (fan_data->alarm_gpio) { + err = fan_alarm_init(fan_data); if (err) return err; } /* Configure control GPIOs if available. */ - if (pdata->ctrl && pdata->num_ctrl > 0) { - if (!pdata->speed || pdata->num_speed <= 1) + if (fan_data->gpios && fan_data->num_gpios > 0) { + if (!fan_data->speed || fan_data->num_speed <= 1) return -EINVAL; - err = fan_ctrl_init(fan_data, pdata); + err = fan_ctrl_init(fan_data); if (err) return err; } /* Make this driver part of hwmon class. */ fan_data->hwmon_dev = - devm_hwmon_device_register_with_groups(&pdev->dev, + devm_hwmon_device_register_with_groups(dev, "gpio_fan", fan_data, gpio_fan_groups); if (IS_ERR(fan_data->hwmon_dev)) return PTR_ERR(fan_data->hwmon_dev); -#ifdef CONFIG_OF_GPIO + /* Optional cooling device register for Device tree platforms */ - fan_data->cdev = thermal_of_cooling_device_register(pdev->dev.of_node, + fan_data->cdev = thermal_of_cooling_device_register(np, "gpio-fan", fan_data, &gpio_fan_cool_ops); -#else /* CONFIG_OF_GPIO */ - /* Optional cooling device register for non Device tree platforms */ - fan_data->cdev = thermal_cooling_device_register("gpio-fan", fan_data, - &gpio_fan_cool_ops); -#endif /* CONFIG_OF_GPIO */ - dev_info(&pdev->dev, "GPIO fan initialized\n"); + dev_info(dev, "GPIO fan initialized\n"); return 0; } @@ -616,7 +560,7 @@ static int gpio_fan_remove(struct platform_device *pdev) if (!IS_ERR(fan_data->cdev)) thermal_cooling_device_unregister(fan_data->cdev); - if (fan_data->ctrl) + if (fan_data->gpios) set_fan_speed(fan_data, 0); return 0; @@ -632,7 +576,7 @@ static int gpio_fan_suspend(struct device *dev) { struct gpio_fan_data *fan_data = dev_get_drvdata(dev); - if (fan_data->ctrl) { + if (fan_data->gpios) { fan_data->resume_speed = fan_data->speed_index; set_fan_speed(fan_data, 0); } @@ -644,7 +588,7 @@ static int gpio_fan_resume(struct device *dev) { struct gpio_fan_data *fan_data = dev_get_drvdata(dev); - if (fan_data->ctrl) + if (fan_data->gpios) set_fan_speed(fan_data, fan_data->resume_speed); return 0; @@ -663,9 +607,7 @@ static struct platform_driver gpio_fan_driver = { .driver = { .name = "gpio-fan", .pm = GPIO_FAN_PM, -#ifdef CONFIG_OF_GPIO .of_match_table = of_match_ptr(of_gpio_fan_match), -#endif }, }; diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index ce3b91f22e30..46a54ed23410 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -36,6 +36,10 @@ MODULE_PARM_DESC(force, "force loading on processors with erratum 319"); /* Provide lock for writing to 
NB_SMU_IND_ADDR */ static DEFINE_MUTEX(nb_smu_ind_mutex); +#ifndef PCI_DEVICE_ID_AMD_17H_DF_F3 +#define PCI_DEVICE_ID_AMD_17H_DF_F3 0x1463 +#endif + /* CPUID function 0x80000001, ebx */ #define CPUID_PKGTYPE_MASK 0xf0000000 #define CPUID_PKGTYPE_F 0x00000000 @@ -61,31 +65,72 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); */ #define F15H_M60H_REPORTED_TEMP_CTRL_OFFSET 0xd8200ca4 -static void amd_nb_smu_index_read(struct pci_dev *pdev, unsigned int devfn, - int offset, u32 *val) +/* F17h M01h Access througn SMN */ +#define F17H_M01H_REPORTED_TEMP_CTRL_OFFSET 0x00059800 + +struct k10temp_data { + struct pci_dev *pdev; + void (*read_tempreg)(struct pci_dev *pdev, u32 *regval); + int temp_offset; +}; + +struct tctl_offset { + u8 model; + char const *id; + int offset; +}; + +static const struct tctl_offset tctl_offset_table[] = { + { 0x17, "AMD Ryzen 7 1600X", 20000 }, + { 0x17, "AMD Ryzen 7 1700X", 20000 }, + { 0x17, "AMD Ryzen 7 1800X", 20000 }, + { 0x17, "AMD Ryzen Threadripper 1950X", 27000 }, + { 0x17, "AMD Ryzen Threadripper 1920X", 27000 }, + { 0x17, "AMD Ryzen Threadripper 1950", 10000 }, + { 0x17, "AMD Ryzen Threadripper 1920", 10000 }, + { 0x17, "AMD Ryzen Threadripper 1910", 10000 }, +}; + +static void read_tempreg_pci(struct pci_dev *pdev, u32 *regval) +{ + pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, regval); +} + +static void amd_nb_index_read(struct pci_dev *pdev, unsigned int devfn, + unsigned int base, int offset, u32 *val) { mutex_lock(&nb_smu_ind_mutex); pci_bus_write_config_dword(pdev->bus, devfn, - 0xb8, offset); + base, offset); pci_bus_read_config_dword(pdev->bus, devfn, - 0xbc, val); + base + 4, val); mutex_unlock(&nb_smu_ind_mutex); } +static void read_tempreg_nb_f15(struct pci_dev *pdev, u32 *regval) +{ + amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0xb8, + F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, regval); +} + +static void read_tempreg_nb_f17(struct pci_dev *pdev, u32 *regval) +{ + amd_nb_index_read(pdev, PCI_DEVFN(0, 0), 0x60, + F17H_M01H_REPORTED_TEMP_CTRL_OFFSET, regval); +} + static ssize_t temp1_input_show(struct device *dev, struct device_attribute *attr, char *buf) { + struct k10temp_data *data = dev_get_drvdata(dev); u32 regval; - struct pci_dev *pdev = dev_get_drvdata(dev); - - if (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model == 0x60) { - amd_nb_smu_index_read(pdev, PCI_DEVFN(0, 0), - F15H_M60H_REPORTED_TEMP_CTRL_OFFSET, - ®val); - } else { - pci_read_config_dword(pdev, REG_REPORTED_TEMPERATURE, ®val); - } - return sprintf(buf, "%u\n", (regval >> 21) * 125); + unsigned int temp; + + data->read_tempreg(data->pdev, ®val); + temp = (regval >> 21) * 125; + temp -= data->temp_offset; + + return sprintf(buf, "%u\n", temp); } static ssize_t temp1_max_show(struct device *dev, @@ -98,11 +143,12 @@ static ssize_t show_temp_crit(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); + struct k10temp_data *data = dev_get_drvdata(dev); int show_hyst = attr->index; u32 regval; int value; - pci_read_config_dword(dev_get_drvdata(dev), + pci_read_config_dword(data->pdev, REG_HARDWARE_THERMAL_CONTROL, ®val); value = ((regval >> 16) & 0x7f) * 500 + 52000; if (show_hyst) @@ -119,7 +165,8 @@ static umode_t k10temp_is_visible(struct kobject *kobj, struct attribute *attr, int index) { struct device *dev = container_of(kobj, struct device, kobj); - struct pci_dev *pdev = dev_get_drvdata(dev); + struct k10temp_data *data = dev_get_drvdata(dev); + struct pci_dev *pdev = data->pdev; if (index >= 2) { u32 
reg_caps, reg_htc; @@ -187,7 +234,9 @@ static int k10temp_probe(struct pci_dev *pdev, { int unreliable = has_erratum_319(pdev); struct device *dev = &pdev->dev; + struct k10temp_data *data; struct device *hwmon_dev; + int i; if (unreliable) { if (!force) { @@ -199,7 +248,31 @@ static int k10temp_probe(struct pci_dev *pdev, "unreliable CPU thermal sensor; check erratum 319\n"); } - hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", pdev, + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->pdev = pdev; + + if (boot_cpu_data.x86 == 0x15 && (boot_cpu_data.x86_model == 0x60 || + boot_cpu_data.x86_model == 0x70)) + data->read_tempreg = read_tempreg_nb_f15; + else if (boot_cpu_data.x86 == 0x17) + data->read_tempreg = read_tempreg_nb_f17; + else + data->read_tempreg = read_tempreg_pci; + + for (i = 0; i < ARRAY_SIZE(tctl_offset_table); i++) { + const struct tctl_offset *entry = &tctl_offset_table[i]; + + if (boot_cpu_data.x86 == entry->model && + strstr(boot_cpu_data.x86_model_id, entry->id)) { + data->temp_offset = entry->offset; + break; + } + } + + hwmon_dev = devm_hwmon_device_register_with_groups(dev, "k10temp", data, k10temp_groups); return PTR_ERR_OR_ZERO(hwmon_dev); } @@ -214,6 +287,7 @@ static const struct pci_device_id k10temp_id_table[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) }, { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, + { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) }, {} }; MODULE_DEVICE_TABLE(pci, k10temp_id_table); diff --git a/drivers/hwmon/max1619.c b/drivers/hwmon/max1619.c index a18278938494..76d966932941 100644 --- a/drivers/hwmon/max1619.c +++ b/drivers/hwmon/max1619.c @@ -303,10 +303,20 @@ static const struct i2c_device_id max1619_id[] = { }; MODULE_DEVICE_TABLE(i2c, max1619_id); +#ifdef CONFIG_OF +static const struct of_device_id max1619_of_match[] = { + { .compatible = "maxim,max1619", }, + {}, +}; + +MODULE_DEVICE_TABLE(of, max1619_of_match); +#endif + static struct i2c_driver max1619_driver = { .class = I2C_CLASS_HWMON, .driver = { .name = "max1619", + .of_match_table = of_match_ptr(max1619_of_match), }, .probe = max1619_probe, .id_table = max1619_id, diff --git a/drivers/hwmon/max6621.c b/drivers/hwmon/max6621.c new file mode 100644 index 000000000000..35555f0eefb9 --- /dev/null +++ b/drivers/hwmon/max6621.c @@ -0,0 +1,593 @@ +/* + * Hardware monitoring driver for Maxim MAX6621 + * + * Copyright (c) 2017 Mellanox Technologies. All rights reserved. + * Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/bitops.h> +#include <linux/hwmon.h> +#include <linux/hwmon-sysfs.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/regmap.h> + +#define MAX6621_DRV_NAME "max6621" +#define MAX6621_TEMP_INPUT_REG_NUM 9 +#define MAX6621_TEMP_INPUT_MIN -127000 +#define MAX6621_TEMP_INPUT_MAX 128000 +#define MAX6621_TEMP_ALERT_CHAN_SHIFT 1 + +#define MAX6621_TEMP_S0D0_REG 0x00 +#define MAX6621_TEMP_S0D1_REG 0x01 +#define MAX6621_TEMP_S1D0_REG 0x02 +#define MAX6621_TEMP_S1D1_REG 0x03 +#define MAX6621_TEMP_S2D0_REG 0x04 +#define MAX6621_TEMP_S2D1_REG 0x05 +#define MAX6621_TEMP_S3D0_REG 0x06 +#define MAX6621_TEMP_S3D1_REG 0x07 +#define MAX6621_TEMP_MAX_REG 0x08 +#define MAX6621_TEMP_MAX_ADDR_REG 0x0a +#define MAX6621_TEMP_ALERT_CAUSE_REG 0x0b +#define MAX6621_CONFIG0_REG 0x0c +#define MAX6621_CONFIG1_REG 0x0d +#define MAX6621_CONFIG2_REG 0x0e +#define MAX6621_CONFIG3_REG 0x0f +#define MAX6621_TEMP_S0_ALERT_REG 0x10 +#define MAX6621_TEMP_S1_ALERT_REG 0x11 +#define MAX6621_TEMP_S2_ALERT_REG 0x12 +#define MAX6621_TEMP_S3_ALERT_REG 0x13 +#define MAX6621_CLEAR_ALERT_REG 0x15 +#define MAX6621_REG_MAX (MAX6621_CLEAR_ALERT_REG + 1) +#define MAX6621_REG_TEMP_SHIFT 0x06 + +#define MAX6621_ENABLE_TEMP_ALERTS_BIT 4 +#define MAX6621_ENABLE_I2C_CRC_BIT 5 +#define MAX6621_ENABLE_ALTERNATE_DATA 6 +#define MAX6621_ENABLE_LOCKUP_TO 7 +#define MAX6621_ENABLE_S0D0_BIT 8 +#define MAX6621_ENABLE_S3D1_BIT 15 +#define MAX6621_ENABLE_TEMP_ALL GENMASK(MAX6621_ENABLE_S3D1_BIT, \ + MAX6621_ENABLE_S0D0_BIT) +#define MAX6621_POLL_DELAY_MASK 0x5 +#define MAX6621_CONFIG0_INIT (MAX6621_ENABLE_TEMP_ALL | \ + BIT(MAX6621_ENABLE_LOCKUP_TO) | \ + BIT(MAX6621_ENABLE_I2C_CRC_BIT) | \ + MAX6621_POLL_DELAY_MASK) +#define MAX6621_PECI_BIT_TIME 0x2 +#define MAX6621_PECI_RETRY_NUM 0x3 +#define MAX6621_CONFIG1_INIT ((MAX6621_PECI_BIT_TIME << 8) | \ + MAX6621_PECI_RETRY_NUM) + +/* Error codes */ +#define MAX6621_TRAN_FAILED 0x8100 /* + * PECI transaction failed for more + * than the configured number of + * consecutive retries. + */ +#define MAX6621_POOL_DIS 0x8101 /* + * Polling disabled for requested + * socket/domain. + */ +#define MAX6621_POOL_UNCOMPLETE 0x8102 /* + * First poll not yet completed for + * requested socket/domain (on + * startup). + */ +#define MAX6621_SD_DIS 0x8103 /* + * Read maximum temperature requested, + * but no sockets/domains enabled or + * all enabled sockets/domains have + * errors; or read maximum temperature + * address requested, but read maximum + * temperature was not called. + */ +#define MAX6621_ALERT_DIS 0x8104 /* + * Get alert socket/domain requested, + * but no alert active. + */ +#define MAX6621_PECI_ERR_MIN 0x8000 /* Intel spec PECI error min value. */ +#define MAX6621_PECI_ERR_MAX 0x80ff /* Intel spec PECI error max value. 
*/ + +static const u32 max6621_temp_regs[] = { + MAX6621_TEMP_MAX_REG, MAX6621_TEMP_S0D0_REG, MAX6621_TEMP_S1D0_REG, + MAX6621_TEMP_S2D0_REG, MAX6621_TEMP_S3D0_REG, MAX6621_TEMP_S0D1_REG, + MAX6621_TEMP_S1D1_REG, MAX6621_TEMP_S2D1_REG, MAX6621_TEMP_S3D1_REG, +}; + +static const char *const max6621_temp_labels[] = { + "maximum", + "socket0_0", + "socket1_0", + "socket2_0", + "socket3_0", + "socket0_1", + "socket1_1", + "socket2_1", + "socket3_1", +}; + +static const int max6621_temp_alert_chan2reg[] = { + MAX6621_TEMP_S0_ALERT_REG, + MAX6621_TEMP_S1_ALERT_REG, + MAX6621_TEMP_S2_ALERT_REG, + MAX6621_TEMP_S3_ALERT_REG, +}; + +/** + * struct max6621_data - private data: + * + * @client: I2C client; + * @regmap: register map handle; + * @input_chan2reg: mapping from channel to register; + */ +struct max6621_data { + struct i2c_client *client; + struct regmap *regmap; + int input_chan2reg[MAX6621_TEMP_INPUT_REG_NUM + 1]; +}; + +static long max6621_temp_mc2reg(long val) +{ + return (val / 1000L) << MAX6621_REG_TEMP_SHIFT; +} + +static umode_t +max6621_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, + int channel) +{ + /* Skip channels which are not physically conncted. */ + if (((struct max6621_data *)data)->input_chan2reg[channel] < 0) + return 0; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + case hwmon_temp_label: + case hwmon_temp_crit_alarm: + return 0444; + case hwmon_temp_offset: + case hwmon_temp_crit: + return 0644; + default: + break; + } + + default: + break; + } + + return 0; +} + +static int max6621_verify_reg_data(struct device *dev, int regval) +{ + if (regval >= MAX6621_PECI_ERR_MIN && + regval <= MAX6621_PECI_ERR_MAX) { + dev_dbg(dev, "PECI error code - err 0x%04x.\n", + regval); + + return -EIO; + } + + switch (regval) { + case MAX6621_TRAN_FAILED: + dev_dbg(dev, "PECI transaction failed - err 0x%04x.\n", + regval); + return -EIO; + case MAX6621_POOL_DIS: + dev_dbg(dev, "Polling disabled - err 0x%04x.\n", regval); + return -EOPNOTSUPP; + case MAX6621_POOL_UNCOMPLETE: + dev_dbg(dev, "First poll not completed on startup - err 0x%04x.\n", + regval); + return -EIO; + case MAX6621_SD_DIS: + dev_dbg(dev, "Resource is disabled - err 0x%04x.\n", regval); + return -EOPNOTSUPP; + case MAX6621_ALERT_DIS: + dev_dbg(dev, "No alert active - err 0x%04x.\n", regval); + return -EOPNOTSUPP; + default: + return 0; + } +} + +static int +max6621_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, long *val) +{ + struct max6621_data *data = dev_get_drvdata(dev); + u32 regval; + int reg; + s8 temp; + int ret; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_input: + reg = data->input_chan2reg[channel]; + ret = regmap_read(data->regmap, reg, ®val); + if (ret) + return ret; + + ret = max6621_verify_reg_data(dev, regval); + if (ret) + return ret; + + /* + * Bit MAX6621_REG_TEMP_SHIFT represents 1 degree step. + * The temperature is given in two's complement and 8 + * bits is used for the register conversion. 
+ */ + temp = (regval >> MAX6621_REG_TEMP_SHIFT); + *val = temp * 1000L; + + break; + case hwmon_temp_offset: + ret = regmap_read(data->regmap, MAX6621_CONFIG2_REG, + ®val); + if (ret) + return ret; + + ret = max6621_verify_reg_data(dev, regval); + if (ret) + return ret; + + *val = (regval >> MAX6621_REG_TEMP_SHIFT) * + 1000L; + + break; + case hwmon_temp_crit: + channel -= MAX6621_TEMP_ALERT_CHAN_SHIFT; + reg = max6621_temp_alert_chan2reg[channel]; + ret = regmap_read(data->regmap, reg, ®val); + if (ret) + return ret; + + ret = max6621_verify_reg_data(dev, regval); + if (ret) + return ret; + + *val = regval * 1000L; + + break; + case hwmon_temp_crit_alarm: + /* + * Set val to zero to recover the case, when reading + * MAX6621_TEMP_ALERT_CAUSE_REG results in for example + * MAX6621_ALERT_DIS. Reading will return with error, + * but in such case alarm should be returned as 0. + */ + *val = 0; + ret = regmap_read(data->regmap, + MAX6621_TEMP_ALERT_CAUSE_REG, + ®val); + if (ret) + return ret; + + ret = max6621_verify_reg_data(dev, regval); + if (ret) { + /* Do not report error if alert is disabled. */ + if (regval == MAX6621_ALERT_DIS) + return 0; + else + return ret; + } + + /* + * Clear the alert automatically, using send-byte + * smbus protocol for clearing alert. + */ + if (regval) { + ret = i2c_smbus_write_byte(data->client, + MAX6621_CLEAR_ALERT_REG); + if (ret) + return ret; + } + + *val = !!regval; + + break; + default: + return -EOPNOTSUPP; + } + break; + + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static int +max6621_write(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, long val) +{ + struct max6621_data *data = dev_get_drvdata(dev); + u32 reg; + + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_offset: + /* Clamp to allowed range to prevent overflow. */ + val = clamp_val(val, MAX6621_TEMP_INPUT_MIN, + MAX6621_TEMP_INPUT_MAX); + val = max6621_temp_mc2reg(val); + + return regmap_write(data->regmap, + MAX6621_CONFIG2_REG, val); + case hwmon_temp_crit: + channel -= MAX6621_TEMP_ALERT_CHAN_SHIFT; + reg = max6621_temp_alert_chan2reg[channel]; + /* Clamp to allowed range to prevent overflow. 
*/ + val = clamp_val(val, MAX6621_TEMP_INPUT_MIN, + MAX6621_TEMP_INPUT_MAX); + val = val / 1000L; + + return regmap_write(data->regmap, reg, val); + default: + return -EOPNOTSUPP; + } + break; + + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static int +max6621_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr, + int channel, const char **str) +{ + switch (type) { + case hwmon_temp: + switch (attr) { + case hwmon_temp_label: + *str = max6621_temp_labels[channel]; + return 0; + default: + return -EOPNOTSUPP; + } + break; + default: + return -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static bool max6621_writeable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case MAX6621_CONFIG0_REG: + case MAX6621_CONFIG1_REG: + case MAX6621_CONFIG2_REG: + case MAX6621_CONFIG3_REG: + case MAX6621_TEMP_S0_ALERT_REG: + case MAX6621_TEMP_S1_ALERT_REG: + case MAX6621_TEMP_S2_ALERT_REG: + case MAX6621_TEMP_S3_ALERT_REG: + case MAX6621_TEMP_ALERT_CAUSE_REG: + return true; + } + return false; +} + +static bool max6621_readable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case MAX6621_TEMP_S0D0_REG: + case MAX6621_TEMP_S0D1_REG: + case MAX6621_TEMP_S1D0_REG: + case MAX6621_TEMP_S1D1_REG: + case MAX6621_TEMP_S2D0_REG: + case MAX6621_TEMP_S2D1_REG: + case MAX6621_TEMP_S3D0_REG: + case MAX6621_TEMP_S3D1_REG: + case MAX6621_TEMP_MAX_REG: + case MAX6621_TEMP_MAX_ADDR_REG: + case MAX6621_CONFIG0_REG: + case MAX6621_CONFIG1_REG: + case MAX6621_CONFIG2_REG: + case MAX6621_CONFIG3_REG: + case MAX6621_TEMP_S0_ALERT_REG: + case MAX6621_TEMP_S1_ALERT_REG: + case MAX6621_TEMP_S2_ALERT_REG: + case MAX6621_TEMP_S3_ALERT_REG: + return true; + } + return false; +} + +static bool max6621_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case MAX6621_TEMP_S0D0_REG: + case MAX6621_TEMP_S0D1_REG: + case MAX6621_TEMP_S1D0_REG: + case MAX6621_TEMP_S1D1_REG: + case MAX6621_TEMP_S2D0_REG: + case MAX6621_TEMP_S2D1_REG: + case MAX6621_TEMP_S3D0_REG: + case MAX6621_TEMP_S3D1_REG: + case MAX6621_TEMP_MAX_REG: + case MAX6621_TEMP_S0_ALERT_REG: + case MAX6621_TEMP_S1_ALERT_REG: + case MAX6621_TEMP_S2_ALERT_REG: + case MAX6621_TEMP_S3_ALERT_REG: + case MAX6621_TEMP_ALERT_CAUSE_REG: + return true; + } + return false; +} + +static const struct reg_default max6621_regmap_default[] = { + { MAX6621_CONFIG0_REG, MAX6621_CONFIG0_INIT }, + { MAX6621_CONFIG1_REG, MAX6621_CONFIG1_INIT }, +}; + +static const struct regmap_config max6621_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .max_register = MAX6621_REG_MAX, + .val_format_endian = REGMAP_ENDIAN_LITTLE, + .cache_type = REGCACHE_FLAT, + .writeable_reg = max6621_writeable_reg, + .readable_reg = max6621_readable_reg, + .volatile_reg = max6621_volatile_reg, + .reg_defaults = max6621_regmap_default, + .num_reg_defaults = ARRAY_SIZE(max6621_regmap_default), +}; + +static u32 max6621_chip_config[] = { + HWMON_C_REGISTER_TZ, + 0 +}; + +static const struct hwmon_channel_info max6621_chip = { + .type = hwmon_chip, + .config = max6621_chip_config, +}; + +static const u32 max6621_temp_config[] = { + HWMON_T_INPUT | HWMON_T_LABEL | HWMON_T_OFFSET, + HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_CRIT_ALARM | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + HWMON_T_INPUT | 
HWMON_T_LABEL, + HWMON_T_INPUT | HWMON_T_LABEL, + 0 +}; + +static const struct hwmon_channel_info max6621_temp = { + .type = hwmon_temp, + .config = max6621_temp_config, +}; + +static const struct hwmon_channel_info *max6621_info[] = { + &max6621_chip, + &max6621_temp, + NULL +}; + +static const struct hwmon_ops max6621_hwmon_ops = { + .read = max6621_read, + .write = max6621_write, + .read_string = max6621_read_string, + .is_visible = max6621_is_visible, +}; + +static const struct hwmon_chip_info max6621_chip_info = { + .ops = &max6621_hwmon_ops, + .info = max6621_info, +}; + +static int max6621_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct max6621_data *data; + struct device *hwmon_dev; + int i; + int ret; + + data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->regmap = devm_regmap_init_i2c(client, &max6621_regmap_config); + if (IS_ERR(data->regmap)) + return PTR_ERR(data->regmap); + + i2c_set_clientdata(client, data); + data->client = client; + + /* Set CONFIG0 register masking temperature alerts and PEC. */ + ret = regmap_write(data->regmap, MAX6621_CONFIG0_REG, + MAX6621_CONFIG0_INIT); + if (ret) + return ret; + + /* Set CONFIG1 register for PEC access retry number. */ + ret = regmap_write(data->regmap, MAX6621_CONFIG1_REG, + MAX6621_CONFIG1_INIT); + if (ret) + return ret; + + /* Sync registers with hardware. */ + regcache_mark_dirty(data->regmap); + ret = regcache_sync(data->regmap); + if (ret) + return ret; + + /* Verify which temperature input registers are enabled. */ + for (i = 0; i < MAX6621_TEMP_INPUT_REG_NUM; i++) { + ret = i2c_smbus_read_word_data(client, max6621_temp_regs[i]); + if (ret < 0) + return ret; + ret = max6621_verify_reg_data(dev, ret); + if (ret) { + data->input_chan2reg[i] = -1; + continue; + } + + data->input_chan2reg[i] = max6621_temp_regs[i]; + } + + hwmon_dev = devm_hwmon_device_register_with_info(dev, client->name, + data, + &max6621_chip_info, + NULL); + + return PTR_ERR_OR_ZERO(hwmon_dev); +} + +static const struct i2c_device_id max6621_id[] = { + { MAX6621_DRV_NAME, 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, max6621_id); + +static const struct of_device_id max6621_of_match[] = { + { .compatible = "maxim,max6621" }, + { } +}; +MODULE_DEVICE_TABLE(of, max6621_of_match); + +static struct i2c_driver max6621_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = MAX6621_DRV_NAME, + .of_match_table = of_match_ptr(max6621_of_match), + }, + .probe = max6621_probe, + .id_table = max6621_id, +}; + +module_i2c_driver(max6621_driver); + +MODULE_AUTHOR("Vadim Pasternak <vadimp@mellanox.com>"); +MODULE_DESCRIPTION("Driver for Maxim MAX6621"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/pmbus/Kconfig b/drivers/hwmon/pmbus/Kconfig index 40019325b517..08479006c7f9 100644 --- a/drivers/hwmon/pmbus/Kconfig +++ b/drivers/hwmon/pmbus/Kconfig @@ -114,6 +114,16 @@ config SENSORS_MAX20751 This driver can also be built as a module. If so, the module will be called max20751. +config SENSORS_MAX31785 + tristate "Maxim MAX31785 and compatibles" + default n + help + If you say yes here you get hardware monitoring support for Maxim + MAX31785. + + This driver can also be built as a module. If so, the module will + be called max31785. 
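The max6621 driver added above registers through the hwmon_device_register_with_info() interface rather than hand-rolled sysfs attributes. A minimal sketch of how the pieces of that interface fit together (hwmon_ops, channel info, chip info, registration); all my_* names are hypothetical and not taken from the driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/hwmon.h>

static umode_t my_is_visible(const void *drvdata, enum hwmon_sensor_types type,
			     u32 attr, int channel)
{
	return 0444;			/* expose everything read-only */
}

static int my_read(struct device *dev, enum hwmon_sensor_types type,
		   u32 attr, int channel, long *val)
{
	*val = 42000;			/* stand-in: 42 degrees C in millidegrees */
	return 0;
}

static const struct hwmon_ops my_hwmon_ops = {
	.is_visible	= my_is_visible,
	.read		= my_read,
};

static const u32 my_temp_config[] = {
	HWMON_T_INPUT,			/* temp1_input */
	HWMON_T_INPUT,			/* temp2_input */
	0
};

static const struct hwmon_channel_info my_temp = {
	.type	= hwmon_temp,
	.config	= my_temp_config,
};

static const struct hwmon_channel_info *my_info[] = {
	&my_temp,
	NULL
};

static const struct hwmon_chip_info my_chip_info = {
	.ops	= &my_hwmon_ops,
	.info	= my_info,
};

static int my_register(struct device *dev, void *drvdata)
{
	struct device *hwmon_dev;

	hwmon_dev = devm_hwmon_device_register_with_info(dev, "my_sensor",
							  drvdata,
							  &my_chip_info, NULL);
	return PTR_ERR_OR_ZERO(hwmon_dev);
}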
+ config SENSORS_MAX34440 tristate "Maxim MAX34440 and compatibles" default n diff --git a/drivers/hwmon/pmbus/Makefile b/drivers/hwmon/pmbus/Makefile index e9364420a512..ea0e39518c21 100644 --- a/drivers/hwmon/pmbus/Makefile +++ b/drivers/hwmon/pmbus/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_SENSORS_LTC2978) += ltc2978.o obj-$(CONFIG_SENSORS_LTC3815) += ltc3815.o obj-$(CONFIG_SENSORS_MAX16064) += max16064.o obj-$(CONFIG_SENSORS_MAX20751) += max20751.o +obj-$(CONFIG_SENSORS_MAX31785) += max31785.o obj-$(CONFIG_SENSORS_MAX34440) += max34440.o obj-$(CONFIG_SENSORS_MAX8688) += max8688.o obj-$(CONFIG_SENSORS_TPS40422) += tps40422.o diff --git a/drivers/hwmon/pmbus/max31785.c b/drivers/hwmon/pmbus/max31785.c new file mode 100644 index 000000000000..9313849d5160 --- /dev/null +++ b/drivers/hwmon/pmbus/max31785.c @@ -0,0 +1,116 @@ +/* + * Copyright (C) 2017 IBM Corp. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include "pmbus.h" + +enum max31785_regs { + MFR_REVISION = 0x9b, +}; + +#define MAX31785_NR_PAGES 23 + +#define MAX31785_FAN_FUNCS \ + (PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12) + +#define MAX31785_TEMP_FUNCS \ + (PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP) + +#define MAX31785_VOUT_FUNCS \ + (PMBUS_HAVE_VOUT | PMBUS_HAVE_STATUS_VOUT) + +static const struct pmbus_driver_info max31785_info = { + .pages = MAX31785_NR_PAGES, + + /* RPM */ + .format[PSC_FAN] = direct, + .m[PSC_FAN] = 1, + .b[PSC_FAN] = 0, + .R[PSC_FAN] = 0, + .func[0] = MAX31785_FAN_FUNCS, + .func[1] = MAX31785_FAN_FUNCS, + .func[2] = MAX31785_FAN_FUNCS, + .func[3] = MAX31785_FAN_FUNCS, + .func[4] = MAX31785_FAN_FUNCS, + .func[5] = MAX31785_FAN_FUNCS, + + .format[PSC_TEMPERATURE] = direct, + .m[PSC_TEMPERATURE] = 1, + .b[PSC_TEMPERATURE] = 0, + .R[PSC_TEMPERATURE] = 2, + .func[6] = MAX31785_TEMP_FUNCS, + .func[7] = MAX31785_TEMP_FUNCS, + .func[8] = MAX31785_TEMP_FUNCS, + .func[9] = MAX31785_TEMP_FUNCS, + .func[10] = MAX31785_TEMP_FUNCS, + .func[11] = MAX31785_TEMP_FUNCS, + .func[12] = MAX31785_TEMP_FUNCS, + .func[13] = MAX31785_TEMP_FUNCS, + .func[14] = MAX31785_TEMP_FUNCS, + .func[15] = MAX31785_TEMP_FUNCS, + .func[16] = MAX31785_TEMP_FUNCS, + + .format[PSC_VOLTAGE_OUT] = direct, + .m[PSC_VOLTAGE_OUT] = 1, + .b[PSC_VOLTAGE_OUT] = 0, + .R[PSC_VOLTAGE_OUT] = 0, + .func[17] = MAX31785_VOUT_FUNCS, + .func[18] = MAX31785_VOUT_FUNCS, + .func[19] = MAX31785_VOUT_FUNCS, + .func[20] = MAX31785_VOUT_FUNCS, + .func[21] = MAX31785_VOUT_FUNCS, + .func[22] = MAX31785_VOUT_FUNCS, +}; + +static int max31785_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct device *dev = &client->dev; + struct pmbus_driver_info *info; + s64 ret; + + info = devm_kzalloc(dev, sizeof(struct pmbus_driver_info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + *info = max31785_info; + + ret = i2c_smbus_write_byte_data(client, PMBUS_PAGE, 255); + if (ret < 0) + return ret; + + return pmbus_do_probe(client, id, info); +} + +static const struct i2c_device_id max31785_id[] = { + { "max31785", 0 }, + { "max31785a", 0 }, + { }, +}; + +MODULE_DEVICE_TABLE(i2c, max31785_id); + +static struct i2c_driver max31785_driver = { + .driver = { + .name = "max31785", + }, + .probe = max31785_probe, + .remove = 
pmbus_do_remove, + .id_table = max31785_id, +}; + +module_i2c_driver(max31785_driver); + +MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>"); +MODULE_DESCRIPTION("PMBus driver for the Maxim MAX31785"); +MODULE_LICENSE("GPL"); diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index 4efa2bd4f6d8..fa613bd209e3 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -404,9 +404,9 @@ extern const struct regulator_ops pmbus_regulator_ops; /* Function declarations */ void pmbus_clear_cache(struct i2c_client *client); -int pmbus_set_page(struct i2c_client *client, u8 page); -int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg); -int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word); +int pmbus_set_page(struct i2c_client *client, int page); +int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg); +int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, u16 word); int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg); int pmbus_write_byte(struct i2c_client *client, int page, u8 value); int pmbus_write_byte_data(struct i2c_client *client, int page, u8 reg, diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c index 302f0aef59de..52a58b8b6e1b 100644 --- a/drivers/hwmon/pmbus/pmbus_core.c +++ b/drivers/hwmon/pmbus/pmbus_core.c @@ -136,13 +136,13 @@ void pmbus_clear_cache(struct i2c_client *client) } EXPORT_SYMBOL_GPL(pmbus_clear_cache); -int pmbus_set_page(struct i2c_client *client, u8 page) +int pmbus_set_page(struct i2c_client *client, int page) { struct pmbus_data *data = i2c_get_clientdata(client); int rv = 0; int newpage; - if (page != data->currpage) { + if (page >= 0 && page != data->currpage) { rv = i2c_smbus_write_byte_data(client, PMBUS_PAGE, page); newpage = i2c_smbus_read_byte_data(client, PMBUS_PAGE); if (newpage != page) @@ -158,11 +158,9 @@ int pmbus_write_byte(struct i2c_client *client, int page, u8 value) { int rv; - if (page >= 0) { - rv = pmbus_set_page(client, page); - if (rv < 0) - return rv; - } + rv = pmbus_set_page(client, page); + if (rv < 0) + return rv; return i2c_smbus_write_byte(client, value); } @@ -186,7 +184,8 @@ static int _pmbus_write_byte(struct i2c_client *client, int page, u8 value) return pmbus_write_byte(client, page, value); } -int pmbus_write_word_data(struct i2c_client *client, u8 page, u8 reg, u16 word) +int pmbus_write_word_data(struct i2c_client *client, int page, u8 reg, + u16 word) { int rv; @@ -219,7 +218,7 @@ static int _pmbus_write_word_data(struct i2c_client *client, int page, int reg, return pmbus_write_word_data(client, page, reg, word); } -int pmbus_read_word_data(struct i2c_client *client, u8 page, u8 reg) +int pmbus_read_word_data(struct i2c_client *client, int page, u8 reg) { int rv; @@ -255,11 +254,9 @@ int pmbus_read_byte_data(struct i2c_client *client, int page, u8 reg) { int rv; - if (page >= 0) { - rv = pmbus_set_page(client, page); - if (rv < 0) - return rv; - } + rv = pmbus_set_page(client, page); + if (rv < 0) + return rv; return i2c_smbus_read_byte_data(client, reg); } diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c index e4d642b673c6..25d28343ba93 100644 --- a/drivers/hwmon/sht15.c +++ b/drivers/hwmon/sht15.c @@ -18,13 +18,11 @@ #include <linux/interrupt.h> #include <linux/irq.h> -#include <linux/gpio.h> #include <linux/module.h> #include <linux/init.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/mutex.h> -#include <linux/platform_data/sht15.h> 
#include <linux/platform_device.h> #include <linux/sched.h> #include <linux/delay.h> @@ -34,7 +32,8 @@ #include <linux/slab.h> #include <linux/atomic.h> #include <linux/bitrev.h> -#include <linux/of_gpio.h> +#include <linux/gpio/consumer.h> +#include <linux/of.h> /* Commands */ #define SHT15_MEASURE_TEMP 0x03 @@ -122,7 +121,8 @@ static const u8 sht15_crc8_table[] = { /** * struct sht15_data - device instance specific data - * @pdata: platform data (gpio's etc). + * @sck: clock GPIO line + * @data: data GPIO line * @read_work: bh of interrupt handler. * @wait_queue: wait queue for getting values from device. * @val_temp: last temperature value read from device. @@ -150,7 +150,8 @@ static const u8 sht15_crc8_table[] = { * @interrupt_handled: flag used to indicate a handler has been scheduled. */ struct sht15_data { - struct sht15_platform_data *pdata; + struct gpio_desc *sck; + struct gpio_desc *data; struct work_struct read_work; wait_queue_head_t wait_queue; uint16_t val_temp; @@ -205,16 +206,16 @@ static int sht15_connection_reset(struct sht15_data *data) { int i, err; - err = gpio_direction_output(data->pdata->gpio_data, 1); + err = gpiod_direction_output(data->data, 1); if (err) return err; ndelay(SHT15_TSCKL); - gpio_set_value(data->pdata->gpio_sck, 0); + gpiod_set_value(data->sck, 0); ndelay(SHT15_TSCKL); for (i = 0; i < 9; ++i) { - gpio_set_value(data->pdata->gpio_sck, 1); + gpiod_set_value(data->sck, 1); ndelay(SHT15_TSCKH); - gpio_set_value(data->pdata->gpio_sck, 0); + gpiod_set_value(data->sck, 0); ndelay(SHT15_TSCKL); } return 0; @@ -227,11 +228,11 @@ static int sht15_connection_reset(struct sht15_data *data) */ static inline void sht15_send_bit(struct sht15_data *data, int val) { - gpio_set_value(data->pdata->gpio_data, val); + gpiod_set_value(data->data, val); ndelay(SHT15_TSU); - gpio_set_value(data->pdata->gpio_sck, 1); + gpiod_set_value(data->sck, 1); ndelay(SHT15_TSCKH); - gpio_set_value(data->pdata->gpio_sck, 0); + gpiod_set_value(data->sck, 0); ndelay(SHT15_TSCKL); /* clock low time */ } @@ -248,23 +249,23 @@ static int sht15_transmission_start(struct sht15_data *data) int err; /* ensure data is high and output */ - err = gpio_direction_output(data->pdata->gpio_data, 1); + err = gpiod_direction_output(data->data, 1); if (err) return err; ndelay(SHT15_TSU); - gpio_set_value(data->pdata->gpio_sck, 0); + gpiod_set_value(data->sck, 0); ndelay(SHT15_TSCKL); - gpio_set_value(data->pdata->gpio_sck, 1); + gpiod_set_value(data->sck, 1); ndelay(SHT15_TSCKH); - gpio_set_value(data->pdata->gpio_data, 0); + gpiod_set_value(data->data, 0); ndelay(SHT15_TSU); - gpio_set_value(data->pdata->gpio_sck, 0); + gpiod_set_value(data->sck, 0); ndelay(SHT15_TSCKL); - gpio_set_value(data->pdata->gpio_sck, 1); + gpiod_set_value(data->sck, 1); ndelay(SHT15_TSCKH); - gpio_set_value(data->pdata->gpio_data, 1); + gpiod_set_value(data->data, 1); ndelay(SHT15_TSU); - gpio_set_value(data->pdata->gpio_sck, 0); + gpiod_set_value(data->sck, 0); ndelay(SHT15_TSCKL); return 0; } @@ -292,20 +293,20 @@ static int sht15_wait_for_response(struct sht15_data *data) { int err; - err = gpio_direction_input(data->pdata->gpio_data); + err = gpiod_direction_input(data->data); if (err) return err; - gpio_set_value(data->pdata->gpio_sck, 1); + gpiod_set_value(data->sck, 1); ndelay(SHT15_TSCKH); - if (gpio_get_value(data->pdata->gpio_data)) { - gpio_set_value(data->pdata->gpio_sck, 0); + if (gpiod_get_value(data->data)) { + gpiod_set_value(data->sck, 0); dev_err(data->dev, "Command not acknowledged\n"); err = 
sht15_connection_reset(data); if (err) return err; return -EIO; } - gpio_set_value(data->pdata->gpio_sck, 0); + gpiod_set_value(data->sck, 0); ndelay(SHT15_TSCKL); return 0; } @@ -360,17 +361,17 @@ static int sht15_ack(struct sht15_data *data) { int err; - err = gpio_direction_output(data->pdata->gpio_data, 0); + err = gpiod_direction_output(data->data, 0); if (err) return err; ndelay(SHT15_TSU); - gpio_set_value(data->pdata->gpio_sck, 1); + gpiod_set_value(data->sck, 1); ndelay(SHT15_TSU); - gpio_set_value(data->pdata->gpio_sck, 0); + gpiod_set_value(data->sck, 0); ndelay(SHT15_TSU); - gpio_set_value(data->pdata->gpio_data, 1); + gpiod_set_value(data->data, 1); - return gpio_direction_input(data->pdata->gpio_data); + return gpiod_direction_input(data->data); } /** @@ -383,13 +384,13 @@ static int sht15_end_transmission(struct sht15_data *data) { int err; - err = gpio_direction_output(data->pdata->gpio_data, 1); + err = gpiod_direction_output(data->data, 1); if (err) return err; ndelay(SHT15_TSU); - gpio_set_value(data->pdata->gpio_sck, 1); + gpiod_set_value(data->sck, 1); ndelay(SHT15_TSCKH); - gpio_set_value(data->pdata->gpio_sck, 0); + gpiod_set_value(data->sck, 0); ndelay(SHT15_TSCKL); return 0; } @@ -405,10 +406,10 @@ static u8 sht15_read_byte(struct sht15_data *data) for (i = 0; i < 8; ++i) { byte <<= 1; - gpio_set_value(data->pdata->gpio_sck, 1); + gpiod_set_value(data->sck, 1); ndelay(SHT15_TSCKH); - byte |= !!gpio_get_value(data->pdata->gpio_data); - gpio_set_value(data->pdata->gpio_sck, 0); + byte |= !!gpiod_get_value(data->data); + gpiod_set_value(data->sck, 0); ndelay(SHT15_TSCKL); } return byte; @@ -428,7 +429,7 @@ static int sht15_send_status(struct sht15_data *data, u8 status) err = sht15_send_cmd(data, SHT15_WRITE_STATUS); if (err) return err; - err = gpio_direction_output(data->pdata->gpio_data, 1); + err = gpiod_direction_output(data->data, 1); if (err) return err; ndelay(SHT15_TSU); @@ -528,14 +529,14 @@ static int sht15_measurement(struct sht15_data *data, if (ret) return ret; - ret = gpio_direction_input(data->pdata->gpio_data); + ret = gpiod_direction_input(data->data); if (ret) return ret; atomic_set(&data->interrupt_handled, 0); - enable_irq(gpio_to_irq(data->pdata->gpio_data)); - if (gpio_get_value(data->pdata->gpio_data) == 0) { - disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); + enable_irq(gpiod_to_irq(data->data)); + if (gpiod_get_value(data->data) == 0) { + disable_irq_nosync(gpiod_to_irq(data->data)); /* Only relevant if the interrupt hasn't occurred. */ if (!atomic_read(&data->interrupt_handled)) schedule_work(&data->read_work); @@ -547,7 +548,7 @@ static int sht15_measurement(struct sht15_data *data, data->state = SHT15_READING_NOTHING; return -EIO; } else if (ret == 0) { /* timeout occurred */ - disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); + disable_irq_nosync(gpiod_to_irq(data->data)); ret = sht15_connection_reset(data); if (ret) return ret; @@ -826,15 +827,15 @@ static void sht15_bh_read_data(struct work_struct *work_s) read_work); /* Firstly, verify the line is low */ - if (gpio_get_value(data->pdata->gpio_data)) { + if (gpiod_get_value(data->data)) { /* * If not, then start the interrupt again - care here as could * have gone low in meantime so verify it hasn't! 
*/ atomic_set(&data->interrupt_handled, 0); - enable_irq(gpio_to_irq(data->pdata->gpio_data)); + enable_irq(gpiod_to_irq(data->data)); /* If still not occurred or another handler was scheduled */ - if (gpio_get_value(data->pdata->gpio_data) + if (gpiod_get_value(data->data) || atomic_read(&data->interrupt_handled)) return; } @@ -918,53 +919,12 @@ static const struct of_device_id sht15_dt_match[] = { { }, }; MODULE_DEVICE_TABLE(of, sht15_dt_match); - -/* - * This function returns NULL if pdev isn't a device instatiated by dt, - * a pointer to pdata if it could successfully get all information - * from dt or a negative ERR_PTR() on error. - */ -static struct sht15_platform_data *sht15_probe_dt(struct device *dev) -{ - struct device_node *np = dev->of_node; - struct sht15_platform_data *pdata; - - /* no device tree device */ - if (!np) - return NULL; - - pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return ERR_PTR(-ENOMEM); - - pdata->gpio_data = of_get_named_gpio(np, "data-gpios", 0); - if (pdata->gpio_data < 0) { - if (pdata->gpio_data != -EPROBE_DEFER) - dev_err(dev, "data-gpios not found\n"); - return ERR_PTR(pdata->gpio_data); - } - - pdata->gpio_sck = of_get_named_gpio(np, "clk-gpios", 0); - if (pdata->gpio_sck < 0) { - if (pdata->gpio_sck != -EPROBE_DEFER) - dev_err(dev, "clk-gpios not found\n"); - return ERR_PTR(pdata->gpio_sck); - } - - return pdata; -} -#else -static inline struct sht15_platform_data *sht15_probe_dt(struct device *dev) -{ - return NULL; -} #endif static int sht15_probe(struct platform_device *pdev) { int ret; struct sht15_data *data; - u8 status = 0; data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) @@ -977,25 +937,6 @@ static int sht15_probe(struct platform_device *pdev) data->dev = &pdev->dev; init_waitqueue_head(&data->wait_queue); - data->pdata = sht15_probe_dt(&pdev->dev); - if (IS_ERR(data->pdata)) - return PTR_ERR(data->pdata); - if (data->pdata == NULL) { - data->pdata = dev_get_platdata(&pdev->dev); - if (data->pdata == NULL) { - dev_err(&pdev->dev, "no platform data supplied\n"); - return -EINVAL; - } - } - - data->supply_uv = data->pdata->supply_mv * 1000; - if (data->pdata->checksum) - data->checksumming = true; - if (data->pdata->no_otp_reload) - status |= SHT15_STATUS_NO_OTP_RELOAD; - if (data->pdata->low_resolution) - status |= SHT15_STATUS_LOW_RESOLUTION; - /* * If a regulator is available, * query what the supply voltage actually is! 
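Both the gpio-fan and sht15 changes in this commit drop legacy integer GPIO numbers and platform data in favour of GPIO descriptors looked up from the device. A minimal sketch of the descriptor pattern with hypothetical "clk"/"data" con_ids (they resolve to "clk-gpios"/"data-gpios" firmware properties), not code lifted from either driver:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int my_get_gpios(struct device *dev)
{
	struct gpio_desc *clk, *data;
	int irq;

	/* Looked up by function name from DT/ACPI; no global GPIO numbers. */
	clk = devm_gpiod_get(dev, "clk", GPIOD_OUT_LOW);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	data = devm_gpiod_get(dev, "data", GPIOD_IN);
	if (IS_ERR(data))
		return PTR_ERR(data);

	/*
	 * Values are logical: any active-low flag from the firmware node is
	 * applied inside gpiod_*, which is why the hand-rolled active_low
	 * handling could be removed from the converted drivers.
	 */
	gpiod_set_value(clk, 1);

	irq = gpiod_to_irq(data);
	if (irq < 0)
		return irq;

	return 0;
}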
@@ -1030,21 +971,20 @@ static int sht15_probe(struct platform_device *pdev) } /* Try requesting the GPIOs */ - ret = devm_gpio_request_one(&pdev->dev, data->pdata->gpio_sck, - GPIOF_OUT_INIT_LOW, "SHT15 sck"); - if (ret) { + data->sck = devm_gpiod_get(&pdev->dev, "clk", GPIOD_OUT_LOW); + if (IS_ERR(data->sck)) { + ret = PTR_ERR(data->sck); dev_err(&pdev->dev, "clock line GPIO request failed\n"); goto err_release_reg; } - - ret = devm_gpio_request(&pdev->dev, data->pdata->gpio_data, - "SHT15 data"); - if (ret) { + data->data = devm_gpiod_get(&pdev->dev, "data", GPIOD_IN); + if (IS_ERR(data->data)) { + ret = PTR_ERR(data->data); dev_err(&pdev->dev, "data line GPIO request failed\n"); goto err_release_reg; } - ret = devm_request_irq(&pdev->dev, gpio_to_irq(data->pdata->gpio_data), + ret = devm_request_irq(&pdev->dev, gpiod_to_irq(data->data), sht15_interrupt_fired, IRQF_TRIGGER_FALLING, "sht15 data", @@ -1053,7 +993,7 @@ static int sht15_probe(struct platform_device *pdev) dev_err(&pdev->dev, "failed to get irq for data line\n"); goto err_release_reg; } - disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data)); + disable_irq_nosync(gpiod_to_irq(data->data)); ret = sht15_connection_reset(data); if (ret) goto err_release_reg; @@ -1061,13 +1001,6 @@ static int sht15_probe(struct platform_device *pdev) if (ret) goto err_release_reg; - /* write status with platform data options */ - if (status) { - ret = sht15_send_status(data, status); - if (ret) - goto err_release_reg; - } - ret = sysfs_create_group(&pdev->dev.kobj, &sht15_attr_group); if (ret) { dev_err(&pdev->dev, "sysfs create failed\n"); diff --git a/drivers/hwmon/stts751.c b/drivers/hwmon/stts751.c index 3f940fb67dc6..7fe152d92350 100644 --- a/drivers/hwmon/stts751.c +++ b/drivers/hwmon/stts751.c @@ -396,7 +396,7 @@ static ssize_t show_max_alarm(struct device *dev, struct device_attribute *attr, if (ret < 0) return ret; - return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->max_alert); + return snprintf(buf, PAGE_SIZE, "%d\n", priv->max_alert); } static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr, @@ -413,7 +413,7 @@ static ssize_t show_min_alarm(struct device *dev, struct device_attribute *attr, if (ret < 0) return ret; - return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->min_alert); + return snprintf(buf, PAGE_SIZE, "%d\n", priv->min_alert); } static ssize_t show_input(struct device *dev, struct device_attribute *attr, @@ -428,7 +428,7 @@ static ssize_t show_input(struct device *dev, struct device_attribute *attr, if (ret < 0) return ret; - return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->temp); + return snprintf(buf, PAGE_SIZE, "%d\n", priv->temp); } static ssize_t show_therm(struct device *dev, struct device_attribute *attr, @@ -436,7 +436,7 @@ static ssize_t show_therm(struct device *dev, struct device_attribute *attr, { struct stts751_priv *priv = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->therm); + return snprintf(buf, PAGE_SIZE, "%d\n", priv->therm); } static ssize_t set_therm(struct device *dev, struct device_attribute *attr, @@ -478,7 +478,7 @@ static ssize_t show_hyst(struct device *dev, struct device_attribute *attr, { struct stts751_priv *priv = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->hyst); + return snprintf(buf, PAGE_SIZE, "%d\n", priv->hyst); } static ssize_t set_hyst(struct device *dev, struct device_attribute *attr, @@ -518,7 +518,7 @@ static ssize_t show_therm_trip(struct device *dev, if (ret < 0) return ret; - return snprintf(buf, 
PAGE_SIZE - 1, "%d\n", priv->therm_trip); + return snprintf(buf, PAGE_SIZE, "%d\n", priv->therm_trip); } static ssize_t show_max(struct device *dev, struct device_attribute *attr, @@ -526,7 +526,7 @@ static ssize_t show_max(struct device *dev, struct device_attribute *attr, { struct stts751_priv *priv = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_max); + return snprintf(buf, PAGE_SIZE, "%d\n", priv->event_max); } static ssize_t set_max(struct device *dev, struct device_attribute *attr, @@ -560,7 +560,7 @@ static ssize_t show_min(struct device *dev, struct device_attribute *attr, { struct stts751_priv *priv = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE - 1, "%d\n", priv->event_min); + return snprintf(buf, PAGE_SIZE, "%d\n", priv->event_min); } static ssize_t set_min(struct device *dev, struct device_attribute *attr, @@ -594,7 +594,7 @@ static ssize_t show_interval(struct device *dev, struct device_attribute *attr, { struct stts751_priv *priv = dev_get_drvdata(dev); - return snprintf(buf, PAGE_SIZE - 1, "%d\n", + return snprintf(buf, PAGE_SIZE, "%d\n", stts751_intervals[priv->interval]); } diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c index dab5c515d5a3..5ba9d9f1daa1 100644 --- a/drivers/hwmon/w83793.c +++ b/drivers/hwmon/w83793.c @@ -1676,7 +1676,9 @@ static int w83793_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct device *dev = &client->dev; - const int watchdog_minors[] = { WATCHDOG_MINOR, 212, 213, 214, 215 }; + static const int watchdog_minors[] = { + WATCHDOG_MINOR, 212, 213, 214, 215 + }; struct w83793_data *data; int i, tmp, val, err; int files_fan = ARRAY_SIZE(w83793_left_fan) / 7; diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c index e1be61095532..a3cd91f23267 100644 --- a/drivers/hwmon/xgene-hwmon.c +++ b/drivers/hwmon/xgene-hwmon.c @@ -91,6 +91,11 @@ #define to_xgene_hwmon_dev(cl) \ container_of(cl, struct xgene_hwmon_dev, mbox_client) +enum xgene_hwmon_version { + XGENE_HWMON_V1 = 0, + XGENE_HWMON_V2 = 1, +}; + struct slimpro_resp_msg { u32 msg; u32 param1; @@ -609,6 +614,15 @@ static void xgene_hwmon_tx_done(struct mbox_client *cl, void *msg, int ret) } } +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_hwmon_acpi_match[] = { + {"APMC0D29", XGENE_HWMON_V1}, + {"APMC0D8A", XGENE_HWMON_V2}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, xgene_hwmon_acpi_match); +#endif + static int xgene_hwmon_probe(struct platform_device *pdev) { struct xgene_hwmon_dev *ctx; @@ -651,6 +665,15 @@ static int xgene_hwmon_probe(struct platform_device *pdev) } } else { struct acpi_pcct_hw_reduced *cppc_ss; + const struct acpi_device_id *acpi_id; + int version; + + acpi_id = acpi_match_device(pdev->dev.driver->acpi_match_table, + &pdev->dev); + if (!acpi_id) + return -EINVAL; + + version = (int)acpi_id->driver_data; if (device_property_read_u32(&pdev->dev, "pcc-channel", &ctx->mbox_idx)) { @@ -693,7 +716,13 @@ static int xgene_hwmon_probe(struct platform_device *pdev) */ ctx->comm_base_addr = cppc_ss->base_address; if (ctx->comm_base_addr) { - ctx->pcc_comm_addr = memremap(ctx->comm_base_addr, + if (version == XGENE_HWMON_V2) + ctx->pcc_comm_addr = (void __force *)ioremap( + ctx->comm_base_addr, + cppc_ss->length); + else + ctx->pcc_comm_addr = memremap( + ctx->comm_base_addr, cppc_ss->length, MEMREMAP_WB); } else { @@ -761,14 +790,6 @@ static int xgene_hwmon_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_ACPI -static const struct acpi_device_id xgene_hwmon_acpi_match[] = 
{ - {"APMC0D29", 0}, - {}, -}; -MODULE_DEVICE_TABLE(acpi, xgene_hwmon_acpi_match); -#endif - static const struct of_device_id xgene_hwmon_of_match[] = { {.compatible = "apm,xgene-slimpro-hwmon"}, {} diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 3a234701d92c..6f25da56a169 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -611,9 +611,9 @@ static int drive_is_ready(ide_drive_t *drive) * logic that wants cleaning up. */ -void ide_timer_expiry (unsigned long data) +void ide_timer_expiry (struct timer_list *t) { - ide_hwif_t *hwif = (ide_hwif_t *)data; + ide_hwif_t *hwif = from_timer(hwif, t, timer); ide_drive_t *uninitialized_var(drive); ide_handler_t *handler; unsigned long flags; diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c index eaf39e5db08b..17fd55af4d92 100644 --- a/drivers/ide/ide-probe.c +++ b/drivers/ide/ide-probe.c @@ -1184,7 +1184,7 @@ static void ide_init_port_data(ide_hwif_t *hwif, unsigned int index) spin_lock_init(&hwif->lock); - setup_timer(&hwif->timer, &ide_timer_expiry, (unsigned long)hwif); + timer_setup(&hwif->timer, ide_timer_expiry, 0); init_completion(&hwif->gendev_rel_comp); diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c index d9a1e9893136..97bea2e1aa6a 100644 --- a/drivers/infiniband/hw/hfi1/file_ops.c +++ b/drivers/infiniband/hw/hfi1/file_ops.c @@ -380,7 +380,7 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd, if (sc->flags & SCF_FROZEN) { wait_event_interruptible_timeout( dd->event_queue, - !(ACCESS_ONCE(dd->flags) & HFI1_FROZEN), + !(READ_ONCE(dd->flags) & HFI1_FROZEN), msecs_to_jiffies(SEND_CTXT_HALT_TIMEOUT)); if (dd->flags & HFI1_FROZEN) return -ENOLCK; diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 7108a4b5e94c..75e740780285 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -1423,14 +1423,14 @@ retry: goto done; } /* copy from receiver cache line and recalculate */ - sc->alloc_free = ACCESS_ONCE(sc->free); + sc->alloc_free = READ_ONCE(sc->free); avail = (unsigned long)sc->credits - (sc->fill - sc->alloc_free); if (blocks > avail) { /* still no room, actively update */ sc_release_update(sc); - sc->alloc_free = ACCESS_ONCE(sc->free); + sc->alloc_free = READ_ONCE(sc->free); trycount++; goto retry; } @@ -1667,7 +1667,7 @@ void sc_release_update(struct send_context *sc) /* call sent buffer callbacks */ code = -1; /* code not yet set */ - head = ACCESS_ONCE(sc->sr_head); /* snapshot the head */ + head = READ_ONCE(sc->sr_head); /* snapshot the head */ tail = sc->sr_tail; while (head != tail) { pbuf = &sc->sr[tail].pbuf; diff --git a/drivers/infiniband/hw/hfi1/ruc.c b/drivers/infiniband/hw/hfi1/ruc.c index b3291f0fde9a..a7fc664f0d4e 100644 --- a/drivers/infiniband/hw/hfi1/ruc.c +++ b/drivers/infiniband/hw/hfi1/ruc.c @@ -363,7 +363,7 @@ static void ruc_loopback(struct rvt_qp *sqp) again: smp_read_barrier_depends(); /* see post_one_send() */ - if (sqp->s_last == ACCESS_ONCE(sqp->s_head)) + if (sqp->s_last == READ_ONCE(sqp->s_head)) goto clr_busy; wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 6781bcdb10b3..08346d25441c 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -1725,7 +1725,7 @@ retry: swhead = sde->descq_head & sde->sdma_mask; /* this code is really bad for cache line trading */ - swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; + swtail = 
READ_ONCE(sde->descq_tail) & sde->sdma_mask; cnt = sde->descq_cnt; if (swhead < swtail) @@ -1872,7 +1872,7 @@ retry: if ((status & sde->idle_mask) && !idle_check_done) { u16 swtail; - swtail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; + swtail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; if (swtail != hwhead) { hwhead = (u16)read_sde_csr(sde, SD(HEAD)); idle_check_done = 1; @@ -2222,7 +2222,7 @@ void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *sde) u16 len; head = sde->descq_head & sde->sdma_mask; - tail = ACCESS_ONCE(sde->descq_tail) & sde->sdma_mask; + tail = READ_ONCE(sde->descq_tail) & sde->sdma_mask; seq_printf(s, SDE_FMT, sde->this_idx, sde->cpu, sdma_state_name(sde->state.current_state), @@ -3305,7 +3305,7 @@ int sdma_ahg_alloc(struct sdma_engine *sde) return -EINVAL; } while (1) { - nr = ffz(ACCESS_ONCE(sde->ahg_bits)); + nr = ffz(READ_ONCE(sde->ahg_bits)); if (nr > 31) { trace_hfi1_ahg_allocate(sde, -ENOSPC); return -ENOSPC; diff --git a/drivers/infiniband/hw/hfi1/sdma.h b/drivers/infiniband/hw/hfi1/sdma.h index 107011d8613b..374c59784950 100644 --- a/drivers/infiniband/hw/hfi1/sdma.h +++ b/drivers/infiniband/hw/hfi1/sdma.h @@ -445,7 +445,7 @@ static inline u16 sdma_descq_freecnt(struct sdma_engine *sde) { return sde->descq_cnt - (sde->descq_tail - - ACCESS_ONCE(sde->descq_head)) - 1; + READ_ONCE(sde->descq_head)) - 1; } static inline u16 sdma_descq_inprocess(struct sdma_engine *sde) diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c index 0b646173ca22..9a31c585427f 100644 --- a/drivers/infiniband/hw/hfi1/uc.c +++ b/drivers/infiniband/hw/hfi1/uc.c @@ -80,7 +80,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) goto bail; /* We are in the error state, flush the work request. */ smp_read_barrier_depends(); /* see post_one_send() */ - if (qp->s_last == ACCESS_ONCE(qp->s_head)) + if (qp->s_last == READ_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (iowait_sdma_pending(&priv->s_iowait)) { @@ -121,7 +121,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) goto bail; /* Check if send work queue is empty. */ smp_read_barrier_depends(); /* see post_one_send() */ - if (qp->s_cur == ACCESS_ONCE(qp->s_head)) { + if (qp->s_cur == READ_ONCE(qp->s_head)) { clear_ahg(qp); goto bail; } diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c index 2ba74fdd6f15..7fec6b984e3e 100644 --- a/drivers/infiniband/hw/hfi1/ud.c +++ b/drivers/infiniband/hw/hfi1/ud.c @@ -487,7 +487,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) goto bail; /* We are in the error state, flush the work request. */ smp_read_barrier_depends(); /* see post_one_send */ - if (qp->s_last == ACCESS_ONCE(qp->s_head)) + if (qp->s_last == READ_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. 
*/ if (iowait_sdma_pending(&priv->s_iowait)) { @@ -501,7 +501,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps) /* see post_one_send() */ smp_read_barrier_depends(); - if (qp->s_cur == ACCESS_ONCE(qp->s_head)) + if (qp->s_cur == READ_ONCE(qp->s_head)) goto bail; wqe = rvt_get_swqe_ptr(qp, qp->s_cur); diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index c0c0e0445cbf..8ec6e8a8d6f7 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -276,7 +276,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, /* Wait until all requests have been freed. */ wait_event_interruptible( pq->wait, - (ACCESS_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE)); + (READ_ONCE(pq->state) == SDMA_PKT_Q_INACTIVE)); kfree(pq->reqs); kfree(pq->req_in_use); kmem_cache_destroy(pq->txreq_cache); @@ -591,7 +591,7 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd, if (ret != -EBUSY) { req->status = ret; WRITE_ONCE(req->has_error, 1); - if (ACCESS_ONCE(req->seqcomp) == + if (READ_ONCE(req->seqcomp) == req->seqsubmitted - 1) goto free_req; return ret; @@ -825,7 +825,7 @@ static int user_sdma_send_pkts(struct user_sdma_request *req, unsigned maxpkts) */ if (req->data_len) { iovec = &req->iovs[req->iov_idx]; - if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) { + if (READ_ONCE(iovec->offset) == iovec->iov.iov_len) { if (++req->iov_idx == req->data_iovs) { ret = -EFAULT; goto free_txreq; @@ -1390,7 +1390,7 @@ static void user_sdma_txreq_cb(struct sdma_txreq *txreq, int status) } else { if (status != SDMA_TXREQ_S_OK) req->status = status; - if (req->seqcomp == (ACCESS_ONCE(req->seqsubmitted) - 1) && + if (req->seqcomp == (READ_ONCE(req->seqsubmitted) - 1) && (READ_ONCE(req->done) || READ_ONCE(req->has_error))) { user_sdma_free_request(req, false); diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c index 53efbb0b40c4..9a37e844d4c8 100644 --- a/drivers/infiniband/hw/qib/qib_ruc.c +++ b/drivers/infiniband/hw/qib/qib_ruc.c @@ -368,7 +368,7 @@ static void qib_ruc_loopback(struct rvt_qp *sqp) again: smp_read_barrier_depends(); /* see post_one_send() */ - if (sqp->s_last == ACCESS_ONCE(sqp->s_head)) + if (sqp->s_last == READ_ONCE(sqp->s_head)) goto clr_busy; wqe = rvt_get_swqe_ptr(sqp, sqp->s_last); diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c index 498e2202e72c..bddcc37ace44 100644 --- a/drivers/infiniband/hw/qib/qib_uc.c +++ b/drivers/infiniband/hw/qib/qib_uc.c @@ -61,7 +61,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) goto bail; /* We are in the error state, flush the work request. */ smp_read_barrier_depends(); /* see post_one_send() */ - if (qp->s_last == ACCESS_ONCE(qp->s_head)) + if (qp->s_last == READ_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (atomic_read(&priv->s_dma_busy)) { @@ -91,7 +91,7 @@ int qib_make_uc_req(struct rvt_qp *qp, unsigned long *flags) goto bail; /* Check if send work queue is empty. */ smp_read_barrier_depends(); /* see post_one_send() */ - if (qp->s_cur == ACCESS_ONCE(qp->s_head)) + if (qp->s_cur == READ_ONCE(qp->s_head)) goto bail; /* * Start a new request. 
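As an illustrative aside (not part of the patch itself): the ACCESS_ONCE()-to-READ_ONCE() conversions running through the hfi1, qib and rdmavt hunks all follow the same single-producer/single-consumer ring pattern, where one side publishes an index and the other samples it exactly once per pass. A minimal sketch of that pattern is shown below; the structure and function names are made up purely for illustration.

#include <linux/compiler.h>
#include <linux/types.h>

struct swqe_ring {
	u32 s_head;	/* producer index, written by the work poster */
	u32 s_last;	/* consumer index, owned by the send engine   */
};

/* Returns true when no send work is queued. */
static bool swqe_ring_empty(struct swqe_ring *r)
{
	/*
	 * Sample the producer index exactly once so the compiler can
	 * neither reload nor tear the access while it is being used.
	 */
	return r->s_last == READ_ONCE(r->s_head);
}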
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c index be4907453ac4..15962ed193ce 100644 --- a/drivers/infiniband/hw/qib/qib_ud.c +++ b/drivers/infiniband/hw/qib/qib_ud.c @@ -253,7 +253,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags) goto bail; /* We are in the error state, flush the work request. */ smp_read_barrier_depends(); /* see post_one_send */ - if (qp->s_last == ACCESS_ONCE(qp->s_head)) + if (qp->s_last == READ_ONCE(qp->s_head)) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (atomic_read(&priv->s_dma_busy)) { @@ -267,7 +267,7 @@ int qib_make_ud_req(struct rvt_qp *qp, unsigned long *flags) /* see post_one_send() */ smp_read_barrier_depends(); - if (qp->s_cur == ACCESS_ONCE(qp->s_head)) + if (qp->s_cur == READ_ONCE(qp->s_head)) goto bail; wqe = rvt_get_swqe_ptr(qp, qp->s_cur); diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 22df09ae809e..b670cb9d2006 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -1073,7 +1073,7 @@ int rvt_error_qp(struct rvt_qp *qp, enum ib_wc_status err) rdi->driver_f.notify_error_qp(qp); /* Schedule the sending tasklet to drain the send work queue. */ - if (ACCESS_ONCE(qp->s_last) != qp->s_head) + if (READ_ONCE(qp->s_last) != qp->s_head) rdi->driver_f.schedule_send(qp); rvt_clear_mr_refs(qp, 0); @@ -1686,7 +1686,7 @@ static inline int rvt_qp_is_avail( if (likely(qp->s_avail)) return 0; smp_read_barrier_depends(); /* see rc.c */ - slast = ACCESS_ONCE(qp->s_last); + slast = READ_ONCE(qp->s_last); if (qp->s_head >= slast) avail = qp->s_size - (qp->s_head - slast); else @@ -1917,7 +1917,7 @@ int rvt_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, * ahead and kick the send engine into gear. Otherwise we will always * just schedule the send to happen later. */ - call_send = qp->s_head == ACCESS_ONCE(qp->s_last) && !wr->next; + call_send = qp->s_head == READ_ONCE(qp->s_last) && !wr->next; for (; wr; wr = wr->next) { err = rvt_post_one_wr(qp, wr, &call_send); diff --git a/drivers/input/misc/regulator-haptic.c b/drivers/input/misc/regulator-haptic.c index 2e8f801932be..a1db1e5040dc 100644 --- a/drivers/input/misc/regulator-haptic.c +++ b/drivers/input/misc/regulator-haptic.c @@ -233,7 +233,7 @@ static int __maybe_unused regulator_haptic_resume(struct device *dev) haptic->suspended = false; - magnitude = ACCESS_ONCE(haptic->magnitude); + magnitude = READ_ONCE(haptic->magnitude); if (magnitude) regulator_haptic_set_voltage(haptic, magnitude); diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c index 0f586780ceb4..1ae5c1ef3f5b 100644 --- a/drivers/input/mouse/vmmouse.c +++ b/drivers/input/mouse/vmmouse.c @@ -316,11 +316,9 @@ static int vmmouse_enable(struct psmouse *psmouse) /* * Array of supported hypervisors. 
*/ -static const struct hypervisor_x86 *vmmouse_supported_hypervisors[] = { - &x86_hyper_vmware, -#ifdef CONFIG_KVM_GUEST - &x86_hyper_kvm, -#endif +static enum x86_hypervisor_type vmmouse_supported_hypervisors[] = { + X86_HYPER_VMWARE, + X86_HYPER_KVM, }; /** @@ -331,7 +329,7 @@ static bool vmmouse_check_hypervisor(void) int i; for (i = 0; i < ARRAY_SIZE(vmmouse_supported_hypervisors); i++) - if (vmmouse_supported_hypervisors[i] == x86_hyper) + if (vmmouse_supported_hypervisors[i] == x86_hyper_type) return true; return false; diff --git a/drivers/input/touchscreen/s3c2410_ts.c b/drivers/input/touchscreen/s3c2410_ts.c index 3b3db8c868e0..d3265b6b58b8 100644 --- a/drivers/input/touchscreen/s3c2410_ts.c +++ b/drivers/input/touchscreen/s3c2410_ts.c @@ -145,7 +145,7 @@ static void touch_timer_fire(unsigned long data) } } -static DEFINE_TIMER(touch_timer, touch_timer_fire, 0, 0); +static DEFINE_TIMER(touch_timer, touch_timer_fire); /** * stylus_irq - touchscreen stylus event interrupt diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c index 8e8874d23717..9c848e36f209 100644 --- a/drivers/iommu/amd_iommu.c +++ b/drivers/iommu/amd_iommu.c @@ -4173,16 +4173,26 @@ static void irq_remapping_free(struct irq_domain *domain, unsigned int virq, irq_domain_free_irqs_common(domain, virq, nr_irqs); } -static void irq_remapping_activate(struct irq_domain *domain, - struct irq_data *irq_data) +static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, + struct amd_ir_data *ir_data, + struct irq_2_irte *irte_info, + struct irq_cfg *cfg); + +static int irq_remapping_activate(struct irq_domain *domain, + struct irq_data *irq_data, bool early) { struct amd_ir_data *data = irq_data->chip_data; struct irq_2_irte *irte_info = &data->irq_2_irte; struct amd_iommu *iommu = amd_iommu_rlookup_table[irte_info->devid]; + struct irq_cfg *cfg = irqd_cfg(irq_data); - if (iommu) - iommu->irte_ops->activate(data->entry, irte_info->devid, - irte_info->index); + if (!iommu) + return 0; + + iommu->irte_ops->activate(data->entry, irte_info->devid, + irte_info->index); + amd_ir_update_irte(irq_data, iommu, data, irte_info, cfg); + return 0; } static void irq_remapping_deactivate(struct irq_domain *domain, @@ -4269,6 +4279,22 @@ static int amd_ir_set_vcpu_affinity(struct irq_data *data, void *vcpu_info) return modify_irte_ga(irte_info->devid, irte_info->index, irte, ir_data); } + +static void amd_ir_update_irte(struct irq_data *irqd, struct amd_iommu *iommu, + struct amd_ir_data *ir_data, + struct irq_2_irte *irte_info, + struct irq_cfg *cfg) +{ + + /* + * Atomically updates the IRTE with the new destination, vector + * and flushes the interrupt entry cache. + */ + iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid, + irte_info->index, cfg->vector, + cfg->dest_apicid); +} + static int amd_ir_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { @@ -4286,13 +4312,7 @@ static int amd_ir_set_affinity(struct irq_data *data, if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) return ret; - /* - * Atomically updates the IRTE with the new destination, vector - * and flushes the interrupt entry cache. - */ - iommu->irte_ops->set_affinity(ir_data->entry, irte_info->devid, - irte_info->index, cfg->vector, cfg->dest_apicid); - + amd_ir_update_irte(data, iommu, ir_data, irte_info, cfg); /* * After this point, all the interrupts will start arriving * at the new destination. 
So, time to cleanup the previous diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c index 25842b566c39..76a193c7fcfc 100644 --- a/drivers/iommu/intel_irq_remapping.c +++ b/drivers/iommu/intel_irq_remapping.c @@ -1122,6 +1122,24 @@ struct irq_remap_ops intel_irq_remap_ops = { .get_irq_domain = intel_get_irq_domain, }; +static void intel_ir_reconfigure_irte(struct irq_data *irqd, bool force) +{ + struct intel_ir_data *ir_data = irqd->chip_data; + struct irte *irte = &ir_data->irte_entry; + struct irq_cfg *cfg = irqd_cfg(irqd); + + /* + * Atomically updates the IRTE with the new destination, vector + * and flushes the interrupt entry cache. + */ + irte->vector = cfg->vector; + irte->dest_id = IRTE_DEST(cfg->dest_apicid); + + /* Update the hardware only if the interrupt is in remapped mode. */ + if (!force || ir_data->irq_2_iommu.mode == IRQ_REMAPPING) + modify_irte(&ir_data->irq_2_iommu, irte); +} + /* * Migrate the IO-APIC irq in the presence of intr-remapping. * @@ -1140,27 +1158,15 @@ static int intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) { - struct intel_ir_data *ir_data = data->chip_data; - struct irte *irte = &ir_data->irte_entry; - struct irq_cfg *cfg = irqd_cfg(data); struct irq_data *parent = data->parent_data; + struct irq_cfg *cfg = irqd_cfg(data); int ret; ret = parent->chip->irq_set_affinity(parent, mask, force); if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE) return ret; - /* - * Atomically updates the IRTE with the new destination, vector - * and flushes the interrupt entry cache. - */ - irte->vector = cfg->vector; - irte->dest_id = IRTE_DEST(cfg->dest_apicid); - - /* Update the hardware only if the interrupt is in remapped mode. */ - if (ir_data->irq_2_iommu.mode == IRQ_REMAPPING) - modify_irte(&ir_data->irq_2_iommu, irte); - + intel_ir_reconfigure_irte(data, false); /* * After this point, all the interrupts will start arriving * at the new destination. So, time to cleanup the previous @@ -1390,12 +1396,11 @@ static void intel_irq_remapping_free(struct irq_domain *domain, irq_domain_free_irqs_common(domain, virq, nr_irqs); } -static void intel_irq_remapping_activate(struct irq_domain *domain, - struct irq_data *irq_data) +static int intel_irq_remapping_activate(struct irq_domain *domain, + struct irq_data *irq_data, bool early) { - struct intel_ir_data *data = irq_data->chip_data; - - modify_irte(&data->irq_2_iommu, &data->irte_entry); + intel_ir_reconfigure_irte(irq_data, true); + return 0; } static void intel_irq_remapping_deactivate(struct irq_domain *domain, diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index 9d8a1dd2e2c2..53380bd72ea4 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -1,3 +1,5 @@ +menu "IRQ chip support" + config IRQCHIP def_bool y depends on OF_IRQ @@ -151,6 +153,9 @@ config CLPS711X_IRQCHIP select SPARSE_IRQ default y +config OMPIC + bool + config OR1K_PIC bool select IRQ_DOMAIN @@ -304,6 +309,7 @@ config EZNPS_GIC config STM32_EXTI bool select IRQ_DOMAIN + select GENERIC_IRQ_CHIP config QCOM_IRQ_COMBINER bool "QCOM IRQ combiner support" @@ -321,3 +327,13 @@ config IRQ_UNIPHIER_AIDET select IRQ_DOMAIN_HIERARCHY help Support for the UniPhier AIDET (ARM Interrupt Detector). 
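A brief aside on the ->activate() conversions (the irq_remapping_activate() and intel_irq_remapping_activate() hunks here, and the ITS equivalent further down): the irq_domain activate callback now returns int and takes an extra bool, so activation can report failure instead of silently doing nothing. A minimal callback under the new prototype might look like the sketch below; my_chip_data and my_hw_program() are made-up names used only for illustration.

#include <linux/irq.h>
#include <linux/irqdomain.h>

struct my_chip_data;
int my_hw_program(struct my_chip_data *data);

static int my_domain_activate(struct irq_domain *domain,
			      struct irq_data *irq_data, bool early)
{
	struct my_chip_data *data = irq_data->chip_data;

	if (!data)
		return 0;	/* nothing to program is not an error */

	/* Program the hardware entry and propagate any failure. */
	return my_hw_program(data);
}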
+ +config MESON_IRQ_GPIO + bool "Meson GPIO Interrupt Multiplexer" + depends on ARCH_MESON + select IRQ_DOMAIN + select IRQ_DOMAIN_HIERARCHY + help + Support Meson SoC Family GPIO Interrupt Multiplexer + +endmenu diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index b842dfdc903f..dae7282bfdef 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_DW_APB_ICTL) += irq-dw-apb-ictl.o obj-$(CONFIG_METAG) += irq-metag-ext.o obj-$(CONFIG_METAG_PERFCOUNTER_IRQS) += irq-metag.o obj-$(CONFIG_CLPS711X_IRQCHIP) += irq-clps711x.o +obj-$(CONFIG_OMPIC) += irq-ompic.o obj-$(CONFIG_OR1K_PIC) += irq-or1k-pic.o obj-$(CONFIG_ORION_IRQCHIP) += irq-orion.o obj-$(CONFIG_OMAP_IRQCHIP) += irq-omap-intc.o @@ -80,3 +81,5 @@ obj-$(CONFIG_ARCH_ASPEED) += irq-aspeed-vic.o irq-aspeed-i2c-ic.o obj-$(CONFIG_STM32_EXTI) += irq-stm32-exti.o obj-$(CONFIG_QCOM_IRQ_COMBINER) += qcom-irq-combiner.o obj-$(CONFIG_IRQ_UNIPHIER_AIDET) += irq-uniphier-aidet.o +obj-$(CONFIG_ARCH_SYNQUACER) += irq-sni-exiu.o +obj-$(CONFIG_MESON_IRQ_GPIO) += irq-meson-gpio.o diff --git a/drivers/irqchip/irq-aspeed-i2c-ic.c b/drivers/irqchip/irq-aspeed-i2c-ic.c index 815b88dd18f2..f20200af0992 100644 --- a/drivers/irqchip/irq-aspeed-i2c-ic.c +++ b/drivers/irqchip/irq-aspeed-i2c-ic.c @@ -76,8 +76,8 @@ static int __init aspeed_i2c_ic_of_init(struct device_node *node, return -ENOMEM; i2c_ic->base = of_iomap(node, 0); - if (IS_ERR(i2c_ic->base)) { - ret = PTR_ERR(i2c_ic->base); + if (!i2c_ic->base) { + ret = -ENOMEM; goto err_free_ic; } diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c index b009b916a292..691d20eb0bec 100644 --- a/drivers/irqchip/irq-brcmstb-l2.c +++ b/drivers/irqchip/irq-brcmstb-l2.c @@ -1,7 +1,7 @@ /* * Generic Broadcom Set Top Box Level 2 Interrupt controller driver * - * Copyright (C) 2014 Broadcom Corporation + * Copyright (C) 2014-2017 Broadcom * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as @@ -31,35 +31,82 @@ #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> -/* Register offsets in the L2 interrupt controller */ -#define CPU_STATUS 0x00 -#define CPU_SET 0x04 -#define CPU_CLEAR 0x08 -#define CPU_MASK_STATUS 0x0c -#define CPU_MASK_SET 0x10 -#define CPU_MASK_CLEAR 0x14 +struct brcmstb_intc_init_params { + irq_flow_handler_t handler; + int cpu_status; + int cpu_clear; + int cpu_mask_status; + int cpu_mask_set; + int cpu_mask_clear; +}; + +/* Register offsets in the L2 latched interrupt controller */ +static const struct brcmstb_intc_init_params l2_edge_intc_init = { + .handler = handle_edge_irq, + .cpu_status = 0x00, + .cpu_clear = 0x08, + .cpu_mask_status = 0x0c, + .cpu_mask_set = 0x10, + .cpu_mask_clear = 0x14 +}; + +/* Register offsets in the L2 level interrupt controller */ +static const struct brcmstb_intc_init_params l2_lvl_intc_init = { + .handler = handle_level_irq, + .cpu_status = 0x00, + .cpu_clear = -1, /* Register not present */ + .cpu_mask_status = 0x04, + .cpu_mask_set = 0x08, + .cpu_mask_clear = 0x0C +}; /* L2 intc private data structure */ struct brcmstb_l2_intc_data { - int parent_irq; - void __iomem *base; struct irq_domain *domain; + struct irq_chip_generic *gc; + int status_offset; + int mask_offset; bool can_wake; u32 saved_mask; /* for suspend/resume */ }; +/** + * brcmstb_l2_mask_and_ack - Mask and ack pending interrupt + * @d: irq_data + * + * Chip has separate enable/disable registers instead of a single mask + * 
register and pending interrupt is acknowledged by setting a bit. + * + * Note: This function is generic and could easily be added to the + * generic irqchip implementation if there ever becomes a will to do so. + * Perhaps with a name like irq_gc_mask_disable_and_ack_set(). + * + * e.g.: https://patchwork.kernel.org/patch/9831047/ + */ +static void brcmstb_l2_mask_and_ack(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); + u32 mask = d->mask; + + irq_gc_lock(gc); + irq_reg_writel(gc, mask, ct->regs.disable); + *ct->mask_cache &= ~mask; + irq_reg_writel(gc, mask, ct->regs.ack); + irq_gc_unlock(gc); +} + static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) { struct brcmstb_l2_intc_data *b = irq_desc_get_handler_data(desc); - struct irq_chip_generic *gc = irq_get_domain_generic_chip(b->domain, 0); struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int irq; u32 status; chained_irq_enter(chip, desc); - status = irq_reg_readl(gc, CPU_STATUS) & - ~(irq_reg_readl(gc, CPU_MASK_STATUS)); + status = irq_reg_readl(b->gc, b->status_offset) & + ~(irq_reg_readl(b->gc, b->mask_offset)); if (status == 0) { raw_spin_lock(&desc->lock); @@ -70,10 +117,8 @@ static void brcmstb_l2_intc_irq_handle(struct irq_desc *desc) do { irq = ffs(status) - 1; - /* ack at our level */ - irq_reg_writel(gc, 1 << irq, CPU_CLEAR); status &= ~(1 << irq); - generic_handle_irq(irq_find_mapping(b->domain, irq)); + generic_handle_irq(irq_linear_revmap(b->domain, irq)); } while (status); out: chained_irq_exit(chip, desc); @@ -82,16 +127,17 @@ out: static void brcmstb_l2_intc_suspend(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); struct brcmstb_l2_intc_data *b = gc->private; irq_gc_lock(gc); /* Save the current mask */ - b->saved_mask = irq_reg_readl(gc, CPU_MASK_STATUS); + b->saved_mask = irq_reg_readl(gc, ct->regs.mask); if (b->can_wake) { /* Program the wakeup mask */ - irq_reg_writel(gc, ~gc->wake_active, CPU_MASK_SET); - irq_reg_writel(gc, gc->wake_active, CPU_MASK_CLEAR); + irq_reg_writel(gc, ~gc->wake_active, ct->regs.disable); + irq_reg_writel(gc, gc->wake_active, ct->regs.enable); } irq_gc_unlock(gc); } @@ -99,49 +145,56 @@ static void brcmstb_l2_intc_suspend(struct irq_data *d) static void brcmstb_l2_intc_resume(struct irq_data *d) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct irq_chip_type *ct = irq_data_get_chip_type(d); struct brcmstb_l2_intc_data *b = gc->private; irq_gc_lock(gc); - /* Clear unmasked non-wakeup interrupts */ - irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, CPU_CLEAR); + if (ct->chip.irq_ack) { + /* Clear unmasked non-wakeup interrupts */ + irq_reg_writel(gc, ~b->saved_mask & ~gc->wake_active, + ct->regs.ack); + } /* Restore the saved mask */ - irq_reg_writel(gc, b->saved_mask, CPU_MASK_SET); - irq_reg_writel(gc, ~b->saved_mask, CPU_MASK_CLEAR); + irq_reg_writel(gc, b->saved_mask, ct->regs.disable); + irq_reg_writel(gc, ~b->saved_mask, ct->regs.enable); irq_gc_unlock(gc); } static int __init brcmstb_l2_intc_of_init(struct device_node *np, - struct device_node *parent) + struct device_node *parent, + const struct brcmstb_intc_init_params + *init_params) { unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; struct brcmstb_l2_intc_data *data; - struct irq_chip_generic *gc; struct irq_chip_type *ct; int ret; unsigned int flags; + int parent_irq; + void __iomem *base; 
data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; - data->base = of_iomap(np, 0); - if (!data->base) { + base = of_iomap(np, 0); + if (!base) { pr_err("failed to remap intc L2 registers\n"); ret = -ENOMEM; goto out_free; } /* Disable all interrupts by default */ - writel(0xffffffff, data->base + CPU_MASK_SET); + writel(0xffffffff, base + init_params->cpu_mask_set); /* Wakeup interrupts may be retained from S5 (cold boot) */ data->can_wake = of_property_read_bool(np, "brcm,irq-can-wake"); - if (!data->can_wake) - writel(0xffffffff, data->base + CPU_CLEAR); + if (!data->can_wake && (init_params->cpu_clear >= 0)) + writel(0xffffffff, base + init_params->cpu_clear); - data->parent_irq = irq_of_parse_and_map(np, 0); - if (!data->parent_irq) { + parent_irq = irq_of_parse_and_map(np, 0); + if (!parent_irq) { pr_err("failed to find parent interrupt\n"); ret = -EINVAL; goto out_unmap; @@ -163,29 +216,39 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, /* Allocate a single Generic IRQ chip for this node */ ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, - np->full_name, handle_edge_irq, clr, 0, flags); + np->full_name, init_params->handler, clr, 0, flags); if (ret) { pr_err("failed to allocate generic irq chip\n"); goto out_free_domain; } /* Set the IRQ chaining logic */ - irq_set_chained_handler_and_data(data->parent_irq, + irq_set_chained_handler_and_data(parent_irq, brcmstb_l2_intc_irq_handle, data); - gc = irq_get_domain_generic_chip(data->domain, 0); - gc->reg_base = data->base; - gc->private = data; - ct = gc->chip_types; - - ct->chip.irq_ack = irq_gc_ack_set_bit; - ct->regs.ack = CPU_CLEAR; + data->gc = irq_get_domain_generic_chip(data->domain, 0); + data->gc->reg_base = base; + data->gc->private = data; + data->status_offset = init_params->cpu_status; + data->mask_offset = init_params->cpu_mask_status; + + ct = data->gc->chip_types; + + if (init_params->cpu_clear >= 0) { + ct->regs.ack = init_params->cpu_clear; + ct->chip.irq_ack = irq_gc_ack_set_bit; + ct->chip.irq_mask_ack = brcmstb_l2_mask_and_ack; + } else { + /* No Ack - but still slightly more efficient to define this */ + ct->chip.irq_mask_ack = irq_gc_mask_disable_reg; + } ct->chip.irq_mask = irq_gc_mask_disable_reg; - ct->regs.disable = CPU_MASK_SET; + ct->regs.disable = init_params->cpu_mask_set; + ct->regs.mask = init_params->cpu_mask_status; ct->chip.irq_unmask = irq_gc_unmask_enable_reg; - ct->regs.enable = CPU_MASK_CLEAR; + ct->regs.enable = init_params->cpu_mask_clear; ct->chip.irq_suspend = brcmstb_l2_intc_suspend; ct->chip.irq_resume = brcmstb_l2_intc_resume; @@ -195,21 +258,35 @@ static int __init brcmstb_l2_intc_of_init(struct device_node *np, /* This IRQ chip can wake the system, set all child interrupts * in wake_enabled mask */ - gc->wake_enabled = 0xffffffff; + data->gc->wake_enabled = 0xffffffff; ct->chip.irq_set_wake = irq_gc_set_wake; } pr_info("registered L2 intc (mem: 0x%p, parent irq: %d)\n", - data->base, data->parent_irq); + base, parent_irq); return 0; out_free_domain: irq_domain_remove(data->domain); out_unmap: - iounmap(data->base); + iounmap(base); out_free: kfree(data); return ret; } -IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_intc_of_init); + +int __init brcmstb_l2_edge_intc_of_init(struct device_node *np, + struct device_node *parent) +{ + return brcmstb_l2_intc_of_init(np, parent, &l2_edge_intc_init); +} +IRQCHIP_DECLARE(brcmstb_l2_intc, "brcm,l2-intc", brcmstb_l2_edge_intc_of_init); + +int __init brcmstb_l2_lvl_intc_of_init(struct 
device_node *np, + struct device_node *parent) +{ + return brcmstb_l2_intc_of_init(np, parent, &l2_lvl_intc_init); +} +IRQCHIP_DECLARE(bcm7271_l2_intc, "brcm,bcm7271-l2-intc", + brcmstb_l2_lvl_intc_of_init); diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c index 9ae71804b5dd..30017df5b54c 100644 --- a/drivers/irqchip/irq-gic-common.c +++ b/drivers/irqchip/irq-gic-common.c @@ -40,8 +40,9 @@ void gic_enable_quirks(u32 iidr, const struct gic_quirk *quirks, for (; quirks->desc; quirks++) { if (quirks->iidr != (quirks->mask & iidr)) continue; - quirks->init(data); - pr_info("GIC: enabling workaround for %s\n", quirks->desc); + if (quirks->init(data)) + pr_info("GIC: enabling workaround for %s\n", + quirks->desc); } } diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h index 205e5fddf6da..3919cd7c5285 100644 --- a/drivers/irqchip/irq-gic-common.h +++ b/drivers/irqchip/irq-gic-common.h @@ -23,7 +23,7 @@ struct gic_quirk { const char *desc; - void (*init)(void *data); + bool (*init)(void *data); u32 iidr; u32 mask; }; diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index e88395605e32..4039e64cd342 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -83,6 +83,8 @@ struct its_baser { u32 psz; }; +struct its_device; + /* * The ITS structure - contains most of the infrastructure, with the * top-level MSI domain, the command queue, the collections, and the @@ -97,12 +99,18 @@ struct its_node { struct its_cmd_block *cmd_write; struct its_baser tables[GITS_BASER_NR_REGS]; struct its_collection *collections; + struct fwnode_handle *fwnode_handle; + u64 (*get_msi_base)(struct its_device *its_dev); struct list_head its_device_list; u64 flags; + unsigned long list_nr; u32 ite_size; u32 device_ids; int numa_node; + unsigned int msi_domain_flags; + u32 pre_its_base; /* for Socionext Synquacer */ bool is_v4; + int vlpi_redist_offset; }; #define ITS_ITT_ALIGN SZ_256 @@ -152,12 +160,6 @@ static DEFINE_SPINLOCK(its_lock); static struct rdists *gic_rdists; static struct irq_domain *its_parent; -/* - * We have a maximum number of 16 ITSs in the whole system if we're - * using the ITSList mechanism - */ -#define ITS_LIST_MAX 16 - static unsigned long its_list_map; static u16 vmovp_seq_num; static DEFINE_RAW_SPINLOCK(vmovp_lock); @@ -272,10 +274,12 @@ struct its_cmd_block { #define ITS_CMD_QUEUE_SZ SZ_64K #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) -typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *, +typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, + struct its_cmd_block *, struct its_cmd_desc *); -typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_cmd_block *, +typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, + struct its_cmd_block *, struct its_cmd_desc *); static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) @@ -379,7 +383,8 @@ static inline void its_fixup_cmd(struct its_cmd_block *cmd) cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); } -static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, +static struct its_collection *its_build_mapd_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { unsigned long itt_addr; @@ -399,7 +404,8 @@ static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, return NULL; } -static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, +static struct 
its_collection *its_build_mapc_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { its_encode_cmd(cmd, GITS_CMD_MAPC); @@ -412,7 +418,8 @@ static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, return desc->its_mapc_cmd.col; } -static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd, +static struct its_collection *its_build_mapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { struct its_collection *col; @@ -431,7 +438,8 @@ static struct its_collection *its_build_mapti_cmd(struct its_cmd_block *cmd, return col; } -static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, +static struct its_collection *its_build_movi_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { struct its_collection *col; @@ -449,7 +457,8 @@ static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, return col; } -static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, +static struct its_collection *its_build_discard_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { struct its_collection *col; @@ -466,7 +475,8 @@ static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, return col; } -static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, +static struct its_collection *its_build_inv_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { struct its_collection *col; @@ -483,7 +493,8 @@ static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, return col; } -static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd, +static struct its_collection *its_build_int_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { struct its_collection *col; @@ -500,7 +511,8 @@ static struct its_collection *its_build_int_cmd(struct its_cmd_block *cmd, return col; } -static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd, +static struct its_collection *its_build_clear_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { struct its_collection *col; @@ -517,7 +529,8 @@ static struct its_collection *its_build_clear_cmd(struct its_cmd_block *cmd, return col; } -static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, +static struct its_collection *its_build_invall_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { its_encode_cmd(cmd, GITS_CMD_INVALL); @@ -528,7 +541,8 @@ static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, return NULL; } -static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd, +static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { its_encode_cmd(cmd, GITS_CMD_VINVALL); @@ -539,17 +553,20 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_cmd_block *cmd, return desc->its_vinvall_cmd.vpe; } -static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd, +static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { unsigned long vpt_addr; + u64 target; vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); + target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; its_encode_cmd(cmd, GITS_CMD_VMAPP); its_encode_vpeid(cmd, 
desc->its_vmapp_cmd.vpe->vpe_id); its_encode_valid(cmd, desc->its_vmapp_cmd.valid); - its_encode_target(cmd, desc->its_vmapp_cmd.col->target_address); + its_encode_target(cmd, target); its_encode_vpt_addr(cmd, vpt_addr); its_encode_vpt_size(cmd, LPI_NRBITS - 1); @@ -558,7 +575,8 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_cmd_block *cmd, return desc->its_vmapp_cmd.vpe; } -static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd, +static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { u32 db; @@ -580,7 +598,8 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_cmd_block *cmd, return desc->its_vmapti_cmd.vpe; } -static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd, +static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { u32 db; @@ -602,14 +621,18 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_cmd_block *cmd, return desc->its_vmovi_cmd.vpe; } -static struct its_vpe *its_build_vmovp_cmd(struct its_cmd_block *cmd, +static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, + struct its_cmd_block *cmd, struct its_cmd_desc *desc) { + u64 target; + + target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; its_encode_cmd(cmd, GITS_CMD_VMOVP); its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num); its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list); its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id); - its_encode_target(cmd, desc->its_vmovp_cmd.col->target_address); + its_encode_target(cmd, target); its_fixup_cmd(cmd); @@ -688,9 +711,9 @@ static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) dsb(ishst); } -static void its_wait_for_range_completion(struct its_node *its, - struct its_cmd_block *from, - struct its_cmd_block *to) +static int its_wait_for_range_completion(struct its_node *its, + struct its_cmd_block *from, + struct its_cmd_block *to) { u64 rd_idx, from_idx, to_idx; u32 count = 1000000; /* 1s! 
*/ @@ -711,12 +734,15 @@ static void its_wait_for_range_completion(struct its_node *its, count--; if (!count) { - pr_err_ratelimited("ITS queue timeout\n"); - return; + pr_err_ratelimited("ITS queue timeout (%llu %llu %llu)\n", + from_idx, to_idx, rd_idx); + return -1; } cpu_relax(); udelay(1); } + + return 0; } /* Warning, macro hell follows */ @@ -736,7 +762,7 @@ void name(struct its_node *its, \ raw_spin_unlock_irqrestore(&its->lock, flags); \ return; \ } \ - sync_obj = builder(cmd, desc); \ + sync_obj = builder(its, cmd, desc); \ its_flush_cmd(its, cmd); \ \ if (sync_obj) { \ @@ -744,7 +770,7 @@ void name(struct its_node *its, \ if (!sync_cmd) \ goto post; \ \ - buildfn(sync_cmd, sync_obj); \ + buildfn(its, sync_cmd, sync_obj); \ its_flush_cmd(its, sync_cmd); \ } \ \ @@ -752,10 +778,12 @@ post: \ next_cmd = its_post_commands(its); \ raw_spin_unlock_irqrestore(&its->lock, flags); \ \ - its_wait_for_range_completion(its, cmd, next_cmd); \ + if (its_wait_for_range_completion(its, cmd, next_cmd)) \ + pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ } -static void its_build_sync_cmd(struct its_cmd_block *sync_cmd, +static void its_build_sync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, struct its_collection *sync_col) { its_encode_cmd(sync_cmd, GITS_CMD_SYNC); @@ -767,7 +795,8 @@ static void its_build_sync_cmd(struct its_cmd_block *sync_cmd, static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, struct its_collection, its_build_sync_cmd) -static void its_build_vsync_cmd(struct its_cmd_block *sync_cmd, +static void its_build_vsync_cmd(struct its_node *its, + struct its_cmd_block *sync_cmd, struct its_vpe *sync_vpe) { its_encode_cmd(sync_cmd, GITS_CMD_VSYNC); @@ -899,21 +928,16 @@ static void its_send_vmovi(struct its_device *dev, u32 id) its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc); } -static void its_send_vmapp(struct its_vpe *vpe, bool valid) +static void its_send_vmapp(struct its_node *its, + struct its_vpe *vpe, bool valid) { struct its_cmd_desc desc; - struct its_node *its; desc.its_vmapp_cmd.vpe = vpe; desc.its_vmapp_cmd.valid = valid; + desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; - list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) - continue; - - desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; - its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); - } + its_send_single_vcommand(its, its_build_vmapp_cmd, &desc); } static void its_send_vmovp(struct its_vpe *vpe) @@ -951,6 +975,9 @@ static void its_send_vmovp(struct its_vpe *vpe) if (!its->is_v4) continue; + if (!vpe->its_vm->vlpi_count[its->list_nr]) + continue; + desc.its_vmovp_cmd.col = &its->collections[col_id]; its_send_single_vcommand(its, its_build_vmovp_cmd, &desc); } @@ -958,18 +985,12 @@ static void its_send_vmovp(struct its_vpe *vpe) raw_spin_unlock_irqrestore(&vmovp_lock, flags); } -static void its_send_vinvall(struct its_vpe *vpe) +static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) { struct its_cmd_desc desc; - struct its_node *its; desc.its_vinvall_cmd.vpe = vpe; - - list_for_each_entry(its, &its_nodes, entry) { - if (!its->is_v4) - continue; - its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); - } + its_send_single_vcommand(its, its_build_vinvall_cmd, &desc); } /* @@ -991,9 +1012,15 @@ static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) if (irqd_is_forwarded_to_vcpu(d)) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); + struct 
its_vlpi_map *map; prop_page = its_dev->event_map.vm->vprop_page; - hwirq = its_dev->event_map.vlpi_maps[event].vintid; + map = &its_dev->event_map.vlpi_maps[event]; + hwirq = map->vintid; + + /* Remember the updated property */ + map->properties &= ~clr; + map->properties |= set | LPI_PROP_GROUP1; } else { prop_page = gic_rdists->prop_page; hwirq = d->hwirq; @@ -1099,6 +1126,13 @@ static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, return IRQ_SET_MASK_OK_DONE; } +static u64 its_irq_get_msi_base(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + return its->phys_base + GITS_TRANSLATER; +} + static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); @@ -1106,7 +1140,7 @@ static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) u64 addr; its = its_dev->its; - addr = its->phys_base + GITS_TRANSLATER; + addr = its->get_msi_base(its_dev); msg->address_lo = lower_32_bits(addr); msg->address_hi = upper_32_bits(addr); @@ -1133,6 +1167,60 @@ static int its_irq_set_irqchip_state(struct irq_data *d, return 0; } +static void its_map_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + /* Not using the ITS list? Everything is always mapped. */ + if (!its_list_map) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + /* + * If the VM wasn't mapped yet, iterate over the vpes and get + * them mapped now. + */ + vm->vlpi_count[its->list_nr]++; + + if (vm->vlpi_count[its->list_nr] == 1) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) { + struct its_vpe *vpe = vm->vpes[i]; + struct irq_data *d = irq_get_irq_data(vpe->irq); + + /* Map the VPE to the first possible CPU */ + vpe->col_idx = cpumask_first(cpu_online_mask); + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + } + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + +static void its_unmap_vm(struct its_node *its, struct its_vm *vm) +{ + unsigned long flags; + + /* Not using the ITS list? Everything is always mapped. */ + if (!its_list_map) + return; + + raw_spin_lock_irqsave(&vmovp_lock, flags); + + if (!--vm->vlpi_count[its->list_nr]) { + int i; + + for (i = 0; i < vm->nr_vpes; i++) + its_send_vmapp(its, vm->vpes[i], false); + } + + raw_spin_unlock_irqrestore(&vmovp_lock, flags); +} + static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); @@ -1168,12 +1256,23 @@ static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) /* Already mapped, move it around */ its_send_vmovi(its_dev, event); } else { + /* Ensure all the VPEs are mapped on this ITS */ + its_map_vm(its_dev->its, info->map->vm); + + /* + * Flag the interrupt as forwarded so that we can + * start poking the virtual property table. 
+ */ + irqd_set_forwarded_to_vcpu(d); + + /* Write out the property to the prop table */ + lpi_write_config(d, 0xff, info->map->properties); + /* Drop the physical mapping */ its_send_discard(its_dev, event); /* and install the virtual one */ its_send_vmapti(its_dev, event); - irqd_set_forwarded_to_vcpu(d); /* Increment the number of VLPIs */ its_dev->event_map.nr_vlpis++; @@ -1229,6 +1328,9 @@ static int its_vlpi_unmap(struct irq_data *d) LPI_PROP_ENABLED | LPI_PROP_GROUP1)); + /* Potentially unmap the VM from this ITS */ + its_unmap_vm(its_dev->its, its_dev->event_map.vm); + /* * Drop the refcount and make the device available again if * this was the last VLPI. @@ -1669,23 +1771,14 @@ static void its_free_tables(struct its_node *its) static int its_alloc_tables(struct its_node *its) { - u64 typer = gic_read_typer(its->base + GITS_TYPER); - u32 ids = GITS_TYPER_DEVBITS(typer); u64 shr = GITS_BASER_InnerShareable; u64 cache = GITS_BASER_RaWaWb; u32 psz = SZ_64K; int err, i; - if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) { - /* - * erratum 22375: only alloc 8MB table size - * erratum 24313: ignore memory access type - */ - cache = GITS_BASER_nCnB; - ids = 0x14; /* 20 bits, 8MB */ - } - - its->device_ids = ids; + if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) + /* erratum 24313: ignore memory access type */ + cache = GITS_BASER_nCnB; for (i = 0; i < GITS_BASER_NR_REGS; i++) { struct its_baser *baser = its->tables + i; @@ -2209,8 +2302,8 @@ static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, return 0; } -static void its_irq_domain_activate(struct irq_domain *domain, - struct irq_data *d) +static int its_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool early) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); u32 event = its_get_event_id(d); @@ -2228,6 +2321,7 @@ static void its_irq_domain_activate(struct irq_domain *domain, /* Map the GIC IRQ and event to the device */ its_send_mapti(its_dev, d->hwirq, event); + return 0; } static void its_irq_domain_deactivate(struct irq_domain *domain, @@ -2394,6 +2488,8 @@ static int its_vpe_set_affinity(struct irq_data *d, its_vpe_db_proxy_move(vpe, from, cpu); } + irq_data_update_effective_affinity(d, cpumask_of(cpu)); + return IRQ_SET_MASK_OK_DONE; } @@ -2461,6 +2557,26 @@ static void its_vpe_deschedule(struct its_vpe *vpe) } } +static void its_vpe_invall(struct its_vpe *vpe) +{ + struct its_node *its; + + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) + continue; + + /* + * Sending a VINVALL to a single ITS is enough, as all + * we need is to reach the redistributors. + */ + its_send_vinvall(its, vpe); + return; + } +} + static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); @@ -2476,7 +2592,7 @@ static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) return 0; case INVALL_VPE: - its_send_vinvall(vpe); + its_vpe_invall(vpe); return 0; default: @@ -2701,23 +2817,51 @@ static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq return err; } -static void its_vpe_irq_domain_activate(struct irq_domain *domain, - struct irq_data *d) +static int its_vpe_irq_domain_activate(struct irq_domain *domain, + struct irq_data *d, bool early) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* If we use the list map, we issue VMAPP on demand... 
*/ + if (its_list_map) + return 0; /* Map the VPE to the first possible CPU */ vpe->col_idx = cpumask_first(cpu_online_mask); - its_send_vmapp(vpe, true); - its_send_vinvall(vpe); + + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + its_send_vmapp(its, vpe, true); + its_send_vinvall(its, vpe); + } + + irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); + + return 0; } static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, struct irq_data *d) { struct its_vpe *vpe = irq_data_get_irq_chip_data(d); + struct its_node *its; + + /* + * If we use the list map, we unmap the VPE once no VLPIs are + * associated with the VM. + */ + if (its_list_map) + return; - its_send_vmapp(vpe, false); + list_for_each_entry(its, &its_nodes, entry) { + if (!its->is_v4) + continue; + + its_send_vmapp(its, vpe, false); + } } static const struct irq_domain_ops its_vpe_domain_ops = { @@ -2760,26 +2904,85 @@ static int its_force_quiescent(void __iomem *base) } } -static void __maybe_unused its_enable_quirk_cavium_22375(void *data) +static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) { struct its_node *its = data; + /* erratum 22375: only alloc 8MB table size */ + its->device_ids = 0x14; /* 20 bits, 8MB */ its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; + + return true; } -static void __maybe_unused its_enable_quirk_cavium_23144(void *data) +static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) { struct its_node *its = data; its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; + + return true; } -static void __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) +static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) { struct its_node *its = data; /* On QDF2400, the size of the ITE is 16Bytes */ its->ite_size = 16; + + return true; +} + +static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) +{ + struct its_node *its = its_dev->its; + + /* + * The Socionext Synquacer SoC has a so-called 'pre-ITS', + * which maps 32-bit writes targeted at a separate window of + * size '4 << device_id_bits' onto writes to GITS_TRANSLATER + * with device ID taken from bits [device_id_bits + 1:2] of + * the window offset. + */ + return its->pre_its_base + (its_dev->device_id << 2); +} + +static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) +{ + struct its_node *its = data; + u32 pre_its_window[2]; + u32 ids; + + if (!fwnode_property_read_u32_array(its->fwnode_handle, + "socionext,synquacer-pre-its", + pre_its_window, + ARRAY_SIZE(pre_its_window))) { + + its->pre_its_base = pre_its_window[0]; + its->get_msi_base = its_irq_get_msi_base_pre_its; + + ids = ilog2(pre_its_window[1]) - 2; + if (its->device_ids > ids) + its->device_ids = ids; + + /* the pre-ITS breaks isolation, so disable MSI remapping */ + its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP; + return true; + } + return false; +} + +static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) +{ + struct its_node *its = data; + + /* + * Hip07 insists on using the wrong address for the VLPI + * page. Trick it into doing the right thing... 
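To make the Socionext 'pre-ITS' comment above concrete (an illustrative aside, not part of the patch): the quirk derives the usable device-ID width from the window size as ids = ilog2(window_size) - 2, since the window spans 4 << device_id_bits bytes with one 32-bit doorbell per device ID, and each device then writes to pre_its_base + (device_id << 2). A small sketch of that arithmetic, with made-up helper names:

#include <linux/log2.h>
#include <linux/types.h>

/* Doorbell address a given device writes for a pre-ITS window. */
static u64 pre_its_doorbell(u64 pre_its_base, u32 device_id)
{
	return pre_its_base + ((u64)device_id << 2);
}

/* Device-ID bits that fit in a pre-ITS window of the given size. */
static u32 pre_its_device_id_bits(u32 window_size)
{
	/* 4 bytes per device ID, so the window covers window_size / 4 IDs */
	return ilog2(window_size) - 2;
}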
+ */ + its->vlpi_redist_offset = SZ_128K; + return true; } static const struct gic_quirk its_quirks[] = { @@ -2807,6 +3010,27 @@ static const struct gic_quirk its_quirks[] = { .init = its_enable_quirk_qdf2400_e0065, }, #endif +#ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS + { + /* + * The Socionext Synquacer SoC incorporates ARM's own GIC-500 + * implementation, but with a 'pre-ITS' added that requires + * special handling in software. + */ + .desc = "ITS: Socionext Synquacer pre-ITS", + .iidr = 0x0001143b, + .mask = 0xffffffff, + .init = its_enable_quirk_socionext_synquacer, + }, +#endif +#ifdef CONFIG_HISILICON_ERRATUM_161600802 + { + .desc = "ITS: Hip07 erratum 161600802", + .iidr = 0x00000004, + .mask = 0xffffffff, + .init = its_enable_quirk_hip07_161600802, + }, +#endif { } }; @@ -2835,7 +3059,7 @@ static int its_init_domain(struct fwnode_handle *handle, struct its_node *its) inner_domain->parent = its_parent; irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); - inner_domain->flags |= IRQ_DOMAIN_FLAG_MSI_REMAP; + inner_domain->flags |= its->msi_domain_flags; info->ops = &its_msi_domain_ops; info->data = its; inner_domain->host_data = info; @@ -2896,8 +3120,8 @@ static int __init its_compute_its_list_map(struct resource *res, * locking. Should this change, we should address * this. */ - its_number = find_first_zero_bit(&its_list_map, ITS_LIST_MAX); - if (its_number >= ITS_LIST_MAX) { + its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); + if (its_number >= GICv4_ITS_LIST_MAX) { pr_err("ITS@%pa: No ITSList entry available!\n", &res->start); return -EINVAL; @@ -2965,6 +3189,7 @@ static int __init its_probe_one(struct resource *res, its->base = its_base; its->phys_base = res->start; its->ite_size = GITS_TYPER_ITT_ENTRY_SIZE(typer); + its->device_ids = GITS_TYPER_DEVBITS(typer); its->is_v4 = !!(typer & GITS_TYPER_VLPIS); if (its->is_v4) { if (!(typer & GITS_TYPER_VMOVP)) { @@ -2972,6 +3197,8 @@ static int __init its_probe_one(struct resource *res, if (err < 0) goto out_free_its; + its->list_nr = err; + pr_info("ITS@%pa: Using ITS number %d\n", &res->start, err); } else { @@ -2988,6 +3215,9 @@ static int __init its_probe_one(struct resource *res, goto out_free_its; } its->cmd_write = its->cmd_base; + its->fwnode_handle = handle; + its->get_msi_base = its_irq_get_msi_base; + its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP; its_enable_quirks(its); diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index b5df99c6f680..b54b55597ffb 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -55,6 +55,7 @@ struct gic_chip_data { struct irq_domain *domain; u64 redist_stride; u32 nr_redist_regions; + bool has_rss; unsigned int irq_nr; struct partition_desc *ppi_descs[16]; }; @@ -63,7 +64,9 @@ static struct gic_chip_data gic_data __read_mostly; static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE; static struct gic_kvm_info gic_v3_kvm_info; +static DEFINE_PER_CPU(bool, has_rss); +#define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4) #define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) @@ -526,6 +529,10 @@ static void gic_update_vlpi_properties(void) static void gic_cpu_sys_reg_init(void) { + int i, cpu = smp_processor_id(); + u64 mpidr = cpu_logical_map(cpu); + u64 need_rss = MPIDR_RS(mpidr); + /* * Need to check that the SRE bit has actually been set. 
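As a hedged aside on the Range Selector (RSS) support being added to irq-gic-v3.c in this hunk: with RSS, an SGI's 16-bit target list only covers the Aff0 values RS * 16 through RS * 16 + 15, which is why MPIDR_RS() extracts Aff0[7:4] and why the old "more than 16 CPUs per cluster" limit can be dropped. The sketch below shows only the RS and target-list contribution of a single CPU (the affinity and SGI-ID fields are built as in gic_send_sgi()); the helper name is made up, and it assumes the MPIDR_RS() and ICC_SGI1R_* definitions from this hunk and the arch GIC headers are in scope.

#include <linux/types.h>

static u64 sgi_target_bits(u64 mpidr)
{
	u64 rs  = MPIDR_RS(mpidr);	/* Aff0[7:4]: which range of 16  */
	u64 bit = mpidr & 0xf;		/* Aff0[3:0]: bit in target list */

	return (rs << ICC_SGI1R_RS_SHIFT) |
	       ((1ULL << bit) << ICC_SGI1R_TARGET_LIST_SHIFT);
}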
If * not, it means that SRE is disabled at EL2. We're going to @@ -557,6 +564,30 @@ static void gic_cpu_sys_reg_init(void) /* ... and let's hit the road... */ gic_write_grpen1(1); + + /* Keep the RSS capability status in per_cpu variable */ + per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS); + + /* Check all the CPUs have capable of sending SGIs to other CPUs */ + for_each_online_cpu(i) { + bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu); + + need_rss |= MPIDR_RS(cpu_logical_map(i)); + if (need_rss && (!have_rss)) + pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n", + cpu, (unsigned long)mpidr, + i, (unsigned long)cpu_logical_map(i)); + } + + /** + * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0, + * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED + * UNPREDICTABLE choice of : + * - The write is ignored. + * - The RS field is treated as 0. + */ + if (need_rss && (!gic_data.has_rss)) + pr_crit_once("RSS is required but GICD doesn't support it\n"); } static int gic_dist_supports_lpis(void) @@ -591,6 +622,9 @@ static void gic_cpu_init(void) #ifdef CONFIG_SMP +#define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) +#define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) + static int gic_starting_cpu(unsigned int cpu) { gic_cpu_init(); @@ -605,13 +639,6 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, u16 tlist = 0; while (cpu < nr_cpu_ids) { - /* - * If we ever get a cluster of more than 16 CPUs, just - * scream and skip that CPU. - */ - if (WARN_ON((mpidr & 0xff) >= 16)) - goto out; - tlist |= 1 << (mpidr & 0xf); next_cpu = cpumask_next(cpu, mask); @@ -621,7 +648,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, mpidr = cpu_logical_map(cpu); - if (cluster_id != (mpidr & ~0xffUL)) { + if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { cpu--; goto out; } @@ -643,6 +670,7 @@ static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | irq << ICC_SGI1R_SGI_ID_SHIFT | MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | + MPIDR_TO_SGI_RS(cluster_id) | tlist << ICC_SGI1R_TARGET_LIST_SHIFT); pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); @@ -663,7 +691,7 @@ static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) smp_wmb(); for_each_cpu(cpu, mask) { - unsigned long cluster_id = cpu_logical_map(cpu) & ~0xffUL; + u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); u16 tlist; tlist = gic_compute_target_list(&cpu, mask, cluster_id); @@ -1007,6 +1035,10 @@ static int __init gic_init_bases(void __iomem *dist_base, goto out_free; } + gic_data.has_rss = !!(typer & GICD_TYPER_RSS); + pr_info("Distributor has %sRange Selector support\n", + gic_data.has_rss ? 
"" : "no "); + set_handle_irq(gic_handle_irq); gic_update_vlpi_properties(); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 651d726e8b12..f641e8e2c78d 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -1256,6 +1256,19 @@ static void gic_teardown(struct gic_chip_data *gic) #ifdef CONFIG_OF static int gic_cnt __initdata; +static bool gicv2_force_probe; + +static int __init gicv2_force_probe_cfg(char *buf) +{ + return strtobool(buf, &gicv2_force_probe); +} +early_param("irqchip.gicv2_force_probe", gicv2_force_probe_cfg); + +static bool gic_check_gicv2(void __iomem *base) +{ + u32 val = readl_relaxed(base + GIC_CPU_IDENT); + return (val & 0xff0fff) == 0x02043B; +} static bool gic_check_eoimode(struct device_node *node, void __iomem **base) { @@ -1265,20 +1278,60 @@ static bool gic_check_eoimode(struct device_node *node, void __iomem **base) if (!is_hyp_mode_available()) return false; - if (resource_size(&cpuif_res) < SZ_8K) - return false; - if (resource_size(&cpuif_res) == SZ_128K) { - u32 val_low, val_high; + if (resource_size(&cpuif_res) < SZ_8K) { + void __iomem *alt; + /* + * Check for a stupid firmware that only exposes the + * first page of a GICv2. + */ + if (!gic_check_gicv2(*base)) + return false; + if (!gicv2_force_probe) { + pr_warn("GIC: GICv2 detected, but range too small and irqchip.gicv2_force_probe not set\n"); + return false; + } + + alt = ioremap(cpuif_res.start, SZ_8K); + if (!alt) + return false; + if (!gic_check_gicv2(alt + SZ_4K)) { + /* + * The first page was that of a GICv2, and + * the second was *something*. Let's trust it + * to be a GICv2, and update the mapping. + */ + pr_warn("GIC: GICv2 at %pa, but range is too small (broken DT?), assuming 8kB\n", + &cpuif_res.start); + iounmap(*base); + *base = alt; + return true; + } + + /* + * We detected *two* initial GICv2 pages in a + * row. Could be a GICv2 aliased over two 64kB + * pages. Update the resource, map the iospace, and + * pray. + */ + iounmap(alt); + alt = ioremap(cpuif_res.start, SZ_128K); + if (!alt) + return false; + pr_warn("GIC: Aliased GICv2 at %pa, trying to find the canonical range over 128kB\n", + &cpuif_res.start); + cpuif_res.end = cpuif_res.start + SZ_128K -1; + iounmap(*base); + *base = alt; + } + if (resource_size(&cpuif_res) == SZ_128K) { /* - * Verify that we have the first 4kB of a GIC400 + * Verify that we have the first 4kB of a GICv2 * aliased over the first 64kB by checking the * GICC_IIDR register on both ends. */ - val_low = readl_relaxed(*base + GIC_CPU_IDENT); - val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000); - if ((val_low & 0xffff0fff) != 0x0202043B || - val_low != val_high) + if (!gic_check_gicv2(*base) || + !gic_check_gicv2(*base + 0xf000)) return false; /* diff --git a/drivers/irqchip/irq-meson-gpio.c b/drivers/irqchip/irq-meson-gpio.c new file mode 100644 index 000000000000..a59bdbc0b9bb --- /dev/null +++ b/drivers/irqchip/irq-meson-gpio.c @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2015 Endless Mobile, Inc. + * Author: Carlo Caione <carlo@endlessm.com> + * Copyright (c) 2016 BayLibre, SAS. + * Author: Jerome Brunet <jbrunet@baylibre.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see <http://www.gnu.org/licenses/>. + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/io.h> +#include <linux/module.h> +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/irqchip.h> +#include <linux/of.h> +#include <linux/of_address.h> + +#define NUM_CHANNEL 8 +#define MAX_INPUT_MUX 256 + +#define REG_EDGE_POL 0x00 +#define REG_PIN_03_SEL 0x04 +#define REG_PIN_47_SEL 0x08 +#define REG_FILTER_SEL 0x0c + +#define REG_EDGE_POL_MASK(x) (BIT(x) | BIT(16 + (x))) +#define REG_EDGE_POL_EDGE(x) BIT(x) +#define REG_EDGE_POL_LOW(x) BIT(16 + (x)) +#define REG_PIN_SEL_SHIFT(x) (((x) % 4) * 8) +#define REG_FILTER_SEL_SHIFT(x) ((x) * 4) + +struct meson_gpio_irq_params { + unsigned int nr_hwirq; +}; + +static const struct meson_gpio_irq_params meson8_params = { + .nr_hwirq = 134, +}; + +static const struct meson_gpio_irq_params meson8b_params = { + .nr_hwirq = 119, +}; + +static const struct meson_gpio_irq_params gxbb_params = { + .nr_hwirq = 133, +}; + +static const struct meson_gpio_irq_params gxl_params = { + .nr_hwirq = 110, +}; + +static const struct of_device_id meson_irq_gpio_matches[] = { + { .compatible = "amlogic,meson8-gpio-intc", .data = &meson8_params }, + { .compatible = "amlogic,meson8b-gpio-intc", .data = &meson8b_params }, + { .compatible = "amlogic,meson-gxbb-gpio-intc", .data = &gxbb_params }, + { .compatible = "amlogic,meson-gxl-gpio-intc", .data = &gxl_params }, + { } +}; + +struct meson_gpio_irq_controller { + unsigned int nr_hwirq; + void __iomem *base; + u32 channel_irqs[NUM_CHANNEL]; + DECLARE_BITMAP(channel_map, NUM_CHANNEL); + spinlock_t lock; +}; + +static void meson_gpio_irq_update_bits(struct meson_gpio_irq_controller *ctl, + unsigned int reg, u32 mask, u32 val) +{ + u32 tmp; + + tmp = readl_relaxed(ctl->base + reg); + tmp &= ~mask; + tmp |= val; + writel_relaxed(tmp, ctl->base + reg); +} + +static unsigned int meson_gpio_irq_channel_to_reg(unsigned int channel) +{ + return (channel < 4) ? REG_PIN_03_SEL : REG_PIN_47_SEL; +} + +static int +meson_gpio_irq_request_channel(struct meson_gpio_irq_controller *ctl, + unsigned long hwirq, + u32 **channel_hwirq) +{ + unsigned int reg, idx; + + spin_lock(&ctl->lock); + + /* Find a free channel */ + idx = find_first_zero_bit(ctl->channel_map, NUM_CHANNEL); + if (idx >= NUM_CHANNEL) { + spin_unlock(&ctl->lock); + pr_err("No channel available\n"); + return -ENOSPC; + } + + /* Mark the channel as used */ + set_bit(idx, ctl->channel_map); + + /* + * Setup the mux of the channel to route the signal of the pad + * to the appropriate input of the GIC + */ + reg = meson_gpio_irq_channel_to_reg(idx); + meson_gpio_irq_update_bits(ctl, reg, + 0xff << REG_PIN_SEL_SHIFT(idx), + hwirq << REG_PIN_SEL_SHIFT(idx)); + + /* + * Get the hwirq number assigned to this channel through + * a pointer the channel_irq table. The added benifit of this + * method is that we can also retrieve the channel index with + * it, using the table base. 
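A minimal sketch of the channel-to-mux-field mapping used just above, assuming the REG_PIN_* macros and meson_gpio_irq_channel_to_reg() defined earlier in this file; meson_mux_field() itself is a hypothetical helper, not part of the patch:

	/*
	 * Channel idx 0..7 selects one byte-wide mux field: channels 0-3 live
	 * in REG_PIN_03_SEL, channels 4-7 in REG_PIN_47_SEL, at bit offset
	 * (idx % 4) * 8.
	 */
	static void meson_mux_field(unsigned int idx, unsigned int *reg,
				    u32 *mask, unsigned int *shift)
	{
		*reg   = meson_gpio_irq_channel_to_reg(idx);	/* 0x04 or 0x08 */
		*shift = REG_PIN_SEL_SHIFT(idx);		/* 0, 8, 16 or 24 */
		*mask  = 0xff << *shift;
	}
	/* e.g. idx = 5, hwirq = 42: the request path above ends up doing
	 * meson_gpio_irq_update_bits(ctl, REG_PIN_47_SEL, 0xff << 8, 42 << 8) */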
+ */ + *channel_hwirq = &(ctl->channel_irqs[idx]); + + spin_unlock(&ctl->lock); + + pr_debug("hwirq %lu assigned to channel %d - irq %u\n", + hwirq, idx, **channel_hwirq); + + return 0; +} + +static unsigned int +meson_gpio_irq_get_channel_idx(struct meson_gpio_irq_controller *ctl, + u32 *channel_hwirq) +{ + return channel_hwirq - ctl->channel_irqs; +} + +static void +meson_gpio_irq_release_channel(struct meson_gpio_irq_controller *ctl, + u32 *channel_hwirq) +{ + unsigned int idx; + + idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq); + clear_bit(idx, ctl->channel_map); +} + +static int meson_gpio_irq_type_setup(struct meson_gpio_irq_controller *ctl, + unsigned int type, + u32 *channel_hwirq) +{ + u32 val = 0; + unsigned int idx; + + idx = meson_gpio_irq_get_channel_idx(ctl, channel_hwirq); + + /* + * The controller has a filter block to operate in either LEVEL or + * EDGE mode, then signal is sent to the GIC. To enable LEVEL_LOW and + * EDGE_FALLING support (which the GIC does not support), the filter + * block is also able to invert the input signal it gets before + * providing it to the GIC. + */ + type &= IRQ_TYPE_SENSE_MASK; + + if (type == IRQ_TYPE_EDGE_BOTH) + return -EINVAL; + + if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) + val |= REG_EDGE_POL_EDGE(idx); + + if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) + val |= REG_EDGE_POL_LOW(idx); + + spin_lock(&ctl->lock); + + meson_gpio_irq_update_bits(ctl, REG_EDGE_POL, + REG_EDGE_POL_MASK(idx), val); + + spin_unlock(&ctl->lock); + + return 0; +} + +static unsigned int meson_gpio_irq_type_output(unsigned int type) +{ + unsigned int sense = type & IRQ_TYPE_SENSE_MASK; + + type &= ~IRQ_TYPE_SENSE_MASK; + + /* + * The polarity of the signal provided to the GIC should always + * be high. 
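For reference, the combined behaviour of meson_gpio_irq_type_setup() above and meson_gpio_irq_type_output(), whose body continues below, condenses to the following table (a sketch derived from the two functions, not text from the patch):

	/* requested trigger       filter block (EDGE_POL bits)    type handed to the GIC
	 * IRQ_TYPE_LEVEL_HIGH     level, non-inverted             IRQ_TYPE_LEVEL_HIGH
	 * IRQ_TYPE_LEVEL_LOW      level, inverted (POL_LOW)       IRQ_TYPE_LEVEL_HIGH
	 * IRQ_TYPE_EDGE_RISING    edge,  non-inverted             IRQ_TYPE_EDGE_RISING
	 * IRQ_TYPE_EDGE_FALLING   edge,  inverted (POL_LOW)       IRQ_TYPE_EDGE_RISING
	 * IRQ_TYPE_EDGE_BOTH      rejected (-EINVAL)              n/a
	 */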
+ */ + if (sense & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) + type |= IRQ_TYPE_LEVEL_HIGH; + else if (sense & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) + type |= IRQ_TYPE_EDGE_RISING; + + return type; +} + +static int meson_gpio_irq_set_type(struct irq_data *data, unsigned int type) +{ + struct meson_gpio_irq_controller *ctl = data->domain->host_data; + u32 *channel_hwirq = irq_data_get_irq_chip_data(data); + int ret; + + ret = meson_gpio_irq_type_setup(ctl, type, channel_hwirq); + if (ret) + return ret; + + return irq_chip_set_type_parent(data, + meson_gpio_irq_type_output(type)); +} + +static struct irq_chip meson_gpio_irq_chip = { + .name = "meson-gpio-irqchip", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_type = meson_gpio_irq_set_type, + .irq_retrigger = irq_chip_retrigger_hierarchy, +#ifdef CONFIG_SMP + .irq_set_affinity = irq_chip_set_affinity_parent, +#endif + .flags = IRQCHIP_SET_TYPE_MASKED, +}; + +static int meson_gpio_irq_domain_translate(struct irq_domain *domain, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) { + *hwirq = fwspec->param[0]; + *type = fwspec->param[1]; + return 0; + } + + return -EINVAL; +} + +static int meson_gpio_irq_allocate_gic_irq(struct irq_domain *domain, + unsigned int virq, + u32 hwirq, + unsigned int type) +{ + struct irq_fwspec fwspec; + + fwspec.fwnode = domain->parent->fwnode; + fwspec.param_count = 3; + fwspec.param[0] = 0; /* SPI */ + fwspec.param[1] = hwirq; + fwspec.param[2] = meson_gpio_irq_type_output(type); + + return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); +} + +static int meson_gpio_irq_domain_alloc(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs, + void *data) +{ + struct irq_fwspec *fwspec = data; + struct meson_gpio_irq_controller *ctl = domain->host_data; + unsigned long hwirq; + u32 *channel_hwirq; + unsigned int type; + int ret; + + if (WARN_ON(nr_irqs != 1)) + return -EINVAL; + + ret = meson_gpio_irq_domain_translate(domain, fwspec, &hwirq, &type); + if (ret) + return ret; + + ret = meson_gpio_irq_request_channel(ctl, hwirq, &channel_hwirq); + if (ret) + return ret; + + ret = meson_gpio_irq_allocate_gic_irq(domain, virq, + *channel_hwirq, type); + if (ret < 0) { + pr_err("failed to allocate gic irq %u\n", *channel_hwirq); + meson_gpio_irq_release_channel(ctl, channel_hwirq); + return ret; + } + + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, + &meson_gpio_irq_chip, channel_hwirq); + + return 0; +} + +static void meson_gpio_irq_domain_free(struct irq_domain *domain, + unsigned int virq, + unsigned int nr_irqs) +{ + struct meson_gpio_irq_controller *ctl = domain->host_data; + struct irq_data *irq_data; + u32 *channel_hwirq; + + if (WARN_ON(nr_irqs != 1)) + return; + + irq_domain_free_irqs_parent(domain, virq, 1); + + irq_data = irq_domain_get_irq_data(domain, virq); + channel_hwirq = irq_data_get_irq_chip_data(irq_data); + + meson_gpio_irq_release_channel(ctl, channel_hwirq); +} + +static const struct irq_domain_ops meson_gpio_irq_domain_ops = { + .alloc = meson_gpio_irq_domain_alloc, + .free = meson_gpio_irq_domain_free, + .translate = meson_gpio_irq_domain_translate, +}; + +static int __init meson_gpio_irq_parse_dt(struct device_node *node, + struct meson_gpio_irq_controller *ctl) +{ + const struct of_device_id *match; + const struct meson_gpio_irq_params *params; + int ret; + + match = 
of_match_node(meson_irq_gpio_matches, node); + if (!match) + return -ENODEV; + + params = match->data; + ctl->nr_hwirq = params->nr_hwirq; + + ret = of_property_read_variable_u32_array(node, + "amlogic,channel-interrupts", + ctl->channel_irqs, + NUM_CHANNEL, + NUM_CHANNEL); + if (ret < 0) { + pr_err("can't get %d channel interrupts\n", NUM_CHANNEL); + return ret; + } + + return 0; +} + +static int __init meson_gpio_irq_of_init(struct device_node *node, + struct device_node *parent) +{ + struct irq_domain *domain, *parent_domain; + struct meson_gpio_irq_controller *ctl; + int ret; + + if (!parent) { + pr_err("missing parent interrupt node\n"); + return -ENODEV; + } + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("unable to obtain parent domain\n"); + return -ENXIO; + } + + ctl = kzalloc(sizeof(*ctl), GFP_KERNEL); + if (!ctl) + return -ENOMEM; + + spin_lock_init(&ctl->lock); + + ctl->base = of_iomap(node, 0); + if (!ctl->base) { + ret = -ENOMEM; + goto free_ctl; + } + + ret = meson_gpio_irq_parse_dt(node, ctl); + if (ret) + goto free_channel_irqs; + + domain = irq_domain_create_hierarchy(parent_domain, 0, ctl->nr_hwirq, + of_node_to_fwnode(node), + &meson_gpio_irq_domain_ops, + ctl); + if (!domain) { + pr_err("failed to add domain\n"); + ret = -ENODEV; + goto free_channel_irqs; + } + + pr_info("%d to %d gpio interrupt mux initialized\n", + ctl->nr_hwirq, NUM_CHANNEL); + + return 0; + +free_channel_irqs: + iounmap(ctl->base); +free_ctl: + kfree(ctl); + + return ret; +} + +IRQCHIP_DECLARE(meson_gpio_intc, "amlogic,meson-gpio-intc", + meson_gpio_irq_of_init); diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c index c90976d7e53c..ef92a4d2038e 100644 --- a/drivers/irqchip/irq-mips-gic.c +++ b/drivers/irqchip/irq-mips-gic.c @@ -6,8 +6,12 @@ * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 
*/ + +#define pr_fmt(fmt) "irq-mips-gic: " fmt + #include <linux/bitmap.h> #include <linux/clocksource.h> +#include <linux/cpuhotplug.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> @@ -48,12 +52,16 @@ static DEFINE_SPINLOCK(gic_lock); static struct irq_domain *gic_irq_domain; static struct irq_domain *gic_ipi_domain; static int gic_shared_intrs; -static int gic_vpes; static unsigned int gic_cpu_pin; static unsigned int timer_cpu_pin; static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller; -DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS); -DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS); +static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS); +static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS); + +static struct gic_all_vpes_chip_data { + u32 map; + bool mask; +} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS]; static void gic_clear_pcpu_masks(unsigned int intr) { @@ -194,46 +202,46 @@ static void gic_ack_irq(struct irq_data *d) static int gic_set_type(struct irq_data *d, unsigned int type) { - unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq); + unsigned int irq, pol, trig, dual; unsigned long flags; - bool is_edge; + + irq = GIC_HWIRQ_TO_SHARED(d->hwirq); spin_lock_irqsave(&gic_lock, flags); switch (type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_FALLING: - change_gic_pol(irq, GIC_POL_FALLING_EDGE); - change_gic_trig(irq, GIC_TRIG_EDGE); - change_gic_dual(irq, GIC_DUAL_SINGLE); - is_edge = true; + pol = GIC_POL_FALLING_EDGE; + trig = GIC_TRIG_EDGE; + dual = GIC_DUAL_SINGLE; break; case IRQ_TYPE_EDGE_RISING: - change_gic_pol(irq, GIC_POL_RISING_EDGE); - change_gic_trig(irq, GIC_TRIG_EDGE); - change_gic_dual(irq, GIC_DUAL_SINGLE); - is_edge = true; + pol = GIC_POL_RISING_EDGE; + trig = GIC_TRIG_EDGE; + dual = GIC_DUAL_SINGLE; break; case IRQ_TYPE_EDGE_BOTH: - /* polarity is irrelevant in this case */ - change_gic_trig(irq, GIC_TRIG_EDGE); - change_gic_dual(irq, GIC_DUAL_DUAL); - is_edge = true; + pol = 0; /* Doesn't matter */ + trig = GIC_TRIG_EDGE; + dual = GIC_DUAL_DUAL; break; case IRQ_TYPE_LEVEL_LOW: - change_gic_pol(irq, GIC_POL_ACTIVE_LOW); - change_gic_trig(irq, GIC_TRIG_LEVEL); - change_gic_dual(irq, GIC_DUAL_SINGLE); - is_edge = false; + pol = GIC_POL_ACTIVE_LOW; + trig = GIC_TRIG_LEVEL; + dual = GIC_DUAL_SINGLE; break; case IRQ_TYPE_LEVEL_HIGH: default: - change_gic_pol(irq, GIC_POL_ACTIVE_HIGH); - change_gic_trig(irq, GIC_TRIG_LEVEL); - change_gic_dual(irq, GIC_DUAL_SINGLE); - is_edge = false; + pol = GIC_POL_ACTIVE_HIGH; + trig = GIC_TRIG_LEVEL; + dual = GIC_DUAL_SINGLE; break; } - if (is_edge) + change_gic_pol(irq, pol); + change_gic_trig(irq, trig); + change_gic_dual(irq, dual); + + if (trig == GIC_TRIG_EDGE) irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller, handle_edge_irq, NULL); else @@ -338,13 +346,17 @@ static struct irq_chip gic_local_irq_controller = { static void gic_mask_local_irq_all_vpes(struct irq_data *d) { - int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); - int i; + struct gic_all_vpes_chip_data *cd; unsigned long flags; + int intr, cpu; + + intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); + cd = irq_data_get_irq_chip_data(d); + cd->mask = false; spin_lock_irqsave(&gic_lock, flags); - for (i = 0; i < gic_vpes; i++) { - write_gic_vl_other(mips_cm_vp_id(i)); + for_each_online_cpu(cpu) { + write_gic_vl_other(mips_cm_vp_id(cpu)); write_gic_vo_rmask(BIT(intr)); } spin_unlock_irqrestore(&gic_lock, flags); @@ -352,22 +364,40 @@ static void gic_mask_local_irq_all_vpes(struct irq_data *d) static void gic_unmask_local_irq_all_vpes(struct irq_data *d) 
{ - int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); - int i; + struct gic_all_vpes_chip_data *cd; unsigned long flags; + int intr, cpu; + + intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); + cd = irq_data_get_irq_chip_data(d); + cd->mask = true; spin_lock_irqsave(&gic_lock, flags); - for (i = 0; i < gic_vpes; i++) { - write_gic_vl_other(mips_cm_vp_id(i)); + for_each_online_cpu(cpu) { + write_gic_vl_other(mips_cm_vp_id(cpu)); write_gic_vo_smask(BIT(intr)); } spin_unlock_irqrestore(&gic_lock, flags); } +static void gic_all_vpes_irq_cpu_online(struct irq_data *d) +{ + struct gic_all_vpes_chip_data *cd; + unsigned int intr; + + intr = GIC_HWIRQ_TO_LOCAL(d->hwirq); + cd = irq_data_get_irq_chip_data(d); + + write_gic_vl_map(intr, cd->map); + if (cd->mask) + write_gic_vl_smask(BIT(intr)); +} + static struct irq_chip gic_all_vpes_local_irq_controller = { - .name = "MIPS GIC Local", - .irq_mask = gic_mask_local_irq_all_vpes, - .irq_unmask = gic_unmask_local_irq_all_vpes, + .name = "MIPS GIC Local", + .irq_mask = gic_mask_local_irq_all_vpes, + .irq_unmask = gic_unmask_local_irq_all_vpes, + .irq_cpu_online = gic_all_vpes_irq_cpu_online, }; static void __gic_irq_dispatch(void) @@ -382,39 +412,6 @@ static void gic_irq_dispatch(struct irq_desc *desc) gic_handle_shared_int(true); } -static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq, - irq_hw_number_t hw) -{ - int intr = GIC_HWIRQ_TO_LOCAL(hw); - int i; - unsigned long flags; - u32 val; - - if (!gic_local_irq_is_routable(intr)) - return -EPERM; - - if (intr > GIC_LOCAL_INT_FDC) { - pr_err("Invalid local IRQ %d\n", intr); - return -EINVAL; - } - - if (intr == GIC_LOCAL_INT_TIMER) { - /* CONFIG_MIPS_CMP workaround (see __gic_init) */ - val = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin; - } else { - val = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin; - } - - spin_lock_irqsave(&gic_lock, flags); - for (i = 0; i < gic_vpes; i++) { - write_gic_vl_other(mips_cm_vp_id(i)); - write_gic_vo_map(intr, val); - } - spin_unlock_irqrestore(&gic_lock, flags); - - return 0; -} - static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw, unsigned int cpu) { @@ -457,7 +454,11 @@ static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr, static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq) { - int err; + struct gic_all_vpes_chip_data *cd; + unsigned long flags; + unsigned int intr; + int err, cpu; + u32 map; if (hwirq >= GIC_SHARED_HWIRQ_BASE) { /* verify that shared irqs don't conflict with an IPI irq */ @@ -474,8 +475,14 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, return gic_shared_irq_domain_map(d, virq, hwirq, 0); } - switch (GIC_HWIRQ_TO_LOCAL(hwirq)) { + intr = GIC_HWIRQ_TO_LOCAL(hwirq); + map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin; + + switch (intr) { case GIC_LOCAL_INT_TIMER: + /* CONFIG_MIPS_CMP workaround (see __gic_init) */ + map = GIC_MAP_PIN_MAP_TO_PIN | timer_cpu_pin; + /* fall-through */ case GIC_LOCAL_INT_PERFCTR: case GIC_LOCAL_INT_FDC: /* @@ -483,9 +490,11 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, * the rest of the MIPS kernel code does not use the * percpu IRQ API for them. 
*/ + cd = &gic_all_vpes_chip_data[intr]; + cd->map = map; err = irq_domain_set_hwirq_and_chip(d, virq, hwirq, &gic_all_vpes_local_irq_controller, - NULL); + cd); if (err) return err; @@ -504,7 +513,17 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq, break; } - return gic_local_irq_domain_map(d, virq, hwirq); + if (!gic_local_irq_is_routable(intr)) + return -EPERM; + + spin_lock_irqsave(&gic_lock, flags); + for_each_online_cpu(cpu) { + write_gic_vl_other(mips_cm_vp_id(cpu)); + write_gic_vo_map(intr, map); + } + spin_unlock_irqrestore(&gic_lock, flags); + + return 0; } static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq, @@ -636,11 +655,25 @@ static const struct irq_domain_ops gic_ipi_domain_ops = { .match = gic_ipi_domain_match, }; +static int gic_cpu_startup(unsigned int cpu) +{ + /* Enable or disable EIC */ + change_gic_vl_ctl(GIC_VX_CTL_EIC, + cpu_has_veic ? GIC_VX_CTL_EIC : 0); + + /* Clear all local IRQ masks (ie. disable all local interrupts) */ + write_gic_vl_rmask(~0); + + /* Invoke irq_cpu_online callbacks to enable desired interrupts */ + irq_cpu_online(); + + return 0; +} static int __init gic_of_init(struct device_node *node, struct device_node *parent) { - unsigned int cpu_vec, i, j, gicconfig, cpu, v[2]; + unsigned int cpu_vec, i, gicconfig, v[2], num_ipis; unsigned long reserved; phys_addr_t gic_base; struct resource res; @@ -655,7 +688,7 @@ static int __init gic_of_init(struct device_node *node, cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM)); if (cpu_vec == hweight_long(ST0_IM)) { - pr_err("No CPU vectors available for GIC\n"); + pr_err("No CPU vectors available\n"); return -ENODEV; } @@ -668,8 +701,10 @@ static int __init gic_of_init(struct device_node *node, gic_base = read_gcr_gic_base() & ~CM_GCR_GIC_BASE_GICEN; gic_len = 0x20000; + pr_warn("Using inherited base address %pa\n", + &gic_base); } else { - pr_err("Failed to get GIC memory range\n"); + pr_err("Failed to get memory range\n"); return -ENODEV; } } else { @@ -690,17 +725,7 @@ static int __init gic_of_init(struct device_node *node, gic_shared_intrs >>= __ffs(GIC_CONFIG_NUMINTERRUPTS); gic_shared_intrs = (gic_shared_intrs + 1) * 8; - gic_vpes = gicconfig & GIC_CONFIG_PVPS; - gic_vpes >>= __ffs(GIC_CONFIG_PVPS); - gic_vpes = gic_vpes + 1; - if (cpu_has_veic) { - /* Set EIC mode for all VPEs */ - for_each_present_cpu(cpu) { - write_gic_vl_other(mips_cm_vp_id(cpu)); - write_gic_vo_ctl(GIC_VX_CTL_EIC); - } - /* Always use vector 1 in EIC mode */ gic_cpu_pin = 0; timer_cpu_pin = gic_cpu_pin; @@ -737,7 +762,7 @@ static int __init gic_of_init(struct device_node *node, gic_shared_intrs, 0, &gic_irq_domain_ops, NULL); if (!gic_irq_domain) { - pr_err("Failed to add GIC IRQ domain"); + pr_err("Failed to add IRQ domain"); return -ENXIO; } @@ -746,7 +771,7 @@ static int __init gic_of_init(struct device_node *node, GIC_NUM_LOCAL_INTRS + gic_shared_intrs, node, &gic_ipi_domain_ops, NULL); if (!gic_ipi_domain) { - pr_err("Failed to add GIC IPI domain"); + pr_err("Failed to add IPI domain"); return -ENXIO; } @@ -756,10 +781,12 @@ static int __init gic_of_init(struct device_node *node, !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { bitmap_set(ipi_resrv, v[0], v[1]); } else { - /* Make the last 2 * gic_vpes available for IPIs */ - bitmap_set(ipi_resrv, - gic_shared_intrs - 2 * gic_vpes, - 2 * gic_vpes); + /* + * Reserve 2 interrupts per possible CPU/VP for use as IPIs, + * meeting the requirements of arch/mips SMP. 
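A quick worked example of the IPI reservation performed right after this comment, using hypothetical sizes:

	/* Assume gic_shared_intrs = 128 and 4 possible CPUs/VPs:
	 *   num_ipis = 2 * num_possible_cpus() = 8
	 *   bitmap_set(ipi_resrv, 128 - 8, 8)  -> shared interrupts 120..127 reserved
	 * The removed code reserved 2 * gic_vpes vectors at the same end of the
	 * range; keying the count off num_possible_cpus() yields an equivalent
	 * reservation without the gic_vpes bookkeeping.
	 */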
+ */ + num_ipis = 2 * num_possible_cpus(); + bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis); } bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS); @@ -773,15 +800,8 @@ static int __init gic_of_init(struct device_node *node, write_gic_rmask(i); } - for (i = 0; i < gic_vpes; i++) { - write_gic_vl_other(mips_cm_vp_id(i)); - for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) { - if (!gic_local_irq_is_routable(j)) - continue; - write_gic_vo_rmask(BIT(j)); - } - } - - return 0; + return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING, + "irqchip/mips/gic:starting", + gic_cpu_startup, NULL); } IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init); diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c index b04a8ac6e744..d360a6eddd6d 100644 --- a/drivers/irqchip/irq-omap-intc.c +++ b/drivers/irqchip/irq-omap-intc.c @@ -25,10 +25,6 @@ #include <linux/irqchip/irq-omap-intc.h> -/* Define these here for now until we drop all board-files */ -#define OMAP24XX_IC_BASE 0x480fe000 -#define OMAP34XX_IC_BASE 0x48200000 - /* selected INTC register offsets */ #define INTC_REVISION 0x0000 @@ -70,8 +66,8 @@ static struct omap_intc_regs intc_context; static struct irq_domain *domain; static void __iomem *omap_irq_base; -static int omap_nr_pending = 3; -static int omap_nr_irqs = 96; +static int omap_nr_pending; +static int omap_nr_irqs; static void intc_writel(u32 reg, u32 val) { @@ -364,14 +360,6 @@ omap_intc_handle_irq(struct pt_regs *regs) handle_domain_irq(domain, irqnr, regs); } -void __init omap3_init_irq(void) -{ - omap_nr_irqs = 96; - omap_nr_pending = 3; - omap_init_irq(OMAP34XX_IC_BASE, NULL); - set_handle_irq(omap_intc_handle_irq); -} - static int __init intc_of_init(struct device_node *node, struct device_node *parent) { diff --git a/drivers/irqchip/irq-ompic.c b/drivers/irqchip/irq-ompic.c new file mode 100644 index 000000000000..cf6d0c455518 --- /dev/null +++ b/drivers/irqchip/irq-ompic.c @@ -0,0 +1,202 @@ +/* + * Open Multi-Processor Interrupt Controller driver + * + * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi> + * Copyright (C) 2017 Stafford Horne <shorne@gmail.com> + * + * This file is licensed under the terms of the GNU General Public License + * version 2. This program is licensed "as is" without any warranty of any + * kind, whether express or implied. + * + * The ompic device handles IPI communication between cores in multi-core + * OpenRISC systems. + * + * Registers + * + * For each CPU the ompic has 2 registers. The control register for sending + * and acking IPIs and the status register for receiving IPIs. The register + * layouts are as follows: + * + * Control register + * +---------+---------+----------+---------+ + * | 31 | 30 | 29 .. 16 | 15 .. 0 | + * ----------+---------+----------+---------- + * | IRQ ACK | IRQ GEN | DST CORE | DATA | + * +---------+---------+----------+---------+ + * + * Status register + * +----------+-------------+----------+---------+ + * | 31 | 30 | 29 .. 16 | 15 .. 0 | + * -----------+-------------+----------+---------+ + * | Reserved | IRQ Pending | SRC CORE | DATA | + * +----------+-------------+----------+---------+ + * + * Architecture + * + * - The ompic generates a level interrupt to the CPU PIC when a message is + * ready. Messages are delivered via the memory bus. + * - The ompic does not have any interrupt input lines. + * - The ompic is wired to the same irq line on each core. + * - Devices are wired to the same irq line on each core. 
+ * + * +---------+ +---------+ + * | CPU | | CPU | + * | Core 0 |<==\ (memory access) /==>| Core 1 | + * | [ PIC ]| | | | [ PIC ]| + * +----^-^--+ | | +----^-^--+ + * | | v v | | + * <====|=|=================================|=|==> (memory bus) + * | | ^ ^ | | + * (ipi | +------|---------+--------|-------|-+ (device irq) + * irq | | | | | + * core0)| +------|---------|--------|-------+ (ipi irq core1) + * | | | | | + * +----o-o-+ | +--------+ | + * | ompic |<===/ | Device |<===/ + * | IPI | +--------+ + * +--------+* + * + */ + +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/interrupt.h> +#include <linux/smp.h> +#include <linux/of.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> + +#include <linux/irqchip.h> + +#define OMPIC_CPUBYTES 8 +#define OMPIC_CTRL(cpu) (0x0 + (cpu * OMPIC_CPUBYTES)) +#define OMPIC_STAT(cpu) (0x4 + (cpu * OMPIC_CPUBYTES)) + +#define OMPIC_CTRL_IRQ_ACK (1 << 31) +#define OMPIC_CTRL_IRQ_GEN (1 << 30) +#define OMPIC_CTRL_DST(cpu) (((cpu) & 0x3fff) << 16) + +#define OMPIC_STAT_IRQ_PENDING (1 << 30) + +#define OMPIC_DATA(x) ((x) & 0xffff) + +DEFINE_PER_CPU(unsigned long, ops); + +static void __iomem *ompic_base; + +static inline u32 ompic_readreg(void __iomem *base, loff_t offset) +{ + return ioread32be(base + offset); +} + +static void ompic_writereg(void __iomem *base, loff_t offset, u32 data) +{ + iowrite32be(data, base + offset); +} + +static void ompic_raise_softirq(const struct cpumask *mask, + unsigned int ipi_msg) +{ + unsigned int dst_cpu; + unsigned int src_cpu = smp_processor_id(); + + for_each_cpu(dst_cpu, mask) { + set_bit(ipi_msg, &per_cpu(ops, dst_cpu)); + + /* + * On OpenRISC the atomic set_bit() call implies a memory + * barrier. Otherwise we would need: smp_wmb(); paired + * with the read in ompic_ipi_handler. + */ + + ompic_writereg(ompic_base, OMPIC_CTRL(src_cpu), + OMPIC_CTRL_IRQ_GEN | + OMPIC_CTRL_DST(dst_cpu) | + OMPIC_DATA(1)); + } +} + +static irqreturn_t ompic_ipi_handler(int irq, void *dev_id) +{ + unsigned int cpu = smp_processor_id(); + unsigned long *pending_ops = &per_cpu(ops, cpu); + unsigned long ops; + + ompic_writereg(ompic_base, OMPIC_CTRL(cpu), OMPIC_CTRL_IRQ_ACK); + while ((ops = xchg(pending_ops, 0)) != 0) { + + /* + * On OpenRISC the atomic xchg() call implies a memory + * barrier. Otherwise we may need an smp_rmb(); paired + * with the write in ompic_raise_softirq. 
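To make the control-register write in ompic_raise_softirq() above concrete, here is the bit pattern for one hypothetical case (core 0 signalling core 2 with data 1), following the register layout documented at the top of the file:

	u32 val = OMPIC_CTRL_IRQ_GEN	/* bit 30: generate the IPI           */
		| OMPIC_CTRL_DST(2)	/* bits 29..16: destination core = 2  */
		| OMPIC_DATA(1);	/* bits 15..0: message data = 1       */
	/* val == (1 << 30) | (2 << 16) | 1 == 0x40020001, written to
	 * OMPIC_CTRL(0), i.e. offset 0x0 of the register block (the
	 * sender's own control slot). */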
+ */ + + do { + unsigned long ipi_msg; + + ipi_msg = __ffs(ops); + ops &= ~(1UL << ipi_msg); + + handle_IPI(ipi_msg); + } while (ops); + } + + return IRQ_HANDLED; +} + +static int __init ompic_of_init(struct device_node *node, + struct device_node *parent) +{ + struct resource res; + int irq; + int ret; + + /* Validate the DT */ + if (ompic_base) { + pr_err("ompic: duplicate ompic's are not supported"); + return -EEXIST; + } + + if (of_address_to_resource(node, 0, &res)) { + pr_err("ompic: reg property requires an address and size"); + return -EINVAL; + } + + if (resource_size(&res) < (num_possible_cpus() * OMPIC_CPUBYTES)) { + pr_err("ompic: reg size, currently %d must be at least %d", + resource_size(&res), + (num_possible_cpus() * OMPIC_CPUBYTES)); + return -EINVAL; + } + + /* Setup the device */ + ompic_base = ioremap(res.start, resource_size(&res)); + if (IS_ERR(ompic_base)) { + pr_err("ompic: unable to map registers"); + return PTR_ERR(ompic_base); + } + + irq = irq_of_parse_and_map(node, 0); + if (irq <= 0) { + pr_err("ompic: unable to parse device irq"); + ret = -EINVAL; + goto out_unmap; + } + + ret = request_irq(irq, ompic_ipi_handler, IRQF_PERCPU, + "ompic_ipi", NULL); + if (ret) + goto out_irq_disp; + + set_smp_cross_call(ompic_raise_softirq); + + return 0; + +out_irq_disp: + irq_dispose_mapping(irq); +out_unmap: + iounmap(ompic_base); + ompic_base = NULL; + return ret; +} +IRQCHIP_DECLARE(ompic, "openrisc,ompic", ompic_of_init); diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c index 713177d97c7a..06f29cf5018a 100644 --- a/drivers/irqchip/irq-renesas-intc-irqpin.c +++ b/drivers/irqchip/irq-renesas-intc-irqpin.c @@ -389,9 +389,8 @@ MODULE_DEVICE_TABLE(of, intc_irqpin_dt_ids); static int intc_irqpin_probe(struct platform_device *pdev) { - const struct intc_irqpin_config *config = NULL; + const struct intc_irqpin_config *config; struct device *dev = &pdev->dev; - const struct of_device_id *of_id; struct intc_irqpin_priv *p; struct intc_irqpin_iomem *i; struct resource *io[INTC_IRQPIN_REG_NR]; @@ -422,11 +421,9 @@ static int intc_irqpin_probe(struct platform_device *pdev) p->pdev = pdev; platform_set_drvdata(pdev, p); - of_id = of_match_device(intc_irqpin_dt_ids, dev); - if (of_id && of_id->data) { - config = of_id->data; + config = of_device_get_match_data(dev); + if (config) p->needs_clk = config->needs_clk; - } p->clk = devm_clk_get(dev, NULL); if (IS_ERR(p->clk)) { diff --git a/drivers/irqchip/irq-sni-exiu.c b/drivers/irqchip/irq-sni-exiu.c new file mode 100644 index 000000000000..1b6e2f7c59af --- /dev/null +++ b/drivers/irqchip/irq-sni-exiu.c @@ -0,0 +1,227 @@ +/* + * Driver for Socionext External Interrupt Unit (EXIU) + * + * Copyright (c) 2017 Linaro, Ltd. <ard.biesheuvel@linaro.org> + * + * Based on irq-tegra.c: + * Copyright (C) 2011 Google, Inc. + * Copyright (C) 2010,2013, NVIDIA Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
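For the resource_size() check in ompic_of_init() above, the arithmetic works out as follows (the CPU count here is hypothetical):

	/* Each CPU owns OMPIC_CPUBYTES = 8 bytes: control at cpu * 8 + 0 and
	 * status at cpu * 8 + 4.  A 4-CPU system therefore needs a "reg"
	 * window of at least 4 * 8 = 32 bytes; anything smaller is rejected
	 * with -EINVAL before the registers are mapped. */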
+ */ + +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/irqchip.h> +#include <linux/irqdomain.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> + +#include <dt-bindings/interrupt-controller/arm-gic.h> + +#define NUM_IRQS 32 + +#define EIMASK 0x00 +#define EISRCSEL 0x04 +#define EIREQSTA 0x08 +#define EIRAWREQSTA 0x0C +#define EIREQCLR 0x10 +#define EILVL 0x14 +#define EIEDG 0x18 +#define EISIR 0x1C + +struct exiu_irq_data { + void __iomem *base; + u32 spi_base; +}; + +static void exiu_irq_eoi(struct irq_data *d) +{ + struct exiu_irq_data *data = irq_data_get_irq_chip_data(d); + + writel(BIT(d->hwirq), data->base + EIREQCLR); + irq_chip_eoi_parent(d); +} + +static void exiu_irq_mask(struct irq_data *d) +{ + struct exiu_irq_data *data = irq_data_get_irq_chip_data(d); + u32 val; + + val = readl_relaxed(data->base + EIMASK) | BIT(d->hwirq); + writel_relaxed(val, data->base + EIMASK); + irq_chip_mask_parent(d); +} + +static void exiu_irq_unmask(struct irq_data *d) +{ + struct exiu_irq_data *data = irq_data_get_irq_chip_data(d); + u32 val; + + val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq); + writel_relaxed(val, data->base + EIMASK); + irq_chip_unmask_parent(d); +} + +static void exiu_irq_enable(struct irq_data *d) +{ + struct exiu_irq_data *data = irq_data_get_irq_chip_data(d); + u32 val; + + /* clear interrupts that were latched while disabled */ + writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR); + + val = readl_relaxed(data->base + EIMASK) & ~BIT(d->hwirq); + writel_relaxed(val, data->base + EIMASK); + irq_chip_enable_parent(d); +} + +static int exiu_irq_set_type(struct irq_data *d, unsigned int type) +{ + struct exiu_irq_data *data = irq_data_get_irq_chip_data(d); + u32 val; + + val = readl_relaxed(data->base + EILVL); + if (type == IRQ_TYPE_EDGE_RISING || type == IRQ_TYPE_LEVEL_HIGH) + val |= BIT(d->hwirq); + else + val &= ~BIT(d->hwirq); + writel_relaxed(val, data->base + EILVL); + + val = readl_relaxed(data->base + EIEDG); + if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_LEVEL_HIGH) + val &= ~BIT(d->hwirq); + else + val |= BIT(d->hwirq); + writel_relaxed(val, data->base + EIEDG); + + writel_relaxed(BIT(d->hwirq), data->base + EIREQCLR); + + return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH); +} + +static struct irq_chip exiu_irq_chip = { + .name = "EXIU", + .irq_eoi = exiu_irq_eoi, + .irq_enable = exiu_irq_enable, + .irq_mask = exiu_irq_mask, + .irq_unmask = exiu_irq_unmask, + .irq_set_type = exiu_irq_set_type, + .irq_set_affinity = irq_chip_set_affinity_parent, + .flags = IRQCHIP_SET_TYPE_MASKED | + IRQCHIP_SKIP_SET_WAKE | + IRQCHIP_EOI_THREADED | + IRQCHIP_MASK_ON_SUSPEND, +}; + +static int exiu_domain_translate(struct irq_domain *domain, + struct irq_fwspec *fwspec, + unsigned long *hwirq, + unsigned int *type) +{ + struct exiu_irq_data *info = domain->host_data; + + if (is_of_node(fwspec->fwnode)) { + if (fwspec->param_count != 3) + return -EINVAL; + + if (fwspec->param[0] != GIC_SPI) + return -EINVAL; /* No PPI should point to this domain */ + + *hwirq = fwspec->param[1] - info->spi_base; + *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; + return 0; + } + return -EINVAL; +} + +static int exiu_domain_alloc(struct irq_domain *dom, unsigned int virq, + unsigned int nr_irqs, void *data) +{ + struct irq_fwspec *fwspec = data; + struct irq_fwspec parent_fwspec; + struct exiu_irq_data *info = dom->host_data; + irq_hw_number_t hwirq; + + if (fwspec->param_count != 3) + return -EINVAL; /* 
Not GIC compliant */ + if (fwspec->param[0] != GIC_SPI) + return -EINVAL; /* No PPI should point to this domain */ + + WARN_ON(nr_irqs != 1); + hwirq = fwspec->param[1] - info->spi_base; + irq_domain_set_hwirq_and_chip(dom, virq, hwirq, &exiu_irq_chip, info); + + parent_fwspec = *fwspec; + parent_fwspec.fwnode = dom->parent->fwnode; + return irq_domain_alloc_irqs_parent(dom, virq, nr_irqs, &parent_fwspec); +} + +static const struct irq_domain_ops exiu_domain_ops = { + .translate = exiu_domain_translate, + .alloc = exiu_domain_alloc, + .free = irq_domain_free_irqs_common, +}; + +static int __init exiu_init(struct device_node *node, + struct device_node *parent) +{ + struct irq_domain *parent_domain, *domain; + struct exiu_irq_data *data; + int err; + + if (!parent) { + pr_err("%pOF: no parent, giving up\n", node); + return -ENODEV; + } + + parent_domain = irq_find_host(parent); + if (!parent_domain) { + pr_err("%pOF: unable to obtain parent domain\n", node); + return -ENXIO; + } + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + if (of_property_read_u32(node, "socionext,spi-base", &data->spi_base)) { + pr_err("%pOF: failed to parse 'spi-base' property\n", node); + err = -ENODEV; + goto out_free; + } + + data->base = of_iomap(node, 0); + if (IS_ERR(data->base)) { + err = PTR_ERR(data->base); + goto out_free; + } + + /* clear and mask all interrupts */ + writel_relaxed(0xFFFFFFFF, data->base + EIREQCLR); + writel_relaxed(0xFFFFFFFF, data->base + EIMASK); + + domain = irq_domain_add_hierarchy(parent_domain, 0, NUM_IRQS, node, + &exiu_domain_ops, data); + if (!domain) { + pr_err("%pOF: failed to allocate domain\n", node); + err = -ENOMEM; + goto out_unmap; + } + + pr_info("%pOF: %d interrupts forwarded to %pOF\n", node, NUM_IRQS, + parent); + + return 0; + +out_unmap: + iounmap(data->base); +out_free: + kfree(data); + return err; +} +IRQCHIP_DECLARE(exiu, "socionext,synquacer-exiu", exiu_init); diff --git a/drivers/irqchip/irq-stm32-exti.c b/drivers/irqchip/irq-stm32-exti.c index 45363ff8d06f..31ab0dee2ce7 100644 --- a/drivers/irqchip/irq-stm32-exti.c +++ b/drivers/irqchip/irq-stm32-exti.c @@ -14,27 +14,99 @@ #include <linux/of_address.h> #include <linux/of_irq.h> -#define EXTI_IMR 0x0 -#define EXTI_EMR 0x4 -#define EXTI_RTSR 0x8 -#define EXTI_FTSR 0xc -#define EXTI_SWIER 0x10 -#define EXTI_PR 0x14 +#define IRQS_PER_BANK 32 + +struct stm32_exti_bank { + u32 imr_ofst; + u32 emr_ofst; + u32 rtsr_ofst; + u32 ftsr_ofst; + u32 swier_ofst; + u32 pr_ofst; +}; + +static const struct stm32_exti_bank stm32f4xx_exti_b1 = { + .imr_ofst = 0x00, + .emr_ofst = 0x04, + .rtsr_ofst = 0x08, + .ftsr_ofst = 0x0C, + .swier_ofst = 0x10, + .pr_ofst = 0x14, +}; + +static const struct stm32_exti_bank *stm32f4xx_exti_banks[] = { + &stm32f4xx_exti_b1, +}; + +static const struct stm32_exti_bank stm32h7xx_exti_b1 = { + .imr_ofst = 0x80, + .emr_ofst = 0x84, + .rtsr_ofst = 0x00, + .ftsr_ofst = 0x04, + .swier_ofst = 0x08, + .pr_ofst = 0x88, +}; + +static const struct stm32_exti_bank stm32h7xx_exti_b2 = { + .imr_ofst = 0x90, + .emr_ofst = 0x94, + .rtsr_ofst = 0x20, + .ftsr_ofst = 0x24, + .swier_ofst = 0x28, + .pr_ofst = 0x98, +}; + +static const struct stm32_exti_bank stm32h7xx_exti_b3 = { + .imr_ofst = 0xA0, + .emr_ofst = 0xA4, + .rtsr_ofst = 0x40, + .ftsr_ofst = 0x44, + .swier_ofst = 0x48, + .pr_ofst = 0xA8, +}; + +static const struct stm32_exti_bank *stm32h7xx_exti_banks[] = { + &stm32h7xx_exti_b1, + &stm32h7xx_exti_b2, + &stm32h7xx_exti_b3, +}; + +static unsigned long stm32_exti_pending(struct 
irq_chip_generic *gc) +{ + const struct stm32_exti_bank *stm32_bank = gc->private; + + return irq_reg_readl(gc, stm32_bank->pr_ofst); +} + +static void stm32_exti_irq_ack(struct irq_chip_generic *gc, u32 mask) +{ + const struct stm32_exti_bank *stm32_bank = gc->private; + + irq_reg_writel(gc, mask, stm32_bank->pr_ofst); +} static void stm32_irq_handler(struct irq_desc *desc) { struct irq_domain *domain = irq_desc_get_handler_data(desc); - struct irq_chip_generic *gc = domain->gc->gc[0]; struct irq_chip *chip = irq_desc_get_chip(desc); + unsigned int virq, nbanks = domain->gc->num_chips; + struct irq_chip_generic *gc; + const struct stm32_exti_bank *stm32_bank; unsigned long pending; - int n; + int n, i, irq_base = 0; chained_irq_enter(chip, desc); - while ((pending = irq_reg_readl(gc, EXTI_PR))) { - for_each_set_bit(n, &pending, BITS_PER_LONG) { - generic_handle_irq(irq_find_mapping(domain, n)); - irq_reg_writel(gc, BIT(n), EXTI_PR); + for (i = 0; i < nbanks; i++, irq_base += IRQS_PER_BANK) { + gc = irq_get_domain_generic_chip(domain, irq_base); + stm32_bank = gc->private; + + while ((pending = stm32_exti_pending(gc))) { + for_each_set_bit(n, &pending, IRQS_PER_BANK) { + virq = irq_find_mapping(domain, irq_base + n); + generic_handle_irq(virq); + stm32_exti_irq_ack(gc, BIT(n)); + } } } @@ -44,13 +116,14 @@ static void stm32_irq_handler(struct irq_desc *desc) static int stm32_irq_set_type(struct irq_data *data, unsigned int type) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); - int pin = data->hwirq; + const struct stm32_exti_bank *stm32_bank = gc->private; + int pin = data->hwirq % IRQS_PER_BANK; u32 rtsr, ftsr; irq_gc_lock(gc); - rtsr = irq_reg_readl(gc, EXTI_RTSR); - ftsr = irq_reg_readl(gc, EXTI_FTSR); + rtsr = irq_reg_readl(gc, stm32_bank->rtsr_ofst); + ftsr = irq_reg_readl(gc, stm32_bank->ftsr_ofst); switch (type) { case IRQ_TYPE_EDGE_RISING: @@ -70,8 +143,8 @@ static int stm32_irq_set_type(struct irq_data *data, unsigned int type) return -EINVAL; } - irq_reg_writel(gc, rtsr, EXTI_RTSR); - irq_reg_writel(gc, ftsr, EXTI_FTSR); + irq_reg_writel(gc, rtsr, stm32_bank->rtsr_ofst); + irq_reg_writel(gc, ftsr, stm32_bank->ftsr_ofst); irq_gc_unlock(gc); @@ -81,17 +154,18 @@ static int stm32_irq_set_type(struct irq_data *data, unsigned int type) static int stm32_irq_set_wake(struct irq_data *data, unsigned int on) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); - int pin = data->hwirq; - u32 emr; + const struct stm32_exti_bank *stm32_bank = gc->private; + int pin = data->hwirq % IRQS_PER_BANK; + u32 imr; irq_gc_lock(gc); - emr = irq_reg_readl(gc, EXTI_EMR); + imr = irq_reg_readl(gc, stm32_bank->imr_ofst); if (on) - emr |= BIT(pin); + imr |= BIT(pin); else - emr &= ~BIT(pin); - irq_reg_writel(gc, emr, EXTI_EMR); + imr &= ~BIT(pin); + irq_reg_writel(gc, imr, stm32_bank->imr_ofst); irq_gc_unlock(gc); @@ -101,11 +175,12 @@ static int stm32_irq_set_wake(struct irq_data *data, unsigned int on) static int stm32_exti_alloc(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs, void *data) { - struct irq_chip_generic *gc = d->gc->gc[0]; + struct irq_chip_generic *gc; struct irq_fwspec *fwspec = data; irq_hw_number_t hwirq; hwirq = fwspec->param[0]; + gc = irq_get_domain_generic_chip(d, hwirq); irq_map_generic_chip(d, virq, hwirq); irq_domain_set_info(d, virq, hwirq, &gc->chip_types->chip, gc, @@ -129,8 +204,9 @@ struct irq_domain_ops irq_exti_domain_ops = { .free = stm32_exti_free, }; -static int __init stm32_exti_init(struct device_node *node, - struct 
device_node *parent) +static int +__init stm32_exti_init(const struct stm32_exti_bank **stm32_exti_banks, + int bank_nr, struct device_node *node) { unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; int nr_irqs, nr_exti, ret, i; @@ -144,23 +220,16 @@ static int __init stm32_exti_init(struct device_node *node, return -ENOMEM; } - /* Determine number of irqs supported */ - writel_relaxed(~0UL, base + EXTI_RTSR); - nr_exti = fls(readl_relaxed(base + EXTI_RTSR)); - writel_relaxed(0, base + EXTI_RTSR); - - pr_info("%pOF: %d External IRQs detected\n", node, nr_exti); - - domain = irq_domain_add_linear(node, nr_exti, + domain = irq_domain_add_linear(node, bank_nr * IRQS_PER_BANK, &irq_exti_domain_ops, NULL); if (!domain) { pr_err("%s: Could not register interrupt domain.\n", - node->name); + node->name); ret = -ENOMEM; goto out_unmap; } - ret = irq_alloc_domain_generic_chips(domain, nr_exti, 1, "exti", + ret = irq_alloc_domain_generic_chips(domain, IRQS_PER_BANK, 1, "exti", handle_edge_irq, clr, 0, 0); if (ret) { pr_err("%pOF: Could not allocate generic interrupt chip.\n", @@ -168,18 +237,41 @@ static int __init stm32_exti_init(struct device_node *node, goto out_free_domain; } - gc = domain->gc->gc[0]; - gc->reg_base = base; - gc->chip_types->type = IRQ_TYPE_EDGE_BOTH; - gc->chip_types->chip.name = gc->chip_types[0].chip.name; - gc->chip_types->chip.irq_ack = irq_gc_ack_set_bit; - gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit; - gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit; - gc->chip_types->chip.irq_set_type = stm32_irq_set_type; - gc->chip_types->chip.irq_set_wake = stm32_irq_set_wake; - gc->chip_types->regs.ack = EXTI_PR; - gc->chip_types->regs.mask = EXTI_IMR; - gc->chip_types->handler = handle_edge_irq; + for (i = 0; i < bank_nr; i++) { + const struct stm32_exti_bank *stm32_bank = stm32_exti_banks[i]; + u32 irqs_mask; + + gc = irq_get_domain_generic_chip(domain, i * IRQS_PER_BANK); + + gc->reg_base = base; + gc->chip_types->type = IRQ_TYPE_EDGE_BOTH; + gc->chip_types->chip.irq_ack = irq_gc_ack_set_bit; + gc->chip_types->chip.irq_mask = irq_gc_mask_clr_bit; + gc->chip_types->chip.irq_unmask = irq_gc_mask_set_bit; + gc->chip_types->chip.irq_set_type = stm32_irq_set_type; + gc->chip_types->chip.irq_set_wake = stm32_irq_set_wake; + gc->chip_types->regs.ack = stm32_bank->pr_ofst; + gc->chip_types->regs.mask = stm32_bank->imr_ofst; + gc->private = (void *)stm32_bank; + + /* Determine number of irqs supported */ + writel_relaxed(~0UL, base + stm32_bank->rtsr_ofst); + irqs_mask = readl_relaxed(base + stm32_bank->rtsr_ofst); + nr_exti = fls(readl_relaxed(base + stm32_bank->rtsr_ofst)); + + /* + * This IP has no reset, so after hot reboot we should + * clear registers to avoid residue + */ + writel_relaxed(0, base + stm32_bank->imr_ofst); + writel_relaxed(0, base + stm32_bank->emr_ofst); + writel_relaxed(0, base + stm32_bank->rtsr_ofst); + writel_relaxed(0, base + stm32_bank->ftsr_ofst); + writel_relaxed(~0UL, base + stm32_bank->pr_ofst); + + pr_info("%s: bank%d, External IRQs available:%#x\n", + node->full_name, i, irqs_mask); + } nr_irqs = of_irq_count(node); for (i = 0; i < nr_irqs; i++) { @@ -198,4 +290,20 @@ out_unmap: return ret; } -IRQCHIP_DECLARE(stm32_exti, "st,stm32-exti", stm32_exti_init); +static int __init stm32f4_exti_of_init(struct device_node *np, + struct device_node *parent) +{ + return stm32_exti_init(stm32f4xx_exti_banks, + ARRAY_SIZE(stm32f4xx_exti_banks), np); +} + +IRQCHIP_DECLARE(stm32f4_exti, "st,stm32-exti", stm32f4_exti_of_init); + +static int 
__init stm32h7_exti_of_init(struct device_node *np, + struct device_node *parent) +{ + return stm32_exti_init(stm32h7xx_exti_banks, + ARRAY_SIZE(stm32h7xx_exti_banks), np); +} + +IRQCHIP_DECLARE(stm32h7_exti, "st,stm32h7-exti", stm32h7_exti_of_init); diff --git a/drivers/macintosh/adb-iop.c b/drivers/macintosh/adb-iop.c index dbc4a3e63396..15db69d8ba69 100644 --- a/drivers/macintosh/adb-iop.c +++ b/drivers/macintosh/adb-iop.c @@ -30,8 +30,6 @@ /*#define DEBUG_ADB_IOP*/ -extern void iop_ism_irq(int, void *); - static struct adb_request *current_req; static struct adb_request *last_req; #if 0 @@ -266,7 +264,7 @@ int adb_iop_autopoll(int devs) void adb_iop_poll(void) { if (adb_iop_state == idle) adb_iop_start(); - iop_ism_irq(0, (void *) ADB_IOP); + iop_ism_irq_poll(ADB_IOP); } int adb_iop_reset_bus(void) diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index ea9bdc85a21d..899ec1f4c833 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -103,7 +103,7 @@ static DEFINE_MUTEX(smu_part_access); static int smu_irq_inited; static unsigned long smu_cmdbuf_abs; -static void smu_i2c_retry(unsigned long data); +static void smu_i2c_retry(struct timer_list *t); /* * SMU driver low level stuff @@ -582,9 +582,7 @@ static int smu_late_init(void) if (!smu) return 0; - init_timer(&smu->i2c_timer); - smu->i2c_timer.function = smu_i2c_retry; - smu->i2c_timer.data = (unsigned long)smu; + timer_setup(&smu->i2c_timer, smu_i2c_retry, 0); if (smu->db_node) { smu->db_irq = irq_of_parse_and_map(smu->db_node, 0); @@ -755,7 +753,7 @@ static void smu_i2c_complete_command(struct smu_i2c_cmd *cmd, int fail) } -static void smu_i2c_retry(unsigned long data) +static void smu_i2c_retry(struct timer_list *unused) { struct smu_i2c_cmd *cmd = smu->cmd_i2c_cur; @@ -795,7 +793,7 @@ static void smu_i2c_low_completion(struct smu_cmd *scmd, void *misc) BUG_ON(cmd != smu->cmd_i2c_cur); if (!smu_irq_inited) { mdelay(5); - smu_i2c_retry(0); + smu_i2c_retry(NULL); return; } mod_timer(&smu->i2c_timer, jiffies + msecs_to_jiffies(5)); diff --git a/drivers/mailbox/mailbox-altera.c b/drivers/mailbox/mailbox-altera.c index bb682c926b0a..bcb29df9549e 100644 --- a/drivers/mailbox/mailbox-altera.c +++ b/drivers/mailbox/mailbox-altera.c @@ -57,6 +57,7 @@ struct altera_mbox { /* If the controller supports only RX polling mode */ struct timer_list rxpoll_timer; + struct mbox_chan *chan; }; static struct altera_mbox *mbox_chan_to_altera_mbox(struct mbox_chan *chan) @@ -138,12 +139,11 @@ static void altera_mbox_rx_data(struct mbox_chan *chan) } } -static void altera_mbox_poll_rx(unsigned long data) +static void altera_mbox_poll_rx(struct timer_list *t) { - struct mbox_chan *chan = (struct mbox_chan *)data; - struct altera_mbox *mbox = mbox_chan_to_altera_mbox(chan); + struct altera_mbox *mbox = from_timer(mbox, t, rxpoll_timer); - altera_mbox_rx_data(chan); + altera_mbox_rx_data(mbox->chan); mod_timer(&mbox->rxpoll_timer, jiffies + msecs_to_jiffies(MBOX_POLLING_MS)); @@ -206,8 +206,8 @@ static int altera_mbox_startup_receiver(struct mbox_chan *chan) polling: /* Setup polling timer */ - setup_timer(&mbox->rxpoll_timer, altera_mbox_poll_rx, - (unsigned long)chan); + mbox->chan = chan; + timer_setup(&mbox->rxpoll_timer, altera_mbox_poll_rx, 0); mod_timer(&mbox->rxpoll_timer, jiffies + msecs_to_jiffies(MBOX_POLLING_MS)); diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index d216a8f7bc22..33bb074d6941 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -347,7 +347,7 @@ static void 
__cache_size_refresh(void) BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock)); BUG_ON(dm_bufio_client_count < 0); - dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size); + dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size); /* * Use default if set to 0 and report the actual cache size used. @@ -960,7 +960,7 @@ static void __get_memory_limit(struct dm_bufio_client *c, { unsigned long buffers; - if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) { + if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) { if (mutex_trylock(&dm_bufio_clients_lock)) { __cache_size_refresh(); mutex_unlock(&dm_bufio_clients_lock); @@ -1600,7 +1600,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) static unsigned long get_retain_buffers(struct dm_bufio_client *c) { - unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes); + unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes); return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT); } @@ -1647,7 +1647,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker); - return ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) + ACCESS_ONCE(c->n_buffers[LIST_DIRTY]); + return READ_ONCE(c->n_buffers[LIST_CLEAN]) + READ_ONCE(c->n_buffers[LIST_DIRTY]); } /* @@ -1818,7 +1818,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset); static unsigned get_max_age_hz(void) { - unsigned max_age = ACCESS_ONCE(dm_bufio_max_age); + unsigned max_age = READ_ONCE(dm_bufio_max_age); if (max_age > UINT_MAX / HZ) max_age = UINT_MAX / HZ; diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index 096fe9b66c50..8c5756e1df94 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -6,6 +6,7 @@ * This file is released under the GPL. 
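The drivers/md hunks on either side of this point all apply one mechanical conversion; a minimal sketch, assuming a hypothetical shared variable (READ_ONCE()/WRITE_ONCE() come from <linux/compiler.h>, which dm-integrity now includes explicitly):

	unsigned long snap;
	snap = READ_ONCE(shared_counter);	/* was: ACCESS_ONCE(shared_counter)            */
	WRITE_ONCE(shared_counter, snap + 1);	/* was: ACCESS_ONCE(shared_counter) = snap + 1 */
	/* dm-mpath's lockless_dereference() calls become READ_ONCE() as well,
	 * since READ_ONCE() now provides the dependency ordering that helper
	 * used to supply. */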
*/ +#include <linux/compiler.h> #include <linux/module.h> #include <linux/device-mapper.h> #include <linux/dm-io.h> @@ -80,13 +81,13 @@ struct journal_entry { #define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block]) #if BITS_PER_LONG == 64 -#define journal_entry_set_sector(je, x) do { smp_wmb(); ACCESS_ONCE((je)->u.sector) = cpu_to_le64(x); } while (0) +#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0) #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) #elif defined(CONFIG_LBDAF) -#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32((x) >> 32); } while (0) +#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0) #define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector) #else -#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); ACCESS_ONCE((je)->u.s.sector_hi) = cpu_to_le32(0); } while (0) +#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0) #define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo) #endif #define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1)) @@ -320,7 +321,7 @@ static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, in static int dm_integrity_failed(struct dm_integrity_c *ic) { - return ACCESS_ONCE(ic->failed); + return READ_ONCE(ic->failed); } static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i, @@ -1545,7 +1546,7 @@ retry_kmap: smp_mb(); if (unlikely(waitqueue_active(&ic->copy_to_journal_wait))) wake_up(&ic->copy_to_journal_wait); - if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) { + if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) { queue_work(ic->commit_wq, &ic->commit_work); } else { schedule_autocommit(ic); @@ -1798,7 +1799,7 @@ static void integrity_commit(struct work_struct *w) ic->n_committed_sections += commit_sections; spin_unlock_irq(&ic->endio_wait.lock); - if (ACCESS_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) + if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) queue_work(ic->writer_wq, &ic->writer_work); release_flush_bios: @@ -1980,7 +1981,7 @@ static void integrity_writer(struct work_struct *w) unsigned prev_free_sectors; /* the following test is not needed, but it tests the replay code */ - if (ACCESS_ONCE(ic->suspending)) + if (READ_ONCE(ic->suspending)) return; spin_lock_irq(&ic->endio_wait.lock); diff --git a/drivers/md/dm-kcopyd.c b/drivers/md/dm-kcopyd.c index cf2c67e35eaf..eb45cc3df31d 100644 --- a/drivers/md/dm-kcopyd.c +++ b/drivers/md/dm-kcopyd.c @@ -107,7 +107,7 @@ static void io_job_start(struct dm_kcopyd_throttle *t) try_again: spin_lock_irq(&throttle_spinlock); - throttle = ACCESS_ONCE(t->throttle); + throttle = READ_ONCE(t->throttle); if (likely(throttle >= 100)) goto skip_limit; @@ -157,7 +157,7 @@ static void io_job_finish(struct dm_kcopyd_throttle *t) t->num_io_jobs--; - if (likely(ACCESS_ONCE(t->throttle) >= 100)) + if (likely(READ_ONCE(t->throttle) >= 100)) goto skip_limit; if (!t->num_io_jobs) { diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c index 11f273d2f018..3f88c9d32f7e 100644 --- a/drivers/md/dm-mpath.c +++ b/drivers/md/dm-mpath.c @@ 
-366,7 +366,7 @@ static struct pgpath *choose_path_in_pg(struct multipath *m, pgpath = path_to_pgpath(path); - if (unlikely(lockless_dereference(m->current_pg) != pg)) { + if (unlikely(READ_ONCE(m->current_pg) != pg)) { /* Only update current_pgpath if pg changed */ spin_lock_irqsave(&m->lock, flags); m->current_pgpath = pgpath; @@ -390,7 +390,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) } /* Were we instructed to switch PG? */ - if (lockless_dereference(m->next_pg)) { + if (READ_ONCE(m->next_pg)) { spin_lock_irqsave(&m->lock, flags); pg = m->next_pg; if (!pg) { @@ -406,7 +406,7 @@ static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) /* Don't change PG until it has no remaining paths */ check_current_pg: - pg = lockless_dereference(m->current_pg); + pg = READ_ONCE(m->current_pg); if (pg) { pgpath = choose_path_in_pg(m, pg, nr_bytes); if (!IS_ERR_OR_NULL(pgpath)) @@ -473,7 +473,7 @@ static int multipath_clone_and_map(struct dm_target *ti, struct request *rq, struct request *clone; /* Do we need to select a new pgpath? */ - pgpath = lockless_dereference(m->current_pgpath); + pgpath = READ_ONCE(m->current_pgpath); if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags)) pgpath = choose_pgpath(m, nr_bytes); @@ -535,7 +535,7 @@ static int __multipath_map_bio(struct multipath *m, struct bio *bio, struct dm_m bool queue_io; /* Do we need to select a new pgpath? */ - pgpath = lockless_dereference(m->current_pgpath); + pgpath = READ_ONCE(m->current_pgpath); queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags); if (!pgpath || !queue_io) pgpath = choose_pgpath(m, nr_bytes); @@ -1804,7 +1804,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, struct pgpath *current_pgpath; int r; - current_pgpath = lockless_dereference(m->current_pgpath); + current_pgpath = READ_ONCE(m->current_pgpath); if (!current_pgpath) current_pgpath = choose_pgpath(m, 0); @@ -1826,7 +1826,7 @@ static int multipath_prepare_ioctl(struct dm_target *ti, } if (r == -ENOTCONN) { - if (!lockless_dereference(m->current_pg)) { + if (!READ_ONCE(m->current_pg)) { /* Path status changed, redo selection */ (void) choose_pgpath(m, 0); } @@ -1895,9 +1895,9 @@ static int multipath_busy(struct dm_target *ti) return (m->queue_mode != DM_TYPE_MQ_REQUEST_BASED); /* Guess which priority_group will be used at next mapping time */ - pg = lockless_dereference(m->current_pg); - next_pg = lockless_dereference(m->next_pg); - if (unlikely(!lockless_dereference(m->current_pgpath) && next_pg)) + pg = READ_ONCE(m->current_pg); + next_pg = READ_ONCE(m->next_pg); + if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg)) pg = next_pg; if (!pg) { diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index a7868503d135..29bc51084c82 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -432,7 +432,7 @@ do_sync_free: synchronize_rcu_expedited(); dm_stat_free(&s->rcu_head); } else { - ACCESS_ONCE(dm_stat_need_rcu_barrier) = 1; + WRITE_ONCE(dm_stat_need_rcu_barrier, 1); call_rcu(&s->rcu_head, dm_stat_free); } return 0; @@ -640,12 +640,12 @@ void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw, */ last = raw_cpu_ptr(stats->last); stats_aux->merged = - (bi_sector == (ACCESS_ONCE(last->last_sector) && + (bi_sector == (READ_ONCE(last->last_sector) && ((bi_rw == WRITE) == - (ACCESS_ONCE(last->last_rw) == WRITE)) + (READ_ONCE(last->last_rw) == WRITE)) )); - ACCESS_ONCE(last->last_sector) = end_sector; - ACCESS_ONCE(last->last_rw) = bi_rw; + WRITE_ONCE(last->last_sector, end_sector); + 
WRITE_ONCE(last->last_rw, bi_rw); } rcu_read_lock(); @@ -694,22 +694,22 @@ static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared for_each_possible_cpu(cpu) { p = &s->stat_percpu[cpu][x]; - shared->tmp.sectors[READ] += ACCESS_ONCE(p->sectors[READ]); - shared->tmp.sectors[WRITE] += ACCESS_ONCE(p->sectors[WRITE]); - shared->tmp.ios[READ] += ACCESS_ONCE(p->ios[READ]); - shared->tmp.ios[WRITE] += ACCESS_ONCE(p->ios[WRITE]); - shared->tmp.merges[READ] += ACCESS_ONCE(p->merges[READ]); - shared->tmp.merges[WRITE] += ACCESS_ONCE(p->merges[WRITE]); - shared->tmp.ticks[READ] += ACCESS_ONCE(p->ticks[READ]); - shared->tmp.ticks[WRITE] += ACCESS_ONCE(p->ticks[WRITE]); - shared->tmp.io_ticks[READ] += ACCESS_ONCE(p->io_ticks[READ]); - shared->tmp.io_ticks[WRITE] += ACCESS_ONCE(p->io_ticks[WRITE]); - shared->tmp.io_ticks_total += ACCESS_ONCE(p->io_ticks_total); - shared->tmp.time_in_queue += ACCESS_ONCE(p->time_in_queue); + shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]); + shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]); + shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]); + shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]); + shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]); + shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]); + shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]); + shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]); + shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]); + shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]); + shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total); + shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue); if (s->n_histogram_entries) { unsigned i; for (i = 0; i < s->n_histogram_entries + 1; i++) - shared->tmp.histogram[i] += ACCESS_ONCE(p->histogram[i]); + shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]); } } } diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index 4c8de1ff78ca..8d0ba879777e 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c @@ -144,7 +144,7 @@ static unsigned switch_region_table_read(struct switch_ctx *sctx, unsigned long switch_get_position(sctx, region_nr, ®ion_index, &bit); - return (ACCESS_ONCE(sctx->region_table[region_index]) >> bit) & + return (READ_ONCE(sctx->region_table[region_index]) >> bit) & ((1 << sctx->region_table_entry_bits) - 1); } diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 1e25705209c2..89e5dff9b4cf 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -2431,7 +2431,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) struct pool_c *pt = pool->ti->private; bool needs_check = dm_pool_metadata_needs_check(pool->pmd); enum pool_mode old_mode = get_pool_mode(pool); - unsigned long no_space_timeout = ACCESS_ONCE(no_space_timeout_secs) * HZ; + unsigned long no_space_timeout = READ_ONCE(no_space_timeout_secs) * HZ; /* * Never allow the pool to transition to PM_WRITE mode if user diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index bda3caca23ca..fba93237a780 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -589,7 +589,7 @@ static void verity_prefetch_io(struct work_struct *work) verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL); verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL); if (!i) { - unsigned cluster = ACCESS_ONCE(dm_verity_prefetch_cluster); + unsigned cluster = READ_ONCE(dm_verity_prefetch_cluster); cluster >>= v->data_dev_block_bits; if 
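
/*
 * Editor's sketch, not part of the patch: the dm-stats hunk above reads
 * per-CPU counters with READ_ONCE() when folding them into a snapshot,
 * while each CPU updates its own copy with plain stores.  A minimal
 * version of that aggregation, with invented names:
 */
#include <linux/compiler.h>
#include <linux/percpu.h>

struct demo_counters {
	unsigned long long ios;
	unsigned long long sectors;
};

/* hot path: runs on the local CPU, no barriers needed for local updates */
static void demo_account(struct demo_counters __percpu *pcpu,
			 unsigned long long sectors)
{
	struct demo_counters *c = raw_cpu_ptr(pcpu);

	c->ios++;
	c->sectors += sectors;
}

/* reporting path: fold every CPU's counters into one total */
static void demo_sum(struct demo_counters __percpu *pcpu,
		     struct demo_counters *sum)
{
	int cpu;

	sum->ios = 0;
	sum->sectors = 0;
	for_each_possible_cpu(cpu) {
		struct demo_counters *c = per_cpu_ptr(pcpu, cpu);

		/* READ_ONCE() stops the compiler refetching or caching */
		sum->ios += READ_ONCE(c->ios);
		sum->sectors += READ_ONCE(c->sectors);
	}
}
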
(unlikely(!cluster)) diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 4be85324f44d..8aaffa19b29a 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -114,7 +114,7 @@ static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS; static int __dm_get_module_param_int(int *module_param, int min, int max) { - int param = ACCESS_ONCE(*module_param); + int param = READ_ONCE(*module_param); int modified_param = 0; bool modified = true; @@ -136,7 +136,7 @@ static int __dm_get_module_param_int(int *module_param, int min, int max) unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max) { - unsigned param = ACCESS_ONCE(*module_param); + unsigned param = READ_ONCE(*module_param); unsigned modified_param = 0; if (!param) diff --git a/drivers/md/md.c b/drivers/md/md.c index 0ff1bbf6c90e..447ddcbc9566 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -2651,7 +2651,7 @@ state_show(struct md_rdev *rdev, char *page) { char *sep = ","; size_t len = 0; - unsigned long flags = ACCESS_ONCE(rdev->flags); + unsigned long flags = READ_ONCE(rdev->flags); if (test_bit(Faulty, &flags) || (!test_bit(ExternalBbl, &flags) && diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 928e24a07133..7d9a50eed9db 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -6072,7 +6072,7 @@ static inline sector_t raid5_sync_request(struct mddev *mddev, sector_t sector_n */ rcu_read_lock(); for (i = 0; i < conf->raid_disks; i++) { - struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev); + struct md_rdev *rdev = READ_ONCE(conf->disks[i].rdev); if (rdev == NULL || test_bit(Faulty, &rdev->flags)) still_degraded = 1; diff --git a/drivers/media/dvb-core/dvb_ringbuffer.c b/drivers/media/dvb-core/dvb_ringbuffer.c index 2322af1b8742..53011629c9ad 100644 --- a/drivers/media/dvb-core/dvb_ringbuffer.c +++ b/drivers/media/dvb-core/dvb_ringbuffer.c @@ -66,12 +66,12 @@ ssize_t dvb_ringbuffer_free(struct dvb_ringbuffer *rbuf) { ssize_t free; - /* ACCESS_ONCE() to load read pointer on writer side + /* READ_ONCE() to load read pointer on writer side * this pairs with smp_store_release() in dvb_ringbuffer_read(), * dvb_ringbuffer_read_user(), dvb_ringbuffer_flush(), * or dvb_ringbuffer_reset() */ - free = ACCESS_ONCE(rbuf->pread) - rbuf->pwrite; + free = READ_ONCE(rbuf->pread) - rbuf->pwrite; if (free <= 0) free += rbuf->size; return free-1; @@ -143,7 +143,7 @@ ssize_t dvb_ringbuffer_read_user(struct dvb_ringbuffer *rbuf, u8 __user *buf, si todo -= split; /* smp_store_release() for read pointer update to ensure * that buf is not overwritten until read is complete, - * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free() + * this pairs with READ_ONCE() in dvb_ringbuffer_free() */ smp_store_release(&rbuf->pread, 0); } @@ -168,7 +168,7 @@ void dvb_ringbuffer_read(struct dvb_ringbuffer *rbuf, u8 *buf, size_t len) todo -= split; /* smp_store_release() for read pointer update to ensure * that buf is not overwritten until read is complete, - * this pairs with ACCESS_ONCE() in dvb_ringbuffer_free() + * this pairs with READ_ONCE() in dvb_ringbuffer_free() */ smp_store_release(&rbuf->pread, 0); } diff --git a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c index ad5b25b89699..8289ee482f49 100644 --- a/drivers/media/usb/pvrusb2/pvrusb2-hdw.c +++ b/drivers/media/usb/pvrusb2/pvrusb2-hdw.c @@ -330,10 +330,10 @@ static void pvr2_hdw_state_log_state(struct pvr2_hdw *); static int pvr2_hdw_cmd_usbstream(struct pvr2_hdw *hdw,int runFl); static int pvr2_hdw_commit_setup(struct pvr2_hdw 
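
/*
 * Editor's sketch, not part of the patch: the dvb_ringbuffer hunks above
 * keep the existing pairing with smp_store_release(), just spelled
 * READ_ONCE() now.  The shape of that single-producer/single-consumer
 * handshake, with invented names (wrap handling of the copy elided):
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/compiler.h>

struct demo_rb {
	u8 *data;
	ssize_t size;
	ssize_t pread;		/* advanced by the consumer */
	ssize_t pwrite;		/* advanced by the producer */
};

/* producer side: room left before the write pointer would hit pread */
static ssize_t demo_rb_free(struct demo_rb *rb)
{
	/* pairs with the smp_store_release() in demo_rb_read() */
	ssize_t free = READ_ONCE(rb->pread) - rb->pwrite;

	if (free <= 0)
		free += rb->size;
	return free - 1;
}

/* consumer side: copy the data out, then publish the new read index */
static void demo_rb_read(struct demo_rb *rb, u8 *buf, size_t len)
{
	memcpy(buf, rb->data + rb->pread, len);
	/*
	 * Release ordering: the producer may only observe the advanced
	 * index after the copy above is done, so live data is never
	 * overwritten.
	 */
	smp_store_release(&rb->pread, (rb->pread + len) % rb->size);
}
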
*hdw); static int pvr2_hdw_get_eeprom_addr(struct pvr2_hdw *hdw); -static void pvr2_hdw_quiescent_timeout(unsigned long); -static void pvr2_hdw_decoder_stabilization_timeout(unsigned long); -static void pvr2_hdw_encoder_wait_timeout(unsigned long); -static void pvr2_hdw_encoder_run_timeout(unsigned long); +static void pvr2_hdw_quiescent_timeout(struct timer_list *); +static void pvr2_hdw_decoder_stabilization_timeout(struct timer_list *); +static void pvr2_hdw_encoder_wait_timeout(struct timer_list *); +static void pvr2_hdw_encoder_run_timeout(struct timer_list *); static int pvr2_issue_simple_cmd(struct pvr2_hdw *,u32); static int pvr2_send_request_ex(struct pvr2_hdw *hdw, unsigned int timeout,int probe_fl, @@ -2373,18 +2373,15 @@ struct pvr2_hdw *pvr2_hdw_create(struct usb_interface *intf, } if (!hdw) goto fail; - setup_timer(&hdw->quiescent_timer, pvr2_hdw_quiescent_timeout, - (unsigned long)hdw); + timer_setup(&hdw->quiescent_timer, pvr2_hdw_quiescent_timeout, 0); - setup_timer(&hdw->decoder_stabilization_timer, - pvr2_hdw_decoder_stabilization_timeout, - (unsigned long)hdw); + timer_setup(&hdw->decoder_stabilization_timer, + pvr2_hdw_decoder_stabilization_timeout, 0); - setup_timer(&hdw->encoder_wait_timer, pvr2_hdw_encoder_wait_timeout, - (unsigned long)hdw); + timer_setup(&hdw->encoder_wait_timer, pvr2_hdw_encoder_wait_timeout, + 0); - setup_timer(&hdw->encoder_run_timer, pvr2_hdw_encoder_run_timeout, - (unsigned long)hdw); + timer_setup(&hdw->encoder_run_timer, pvr2_hdw_encoder_run_timeout, 0); hdw->master_state = PVR2_STATE_DEAD; @@ -3539,10 +3536,16 @@ static void pvr2_ctl_read_complete(struct urb *urb) complete(&hdw->ctl_done); } +struct hdw_timer { + struct timer_list timer; + struct pvr2_hdw *hdw; +}; -static void pvr2_ctl_timeout(unsigned long data) +static void pvr2_ctl_timeout(struct timer_list *t) { - struct pvr2_hdw *hdw = (struct pvr2_hdw *)data; + struct hdw_timer *timer = from_timer(timer, t, timer); + struct pvr2_hdw *hdw = timer->hdw; + if (hdw->ctl_write_pend_flag || hdw->ctl_read_pend_flag) { hdw->ctl_timeout_flag = !0; if (hdw->ctl_write_pend_flag) @@ -3564,7 +3567,10 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw, { unsigned int idx; int status = 0; - struct timer_list timer; + struct hdw_timer timer = { + .hdw = hdw, + }; + if (!hdw->ctl_lock_held) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "Attempted to execute control transfer without lock!!"); @@ -3621,8 +3627,8 @@ static int pvr2_send_request_ex(struct pvr2_hdw *hdw, hdw->ctl_timeout_flag = 0; hdw->ctl_write_pend_flag = 0; hdw->ctl_read_pend_flag = 0; - setup_timer(&timer, pvr2_ctl_timeout, (unsigned long)hdw); - timer.expires = jiffies + timeout; + timer_setup_on_stack(&timer.timer, pvr2_ctl_timeout, 0); + timer.timer.expires = jiffies + timeout; if (write_len && write_data) { hdw->cmd_debug_state = 2; @@ -3677,7 +3683,7 @@ status); } /* Start timer */ - add_timer(&timer); + add_timer(&timer.timer); /* Now wait for all I/O to complete */ hdw->cmd_debug_state = 4; @@ -3687,7 +3693,7 @@ status); hdw->cmd_debug_state = 5; /* Stop timer */ - del_timer_sync(&timer); + del_timer_sync(&timer.timer); hdw->cmd_debug_state = 6; status = 0; @@ -3769,6 +3775,8 @@ status); if ((status < 0) && (!probe_fl)) { pvr2_hdw_render_useless(hdw); } + destroy_timer_on_stack(&timer.timer); + return status; } @@ -4366,9 +4374,9 @@ static int state_eval_encoder_run(struct pvr2_hdw *hdw) /* Timeout function for quiescent timer. 
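
/*
 * Editor's sketch, not part of the patch: with timer_setup() the callback
 * receives only the timer_list pointer, so pvrusb2 above wraps its
 * on-stack control-transfer timer in a small struct carrying the extra
 * context.  The same pattern in isolation; names are invented, and
 * ctx->done is assumed to be completed by an interrupt handler elsewhere.
 */
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/completion.h>
#include <linux/errno.h>

struct demo_ctx {
	struct completion done;	/* completed by the irq path (not shown) */
	bool timed_out;
};

struct demo_stack_timer {
	struct timer_list timer;
	struct demo_ctx *ctx;	/* payload that used to ride in timer.data */
};

static void demo_timeout(struct timer_list *t)
{
	struct demo_stack_timer *st = from_timer(st, t, timer);

	st->ctx->timed_out = true;
	complete(&st->ctx->done);
}

static int demo_wait(struct demo_ctx *ctx, unsigned long timeout_j)
{
	struct demo_stack_timer st = { .ctx = ctx };

	timer_setup_on_stack(&st.timer, demo_timeout, 0);
	st.timer.expires = jiffies + timeout_j;
	add_timer(&st.timer);

	wait_for_completion(&ctx->done);

	del_timer_sync(&st.timer);
	destroy_timer_on_stack(&st.timer);	/* mandatory for stack timers */

	return ctx->timed_out ? -ETIMEDOUT : 0;
}
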
*/ -static void pvr2_hdw_quiescent_timeout(unsigned long data) +static void pvr2_hdw_quiescent_timeout(struct timer_list *t) { - struct pvr2_hdw *hdw = (struct pvr2_hdw *)data; + struct pvr2_hdw *hdw = from_timer(hdw, t, quiescent_timer); hdw->state_decoder_quiescent = !0; trace_stbit("state_decoder_quiescent",hdw->state_decoder_quiescent); hdw->state_stale = !0; @@ -4377,9 +4385,9 @@ static void pvr2_hdw_quiescent_timeout(unsigned long data) /* Timeout function for decoder stabilization timer. */ -static void pvr2_hdw_decoder_stabilization_timeout(unsigned long data) +static void pvr2_hdw_decoder_stabilization_timeout(struct timer_list *t) { - struct pvr2_hdw *hdw = (struct pvr2_hdw *)data; + struct pvr2_hdw *hdw = from_timer(hdw, t, decoder_stabilization_timer); hdw->state_decoder_ready = !0; trace_stbit("state_decoder_ready", hdw->state_decoder_ready); hdw->state_stale = !0; @@ -4388,9 +4396,9 @@ static void pvr2_hdw_decoder_stabilization_timeout(unsigned long data) /* Timeout function for encoder wait timer. */ -static void pvr2_hdw_encoder_wait_timeout(unsigned long data) +static void pvr2_hdw_encoder_wait_timeout(struct timer_list *t) { - struct pvr2_hdw *hdw = (struct pvr2_hdw *)data; + struct pvr2_hdw *hdw = from_timer(hdw, t, encoder_wait_timer); hdw->state_encoder_waitok = !0; trace_stbit("state_encoder_waitok",hdw->state_encoder_waitok); hdw->state_stale = !0; @@ -4399,9 +4407,9 @@ static void pvr2_hdw_encoder_wait_timeout(unsigned long data) /* Timeout function for encoder run timer. */ -static void pvr2_hdw_encoder_run_timeout(unsigned long data) +static void pvr2_hdw_encoder_run_timeout(struct timer_list *t) { - struct pvr2_hdw *hdw = (struct pvr2_hdw *)data; + struct pvr2_hdw *hdw = from_timer(hdw, t, encoder_run_timer); if (!hdw->state_encoder_runok) { hdw->state_encoder_runok = !0; trace_stbit("state_encoder_runok",hdw->state_encoder_runok); diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c index 48db922075e2..bcdca9fbef51 100644 --- a/drivers/memstick/host/jmb38x_ms.c +++ b/drivers/memstick/host/jmb38x_ms.c @@ -59,6 +59,7 @@ struct jmb38x_ms_host { unsigned int block_pos; unsigned long timeout_jiffies; struct timer_list timer; + struct memstick_host *msh; struct memstick_request *req; unsigned char cmd_flags; unsigned char io_pos; @@ -592,10 +593,10 @@ static irqreturn_t jmb38x_ms_isr(int irq, void *dev_id) return IRQ_HANDLED; } -static void jmb38x_ms_abort(unsigned long data) +static void jmb38x_ms_abort(struct timer_list *t) { - struct memstick_host *msh = (struct memstick_host *)data; - struct jmb38x_ms_host *host = memstick_priv(msh); + struct jmb38x_ms_host *host = from_timer(host, t, timer); + struct memstick_host *msh = host->msh; unsigned long flags; dev_dbg(&host->chip->pdev->dev, "abort\n"); @@ -878,6 +879,7 @@ static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt) return NULL; host = memstick_priv(msh); + host->msh = msh; host->chip = jm; host->addr = ioremap(pci_resource_start(jm->pdev, cnt), pci_resource_len(jm->pdev, cnt)); @@ -897,7 +899,7 @@ static struct memstick_host *jmb38x_ms_alloc_host(struct jmb38x_ms *jm, int cnt) msh->caps = MEMSTICK_CAP_PAR4 | MEMSTICK_CAP_PAR8; - setup_timer(&host->timer, jmb38x_ms_abort, (unsigned long)msh); + timer_setup(&host->timer, jmb38x_ms_abort, 0); if (!request_irq(host->irq, jmb38x_ms_isr, IRQF_SHARED, host->host_id, msh)) diff --git a/drivers/memstick/host/r592.c b/drivers/memstick/host/r592.c index d5cfb503b9d6..627d6e62fe31 100644 --- 
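
/*
 * Editor's sketch, not part of the patch: when the timer lives in a
 * private struct but the callback needs some other object (jmb38x_ms
 * above needs the memstick_host), the conversion stores a back-pointer
 * at setup time and lets from_timer() recover the private struct.
 * Names are invented.
 */
#include <linux/timer.h>

struct demo_public;			/* the subsystem-visible object */

struct demo_priv {
	struct timer_list abort_timer;
	struct demo_public *pub;	/* back-pointer filled in at probe */
};

static void demo_abort(struct timer_list *t)
{
	struct demo_priv *priv = from_timer(priv, t, abort_timer);
	struct demo_public *pub = priv->pub;

	/* ... abort the request that is stuck on pub ... */
	(void)pub;
}

static void demo_probe_one(struct demo_priv *priv, struct demo_public *pub)
{
	priv->pub = pub;	/* must be set before the timer can fire */
	timer_setup(&priv->abort_timer, demo_abort, 0);
}
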
a/drivers/memstick/host/r592.c +++ b/drivers/memstick/host/r592.c @@ -616,9 +616,9 @@ static void r592_update_card_detect(struct r592_device *dev) } /* Timer routine that fires 1 second after last card detection event, */ -static void r592_detect_timer(long unsigned int data) +static void r592_detect_timer(struct timer_list *t) { - struct r592_device *dev = (struct r592_device *)data; + struct r592_device *dev = from_timer(dev, t, detect_timer); r592_update_card_detect(dev); memstick_detect_change(dev->host); } @@ -770,8 +770,7 @@ static int r592_probe(struct pci_dev *pdev, const struct pci_device_id *id) spin_lock_init(&dev->io_thread_lock); init_completion(&dev->dma_done); INIT_KFIFO(dev->pio_fifo); - setup_timer(&dev->detect_timer, - r592_detect_timer, (long unsigned int)dev); + timer_setup(&dev->detect_timer, r592_detect_timer, 0); /* Host initialization */ host->caps = MEMSTICK_CAP_PAR4; diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c index 7bafa72f8f57..bed205849d02 100644 --- a/drivers/memstick/host/tifm_ms.c +++ b/drivers/memstick/host/tifm_ms.c @@ -538,9 +538,9 @@ static int tifm_ms_set_param(struct memstick_host *msh, return 0; } -static void tifm_ms_abort(unsigned long data) +static void tifm_ms_abort(struct timer_list *t) { - struct tifm_ms *host = (struct tifm_ms *)data; + struct tifm_ms *host = from_timer(host, t, timer); dev_dbg(&host->dev->dev, "status %x\n", readl(host->dev->addr + SOCK_MS_STATUS)); @@ -575,7 +575,7 @@ static int tifm_ms_probe(struct tifm_dev *sock) host->dev = sock; host->timeout_jiffies = msecs_to_jiffies(1000); - setup_timer(&host->timer, tifm_ms_abort, (unsigned long)host); + timer_setup(&host->timer, tifm_ms_abort, 0); tasklet_init(&host->notify, tifm_ms_req_tasklet, (unsigned long)msh); msh->request = tifm_ms_submit_req; diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c index 981b3ef71e47..ed7f0c61c59a 100644 --- a/drivers/misc/lkdtm_core.c +++ b/drivers/misc/lkdtm_core.c @@ -56,122 +56,54 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf, size_t count, loff_t *off); #ifdef CONFIG_KPROBES -static void lkdtm_handler(void); +static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs); static ssize_t lkdtm_debugfs_entry(struct file *f, const char __user *user_buf, size_t count, loff_t *off); - - -/* jprobe entry point handlers. 
*/ -static unsigned int jp_do_irq(unsigned int irq) -{ - lkdtm_handler(); - jprobe_return(); - return 0; -} - -static irqreturn_t jp_handle_irq_event(unsigned int irq, - struct irqaction *action) -{ - lkdtm_handler(); - jprobe_return(); - return 0; -} - -static void jp_tasklet_action(struct softirq_action *a) -{ - lkdtm_handler(); - jprobe_return(); -} - -static void jp_ll_rw_block(int rw, int nr, struct buffer_head *bhs[]) -{ - lkdtm_handler(); - jprobe_return(); -} - -struct scan_control; - -static unsigned long jp_shrink_inactive_list(unsigned long max_scan, - struct zone *zone, - struct scan_control *sc) -{ - lkdtm_handler(); - jprobe_return(); - return 0; -} - -static int jp_hrtimer_start(struct hrtimer *timer, ktime_t tim, - const enum hrtimer_mode mode) -{ - lkdtm_handler(); - jprobe_return(); - return 0; -} - -static int jp_scsi_dispatch_cmd(struct scsi_cmnd *cmd) -{ - lkdtm_handler(); - jprobe_return(); - return 0; -} - -# ifdef CONFIG_IDE -static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file, - struct block_device *bdev, unsigned int cmd, - unsigned long arg) -{ - lkdtm_handler(); - jprobe_return(); - return 0; -} -# endif +# define CRASHPOINT_KPROBE(_symbol) \ + .kprobe = { \ + .symbol_name = (_symbol), \ + .pre_handler = lkdtm_kprobe_handler, \ + }, +# define CRASHPOINT_WRITE(_symbol) \ + (_symbol) ? lkdtm_debugfs_entry : direct_entry +#else +# define CRASHPOINT_KPROBE(_symbol) +# define CRASHPOINT_WRITE(_symbol) direct_entry #endif /* Crash points */ struct crashpoint { const char *name; const struct file_operations fops; - struct jprobe jprobe; + struct kprobe kprobe; }; -#define CRASHPOINT(_name, _write, _symbol, _entry) \ +#define CRASHPOINT(_name, _symbol) \ { \ .name = _name, \ .fops = { \ .read = lkdtm_debugfs_read, \ .llseek = generic_file_llseek, \ .open = lkdtm_debugfs_open, \ - .write = _write, \ - }, \ - .jprobe = { \ - .kp.symbol_name = _symbol, \ - .entry = (kprobe_opcode_t *)_entry, \ + .write = CRASHPOINT_WRITE(_symbol) \ }, \ + CRASHPOINT_KPROBE(_symbol) \ } /* Define the possible places where we can trigger a crash point. */ -struct crashpoint crashpoints[] = { - CRASHPOINT("DIRECT", direct_entry, - NULL, NULL), +static struct crashpoint crashpoints[] = { + CRASHPOINT("DIRECT", NULL), #ifdef CONFIG_KPROBES - CRASHPOINT("INT_HARDWARE_ENTRY", lkdtm_debugfs_entry, - "do_IRQ", jp_do_irq), - CRASHPOINT("INT_HW_IRQ_EN", lkdtm_debugfs_entry, - "handle_IRQ_event", jp_handle_irq_event), - CRASHPOINT("INT_TASKLET_ENTRY", lkdtm_debugfs_entry, - "tasklet_action", jp_tasklet_action), - CRASHPOINT("FS_DEVRW", lkdtm_debugfs_entry, - "ll_rw_block", jp_ll_rw_block), - CRASHPOINT("MEM_SWAPOUT", lkdtm_debugfs_entry, - "shrink_inactive_list", jp_shrink_inactive_list), - CRASHPOINT("TIMERADD", lkdtm_debugfs_entry, - "hrtimer_start", jp_hrtimer_start), - CRASHPOINT("SCSI_DISPATCH_CMD", lkdtm_debugfs_entry, - "scsi_dispatch_cmd", jp_scsi_dispatch_cmd), + CRASHPOINT("INT_HARDWARE_ENTRY", "do_IRQ"), + CRASHPOINT("INT_HW_IRQ_EN", "handle_IRQ_event"), + CRASHPOINT("INT_TASKLET_ENTRY", "tasklet_action"), + CRASHPOINT("FS_DEVRW", "ll_rw_block"), + CRASHPOINT("MEM_SWAPOUT", "shrink_inactive_list"), + CRASHPOINT("TIMERADD", "hrtimer_start"), + CRASHPOINT("SCSI_DISPATCH_CMD", "scsi_dispatch_cmd"), # ifdef CONFIG_IDE - CRASHPOINT("IDE_CORE_CP", lkdtm_debugfs_entry, - "generic_ide_ioctl", jp_generic_ide_ioctl), + CRASHPOINT("IDE_CORE_CP", "generic_ide_ioctl"), # endif #endif }; @@ -254,8 +186,8 @@ struct crashtype crashtypes[] = { }; -/* Global jprobe entry and crashtype. 
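
/*
 * Editor's sketch, not part of the patch: the lkdtm rework above drops
 * the per-symbol jprobe stubs in favour of one kprobe pre_handler.  A
 * pre_handler only gets (kprobe, pt_regs) and returns 0 to let the
 * probed function run; the probed symbol here is just an example.
 */
#include <linux/module.h>
#include <linux/kprobes.h>

static int demo_pre_handler(struct kprobe *kp, struct pt_regs *regs)
{
	pr_info("demo: hit %s\n", kp->symbol_name);
	return 0;			/* continue executing the probed code */
}

static struct kprobe demo_kp = {
	.symbol_name	= "do_IRQ",	/* one of the crash points above */
	.pre_handler	= demo_pre_handler,
};

static int __init demo_kprobe_init(void)
{
	return register_kprobe(&demo_kp);
}

static void __exit demo_kprobe_exit(void)
{
	unregister_kprobe(&demo_kp);
}

module_init(demo_kprobe_init);
module_exit(demo_kprobe_exit);
MODULE_LICENSE("GPL");
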
*/ -static struct jprobe *lkdtm_jprobe; +/* Global kprobe entry and crashtype. */ +static struct kprobe *lkdtm_kprobe; struct crashpoint *lkdtm_crashpoint; struct crashtype *lkdtm_crashtype; @@ -298,7 +230,8 @@ static struct crashtype *find_crashtype(const char *name) */ static noinline void lkdtm_do_action(struct crashtype *crashtype) { - BUG_ON(!crashtype || !crashtype->func); + if (WARN_ON(!crashtype || !crashtype->func)) + return; crashtype->func(); } @@ -308,22 +241,22 @@ static int lkdtm_register_cpoint(struct crashpoint *crashpoint, int ret; /* If this doesn't have a symbol, just call immediately. */ - if (!crashpoint->jprobe.kp.symbol_name) { + if (!crashpoint->kprobe.symbol_name) { lkdtm_do_action(crashtype); return 0; } - if (lkdtm_jprobe != NULL) - unregister_jprobe(lkdtm_jprobe); + if (lkdtm_kprobe != NULL) + unregister_kprobe(lkdtm_kprobe); lkdtm_crashpoint = crashpoint; lkdtm_crashtype = crashtype; - lkdtm_jprobe = &crashpoint->jprobe; - ret = register_jprobe(lkdtm_jprobe); + lkdtm_kprobe = &crashpoint->kprobe; + ret = register_kprobe(lkdtm_kprobe); if (ret < 0) { - pr_info("Couldn't register jprobe %s\n", - crashpoint->jprobe.kp.symbol_name); - lkdtm_jprobe = NULL; + pr_info("Couldn't register kprobe %s\n", + crashpoint->kprobe.symbol_name); + lkdtm_kprobe = NULL; lkdtm_crashpoint = NULL; lkdtm_crashtype = NULL; } @@ -336,13 +269,14 @@ static int lkdtm_register_cpoint(struct crashpoint *crashpoint, static int crash_count = DEFAULT_COUNT; static DEFINE_SPINLOCK(crash_count_lock); -/* Called by jprobe entry points. */ -static void lkdtm_handler(void) +/* Called by kprobe entry points. */ +static int lkdtm_kprobe_handler(struct kprobe *kp, struct pt_regs *regs) { unsigned long flags; bool do_it = false; - BUG_ON(!lkdtm_crashpoint || !lkdtm_crashtype); + if (WARN_ON(!lkdtm_crashpoint || !lkdtm_crashtype)) + return 0; spin_lock_irqsave(&crash_count_lock, flags); crash_count--; @@ -357,6 +291,8 @@ static void lkdtm_handler(void) if (do_it) lkdtm_do_action(lkdtm_crashtype); + + return 0; } static ssize_t lkdtm_debugfs_entry(struct file *f, @@ -556,8 +492,8 @@ static void __exit lkdtm_module_exit(void) /* Handle test-specific clean-up. */ lkdtm_usercopy_exit(); - if (lkdtm_jprobe != NULL) - unregister_jprobe(lkdtm_jprobe); + if (lkdtm_kprobe != NULL) + unregister_kprobe(lkdtm_kprobe); pr_info("Crash point unregistered\n"); } diff --git a/drivers/misc/mic/scif/scif_rb.c b/drivers/misc/mic/scif/scif_rb.c index 637cc4686742..b665757ca89a 100644 --- a/drivers/misc/mic/scif/scif_rb.c +++ b/drivers/misc/mic/scif/scif_rb.c @@ -138,7 +138,7 @@ void scif_rb_commit(struct scif_rb *rb) * the read barrier in scif_rb_count(..) */ wmb(); - ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset; + WRITE_ONCE(*rb->write_ptr, rb->current_write_offset); #ifdef CONFIG_INTEL_MIC_CARD /* * X100 Si bug: For the case where a Core is performing an EXT_WR @@ -147,7 +147,7 @@ void scif_rb_commit(struct scif_rb *rb) * This way, if ordering is violated for the Interrupt Message, it will * fall just behind the first Posted associated with the first EXT_WR. */ - ACCESS_ONCE(*rb->write_ptr) = rb->current_write_offset; + WRITE_ONCE(*rb->write_ptr, rb->current_write_offset); #endif } @@ -210,7 +210,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb) * scif_rb_space(..) 
*/ mb(); - ACCESS_ONCE(*rb->read_ptr) = new_offset; + WRITE_ONCE(*rb->read_ptr, new_offset); #ifdef CONFIG_INTEL_MIC_CARD /* * X100 Si Bug: For the case where a Core is performing an EXT_WR @@ -219,7 +219,7 @@ void scif_rb_update_read_ptr(struct scif_rb *rb) * This way, if ordering is violated for the Interrupt Message, it will * fall just behind the first Posted associated with the first EXT_WR. */ - ACCESS_ONCE(*rb->read_ptr) = new_offset; + WRITE_ONCE(*rb->read_ptr, new_offset); #endif } diff --git a/drivers/misc/mic/scif/scif_rma_list.c b/drivers/misc/mic/scif/scif_rma_list.c index e1ef8daedd5a..a036dbb4101e 100644 --- a/drivers/misc/mic/scif/scif_rma_list.c +++ b/drivers/misc/mic/scif/scif_rma_list.c @@ -277,7 +277,7 @@ retry: * Need to restart list traversal if there has been * an asynchronous list entry deletion. */ - if (ACCESS_ONCE(ep->rma_info.async_list_del)) + if (READ_ONCE(ep->rma_info.async_list_del)) goto retry; } mutex_unlock(&ep->rma_info.rma_lock); diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c index 7f327121e6d7..0c775d6fcf59 100644 --- a/drivers/misc/sgi-xp/xpc_main.c +++ b/drivers/misc/sgi-xp/xpc_main.c @@ -172,9 +172,9 @@ struct xpc_arch_operations xpc_arch_ops; * Timer function to enforce the timelimit on the partition disengage. */ static void -xpc_timeout_partition_disengage(unsigned long data) +xpc_timeout_partition_disengage(struct timer_list *t) { - struct xpc_partition *part = (struct xpc_partition *)data; + struct xpc_partition *part = from_timer(part, t, disengage_timer); DBUG_ON(time_is_after_jiffies(part->disengage_timeout)); @@ -190,7 +190,7 @@ xpc_timeout_partition_disengage(unsigned long data) * specify when the next timeout should occur. */ static void -xpc_hb_beater(unsigned long dummy) +xpc_hb_beater(struct timer_list *unused) { xpc_arch_ops.increment_heartbeat(); @@ -205,8 +205,7 @@ static void xpc_start_hb_beater(void) { xpc_arch_ops.heartbeat_init(); - init_timer(&xpc_hb_timer); - xpc_hb_timer.function = xpc_hb_beater; + timer_setup(&xpc_hb_timer, xpc_hb_beater, 0); xpc_hb_beater(0); } @@ -931,10 +930,8 @@ xpc_setup_partitions(void) part->act_state = XPC_P_AS_INACTIVE; XPC_SET_REASON(part, 0, 0); - init_timer(&part->disengage_timer); - part->disengage_timer.function = - xpc_timeout_partition_disengage; - part->disengage_timer.data = (unsigned long)part; + timer_setup(&part->disengage_timer, + xpc_timeout_partition_disengage, 0); part->setup_state = XPC_P_SS_UNSET; init_waitqueue_head(&part->teardown_wq); diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c index 7d71c04fc938..5a12d2a54049 100644 --- a/drivers/misc/sgi-xp/xpc_sn2.c +++ b/drivers/misc/sgi-xp/xpc_sn2.c @@ -323,16 +323,16 @@ xpc_handle_notify_IRQ_sn2(int irq, void *dev_id) * was received. 
*/ static void -xpc_check_for_dropped_notify_IRQ_sn2(struct xpc_partition *part) +xpc_check_for_dropped_notify_IRQ_sn2(struct timer_list *t) { - struct xpc_partition_sn2 *part_sn2 = &part->sn.sn2; + struct xpc_partition *part = + from_timer(part, t, sn.sn2.dropped_notify_IRQ_timer); if (xpc_part_ref(part)) { xpc_check_for_sent_chctl_flags_sn2(part); - part_sn2->dropped_notify_IRQ_timer.expires = jiffies + - XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL; - add_timer(&part_sn2->dropped_notify_IRQ_timer); + t->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL; + add_timer(t); xpc_part_deref(part); } } @@ -1232,10 +1232,7 @@ xpc_setup_ch_structures_sn2(struct xpc_partition *part) /* Setup a timer to check for dropped notify IRQs */ timer = &part_sn2->dropped_notify_IRQ_timer; - init_timer(timer); - timer->function = - (void (*)(unsigned long))xpc_check_for_dropped_notify_IRQ_sn2; - timer->data = (unsigned long)part; + timer_setup(timer, xpc_check_for_dropped_notify_IRQ_sn2, 0); timer->expires = jiffies + XPC_DROPPED_NOTIFY_IRQ_WAIT_INTERVAL; add_timer(timer); diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c index 1e688bfec567..9047c0a529b2 100644 --- a/drivers/misc/vmw_balloon.c +++ b/drivers/misc/vmw_balloon.c @@ -1271,7 +1271,7 @@ static int __init vmballoon_init(void) * Check if we are running on VMware's hypervisor and bail out * if we are not. */ - if (x86_hyper != &x86_hyper_vmware) + if (x86_hyper_type != X86_HYPER_VMWARE) return -ENODEV; for (is_2m_pages = 0; is_2m_pages < VMW_BALLOON_NUM_PAGE_SIZES; diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 2ad7b5c69156..ea80ff4cd7f9 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -28,6 +28,7 @@ #include <linux/hdreg.h> #include <linux/kdev_t.h> #include <linux/blkdev.h> +#include <linux/cdev.h> #include <linux/mutex.h> #include <linux/scatterlist.h> #include <linux/string_helpers.h> @@ -86,6 +87,7 @@ static int max_devices; #define MAX_DEVICES 256 static DEFINE_IDA(mmc_blk_ida); +static DEFINE_IDA(mmc_rpmb_ida); /* * There is one mmc_blk_data per slot. 
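
/*
 * Editor's sketch, not part of the patch: xpc_sn2 above shows two details
 * of the conversion - from_timer() works through nested members, and a
 * callback can re-arm itself simply by reusing the timer_list pointer it
 * was handed.  Names are invented.
 */
#include <linux/types.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

struct demo_inner {
	struct timer_list poll_timer;
};

struct demo_part {
	bool active;
	struct demo_inner inner;
};

#define DEMO_POLL_INTERVAL	(HZ / 4)

static void demo_poll(struct timer_list *t)
{
	struct demo_part *part = from_timer(part, t, inner.poll_timer);

	if (!part->active)
		return;

	/* ... periodic work on part ... */

	t->expires = jiffies + DEMO_POLL_INTERVAL;	/* re-arm ourselves */
	add_timer(t);
}

static void demo_start_polling(struct demo_part *part)
{
	part->active = true;
	timer_setup(&part->inner.poll_timer, demo_poll, 0);
	part->inner.poll_timer.expires = jiffies + DEMO_POLL_INTERVAL;
	add_timer(&part->inner.poll_timer);
}
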
@@ -96,6 +98,7 @@ struct mmc_blk_data { struct gendisk *disk; struct mmc_queue queue; struct list_head part; + struct list_head rpmbs; unsigned int flags; #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ @@ -121,6 +124,32 @@ struct mmc_blk_data { int area_type; }; +/* Device type for RPMB character devices */ +static dev_t mmc_rpmb_devt; + +/* Bus type for RPMB character devices */ +static struct bus_type mmc_rpmb_bus_type = { + .name = "mmc_rpmb", +}; + +/** + * struct mmc_rpmb_data - special RPMB device type for these areas + * @dev: the device for the RPMB area + * @chrdev: character device for the RPMB area + * @id: unique device ID number + * @part_index: partition index (0 on first) + * @md: parent MMC block device + * @node: list item, so we can put this device on a list + */ +struct mmc_rpmb_data { + struct device dev; + struct cdev chrdev; + int id; + unsigned int part_index; + struct mmc_blk_data *md; + struct list_head node; +}; + static DEFINE_MUTEX(open_lock); module_param(perdev_minors, int, 0444); @@ -299,6 +328,7 @@ struct mmc_blk_ioc_data { struct mmc_ioc_cmd ic; unsigned char *buf; u64 buf_bytes; + struct mmc_rpmb_data *rpmb; }; static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( @@ -437,14 +467,25 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, struct mmc_request mrq = {}; struct scatterlist sg; int err; - bool is_rpmb = false; + unsigned int target_part; u32 status = 0; if (!card || !md || !idata) return -EINVAL; - if (md->area_type & MMC_BLK_DATA_AREA_RPMB) - is_rpmb = true; + /* + * The RPMB accesses comes in from the character device, so we + * need to target these explicitly. Else we just target the + * partition type for the block device the ioctl() was issued + * on. + */ + if (idata->rpmb) { + /* Support multiple RPMB partitions */ + target_part = idata->rpmb->part_index; + target_part |= EXT_CSD_PART_CONFIG_ACC_RPMB; + } else { + target_part = md->part_type; + } cmd.opcode = idata->ic.opcode; cmd.arg = idata->ic.arg; @@ -488,7 +529,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, mrq.cmd = &cmd; - err = mmc_blk_part_switch(card, md->part_type); + err = mmc_blk_part_switch(card, target_part); if (err) return err; @@ -498,7 +539,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, return err; } - if (is_rpmb) { + if (idata->rpmb) { err = mmc_set_blockcount(card, data.blocks, idata->ic.write_flag & (1 << 31)); if (err) @@ -538,7 +579,7 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, memcpy(&(idata->ic.response), cmd.resp, sizeof(cmd.resp)); - if (is_rpmb) { + if (idata->rpmb) { /* * Ensure RPMB command has completed by polling CMD13 * "Send Status". @@ -554,7 +595,8 @@ static int __mmc_blk_ioctl_cmd(struct mmc_card *card, struct mmc_blk_data *md, } static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, - struct mmc_ioc_cmd __user *ic_ptr) + struct mmc_ioc_cmd __user *ic_ptr, + struct mmc_rpmb_data *rpmb) { struct mmc_blk_ioc_data *idata; struct mmc_blk_ioc_data *idatas[1]; @@ -566,6 +608,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, idata = mmc_blk_ioctl_copy_from_user(ic_ptr); if (IS_ERR(idata)) return PTR_ERR(idata); + /* This will be NULL on non-RPMB ioctl():s */ + idata->rpmb = rpmb; card = md->queue.card; if (IS_ERR(card)) { @@ -581,7 +625,8 @@ static int mmc_blk_ioctl_cmd(struct mmc_blk_data *md, idata->ic.write_flag ? 
REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); idatas[0] = idata; - req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL; + req_to_mmc_queue_req(req)->drv_op = + rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; req_to_mmc_queue_req(req)->drv_op_data = idatas; req_to_mmc_queue_req(req)->ioc_count = 1; blk_execute_rq(mq->queue, NULL, req, 0); @@ -596,7 +641,8 @@ cmd_done: } static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, - struct mmc_ioc_multi_cmd __user *user) + struct mmc_ioc_multi_cmd __user *user, + struct mmc_rpmb_data *rpmb) { struct mmc_blk_ioc_data **idata = NULL; struct mmc_ioc_cmd __user *cmds = user->cmds; @@ -627,6 +673,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, num_of_cmds = i; goto cmd_err; } + /* This will be NULL on non-RPMB ioctl():s */ + idata[i]->rpmb = rpmb; } card = md->queue.card; @@ -643,7 +691,8 @@ static int mmc_blk_ioctl_multi_cmd(struct mmc_blk_data *md, req = blk_get_request(mq->queue, idata[0]->ic.write_flag ? REQ_OP_DRV_OUT : REQ_OP_DRV_IN, __GFP_RECLAIM); - req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_IOCTL; + req_to_mmc_queue_req(req)->drv_op = + rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL; req_to_mmc_queue_req(req)->drv_op_data = idata; req_to_mmc_queue_req(req)->ioc_count = num_of_cmds; blk_execute_rq(mq->queue, NULL, req, 0); @@ -691,7 +740,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, if (!md) return -EINVAL; ret = mmc_blk_ioctl_cmd(md, - (struct mmc_ioc_cmd __user *)arg); + (struct mmc_ioc_cmd __user *)arg, + NULL); mmc_blk_put(md); return ret; case MMC_IOC_MULTI_CMD: @@ -702,7 +752,8 @@ static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, if (!md) return -EINVAL; ret = mmc_blk_ioctl_multi_cmd(md, - (struct mmc_ioc_multi_cmd __user *)arg); + (struct mmc_ioc_multi_cmd __user *)arg, + NULL); mmc_blk_put(md); return ret; default: @@ -1152,18 +1203,6 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) md->reset_done &= ~type; } -int mmc_access_rpmb(struct mmc_queue *mq) -{ - struct mmc_blk_data *md = mq->blkdata; - /* - * If this is a RPMB partition access, return ture - */ - if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB) - return true; - - return false; -} - /* * The non-block commands come back from the block layer after it queued it and * processed it with all other requests and then they get issued in this @@ -1174,17 +1213,19 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) struct mmc_queue_req *mq_rq; struct mmc_card *card = mq->card; struct mmc_blk_data *md = mq->blkdata; - struct mmc_blk_data *main_md = dev_get_drvdata(&card->dev); struct mmc_blk_ioc_data **idata; + bool rpmb_ioctl; u8 **ext_csd; u32 status; int ret; int i; mq_rq = req_to_mmc_queue_req(req); + rpmb_ioctl = (mq_rq->drv_op == MMC_DRV_OP_IOCTL_RPMB); switch (mq_rq->drv_op) { case MMC_DRV_OP_IOCTL: + case MMC_DRV_OP_IOCTL_RPMB: idata = mq_rq->drv_op_data; for (i = 0, ret = 0; i < mq_rq->ioc_count; i++) { ret = __mmc_blk_ioctl_cmd(card, md, idata[i]); @@ -1192,8 +1233,8 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req) break; } /* Always switch back to main area after RPMB access */ - if (md->area_type & MMC_BLK_DATA_AREA_RPMB) - mmc_blk_part_switch(card, main_md->part_type); + if (rpmb_ioctl) + mmc_blk_part_switch(card, 0); break; case MMC_DRV_OP_BOOT_WP: ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, @@ -1534,25 +1575,27 @@ static enum mmc_blk_status mmc_blk_err_check(struct mmc_card *card, } static 
void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, - int disable_multi, bool *do_rel_wr, - bool *do_data_tag) + int disable_multi, bool *do_rel_wr_p, + bool *do_data_tag_p) { struct mmc_blk_data *md = mq->blkdata; struct mmc_card *card = md->queue.card; struct mmc_blk_request *brq = &mqrq->brq; struct request *req = mmc_queue_req_to_req(mqrq); + bool do_rel_wr, do_data_tag; /* * Reliable writes are used to implement Forced Unit Access and * are supported only on MMCs. */ - *do_rel_wr = (req->cmd_flags & REQ_FUA) && - rq_data_dir(req) == WRITE && - (md->flags & MMC_BLK_REL_WR); + do_rel_wr = (req->cmd_flags & REQ_FUA) && + rq_data_dir(req) == WRITE && + (md->flags & MMC_BLK_REL_WR); memset(brq, 0, sizeof(struct mmc_blk_request)); brq->mrq.data = &brq->data; + brq->mrq.tag = req->tag; brq->stop.opcode = MMC_STOP_TRANSMISSION; brq->stop.arg = 0; @@ -1567,6 +1610,14 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, brq->data.blksz = 512; brq->data.blocks = blk_rq_sectors(req); + brq->data.blk_addr = blk_rq_pos(req); + + /* + * The command queue supports 2 priorities: "high" (1) and "simple" (0). + * The eMMC will give "high" priority tasks priority over "simple" + * priority tasks. Here we always set "simple" priority by not setting + * MMC_DATA_PRIO. + */ /* * The block layer doesn't support all sector count @@ -1596,18 +1647,23 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, brq->data.blocks); } - if (*do_rel_wr) + if (do_rel_wr) { mmc_apply_rel_rw(brq, card, req); + brq->data.flags |= MMC_DATA_REL_WR; + } /* * Data tag is used only during writing meta data to speed * up write and any subsequent read of this meta data */ - *do_data_tag = card->ext_csd.data_tag_unit_size && - (req->cmd_flags & REQ_META) && - (rq_data_dir(req) == WRITE) && - ((brq->data.blocks * brq->data.blksz) >= - card->ext_csd.data_tag_unit_size); + do_data_tag = card->ext_csd.data_tag_unit_size && + (req->cmd_flags & REQ_META) && + (rq_data_dir(req) == WRITE) && + ((brq->data.blocks * brq->data.blksz) >= + card->ext_csd.data_tag_unit_size); + + if (do_data_tag) + brq->data.flags |= MMC_DATA_DAT_TAG; mmc_set_data_timeout(&brq->data, card); @@ -1634,6 +1690,12 @@ static void mmc_blk_data_prep(struct mmc_queue *mq, struct mmc_queue_req *mqrq, } mqrq->areq.mrq = &brq->mrq; + + if (do_rel_wr_p) + *do_rel_wr_p = do_rel_wr; + + if (do_data_tag_p) + *do_data_tag_p = do_data_tag; } static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, @@ -1948,7 +2010,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) if (req && !mq->qcnt) /* claim host only for the first request */ - mmc_get_card(card); + mmc_get_card(card, NULL); ret = mmc_blk_part_switch(card, md->part_type); if (ret) { @@ -2011,7 +2073,7 @@ void mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) out: if (!mq->qcnt) - mmc_put_card(card); + mmc_put_card(card, NULL); } static inline int mmc_blk_readonly(struct mmc_card *card) @@ -2068,6 +2130,7 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, spin_lock_init(&md->lock); INIT_LIST_HEAD(&md->part); + INIT_LIST_HEAD(&md->rpmbs); md->usage = 1; ret = mmc_init_queue(&md->queue, card, &md->lock, subname); @@ -2186,6 +2249,158 @@ static int mmc_blk_alloc_part(struct mmc_card *card, return 0; } +/** + * mmc_rpmb_ioctl() - ioctl handler for the RPMB chardev + * @filp: the character device file + * @cmd: the ioctl() command + * @arg: the argument from userspace + * + * This will essentially just 
redirect the ioctl()s coming in over to + * the main block device spawning the RPMB character device. + */ +static long mmc_rpmb_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct mmc_rpmb_data *rpmb = filp->private_data; + int ret; + + switch (cmd) { + case MMC_IOC_CMD: + ret = mmc_blk_ioctl_cmd(rpmb->md, + (struct mmc_ioc_cmd __user *)arg, + rpmb); + break; + case MMC_IOC_MULTI_CMD: + ret = mmc_blk_ioctl_multi_cmd(rpmb->md, + (struct mmc_ioc_multi_cmd __user *)arg, + rpmb); + break; + default: + ret = -EINVAL; + break; + } + + return 0; +} + +#ifdef CONFIG_COMPAT +static long mmc_rpmb_ioctl_compat(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + return mmc_rpmb_ioctl(filp, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + +static int mmc_rpmb_chrdev_open(struct inode *inode, struct file *filp) +{ + struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, + struct mmc_rpmb_data, chrdev); + + get_device(&rpmb->dev); + filp->private_data = rpmb; + mmc_blk_get(rpmb->md->disk); + + return nonseekable_open(inode, filp); +} + +static int mmc_rpmb_chrdev_release(struct inode *inode, struct file *filp) +{ + struct mmc_rpmb_data *rpmb = container_of(inode->i_cdev, + struct mmc_rpmb_data, chrdev); + + put_device(&rpmb->dev); + mmc_blk_put(rpmb->md); + + return 0; +} + +static const struct file_operations mmc_rpmb_fileops = { + .release = mmc_rpmb_chrdev_release, + .open = mmc_rpmb_chrdev_open, + .owner = THIS_MODULE, + .llseek = no_llseek, + .unlocked_ioctl = mmc_rpmb_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = mmc_rpmb_ioctl_compat, +#endif +}; + +static void mmc_blk_rpmb_device_release(struct device *dev) +{ + struct mmc_rpmb_data *rpmb = dev_get_drvdata(dev); + + ida_simple_remove(&mmc_rpmb_ida, rpmb->id); + kfree(rpmb); +} + +static int mmc_blk_alloc_rpmb_part(struct mmc_card *card, + struct mmc_blk_data *md, + unsigned int part_index, + sector_t size, + const char *subname) +{ + int devidx, ret; + char rpmb_name[DISK_NAME_LEN]; + char cap_str[10]; + struct mmc_rpmb_data *rpmb; + + /* This creates the minor number for the RPMB char device */ + devidx = ida_simple_get(&mmc_rpmb_ida, 0, max_devices, GFP_KERNEL); + if (devidx < 0) + return devidx; + + rpmb = kzalloc(sizeof(*rpmb), GFP_KERNEL); + if (!rpmb) { + ida_simple_remove(&mmc_rpmb_ida, devidx); + return -ENOMEM; + } + + snprintf(rpmb_name, sizeof(rpmb_name), + "mmcblk%u%s", card->host->index, subname ? 
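
/*
 * Editor's sketch, not part of the patch: userspace view of the new RPMB
 * character device.  It takes the same MMC_IOC_CMD / MMC_IOC_MULTI_CMD
 * ioctls as /dev/mmcblkN, but the kernel now switches to the RPMB
 * partition for the duration of the command.  The device path and the
 * bare CMD18 read below are examples only; real RPMB traffic is carried
 * in authenticated 512-byte frames built by tools such as mmc-utils.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

/* not exported through UAPI; values mirror the kernel's mmc/core.h */
#define MMC_RSP_PRESENT	(1 << 0)
#define MMC_RSP_CRC	(1 << 2)
#define MMC_RSP_OPCODE	(1 << 4)
#define MMC_RSP_R1	(MMC_RSP_PRESENT | MMC_RSP_CRC | MMC_RSP_OPCODE)
#define MMC_CMD_ADTC	(1 << 5)

int main(void)
{
	unsigned char frame[512] = { 0 };
	struct mmc_ioc_cmd ic = { 0 };
	int fd, ret;

	fd = open("/dev/mmcblk0rpmb", O_RDWR);	/* example path */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ic.opcode = 18;				/* READ_MULTIPLE_BLOCK */
	ic.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
	ic.blksz = 512;
	ic.blocks = 1;
	ic.write_flag = 0;
	mmc_ioc_cmd_set_data(ic, frame);

	ret = ioctl(fd, MMC_IOC_CMD, &ic);
	if (ret)
		perror("MMC_IOC_CMD");

	close(fd);
	return ret ? 1 : 0;
}
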
subname : ""); + + rpmb->id = devidx; + rpmb->part_index = part_index; + rpmb->dev.init_name = rpmb_name; + rpmb->dev.bus = &mmc_rpmb_bus_type; + rpmb->dev.devt = MKDEV(MAJOR(mmc_rpmb_devt), rpmb->id); + rpmb->dev.parent = &card->dev; + rpmb->dev.release = mmc_blk_rpmb_device_release; + device_initialize(&rpmb->dev); + dev_set_drvdata(&rpmb->dev, rpmb); + rpmb->md = md; + + cdev_init(&rpmb->chrdev, &mmc_rpmb_fileops); + rpmb->chrdev.owner = THIS_MODULE; + ret = cdev_device_add(&rpmb->chrdev, &rpmb->dev); + if (ret) { + pr_err("%s: could not add character device\n", rpmb_name); + goto out_put_device; + } + + list_add(&rpmb->node, &md->rpmbs); + + string_get_size((u64)size, 512, STRING_UNITS_2, + cap_str, sizeof(cap_str)); + + pr_info("%s: %s %s partition %u %s, chardev (%d:%d)\n", + rpmb_name, mmc_card_id(card), + mmc_card_name(card), EXT_CSD_PART_CONFIG_ACC_RPMB, cap_str, + MAJOR(mmc_rpmb_devt), rpmb->id); + + return 0; + +out_put_device: + put_device(&rpmb->dev); + return ret; +} + +static void mmc_blk_remove_rpmb_part(struct mmc_rpmb_data *rpmb) + +{ + cdev_device_del(&rpmb->chrdev, &rpmb->dev); + put_device(&rpmb->dev); +} + /* MMC Physical partitions consist of two boot partitions and * up to four general purpose partitions. * For each partition enabled in EXT_CSD a block device will be allocatedi @@ -2194,13 +2409,26 @@ static int mmc_blk_alloc_part(struct mmc_card *card, static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) { - int idx, ret = 0; + int idx, ret; if (!mmc_card_mmc(card)) return 0; for (idx = 0; idx < card->nr_parts; idx++) { - if (card->part[idx].size) { + if (card->part[idx].area_type & MMC_BLK_DATA_AREA_RPMB) { + /* + * RPMB partitions does not provide block access, they + * are only accessed using ioctl():s. Thus create + * special RPMB block devices that do not have a + * backing block queue for these. 
+ */ + ret = mmc_blk_alloc_rpmb_part(card, md, + card->part[idx].part_cfg, + card->part[idx].size >> 9, + card->part[idx].name); + if (ret) + return ret; + } else if (card->part[idx].size) { ret = mmc_blk_alloc_part(card, md, card->part[idx].part_cfg, card->part[idx].size >> 9, @@ -2212,7 +2440,7 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) } } - return ret; + return 0; } static void mmc_blk_remove_req(struct mmc_blk_data *md) @@ -2249,7 +2477,15 @@ static void mmc_blk_remove_parts(struct mmc_card *card, { struct list_head *pos, *q; struct mmc_blk_data *part_md; + struct mmc_rpmb_data *rpmb; + /* Remove RPMB partitions */ + list_for_each_safe(pos, q, &md->rpmbs) { + rpmb = list_entry(pos, struct mmc_rpmb_data, node); + list_del(pos); + mmc_blk_remove_rpmb_part(rpmb); + } + /* Remove block partitions */ list_for_each_safe(pos, q, &md->part) { part_md = list_entry(pos, struct mmc_blk_data, part); list_del(pos); @@ -2568,6 +2804,17 @@ static int __init mmc_blk_init(void) { int res; + res = bus_register(&mmc_rpmb_bus_type); + if (res < 0) { + pr_err("mmcblk: could not register RPMB bus type\n"); + return res; + } + res = alloc_chrdev_region(&mmc_rpmb_devt, 0, MAX_DEVICES, "rpmb"); + if (res < 0) { + pr_err("mmcblk: failed to allocate rpmb chrdev region\n"); + goto out_bus_unreg; + } + if (perdev_minors != CONFIG_MMC_BLOCK_MINORS) pr_info("mmcblk: using %d minors per device\n", perdev_minors); @@ -2575,16 +2822,20 @@ static int __init mmc_blk_init(void) res = register_blkdev(MMC_BLOCK_MAJOR, "mmc"); if (res) - goto out; + goto out_chrdev_unreg; res = mmc_register_driver(&mmc_driver); if (res) - goto out2; + goto out_blkdev_unreg; return 0; - out2: + +out_blkdev_unreg: unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); - out: +out_chrdev_unreg: + unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); +out_bus_unreg: + bus_unregister(&mmc_rpmb_bus_type); return res; } @@ -2592,6 +2843,7 @@ static void __exit mmc_blk_exit(void) { mmc_unregister_driver(&mmc_driver); unregister_blkdev(MMC_BLOCK_MAJOR, "mmc"); + unregister_chrdev_region(mmc_rpmb_devt, MAX_DEVICES); } module_init(mmc_blk_init); diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 301246513a37..a4b49e25fe96 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -369,10 +369,17 @@ int mmc_add_card(struct mmc_card *card) */ void mmc_remove_card(struct mmc_card *card) { + struct mmc_host *host = card->host; + #ifdef CONFIG_DEBUG_FS mmc_remove_card_debugfs(card); #endif + if (host->cqe_enabled) { + host->cqe_ops->cqe_disable(host); + host->cqe_enabled = false; + } + if (mmc_card_present(card)) { if (mmc_host_is_spi(card->host)) { pr_info("%s: SPI card removed\n", diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 66c9cf49ad2f..1f0f44f4dd5f 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -266,7 +266,8 @@ static void __mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) host->ops->request(host, mrq); } -static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq) +static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq, + bool cqe) { if (mrq->sbc) { pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n", @@ -275,9 +276,12 @@ static void mmc_mrq_pr_debug(struct mmc_host *host, struct mmc_request *mrq) } if (mrq->cmd) { - pr_debug("%s: starting CMD%u arg %08x flags %08x\n", - mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg, - mrq->cmd->flags); + pr_debug("%s: starting %sCMD%u arg %08x flags 
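
/*
 * Editor's sketch, not part of the patch: the reworked mmc_blk_init()
 * above switches to the usual "register in order, unwind in reverse
 * order" goto ladder once a second resource (the RPMB chrdev region and
 * bus type) has to be cleaned up on failure.  Minimal shape of that
 * idiom, with invented names:
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/device.h>

#define DEMO_MAX_DEVICES	256

static dev_t demo_region;

static struct bus_type demo_bus = {
	.name = "demo",
};

static int __init demo_init(void)
{
	int res;

	res = bus_register(&demo_bus);
	if (res < 0)
		return res;

	res = alloc_chrdev_region(&demo_region, 0, DEMO_MAX_DEVICES, "demo");
	if (res < 0)
		goto out_bus_unreg;

	/* each further registration adds one more label below */
	return 0;

out_bus_unreg:
	bus_unregister(&demo_bus);
	return res;
}

static void __exit demo_exit(void)
{
	unregister_chrdev_region(demo_region, DEMO_MAX_DEVICES);
	bus_unregister(&demo_bus);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
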
%08x\n", + mmc_hostname(host), cqe ? "CQE direct " : "", + mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags); + } else if (cqe) { + pr_debug("%s: starting CQE transfer for tag %d blkaddr %u\n", + mmc_hostname(host), mrq->tag, mrq->data->blk_addr); } if (mrq->data) { @@ -333,7 +337,7 @@ static int mmc_mrq_prep(struct mmc_host *host, struct mmc_request *mrq) return 0; } -static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) +int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) { int err; @@ -342,7 +346,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) if (mmc_card_removed(host->card)) return -ENOMEDIUM; - mmc_mrq_pr_debug(host, mrq); + mmc_mrq_pr_debug(host, mrq, false); WARN_ON(!host->claimed); @@ -355,6 +359,7 @@ static int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) return 0; } +EXPORT_SYMBOL(mmc_start_request); /* * mmc_wait_data_done() - done callback for data request @@ -482,6 +487,155 @@ void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq) } EXPORT_SYMBOL(mmc_wait_for_req_done); +/* + * mmc_cqe_start_req - Start a CQE request. + * @host: MMC host to start the request + * @mrq: request to start + * + * Start the request, re-tuning if needed and it is possible. Returns an error + * code if the request fails to start or -EBUSY if CQE is busy. + */ +int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq) +{ + int err; + + /* + * CQE cannot process re-tuning commands. Caller must hold retuning + * while CQE is in use. Re-tuning can happen here only when CQE has no + * active requests i.e. this is the first. Note, re-tuning will call + * ->cqe_off(). + */ + err = mmc_retune(host); + if (err) + goto out_err; + + mrq->host = host; + + mmc_mrq_pr_debug(host, mrq, true); + + err = mmc_mrq_prep(host, mrq); + if (err) + goto out_err; + + err = host->cqe_ops->cqe_request(host, mrq); + if (err) + goto out_err; + + trace_mmc_request_start(host, mrq); + + return 0; + +out_err: + if (mrq->cmd) { + pr_debug("%s: failed to start CQE direct CMD%u, error %d\n", + mmc_hostname(host), mrq->cmd->opcode, err); + } else { + pr_debug("%s: failed to start CQE transfer for tag %d, error %d\n", + mmc_hostname(host), mrq->tag, err); + } + return err; +} +EXPORT_SYMBOL(mmc_cqe_start_req); + +/** + * mmc_cqe_request_done - CQE has finished processing an MMC request + * @host: MMC host which completed request + * @mrq: MMC request which completed + * + * CQE drivers should call this function when they have completed + * their processing of a request. 
+ */ +void mmc_cqe_request_done(struct mmc_host *host, struct mmc_request *mrq) +{ + mmc_should_fail_request(host, mrq); + + /* Flag re-tuning needed on CRC errors */ + if ((mrq->cmd && mrq->cmd->error == -EILSEQ) || + (mrq->data && mrq->data->error == -EILSEQ)) + mmc_retune_needed(host); + + trace_mmc_request_done(host, mrq); + + if (mrq->cmd) { + pr_debug("%s: CQE req done (direct CMD%u): %d\n", + mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->error); + } else { + pr_debug("%s: CQE transfer done tag %d\n", + mmc_hostname(host), mrq->tag); + } + + if (mrq->data) { + pr_debug("%s: %d bytes transferred: %d\n", + mmc_hostname(host), + mrq->data->bytes_xfered, mrq->data->error); + } + + mrq->done(mrq); +} +EXPORT_SYMBOL(mmc_cqe_request_done); + +/** + * mmc_cqe_post_req - CQE post process of a completed MMC request + * @host: MMC host + * @mrq: MMC request to be processed + */ +void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq) +{ + if (host->cqe_ops->cqe_post_req) + host->cqe_ops->cqe_post_req(host, mrq); +} +EXPORT_SYMBOL(mmc_cqe_post_req); + +/* Arbitrary 1 second timeout */ +#define MMC_CQE_RECOVERY_TIMEOUT 1000 + +/* + * mmc_cqe_recovery - Recover from CQE errors. + * @host: MMC host to recover + * + * Recovery consists of stopping CQE, stopping eMMC, discarding the queue in + * in eMMC, and discarding the queue in CQE. CQE must call + * mmc_cqe_request_done() on all requests. An error is returned if the eMMC + * fails to discard its queue. + */ +int mmc_cqe_recovery(struct mmc_host *host) +{ + struct mmc_command cmd; + int err; + + mmc_retune_hold_now(host); + + /* + * Recovery is expected seldom, if at all, but it reduces performance, + * so make sure it is not completely silent. + */ + pr_warn("%s: running CQE recovery\n", mmc_hostname(host)); + + host->cqe_ops->cqe_recovery_start(host); + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = MMC_STOP_TRANSMISSION, + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC, + cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */ + cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT, + mmc_wait_for_cmd(host, &cmd, 0); + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = MMC_CMDQ_TASK_MGMT; + cmd.arg = 1; /* Discard entire queue */ + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; + cmd.flags &= ~MMC_RSP_CRC; /* Ignore CRC */ + cmd.busy_timeout = MMC_CQE_RECOVERY_TIMEOUT, + err = mmc_wait_for_cmd(host, &cmd, 0); + + host->cqe_ops->cqe_recovery_finish(host); + + mmc_retune_release(host); + + return err; +} +EXPORT_SYMBOL(mmc_cqe_recovery); + /** * mmc_is_req_done - Determine if a 'cap_cmd_during_tfr' request is done * @host: MMC host @@ -832,9 +986,36 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) } EXPORT_SYMBOL(mmc_align_data_size); +/* + * Allow claiming an already claimed host if the context is the same or there is + * no context but the task is the same. 
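
/*
 * Editor's sketch, not part of the patch: the host-controller side of the
 * new hooks.  When the controller reports that a tagged task finished,
 * the driver looks up the mmc_request it stashed for that tag and hands
 * it back with mmc_cqe_request_done().  The per-tag array and all demo_*
 * names are invented; a real CQE driver keeps similar per-slot state.
 */
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>

#define DEMO_NUM_SLOTS	32

struct demo_cqe_host {
	struct mmc_host *mmc;
	struct mmc_request *slot[DEMO_NUM_SLOTS];	/* indexed by mrq->tag */
};

static void demo_cqe_complete_tag(struct demo_cqe_host *cq, int tag, int error)
{
	struct mmc_request *mrq = cq->slot[tag];

	if (!mrq)
		return;

	cq->slot[tag] = NULL;
	if (mrq->data) {
		mrq->data->error = error;
		if (!error)
			mrq->data->bytes_xfered =
				mrq->data->blocks * mrq->data->blksz;
	}
	mmc_cqe_request_done(cq->mmc, mrq);	/* ends up calling mrq->done() */
}
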
+ */ +static inline bool mmc_ctx_matches(struct mmc_host *host, struct mmc_ctx *ctx, + struct task_struct *task) +{ + return host->claimer == ctx || + (!ctx && task && host->claimer->task == task); +} + +static inline void mmc_ctx_set_claimer(struct mmc_host *host, + struct mmc_ctx *ctx, + struct task_struct *task) +{ + if (!host->claimer) { + if (ctx) + host->claimer = ctx; + else + host->claimer = &host->default_ctx; + } + if (task) + host->claimer->task = task; +} + /** * __mmc_claim_host - exclusively claim a host * @host: mmc host to claim + * @ctx: context that claims the host or NULL in which case the default + * context will be used * @abort: whether or not the operation should be aborted * * Claim a host for a set of operations. If @abort is non null and @@ -842,8 +1023,10 @@ EXPORT_SYMBOL(mmc_align_data_size); * that non-zero value without acquiring the lock. Returns zero * with the lock held otherwise. */ -int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) +int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx, + atomic_t *abort) { + struct task_struct *task = ctx ? NULL : current; DECLARE_WAITQUEUE(wait, current); unsigned long flags; int stop; @@ -856,7 +1039,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) while (1) { set_current_state(TASK_UNINTERRUPTIBLE); stop = abort ? atomic_read(abort) : 0; - if (stop || !host->claimed || host->claimer == current) + if (stop || !host->claimed || mmc_ctx_matches(host, ctx, task)) break; spin_unlock_irqrestore(&host->lock, flags); schedule(); @@ -865,7 +1048,7 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) set_current_state(TASK_RUNNING); if (!stop) { host->claimed = 1; - host->claimer = current; + mmc_ctx_set_claimer(host, ctx, task); host->claim_cnt += 1; if (host->claim_cnt == 1) pm = true; @@ -900,6 +1083,7 @@ void mmc_release_host(struct mmc_host *host) spin_unlock_irqrestore(&host->lock, flags); } else { host->claimed = 0; + host->claimer->task = NULL; host->claimer = NULL; spin_unlock_irqrestore(&host->lock, flags); wake_up(&host->wq); @@ -913,10 +1097,10 @@ EXPORT_SYMBOL(mmc_release_host); * This is a helper function, which fetches a runtime pm reference for the * card device and also claims the host. */ -void mmc_get_card(struct mmc_card *card) +void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx) { pm_runtime_get_sync(&card->dev); - mmc_claim_host(card->host); + __mmc_claim_host(card->host, ctx, NULL); } EXPORT_SYMBOL(mmc_get_card); @@ -924,9 +1108,13 @@ EXPORT_SYMBOL(mmc_get_card); * This is a helper function, which releases the host and drops the runtime * pm reference for the card device. */ -void mmc_put_card(struct mmc_card *card) +void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx) { - mmc_release_host(card->host); + struct mmc_host *host = card->host; + + WARN_ON(ctx && host->claimer != ctx); + + mmc_release_host(host); pm_runtime_mark_last_busy(&card->dev); pm_runtime_put_autosuspend(&card->dev); } @@ -1400,6 +1588,16 @@ EXPORT_SYMBOL_GPL(mmc_regulator_set_vqmmc); #endif /* CONFIG_REGULATOR */ +/** + * mmc_regulator_get_supply - try to get VMMC and VQMMC regulators for a host + * @mmc: the host to regulate + * + * Returns 0 or errno. errno should be handled, it is either a critical error + * or -EPROBE_DEFER. 0 means no critical error but it does not mean all + * regulators have been found because they all are optional. 
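
/*
 * Editor's sketch, not part of the patch: how the new mmc_ctx claimer is
 * meant to be used.  Callers that share one struct mmc_ctx (say, a block
 * queue's issue path and its recovery work, which may run in different
 * tasks) nest their claims instead of blocking on each other, while
 * callers passing a NULL ctx keep the old per-task behaviour.
 * demo_queue and the functions are invented; mmc_get_card() and
 * mmc_put_card() live in the core's private core.h.
 */
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "core.h"		/* drivers/mmc/core/core.h */

struct demo_queue {
	struct mmc_card *card;
	struct mmc_ctx ctx;		/* one context per queue */
};

static void demo_issue(struct demo_queue *q)
{
	mmc_get_card(q->card, &q->ctx);		/* claim for the queue */
	/* ... issue requests ... */
	mmc_put_card(q->card, &q->ctx);
}

static void demo_recovery(struct demo_queue *q)
{
	/*
	 * Possibly a different task, but the same ctx, so the claim nests
	 * with demo_issue() instead of waiting for it to release the host.
	 */
	mmc_get_card(q->card, &q->ctx);
	/* ... error recovery ... */
	mmc_put_card(q->card, &q->ctx);
}
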
If you require + * certain regulators, you need to check separately in your driver if they got + * populated after calling this function. + */ int mmc_regulator_get_supply(struct mmc_host *mmc) { struct device *dev = mmc_dev(mmc); @@ -1484,11 +1682,33 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) } +int mmc_host_set_uhs_voltage(struct mmc_host *host) +{ + u32 clock; + + /* + * During a signal voltage level switch, the clock must be gated + * for 5 ms according to the SD spec + */ + clock = host->ios.clock; + host->ios.clock = 0; + mmc_set_ios(host); + + if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) + return -EAGAIN; + + /* Keep clock gated for at least 10 ms, though spec only says 5 ms */ + mmc_delay(10); + host->ios.clock = clock; + mmc_set_ios(host); + + return 0; +} + int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr) { struct mmc_command cmd = {}; int err = 0; - u32 clock; /* * If we cannot switch voltages, return failure so the caller @@ -1520,15 +1740,8 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr) err = -EAGAIN; goto power_cycle; } - /* - * During a signal voltage level switch, the clock must be gated - * for 5 ms according to the SD spec - */ - clock = host->ios.clock; - host->ios.clock = 0; - mmc_set_ios(host); - if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180)) { + if (mmc_host_set_uhs_voltage(host)) { /* * Voltages may not have been switched, but we've already * sent CMD11, so a power cycle is required anyway @@ -1537,11 +1750,6 @@ int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr) goto power_cycle; } - /* Keep clock gated for at least 10 ms, though spec only says 5 ms */ - mmc_delay(10); - host->ios.clock = clock; - mmc_set_ios(host); - /* Wait for at least 1 ms according to spec */ mmc_delay(1); diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index ca861091a776..71e6c6d7ceb7 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -49,6 +49,7 @@ void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); void mmc_set_bus_width(struct mmc_host *host, unsigned int width); u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); int mmc_set_uhs_voltage(struct mmc_host *host, u32 ocr); +int mmc_host_set_uhs_voltage(struct mmc_host *host); int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage); void mmc_set_timing(struct mmc_host *host, unsigned int timing); void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type); @@ -107,6 +108,8 @@ static inline void mmc_unregister_pm_notifier(struct mmc_host *host) { } void mmc_wait_for_req_done(struct mmc_host *host, struct mmc_request *mrq); bool mmc_is_req_done(struct mmc_host *host, struct mmc_request *mrq); +int mmc_start_request(struct mmc_host *host, struct mmc_request *mrq); + struct mmc_async_req; struct mmc_async_req *mmc_start_areq(struct mmc_host *host, @@ -128,10 +131,11 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen); int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount, bool is_rel_write); -int __mmc_claim_host(struct mmc_host *host, atomic_t *abort); +int __mmc_claim_host(struct mmc_host *host, struct mmc_ctx *ctx, + atomic_t *abort); void mmc_release_host(struct mmc_host *host); -void mmc_get_card(struct mmc_card *card); -void mmc_put_card(struct mmc_card *card); +void mmc_get_card(struct mmc_card *card, struct mmc_ctx *ctx); +void mmc_put_card(struct mmc_card *card, struct mmc_ctx *ctx); /** * mmc_claim_host - exclusively claim a host @@ -141,7 +145,11 @@ void 
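
/*
 * Editor's sketch, not part of the patch: the calling convention spelled
 * out by the new mmc_regulator_get_supply() kerneldoc above - propagate a
 * non-zero return (it may be -EPROBE_DEFER), then check the individual
 * regulators yourself if your controller cannot live without them.  The
 * "vmmc is mandatory" policy below is just an example.
 */
#include <linux/err.h>
#include <linux/mmc/host.h>
#include <linux/regulator/consumer.h>

static int demo_get_regulators(struct mmc_host *mmc)
{
	int ret;

	ret = mmc_regulator_get_supply(mmc);
	if (ret)
		return ret;		/* critical error or -EPROBE_DEFER */

	/* the core treats both supplies as optional; enforce local policy */
	if (IS_ERR(mmc->supply.vmmc))
		return -ENODEV;		/* this controller needs vmmc */

	if (IS_ERR(mmc->supply.vqmmc))
		dev_info(mmc_dev(mmc),
			 "no vqmmc regulator, signal voltage switching disabled\n");

	return 0;
}
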
mmc_put_card(struct mmc_card *card); */ static inline void mmc_claim_host(struct mmc_host *host) { - __mmc_claim_host(host, NULL); + __mmc_claim_host(host, NULL, NULL); } +int mmc_cqe_start_req(struct mmc_host *host, struct mmc_request *mrq); +void mmc_cqe_post_req(struct mmc_host *host, struct mmc_request *mrq); +int mmc_cqe_recovery(struct mmc_host *host); + #endif diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index ad88deb2e8f3..35a9e4fd1a9f 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -111,12 +111,6 @@ void mmc_retune_hold(struct mmc_host *host) host->hold_retune += 1; } -void mmc_retune_hold_now(struct mmc_host *host) -{ - host->retune_now = 0; - host->hold_retune += 1; -} - void mmc_retune_release(struct mmc_host *host) { if (host->hold_retune) @@ -124,6 +118,7 @@ void mmc_retune_release(struct mmc_host *host) else WARN_ON(1); } +EXPORT_SYMBOL(mmc_retune_release); int mmc_retune(struct mmc_host *host) { @@ -184,7 +179,7 @@ static void mmc_retune_timer(unsigned long data) int mmc_of_parse(struct mmc_host *host) { struct device *dev = host->parent; - u32 bus_width; + u32 bus_width, drv_type; int ret; bool cd_cap_invert, cd_gpio_invert = false; bool ro_cap_invert, ro_gpio_invert = false; @@ -326,6 +321,15 @@ int mmc_of_parse(struct mmc_host *host) if (device_property_read_bool(dev, "no-mmc")) host->caps2 |= MMC_CAP2_NO_MMC; + /* Must be after "non-removable" check */ + if (device_property_read_u32(dev, "fixed-emmc-driver-type", &drv_type) == 0) { + if (host->caps & MMC_CAP_NONREMOVABLE) + host->fixed_drv_type = drv_type; + else + dev_err(host->parent, + "can't use fixed driver type, media is removable\n"); + } + host->dsr_req = !device_property_read_u32(dev, "dsr", &host->dsr); if (host->dsr_req && (host->dsr & ~0xffff)) { dev_err(host->parent, @@ -398,6 +402,8 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) host->max_blk_size = 512; host->max_blk_count = PAGE_SIZE / 512; + host->fixed_drv_type = -EINVAL; + return host; } diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index 77d6f60d1bf9..fb689a1065ed 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -19,12 +19,17 @@ void mmc_unregister_host_class(void); void mmc_retune_enable(struct mmc_host *host); void mmc_retune_disable(struct mmc_host *host); void mmc_retune_hold(struct mmc_host *host); -void mmc_retune_hold_now(struct mmc_host *host); void mmc_retune_release(struct mmc_host *host); int mmc_retune(struct mmc_host *host); void mmc_retune_pause(struct mmc_host *host); void mmc_retune_unpause(struct mmc_host *host); +static inline void mmc_retune_hold_now(struct mmc_host *host) +{ + host->retune_now = 0; + host->hold_retune += 1; +} + static inline void mmc_retune_recheck(struct mmc_host *host) { if (host->hold_retune <= 1) diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 36217ad5e9b1..a552f61060d2 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -780,6 +780,7 @@ MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv); +MMC_DEV_ATTR(rev, "0x%x\n", card->ext_csd.rev); MMC_DEV_ATTR(pre_eol_info, "%02x\n", card->ext_csd.pre_eol_info); MMC_DEV_ATTR(life_time, "0x%02x 0x%02x\n", card->ext_csd.device_life_time_est_typ_a, @@ -838,6 +839,7 @@ static struct attribute *mmc_std_attrs[] = { &dev_attr_name.attr, &dev_attr_oemid.attr, &dev_attr_prv.attr, + &dev_attr_rev.attr, 
&dev_attr_pre_eol_info.attr, &dev_attr_life_time.attr, &dev_attr_serial.attr, @@ -1289,13 +1291,18 @@ out_err: static void mmc_select_driver_type(struct mmc_card *card) { int card_drv_type, drive_strength, drv_type; + int fixed_drv_type = card->host->fixed_drv_type; card_drv_type = card->ext_csd.raw_driver_strength | mmc_driver_type_mask(0); - drive_strength = mmc_select_drive_strength(card, - card->ext_csd.hs200_max_dtr, - card_drv_type, &drv_type); + if (fixed_drv_type >= 0) + drive_strength = card_drv_type & mmc_driver_type_mask(fixed_drv_type) + ? fixed_drv_type : 0; + else + drive_strength = mmc_select_drive_strength(card, + card->ext_csd.hs200_max_dtr, + card_drv_type, &drv_type); card->drive_strength = drive_strength; @@ -1786,12 +1793,41 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, } /* + * Enable Command Queue if supported. Note that Packed Commands cannot + * be used with Command Queue. + */ + card->ext_csd.cmdq_en = false; + if (card->ext_csd.cmdq_support && host->caps2 & MMC_CAP2_CQE) { + err = mmc_cmdq_enable(card); + if (err && err != -EBADMSG) + goto free_card; + if (err) { + pr_warn("%s: Enabling CMDQ failed\n", + mmc_hostname(card->host)); + card->ext_csd.cmdq_support = false; + card->ext_csd.cmdq_depth = 0; + err = 0; + } + } + /* * In some cases (e.g. RPMB or mmc_test), the Command Queue must be * disabled for a time, so a flag is needed to indicate to re-enable the * Command Queue. */ card->reenable_cmdq = card->ext_csd.cmdq_en; + if (card->ext_csd.cmdq_en && !host->cqe_enabled) { + err = host->cqe_ops->cqe_enable(host, card); + if (err) { + pr_err("%s: Failed to enable CQE, error %d\n", + mmc_hostname(host), err); + } else { + host->cqe_enabled = true; + pr_info("%s: Command Queue Engine enabled\n", + mmc_hostname(host)); + } + } + if (!oldcard) host->card = card; @@ -1911,14 +1947,14 @@ static void mmc_detect(struct mmc_host *host) { int err; - mmc_get_card(host->card); + mmc_get_card(host->card, NULL); /* * Just check if our card has been removed. */ err = _mmc_detect_card_removed(host); - mmc_put_card(host->card); + mmc_put_card(host->card, NULL); if (err) { mmc_remove(host); diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 54686ca4bfb7..908e4db03535 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -977,7 +977,6 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception) from_exception) return; - mmc_claim_host(card->host); if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) { timeout = MMC_OPS_TIMEOUT_MS; use_busy_signal = true; @@ -995,7 +994,7 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception) pr_warn("%s: Error %d starting bkops\n", mmc_hostname(card->host), err); mmc_retune_release(card->host); - goto out; + return; } /* @@ -1007,9 +1006,8 @@ void mmc_start_bkops(struct mmc_card *card, bool from_exception) mmc_card_set_doing_bkops(card); else mmc_retune_release(card->host); -out: - mmc_release_host(card->host); } +EXPORT_SYMBOL(mmc_start_bkops); /* * Flush the cache to the non-volatile storage. 
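
A worked illustration of the fixed-driver-type branch added to mmc_select_driver_type() above (plain C, assuming mmc_driver_type_mask(n) is the usual one-bit-per-type mask): the type forced via fixed-emmc-driver-type is only used when the card actually advertises it, otherwise the default type 0 is kept.

#include <stdio.h>

#define driver_type_mask(n)	(1u << (n))	/* stands in for mmc_driver_type_mask() */

static int select_drive_strength(unsigned int card_drv_type, int fixed_drv_type)
{
	return (card_drv_type & driver_type_mask(fixed_drv_type)) ? fixed_drv_type : 0;
}

int main(void)
{
	/* card advertises driver types 0, 1 and 4 */
	unsigned int card_drv_type = driver_type_mask(0) | driver_type_mask(1) |
				     driver_type_mask(4);

	printf("%d %d\n",
	       select_drive_strength(card_drv_type, 1),	/* 1: supported, use it */
	       select_drive_strength(card_drv_type, 2));	/* 0: fall back */
	return 0;
}
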
diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index 0a4e77a5ba33..4f33d277b125 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -30,7 +30,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req) { struct mmc_queue *mq = q->queuedata; - if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq))) + if (mq && mmc_card_removed(mq->card)) return BLKPREP_KILL; req->rq_flags |= RQF_DONTPREP; @@ -177,6 +177,29 @@ static void mmc_exit_request(struct request_queue *q, struct request *req) mq_rq->sg = NULL; } +static void mmc_setup_queue(struct mmc_queue *mq, struct mmc_card *card) +{ + struct mmc_host *host = card->host; + u64 limit = BLK_BOUNCE_HIGH; + + if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) + limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; + + queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); + queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue); + if (mmc_can_erase(card)) + mmc_queue_setup_discard(mq->queue, card); + + blk_queue_bounce_limit(mq->queue, limit); + blk_queue_max_hw_sectors(mq->queue, + min(host->max_blk_count, host->max_req_size / 512)); + blk_queue_max_segments(mq->queue, host->max_segs); + blk_queue_max_segment_size(mq->queue, host->max_seg_size); + + /* Initialize thread_sem even if it is not used */ + sema_init(&mq->thread_sem, 1); +} + /** * mmc_init_queue - initialise a queue structure. * @mq: mmc queue @@ -190,12 +213,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock, const char *subname) { struct mmc_host *host = card->host; - u64 limit = BLK_BOUNCE_HIGH; int ret = -ENOMEM; - if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) - limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; - mq->card = card; mq->queue = blk_alloc_queue(GFP_KERNEL); if (!mq->queue) @@ -214,18 +233,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, } blk_queue_prep_rq(mq->queue, mmc_prep_request); - queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); - queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue); - if (mmc_can_erase(card)) - mmc_queue_setup_discard(mq->queue, card); - blk_queue_bounce_limit(mq->queue, limit); - blk_queue_max_hw_sectors(mq->queue, - min(host->max_blk_count, host->max_req_size / 512)); - blk_queue_max_segments(mq->queue, host->max_segs); - blk_queue_max_segment_size(mq->queue, host->max_seg_size); - - sema_init(&mq->thread_sem, 1); + mmc_setup_queue(mq, card); mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s", host->index, subname ? 
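
The new mmc_setup_queue() helper above concentrates the translation from host capabilities to block-layer limits. A small worked example with hypothetical host numbers shows how max_hw_sectors ends up bounded by whichever of max_blk_count and max_req_size is tighter:

#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	unsigned int max_blk_count = 65535;	/* hypothetical host limits */
	unsigned int max_req_size = 524288;	/* 512 KiB */
	unsigned int max_hw_sectors = MIN(max_blk_count, max_req_size / 512);

	printf("max_hw_sectors = %u (%u KiB per request)\n",
	       max_hw_sectors, max_hw_sectors / 2);	/* 1024 -> 512 KiB */
	return 0;
}
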
subname : ""); diff --git a/drivers/mmc/core/queue.h b/drivers/mmc/core/queue.h index 6bfba32ffa66..547b457c4251 100644 --- a/drivers/mmc/core/queue.h +++ b/drivers/mmc/core/queue.h @@ -36,12 +36,14 @@ struct mmc_blk_request { /** * enum mmc_drv_op - enumerates the operations in the mmc_queue_req * @MMC_DRV_OP_IOCTL: ioctl operation + * @MMC_DRV_OP_IOCTL_RPMB: RPMB-oriented ioctl operation * @MMC_DRV_OP_BOOT_WP: write protect boot partitions * @MMC_DRV_OP_GET_CARD_STATUS: get card status * @MMC_DRV_OP_GET_EXT_CSD: get the EXT CSD from an eMMC card */ enum mmc_drv_op { MMC_DRV_OP_IOCTL, + MMC_DRV_OP_IOCTL_RPMB, MMC_DRV_OP_BOOT_WP, MMC_DRV_OP_GET_CARD_STATUS, MMC_DRV_OP_GET_EXT_CSD, @@ -82,6 +84,4 @@ extern void mmc_queue_resume(struct mmc_queue *); extern unsigned int mmc_queue_map_sg(struct mmc_queue *, struct mmc_queue_req *); -extern int mmc_access_rpmb(struct mmc_queue *); - #endif diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 4fd1620b732d..45bf78f32716 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -908,6 +908,18 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card) return max_dtr; } +static bool mmc_sd_card_using_v18(struct mmc_card *card) +{ + /* + * According to the SD spec., the Bus Speed Mode (function group 1) bits + * 2 to 4 are zero if the card is initialized at 3.3V signal level. Thus + * they can be used to determine if the card has already switched to + * 1.8V signaling. + */ + return card->sw_caps.sd3_bus_mode & + (SD_MODE_UHS_SDR50 | SD_MODE_UHS_SDR104 | SD_MODE_UHS_DDR50); +} + /* * Handle the detection and initialisation of a card. * @@ -921,9 +933,10 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, int err; u32 cid[4]; u32 rocr = 0; + bool v18_fixup_failed = false; WARN_ON(!host->claimed); - +retry: err = mmc_sd_get_cid(host, ocr, cid, &rocr); if (err) return err; @@ -989,6 +1002,36 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (err) goto free_card; + /* + * If the card has not been power cycled, it may still be using 1.8V + * signaling. Detect that situation and try to initialize a UHS-I (1.8V) + * transfer mode. + */ + if (!v18_fixup_failed && !mmc_host_is_spi(host) && mmc_host_uhs(host) && + mmc_sd_card_using_v18(card) && + host->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_180) { + /* + * Re-read switch information in case it has changed since + * oldcard was initialized. + */ + if (oldcard) { + err = mmc_read_switch(card); + if (err) + goto free_card; + } + if (mmc_sd_card_using_v18(card)) { + if (mmc_host_set_uhs_voltage(host) || + mmc_sd_init_uhs_card(card)) { + v18_fixup_failed = true; + mmc_power_cycle(host, ocr); + if (!oldcard) + mmc_remove_card(card); + goto retry; + } + goto done; + } + } + /* Initialization sequence for UHS-I cards */ if (rocr & SD_ROCR_S18A) { err = mmc_sd_init_uhs_card(card); @@ -1021,7 +1064,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, mmc_set_bus_width(host, MMC_BUS_WIDTH_4); } } - +done: host->card = card; return 0; @@ -1056,14 +1099,14 @@ static void mmc_sd_detect(struct mmc_host *host) { int err; - mmc_get_card(host->card); + mmc_get_card(host->card, NULL); /* * Just check if our card has been removed. 
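
The mmc_sd_card_using_v18() helper above relies on the SD spec detail quoted in its comment: bus-speed-mode bits 2..4 read back as zero after a 3.3 V initialization. A standalone illustration, with bit values mirroring SD_MODE_UHS_SDR50/SDR104/DDR50:

#include <stdio.h>

#define MODE_UHS_SDR50	(1u << 2)
#define MODE_UHS_SDR104	(1u << 3)
#define MODE_UHS_DDR50	(1u << 4)

static int card_using_v18(unsigned int sd3_bus_mode)
{
	return !!(sd3_bus_mode &
		  (MODE_UHS_SDR50 | MODE_UHS_SDR104 | MODE_UHS_DDR50));
}

int main(void)
{
	/* 0x03: only SDR12/SDR25 -> 3.3 V init; 0x0b: SDR104 set -> 1.8 V */
	printf("%d %d\n", card_using_v18(0x03), card_using_v18(0x0b));	/* 0 1 */
	return 0;
}
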
*/ err = _mmc_detect_card_removed(host); - mmc_put_card(host->card); + mmc_put_card(host->card, NULL); if (err) { mmc_sd_remove(host); diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index c771843e4c15..7a2eaf8410a3 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -155,7 +155,8 @@ static int sdio_irq_thread(void *_host) * holding of the host lock does not cover too much work * that doesn't require that lock to be held. */ - ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort); + ret = __mmc_claim_host(host, NULL, + &host->sdio_irq_thread_abort); if (ret) break; ret = process_sdio_pending_irqs(host); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 8c15637178ff..567028c9219a 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -352,6 +352,19 @@ config MMC_MESON_GX If you have a controller with this interface, say Y here. +config MMC_MESON_MX_SDIO + tristate "Amlogic Meson6/Meson8/Meson8b SD/MMC Host Controller support" + depends on ARCH_MESON || COMPILE_TEST + depends on COMMON_CLK + depends on HAS_DMA + depends on OF + help + This selects support for the SD/MMC Host Controller on + Amlogic Meson6, Meson8 and Meson8b SoCs. + + If you have a controller with this interface, say Y or M here. + If unsure, say N. + config MMC_MOXART tristate "MOXART SD/MMC Host Controller support" depends on ARCH_MOXART && MMC @@ -429,6 +442,7 @@ config MMC_SDHCI_MSM tristate "Qualcomm SDHCI Controller Support" depends on ARCH_QCOM || (ARM && COMPILE_TEST) depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS help This selects the Secure Digital Host Controller Interface (SDHCI) support present in Qualcomm SOCs. The controller supports @@ -663,7 +677,7 @@ config MMC_CAVIUM_OCTEON config MMC_CAVIUM_THUNDERX tristate "Cavium ThunderX SD/MMC Card Interface support" depends on PCI && 64BIT && (ARM64 || COMPILE_TEST) - depends on GPIOLIB + depends on GPIO_THUNDERX depends on OF_ADDRESS help This selects Cavium ThunderX SD/MMC Card Interface. @@ -899,3 +913,15 @@ config MMC_SDHCI_XENON This selects Marvell Xenon eMMC/SD/SDIO SDHCI. If you have a controller with this interface, say Y or M here. If unsure, say N. + +config MMC_SDHCI_OMAP + tristate "TI SDHCI Controller Support" + depends on MMC_SDHCI_PLTFM && OF + help + This selects the Secure Digital Host Controller Interface (SDHCI) + support present in TI's DRA7 SOCs. The controller supports + SD/MMC/SDIO devices. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. 
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 7c7b29ff591a..a43cf0d5a5d3 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -65,6 +65,7 @@ obj-$(CONFIG_MMC_VUB300) += vub300.o obj-$(CONFIG_MMC_USHC) += ushc.o obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o obj-$(CONFIG_MMC_MESON_GX) += meson-gx-mmc.o +obj-$(CONFIG_MMC_MESON_MX_SDIO) += meson-mx-sdio.o obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o @@ -90,6 +91,7 @@ obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o obj-$(CONFIG_MMC_SDHCI_MICROCHIP_PIC32) += sdhci-pic32.o obj-$(CONFIG_MMC_SDHCI_BRCMSTB) += sdhci-brcmstb.o +obj-$(CONFIG_MMC_SDHCI_OMAP) += sdhci-omap.o ifeq ($(CONFIG_CB710_DEBUG),y) CFLAGS-cb710-mmc += -DDEBUG diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 0a0ebf3a096d..e55f3932d580 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -732,11 +732,11 @@ static inline unsigned int atmci_convert_chksize(struct atmel_mci *host, return 0; } -static void atmci_timeout_timer(unsigned long data) +static void atmci_timeout_timer(struct timer_list *t) { struct atmel_mci *host; - host = (struct atmel_mci *)data; + host = from_timer(host, t, timer); dev_dbg(&host->pdev->dev, "software timeout\n"); @@ -1661,9 +1661,9 @@ static void atmci_command_complete(struct atmel_mci *host, cmd->error = 0; } -static void atmci_detect_change(unsigned long data) +static void atmci_detect_change(struct timer_list *t) { - struct atmel_mci_slot *slot = (struct atmel_mci_slot *)data; + struct atmel_mci_slot *slot = from_timer(slot, t, detect_timer); bool present; bool present_old; @@ -2349,8 +2349,7 @@ static int atmci_init_slot(struct atmel_mci *host, if (gpio_is_valid(slot->detect_pin)) { int ret; - setup_timer(&slot->detect_timer, atmci_detect_change, - (unsigned long)slot); + timer_setup(&slot->detect_timer, atmci_detect_change, 0); ret = request_irq(gpio_to_irq(slot->detect_pin), atmci_detect_interrupt, @@ -2563,7 +2562,7 @@ static int atmci_probe(struct platform_device *pdev) platform_set_drvdata(pdev, host); - setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host); + timer_setup(&host->timer, atmci_timeout_timer, 0); pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); diff --git a/drivers/mmc/host/cavium.c b/drivers/mmc/host/cavium.c index fbd29f00fca0..ed5cefb83768 100644 --- a/drivers/mmc/host/cavium.c +++ b/drivers/mmc/host/cavium.c @@ -967,7 +967,7 @@ static int cvm_mmc_of_parse(struct device *dev, struct cvm_mmc_slot *slot) } ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) return ret; /* * Legacy Octeon firmware has no regulator entry, fall-back to diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c index 64cda84b2302..73fd75c3c824 100644 --- a/drivers/mmc/host/dw_mmc-k3.c +++ b/drivers/mmc/host/dw_mmc-k3.c @@ -75,7 +75,7 @@ struct hs_timing { u32 smpl_phase_min; }; -struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = { +static struct hs_timing hs_timing_cfg[TIMING_MODE][TIMING_CFG_NUM] = { { /* reserved */ }, { /* SD */ {7, 0, 15, 15,}, /* 0: LEGACY 400k */ diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 4f2806720c5c..0aa39975f33b 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -817,7 +817,7 @@ static int dw_mci_edmac_start_dma(struct dw_mci *host, struct dma_slave_config cfg; struct 
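
The atmel-mci conversion above follows the same pattern used throughout this series: timer_setup() registers a callback that takes a struct timer_list *, and from_timer() recovers the enclosing structure, which is plain container_of() arithmetic. A standalone demonstration of that retrieval (struct names hypothetical):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_timer { int armed; };

struct fake_host {
	const char *name;
	struct fake_timer timeout_timer;
};

/* what a converted callback does instead of casting an 'unsigned long data' */
static void timeout_cb(struct fake_timer *t)
{
	struct fake_host *host = container_of(t, struct fake_host, timeout_timer);

	printf("timeout on %s\n", host->name);
}

int main(void)
{
	struct fake_host host = { .name = "mmc0" };

	timeout_cb(&host.timeout_timer);	/* prints "timeout on mmc0" */
	return 0;
}
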
dma_async_tx_descriptor *desc = NULL; struct scatterlist *sgl = host->data->sg; - const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; + static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; u32 sg_elems = host->data->sg_len; u32 fifoth_val; u32 fifo_offset = host->fifo_reg - host->regs; @@ -1024,7 +1024,7 @@ static int dw_mci_get_cd(struct mmc_host *mmc) static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) { unsigned int blksz = data->blksz; - const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; + static const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; u32 fifo_width = 1 << host->data_shift; u32 blksz_depth = blksz / fifo_width, fifoth_val; u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; @@ -1938,6 +1938,7 @@ static void dw_mci_set_drto(struct dw_mci *host) unsigned int drto_clks; unsigned int drto_div; unsigned int drto_ms; + unsigned long irqflags; drto_clks = mci_readl(host, TMOUT) >> 8; drto_div = (mci_readl(host, CLKDIV) & 0xff) * 2; @@ -1949,7 +1950,11 @@ static void dw_mci_set_drto(struct dw_mci *host) /* add a bit spare time */ drto_ms += 10; - mod_timer(&host->dto_timer, jiffies + msecs_to_jiffies(drto_ms)); + spin_lock_irqsave(&host->irq_lock, irqflags); + if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) + mod_timer(&host->dto_timer, + jiffies + msecs_to_jiffies(drto_ms)); + spin_unlock_irqrestore(&host->irq_lock, irqflags); } static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host) @@ -1970,6 +1975,18 @@ static bool dw_mci_clear_pending_cmd_complete(struct dw_mci *host) return true; } +static bool dw_mci_clear_pending_data_complete(struct dw_mci *host) +{ + if (!test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) + return false; + + /* Extra paranoia just like dw_mci_clear_pending_cmd_complete() */ + WARN_ON(del_timer_sync(&host->dto_timer)); + clear_bit(EVENT_DATA_COMPLETE, &host->pending_events); + + return true; +} + static void dw_mci_tasklet_func(unsigned long priv) { struct dw_mci *host = (struct dw_mci *)priv; @@ -2111,8 +2128,7 @@ static void dw_mci_tasklet_func(unsigned long priv) /* fall through */ case STATE_DATA_BUSY: - if (!test_and_clear_bit(EVENT_DATA_COMPLETE, - &host->pending_events)) { + if (!dw_mci_clear_pending_data_complete(host)) { /* * If data error interrupt comes but data over * interrupt doesn't come within the given time. 
@@ -2682,6 +2698,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) } if (pending & SDMMC_INT_DATA_OVER) { + spin_lock_irqsave(&host->irq_lock, irqflags); + del_timer(&host->dto_timer); mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); @@ -2694,6 +2712,8 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) } set_bit(EVENT_DATA_COMPLETE, &host->pending_events); tasklet_schedule(&host->tasklet); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); } if (pending & SDMMC_INT_RXDR) { @@ -2791,7 +2811,7 @@ static int dw_mci_init_slot(struct dw_mci *host) /*if there are external regulators, get them*/ ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) goto err_host_allocated; if (!mmc->ocr_avail) @@ -2971,9 +2991,9 @@ no_dma: host->use_dma = TRANS_MODE_PIO; } -static void dw_mci_cmd11_timer(unsigned long arg) +static void dw_mci_cmd11_timer(struct timer_list *t) { - struct dw_mci *host = (struct dw_mci *)arg; + struct dw_mci *host = from_timer(host, t, cmd11_timer); if (host->state != STATE_SENDING_CMD11) { dev_warn(host->dev, "Unexpected CMD11 timeout\n"); @@ -2985,9 +3005,9 @@ static void dw_mci_cmd11_timer(unsigned long arg) tasklet_schedule(&host->tasklet); } -static void dw_mci_cto_timer(unsigned long arg) +static void dw_mci_cto_timer(struct timer_list *t) { - struct dw_mci *host = (struct dw_mci *)arg; + struct dw_mci *host = from_timer(host, t, cto_timer); unsigned long irqflags; u32 pending; @@ -3040,10 +3060,34 @@ exit: spin_unlock_irqrestore(&host->irq_lock, irqflags); } -static void dw_mci_dto_timer(unsigned long arg) +static void dw_mci_dto_timer(struct timer_list *t) { - struct dw_mci *host = (struct dw_mci *)arg; + struct dw_mci *host = from_timer(host, t, dto_timer); + unsigned long irqflags; + u32 pending; + + spin_lock_irqsave(&host->irq_lock, irqflags); + /* + * The DTO timer is much longer than the CTO timer, so it's even less + * likely that we'll these cases, but it pays to be paranoid. + */ + pending = mci_readl(host, MINTSTS); /* read-only mask reg */ + if (pending & SDMMC_INT_DATA_OVER) { + /* The interrupt should fire; no need to act but we can warn */ + dev_warn(host->dev, "Unexpected data interrupt latency\n"); + goto exit; + } + if (test_bit(EVENT_DATA_COMPLETE, &host->pending_events)) { + /* Presumably interrupt handler couldn't delete the timer */ + dev_warn(host->dev, "DTO timeout when already completed\n"); + goto exit; + } + + /* + * Continued paranoia to make sure we're in the state we expect. + * This paranoia isn't really justified but it seems good to be safe. 
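
The dw_mmc hunks above close a race between the DATA_OVER interrupt and the software data timeout: both arming and deleting the DTO timer now happen under host->irq_lock, and the timer is not armed at all if the completion event is already pending. A minimal userspace sketch of that rule (pthreads, all names hypothetical):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t irq_lock = PTHREAD_MUTEX_INITIALIZER;
static bool data_complete;
static bool drto_armed;

static void set_drto(void)			/* data path */
{
	pthread_mutex_lock(&irq_lock);
	if (!data_complete)			/* the irq may already have won */
		drto_armed = true;
	pthread_mutex_unlock(&irq_lock);
}

static void data_over_irq(void)			/* interrupt handler */
{
	pthread_mutex_lock(&irq_lock);
	drto_armed = false;			/* del_timer() equivalent */
	data_complete = true;
	pthread_mutex_unlock(&irq_lock);
}

int main(void)
{
	data_over_irq();
	set_drto();
	printf("timer armed: %d\n", drto_armed);	/* 0: no spurious timeout */
	return 0;
}
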
+ */ switch (host->state) { case STATE_SENDING_DATA: case STATE_DATA_BUSY: @@ -3058,8 +3102,13 @@ static void dw_mci_dto_timer(unsigned long arg) tasklet_schedule(&host->tasklet); break; default: + dev_warn(host->dev, "Unexpected data timeout, state %d\n", + host->state); break; } + +exit: + spin_unlock_irqrestore(&host->irq_lock, irqflags); } #ifdef CONFIG_OF @@ -3208,14 +3257,9 @@ int dw_mci_probe(struct dw_mci *host) } } - setup_timer(&host->cmd11_timer, - dw_mci_cmd11_timer, (unsigned long)host); - - setup_timer(&host->cto_timer, - dw_mci_cto_timer, (unsigned long)host); - - setup_timer(&host->dto_timer, - dw_mci_dto_timer, (unsigned long)host); + timer_setup(&host->cmd11_timer, dw_mci_cmd11_timer, 0); + timer_setup(&host->cto_timer, dw_mci_cto_timer, 0); + timer_setup(&host->dto_timer, dw_mci_dto_timer, 0); spin_lock_init(&host->lock); spin_lock_init(&host->irq_lock); diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index 34474ad731aa..e3124f06a47e 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -74,7 +74,8 @@ struct dw_mci_dma_slave { * @stop_abort: The command currently prepared for stoping transfer. * @prev_blksz: The former transfer blksz record. * @timing: Record of current ios timing. - * @use_dma: Whether DMA channel is initialized or not. + * @use_dma: Which DMA channel is in use for the current transfer, zero + * denotes PIO mode. * @using_dma: Whether DMA is in use for the current transfer. * @dma_64bit_address: Whether DMA supports 64-bit address mode or not. * @sg_dma: Bus address of DMA buffer. diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 7db8c7a8d38d..712e08d9a45e 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -586,9 +586,9 @@ poll_timeout: return true; } -static void jz4740_mmc_timeout(unsigned long data) +static void jz4740_mmc_timeout(struct timer_list *t) { - struct jz4740_mmc_host *host = (struct jz4740_mmc_host *)data; + struct jz4740_mmc_host *host = from_timer(host, t, timeout_timer); if (!test_and_clear_bit(0, &host->waiting)) return; @@ -1036,8 +1036,7 @@ static int jz4740_mmc_probe(struct platform_device* pdev) jz4740_mmc_reset(host); jz4740_mmc_clock_disable(host); - setup_timer(&host->timeout_timer, jz4740_mmc_timeout, - (unsigned long)host); + timer_setup(&host->timeout_timer, jz4740_mmc_timeout, 0); host->use_dma = true; if (host->use_dma && jz4740_mmc_acquire_dma_channels(host) != 0) diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c index 85745ef179e2..e0862d3f65b3 100644 --- a/drivers/mmc/host/meson-gx-mmc.c +++ b/drivers/mmc/host/meson-gx-mmc.c @@ -1190,7 +1190,7 @@ static int meson_mmc_probe(struct platform_device *pdev) /* Get regulators and the supported OCR mask */ host->vqmmc_enabled = false; ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) goto free_host; ret = mmc_of_parse(mmc); diff --git a/drivers/mmc/host/meson-mx-sdio.c b/drivers/mmc/host/meson-mx-sdio.c new file mode 100644 index 000000000000..09cb89645d06 --- /dev/null +++ b/drivers/mmc/host/meson-mx-sdio.c @@ -0,0 +1,768 @@ +/* + * meson-mx-sdio.c - Meson6, Meson8 and Meson8b SDIO/MMC Host Controller + * + * Copyright (C) 2015 Endless Mobile, Inc. 
+ * Author: Carlo Caione <carlo@endlessm.com> + * Copyright (C) 2017 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/module.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/platform_device.h> +#include <linux/of_platform.h> +#include <linux/timer.h> +#include <linux/types.h> + +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sdio.h> +#include <linux/mmc/slot-gpio.h> + +#define MESON_MX_SDIO_ARGU 0x00 + +#define MESON_MX_SDIO_SEND 0x04 + #define MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK GENMASK(7, 0) + #define MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK GENMASK(15, 8) + #define MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7 BIT(16) + #define MESON_MX_SDIO_SEND_RESP_HAS_DATA BIT(17) + #define MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8 BIT(18) + #define MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY BIT(19) + #define MESON_MX_SDIO_SEND_DATA BIT(20) + #define MESON_MX_SDIO_SEND_USE_INT_WINDOW BIT(21) + #define MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK GENMASK(31, 24) + +#define MESON_MX_SDIO_CONF 0x08 + #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT 0 + #define MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH 10 + #define MESON_MX_SDIO_CONF_CMD_DISABLE_CRC BIT(10) + #define MESON_MX_SDIO_CONF_CMD_OUT_AT_POSITIVE_EDGE BIT(11) + #define MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK GENMASK(17, 12) + #define MESON_MX_SDIO_CONF_RESP_LATCH_AT_NEGATIVE_EDGE BIT(18) + #define MESON_MX_SDIO_CONF_DATA_LATCH_AT_NEGATIVE_EDGE BIT(19) + #define MESON_MX_SDIO_CONF_BUS_WIDTH BIT(20) + #define MESON_MX_SDIO_CONF_M_ENDIAN_MASK GENMASK(22, 21) + #define MESON_MX_SDIO_CONF_WRITE_NWR_MASK GENMASK(28, 23) + #define MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK GENMASK(31, 29) + +#define MESON_MX_SDIO_IRQS 0x0c + #define MESON_MX_SDIO_IRQS_STATUS_STATE_MACHINE_MASK GENMASK(3, 0) + #define MESON_MX_SDIO_IRQS_CMD_BUSY BIT(4) + #define MESON_MX_SDIO_IRQS_RESP_CRC7_OK BIT(5) + #define MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK BIT(6) + #define MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK BIT(7) + #define MESON_MX_SDIO_IRQS_IF_INT BIT(8) + #define MESON_MX_SDIO_IRQS_CMD_INT BIT(9) + #define MESON_MX_SDIO_IRQS_STATUS_INFO_MASK GENMASK(15, 12) + #define MESON_MX_SDIO_IRQS_TIMING_OUT_INT BIT(16) + #define MESON_MX_SDIO_IRQS_AMRISC_TIMING_OUT_INT_EN BIT(17) + #define MESON_MX_SDIO_IRQS_ARC_TIMING_OUT_INT_EN BIT(18) + #define MESON_MX_SDIO_IRQS_TIMING_OUT_COUNT_MASK GENMASK(31, 19) + +#define MESON_MX_SDIO_IRQC 0x10 + #define MESON_MX_SDIO_IRQC_ARC_IF_INT_EN BIT(3) + #define MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN BIT(4) + #define MESON_MX_SDIO_IRQC_IF_CONFIG_MASK GENMASK(7, 6) + #define MESON_MX_SDIO_IRQC_FORCE_DATA_CLK BIT(8) + #define MESON_MX_SDIO_IRQC_FORCE_DATA_CMD BIT(9) + #define MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK GENMASK(10, 13) + #define MESON_MX_SDIO_IRQC_SOFT_RESET BIT(15) + #define MESON_MX_SDIO_IRQC_FORCE_HALT BIT(30) + #define MESON_MX_SDIO_IRQC_HALT_HOLE BIT(31) + +#define MESON_MX_SDIO_MULT 0x14 + #define MESON_MX_SDIO_MULT_PORT_SEL_MASK GENMASK(1, 0) + #define MESON_MX_SDIO_MULT_MEMORY_STICK_ENABLE BIT(2) + #define MESON_MX_SDIO_MULT_MEMORY_STICK_SCLK_ALWAYS 
BIT(3) + #define MESON_MX_SDIO_MULT_STREAM_ENABLE BIT(4) + #define MESON_MX_SDIO_MULT_STREAM_8BITS_MODE BIT(5) + #define MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX BIT(8) + #define MESON_MX_SDIO_MULT_DAT0_DAT1_SWAPPED BIT(10) + #define MESON_MX_SDIO_MULT_DAT1_DAT0_SWAPPED BIT(11) + #define MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK GENMASK(15, 12) + +#define MESON_MX_SDIO_ADDR 0x18 + +#define MESON_MX_SDIO_EXT 0x1c + #define MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK GENMASK(29, 16) + +#define MESON_MX_SDIO_BOUNCE_REQ_SIZE (128 * 1024) +#define MESON_MX_SDIO_RESPONSE_CRC16_BITS (16 - 1) +#define MESON_MX_SDIO_MAX_SLOTS 3 + +struct meson_mx_mmc_host { + struct device *controller_dev; + + struct clk *parent_clk; + struct clk *core_clk; + struct clk_divider cfg_div; + struct clk *cfg_div_clk; + struct clk_fixed_factor fixed_factor; + struct clk *fixed_factor_clk; + + void __iomem *base; + int irq; + spinlock_t irq_lock; + + struct timer_list cmd_timeout; + + unsigned int slot_id; + struct mmc_host *mmc; + + struct mmc_request *mrq; + struct mmc_command *cmd; + int error; +}; + +static void meson_mx_mmc_mask_bits(struct mmc_host *mmc, char reg, u32 mask, + u32 val) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + u32 regval; + + regval = readl(host->base + reg); + regval &= ~mask; + regval |= (val & mask); + + writel(regval, host->base + reg); +} + +static void meson_mx_mmc_soft_reset(struct meson_mx_mmc_host *host) +{ + writel(MESON_MX_SDIO_IRQC_SOFT_RESET, host->base + MESON_MX_SDIO_IRQC); + udelay(2); +} + +static struct mmc_command *meson_mx_mmc_get_next_cmd(struct mmc_command *cmd) +{ + if (cmd->opcode == MMC_SET_BLOCK_COUNT && !cmd->error) + return cmd->mrq->cmd; + else if (mmc_op_multi(cmd->opcode) && + (!cmd->mrq->sbc || cmd->error || cmd->data->error)) + return cmd->mrq->stop; + else + return NULL; +} + +static void meson_mx_mmc_start_cmd(struct mmc_host *mmc, + struct mmc_command *cmd) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + unsigned int pack_size; + unsigned long irqflags, timeout; + u32 mult, send = 0, ext = 0; + + host->cmd = cmd; + + if (cmd->busy_timeout) + timeout = msecs_to_jiffies(cmd->busy_timeout); + else + timeout = msecs_to_jiffies(1000); + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_R1: + case MMC_RSP_R1B: + case MMC_RSP_R3: + /* 7 (CMD) + 32 (response) + 7 (CRC) -1 */ + send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 45); + break; + case MMC_RSP_R2: + /* 7 (CMD) + 120 (response) + 7 (CRC) -1 */ + send |= FIELD_PREP(MESON_MX_SDIO_SEND_CMD_RESP_BITS_MASK, 133); + send |= MESON_MX_SDIO_SEND_RESP_CRC7_FROM_8; + break; + default: + break; + } + + if (!(cmd->flags & MMC_RSP_CRC)) + send |= MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7; + + if (cmd->flags & MMC_RSP_BUSY) + send |= MESON_MX_SDIO_SEND_CHECK_DAT0_BUSY; + + if (cmd->data) { + send |= FIELD_PREP(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK, + (cmd->data->blocks - 1)); + + pack_size = cmd->data->blksz * BITS_PER_BYTE; + if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) + pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 4; + else + pack_size += MESON_MX_SDIO_RESPONSE_CRC16_BITS * 1; + + ext |= FIELD_PREP(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK, + pack_size); + + if (cmd->data->flags & MMC_DATA_WRITE) + send |= MESON_MX_SDIO_SEND_DATA; + else + send |= MESON_MX_SDIO_SEND_RESP_HAS_DATA; + + cmd->data->bytes_xfered = 0; + } + + send |= FIELD_PREP(MESON_MX_SDIO_SEND_COMMAND_INDEX_MASK, + (0x40 | cmd->opcode)); + + spin_lock_irqsave(&host->irq_lock, irqflags); + + mult = readl(host->base + MESON_MX_SDIO_MULT); + mult &= 
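
A worked example of the pack_size arithmetic in meson_mx_mmc_start_cmd() above, with the CRC constant copied from the driver: a 512-byte block on a 4-bit bus occupies the data bits plus the per-lane CRC16 bits the driver accounts for.

#include <stdio.h>

#define BITS_PER_BYTE		8
#define RESPONSE_CRC16_BITS	(16 - 1)	/* as defined in the driver */

int main(void)
{
	unsigned int blksz = 512, bus_width_4 = 1;
	unsigned int pack_size = blksz * BITS_PER_BYTE +
				 RESPONSE_CRC16_BITS * (bus_width_4 ? 4 : 1);

	printf("pack_size = %u bits\n", pack_size);	/* 4156 */
	return 0;
}
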
~MESON_MX_SDIO_MULT_PORT_SEL_MASK; + mult |= FIELD_PREP(MESON_MX_SDIO_MULT_PORT_SEL_MASK, host->slot_id); + mult |= BIT(31); + writel(mult, host->base + MESON_MX_SDIO_MULT); + + /* enable the CMD done interrupt */ + meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQC, + MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN, + MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN); + + /* clear pending interrupts */ + meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_IRQS, + MESON_MX_SDIO_IRQS_CMD_INT, + MESON_MX_SDIO_IRQS_CMD_INT); + + writel(cmd->arg, host->base + MESON_MX_SDIO_ARGU); + writel(ext, host->base + MESON_MX_SDIO_EXT); + writel(send, host->base + MESON_MX_SDIO_SEND); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); + + mod_timer(&host->cmd_timeout, jiffies + timeout); +} + +static void meson_mx_mmc_request_done(struct meson_mx_mmc_host *host) +{ + struct mmc_request *mrq; + + mrq = host->mrq; + + host->mrq = NULL; + host->cmd = NULL; + + mmc_request_done(host->mmc, mrq); +} + +static void meson_mx_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + unsigned short vdd = ios->vdd; + unsigned long clk_rate = ios->clock; + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF, + MESON_MX_SDIO_CONF_BUS_WIDTH, 0); + break; + + case MMC_BUS_WIDTH_4: + meson_mx_mmc_mask_bits(mmc, MESON_MX_SDIO_CONF, + MESON_MX_SDIO_CONF_BUS_WIDTH, + MESON_MX_SDIO_CONF_BUS_WIDTH); + break; + + case MMC_BUS_WIDTH_8: + default: + dev_err(mmc_dev(mmc), "unsupported bus width: %d\n", + ios->bus_width); + host->error = -EINVAL; + return; + } + + host->error = clk_set_rate(host->cfg_div_clk, ios->clock); + if (host->error) { + dev_warn(mmc_dev(mmc), + "failed to set MMC clock to %lu: %d\n", + clk_rate, host->error); + return; + } + + mmc->actual_clock = clk_get_rate(host->cfg_div_clk); + + switch (ios->power_mode) { + case MMC_POWER_OFF: + vdd = 0; + /* fall-through: */ + case MMC_POWER_UP: + if (!IS_ERR(mmc->supply.vmmc)) { + host->error = mmc_regulator_set_ocr(mmc, + mmc->supply.vmmc, + vdd); + if (host->error) + return; + } + break; + } +} + +static int meson_mx_mmc_map_dma(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct mmc_data *data = mrq->data; + int dma_len; + struct scatterlist *sg; + + if (!data) + return 0; + + sg = data->sg; + if (sg->offset & 3 || sg->length & 3) { + dev_err(mmc_dev(mmc), + "unaligned scatterlist: offset %x length %d\n", + sg->offset, sg->length); + return -EINVAL; + } + + dma_len = dma_map_sg(mmc_dev(mmc), data->sg, data->sg_len, + mmc_get_dma_dir(data)); + if (dma_len <= 0) { + dev_err(mmc_dev(mmc), "dma_map_sg failed\n"); + return -ENOMEM; + } + + return 0; +} + +static void meson_mx_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + struct mmc_command *cmd = mrq->cmd; + + if (!host->error) + host->error = meson_mx_mmc_map_dma(mmc, mrq); + + if (host->error) { + cmd->error = host->error; + mmc_request_done(mmc, mrq); + return; + } + + host->mrq = mrq; + + if (mrq->data) + writel(sg_dma_address(mrq->data->sg), + host->base + MESON_MX_SDIO_ADDR); + + if (mrq->sbc) + meson_mx_mmc_start_cmd(mmc, mrq->sbc); + else + meson_mx_mmc_start_cmd(mmc, mrq->cmd); +} + +static int meson_mx_mmc_card_busy(struct mmc_host *mmc) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + u32 irqc = readl(host->base + MESON_MX_SDIO_IRQC); + + return !!(irqc & MESON_MX_SDIO_IRQC_FORCE_DATA_DAT_MASK); +} + +static void meson_mx_mmc_read_response(struct mmc_host *mmc, + 
struct mmc_command *cmd) +{ + struct meson_mx_mmc_host *host = mmc_priv(mmc); + u32 mult; + int i, resp[4]; + + mult = readl(host->base + MESON_MX_SDIO_MULT); + mult |= MESON_MX_SDIO_MULT_WR_RD_OUT_INDEX; + mult &= ~MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK; + mult |= FIELD_PREP(MESON_MX_SDIO_MULT_RESP_READ_INDEX_MASK, 0); + writel(mult, host->base + MESON_MX_SDIO_MULT); + + if (cmd->flags & MMC_RSP_136) { + for (i = 0; i <= 3; i++) + resp[3 - i] = readl(host->base + MESON_MX_SDIO_ARGU); + cmd->resp[0] = (resp[0] << 8) | ((resp[1] >> 24) & 0xff); + cmd->resp[1] = (resp[1] << 8) | ((resp[2] >> 24) & 0xff); + cmd->resp[2] = (resp[2] << 8) | ((resp[3] >> 24) & 0xff); + cmd->resp[3] = (resp[3] << 8); + } else if (cmd->flags & MMC_RSP_PRESENT) { + cmd->resp[0] = readl(host->base + MESON_MX_SDIO_ARGU); + } +} + +static irqreturn_t meson_mx_mmc_process_cmd_irq(struct meson_mx_mmc_host *host, + u32 irqs, u32 send) +{ + struct mmc_command *cmd = host->cmd; + + /* + * NOTE: even though it shouldn't happen we sometimes get command + * interrupts twice (at least this is what it looks like). Ideally + * we find out why this happens and warn here as soon as it occurs. + */ + if (!cmd) + return IRQ_HANDLED; + + cmd->error = 0; + meson_mx_mmc_read_response(host->mmc, cmd); + + if (cmd->data) { + if (!((irqs & MESON_MX_SDIO_IRQS_DATA_READ_CRC16_OK) || + (irqs & MESON_MX_SDIO_IRQS_DATA_WRITE_CRC16_OK))) + cmd->error = -EILSEQ; + } else { + if (!((irqs & MESON_MX_SDIO_IRQS_RESP_CRC7_OK) || + (send & MESON_MX_SDIO_SEND_RESP_WITHOUT_CRC7))) + cmd->error = -EILSEQ; + } + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t meson_mx_mmc_irq(int irq, void *data) +{ + struct meson_mx_mmc_host *host = (void *) data; + u32 irqs, send; + unsigned long irqflags; + irqreturn_t ret; + + spin_lock_irqsave(&host->irq_lock, irqflags); + + irqs = readl(host->base + MESON_MX_SDIO_IRQS); + send = readl(host->base + MESON_MX_SDIO_SEND); + + if (irqs & MESON_MX_SDIO_IRQS_CMD_INT) + ret = meson_mx_mmc_process_cmd_irq(host, irqs, send); + else + ret = IRQ_HANDLED; + + /* finally ACK all pending interrupts */ + writel(irqs, host->base + MESON_MX_SDIO_IRQS); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); + + return ret; +} + +static irqreturn_t meson_mx_mmc_irq_thread(int irq, void *irq_data) +{ + struct meson_mx_mmc_host *host = (void *) irq_data; + struct mmc_command *cmd = host->cmd, *next_cmd; + + if (WARN_ON(!cmd)) + return IRQ_HANDLED; + + del_timer_sync(&host->cmd_timeout); + + if (cmd->data) { + dma_unmap_sg(mmc_dev(host->mmc), cmd->data->sg, + cmd->data->sg_len, + mmc_get_dma_dir(cmd->data)); + + cmd->data->bytes_xfered = cmd->data->blksz * cmd->data->blocks; + } + + next_cmd = meson_mx_mmc_get_next_cmd(cmd); + if (next_cmd) + meson_mx_mmc_start_cmd(host->mmc, next_cmd); + else + meson_mx_mmc_request_done(host); + + return IRQ_HANDLED; +} + +static void meson_mx_mmc_timeout(struct timer_list *t) +{ + struct meson_mx_mmc_host *host = from_timer(host, t, cmd_timeout); + unsigned long irqflags; + u32 irqc; + + spin_lock_irqsave(&host->irq_lock, irqflags); + + /* disable the CMD interrupt */ + irqc = readl(host->base + MESON_MX_SDIO_IRQC); + irqc &= ~MESON_MX_SDIO_IRQC_ARC_CMD_INT_EN; + writel(irqc, host->base + MESON_MX_SDIO_IRQC); + + spin_unlock_irqrestore(&host->irq_lock, irqflags); + + /* + * skip the timeout handling if the interrupt handler already processed + * the command. 
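
The R2 handling in meson_mx_mmc_read_response() above shifts the four raw 32-bit response words left by one byte across word boundaries before handing them to the core. A standalone check of that reshuffle, with arbitrary input values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t raw[4] = { 0x11223344, 0x55667788, 0x99aabbcc, 0xddeeff00 };
	uint32_t resp[4];

	resp[0] = (raw[0] << 8) | ((raw[1] >> 24) & 0xff);
	resp[1] = (raw[1] << 8) | ((raw[2] >> 24) & 0xff);
	resp[2] = (raw[2] << 8) | ((raw[3] >> 24) & 0xff);
	resp[3] = (raw[3] << 8);

	printf("%08x %08x %08x %08x\n", resp[0], resp[1], resp[2], resp[3]);
	/* 22334455 66778899 aabbccdd eeff0000 */
	return 0;
}
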
+ */ + if (!host->cmd) + return; + + dev_dbg(mmc_dev(host->mmc), + "Timeout on CMD%u (IRQS = 0x%08x, ARGU = 0x%08x)\n", + host->cmd->opcode, readl(host->base + MESON_MX_SDIO_IRQS), + readl(host->base + MESON_MX_SDIO_ARGU)); + + host->cmd->error = -ETIMEDOUT; + + meson_mx_mmc_request_done(host); +} + +static struct mmc_host_ops meson_mx_mmc_ops = { + .request = meson_mx_mmc_request, + .set_ios = meson_mx_mmc_set_ios, + .card_busy = meson_mx_mmc_card_busy, + .get_cd = mmc_gpio_get_cd, + .get_ro = mmc_gpio_get_ro, +}; + +static struct platform_device *meson_mx_mmc_slot_pdev(struct device *parent) +{ + struct device_node *slot_node; + + /* + * TODO: the MMC core framework currently does not support + * controllers with multiple slots properly. So we only register + * the first slot for now + */ + slot_node = of_find_compatible_node(parent->of_node, NULL, "mmc-slot"); + if (!slot_node) { + dev_warn(parent, "no 'mmc-slot' sub-node found\n"); + return ERR_PTR(-ENOENT); + } + + return of_platform_device_create(slot_node, NULL, parent); +} + +static int meson_mx_mmc_add_host(struct meson_mx_mmc_host *host) +{ + struct mmc_host *mmc = host->mmc; + struct device *slot_dev = mmc_dev(mmc); + int ret; + + if (of_property_read_u32(slot_dev->of_node, "reg", &host->slot_id)) { + dev_err(slot_dev, "missing 'reg' property\n"); + return -EINVAL; + } + + if (host->slot_id >= MESON_MX_SDIO_MAX_SLOTS) { + dev_err(slot_dev, "invalid 'reg' property value %d\n", + host->slot_id); + return -EINVAL; + } + + /* Get regulators and the supported OCR mask */ + ret = mmc_regulator_get_supply(mmc); + if (ret) + return ret; + + mmc->max_req_size = MESON_MX_SDIO_BOUNCE_REQ_SIZE; + mmc->max_seg_size = mmc->max_req_size; + mmc->max_blk_count = + FIELD_GET(MESON_MX_SDIO_SEND_REPEAT_PACKAGE_TIMES_MASK, + 0xffffffff); + mmc->max_blk_size = FIELD_GET(MESON_MX_SDIO_EXT_DATA_RW_NUMBER_MASK, + 0xffffffff); + mmc->max_blk_size -= (4 * MESON_MX_SDIO_RESPONSE_CRC16_BITS); + mmc->max_blk_size /= BITS_PER_BYTE; + + /* Get the min and max supported clock rates */ + mmc->f_min = clk_round_rate(host->cfg_div_clk, 1); + mmc->f_max = clk_round_rate(host->cfg_div_clk, + clk_get_rate(host->parent_clk)); + + mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; + mmc->ops = &meson_mx_mmc_ops; + + ret = mmc_of_parse(mmc); + if (ret) + return ret; + + ret = mmc_add_host(mmc); + if (ret) + return ret; + + return 0; +} + +static int meson_mx_mmc_register_clks(struct meson_mx_mmc_host *host) +{ + struct clk_init_data init; + const char *clk_div_parent, *clk_fixed_factor_parent; + + clk_fixed_factor_parent = __clk_get_name(host->parent_clk); + init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL, + "%s#fixed_factor", + dev_name(host->controller_dev)); + init.ops = &clk_fixed_factor_ops; + init.flags = 0; + init.parent_names = &clk_fixed_factor_parent; + init.num_parents = 1; + host->fixed_factor.div = 2; + host->fixed_factor.mult = 1; + host->fixed_factor.hw.init = &init; + + host->fixed_factor_clk = devm_clk_register(host->controller_dev, + &host->fixed_factor.hw); + if (WARN_ON(IS_ERR(host->fixed_factor_clk))) + return PTR_ERR(host->fixed_factor_clk); + + clk_div_parent = __clk_get_name(host->fixed_factor_clk); + init.name = devm_kasprintf(host->controller_dev, GFP_KERNEL, + "%s#div", dev_name(host->controller_dev)); + init.ops = &clk_divider_ops; + init.flags = CLK_SET_RATE_PARENT; + init.parent_names = &clk_div_parent; + init.num_parents = 1; + host->cfg_div.reg = host->base + MESON_MX_SDIO_CONF; + host->cfg_div.shift = 
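
Worked numbers for the mmc->max_* limits derived in meson_mx_mmc_add_host() above (plain C; GENMASK() and FIELD_GET() are re-implemented locally just for the illustration): the 8-bit repeat-package field bounds the block count, and the 14-bit data-rw-number field, minus the per-lane CRC16 bits, bounds the block size.

#include <stdint.h>
#include <stdio.h>

#define GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define FIELD_GET(m, v)	(((v) & (m)) >> __builtin_ctz(m))

int main(void)
{
	uint32_t max_blk_count = FIELD_GET(GENMASK(31, 24), 0xffffffff);	/* 255 */
	uint32_t max_blk_size  = FIELD_GET(GENMASK(29, 16), 0xffffffff);	/* 16383 bits */

	max_blk_size -= 4 * (16 - 1);	/* reserve per-lane CRC16 bits */
	max_blk_size /= 8;		/* bits -> bytes */

	printf("max_blk_count=%u max_blk_size=%u\n", max_blk_count, max_blk_size);
	/* max_blk_count=255 max_blk_size=2040 */
	return 0;
}
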
MESON_MX_SDIO_CONF_CMD_CLK_DIV_SHIFT; + host->cfg_div.width = MESON_MX_SDIO_CONF_CMD_CLK_DIV_WIDTH; + host->cfg_div.hw.init = &init; + host->cfg_div.flags = CLK_DIVIDER_ALLOW_ZERO; + + host->cfg_div_clk = devm_clk_register(host->controller_dev, + &host->cfg_div.hw); + if (WARN_ON(IS_ERR(host->cfg_div_clk))) + return PTR_ERR(host->cfg_div_clk); + + return 0; +} + +static int meson_mx_mmc_probe(struct platform_device *pdev) +{ + struct platform_device *slot_pdev; + struct mmc_host *mmc; + struct meson_mx_mmc_host *host; + struct resource *res; + int ret, irq; + u32 conf; + + slot_pdev = meson_mx_mmc_slot_pdev(&pdev->dev); + if (!slot_pdev) + return -ENODEV; + else if (IS_ERR(slot_pdev)) + return PTR_ERR(slot_pdev); + + mmc = mmc_alloc_host(sizeof(*host), &slot_pdev->dev); + if (!mmc) { + ret = -ENOMEM; + goto error_unregister_slot_pdev; + } + + host = mmc_priv(mmc); + host->mmc = mmc; + host->controller_dev = &pdev->dev; + + spin_lock_init(&host->irq_lock); + timer_setup(&host->cmd_timeout, meson_mx_mmc_timeout, 0); + + platform_set_drvdata(pdev, host); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->base = devm_ioremap_resource(host->controller_dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto error_free_mmc; + } + + irq = platform_get_irq(pdev, 0); + ret = devm_request_threaded_irq(host->controller_dev, irq, + meson_mx_mmc_irq, + meson_mx_mmc_irq_thread, IRQF_ONESHOT, + NULL, host); + if (ret) + goto error_free_mmc; + + host->core_clk = devm_clk_get(host->controller_dev, "core"); + if (IS_ERR(host->core_clk)) { + ret = PTR_ERR(host->core_clk); + goto error_free_mmc; + } + + host->parent_clk = devm_clk_get(host->controller_dev, "clkin"); + if (IS_ERR(host->parent_clk)) { + ret = PTR_ERR(host->parent_clk); + goto error_free_mmc; + } + + ret = meson_mx_mmc_register_clks(host); + if (ret) + goto error_free_mmc; + + ret = clk_prepare_enable(host->core_clk); + if (ret) { + dev_err(host->controller_dev, "Failed to enable core clock\n"); + goto error_free_mmc; + } + + ret = clk_prepare_enable(host->cfg_div_clk); + if (ret) { + dev_err(host->controller_dev, "Failed to enable MMC clock\n"); + goto error_disable_core_clk; + } + + conf = 0; + conf |= FIELD_PREP(MESON_MX_SDIO_CONF_CMD_ARGUMENT_BITS_MASK, 39); + conf |= FIELD_PREP(MESON_MX_SDIO_CONF_M_ENDIAN_MASK, 0x3); + conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_NWR_MASK, 0x2); + conf |= FIELD_PREP(MESON_MX_SDIO_CONF_WRITE_CRC_OK_STATUS_MASK, 0x2); + writel(conf, host->base + MESON_MX_SDIO_CONF); + + meson_mx_mmc_soft_reset(host); + + ret = meson_mx_mmc_add_host(host); + if (ret) + goto error_disable_clks; + + return 0; + +error_disable_clks: + clk_disable_unprepare(host->cfg_div_clk); +error_disable_core_clk: + clk_disable_unprepare(host->core_clk); +error_free_mmc: + mmc_free_host(mmc); +error_unregister_slot_pdev: + of_platform_device_destroy(&slot_pdev->dev, NULL); + return ret; +} + +static int meson_mx_mmc_remove(struct platform_device *pdev) +{ + struct meson_mx_mmc_host *host = platform_get_drvdata(pdev); + struct device *slot_dev = mmc_dev(host->mmc); + + del_timer_sync(&host->cmd_timeout); + + mmc_remove_host(host->mmc); + + of_platform_device_destroy(slot_dev, NULL); + + clk_disable_unprepare(host->cfg_div_clk); + clk_disable_unprepare(host->core_clk); + + mmc_free_host(host->mmc); + + return 0; +} + +static const struct of_device_id meson_mx_mmc_of_match[] = { + { .compatible = "amlogic,meson8-sdio", }, + { .compatible = "amlogic,meson8b-sdio", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, 
meson_mx_mmc_of_match); + +static struct platform_driver meson_mx_mmc_driver = { + .probe = meson_mx_mmc_probe, + .remove = meson_mx_mmc_remove, + .driver = { + .name = "meson-mx-sdio", + .of_match_table = of_match_ptr(meson_mx_mmc_of_match), + }, +}; + +module_platform_driver(meson_mx_mmc_driver); + +MODULE_DESCRIPTION("Meson6, Meson8 and Meson8b SDIO/MMC Host Driver"); +MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>"); +MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index f1f54a818489..e8a1bb1ae694 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -1658,7 +1658,7 @@ static int mmci_probe(struct amba_device *dev, /* Get regulators and the supported OCR mask */ ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) goto clk_disable; if (!mmc->ocr_avail) diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c index 267f7ab08420..6457a7d8880f 100644 --- a/drivers/mmc/host/mtk-sd.c +++ b/drivers/mmc/host/mtk-sd.c @@ -67,6 +67,7 @@ #define SDC_RESP2 0x48 #define SDC_RESP3 0x4c #define SDC_BLK_NUM 0x50 +#define SDC_ADV_CFG0 0x64 #define EMMC_IOCON 0x7c #define SDC_ACMD_RESP 0x80 #define MSDC_DMA_SA 0x90 @@ -74,10 +75,14 @@ #define MSDC_DMA_CFG 0x9c #define MSDC_PATCH_BIT 0xb0 #define MSDC_PATCH_BIT1 0xb4 +#define MSDC_PATCH_BIT2 0xb8 #define MSDC_PAD_TUNE 0xec +#define MSDC_PAD_TUNE0 0xf0 #define PAD_DS_TUNE 0x188 #define PAD_CMD_TUNE 0x18c #define EMMC50_CFG0 0x208 +#define EMMC50_CFG3 0x220 +#define SDC_FIFO_CFG 0x228 /*--------------------------------------------------------------------------*/ /* Register Mask */ @@ -95,6 +100,9 @@ #define MSDC_CFG_CKDIV (0xff << 8) /* RW */ #define MSDC_CFG_CKMOD (0x3 << 16) /* RW */ #define MSDC_CFG_HS400_CK_MODE (0x1 << 18) /* RW */ +#define MSDC_CFG_HS400_CK_MODE_EXTRA (0x1 << 22) /* RW */ +#define MSDC_CFG_CKDIV_EXTRA (0xfff << 8) /* RW */ +#define MSDC_CFG_CKMOD_EXTRA (0x3 << 20) /* RW */ /* MSDC_IOCON mask */ #define MSDC_IOCON_SDR104CKS (0x1 << 0) /* RW */ @@ -183,6 +191,9 @@ #define SDC_STS_CMDBUSY (0x1 << 1) /* RW */ #define SDC_STS_SWR_COMPL (0x1 << 31) /* RW */ +/* SDC_ADV_CFG0 mask */ +#define SDC_RX_ENHANCE_EN (0x1 << 20) /* RW */ + /* MSDC_DMA_CTRL mask */ #define MSDC_DMA_CTRL_START (0x1 << 0) /* W */ #define MSDC_DMA_CTRL_STOP (0x1 << 1) /* W */ @@ -212,11 +223,22 @@ #define MSDC_PATCH_BIT_SPCPUSH (0x1 << 29) /* RW */ #define MSDC_PATCH_BIT_DECRCTMO (0x1 << 30) /* RW */ +#define MSDC_PATCH_BIT1_STOP_DLY (0xf << 8) /* RW */ + +#define MSDC_PATCH_BIT2_CFGRESP (0x1 << 15) /* RW */ +#define MSDC_PATCH_BIT2_CFGCRCSTS (0x1 << 28) /* RW */ +#define MSDC_PB2_RESPWAIT (0x3 << 2) /* RW */ +#define MSDC_PB2_RESPSTSENSEL (0x7 << 16) /* RW */ +#define MSDC_PB2_CRCSTSENSEL (0x7 << 29) /* RW */ + #define MSDC_PAD_TUNE_DATWRDLY (0x1f << 0) /* RW */ #define MSDC_PAD_TUNE_DATRRDLY (0x1f << 8) /* RW */ #define MSDC_PAD_TUNE_CMDRDLY (0x1f << 16) /* RW */ #define MSDC_PAD_TUNE_CMDRRDLY (0x1f << 22) /* RW */ #define MSDC_PAD_TUNE_CLKTDLY (0x1f << 27) /* RW */ +#define MSDC_PAD_TUNE_RXDLYSEL (0x1 << 15) /* RW */ +#define MSDC_PAD_TUNE_RD_SEL (0x1 << 13) /* RW */ +#define MSDC_PAD_TUNE_CMD_SEL (0x1 << 21) /* RW */ #define PAD_DS_TUNE_DLY1 (0x1f << 2) /* RW */ #define PAD_DS_TUNE_DLY2 (0x1f << 7) /* RW */ @@ -228,6 +250,11 @@ #define EMMC50_CFG_CRCSTS_EDGE (0x1 << 3) /* RW */ #define EMMC50_CFG_CFCSTS_SEL (0x1 << 4) /* RW */ +#define EMMC50_CFG3_OUTS_WR (0x1f << 0) /* RW */ + +#define 
SDC_FIFO_CFG_WRVALIDSEL (0x1 << 24) /* RW */ +#define SDC_FIFO_CFG_RDVALIDSEL (0x1 << 25) /* RW */ + #define REQ_CMD_EIO (0x1 << 0) #define REQ_CMD_TMO (0x1 << 1) #define REQ_DAT_ERR (0x1 << 2) @@ -290,9 +317,23 @@ struct msdc_save_para { u32 pad_tune; u32 patch_bit0; u32 patch_bit1; + u32 patch_bit2; u32 pad_ds_tune; u32 pad_cmd_tune; u32 emmc50_cfg0; + u32 emmc50_cfg3; + u32 sdc_fifo_cfg; +}; + +struct mtk_mmc_compatible { + u8 clk_div_bits; + bool hs400_tune; /* only used for MT8173 */ + u32 pad_tune_reg; + bool async_fifo; + bool data_tune; + bool busy_check; + bool stop_clk_fix; + bool enhance_rx; }; struct msdc_tune_para { @@ -309,6 +350,7 @@ struct msdc_delay_phase { struct msdc_host { struct device *dev; + const struct mtk_mmc_compatible *dev_comp; struct mmc_host *mmc; /* mmc structure */ int cmd_rsp; @@ -334,11 +376,13 @@ struct msdc_host { struct clk *src_clk; /* msdc source clock */ struct clk *h_clk; /* msdc h_clk */ + struct clk *src_clk_cg; /* msdc source clock control gate */ u32 mclk; /* mmc subsystem clock frequency */ u32 src_clk_freq; /* source clock frequency */ u32 sclk; /* SD/MS bus clock frequency */ unsigned char timing; bool vqmmc_enabled; + u32 latch_ck; u32 hs400_ds_delay; u32 hs200_cmd_int_delay; /* cmd internal delay for HS200/SDR104 */ u32 hs400_cmd_int_delay; /* cmd internal delay for HS400 */ @@ -350,6 +394,59 @@ struct msdc_host { struct msdc_tune_para saved_tune_para; /* tune result of CMD21/CMD19 */ }; +static const struct mtk_mmc_compatible mt8135_compat = { + .clk_div_bits = 8, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE, + .async_fifo = false, + .data_tune = false, + .busy_check = false, + .stop_clk_fix = false, + .enhance_rx = false, +}; + +static const struct mtk_mmc_compatible mt8173_compat = { + .clk_div_bits = 8, + .hs400_tune = true, + .pad_tune_reg = MSDC_PAD_TUNE, + .async_fifo = false, + .data_tune = false, + .busy_check = false, + .stop_clk_fix = false, + .enhance_rx = false, +}; + +static const struct mtk_mmc_compatible mt2701_compat = { + .clk_div_bits = 12, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE0, + .async_fifo = true, + .data_tune = true, + .busy_check = false, + .stop_clk_fix = false, + .enhance_rx = false, +}; + +static const struct mtk_mmc_compatible mt2712_compat = { + .clk_div_bits = 12, + .hs400_tune = false, + .pad_tune_reg = MSDC_PAD_TUNE0, + .async_fifo = true, + .data_tune = true, + .busy_check = true, + .stop_clk_fix = true, + .enhance_rx = true, +}; + +static const struct of_device_id msdc_of_ids[] = { + { .compatible = "mediatek,mt8135-mmc", .data = &mt8135_compat}, + { .compatible = "mediatek,mt8173-mmc", .data = &mt8173_compat}, + { .compatible = "mediatek,mt2701-mmc", .data = &mt2701_compat}, + { .compatible = "mediatek,mt2712-mmc", .data = &mt2712_compat}, + {} +}; +MODULE_DEVICE_TABLE(of, msdc_of_ids); + static void sdr_set_bits(void __iomem *reg, u32 bs) { u32 val = readl(reg); @@ -509,7 +606,12 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks) timeout = (ns + clk_ns - 1) / clk_ns + clks; /* in 1048576 sclk cycle unit */ timeout = (timeout + (0x1 << 20) - 1) >> 20; - sdr_get_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD, &mode); + if (host->dev_comp->clk_div_bits == 8) + sdr_get_field(host->base + MSDC_CFG, + MSDC_CFG_CKMOD, &mode); + else + sdr_get_field(host->base + MSDC_CFG, + MSDC_CFG_CKMOD_EXTRA, &mode); /*DDR mode will double the clk cycles for data timeout */ timeout = mode >= 2 ? timeout * 2 : timeout; timeout = timeout > 1 ? 
timeout - 1 : 0; @@ -520,6 +622,7 @@ static void msdc_set_timeout(struct msdc_host *host, u32 ns, u32 clks) static void msdc_gate_clock(struct msdc_host *host) { + clk_disable_unprepare(host->src_clk_cg); clk_disable_unprepare(host->src_clk); clk_disable_unprepare(host->h_clk); } @@ -528,6 +631,7 @@ static void msdc_ungate_clock(struct msdc_host *host) { clk_prepare_enable(host->h_clk); clk_prepare_enable(host->src_clk); + clk_prepare_enable(host->src_clk_cg); while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) cpu_relax(); } @@ -538,6 +642,7 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) u32 flags; u32 div; u32 sclk; + u32 tune_reg = host->dev_comp->pad_tune_reg; if (!hz) { dev_dbg(host->dev, "set mclk to 0\n"); @@ -548,7 +653,11 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) flags = readl(host->base + MSDC_INTEN); sdr_clr_bits(host->base + MSDC_INTEN, flags); - sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE); + if (host->dev_comp->clk_div_bits == 8) + sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_HS400_CK_MODE); + else + sdr_clr_bits(host->base + MSDC_CFG, + MSDC_CFG_HS400_CK_MODE_EXTRA); if (timing == MMC_TIMING_UHS_DDR50 || timing == MMC_TIMING_MMC_DDR52 || timing == MMC_TIMING_MMC_HS400) { @@ -568,8 +677,12 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) if (timing == MMC_TIMING_MMC_HS400 && hz >= (host->src_clk_freq >> 1)) { - sdr_set_bits(host->base + MSDC_CFG, - MSDC_CFG_HS400_CK_MODE); + if (host->dev_comp->clk_div_bits == 8) + sdr_set_bits(host->base + MSDC_CFG, + MSDC_CFG_HS400_CK_MODE); + else + sdr_set_bits(host->base + MSDC_CFG, + MSDC_CFG_HS400_CK_MODE_EXTRA); sclk = host->src_clk_freq >> 1; div = 0; /* div is ignore when bit18 is set */ } @@ -587,11 +700,31 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) sclk = (host->src_clk_freq >> 2) / div; } } - sdr_set_field(host->base + MSDC_CFG, MSDC_CFG_CKMOD | MSDC_CFG_CKDIV, - (mode << 8) | div); - sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); + sdr_clr_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); + /* + * As src_clk/HCLK use the same bit to gate/ungate, + * So if want to only gate src_clk, need gate its parent(mux). 
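
A worked pass through msdc_set_timeout() above with hypothetical numbers: the nanosecond timeout is converted to sclk cycles, expressed in 1048576-cycle units, doubled for DDR modes as the driver comment notes, and programmed as the value minus one.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ns = 300 * 1000 * 1000ULL;	/* 300 ms data timeout */
	uint32_t clk_ns = 20;			/* 20 ns per cycle at 50 MHz sclk */
	uint32_t clks = 0, mode = 2;		/* mode >= 2: DDR timing */
	uint64_t timeout;

	timeout = (ns + clk_ns - 1) / clk_ns + clks;	/* sclk cycles */
	timeout = (timeout + (1 << 20) - 1) >> 20;	/* 1048576-cycle units */
	timeout = mode >= 2 ? timeout * 2 : timeout;
	timeout = timeout > 1 ? timeout - 1 : 0;	/* programmed as N - 1 */

	printf("timeout field: %llu\n", (unsigned long long)timeout);	/* 29 */
	return 0;
}
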
+ */ + if (host->src_clk_cg) + clk_disable_unprepare(host->src_clk_cg); + else + clk_disable_unprepare(clk_get_parent(host->src_clk)); + if (host->dev_comp->clk_div_bits == 8) + sdr_set_field(host->base + MSDC_CFG, + MSDC_CFG_CKMOD | MSDC_CFG_CKDIV, + (mode << 8) | div); + else + sdr_set_field(host->base + MSDC_CFG, + MSDC_CFG_CKMOD_EXTRA | MSDC_CFG_CKDIV_EXTRA, + (mode << 12) | div); + if (host->src_clk_cg) + clk_prepare_enable(host->src_clk_cg); + else + clk_prepare_enable(clk_get_parent(host->src_clk)); + while (!(readl(host->base + MSDC_CFG) & MSDC_CFG_CKSTB)) cpu_relax(); + sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_CKPDN); host->sclk = sclk; host->mclk = hz; host->timing = timing; @@ -605,15 +738,16 @@ static void msdc_set_mclk(struct msdc_host *host, unsigned char timing, u32 hz) */ if (host->sclk <= 52000000) { writel(host->def_tune_para.iocon, host->base + MSDC_IOCON); - writel(host->def_tune_para.pad_tune, host->base + MSDC_PAD_TUNE); + writel(host->def_tune_para.pad_tune, host->base + tune_reg); } else { writel(host->saved_tune_para.iocon, host->base + MSDC_IOCON); - writel(host->saved_tune_para.pad_tune, host->base + MSDC_PAD_TUNE); + writel(host->saved_tune_para.pad_tune, host->base + tune_reg); writel(host->saved_tune_para.pad_cmd_tune, host->base + PAD_CMD_TUNE); } - if (timing == MMC_TIMING_MMC_HS400) + if (timing == MMC_TIMING_MMC_HS400 && + host->dev_comp->hs400_tune) sdr_set_field(host->base + PAD_CMD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, host->hs400_cmd_int_delay); @@ -1165,6 +1299,7 @@ static irqreturn_t msdc_irq(int irq, void *dev_id) static void msdc_init_hw(struct msdc_host *host) { u32 val; + u32 tune_reg = host->dev_comp->pad_tune_reg; /* Configure to MMC/SD mode, clock free running */ sdr_set_bits(host->base + MSDC_CFG, MSDC_CFG_MODE | MSDC_CFG_CKPDN); @@ -1180,14 +1315,53 @@ static void msdc_init_hw(struct msdc_host *host) val = readl(host->base + MSDC_INT); writel(val, host->base + MSDC_INT); - writel(0, host->base + MSDC_PAD_TUNE); + writel(0, host->base + tune_reg); writel(0, host->base + MSDC_IOCON); sdr_set_field(host->base + MSDC_IOCON, MSDC_IOCON_DDLSEL, 0); writel(0x403c0046, host->base + MSDC_PATCH_BIT); sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_CKGEN_MSDC_DLY_SEL, 1); - writel(0xffff0089, host->base + MSDC_PATCH_BIT1); + writel(0xffff4089, host->base + MSDC_PATCH_BIT1); sdr_set_bits(host->base + EMMC50_CFG0, EMMC50_CFG_CFCSTS_SEL); + if (host->dev_comp->stop_clk_fix) { + sdr_set_field(host->base + MSDC_PATCH_BIT1, + MSDC_PATCH_BIT1_STOP_DLY, 3); + sdr_clr_bits(host->base + SDC_FIFO_CFG, + SDC_FIFO_CFG_WRVALIDSEL); + sdr_clr_bits(host->base + SDC_FIFO_CFG, + SDC_FIFO_CFG_RDVALIDSEL); + } + + if (host->dev_comp->busy_check) + sdr_clr_bits(host->base + MSDC_PATCH_BIT1, (1 << 7)); + + if (host->dev_comp->async_fifo) { + sdr_set_field(host->base + MSDC_PATCH_BIT2, + MSDC_PB2_RESPWAIT, 3); + if (host->dev_comp->enhance_rx) { + sdr_set_bits(host->base + SDC_ADV_CFG0, + SDC_RX_ENHANCE_EN); + } else { + sdr_set_field(host->base + MSDC_PATCH_BIT2, + MSDC_PB2_RESPSTSENSEL, 2); + sdr_set_field(host->base + MSDC_PATCH_BIT2, + MSDC_PB2_CRCSTSENSEL, 2); + } + /* use async fifo, then no need tune internal delay */ + sdr_clr_bits(host->base + MSDC_PATCH_BIT2, + MSDC_PATCH_BIT2_CFGRESP); + sdr_set_bits(host->base + MSDC_PATCH_BIT2, + MSDC_PATCH_BIT2_CFGCRCSTS); + } + + if (host->dev_comp->data_tune) { + sdr_set_bits(host->base + tune_reg, + MSDC_PAD_TUNE_RD_SEL | MSDC_PAD_TUNE_CMD_SEL); + } else { + /* choose clock tune */ + sdr_set_bits(host->base + tune_reg, 
MSDC_PAD_TUNE_RXDLYSEL); + } + /* Configure to enable SDIO mode. * it's must otherwise sdio cmd5 failed */ @@ -1200,7 +1374,9 @@ static void msdc_init_hw(struct msdc_host *host) sdr_set_field(host->base + SDC_CFG, SDC_CFG_DTOC, 3); host->def_tune_para.iocon = readl(host->base + MSDC_IOCON); - host->def_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE); + host->def_tune_para.pad_tune = readl(host->base + tune_reg); + host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON); + host->saved_tune_para.pad_tune = readl(host->base + tune_reg); dev_dbg(host->dev, "init hardware done!"); } @@ -1343,18 +1519,19 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) struct msdc_delay_phase internal_delay_phase; u8 final_delay, final_maxlen; u32 internal_delay = 0; + u32 tune_reg = host->dev_comp->pad_tune_reg; int cmd_err; int i, j; if (mmc->ios.timing == MMC_TIMING_MMC_HS200 || mmc->ios.timing == MMC_TIMING_UHS_SDR104) - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, host->hs200_cmd_int_delay); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); for (i = 0 ; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, i); /* * Using the same parameters, it may sometimes pass the test, @@ -1373,12 +1550,13 @@ static int msdc_tune_response(struct mmc_host *mmc, u32 opcode) } final_rise_delay = get_best_delay(host, rise_delay); /* if rising edge has enough margin, then do not scan falling edge */ - if (final_rise_delay.maxlen >= 12 && final_rise_delay.start < 4) + if (final_rise_delay.maxlen >= 12 || + (final_rise_delay.start == 0 && final_rise_delay.maxlen >= 4)) goto skip_fall; sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); for (i = 0; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, i); /* * Using the same parameters, it may sometimes pass the test, @@ -1403,20 +1581,20 @@ skip_fall: final_maxlen = final_fall_delay.maxlen; if (final_maxlen == final_rise_delay.maxlen) { sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); - sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, final_rise_delay.final_phase); final_delay = final_rise_delay.final_phase; } else { sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_RSPL); - sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRDLY, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRDLY, final_fall_delay.final_phase); final_delay = final_fall_delay.final_phase; } - if (host->hs200_cmd_int_delay) + if (host->dev_comp->async_fifo || host->hs200_cmd_int_delay) goto skip_internal; for (i = 0; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, i); mmc_send_tuning(mmc, opcode, &cmd_err); if (!cmd_err) @@ -1424,7 +1602,7 @@ skip_fall: } dev_dbg(host->dev, "Final internal delay: 0x%x\n", internal_delay); internal_delay_phase = get_best_delay(host, internal_delay); - sdr_set_field(host->base + MSDC_PAD_TUNE, MSDC_PAD_TUNE_CMDRRDLY, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_CMDRRDLY, internal_delay_phase.final_phase); skip_internal: dev_dbg(host->dev, "Final cmd pad delay: %x\n", final_delay); @@ -1486,12 +1664,15 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) u32 rise_delay = 0, fall_delay = 0; struct msdc_delay_phase 
final_rise_delay, final_fall_delay = { 0,}; u8 final_delay, final_maxlen; + u32 tune_reg = host->dev_comp->pad_tune_reg; int i, ret; + sdr_set_field(host->base + MSDC_PATCH_BIT, MSDC_INT_DAT_LATCH_CK_SEL, + host->latch_ck); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); for (i = 0 ; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) @@ -1506,7 +1687,7 @@ static int msdc_tune_data(struct mmc_host *mmc, u32 opcode) sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); for (i = 0; i < PAD_DELAY_MAX; i++) { - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, i); ret = mmc_send_tuning(mmc, opcode, NULL); if (!ret) @@ -1519,14 +1700,14 @@ skip_fall: if (final_maxlen == final_rise_delay.maxlen) { sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_clr_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, final_rise_delay.final_phase); final_delay = final_rise_delay.final_phase; } else { sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_DSPL); sdr_set_bits(host->base + MSDC_IOCON, MSDC_IOCON_W_DSPL); - sdr_set_field(host->base + MSDC_PAD_TUNE, + sdr_set_field(host->base + tune_reg, MSDC_PAD_TUNE_DATRRDLY, final_fall_delay.final_phase); final_delay = final_fall_delay.final_phase; @@ -1540,8 +1721,10 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct msdc_host *host = mmc_priv(mmc); int ret; + u32 tune_reg = host->dev_comp->pad_tune_reg; - if (host->hs400_mode) + if (host->hs400_mode && + host->dev_comp->hs400_tune) ret = hs400_tune_response(mmc, opcode); else ret = msdc_tune_response(mmc, opcode); @@ -1556,7 +1739,7 @@ static int msdc_execute_tuning(struct mmc_host *mmc, u32 opcode) } host->saved_tune_para.iocon = readl(host->base + MSDC_IOCON); - host->saved_tune_para.pad_tune = readl(host->base + MSDC_PAD_TUNE); + host->saved_tune_para.pad_tune = readl(host->base + tune_reg); host->saved_tune_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE); return ret; } @@ -1567,6 +1750,11 @@ static int msdc_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) host->hs400_mode = true; writel(host->hs400_ds_delay, host->base + PAD_DS_TUNE); + /* hs400 mode must set it to 0 */ + sdr_clr_bits(host->base + MSDC_PATCH_BIT2, MSDC_PATCH_BIT2_CFGCRCSTS); + /* to improve read performance, set outstanding to 2 */ + sdr_set_field(host->base + EMMC50_CFG3, EMMC50_CFG3_OUTS_WR, 2); + return 0; } @@ -1596,6 +1784,9 @@ static const struct mmc_host_ops mt_msdc_ops = { static void msdc_of_property_parse(struct platform_device *pdev, struct msdc_host *host) { + of_property_read_u32(pdev->dev.of_node, "mediatek,latch-ck", + &host->latch_ck); + of_property_read_u32(pdev->dev.of_node, "hs400-ds-delay", &host->hs400_ds_delay); @@ -1617,12 +1808,17 @@ static int msdc_drv_probe(struct platform_device *pdev) struct mmc_host *mmc; struct msdc_host *host; struct resource *res; + const struct of_device_id *of_id; int ret; if (!pdev->dev.of_node) { dev_err(&pdev->dev, "No DT found\n"); return -EINVAL; } + + of_id = of_match_node(msdc_of_ids, pdev->dev.of_node); + if (!of_id) + return -EINVAL; /* Allocate MMC host for this device */ mmc = mmc_alloc_host(sizeof(struct msdc_host), 
&pdev->dev); if (!mmc) @@ -1641,7 +1837,7 @@ static int msdc_drv_probe(struct platform_device *pdev) } ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) goto host_free; host->src_clk = devm_clk_get(&pdev->dev, "source"); @@ -1656,6 +1852,11 @@ static int msdc_drv_probe(struct platform_device *pdev) goto host_free; } + /*source clock control gate is optional clock*/ + host->src_clk_cg = devm_clk_get(&pdev->dev, "source_cg"); + if (IS_ERR(host->src_clk_cg)) + host->src_clk_cg = NULL; + host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { ret = -EINVAL; @@ -1686,11 +1887,15 @@ static int msdc_drv_probe(struct platform_device *pdev) msdc_of_property_parse(pdev, host); host->dev = &pdev->dev; + host->dev_comp = of_id->data; host->mmc = mmc; host->src_clk_freq = clk_get_rate(host->src_clk); /* Set host parameters to mmc */ mmc->ops = &mt_msdc_ops; - mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255); + if (host->dev_comp->clk_div_bits == 8) + mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 255); + else + mmc->f_min = DIV_ROUND_UP(host->src_clk_freq, 4 * 4095); mmc->caps |= MMC_CAP_ERASE | MMC_CAP_CMD23; /* MMC core transfer sizes tunable parameters */ @@ -1788,28 +1993,38 @@ static int msdc_drv_remove(struct platform_device *pdev) #ifdef CONFIG_PM static void msdc_save_reg(struct msdc_host *host) { + u32 tune_reg = host->dev_comp->pad_tune_reg; + host->save_para.msdc_cfg = readl(host->base + MSDC_CFG); host->save_para.iocon = readl(host->base + MSDC_IOCON); host->save_para.sdc_cfg = readl(host->base + SDC_CFG); - host->save_para.pad_tune = readl(host->base + MSDC_PAD_TUNE); + host->save_para.pad_tune = readl(host->base + tune_reg); host->save_para.patch_bit0 = readl(host->base + MSDC_PATCH_BIT); host->save_para.patch_bit1 = readl(host->base + MSDC_PATCH_BIT1); + host->save_para.patch_bit2 = readl(host->base + MSDC_PATCH_BIT2); host->save_para.pad_ds_tune = readl(host->base + PAD_DS_TUNE); host->save_para.pad_cmd_tune = readl(host->base + PAD_CMD_TUNE); host->save_para.emmc50_cfg0 = readl(host->base + EMMC50_CFG0); + host->save_para.emmc50_cfg3 = readl(host->base + EMMC50_CFG3); + host->save_para.sdc_fifo_cfg = readl(host->base + SDC_FIFO_CFG); } static void msdc_restore_reg(struct msdc_host *host) { + u32 tune_reg = host->dev_comp->pad_tune_reg; + writel(host->save_para.msdc_cfg, host->base + MSDC_CFG); writel(host->save_para.iocon, host->base + MSDC_IOCON); writel(host->save_para.sdc_cfg, host->base + SDC_CFG); - writel(host->save_para.pad_tune, host->base + MSDC_PAD_TUNE); + writel(host->save_para.pad_tune, host->base + tune_reg); writel(host->save_para.patch_bit0, host->base + MSDC_PATCH_BIT); writel(host->save_para.patch_bit1, host->base + MSDC_PATCH_BIT1); + writel(host->save_para.patch_bit2, host->base + MSDC_PATCH_BIT2); writel(host->save_para.pad_ds_tune, host->base + PAD_DS_TUNE); writel(host->save_para.pad_cmd_tune, host->base + PAD_CMD_TUNE); writel(host->save_para.emmc50_cfg0, host->base + EMMC50_CFG0); + writel(host->save_para.emmc50_cfg3, host->base + EMMC50_CFG3); + writel(host->save_para.sdc_fifo_cfg, host->base + SDC_FIFO_CFG); } static int msdc_runtime_suspend(struct device *dev) @@ -1839,12 +2054,6 @@ static const struct dev_pm_ops msdc_dev_pm_ops = { SET_RUNTIME_PM_OPS(msdc_runtime_suspend, msdc_runtime_resume, NULL) }; -static const struct of_device_id msdc_of_ids[] = { - { .compatible = "mediatek,mt8135-mmc", }, - {} -}; -MODULE_DEVICE_TABLE(of, msdc_of_ids); - static struct platform_driver mt_msdc_driver = { .probe = msdc_drv_probe, 
.remove = msdc_drv_remove, diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index 58d74b8d6c79..210247b3d11a 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -508,9 +508,9 @@ static irqreturn_t mvsd_irq(int irq, void *dev) return IRQ_NONE; } -static void mvsd_timeout_timer(unsigned long data) +static void mvsd_timeout_timer(struct timer_list *t) { - struct mvsd_host *host = (struct mvsd_host *)data; + struct mvsd_host *host = from_timer(host, t, timer); void __iomem *iobase = host->base; struct mmc_request *mrq; unsigned long flags; @@ -776,7 +776,7 @@ static int mvsd_probe(struct platform_device *pdev) goto out; } - setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host); + timer_setup(&host->timer, mvsd_timeout_timer, 0); platform_set_drvdata(pdev, mmc); ret = mmc_add_host(mmc); if (ret) diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 1d5418e4efae..5ff8ef7223cc 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -963,10 +963,9 @@ static bool filter(struct dma_chan *chan, void *param) return true; } -static void mxcmci_watchdog(unsigned long data) +static void mxcmci_watchdog(struct timer_list *t) { - struct mmc_host *mmc = (struct mmc_host *)data; - struct mxcmci_host *host = mmc_priv(mmc); + struct mxcmci_host *host = from_timer(host, t, watchdog); struct mmc_request *req = host->req; unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS); @@ -1075,7 +1074,7 @@ static int mxcmci_probe(struct platform_device *pdev) dat3_card_detect = true; ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) goto out_free; if (!mmc->ocr_avail) { @@ -1165,9 +1164,7 @@ static int mxcmci_probe(struct platform_device *pdev) goto out_free_dma; } - init_timer(&host->watchdog); - host->watchdog.function = &mxcmci_watchdog; - host->watchdog.data = (unsigned long)mmc; + timer_setup(&host->watchdog, mxcmci_watchdog, 0); mmc_add_host(mmc); diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index bd49f34d7654..adf32682f27a 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -625,9 +625,9 @@ static void mmc_omap_abort_command(struct work_struct *work) } static void -mmc_omap_cmd_timer(unsigned long data) +mmc_omap_cmd_timer(struct timer_list *t) { - struct mmc_omap_host *host = (struct mmc_omap_host *) data; + struct mmc_omap_host *host = from_timer(host, t, cmd_abort_timer); unsigned long flags; spin_lock_irqsave(&host->slot_lock, flags); @@ -654,9 +654,9 @@ mmc_omap_sg_to_buf(struct mmc_omap_host *host) } static void -mmc_omap_clk_timer(unsigned long data) +mmc_omap_clk_timer(struct timer_list *t) { - struct mmc_omap_host *host = (struct mmc_omap_host *) data; + struct mmc_omap_host *host = from_timer(host, t, clk_timer); mmc_omap_fclk_enable(host, 0); } @@ -874,9 +874,9 @@ void omap_mmc_notify_cover_event(struct device *dev, int num, int is_closed) tasklet_hi_schedule(&slot->cover_tasklet); } -static void mmc_omap_cover_timer(unsigned long arg) +static void mmc_omap_cover_timer(struct timer_list *t) { - struct mmc_omap_slot *slot = (struct mmc_omap_slot *) arg; + struct mmc_omap_slot *slot = from_timer(slot, t, cover_timer); tasklet_schedule(&slot->cover_tasklet); } @@ -1264,8 +1264,7 @@ static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) mmc->max_seg_size = mmc->max_req_size; if (slot->pdata->get_cover_state != NULL) { - setup_timer(&slot->cover_timer, mmc_omap_cover_timer, - (unsigned long)slot); + timer_setup(&slot->cover_timer, 
mmc_omap_cover_timer, 0); tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler, (unsigned long)slot); } @@ -1352,11 +1351,10 @@ static int mmc_omap_probe(struct platform_device *pdev) INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work); INIT_WORK(&host->cmd_abort_work, mmc_omap_abort_command); - setup_timer(&host->cmd_abort_timer, mmc_omap_cmd_timer, - (unsigned long) host); + timer_setup(&host->cmd_abort_timer, mmc_omap_cmd_timer, 0); spin_lock_init(&host->clk_lock); - setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host); + timer_setup(&host->clk_timer, mmc_omap_clk_timer, 0); spin_lock_init(&host->dma_lock); spin_lock_init(&host->slot_lock); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 3b5e6d11069b..071693ebfe18 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -147,10 +147,6 @@ #define OMAP_MMC_MAX_CLOCK 52000000 #define DRIVER_NAME "omap_hsmmc" -#define VDD_1V8 1800000 /* 180000 uV */ -#define VDD_3V0 3000000 /* 300000 uV */ -#define VDD_165_195 (ffs(MMC_VDD_165_195) - 1) - /* * One controller can have multiple slots, like on some omap boards using * omap.c controller driver. Luckily this is not currently done on any known @@ -308,8 +304,7 @@ err_set_ocr: return ret; } -static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on, - int vdd) +static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on) { int ret; @@ -317,17 +312,6 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on, return 0; if (power_on) { - if (vdd <= VDD_165_195) - ret = regulator_set_voltage(host->pbias, VDD_1V8, - VDD_1V8); - else - ret = regulator_set_voltage(host->pbias, VDD_3V0, - VDD_3V0); - if (ret < 0) { - dev_err(host->dev, "pbias set voltage fail\n"); - return ret; - } - if (host->pbias_enabled == 0) { ret = regulator_enable(host->pbias); if (ret) { @@ -350,8 +334,7 @@ static int omap_hsmmc_set_pbias(struct omap_hsmmc_host *host, bool power_on, return 0; } -static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on, - int vdd) +static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on) { struct mmc_host *mmc = host->mmc; int ret = 0; @@ -363,7 +346,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on, if (IS_ERR(mmc->supply.vmmc)) return 0; - ret = omap_hsmmc_set_pbias(host, false, 0); + ret = omap_hsmmc_set_pbias(host, false); if (ret) return ret; @@ -385,7 +368,7 @@ static int omap_hsmmc_set_power(struct omap_hsmmc_host *host, int power_on, if (ret) return ret; - ret = omap_hsmmc_set_pbias(host, true, vdd); + ret = omap_hsmmc_set_pbias(host, true); if (ret) goto err_set_voltage; } else { @@ -462,7 +445,7 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) return ret; /* Allow an aux regulator */ @@ -1220,11 +1203,11 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) clk_disable_unprepare(host->dbclk); /* Turn the power off */ - ret = omap_hsmmc_set_power(host, 0, 0); + ret = omap_hsmmc_set_power(host, 0); /* Turn the power ON with given VDD 1.8 or 3.0v */ if (!ret) - ret = omap_hsmmc_set_power(host, 1, vdd); + ret = omap_hsmmc_set_power(host, 1); if (host->dbclk) clk_prepare_enable(host->dbclk); @@ -1621,10 +1604,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->power_mode != host->power_mode) { switch (ios->power_mode) { case MMC_POWER_OFF: 
- omap_hsmmc_set_power(host, 0, 0); + omap_hsmmc_set_power(host, 0); break; case MMC_POWER_UP: - omap_hsmmc_set_power(host, 1, ios->vdd); + omap_hsmmc_set_power(host, 1); break; case MMC_POWER_ON: do_send_init_stream = 1; diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c index 8bae88a150fd..41cbe84c1d18 100644 --- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c @@ -88,6 +88,7 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = { static const struct of_device_id renesas_sdhi_internal_dmac_of_match[] = { { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, }, { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, }, + { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, }, {}, }; MODULE_DEVICE_TABLE(of, renesas_sdhi_internal_dmac_of_match); diff --git a/drivers/mmc/host/renesas_sdhi_sys_dmac.c b/drivers/mmc/host/renesas_sdhi_sys_dmac.c index df4465439e13..9ab10436e4b8 100644 --- a/drivers/mmc/host/renesas_sdhi_sys_dmac.c +++ b/drivers/mmc/host/renesas_sdhi_sys_dmac.c @@ -91,7 +91,6 @@ static const struct renesas_sdhi_of_data of_rcar_gen3_compatible = { }; static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = { - { .compatible = "renesas,sdhi-shmobile" }, { .compatible = "renesas,sdhi-sh73a0", .data = &of_default_cfg, }, { .compatible = "renesas,sdhi-r8a73a4", .data = &of_default_cfg, }, { .compatible = "renesas,sdhi-r8a7740", .data = &of_default_cfg, }, @@ -107,6 +106,10 @@ static const struct of_device_id renesas_sdhi_sys_dmac_of_match[] = { { .compatible = "renesas,sdhi-r8a7794", .data = &of_rcar_gen2_compatible, }, { .compatible = "renesas,sdhi-r8a7795", .data = &of_rcar_gen3_compatible, }, { .compatible = "renesas,sdhi-r8a7796", .data = &of_rcar_gen3_compatible, }, + { .compatible = "renesas,rcar-gen1-sdhi", .data = &of_rcar_gen1_compatible, }, + { .compatible = "renesas,rcar-gen2-sdhi", .data = &of_rcar_gen2_compatible, }, + { .compatible = "renesas,rcar-gen3-sdhi", .data = &of_rcar_gen3_compatible, }, + { .compatible = "renesas,sdhi-shmobile" }, {}, }; MODULE_DEVICE_TABLE(of, renesas_sdhi_sys_dmac_of_match); diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 41b57713b620..0848dc0f882e 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -618,29 +618,22 @@ static int sd_change_phase(struct realtek_pci_sdmmc *host, u8 sample_point, bool rx) { struct rtsx_pcr *pcr = host->pcr; - int err; dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n", __func__, rx ? 
"RX" : "TX", sample_point); - rtsx_pci_init_cmd(pcr); - - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, CHANGE_CLK); + rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, CHANGE_CLK); if (rx) - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, - SD_VPRX_CTL, 0x1F, sample_point); + rtsx_pci_write_register(pcr, SD_VPRX_CTL, + PHASE_SELECT_MASK, sample_point); else - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, - SD_VPTX_CTL, 0x1F, sample_point); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, - PHASE_NOT_RESET, PHASE_NOT_RESET); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, 0); - rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); - - err = rtsx_pci_send_cmd(pcr, 100); - if (err < 0) - return err; + rtsx_pci_write_register(pcr, SD_VPTX_CTL, + PHASE_SELECT_MASK, sample_point); + rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); + rtsx_pci_write_register(pcr, SD_VPCLK0_CTL, PHASE_NOT_RESET, + PHASE_NOT_RESET); + rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0); + rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); return 0; } @@ -708,10 +701,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host, { int err; struct mmc_command cmd = {}; + struct rtsx_pcr *pcr = host->pcr; - err = sd_change_phase(host, sample_point, true); - if (err < 0) - return err; + sd_change_phase(host, sample_point, true); + + rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, + SD_RSP_80CLK_TIMEOUT_EN); cmd.opcode = opcode; err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100); @@ -719,9 +714,12 @@ static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host, /* Wait till SD DATA IDLE */ sd_wait_data_idle(host); sd_clear_error(host); + rtsx_pci_write_register(pcr, SD_CFG3, + SD_RSP_80CLK_TIMEOUT_EN, 0); return err; } + rtsx_pci_write_register(pcr, SD_CFG3, SD_RSP_80CLK_TIMEOUT_EN, 0); return 0; } diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index 08ae0ff13513..b988997a1e80 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -73,6 +73,7 @@ struct sdhci_acpi_slot { unsigned int caps2; mmc_pm_flag_t pm_caps; unsigned int flags; + size_t priv_size; int (*probe_slot)(struct platform_device *, const char *, const char *); int (*remove_slot)(struct platform_device *); }; @@ -82,13 +83,118 @@ struct sdhci_acpi_host { const struct sdhci_acpi_slot *slot; struct platform_device *pdev; bool use_runtime_pm; + unsigned long private[0] ____cacheline_aligned; }; +static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c) +{ + return (void *)c->private; +} + static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag) { return c->slot && (c->slot->flags & flag); } +enum { + INTEL_DSM_FNS = 0, + INTEL_DSM_V18_SWITCH = 3, + INTEL_DSM_V33_SWITCH = 4, +}; + +struct intel_host { + u32 dsm_fns; +}; + +static const guid_t intel_dsm_guid = + GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F, + 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61); + +static int __intel_dsm(struct intel_host *intel_host, struct device *dev, + unsigned int fn, u32 *result) +{ + union acpi_object *obj; + int err = 0; + + obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL); + if (!obj) + return -EOPNOTSUPP; + + if (obj->type == ACPI_TYPE_INTEGER) { + *result = obj->integer.value; + } else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0) { + size_t len = min_t(size_t, obj->buffer.length, 4); + + *result = 0; + 
memcpy(result, obj->buffer.pointer, len); + } else { + dev_err(dev, "%s DSM fn %u obj->type %d obj->buffer.length %d\n", + __func__, fn, obj->type, obj->buffer.length); + err = -EINVAL; + } + + ACPI_FREE(obj); + + return err; +} + +static int intel_dsm(struct intel_host *intel_host, struct device *dev, + unsigned int fn, u32 *result) +{ + if (fn > 31 || !(intel_host->dsm_fns & (1 << fn))) + return -EOPNOTSUPP; + + return __intel_dsm(intel_host, dev, fn, result); +} + +static void intel_dsm_init(struct intel_host *intel_host, struct device *dev, + struct mmc_host *mmc) +{ + int err; + + err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); + if (err) { + pr_debug("%s: DSM not supported, error %d\n", + mmc_hostname(mmc), err); + return; + } + + pr_debug("%s: DSM function mask %#x\n", + mmc_hostname(mmc), intel_host->dsm_fns); +} + +static int intel_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + struct device *dev = mmc_dev(mmc); + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + struct intel_host *intel_host = sdhci_acpi_priv(c); + unsigned int fn; + u32 result = 0; + int err; + + err = sdhci_start_signal_voltage_switch(mmc, ios); + if (err) + return err; + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_330: + fn = INTEL_DSM_V33_SWITCH; + break; + case MMC_SIGNAL_VOLTAGE_180: + fn = INTEL_DSM_V18_SWITCH; + break; + default: + return 0; + } + + err = intel_dsm(intel_host, dev, fn, &result); + pr_debug("%s: %s DSM fn %u error %d result %u\n", + mmc_hostname(mmc), __func__, fn, err, result); + + return 0; +} + static void sdhci_acpi_int_hw_reset(struct sdhci_host *host) { u8 reg; @@ -269,56 +375,26 @@ out: return ret; } -static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev, - const char *hid, const char *uid) +static int intel_probe_slot(struct platform_device *pdev, const char *hid, + const char *uid) { struct sdhci_acpi_host *c = platform_get_drvdata(pdev); - struct sdhci_host *host; - - if (!c || !c->host) - return 0; - - host = c->host; - - /* Platform specific code during emmc probe slot goes here */ + struct intel_host *intel_host = sdhci_acpi_priv(c); + struct sdhci_host *host = c->host; if (hid && uid && !strcmp(hid, "80860F14") && !strcmp(uid, "1") && sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 && sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807) host->timeout_clk = 1000; /* 1000 kHz i.e. 
1 MHz */ - return 0; -} - -static int sdhci_acpi_sdio_probe_slot(struct platform_device *pdev, - const char *hid, const char *uid) -{ - struct sdhci_acpi_host *c = platform_get_drvdata(pdev); - - if (!c || !c->host) - return 0; - - /* Platform specific code during sdio probe slot goes here */ - - return 0; -} - -static int sdhci_acpi_sd_probe_slot(struct platform_device *pdev, - const char *hid, const char *uid) -{ - struct sdhci_acpi_host *c = platform_get_drvdata(pdev); - struct sdhci_host *host; - - if (!c || !c->host || !c->slot) - return 0; - - host = c->host; - - /* Platform specific code during sd probe slot goes here */ - if (hid && !strcmp(hid, "80865ACA")) host->mmc_host_ops.get_cd = bxt_get_cd; + intel_dsm_init(intel_host, &pdev->dev, host->mmc); + + host->mmc_host_ops.start_signal_voltage_switch = + intel_start_signal_voltage_switch; + return 0; } @@ -332,7 +408,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_STOP_WITH_TC | SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400, - .probe_slot = sdhci_acpi_emmc_probe_slot, + .probe_slot = intel_probe_slot, + .priv_size = sizeof(struct intel_host), }; static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { @@ -343,7 +420,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { MMC_CAP_WAIT_WHILE_BUSY, .flags = SDHCI_ACPI_RUNTIME_PM, .pm_caps = MMC_PM_KEEP_POWER, - .probe_slot = sdhci_acpi_sdio_probe_slot, + .probe_slot = intel_probe_slot, + .priv_size = sizeof(struct intel_host), }; static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { @@ -353,7 +431,8 @@ static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | SDHCI_QUIRK2_STOP_WITH_TC, .caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM, - .probe_slot = sdhci_acpi_sd_probe_slot, + .probe_slot = intel_probe_slot, + .priv_size = sizeof(struct intel_host), }; static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = { @@ -429,11 +508,13 @@ static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(const char *hid, static int sdhci_acpi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; + const struct sdhci_acpi_slot *slot; struct acpi_device *device, *child; struct sdhci_acpi_host *c; struct sdhci_host *host; struct resource *iomem; resource_size_t len; + size_t priv_size; const char *hid; const char *uid; int err; @@ -443,7 +524,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev) return -ENODEV; hid = acpi_device_hid(device); - uid = device->pnp.unique_id; + uid = acpi_device_uid(device); + + slot = sdhci_acpi_get_slot(hid, uid); /* Power on the SDHCI controller and its children */ acpi_device_fix_up_power(device); @@ -467,13 +550,14 @@ static int sdhci_acpi_probe(struct platform_device *pdev) if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev))) return -ENOMEM; - host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host)); + priv_size = slot ? 
slot->priv_size : 0; + host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size); if (IS_ERR(host)) return PTR_ERR(host); c = sdhci_priv(host); c->host = host; - c->slot = sdhci_acpi_get_slot(hid, uid); + c->slot = slot; c->pdev = pdev; c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); diff --git a/drivers/mmc/host/sdhci-cadence.c b/drivers/mmc/host/sdhci-cadence.c index 56529c3d389a..0f589e26ee63 100644 --- a/drivers/mmc/host/sdhci-cadence.c +++ b/drivers/mmc/host/sdhci-cadence.c @@ -13,6 +13,7 @@ * GNU General Public License for more details. */ +#include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/iopoll.h> #include <linux/module.h> @@ -27,15 +28,14 @@ #define SDHCI_CDNS_HRS04_ACK BIT(26) #define SDHCI_CDNS_HRS04_RD BIT(25) #define SDHCI_CDNS_HRS04_WR BIT(24) -#define SDHCI_CDNS_HRS04_RDATA_SHIFT 16 -#define SDHCI_CDNS_HRS04_WDATA_SHIFT 8 -#define SDHCI_CDNS_HRS04_ADDR_SHIFT 0 +#define SDHCI_CDNS_HRS04_RDATA GENMASK(23, 16) +#define SDHCI_CDNS_HRS04_WDATA GENMASK(15, 8) +#define SDHCI_CDNS_HRS04_ADDR GENMASK(5, 0) #define SDHCI_CDNS_HRS06 0x18 /* eMMC control */ #define SDHCI_CDNS_HRS06_TUNE_UP BIT(15) -#define SDHCI_CDNS_HRS06_TUNE_SHIFT 8 -#define SDHCI_CDNS_HRS06_TUNE_MASK 0x3f -#define SDHCI_CDNS_HRS06_MODE_MASK 0x7 +#define SDHCI_CDNS_HRS06_TUNE GENMASK(13, 8) +#define SDHCI_CDNS_HRS06_MODE GENMASK(2, 0) #define SDHCI_CDNS_HRS06_MODE_SD 0x0 #define SDHCI_CDNS_HRS06_MODE_MMC_SDR 0x2 #define SDHCI_CDNS_HRS06_MODE_MMC_DDR 0x3 @@ -105,8 +105,8 @@ static int sdhci_cdns_write_phy_reg(struct sdhci_cdns_priv *priv, u32 tmp; int ret; - tmp = (data << SDHCI_CDNS_HRS04_WDATA_SHIFT) | - (addr << SDHCI_CDNS_HRS04_ADDR_SHIFT); + tmp = FIELD_PREP(SDHCI_CDNS_HRS04_WDATA, data) | + FIELD_PREP(SDHCI_CDNS_HRS04_ADDR, addr); writel(tmp, reg); tmp |= SDHCI_CDNS_HRS04_WR; @@ -189,8 +189,8 @@ static void sdhci_cdns_set_emmc_mode(struct sdhci_cdns_priv *priv, u32 mode) /* The speed mode for eMMC is selected by HRS06 register */ tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06); - tmp &= ~SDHCI_CDNS_HRS06_MODE_MASK; - tmp |= mode; + tmp &= ~SDHCI_CDNS_HRS06_MODE; + tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_MODE, mode); writel(tmp, priv->hrs_addr + SDHCI_CDNS_HRS06); } @@ -199,7 +199,7 @@ static u32 sdhci_cdns_get_emmc_mode(struct sdhci_cdns_priv *priv) u32 tmp; tmp = readl(priv->hrs_addr + SDHCI_CDNS_HRS06); - return tmp & SDHCI_CDNS_HRS06_MODE_MASK; + return FIELD_GET(SDHCI_CDNS_HRS06_MODE, tmp); } static void sdhci_cdns_set_uhs_signaling(struct sdhci_host *host, @@ -254,12 +254,12 @@ static int sdhci_cdns_set_tune_val(struct sdhci_host *host, unsigned int val) void __iomem *reg = priv->hrs_addr + SDHCI_CDNS_HRS06; u32 tmp; - if (WARN_ON(val > SDHCI_CDNS_HRS06_TUNE_MASK)) + if (WARN_ON(!FIELD_FIT(SDHCI_CDNS_HRS06_TUNE, val))) return -EINVAL; tmp = readl(reg); - tmp &= ~(SDHCI_CDNS_HRS06_TUNE_MASK << SDHCI_CDNS_HRS06_TUNE_SHIFT); - tmp |= val << SDHCI_CDNS_HRS06_TUNE_SHIFT; + tmp &= ~SDHCI_CDNS_HRS06_TUNE; + tmp |= FIELD_PREP(SDHCI_CDNS_HRS06_TUNE, val); tmp |= SDHCI_CDNS_HRS06_TUNE_UP; writel(tmp, reg); diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c index fc73e56eb1e2..3fb7d2eec93f 100644 --- a/drivers/mmc/host/sdhci-msm.c +++ b/drivers/mmc/host/sdhci-msm.c @@ -123,14 +123,17 @@ #define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT) #define MSM_MMC_AUTOSUSPEND_DELAY_MS 50 + +/* Timeout value to avoid infinite waiting for pwr_irq */ +#define MSM_PWR_IRQ_TIMEOUT_MS 5000 + struct sdhci_msm_host { struct platform_device *pdev; void __iomem 
*core_mem; /* MSM SDCC mapped address */ int pwr_irq; /* power irq */ - struct clk *clk; /* main SD/MMC bus clock */ - struct clk *pclk; /* SDHC peripheral bus clock */ struct clk *bus_clk; /* SDHC bus voter clock */ struct clk *xo_clk; /* TCXO clk needed for FLL feature of cm_dll*/ + struct clk_bulk_data bulk_clks[4]; /* core, iface, cal, sleep clocks */ unsigned long clk_rate; struct mmc_host *mmc; bool use_14lpp_dll_reset; @@ -138,6 +141,10 @@ struct sdhci_msm_host { bool calibration_done; u8 saved_tuning_phase; bool use_cdclp533; + u32 curr_pwr_state; + u32 curr_io_level; + wait_queue_head_t pwr_irq_wait; + bool pwr_irq_flag; }; static unsigned int msm_get_clock_rate_for_bus_mode(struct sdhci_host *host, @@ -164,10 +171,11 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host, struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); struct mmc_ios curr_ios = host->mmc->ios; + struct clk *core_clk = msm_host->bulk_clks[0].clk; int rc; clock = msm_get_clock_rate_for_bus_mode(host, clock); - rc = clk_set_rate(msm_host->clk, clock); + rc = clk_set_rate(core_clk, clock); if (rc) { pr_err("%s: Failed to set clock at rate %u at timing %d\n", mmc_hostname(host->mmc), clock, @@ -176,7 +184,7 @@ static void msm_set_clock_rate_for_bus_mode(struct sdhci_host *host, } msm_host->clk_rate = clock; pr_debug("%s: Setting clock at rate %lu at timing %d\n", - mmc_hostname(host->mmc), clk_get_rate(msm_host->clk), + mmc_hostname(host->mmc), clk_get_rate(core_clk), curr_ios.timing); } @@ -995,21 +1003,142 @@ static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host, sdhci_msm_hs400(host, &mmc->ios); } -static void sdhci_msm_voltage_switch(struct sdhci_host *host) +static inline void sdhci_msm_init_pwr_irq_wait(struct sdhci_msm_host *msm_host) +{ + init_waitqueue_head(&msm_host->pwr_irq_wait); +} + +static inline void sdhci_msm_complete_pwr_irq_wait( + struct sdhci_msm_host *msm_host) +{ + wake_up(&msm_host->pwr_irq_wait); +} + +/* + * sdhci_msm_check_power_status API should be called when registers writes + * which can toggle sdhci IO bus ON/OFF or change IO lines HIGH/LOW happens. + * To what state the register writes will change the IO lines should be passed + * as the argument req_type. This API will check whether the IO line's state + * is already the expected state and will wait for power irq only if + * power irq is expected to be trigerred based on the current IO line state + * and expected IO line state. + */ +static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + bool done = false; + + pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n", + mmc_hostname(host->mmc), __func__, req_type, + msm_host->curr_pwr_state, msm_host->curr_io_level); + + /* + * The IRQ for request type IO High/LOW will be generated when - + * there is a state change in 1.8V enable bit (bit 3) of + * SDHCI_HOST_CONTROL2 register. The reset state of that bit is 0 + * which indicates 3.3V IO voltage. So, when MMC core layer tries + * to set it to 3.3V before card detection happens, the + * IRQ doesn't get triggered as there is no state change in this bit. + * The driver already handles this case by changing the IO voltage + * level to high as part of controller power up sequence. 
Hence, check + * for host->pwr to handle a case where IO voltage high request is + * issued even before controller power up. + */ + if ((req_type & REQ_IO_HIGH) && !host->pwr) { + pr_debug("%s: do not wait for power IRQ that never comes, req_type: %d\n", + mmc_hostname(host->mmc), req_type); + return; + } + if ((req_type & msm_host->curr_pwr_state) || + (req_type & msm_host->curr_io_level)) + done = true; + /* + * This is needed here to handle cases where register writes will + * not change the current bus state or io level of the controller. + * In this case, no power irq will be triggerred and we should + * not wait. + */ + if (!done) { + if (!wait_event_timeout(msm_host->pwr_irq_wait, + msm_host->pwr_irq_flag, + msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS))) + dev_warn(&msm_host->pdev->dev, + "%s: pwr_irq for req: (%d) timed out\n", + mmc_hostname(host->mmc), req_type); + } + pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc), + __func__, req_type); +} + +static void sdhci_msm_dump_pwr_ctrl_regs(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + + pr_err("%s: PWRCTL_STATUS: 0x%08x | PWRCTL_MASK: 0x%08x | PWRCTL_CTL: 0x%08x\n", + mmc_hostname(host->mmc), + readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS), + readl_relaxed(msm_host->core_mem + CORE_PWRCTL_MASK), + readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL)); +} + +static void sdhci_msm_handle_pwr_irq(struct sdhci_host *host, int irq) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); u32 irq_status, irq_ack = 0; + int retry = 10; + int pwr_state = 0, io_level = 0; + irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS); irq_status &= INT_MASK; writel_relaxed(irq_status, msm_host->core_mem + CORE_PWRCTL_CLEAR); - if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF)) + /* + * There is a rare HW scenario where the first clear pulse could be + * lost when actual reset and clear/read of status register is + * happening at a time. Hence, retry for at least 10 times to make + * sure status register is cleared. Otherwise, this will result in + * a spurious power IRQ resulting in system instability. + */ + while (irq_status & readl_relaxed(msm_host->core_mem + + CORE_PWRCTL_STATUS)) { + if (retry == 0) { + pr_err("%s: Timedout clearing (0x%x) pwrctl status register\n", + mmc_hostname(host->mmc), irq_status); + sdhci_msm_dump_pwr_ctrl_regs(host); + WARN_ON(1); + break; + } + writel_relaxed(irq_status, + msm_host->core_mem + CORE_PWRCTL_CLEAR); + retry--; + udelay(10); + } + + /* Handle BUS ON/OFF*/ + if (irq_status & CORE_PWRCTL_BUS_ON) { + pwr_state = REQ_BUS_ON; + io_level = REQ_IO_HIGH; + irq_ack |= CORE_PWRCTL_BUS_SUCCESS; + } + if (irq_status & CORE_PWRCTL_BUS_OFF) { + pwr_state = REQ_BUS_OFF; + io_level = REQ_IO_LOW; irq_ack |= CORE_PWRCTL_BUS_SUCCESS; - if (irq_status & (CORE_PWRCTL_IO_LOW | CORE_PWRCTL_IO_HIGH)) + } + /* Handle IO LOW/HIGH */ + if (irq_status & CORE_PWRCTL_IO_LOW) { + io_level = REQ_IO_LOW; + irq_ack |= CORE_PWRCTL_IO_SUCCESS; + } + if (irq_status & CORE_PWRCTL_IO_HIGH) { + io_level = REQ_IO_HIGH; irq_ack |= CORE_PWRCTL_IO_SUCCESS; + } /* * The driver has to acknowledge the interrupt, switch voltages and @@ -1017,13 +1146,27 @@ static void sdhci_msm_voltage_switch(struct sdhci_host *host) * switches are handled by the sdhci core, so just report success. 
*/ writel_relaxed(irq_ack, msm_host->core_mem + CORE_PWRCTL_CTL); + + if (pwr_state) + msm_host->curr_pwr_state = pwr_state; + if (io_level) + msm_host->curr_io_level = io_level; + + pr_debug("%s: %s: Handled IRQ(%d), irq_status=0x%x, ack=0x%x\n", + mmc_hostname(msm_host->mmc), __func__, irq, irq_status, + irq_ack); } static irqreturn_t sdhci_msm_pwr_irq(int irq, void *data) { struct sdhci_host *host = (struct sdhci_host *)data; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + + sdhci_msm_handle_pwr_irq(host, irq); + msm_host->pwr_irq_flag = 1; + sdhci_msm_complete_pwr_irq_wait(msm_host); - sdhci_msm_voltage_switch(host); return IRQ_HANDLED; } @@ -1032,8 +1175,9 @@ static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + struct clk *core_clk = msm_host->bulk_clks[0].clk; - return clk_round_rate(msm_host->clk, ULONG_MAX); + return clk_round_rate(core_clk, ULONG_MAX); } static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host) @@ -1092,6 +1236,69 @@ out: __sdhci_msm_set_clock(host, clock); } +/* + * Platform specific register write functions. This is so that, if any + * register write needs to be followed up by platform specific actions, + * they can be added here. These functions can go to sleep when writes + * to certain registers are done. + * These functions are relying on sdhci_set_ios not using spinlock. + */ +static int __sdhci_msm_check_write(struct sdhci_host *host, u16 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); + u32 req_type = 0; + + switch (reg) { + case SDHCI_HOST_CONTROL2: + req_type = (val & SDHCI_CTRL_VDD_180) ? REQ_IO_LOW : + REQ_IO_HIGH; + break; + case SDHCI_SOFTWARE_RESET: + if (host->pwr && (val & SDHCI_RESET_ALL)) + req_type = REQ_BUS_OFF; + break; + case SDHCI_POWER_CONTROL: + req_type = !val ? REQ_BUS_OFF : REQ_BUS_ON; + break; + } + + if (req_type) { + msm_host->pwr_irq_flag = 0; + /* + * Since this register write may trigger a power irq, ensure + * all previous register writes are complete by this point. 
+ */ + mb(); + } + return req_type; +} + +/* This function may sleep*/ +static void sdhci_msm_writew(struct sdhci_host *host, u16 val, int reg) +{ + u32 req_type = 0; + + req_type = __sdhci_msm_check_write(host, val, reg); + writew_relaxed(val, host->ioaddr + reg); + + if (req_type) + sdhci_msm_check_power_status(host, req_type); +} + +/* This function may sleep*/ +static void sdhci_msm_writeb(struct sdhci_host *host, u8 val, int reg) +{ + u32 req_type = 0; + + req_type = __sdhci_msm_check_write(host, val, reg); + + writeb_relaxed(val, host->ioaddr + reg); + + if (req_type) + sdhci_msm_check_power_status(host, req_type); +} + static const struct of_device_id sdhci_msm_dt_match[] = { { .compatible = "qcom,sdhci-msm-v4" }, {}, @@ -1106,7 +1313,8 @@ static const struct sdhci_ops sdhci_msm_ops = { .get_max_clock = sdhci_msm_get_max_clock, .set_bus_width = sdhci_set_bus_width, .set_uhs_signaling = sdhci_msm_set_uhs_signaling, - .voltage_switch = sdhci_msm_voltage_switch, + .write_w = sdhci_msm_writew, + .write_b = sdhci_msm_writeb, }; static const struct sdhci_pltfm_data sdhci_msm_pdata = { @@ -1124,6 +1332,7 @@ static int sdhci_msm_probe(struct platform_device *pdev) struct sdhci_pltfm_host *pltfm_host; struct sdhci_msm_host *msm_host; struct resource *core_memres; + struct clk *clk; int ret; u16 host_version, core_minor; u32 core_version, config; @@ -1160,24 +1369,42 @@ static int sdhci_msm_probe(struct platform_device *pdev) } /* Setup main peripheral bus clock */ - msm_host->pclk = devm_clk_get(&pdev->dev, "iface"); - if (IS_ERR(msm_host->pclk)) { - ret = PTR_ERR(msm_host->pclk); + clk = devm_clk_get(&pdev->dev, "iface"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); dev_err(&pdev->dev, "Peripheral clk setup failed (%d)\n", ret); goto bus_clk_disable; } - - ret = clk_prepare_enable(msm_host->pclk); - if (ret) - goto bus_clk_disable; + msm_host->bulk_clks[1].clk = clk; /* Setup SDC MMC clock */ - msm_host->clk = devm_clk_get(&pdev->dev, "core"); - if (IS_ERR(msm_host->clk)) { - ret = PTR_ERR(msm_host->clk); + clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(clk)) { + ret = PTR_ERR(clk); dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret); - goto pclk_disable; + goto bus_clk_disable; } + msm_host->bulk_clks[0].clk = clk; + + /* Vote for maximum clock rate for maximum performance */ + ret = clk_set_rate(clk, INT_MAX); + if (ret) + dev_warn(&pdev->dev, "core clock boost failed\n"); + + clk = devm_clk_get(&pdev->dev, "cal"); + if (IS_ERR(clk)) + clk = NULL; + msm_host->bulk_clks[2].clk = clk; + + clk = devm_clk_get(&pdev->dev, "sleep"); + if (IS_ERR(clk)) + clk = NULL; + msm_host->bulk_clks[3].clk = clk; + + ret = clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); + if (ret) + goto bus_clk_disable; /* * xo clock is needed for FLL feature of cm_dll. 
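The sdhci-msm hunks above replace the driver's separate core/iface clock pointers with a clk_bulk_data array so the optional "cal" and "sleep" clocks can be prepared and enabled in a single clk_bulk_prepare_enable() call; a NULL clk entry is a no-op for the clk API, which is why the optional clocks are simply left NULL when devm_clk_get() fails. A minimal sketch of that pattern outside the driver follows (the helper name is illustrative and not taken from the patch; the "core"/"cal" clock ids mirror the ones used in these hunks):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/*
 * Illustrative only: fetch one required and one optional clock and
 * prepare/enable them as a group. NULL entries are ignored by the
 * clk core, so optional clocks need no special casing afterwards.
 */
static int example_enable_clocks(struct device *dev,
				 struct clk_bulk_data clks[2])
{
	struct clk *clk;

	clk = devm_clk_get(dev, "core");	/* required clock */
	if (IS_ERR(clk))
		return PTR_ERR(clk);
	clks[0].clk = clk;

	clk = devm_clk_get(dev, "cal");		/* optional clock */
	if (IS_ERR(clk))
		clk = NULL;
	clks[1].clk = clk;

	return clk_bulk_prepare_enable(2, clks);
}

The matching teardown is clk_bulk_disable_unprepare(2, clks), as used by the remove and runtime-suspend hunks of this same patch.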
@@ -1189,15 +1416,6 @@ static int sdhci_msm_probe(struct platform_device *pdev) dev_warn(&pdev->dev, "TCXO clk not present (%d)\n", ret); } - /* Vote for maximum clock rate for maximum performance */ - ret = clk_set_rate(msm_host->clk, INT_MAX); - if (ret) - dev_warn(&pdev->dev, "core clock boost failed\n"); - - ret = clk_prepare_enable(msm_host->clk); - if (ret) - goto pclk_disable; - core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1); msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres); @@ -1251,6 +1469,21 @@ static int sdhci_msm_probe(struct platform_device *pdev) CORE_VENDOR_SPEC_CAPABILITIES0); } + /* + * Power on reset state may trigger power irq if previous status of + * PWRCTL was either BUS_ON or IO_HIGH_V. So before enabling pwr irq + * interrupt in GIC, any pending power irq interrupt should be + * acknowledged. Otherwise power irq interrupt handler would be + * fired prematurely. + */ + sdhci_msm_handle_pwr_irq(host, 0); + + /* + * Ensure that above writes are propogated before interrupt enablement + * in GIC. + */ + mb(); + /* Setup IRQ for handling power/voltage tasks with PMIC */ msm_host->pwr_irq = platform_get_irq_byname(pdev, "pwr_irq"); if (msm_host->pwr_irq < 0) { @@ -1260,6 +1493,10 @@ static int sdhci_msm_probe(struct platform_device *pdev) goto clk_disable; } + sdhci_msm_init_pwr_irq_wait(msm_host); + /* Enable pwr irq interrupts */ + writel_relaxed(INT_MASK, msm_host->core_mem + CORE_PWRCTL_MASK); + ret = devm_request_threaded_irq(&pdev->dev, msm_host->pwr_irq, NULL, sdhci_msm_pwr_irq, IRQF_ONESHOT, dev_name(&pdev->dev), host); @@ -1290,9 +1527,8 @@ pm_runtime_disable: pm_runtime_set_suspended(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); clk_disable: - clk_disable_unprepare(msm_host->clk); -pclk_disable: - clk_disable_unprepare(msm_host->pclk); + clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); bus_clk_disable: if (!IS_ERR(msm_host->bus_clk)) clk_disable_unprepare(msm_host->bus_clk); @@ -1315,8 +1551,8 @@ static int sdhci_msm_remove(struct platform_device *pdev) pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); - clk_disable_unprepare(msm_host->clk); - clk_disable_unprepare(msm_host->pclk); + clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); if (!IS_ERR(msm_host->bus_clk)) clk_disable_unprepare(msm_host->bus_clk); sdhci_pltfm_free(pdev); @@ -1330,8 +1566,8 @@ static int sdhci_msm_runtime_suspend(struct device *dev) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); - clk_disable_unprepare(msm_host->clk); - clk_disable_unprepare(msm_host->pclk); + clk_bulk_disable_unprepare(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); return 0; } @@ -1341,21 +1577,9 @@ static int sdhci_msm_runtime_resume(struct device *dev) struct sdhci_host *host = dev_get_drvdata(dev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_msm_host *msm_host = sdhci_pltfm_priv(pltfm_host); - int ret; - ret = clk_prepare_enable(msm_host->clk); - if (ret) { - dev_err(dev, "clk_enable failed for core_clk: %d\n", ret); - return ret; - } - ret = clk_prepare_enable(msm_host->pclk); - if (ret) { - dev_err(dev, "clk_enable failed for iface_clk: %d\n", ret); - clk_disable_unprepare(msm_host->clk); - return ret; - } - - return 0; + return clk_bulk_prepare_enable(ARRAY_SIZE(msm_host->bulk_clks), + msm_host->bulk_clks); } #endif diff --git a/drivers/mmc/host/sdhci-of-at91.c 
b/drivers/mmc/host/sdhci-of-at91.c index 4e47ed6bc716..682c573e20a7 100644 --- a/drivers/mmc/host/sdhci-of-at91.c +++ b/drivers/mmc/host/sdhci-of-at91.c @@ -114,7 +114,8 @@ static void sdhci_at91_set_power(struct sdhci_host *host, unsigned char mode, sdhci_set_power_noreg(host, mode, vdd); } -void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, unsigned int timing) +static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host, + unsigned int timing) { if (timing == MMC_TIMING_MMC_DDR52) sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index d96a057a7db8..1f424374bbbb 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -458,6 +458,33 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) return clock / 256 / 16; } +static void esdhc_clock_enable(struct sdhci_host *host, bool enable) +{ + u32 val; + ktime_t timeout; + + val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + + if (enable) + val |= ESDHC_CLOCK_SDCLKEN; + else + val &= ~ESDHC_CLOCK_SDCLKEN; + + sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); + + /* Wait max 20 ms */ + timeout = ktime_add_ms(ktime_get(), 20); + val = ESDHC_CLOCK_STABLE; + while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) { + if (ktime_after(ktime_get(), timeout)) { + pr_err("%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + break; + } + udelay(10); + } +} + static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -469,8 +496,10 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) host->mmc->actual_clock = 0; - if (clock == 0) + if (clock == 0) { + esdhc_clock_enable(host, false); return; + } /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ if (esdhc->vendor_ver < VENDOR_V_23) @@ -558,33 +587,6 @@ static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) sdhci_writel(host, ctrl, ESDHC_PROCTL); } -static void esdhc_clock_enable(struct sdhci_host *host, bool enable) -{ - u32 val; - ktime_t timeout; - - val = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - - if (enable) - val |= ESDHC_CLOCK_SDCLKEN; - else - val &= ~ESDHC_CLOCK_SDCLKEN; - - sdhci_writel(host, val, ESDHC_SYSTEM_CONTROL); - - /* Wait max 20 ms */ - timeout = ktime_add_ms(ktime_get(), 20); - val = ESDHC_CLOCK_STABLE; - while (!(sdhci_readl(host, ESDHC_PRSSTAT) & val)) { - if (ktime_after(ktime_get(), timeout)) { - pr_err("%s: Internal clock never stabilised.\n", - mmc_hostname(host->mmc)); - break; - } - udelay(10); - } -} - static void esdhc_reset(struct sdhci_host *host, u8 mask) { sdhci_reset(host, mask); diff --git a/drivers/mmc/host/sdhci-omap.c b/drivers/mmc/host/sdhci-omap.c new file mode 100644 index 000000000000..628bfe9a3d17 --- /dev/null +++ b/drivers/mmc/host/sdhci-omap.c @@ -0,0 +1,607 @@ +/** + * SDHCI Controller driver for TI's OMAP SoCs + * + * Copyright (C) 2017 Texas Instruments + * Author: Kishon Vijay Abraham I <kishon@ti.com> + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 of + * the License as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/delay.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> + +#include "sdhci-pltfm.h" + +#define SDHCI_OMAP_CON 0x12c +#define CON_DW8 BIT(5) +#define CON_DMA_MASTER BIT(20) +#define CON_INIT BIT(1) +#define CON_OD BIT(0) + +#define SDHCI_OMAP_CMD 0x20c + +#define SDHCI_OMAP_HCTL 0x228 +#define HCTL_SDBP BIT(8) +#define HCTL_SDVS_SHIFT 9 +#define HCTL_SDVS_MASK (0x7 << HCTL_SDVS_SHIFT) +#define HCTL_SDVS_33 (0x7 << HCTL_SDVS_SHIFT) +#define HCTL_SDVS_30 (0x6 << HCTL_SDVS_SHIFT) +#define HCTL_SDVS_18 (0x5 << HCTL_SDVS_SHIFT) + +#define SDHCI_OMAP_SYSCTL 0x22c +#define SYSCTL_CEN BIT(2) +#define SYSCTL_CLKD_SHIFT 6 +#define SYSCTL_CLKD_MASK 0x3ff + +#define SDHCI_OMAP_STAT 0x230 + +#define SDHCI_OMAP_IE 0x234 +#define INT_CC_EN BIT(0) + +#define SDHCI_OMAP_AC12 0x23c +#define AC12_V1V8_SIGEN BIT(19) + +#define SDHCI_OMAP_CAPA 0x240 +#define CAPA_VS33 BIT(24) +#define CAPA_VS30 BIT(25) +#define CAPA_VS18 BIT(26) + +#define SDHCI_OMAP_TIMEOUT 1 /* 1 msec */ + +#define SYSCTL_CLKD_MAX 0x3FF + +#define IOV_1V8 1800000 /* 180000 uV */ +#define IOV_3V0 3000000 /* 300000 uV */ +#define IOV_3V3 3300000 /* 330000 uV */ + +struct sdhci_omap_data { + u32 offset; +}; + +struct sdhci_omap_host { + void __iomem *base; + struct device *dev; + struct regulator *pbias; + bool pbias_enabled; + struct sdhci_host *host; + u8 bus_mode; + u8 power_mode; +}; + +static inline u32 sdhci_omap_readl(struct sdhci_omap_host *host, + unsigned int offset) +{ + return readl(host->base + offset); +} + +static inline void sdhci_omap_writel(struct sdhci_omap_host *host, + unsigned int offset, u32 data) +{ + writel(data, host->base + offset); +} + +static int sdhci_omap_set_pbias(struct sdhci_omap_host *omap_host, + bool power_on, unsigned int iov) +{ + int ret; + struct device *dev = omap_host->dev; + + if (IS_ERR(omap_host->pbias)) + return 0; + + if (power_on) { + ret = regulator_set_voltage(omap_host->pbias, iov, iov); + if (ret) { + dev_err(dev, "pbias set voltage failed\n"); + return ret; + } + + if (omap_host->pbias_enabled) + return 0; + + ret = regulator_enable(omap_host->pbias); + if (ret) { + dev_err(dev, "pbias reg enable fail\n"); + return ret; + } + + omap_host->pbias_enabled = true; + } else { + if (!omap_host->pbias_enabled) + return 0; + + ret = regulator_disable(omap_host->pbias); + if (ret) { + dev_err(dev, "pbias reg disable fail\n"); + return ret; + } + omap_host->pbias_enabled = false; + } + + return 0; +} + +static int sdhci_omap_enable_iov(struct sdhci_omap_host *omap_host, + unsigned int iov) +{ + int ret; + struct sdhci_host *host = omap_host->host; + struct mmc_host *mmc = host->mmc; + + ret = sdhci_omap_set_pbias(omap_host, false, 0); + if (ret) + return ret; + + if (!IS_ERR(mmc->supply.vqmmc)) { + ret = regulator_set_voltage(mmc->supply.vqmmc, iov, iov); + if (ret) { + dev_err(mmc_dev(mmc), "vqmmc set voltage failed\n"); + return ret; + } + } + + ret = sdhci_omap_set_pbias(omap_host, true, iov); + if (ret) + return ret; + + return 0; +} + +static void sdhci_omap_conf_bus_power(struct sdhci_omap_host *omap_host, + unsigned char signal_voltage) +{ + u32 reg; + ktime_t timeout; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL); + reg &= ~HCTL_SDVS_MASK; + + if 
(signal_voltage == MMC_SIGNAL_VOLTAGE_330) + reg |= HCTL_SDVS_33; + else + reg |= HCTL_SDVS_18; + + sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg); + + reg |= HCTL_SDBP; + sdhci_omap_writel(omap_host, SDHCI_OMAP_HCTL, reg); + + /* wait 1ms */ + timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT); + while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_HCTL) & HCTL_SDBP)) { + if (WARN_ON(ktime_after(ktime_get(), timeout))) + return; + usleep_range(5, 10); + } +} + +static int sdhci_omap_start_signal_voltage_switch(struct mmc_host *mmc, + struct mmc_ios *ios) +{ + u32 reg; + int ret; + unsigned int iov; + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_omap_host *omap_host; + struct device *dev; + + pltfm_host = sdhci_priv(host); + omap_host = sdhci_pltfm_priv(pltfm_host); + dev = omap_host->dev; + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA); + if (!(reg & CAPA_VS33)) + return -EOPNOTSUPP; + + sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12); + reg &= ~AC12_V1V8_SIGEN; + sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg); + + iov = IOV_3V3; + } else if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180) { + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA); + if (!(reg & CAPA_VS18)) + return -EOPNOTSUPP; + + sdhci_omap_conf_bus_power(omap_host, ios->signal_voltage); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_AC12); + reg |= AC12_V1V8_SIGEN; + sdhci_omap_writel(omap_host, SDHCI_OMAP_AC12, reg); + + iov = IOV_1V8; + } else { + return -EOPNOTSUPP; + } + + ret = sdhci_omap_enable_iov(omap_host, iov); + if (ret) { + dev_err(dev, "failed to switch IO voltage to %dmV\n", iov); + return ret; + } + + dev_dbg(dev, "IO voltage switched to %dmV\n", iov); + return 0; +} + +static void sdhci_omap_set_bus_mode(struct sdhci_omap_host *omap_host, + unsigned int mode) +{ + u32 reg; + + if (omap_host->bus_mode == mode) + return; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + if (mode == MMC_BUSMODE_OPENDRAIN) + reg |= CON_OD; + else + reg &= ~CON_OD; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + + omap_host->bus_mode = mode; +} + +static void sdhci_omap_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_omap_host *omap_host; + + pltfm_host = sdhci_priv(host); + omap_host = sdhci_pltfm_priv(pltfm_host); + + sdhci_omap_set_bus_mode(omap_host, ios->bus_mode); + sdhci_set_ios(mmc, ios); +} + +static u16 sdhci_omap_calc_divisor(struct sdhci_pltfm_host *host, + unsigned int clock) +{ + u16 dsor; + + dsor = DIV_ROUND_UP(clk_get_rate(host->clk), clock); + if (dsor > SYSCTL_CLKD_MAX) + dsor = SYSCTL_CLKD_MAX; + + return dsor; +} + +static void sdhci_omap_start_clock(struct sdhci_omap_host *omap_host) +{ + u32 reg; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL); + reg |= SYSCTL_CEN; + sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg); +} + +static void sdhci_omap_stop_clock(struct sdhci_omap_host *omap_host) +{ + u32 reg; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_SYSCTL); + reg &= ~SYSCTL_CEN; + sdhci_omap_writel(omap_host, SDHCI_OMAP_SYSCTL, reg); +} + +static void sdhci_omap_set_clock(struct sdhci_host *host, unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + unsigned long clkdiv; + + 
sdhci_omap_stop_clock(omap_host); + + if (!clock) + return; + + clkdiv = sdhci_omap_calc_divisor(pltfm_host, clock); + clkdiv = (clkdiv & SYSCTL_CLKD_MASK) << SYSCTL_CLKD_SHIFT; + sdhci_enable_clk(host, clkdiv); + + sdhci_omap_start_clock(omap_host); +} + +static void sdhci_omap_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); +} + +static int sdhci_omap_enable_dma(struct sdhci_host *host) +{ + u32 reg; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + reg |= CON_DMA_MASTER; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + + return 0; +} + +static unsigned int sdhci_omap_get_min_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return clk_get_rate(pltfm_host->clk) / SYSCTL_CLKD_MAX; +} + +static void sdhci_omap_set_bus_width(struct sdhci_host *host, int width) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + u32 reg; + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + if (width == MMC_BUS_WIDTH_8) + reg |= CON_DW8; + else + reg &= ~CON_DW8; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + + sdhci_set_bus_width(host, width); +} + +static void sdhci_omap_init_74_clocks(struct sdhci_host *host, u8 power_mode) +{ + u32 reg; + ktime_t timeout; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_omap_host *omap_host = sdhci_pltfm_priv(pltfm_host); + + if (omap_host->power_mode == power_mode) + return; + + if (power_mode != MMC_POWER_ON) + return; + + disable_irq(host->irq); + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + reg |= CON_INIT; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + sdhci_omap_writel(omap_host, SDHCI_OMAP_CMD, 0x0); + + /* wait 1ms */ + timeout = ktime_add_ms(ktime_get(), SDHCI_OMAP_TIMEOUT); + while (!(sdhci_omap_readl(omap_host, SDHCI_OMAP_STAT) & INT_CC_EN)) { + if (WARN_ON(ktime_after(ktime_get(), timeout))) + return; + usleep_range(5, 10); + } + + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CON); + reg &= ~CON_INIT; + sdhci_omap_writel(omap_host, SDHCI_OMAP_CON, reg); + sdhci_omap_writel(omap_host, SDHCI_OMAP_STAT, INT_CC_EN); + + enable_irq(host->irq); + + omap_host->power_mode = power_mode; +} + +static struct sdhci_ops sdhci_omap_ops = { + .set_clock = sdhci_omap_set_clock, + .set_power = sdhci_omap_set_power, + .enable_dma = sdhci_omap_enable_dma, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .get_min_clock = sdhci_omap_get_min_clock, + .set_bus_width = sdhci_omap_set_bus_width, + .platform_send_init_74_clocks = sdhci_omap_init_74_clocks, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static int sdhci_omap_set_capabilities(struct sdhci_omap_host *omap_host) +{ + u32 reg; + int ret = 0; + struct device *dev = omap_host->dev; + struct regulator *vqmmc; + + vqmmc = regulator_get(dev, "vqmmc"); + if (IS_ERR(vqmmc)) { + ret = PTR_ERR(vqmmc); + goto reg_put; + } + + /* voltage capabilities might be set by boot loader, clear it */ + reg = sdhci_omap_readl(omap_host, SDHCI_OMAP_CAPA); + reg &= ~(CAPA_VS18 | CAPA_VS30 | CAPA_VS33); + + if (regulator_is_supported_voltage(vqmmc, IOV_3V3, IOV_3V3)) + reg |= CAPA_VS33; + if (regulator_is_supported_voltage(vqmmc, IOV_1V8, IOV_1V8)) + reg |= CAPA_VS18; + + 
sdhci_omap_writel(omap_host, SDHCI_OMAP_CAPA, reg); + +reg_put: + regulator_put(vqmmc); + + return ret; +} + +static const struct sdhci_pltfm_data sdhci_omap_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_NO_HISPD_BIT | + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, + .quirks2 = SDHCI_QUIRK2_NO_1_8_V | + SDHCI_QUIRK2_ACMD23_BROKEN | + SDHCI_QUIRK2_RSP_136_HAS_CRC, + .ops = &sdhci_omap_ops, +}; + +static const struct sdhci_omap_data dra7_data = { + .offset = 0x200, +}; + +static const struct of_device_id omap_sdhci_match[] = { + { .compatible = "ti,dra7-sdhci", .data = &dra7_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_sdhci_match); + +static int sdhci_omap_probe(struct platform_device *pdev) +{ + int ret; + u32 offset; + struct device *dev = &pdev->dev; + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_omap_host *omap_host; + struct mmc_host *mmc; + const struct of_device_id *match; + struct sdhci_omap_data *data; + + match = of_match_device(omap_sdhci_match, dev); + if (!match) + return -EINVAL; + + data = (struct sdhci_omap_data *)match->data; + if (!data) { + dev_err(dev, "no sdhci omap data\n"); + return -EINVAL; + } + offset = data->offset; + + host = sdhci_pltfm_init(pdev, &sdhci_omap_pdata, + sizeof(*omap_host)); + if (IS_ERR(host)) { + dev_err(dev, "Failed sdhci_pltfm_init\n"); + return PTR_ERR(host); + } + + pltfm_host = sdhci_priv(host); + omap_host = sdhci_pltfm_priv(pltfm_host); + omap_host->host = host; + omap_host->base = host->ioaddr; + omap_host->dev = dev; + host->ioaddr += offset; + + mmc = host->mmc; + ret = mmc_of_parse(mmc); + if (ret) + goto err_pltfm_free; + + pltfm_host->clk = devm_clk_get(dev, "fck"); + if (IS_ERR(pltfm_host->clk)) { + ret = PTR_ERR(pltfm_host->clk); + goto err_pltfm_free; + } + + ret = clk_set_rate(pltfm_host->clk, mmc->f_max); + if (ret) { + dev_err(dev, "failed to set clock to %d\n", mmc->f_max); + goto err_pltfm_free; + } + + omap_host->pbias = devm_regulator_get_optional(dev, "pbias"); + if (IS_ERR(omap_host->pbias)) { + ret = PTR_ERR(omap_host->pbias); + if (ret != -ENODEV) + goto err_pltfm_free; + dev_dbg(dev, "unable to get pbias regulator %d\n", ret); + } + omap_host->pbias_enabled = false; + + /* + * omap_device_pm_domain has callbacks to enable the main + * functional clock, interface clock and also configure the + * SYSCONFIG register of omap devices. The callback will be invoked + * as part of pm_runtime_get_sync. 
+ */ + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync failed\n"); + pm_runtime_put_noidle(dev); + goto err_rpm_disable; + } + + ret = sdhci_omap_set_capabilities(omap_host); + if (ret) { + dev_err(dev, "failed to set system capabilities\n"); + goto err_put_sync; + } + + host->mmc_host_ops.get_ro = mmc_gpio_get_ro; + host->mmc_host_ops.start_signal_voltage_switch = + sdhci_omap_start_signal_voltage_switch; + host->mmc_host_ops.set_ios = sdhci_omap_set_ios; + + sdhci_read_caps(host); + host->caps |= SDHCI_CAN_DO_ADMA2; + + ret = sdhci_add_host(host); + if (ret) + goto err_put_sync; + + return 0; + +err_put_sync: + pm_runtime_put_sync(dev); + +err_rpm_disable: + pm_runtime_disable(dev); + +err_pltfm_free: + sdhci_pltfm_free(pdev); + return ret; +} + +static int sdhci_omap_remove(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct sdhci_host *host = platform_get_drvdata(pdev); + + sdhci_remove_host(host, true); + pm_runtime_put_sync(dev); + pm_runtime_disable(dev); + sdhci_pltfm_free(pdev); + + return 0; +} + +static struct platform_driver sdhci_omap_driver = { + .probe = sdhci_omap_probe, + .remove = sdhci_omap_remove, + .driver = { + .name = "sdhci-omap", + .of_match_table = omap_sdhci_match, + }, +}; + +module_platform_driver(sdhci_omap_driver); + +MODULE_DESCRIPTION("SDHCI driver for OMAP SoCs"); +MODULE_AUTHOR("Texas Instruments Inc."); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sdhci_omap"); diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 67d787fa3306..3e4f04fd5175 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -32,7 +32,6 @@ #include "sdhci.h" #include "sdhci-pci.h" -#include "sdhci-pci-o2micro.h" static int sdhci_pci_enable_dma(struct sdhci_host *host); static void sdhci_pci_hw_reset(struct sdhci_host *host); @@ -798,15 +797,6 @@ static const struct sdhci_pci_fixes sdhci_intel_mrfld_mmc = { .probe_slot = intel_mrfld_mmc_probe_slot, }; -/* O2Micro extra registers */ -#define O2_SD_LOCK_WP 0xD3 -#define O2_SD_MULTI_VCC3V 0xEE -#define O2_SD_CLKREQ 0xEC -#define O2_SD_CAPS 0xE0 -#define O2_SD_ADMA1 0xE2 -#define O2_SD_ADMA2 0xE7 -#define O2_SD_INF_MOD 0xF1 - static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) { u8 scratch; @@ -1290,6 +1280,7 @@ static const struct pci_device_id pci_ids[] = { SDHCI_PCI_DEVICE(INTEL, SPT_SDIO, intel_byt_sdio), SDHCI_PCI_DEVICE(INTEL, SPT_SD, intel_byt_sd), SDHCI_PCI_DEVICE(INTEL, DNV_EMMC, intel_byt_emmc), + SDHCI_PCI_DEVICE(INTEL, CDF_EMMC, intel_glk_emmc), SDHCI_PCI_DEVICE(INTEL, BXT_EMMC, intel_byt_emmc), SDHCI_PCI_DEVICE(INTEL, BXT_SDIO, intel_byt_sdio), SDHCI_PCI_DEVICE(INTEL, BXT_SD, intel_byt_sd), diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index 14273ca00641..555970a29c94 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -19,7 +19,40 @@ #include "sdhci.h" #include "sdhci-pci.h" -#include "sdhci-pci-o2micro.h" + +/* + * O2Micro device registers + */ + +#define O2_SD_MISC_REG5 0x64 +#define O2_SD_LD0_CTRL 0x68 +#define O2_SD_DEV_CTRL 0x88 +#define O2_SD_LOCK_WP 0xD3 +#define O2_SD_TEST_REG 0xD4 +#define O2_SD_FUNC_REG0 0xDC +#define O2_SD_MULTI_VCC3V 0xEE +#define O2_SD_CLKREQ 0xEC +#define O2_SD_CAPS 0xE0 +#define O2_SD_ADMA1 0xE2 +#define O2_SD_ADMA2 0xE7 +#define O2_SD_INF_MOD 0xF1 +#define O2_SD_MISC_CTRL4 0xFC +#define O2_SD_TUNING_CTRL 0x300 +#define O2_SD_PLL_SETTING 
0x304 +#define O2_SD_CLK_SETTING 0x328 +#define O2_SD_CAP_REG2 0x330 +#define O2_SD_CAP_REG0 0x334 +#define O2_SD_UHS1_CAP_SETTING 0x33C +#define O2_SD_DELAY_CTRL 0x350 +#define O2_SD_UHS2_L1_CTRL 0x35C +#define O2_SD_FUNC_REG3 0x3E0 +#define O2_SD_FUNC_REG4 0x3E4 +#define O2_SD_LED_ENABLE BIT(6) +#define O2_SD_FREG0_LEDOFF BIT(13) +#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22) + +#define O2_SD_VENDOR_SETTING 0x110 +#define O2_SD_VENDOR_SETTING2 0x1C8 static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value) { diff --git a/drivers/mmc/host/sdhci-pci-o2micro.h b/drivers/mmc/host/sdhci-pci-o2micro.h deleted file mode 100644 index 770f53857211..000000000000 --- a/drivers/mmc/host/sdhci-pci-o2micro.h +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (C) 2013 BayHub Technology Ltd. - * - * Authors: Peter Guo <peter.guo@bayhubtech.com> - * Adam Lee <adam.lee@canonical.com> - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef __SDHCI_PCI_O2MICRO_H -#define __SDHCI_PCI_O2MICRO_H - -#include "sdhci-pci.h" - -/* - * O2Micro device IDs - */ - -#define PCI_DEVICE_ID_O2_SDS0 0x8420 -#define PCI_DEVICE_ID_O2_SDS1 0x8421 -#define PCI_DEVICE_ID_O2_FUJIN2 0x8520 -#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620 -#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621 - -/* - * O2Micro device registers - */ - -#define O2_SD_MISC_REG5 0x64 -#define O2_SD_LD0_CTRL 0x68 -#define O2_SD_DEV_CTRL 0x88 -#define O2_SD_LOCK_WP 0xD3 -#define O2_SD_TEST_REG 0xD4 -#define O2_SD_FUNC_REG0 0xDC -#define O2_SD_MULTI_VCC3V 0xEE -#define O2_SD_CLKREQ 0xEC -#define O2_SD_CAPS 0xE0 -#define O2_SD_ADMA1 0xE2 -#define O2_SD_ADMA2 0xE7 -#define O2_SD_INF_MOD 0xF1 -#define O2_SD_MISC_CTRL4 0xFC -#define O2_SD_TUNING_CTRL 0x300 -#define O2_SD_PLL_SETTING 0x304 -#define O2_SD_CLK_SETTING 0x328 -#define O2_SD_CAP_REG2 0x330 -#define O2_SD_CAP_REG0 0x334 -#define O2_SD_UHS1_CAP_SETTING 0x33C -#define O2_SD_DELAY_CTRL 0x350 -#define O2_SD_UHS2_L1_CTRL 0x35C -#define O2_SD_FUNC_REG3 0x3E0 -#define O2_SD_FUNC_REG4 0x3E4 -#define O2_SD_LED_ENABLE BIT(6) -#define O2_SD_FREG0_LEDOFF BIT(13) -#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22) - -#define O2_SD_VENDOR_SETTING 0x110 -#define O2_SD_VENDOR_SETTING2 0x1C8 - -extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot); - -extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip); - -extern int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip); - -#endif /* __SDHCI_PCI_O2MICRO_H */ diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index 3e8ea3e566f6..0056f08a29cc 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -6,6 +6,12 @@ * PCI device IDs, sub IDs */ +#define PCI_DEVICE_ID_O2_SDS0 0x8420 +#define PCI_DEVICE_ID_O2_SDS1 0x8421 +#define PCI_DEVICE_ID_O2_FUJIN2 0x8520 +#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620 +#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621 + #define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809 #define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a #define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14 @@ -26,6 +32,7 @@ #define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c #define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d #define PCI_DEVICE_ID_INTEL_DNV_EMMC 
0x19db +#define PCI_DEVICE_ID_INTEL_CDF_EMMC 0x18db #define PCI_DEVICE_ID_INTEL_BXT_SD 0x0aca #define PCI_DEVICE_ID_INTEL_BXT_EMMC 0x0acc #define PCI_DEVICE_ID_INTEL_BXT_SDIO 0x0ad0 @@ -164,4 +171,10 @@ static inline void *sdhci_pci_priv(struct sdhci_pci_slot *slot) int sdhci_pci_resume_host(struct sdhci_pci_chip *chip); #endif +int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot); +int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip); +#ifdef CONFIG_PM_SLEEP +int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip); +#endif + #endif /* __SDHCI_PCI_H */ diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index d328fcf284d1..cda83ccb2702 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -761,32 +761,24 @@ static const struct dev_pm_ops sdhci_s3c_pmops = { NULL) }; -#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212) -static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = { - .no_divider = true, -}; -#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data) -#else -#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL) -#endif - static const struct platform_device_id sdhci_s3c_driver_ids[] = { { .name = "s3c-sdhci", .driver_data = (kernel_ulong_t)NULL, - }, { - .name = "exynos4-sdhci", - .driver_data = EXYNOS4_SDHCI_DRV_DATA, }, { } }; MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids); #ifdef CONFIG_OF +static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = { + .no_divider = true, +}; + static const struct of_device_id sdhci_s3c_dt_match[] = { { .compatible = "samsung,s3c6410-sdhci", }, { .compatible = "samsung,exynos4210-sdhci", - .data = (void *)EXYNOS4_SDHCI_DRV_DATA }, + .data = &exynos4_sdhci_drv_data }, {}, }; MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match); diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 0cd6fa80db66..b877c13184c2 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -422,7 +422,15 @@ static const struct sdhci_pltfm_data sdhci_tegra186_pdata = { SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, - .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN, + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + /* SDHCI controllers on Tegra186 support 40-bit addressing. + * IOVA addresses are 48-bit wide on Tegra186. + * With 64-bit dma mask used for SDHCI, accesses can + * be broken. Disable 64-bit dma, which would fall back + * to 32-bit dma mask. Ideally 40-bit dma mask would work, + * But it is not supported as of now. 
+ */ + SDHCI_QUIRK2_BROKEN_64_BIT_DMA, .ops = &tegra114_sdhci_ops, }; diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 0d5fcca18c9e..2f14334e42df 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -2407,12 +2407,12 @@ static void sdhci_tasklet_finish(unsigned long param) ; } -static void sdhci_timeout_timer(unsigned long data) +static void sdhci_timeout_timer(struct timer_list *t) { struct sdhci_host *host; unsigned long flags; - host = (struct sdhci_host*)data; + host = from_timer(host, t, timer); spin_lock_irqsave(&host->lock, flags); @@ -2429,12 +2429,12 @@ static void sdhci_timeout_timer(unsigned long data) spin_unlock_irqrestore(&host->lock, flags); } -static void sdhci_timeout_data_timer(unsigned long data) +static void sdhci_timeout_data_timer(struct timer_list *t) { struct sdhci_host *host; unsigned long flags; - host = (struct sdhci_host *)data; + host = from_timer(host, t, data_timer); spin_lock_irqsave(&host->lock, flags); @@ -3238,7 +3238,7 @@ int sdhci_setup_host(struct sdhci_host *host) * available. */ ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) return ret; DBG("Version: 0x%08x | Present: 0x%08x\n", @@ -3749,9 +3749,8 @@ int __sdhci_add_host(struct sdhci_host *host) tasklet_init(&host->finish_tasklet, sdhci_tasklet_finish, (unsigned long)host); - setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); - setup_timer(&host->data_timer, sdhci_timeout_data_timer, - (unsigned long)host); + timer_setup(&host->timer, sdhci_timeout_timer, 0); + timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0); init_waitqueue_head(&host->buf_ready_int); diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index 111b66f5439b..04ca0d33a521 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -13,6 +13,7 @@ #include <linux/err.h> #include <linux/delay.h> #include <linux/module.h> +#include <linux/property.h> #include <linux/clk.h> #include "sdhci-pltfm.h" @@ -47,6 +48,7 @@ struct f_sdhost_priv { struct clk *clk; u32 vendor_hs200; struct device *dev; + bool enable_cmd_dat_delay; }; static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host) @@ -84,10 +86,19 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host) static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask) { + struct f_sdhost_priv *priv = sdhci_priv(host); + u32 ctl; + if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0) sdhci_writew(host, 0xBC01, SDHCI_CLOCK_CONTROL); sdhci_reset(host, mask); + + if (priv->enable_cmd_dat_delay) { + ctl = sdhci_readl(host, F_SDH30_ESD_CONTROL); + ctl |= F_SDH30_CMD_DAT_DELAY; + sdhci_writel(host, ctl, F_SDH30_ESD_CONTROL); + } } static const struct sdhci_ops sdhci_f_sdh30_ops = { @@ -126,6 +137,9 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE | SDHCI_QUIRK2_TUNING_WORK_AROUND; + priv->enable_cmd_dat_delay = device_property_read_bool(dev, + "fujitsu,cmd-dat-delay-select"); + ret = mmc_of_parse(host->mmc); if (ret) goto err; diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 53c970fe0873..cc98355dbdb9 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1175,11 +1175,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, return -EINVAL; ret = mmc_regulator_get_supply(host->mmc); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(&pdev->dev, "Could not get vmmc supply\n"); + if (ret) 
return ret; - } host->reg_base = devm_ioremap_resource(&pdev->dev, platform_get_resource(pdev, IORESOURCE_MEM, 0)); diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 93c4b40df90a..a3d8380ab480 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -783,9 +783,9 @@ static void tifm_sd_end_cmd(unsigned long data) mmc_request_done(mmc, mrq); } -static void tifm_sd_abort(unsigned long data) +static void tifm_sd_abort(struct timer_list *t) { - struct tifm_sd *host = (struct tifm_sd*)data; + struct tifm_sd *host = from_timer(host, t, timer); pr_err("%s : card failed to respond for a long period of time " "(%x, %x)\n", @@ -968,7 +968,7 @@ static int tifm_sd_probe(struct tifm_dev *sock) tasklet_init(&host->finish_tasklet, tifm_sd_end_cmd, (unsigned long)host); - setup_timer(&host->timer, tifm_sd_abort, (unsigned long)host); + timer_setup(&host->timer, tifm_sd_abort, 0); mmc->ops = &tifm_sd_ops; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; diff --git a/drivers/mmc/host/tmio_mmc_core.c b/drivers/mmc/host/tmio_mmc_core.c index 9c4e6199b854..583bf3262df5 100644 --- a/drivers/mmc/host/tmio_mmc_core.c +++ b/drivers/mmc/host/tmio_mmc_core.c @@ -167,11 +167,11 @@ static void tmio_mmc_clk_start(struct tmio_mmc_host *host) /* HW engineers overrode docs: no sleep needed on R-Car2+ */ if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) - msleep(10); + usleep_range(10000, 11000); if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); - msleep(10); + usleep_range(10000, 11000); } } @@ -179,7 +179,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) { if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); - msleep(10); + usleep_range(10000, 11000); } sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~CLK_CTL_SCLKEN & @@ -187,7 +187,7 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) /* HW engineers overrode docs: no sleep needed on R-Car2+ */ if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) - msleep(10); + usleep_range(10000, 11000); } static void tmio_mmc_set_clock(struct tmio_mmc_host *host, @@ -219,7 +219,7 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & CLK_CTL_DIV_MASK); if (!(host->pdata->flags & TMIO_MMC_MIN_RCAR2)) - msleep(10); + usleep_range(10000, 11000); tmio_mmc_clk_start(host); } @@ -230,11 +230,11 @@ static void tmio_mmc_reset(struct tmio_mmc_host *host) sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); - msleep(10); + usleep_range(10000, 11000); sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); - msleep(10); + usleep_range(10000, 11000); if (host->pdata->flags & TMIO_MMC_SDIO_IRQ) { sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); @@ -1113,8 +1113,11 @@ static int tmio_mmc_init_ocr(struct tmio_mmc_host *host) { struct tmio_mmc_data *pdata = host->pdata; struct mmc_host *mmc = host->mmc; + int err; - mmc_regulator_get_supply(mmc); + err = mmc_regulator_get_supply(mmc); + if (err) + return err; /* use ocr_mask if no regulator */ if (!mmc->ocr_avail) @@ -1299,23 +1302,24 @@ int tmio_mmc_host_probe(struct tmio_mmc_host *_host, pm_runtime_enable(&pdev->dev); ret = mmc_add_host(mmc); - if (ret < 0) { - tmio_mmc_host_remove(_host); - return ret; - } + 
if (ret) + goto remove_host; dev_pm_qos_expose_latency_limit(&pdev->dev, 100); if (pdata->flags & TMIO_MMC_USE_GPIO_CD) { ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0); - if (ret < 0) { - tmio_mmc_host_remove(_host); - return ret; - } + if (ret) + goto remove_host; + mmc_gpiod_request_cd_irq(mmc); } return 0; + +remove_host: + tmio_mmc_host_remove(_host); + return ret; } EXPORT_SYMBOL_GPL(tmio_mmc_host_probe); diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c index 64da6a88cfb9..cdfeb15b6f05 100644 --- a/drivers/mmc/host/usdhi6rol0.c +++ b/drivers/mmc/host/usdhi6rol0.c @@ -1757,7 +1757,7 @@ static int usdhi6_probe(struct platform_device *pdev) return -ENOMEM; ret = mmc_regulator_get_supply(mmc); - if (ret == -EPROBE_DEFER) + if (ret) goto e_free_mmc; ret = mmc_of_parse(mmc); diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index a838bf5480d8..32c4211506fc 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -932,12 +932,12 @@ out: return result; } -static void via_sdc_timeout(unsigned long ulongdata) +static void via_sdc_timeout(struct timer_list *t) { struct via_crdr_mmc_host *sdhost; unsigned long flags; - sdhost = (struct via_crdr_mmc_host *)ulongdata; + sdhost = from_timer(sdhost, t, timer); spin_lock_irqsave(&sdhost->lock, flags); @@ -1036,9 +1036,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host) u32 lenreg; u32 status; - init_timer(&host->timer); - host->timer.data = (unsigned long)host; - host->timer.function = via_sdc_timeout; + timer_setup(&host->timer, via_sdc_timeout, 0); spin_lock_init(&host->lock); diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index 8f569d257405..1fe68137a30f 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -741,9 +741,10 @@ static void vub300_deadwork_thread(struct work_struct *work) kref_put(&vub300->kref, vub300_delete); } -static void vub300_inactivity_timer_expired(unsigned long data) +static void vub300_inactivity_timer_expired(struct timer_list *t) { /* softirq */ - struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data; + struct vub300_mmc_host *vub300 = from_timer(vub300, t, + inactivity_timer); if (!vub300->interface) { kref_put(&vub300->kref, vub300_delete); } else if (vub300->cmd) { @@ -1180,9 +1181,10 @@ static void send_command(struct vub300_mmc_host *vub300) * timer callback runs in atomic mode * so it cannot call usb_kill_urb() */ -static void vub300_sg_timed_out(unsigned long data) +static void vub300_sg_timed_out(struct timer_list *t) { - struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data; + struct vub300_mmc_host *vub300 = from_timer(vub300, t, + sg_transfer_timer); vub300->usb_timed_out = 1; usb_sg_cancel(&vub300->sg_request); usb_unlink_urb(vub300->command_out_urb); @@ -1244,12 +1246,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300, USB_RECIP_DEVICE, 0x0000, 0x0000, xfer_buffer, xfer_length, HZ); kfree(xfer_buffer); - if (retval < 0) { - strncpy(vub300->vub_name, - "SDIO pseudocode download failed", - sizeof(vub300->vub_name)); - return; - } + if (retval < 0) + goto copy_error_message; } else { dev_err(&vub300->udev->dev, "not enough memory for xfer buffer to send" @@ -1291,12 +1289,8 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300, USB_RECIP_DEVICE, 0x0000, 0x0000, xfer_buffer, xfer_length, HZ); kfree(xfer_buffer); - if (retval < 0) { - strncpy(vub300->vub_name, - "SDIO pseudocode download failed", - 
sizeof(vub300->vub_name)); - return; - } + if (retval < 0) + goto copy_error_message; } else { dev_err(&vub300->udev->dev, "not enough memory for xfer buffer to send" @@ -1349,6 +1343,12 @@ static void __download_offload_pseudocode(struct vub300_mmc_host *vub300, sizeof(vub300->vub_name)); return; } + + return; + +copy_error_message: + strncpy(vub300->vub_name, "SDIO pseudocode download failed", + sizeof(vub300->vub_name)); } /* @@ -2323,13 +2323,10 @@ static int vub300_probe(struct usb_interface *interface, INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread); INIT_WORK(&vub300->deadwork, vub300_deadwork_thread); kref_init(&vub300->kref); - init_timer(&vub300->sg_transfer_timer); - vub300->sg_transfer_timer.data = (unsigned long)vub300; - vub300->sg_transfer_timer.function = vub300_sg_timed_out; + timer_setup(&vub300->sg_transfer_timer, vub300_sg_timed_out, 0); kref_get(&vub300->kref); - init_timer(&vub300->inactivity_timer); - vub300->inactivity_timer.data = (unsigned long)vub300; - vub300->inactivity_timer.function = vub300_inactivity_timer_expired; + timer_setup(&vub300->inactivity_timer, + vub300_inactivity_timer_expired, 0); vub300->inactivity_timer.expires = jiffies + HZ; add_timer(&vub300->inactivity_timer); if (vub300->card_present) diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 546aaf8d1507..f4233576153b 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -956,9 +956,9 @@ static const struct mmc_host_ops wbsd_ops = { * Helper function to reset detection ignore */ -static void wbsd_reset_ignore(unsigned long data) +static void wbsd_reset_ignore(struct timer_list *t) { - struct wbsd_host *host = (struct wbsd_host *)data; + struct wbsd_host *host = from_timer(host, t, ignore_timer); BUG_ON(host == NULL); @@ -1224,9 +1224,7 @@ static int wbsd_alloc_mmc(struct device *dev) /* * Set up timers */ - init_timer(&host->ignore_timer); - host->ignore_timer.data = (unsigned long)host; - host->ignore_timer.function = wbsd_reset_ignore; + timer_setup(&host->ignore_timer, wbsd_reset_ignore, 0); /* * Maximum number of segments. Worst case is one sector per segment diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index c02cc817a490..1ed9529e7bd1 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -1378,7 +1378,7 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev) unsigned int count; slaves = rcu_dereference(bond->slave_arr); - count = slaves ? ACCESS_ONCE(slaves->count) : 0; + count = slaves ? 
READ_ONCE(slaves->count) : 0; if (likely(count)) tx_slave = slaves->arr[hash_index % count]; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b2db581131b2..08a4f57cf409 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1167,7 +1167,7 @@ static rx_handler_result_t bond_handle_frame(struct sk_buff **pskb) slave = bond_slave_get_rcu(skb->dev); bond = slave->bond; - recv_probe = ACCESS_ONCE(bond->recv_probe); + recv_probe = READ_ONCE(bond->recv_probe); if (recv_probe) { ret = recv_probe(skb, bond, slave); if (ret == RX_HANDLER_CONSUMED) { @@ -3811,7 +3811,7 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev else bond_xmit_slave_id(bond, skb, 0); } else { - int slave_cnt = ACCESS_ONCE(bond->slave_cnt); + int slave_cnt = READ_ONCE(bond->slave_cnt); if (likely(slave_cnt)) { slave_id = bond_rr_gen_slave_id(bond); @@ -3973,7 +3973,7 @@ static int bond_3ad_xor_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int count; slaves = rcu_dereference(bond->slave_arr); - count = slaves ? ACCESS_ONCE(slaves->count) : 0; + count = slaves ? READ_ONCE(slaves->count) : 0; if (likely(count)) { slave = slaves->arr[bond_xmit_hash(bond, skb) % count]; bond_dev_queue_xmit(bond, skb, slave->dev); diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 1cbca8e5741e..b6e2bfd7d2d6 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -166,8 +166,8 @@ static unsigned int network_rec_config_shadow = 0; static unsigned int network_tr_ctrl_shadow = 0; /* Network speed indication. */ -static DEFINE_TIMER(speed_timer, NULL, 0, 0); -static DEFINE_TIMER(clear_led_timer, NULL, 0, 0); +static DEFINE_TIMER(speed_timer, NULL); +static DEFINE_TIMER(clear_led_timer, NULL); static int current_speed; /* Speed read from transceiver */ static int current_speed_selection; /* Speed selected by user */ static unsigned long led_next_time; @@ -175,7 +175,7 @@ static int led_active; static int rx_queue_len; /* Duplex */ -static DEFINE_TIMER(duplex_timer, NULL, 0, 0); +static DEFINE_TIMER(duplex_timer, NULL); static int full_duplex; static enum duplex current_duplex; diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 4ef68f69b58c..43f52a8fe708 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -405,7 +405,7 @@ void free_tx_desc(struct adapter *adap, struct sge_txq *q, */ static inline int reclaimable(const struct sge_txq *q) { - int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); + int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); hw_cidx -= q->cidx; return hw_cidx < 0 ? 
hw_cidx + q->size : hw_cidx; } @@ -1375,7 +1375,7 @@ out_free: dev_kfree_skb_any(skb); */ static inline void reclaim_completed_tx_imm(struct sge_txq *q) { - int hw_cidx = ntohs(ACCESS_ONCE(q->stat->cidx)); + int hw_cidx = ntohs(READ_ONCE(q->stat->cidx)); int reclaim = hw_cidx - q->cidx; if (reclaim < 0) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 0e3d9f39a807..c6e859a27ee6 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -605,7 +605,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val) if (wrapped) newacc += 65536; - ACCESS_ONCE(*acc) = newacc; + WRITE_ONCE(*acc, newacc); } static void populate_erx_stats(struct be_adapter *adapter, diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 0cec06bec63e..340e28211135 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -373,7 +373,7 @@ static int hip04_tx_reclaim(struct net_device *ndev, bool force) unsigned int count; smp_rmb(); - count = tx_count(ACCESS_ONCE(priv->tx_head), tx_tail); + count = tx_count(READ_ONCE(priv->tx_head), tx_tail); if (count == 0) goto out; @@ -431,7 +431,7 @@ static int hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) dma_addr_t phys; smp_rmb(); - count = tx_count(tx_head, ACCESS_ONCE(priv->tx_tail)); + count = tx_count(tx_head, READ_ONCE(priv->tx_tail)); if (count == (TX_DESC_NUM - 1)) { netif_stop_queue(ndev); return NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 8f326f87a815..2cb9539c931e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -264,7 +264,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) vsi->rx_buf_failed, vsi->rx_page_failed); rcu_read_lock(); for (i = 0; i < vsi->num_queue_pairs; i++) { - struct i40e_ring *rx_ring = ACCESS_ONCE(vsi->rx_rings[i]); + struct i40e_ring *rx_ring = READ_ONCE(vsi->rx_rings[i]); if (!rx_ring) continue; @@ -320,7 +320,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) ITR_IS_DYNAMIC(rx_ring->rx_itr_setting) ? 
"dynamic" : "fixed"); } for (i = 0; i < vsi->num_queue_pairs; i++) { - struct i40e_ring *tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + struct i40e_ring *tx_ring = READ_ONCE(vsi->tx_rings[i]); if (!tx_ring) continue; diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 05e89864f781..e9e04a485e0a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1570,7 +1570,7 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, } rcu_read_lock(); for (j = 0; j < vsi->num_queue_pairs; j++) { - tx_ring = ACCESS_ONCE(vsi->tx_rings[j]); + tx_ring = READ_ONCE(vsi->tx_rings[j]); if (!tx_ring) continue; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 6498da8806cb..de1fcac7834d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -455,7 +455,7 @@ static void i40e_get_netdev_stats_struct(struct net_device *netdev, u64 bytes, packets; unsigned int start; - tx_ring = ACCESS_ONCE(vsi->tx_rings[i]); + tx_ring = READ_ONCE(vsi->tx_rings[i]); if (!tx_ring) continue; i40e_get_netdev_stats_struct_tx(tx_ring, stats); @@ -791,7 +791,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) rcu_read_lock(); for (q = 0; q < vsi->num_queue_pairs; q++) { /* locate Tx ring */ - p = ACCESS_ONCE(vsi->tx_rings[q]); + p = READ_ONCE(vsi->tx_rings[q]); do { start = u64_stats_fetch_begin_irq(&p->syncp); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index d8456c381c99..97381238eb7c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -130,7 +130,7 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) } smp_mb(); /* Force any pending update before accessing. */ - adj = ACCESS_ONCE(pf->ptp_base_adj); + adj = READ_ONCE(pf->ptp_base_adj); freq = adj; freq *= ppb; @@ -499,7 +499,7 @@ void i40e_ptp_set_increment(struct i40e_pf *pf) wr32(hw, I40E_PRTTSYN_INC_H, incval >> 32); /* Update the base adjustement value. */ - ACCESS_ONCE(pf->ptp_base_adj) = incval; + WRITE_ONCE(pf->ptp_base_adj, incval); smp_mb(); /* Force the above update. 
*/ } diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h index 58adbf234e07..31a3f09df9f7 100644 --- a/drivers/net/ethernet/intel/igb/e1000_regs.h +++ b/drivers/net/ethernet/intel/igb/e1000_regs.h @@ -375,7 +375,7 @@ u32 igb_rd32(struct e1000_hw *hw, u32 reg); /* write operations, indexed using DWORDS */ #define wr32(reg, val) \ do { \ - u8 __iomem *hw_addr = ACCESS_ONCE((hw)->hw_addr); \ + u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \ if (!E1000_REMOVED(hw_addr)) \ writel((val), &hw_addr[(reg)]); \ } while (0) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index ea69af267d63..18b6c25d4705 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -750,7 +750,7 @@ static void igb_cache_ring_register(struct igb_adapter *adapter) u32 igb_rd32(struct e1000_hw *hw, u32 reg) { struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw); - u8 __iomem *hw_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr); u32 value = 0; if (E1000_REMOVED(hw_addr)) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h index e083732adf64..a01409e2e06c 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h @@ -161,7 +161,7 @@ static inline bool ixgbe_removed(void __iomem *addr) static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); if (ixgbe_removed(reg_addr)) return; @@ -180,7 +180,7 @@ static inline void writeq(u64 val, void __iomem *addr) static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); if (ixgbe_removed(reg_addr)) return; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6d5f31e94358..935a2f15b0b0 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -380,7 +380,7 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) */ u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); u32 value; if (ixgbe_removed(reg_addr)) @@ -8624,7 +8624,7 @@ static void ixgbe_get_stats64(struct net_device *netdev, rcu_read_lock(); for (i = 0; i < adapter->num_rx_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); + struct ixgbe_ring *ring = READ_ONCE(adapter->rx_ring[i]); u64 bytes, packets; unsigned int start; @@ -8640,12 +8640,12 @@ static void ixgbe_get_stats64(struct net_device *netdev, } for (i = 0; i < adapter->num_tx_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); + struct ixgbe_ring *ring = READ_ONCE(adapter->tx_ring[i]); ixgbe_get_ring_stats64(stats, ring); } for (i = 0; i < adapter->num_xdp_queues; i++) { - struct ixgbe_ring *ring = ACCESS_ONCE(adapter->xdp_ring[i]); + struct ixgbe_ring *ring = READ_ONCE(adapter->xdp_ring[i]); ixgbe_get_ring_stats64(stats, ring); } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index 86d6924a2b71..ae312c45696a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ 
b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -378,7 +378,7 @@ static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) } smp_mb(); - incval = ACCESS_ONCE(adapter->base_incval); + incval = READ_ONCE(adapter->base_incval); freq = incval; freq *= ppb; @@ -1159,7 +1159,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) } /* update the base incval used to calculate frequency adjustment */ - ACCESS_ONCE(adapter->base_incval) = incval; + WRITE_ONCE(adapter->base_incval, incval); smp_mb(); /* need lock to prevent incorrect read while modifying cyclecounter */ diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 032f8ac06357..cacb30682434 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -164,7 +164,7 @@ static void ixgbevf_check_remove(struct ixgbe_hw *hw, u32 reg) u32 ixgbevf_read_reg(struct ixgbe_hw *hw, u32 reg) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); u32 value; if (IXGBE_REMOVED(reg_addr)) diff --git a/drivers/net/ethernet/intel/ixgbevf/vf.h b/drivers/net/ethernet/intel/ixgbevf/vf.h index 04d8d4ee4f04..c651fefcc3d2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/vf.h +++ b/drivers/net/ethernet/intel/ixgbevf/vf.h @@ -182,7 +182,7 @@ struct ixgbevf_info { static inline void ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) { - u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); + u8 __iomem *reg_addr = READ_ONCE(hw->hw_addr); if (IXGBE_REMOVED(reg_addr)) return; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 8a32a8f7f9c0..3541a7f9d12e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -414,8 +414,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, index = cons_index & size_mask; cqe = mlx4_en_get_cqe(buf, index, priv->cqe_size) + factor; - last_nr_txbb = ACCESS_ONCE(ring->last_nr_txbb); - ring_cons = ACCESS_ONCE(ring->cons); + last_nr_txbb = READ_ONCE(ring->last_nr_txbb); + ring_cons = READ_ONCE(ring->cons); ring_index = ring_cons & size_mask; stamp_index = ring_index; @@ -479,8 +479,8 @@ bool mlx4_en_process_tx_cq(struct net_device *dev, wmb(); /* we want to dirty this cache line once */ - ACCESS_ONCE(ring->last_nr_txbb) = last_nr_txbb; - ACCESS_ONCE(ring->cons) = ring_cons + txbbs_skipped; + WRITE_ONCE(ring->last_nr_txbb, last_nr_txbb); + WRITE_ONCE(ring->cons, ring_cons + txbbs_skipped); if (cq->type == TX_XDP) return done < budget; @@ -858,7 +858,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) goto tx_drop; /* fetch ring->cons far ahead before needing it to avoid stall */ - ring_cons = ACCESS_ONCE(ring->cons); + ring_cons = READ_ONCE(ring->cons); real_size = get_real_size(skb, shinfo, dev, &lso_header_size, &inline_ok, &fragptr); @@ -1066,7 +1066,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev) */ smp_rmb(); - ring_cons = ACCESS_ONCE(ring->cons); + ring_cons = READ_ONCE(ring->cons); if (unlikely(!mlx4_en_is_tx_ring_full(ring))) { netif_tx_wake_queue(ring->tx_queue); ring->wake_queue++; diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 50ea69d88480..5dd5f61e1114 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -2629,7 +2629,7 @@ static void 
vxge_poll_vp_lockup(unsigned long data) ring = &vdev->vpaths[i].ring; /* Truncated to machine word size number of frames */ - rx_frms = ACCESS_ONCE(ring->stats.rx_frms); + rx_frms = READ_ONCE(ring->stats.rx_frms); /* Did this vpath received any packets */ if (ring->stats.prev_rx_frms == rx_frms) { diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c index 9feec7009443..29fea74bff2e 100644 --- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c +++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c @@ -4725,9 +4725,9 @@ static const struct net_device_ops qlge_netdev_ops = { .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid, }; -static void ql_timer(unsigned long data) +static void ql_timer(struct timer_list *t) { - struct ql_adapter *qdev = (struct ql_adapter *)data; + struct ql_adapter *qdev = from_timer(qdev, t, timer); u32 var = 0; var = ql_read32(qdev, STS); @@ -4806,11 +4806,8 @@ static int qlge_probe(struct pci_dev *pdev, /* Start up the timer to trigger EEH if * the bus goes dead */ - init_timer_deferrable(&qdev->timer); - qdev->timer.data = (unsigned long)qdev; - qdev->timer.function = ql_timer; - qdev->timer.expires = jiffies + (5*HZ); - add_timer(&qdev->timer); + timer_setup(&qdev->timer, ql_timer, TIMER_DEFERRABLE); + mod_timer(&qdev->timer, jiffies + (5*HZ)); ql_link_off(qdev); ql_display_dev_info(ndev); atomic_set(&qdev->lb_count, 0); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 13f72f5b18d2..a95a46bcd339 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -2073,7 +2073,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) netif_vdbg(efx, intr, efx->net_dev, "IRQ %d on CPU %d\n", irq, raw_smp_processor_id()); - if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) { + if (likely(READ_ONCE(efx->irq_soft_enabled))) { /* Note test interrupts */ if (context->index == efx->irq_level) efx->last_irq_cpu = raw_smp_processor_id(); @@ -2088,7 +2088,7 @@ static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id) static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id) { struct efx_nic *efx = dev_id; - bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); + bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); struct efx_channel *channel; efx_dword_t reg; u32 queues; @@ -3291,7 +3291,7 @@ static int efx_ef10_handle_rx_event(struct efx_channel *channel, bool rx_cont; u16 flags = 0; - if (unlikely(ACCESS_ONCE(efx->reset_pending))) + if (unlikely(READ_ONCE(efx->reset_pending))) return 0; /* Basic packet information */ @@ -3428,7 +3428,7 @@ efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) unsigned int tx_ev_q_label; int tx_descs = 0; - if (unlikely(ACCESS_ONCE(efx->reset_pending))) + if (unlikely(READ_ONCE(efx->reset_pending))) return 0; if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT))) @@ -5316,7 +5316,7 @@ static void efx_ef10_filter_remove_old(struct efx_nic *efx) int i; for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) { - if (ACCESS_ONCE(table->entry[i].spec) & + if (READ_ONCE(table->entry[i].spec) & EFX_EF10_FILTER_FLAG_AUTO_OLD) { rc = efx_ef10_filter_remove_internal(efx, 1U << EFX_FILTER_PRI_AUTO, i, true); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index b9cb697b2818..016616a63880 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -2809,7 +2809,7 @@ static void efx_reset_work(struct work_struct *data) unsigned long pending; enum reset_type method; - 
pending = ACCESS_ONCE(efx->reset_pending); + pending = READ_ONCE(efx->reset_pending); method = fls(pending) - 1; if (method == RESET_TYPE_MC_BIST) @@ -2874,7 +2874,7 @@ void efx_schedule_reset(struct efx_nic *efx, enum reset_type type) /* If we're not READY then just leave the flags set as the cue * to abort probing or reschedule the reset later. */ - if (ACCESS_ONCE(efx->state) != STATE_READY) + if (READ_ONCE(efx->state) != STATE_READY) return; /* efx_process_channel() will no longer read events once a diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 29614da91cbf..7263275fde4a 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2545,7 +2545,7 @@ static void ef4_reset_work(struct work_struct *data) unsigned long pending; enum reset_type method; - pending = ACCESS_ONCE(efx->reset_pending); + pending = READ_ONCE(efx->reset_pending); method = fls(pending) - 1; if ((method == RESET_TYPE_RECOVER_OR_DISABLE || @@ -2605,7 +2605,7 @@ void ef4_schedule_reset(struct ef4_nic *efx, enum reset_type type) /* If we're not READY then just leave the flags set as the cue * to abort probing or reschedule the reset later. */ - if (ACCESS_ONCE(efx->state) != STATE_READY) + if (READ_ONCE(efx->state) != STATE_READY) return; queue_work(reset_workqueue, &efx->reset_work); diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index 93c713c1f627..cd8bb472d758 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c @@ -452,7 +452,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); - if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) + if (!likely(READ_ONCE(efx->irq_soft_enabled))) return IRQ_HANDLED; /* Check to see if we have a serious error condition */ @@ -1372,7 +1372,7 @@ static void falcon_reconfigure_mac_wrapper(struct ef4_nic *efx) ef4_oword_t reg; int link_speed, isolate; - isolate = !!ACCESS_ONCE(efx->reset_pending); + isolate = !!READ_ONCE(efx->reset_pending); switch (link_state->speed) { case 10000: link_speed = 3; break; diff --git a/drivers/net/ethernet/sfc/falcon/farch.c b/drivers/net/ethernet/sfc/falcon/farch.c index 05916c710d8c..494884f6af4a 100644 --- a/drivers/net/ethernet/sfc/falcon/farch.c +++ b/drivers/net/ethernet/sfc/falcon/farch.c @@ -834,7 +834,7 @@ ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event) struct ef4_nic *efx = channel->efx; int tx_packets = 0; - if (unlikely(ACCESS_ONCE(efx->reset_pending))) + if (unlikely(READ_ONCE(efx->reset_pending))) return 0; if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { @@ -990,7 +990,7 @@ ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event) struct ef4_rx_queue *rx_queue; struct ef4_nic *efx = channel->efx; - if (unlikely(ACCESS_ONCE(efx->reset_pending))) + if (unlikely(READ_ONCE(efx->reset_pending))) return; rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); @@ -1504,7 +1504,7 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx) irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id) { struct ef4_nic *efx = dev_id; - bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); + bool soft_enabled = READ_ONCE(efx->irq_soft_enabled); ef4_oword_t *int_ker = efx->irq_status.addr; irqreturn_t result = IRQ_NONE; struct ef4_channel *channel; @@ -1596,7 +1596,7 @@ irqreturn_t 
ef4_farch_msi_interrupt(int irq, void *dev_id) "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n", irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker)); - if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) + if (!likely(READ_ONCE(efx->irq_soft_enabled))) return IRQ_HANDLED; /* Handle non-event-queue sources */ diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h index a4c4592f6023..54ca457cdb15 100644 --- a/drivers/net/ethernet/sfc/falcon/nic.h +++ b/drivers/net/ethernet/sfc/falcon/nic.h @@ -83,7 +83,7 @@ static inline struct ef4_tx_queue *ef4_tx_queue_partner(struct ef4_tx_queue *tx_ static inline bool __ef4_nic_tx_is_empty(struct ef4_tx_queue *tx_queue, unsigned int write_count) { - unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); + unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); if (empty_read_count == 0) return false; @@ -464,11 +464,11 @@ irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx); static inline int ef4_nic_event_test_irq_cpu(struct ef4_channel *channel) { - return ACCESS_ONCE(channel->event_test_cpu); + return READ_ONCE(channel->event_test_cpu); } static inline int ef4_nic_irq_test_irq_cpu(struct ef4_nic *efx) { - return ACCESS_ONCE(efx->last_irq_cpu); + return READ_ONCE(efx->last_irq_cpu); } /* Global Resources */ diff --git a/drivers/net/ethernet/sfc/falcon/tx.c b/drivers/net/ethernet/sfc/falcon/tx.c index 6a75f4140a4b..6486814e97dc 100644 --- a/drivers/net/ethernet/sfc/falcon/tx.c +++ b/drivers/net/ethernet/sfc/falcon/tx.c @@ -134,8 +134,8 @@ static void ef4_tx_maybe_stop_queue(struct ef4_tx_queue *txq1) */ netif_tx_stop_queue(txq1->core_txq); smp_mb(); - txq1->old_read_count = ACCESS_ONCE(txq1->read_count); - txq2->old_read_count = ACCESS_ONCE(txq2->read_count); + txq1->old_read_count = READ_ONCE(txq1->read_count); + txq2->old_read_count = READ_ONCE(txq2->read_count); fill_level = max(txq1->insert_count - txq1->old_read_count, txq2->insert_count - txq2->old_read_count); @@ -524,7 +524,7 @@ void ef4_xmit_done(struct ef4_tx_queue *tx_queue, unsigned int index) /* Check whether the hardware queue is now empty */ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { - tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); + tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); if (tx_queue->read_count == tx_queue->old_write_count) { smp_mb(); tx_queue->empty_read_count = diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index ba45150f53c7..86454d25a405 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -827,7 +827,7 @@ efx_farch_handle_tx_event(struct efx_channel *channel, efx_qword_t *event) struct efx_nic *efx = channel->efx; int tx_packets = 0; - if (unlikely(ACCESS_ONCE(efx->reset_pending))) + if (unlikely(READ_ONCE(efx->reset_pending))) return 0; if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) { @@ -979,7 +979,7 @@ efx_farch_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event) struct efx_rx_queue *rx_queue; struct efx_nic *efx = channel->efx; - if (unlikely(ACCESS_ONCE(efx->reset_pending))) + if (unlikely(READ_ONCE(efx->reset_pending))) return; rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT); @@ -1520,7 +1520,7 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx) irqreturn_t efx_farch_legacy_interrupt(int irq, void *dev_id) { struct efx_nic *efx = dev_id; - bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled); + bool soft_enabled = 
READ_ONCE(efx->irq_soft_enabled); efx_oword_t *int_ker = efx->irq_status.addr; irqreturn_t result = IRQ_NONE; struct efx_channel *channel; @@ -1612,7 +1612,7 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id) "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); - if (!likely(ACCESS_ONCE(efx->irq_soft_enabled))) + if (!likely(READ_ONCE(efx->irq_soft_enabled))) return IRQ_HANDLED; /* Handle non-event-queue sources */ diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h index 4d7fb8af880d..7b51b6371724 100644 --- a/drivers/net/ethernet/sfc/nic.h +++ b/drivers/net/ethernet/sfc/nic.h @@ -81,7 +81,7 @@ static struct efx_tx_queue *efx_tx_queue_partner(struct efx_tx_queue *tx_queue) static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue, unsigned int write_count) { - unsigned int empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count); + unsigned int empty_read_count = READ_ONCE(tx_queue->empty_read_count); if (empty_read_count == 0) return false; @@ -617,11 +617,11 @@ irqreturn_t efx_farch_fatal_interrupt(struct efx_nic *efx); static inline int efx_nic_event_test_irq_cpu(struct efx_channel *channel) { - return ACCESS_ONCE(channel->event_test_cpu); + return READ_ONCE(channel->event_test_cpu); } static inline int efx_nic_irq_test_irq_cpu(struct efx_nic *efx) { - return ACCESS_ONCE(efx->last_irq_cpu); + return READ_ONCE(efx->last_irq_cpu); } /* Global Resources */ diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index 60cdb97f58e2..56c2db398def 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -658,7 +658,7 @@ static void efx_ptp_send_times(struct efx_nic *efx, /* Write host time for specified period or until MC is done */ while ((timespec64_compare(&now.ts_real, &limit) < 0) && - ACCESS_ONCE(*mc_running)) { + READ_ONCE(*mc_running)) { struct timespec64 update_time; unsigned int host_time; @@ -668,7 +668,7 @@ static void efx_ptp_send_times(struct efx_nic *efx, do { pps_get_ts(&now); } while ((timespec64_compare(&now.ts_real, &update_time) < 0) && - ACCESS_ONCE(*mc_running)); + READ_ONCE(*mc_running)); /* Synchronise NIC with single word of time only */ host_time = (now.ts_real.tv_sec << MC_NANOSECOND_BITS | @@ -832,14 +832,14 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) ptp->start.dma_addr); /* Clear flag that signals MC ready */ - ACCESS_ONCE(*start) = 0; + WRITE_ONCE(*start, 0); rc = efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf, MC_CMD_PTP_IN_SYNCHRONIZE_LEN); EFX_WARN_ON_ONCE_PARANOID(rc); /* Wait for start from MCDI (or timeout) */ timeout = jiffies + msecs_to_jiffies(MAX_SYNCHRONISE_WAIT_MS); - while (!ACCESS_ONCE(*start) && (time_before(jiffies, timeout))) { + while (!READ_ONCE(*start) && (time_before(jiffies, timeout))) { udelay(20); /* Usually start MCDI execution quickly */ loops++; } @@ -849,7 +849,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings) if (!time_before(jiffies, timeout)) ++ptp->sync_timeouts; - if (ACCESS_ONCE(*start)) + if (READ_ONCE(*start)) efx_ptp_send_times(efx, &last_time); /* Collect results */ diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index 32bf1fecf864..efb66ea21f27 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -136,8 +136,8 @@ static void efx_tx_maybe_stop_queue(struct efx_tx_queue *txq1) */ netif_tx_stop_queue(txq1->core_txq); smp_mb(); - txq1->old_read_count = 
ACCESS_ONCE(txq1->read_count); - txq2->old_read_count = ACCESS_ONCE(txq2->read_count); + txq1->old_read_count = READ_ONCE(txq1->read_count); + txq2->old_read_count = READ_ONCE(txq2->read_count); fill_level = max(txq1->insert_count - txq1->old_read_count, txq2->insert_count - txq2->old_read_count); @@ -752,7 +752,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index) /* Check whether the hardware queue is now empty */ if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) { - tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count); + tx_queue->old_write_count = READ_ONCE(tx_queue->write_count); if (tx_queue->read_count == tx_queue->old_write_count) { smp_mb(); tx_queue->empty_read_count = diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index 6a4e8e1bbd90..8ab0fb6892d5 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -6245,7 +6245,7 @@ static void niu_get_rx_stats(struct niu *np, pkts = dropped = errors = bytes = 0; - rx_rings = ACCESS_ONCE(np->rx_rings); + rx_rings = READ_ONCE(np->rx_rings); if (!rx_rings) goto no_rings; @@ -6276,7 +6276,7 @@ static void niu_get_tx_stats(struct niu *np, pkts = errors = bytes = 0; - tx_rings = ACCESS_ONCE(np->tx_rings); + tx_rings = READ_ONCE(np->tx_rings); if (!tx_rings) goto no_rings; diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c index c00102b8145a..b3e5816a4678 100644 --- a/drivers/net/ethernet/tile/tilegx.c +++ b/drivers/net/ethernet/tile/tilegx.c @@ -40,7 +40,7 @@ #include <linux/tcp.h> #include <linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> -#include <linux/tick.h> +#include <linux/sched/isolation.h> #include <asm/checksum.h> #include <asm/homecache.h> @@ -2270,8 +2270,8 @@ static int __init tile_net_init_module(void) tile_net_dev_init(name, mac); if (!network_cpus_init()) - cpumask_and(&network_cpus_map, housekeeping_cpumask(), - cpu_online_mask); + cpumask_and(&network_cpus_map, + housekeeping_cpumask(HK_FLAG_MISC), cpu_online_mask); return 0; } diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c index 49ccee4b9aec..56d06282fbde 100644 --- a/drivers/net/ethernet/tile/tilepro.c +++ b/drivers/net/ethernet/tile/tilepro.c @@ -608,9 +608,9 @@ static void tile_net_schedule_egress_timer(struct tile_net_cpu *info) * ISSUE: Maybe instead track number of expected completions, and free * only that many, resetting to zero if "pending" is ever false. */ -static void tile_net_handle_egress_timer(unsigned long arg) +static void tile_net_handle_egress_timer(struct timer_list *t) { - struct tile_net_cpu *info = (struct tile_net_cpu *)arg; + struct tile_net_cpu *info = from_timer(info, t, egress_timer); struct net_device *dev = info->napi.dev; /* The timer is no longer scheduled. */ @@ -1004,9 +1004,8 @@ static void tile_net_register(void *dev_ptr) BUG(); /* Initialize the egress timer. 
*/ - init_timer_pinned(&info->egress_timer); - info->egress_timer.data = (long)info; - info->egress_timer.function = tile_net_handle_egress_timer; + timer_setup(&info->egress_timer, tile_net_handle_egress_timer, + TIMER_PINNED); u64_stats_init(&info->stats.syncp); diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c index 7a7c5224a336..104f71fa9c5e 100644 --- a/drivers/net/hamradio/yam.c +++ b/drivers/net/hamradio/yam.c @@ -157,7 +157,7 @@ static struct net_device *yam_devs[NR_PORTS]; static struct yam_mcs *yam_data; -static DEFINE_TIMER(yam_timer, NULL, 0, 0); +static DEFINE_TIMER(yam_timer, NULL); /* --------------------------------------------------------------------- */ diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 6c0c84c33e1f..b13890953ebb 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -257,7 +257,7 @@ static struct tap_queue *tap_get_queue(struct tap_dev *tap, * and validate that the result isn't NULL - in case we are * racing against queue removal. */ - int numvtaps = ACCESS_ONCE(tap->numvtaps); + int numvtaps = READ_ONCE(tap->numvtaps); __u32 rxq; if (!numvtaps) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 42bb820a56c9..c1685a6d7883 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -469,7 +469,7 @@ static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb, u32 numqueues = 0; rcu_read_lock(); - numqueues = ACCESS_ONCE(tun->numqueues); + numqueues = READ_ONCE(tun->numqueues); txq = __skb_get_hash_symmetric(skb); if (txq) { @@ -864,7 +864,7 @@ static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) rcu_read_lock(); tfile = rcu_dereference(tun->tfiles[txq]); - numqueues = ACCESS_ONCE(tun->numqueues); + numqueues = READ_ONCE(tun->numqueues); /* Drop packet if interface is not attached */ if (txq >= numqueues) diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index d7c49cf1d5e9..3247d2feda07 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -2325,9 +2325,9 @@ static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev) } /* Walk the forwarding table and purge stale entries */ -static void vxlan_cleanup(unsigned long arg) +static void vxlan_cleanup(struct timer_list *t) { - struct vxlan_dev *vxlan = (struct vxlan_dev *) arg; + struct vxlan_dev *vxlan = from_timer(vxlan, t, age_timer); unsigned long next_timer = jiffies + FDB_AGE_INTERVAL; unsigned int h; @@ -2647,9 +2647,7 @@ static void vxlan_setup(struct net_device *dev) INIT_LIST_HEAD(&vxlan->next); spin_lock_init(&vxlan->hash_lock); - init_timer_deferrable(&vxlan->age_timer); - vxlan->age_timer.function = vxlan_cleanup; - vxlan->age_timer.data = (unsigned long) vxlan; + timer_setup(&vxlan->age_timer, vxlan_cleanup, TIMER_DEFERRABLE); vxlan->dev = dev; diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index a408abc25512..f4b0ab34f048 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -276,8 +276,6 @@ static void cisco_timer(unsigned long arg) spin_unlock(&st->lock); st->timer.expires = jiffies + st->settings.interval * HZ; - st->timer.function = cisco_timer; - st->timer.data = arg; add_timer(&st->timer); } diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 78596e42a3f3..07f265fa2826 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -644,8 +644,6 @@ static void fr_timer(unsigned long arg) state(hdlc)->settings.t391 * HZ; } - state(hdlc)->timer.function = fr_timer; - state(hdlc)->timer.data = arg; 
add_timer(&state(hdlc)->timer); } diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c index bd8d4392d68b..80f75139495f 100644 --- a/drivers/net/wireless/ath/ath5k/desc.c +++ b/drivers/net/wireless/ath/ath5k/desc.c @@ -500,13 +500,13 @@ ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah, tx_status = &desc->ud.ds_tx5212.tx_stat; - txstat1 = ACCESS_ONCE(tx_status->tx_status_1); + txstat1 = READ_ONCE(tx_status->tx_status_1); /* No frame has been send or error */ if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE))) return -EINPROGRESS; - txstat0 = ACCESS_ONCE(tx_status->tx_status_0); + txstat0 = READ_ONCE(tx_status->tx_status_0); /* * Get descriptor status @@ -700,14 +700,14 @@ ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah, u32 rxstat0, rxstat1; rx_status = &desc->ud.ds_rx.rx_stat; - rxstat1 = ACCESS_ONCE(rx_status->rx_status_1); + rxstat1 = READ_ONCE(rx_status->rx_status_1); /* No frame received / not ready */ if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE))) return -EINPROGRESS; memset(rs, 0, sizeof(struct ath5k_rx_status)); - rxstat0 = ACCESS_ONCE(rx_status->rx_status_0); + rxstat0 = READ_ONCE(rx_status->rx_status_0); /* * Frame receive status diff --git a/drivers/net/wireless/ath/ath6kl/recovery.c b/drivers/net/wireless/ath/ath6kl/recovery.c index 3a8d5e97dc8e..c09e40c9010f 100644 --- a/drivers/net/wireless/ath/ath6kl/recovery.c +++ b/drivers/net/wireless/ath/ath6kl/recovery.c @@ -60,9 +60,9 @@ void ath6kl_recovery_hb_event(struct ath6kl *ar, u32 cookie) ar->fw_recovery.hb_pending = false; } -static void ath6kl_recovery_hb_timer(unsigned long data) +static void ath6kl_recovery_hb_timer(struct timer_list *t) { - struct ath6kl *ar = (struct ath6kl *) data; + struct ath6kl *ar = from_timer(ar, t, fw_recovery.hb_timer); int err; if (test_bit(RECOVERY_CLEANUP, &ar->flag) || @@ -104,9 +104,8 @@ void ath6kl_recovery_init(struct ath6kl *ar) recovery->seq_num = 0; recovery->hb_misscnt = 0; ar->fw_recovery.hb_pending = false; - ar->fw_recovery.hb_timer.function = ath6kl_recovery_hb_timer; - ar->fw_recovery.hb_timer.data = (unsigned long) ar; - init_timer_deferrable(&ar->fw_recovery.hb_timer); + timer_setup(&ar->fw_recovery.hb_timer, ath6kl_recovery_hb_timer, + TIMER_DEFERRABLE); if (ar->fw_recovery.hb_poll) mod_timer(&ar->fw_recovery.hb_timer, jiffies + diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 94bf01f8b2a8..ede89d4ffc88 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -519,7 +519,7 @@ exit: /* LED trigger */ static int tx_activity; static void at76_ledtrig_tx_timerfunc(unsigned long data); -static DEFINE_TIMER(ledtrig_tx_timer, at76_ledtrig_tx_timerfunc, 0, 0); +static DEFINE_TIMER(ledtrig_tx_timer, at76_ledtrig_tx_timerfunc); DEFINE_LED_TRIGGER(ledtrig_tx); static void at76_ledtrig_tx_timerfunc(unsigned long data) diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 613caca7dc02..785a0f33b7e6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -3628,7 +3628,7 @@ static void brcmf_sdio_dataworker(struct work_struct *work) bus->dpc_running = true; wmb(); - while (ACCESS_ONCE(bus->dpc_triggered)) { + while (READ_ONCE(bus->dpc_triggered)) { bus->dpc_triggered = false; brcmf_sdio_dpc(bus); bus->idlecount = 0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c 
b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index 231878969332..0f45f34e39d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -1118,7 +1118,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state) static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - bool calibrating = ACCESS_ONCE(mvm->calibrating); + bool calibrating = READ_ONCE(mvm->calibrating); if (state) set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 6f2e2af23219..6e9d3289b9d0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -652,7 +652,7 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) return -1; } else if (info.control.vif->type == NL80211_IFTYPE_STATION && is_multicast_ether_addr(hdr->addr1)) { - u8 ap_sta_id = ACCESS_ONCE(mvmvif->ap_sta_id); + u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); if (ap_sta_id != IWL_MVM_INVALID_STA) sta_id = ap_sta_id; @@ -700,7 +700,7 @@ static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb, snap_ip_tcp = 8 + skb_transport_header(skb) - skb_network_header(skb) + tcp_hdrlen(skb); - dbg_max_amsdu_len = ACCESS_ONCE(mvm->max_amsdu_len); + dbg_max_amsdu_len = READ_ONCE(mvm->max_amsdu_len); if (!sta->max_amsdu_len || !ieee80211_is_data_qos(hdr->frame_control) || diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index a06b6612b658..f25ce3a1ea50 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1247,7 +1247,7 @@ restart: spin_lock(&rxq->lock); /* uCode's read index (stored in shared DRAM) indicates the last Rx * buffer that the driver may process (last buffer filled by ucode). 
*/ - r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; + r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; i = rxq->read; /* W/A 9000 device step A0 wrap-around bug */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 2e3e013ec95a..9ad3f4fe5894 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -2076,12 +2076,12 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); txq = trans_pcie->txq[txq_idx]; - wr_ptr = ACCESS_ONCE(txq->write_ptr); + wr_ptr = READ_ONCE(txq->write_ptr); - while (txq->read_ptr != ACCESS_ONCE(txq->write_ptr) && + while (txq->read_ptr != READ_ONCE(txq->write_ptr) && !time_after(jiffies, now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) { - u8 write_ptr = ACCESS_ONCE(txq->write_ptr); + u8 write_ptr = READ_ONCE(txq->write_ptr); if (WARN_ONCE(wr_ptr != write_ptr, "WR pointer moved while flushing %d -> %d\n", @@ -2553,7 +2553,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, spin_lock(&rxq->lock); - r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; + r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; for (i = rxq->read, j = 0; i != r && j < allocated_rb_nums; @@ -2814,7 +2814,7 @@ static struct iwl_trans_dump_data /* Dump RBs is supported only for pre-9000 devices (1 queue) */ struct iwl_rxq *rxq = &trans_pcie->rxq[0]; /* RBs */ - num_rbs = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) + num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; len += num_rbs * (sizeof(*data) + diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index 6467ffac9811..d2b3d6177a55 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -1380,7 +1380,7 @@ static void mac80211_hwsim_tx(struct ieee80211_hw *hw, mac80211_hwsim_monitor_rx(hw, skb, channel); /* wmediumd mode check */ - _portid = ACCESS_ONCE(data->wmediumd); + _portid = READ_ONCE(data->wmediumd); if (_portid) return mac80211_hwsim_tx_frame_nl(hw, skb, _portid); @@ -1477,7 +1477,7 @@ static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, struct ieee80211_channel *chan) { struct mac80211_hwsim_data *data = hw->priv; - u32 _pid = ACCESS_ONCE(data->wmediumd); + u32 _pid = READ_ONCE(data->wmediumd); if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) { struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); diff --git a/drivers/nubus/nubus.c b/drivers/nubus/nubus.c index 8ce69c833362..b793727cd4f7 100644 --- a/drivers/nubus/nubus.c +++ b/drivers/nubus/nubus.c @@ -19,11 +19,6 @@ #include <asm/setup.h> #include <asm/page.h> #include <asm/hwtest.h> -#include <asm/mac_via.h> -#include <asm/mac_oss.h> - -extern void via_nubus_init(void); -extern void oss_nubus_init(void); /* Constants */ @@ -841,14 +836,6 @@ static int __init nubus_init(void) if (!MACH_IS_MAC) return 0; - /* Initialize the NuBus interrupts */ - if (oss_present) { - oss_nubus_init(); - } else { - via_nubus_init(); - } - - /* And probe */ pr_info("NuBus: Scanning NuBus slots.\n"); nubus_devices = NULL; nubus_boards = NULL; diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c index 74cc6dd982d2..2d1a5c737c6e 100644 --- a/drivers/parport/ieee1284.c +++ b/drivers/parport/ieee1284.c @@ -44,10 +44,11 @@ static void 
parport_ieee1284_wakeup (struct parport *port) up (&port->physport->ieee1284.irq); } -static struct parport *port_from_cookie[PARPORT_MAX]; -static void timeout_waiting_on_port (unsigned long cookie) +static void timeout_waiting_on_port (struct timer_list *t) { - parport_ieee1284_wakeup (port_from_cookie[cookie % PARPORT_MAX]); + struct parport *port = from_timer(port, t, timer); + + parport_ieee1284_wakeup (port); } /** @@ -69,27 +70,19 @@ static void timeout_waiting_on_port (unsigned long cookie) int parport_wait_event (struct parport *port, signed long timeout) { int ret; - struct timer_list timer; if (!port->physport->cad->timeout) /* Zero timeout is special, and we can't down() the semaphore. */ return 1; - init_timer_on_stack(&timer); - timer.expires = jiffies + timeout; - timer.function = timeout_waiting_on_port; - port_from_cookie[port->number % PARPORT_MAX] = port; - timer.data = port->number; - - add_timer (&timer); + timer_setup(&port->timer, timeout_waiting_on_port, 0); + mod_timer(&port->timer, jiffies + timeout); ret = down_interruptible (&port->physport->ieee1284.irq); - if (!del_timer_sync(&timer) && !ret) + if (!del_timer_sync(&port->timer) && !ret) /* Timed out. */ ret = 1; - destroy_timer_on_stack(&timer); - return ret; } diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c index 496ed9130600..e06607167858 100644 --- a/drivers/pci/msi.c +++ b/drivers/pci/msi.c @@ -1441,6 +1441,8 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode, pci_msi_domain_update_chip_ops(info); info->flags |= MSI_FLAG_ACTIVATE_EARLY; + if (IS_ENABLED(CONFIG_GENERIC_IRQ_RESERVATION_MODE)) + info->flags |= MSI_FLAG_MUST_REACTIVATE; domain = msi_create_irq_domain(fwnode, info, parent); if (!domain) diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c index 0802e0bc7d0c..16f573173471 100644 --- a/drivers/pcmcia/bcm63xx_pcmcia.c +++ b/drivers/pcmcia/bcm63xx_pcmcia.c @@ -263,12 +263,12 @@ static int bcm63xx_pcmcia_get_status(struct pcmcia_socket *sock, /* * socket polling timer callback */ -static void bcm63xx_pcmcia_poll(unsigned long data) +static void bcm63xx_pcmcia_poll(struct timer_list *t) { struct bcm63xx_pcmcia_socket *skt; unsigned int stat, events; - skt = (struct bcm63xx_pcmcia_socket *)data; + skt = from_timer(skt, t, timer); spin_lock_bh(&skt->lock); @@ -392,7 +392,7 @@ static int bcm63xx_drv_pcmcia_probe(struct platform_device *pdev) sock->map_size = resource_size(skt->common_res); /* initialize polling timer */ - setup_timer(&skt->timer, bcm63xx_pcmcia_poll, (unsigned long)skt); + timer_setup(&skt->timer, bcm63xx_pcmcia_poll, 0); /* initialize pcmcia control register, drive VS[12] to 0, * leave CB IDSEL to the old value since it is set by the PCI diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c index 8b0923fd76c6..00a296d431ba 100644 --- a/drivers/pcmcia/bfin_cf_pcmcia.c +++ b/drivers/pcmcia/bfin_cf_pcmcia.c @@ -86,9 +86,9 @@ static int bfin_cf_ss_init(struct pcmcia_socket *s) } /* the timer is primarily to kick this socket's pccardd */ -static void bfin_cf_timer(unsigned long _cf) +static void bfin_cf_timer(struct timer_list *t) { - struct bfin_cf_socket *cf = (void *)_cf; + struct bfin_cf_socket *cf = from_timer(cf, t, timer); unsigned short present = bfin_cf_present(cf->cd_pfx); if (present != cf->present) { @@ -227,7 +227,7 @@ static int bfin_cf_probe(struct platform_device *pdev) cf->cd_pfx = cd_pfx; - setup_timer(&cf->timer, bfin_cf_timer, (unsigned long)cf); + timer_setup(&cf->timer, bfin_cf_timer, 0); 
cf->pdev = pdev; platform_set_drvdata(pdev, cf); diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c index fb38cc01859f..891ccea2cccb 100644 --- a/drivers/pcmcia/i82365.c +++ b/drivers/pcmcia/i82365.c @@ -875,7 +875,7 @@ static irqreturn_t pcic_interrupt(int irq, void *dev) return IRQ_RETVAL(handled); } /* pcic_interrupt */ -static void pcic_interrupt_wrapper(u_long data) +static void pcic_interrupt_wrapper(struct timer_list *unused) { pcic_interrupt(0, NULL); poll_timer.expires = jiffies + poll_interval; @@ -1289,9 +1289,7 @@ static int __init init_i82365(void) /* Finally, schedule a polling interrupt */ if (poll_interval != 0) { - poll_timer.function = pcic_interrupt_wrapper; - poll_timer.data = 0; - init_timer(&poll_timer); + timer_setup(&poll_timer, pcic_interrupt_wrapper, 0); poll_timer.expires = jiffies + poll_interval; add_timer(&poll_timer); } diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c index 4e2f501e5548..c2a17a79f0b2 100644 --- a/drivers/pcmcia/omap_cf.c +++ b/drivers/pcmcia/omap_cf.c @@ -80,9 +80,9 @@ static int omap_cf_ss_init(struct pcmcia_socket *s) } /* the timer is primarily to kick this socket's pccardd */ -static void omap_cf_timer(unsigned long _cf) +static void omap_cf_timer(struct timer_list *t) { - struct omap_cf_socket *cf = (void *) _cf; + struct omap_cf_socket *cf = from_timer(cf, t, timer); unsigned present = omap_cf_present(); if (present != cf->present) { @@ -102,7 +102,9 @@ static void omap_cf_timer(unsigned long _cf) */ static irqreturn_t omap_cf_irq(int irq, void *_cf) { - omap_cf_timer((unsigned long)_cf); + struct omap_cf_socket *cf = (struct omap_cf_socket *)_cf; + + omap_cf_timer(&cf->timer); return IRQ_HANDLED; } @@ -220,7 +222,7 @@ static int __init omap_cf_probe(struct platform_device *pdev) cf = kzalloc(sizeof *cf, GFP_KERNEL); if (!cf) return -ENOMEM; - setup_timer(&cf->timer, omap_cf_timer, (unsigned long)cf); + timer_setup(&cf->timer, omap_cf_timer, 0); cf->pdev = pdev; platform_set_drvdata(pdev, cf); diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c index 0f70b4d58f9e..959ae3e65ef8 100644 --- a/drivers/pcmcia/pd6729.c +++ b/drivers/pcmcia/pd6729.c @@ -234,9 +234,9 @@ static irqreturn_t pd6729_interrupt(int irq, void *dev) /* socket functions */ -static void pd6729_interrupt_wrapper(unsigned long data) +static void pd6729_interrupt_wrapper(struct timer_list *t) { - struct pd6729_socket *socket = (struct pd6729_socket *) data; + struct pd6729_socket *socket = from_timer(socket, t, poll_timer); pd6729_interrupt(0, (void *)socket); mod_timer(&socket->poll_timer, jiffies + HZ); @@ -707,8 +707,7 @@ static int pd6729_pci_probe(struct pci_dev *dev, } } else { /* poll Card status change */ - setup_timer(&socket->poll_timer, pd6729_interrupt_wrapper, - (unsigned long)socket); + timer_setup(&socket->poll_timer, pd6729_interrupt_wrapper, 0); mod_timer(&socket->poll_timer, jiffies + HZ); } diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c index b6b316de055c..764650eb8897 100644 --- a/drivers/pcmcia/soc_common.c +++ b/drivers/pcmcia/soc_common.c @@ -456,9 +456,9 @@ static void soc_common_check_status(struct soc_pcmcia_socket *skt) } /* Let's poll for events in addition to IRQs since IRQ only is unreliable... 
*/ -static void soc_common_pcmcia_poll_event(unsigned long dummy) +static void soc_common_pcmcia_poll_event(struct timer_list *t) { - struct soc_pcmcia_socket *skt = (struct soc_pcmcia_socket *)dummy; + struct soc_pcmcia_socket *skt = from_timer(skt, t, poll_timer); debug(skt, 4, "polling for events\n"); mod_timer(&skt->poll_timer, jiffies + SOC_PCMCIA_POLL_PERIOD); @@ -794,8 +794,7 @@ int soc_pcmcia_add_one(struct soc_pcmcia_socket *skt) skt->cs_state = dead_socket; - setup_timer(&skt->poll_timer, soc_common_pcmcia_poll_event, - (unsigned long)skt); + timer_setup(&skt->poll_timer, soc_common_pcmcia_poll_event, 0); skt->poll_timer.expires = jiffies + SOC_PCMCIA_POLL_PERIOD; ret = request_resource(&iomem_resource, &skt->res_skt); diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c index a1ac72d51d70..1a0e3f098759 100644 --- a/drivers/pcmcia/tcic.c +++ b/drivers/pcmcia/tcic.c @@ -98,7 +98,7 @@ module_param(cycle_time, int, 0444); /*====================================================================*/ static irqreturn_t tcic_interrupt(int irq, void *dev); -static void tcic_timer(u_long data); +static void tcic_timer(struct timer_list *unused); static struct pccard_operations tcic_operations; struct tcic_socket { @@ -435,9 +435,7 @@ static int __init init_tcic(void) } /* Set up polling */ - poll_timer.function = &tcic_timer; - poll_timer.data = 0; - init_timer(&poll_timer); + timer_setup(&poll_timer, &tcic_timer, 0); /* Build interrupt mask */ printk(KERN_CONT ", %d sockets\n", sockets); @@ -583,7 +581,7 @@ static irqreturn_t tcic_interrupt(int irq, void *dev) return IRQ_HANDLED; } /* tcic_interrupt */ -static void tcic_timer(u_long data) +static void tcic_timer(struct timer_list *unused) { pr_debug("tcic_timer()\n"); tcic_timer_pending = 0; diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c index 5d6d9b1549bc..ab3da2262f0f 100644 --- a/drivers/pcmcia/yenta_socket.c +++ b/drivers/pcmcia/yenta_socket.c @@ -534,9 +534,9 @@ static irqreturn_t yenta_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static void yenta_interrupt_wrapper(unsigned long data) +static void yenta_interrupt_wrapper(struct timer_list *t) { - struct yenta_socket *socket = (struct yenta_socket *) data; + struct yenta_socket *socket = from_timer(socket, t, poll_timer); yenta_interrupt(0, (void *)socket); socket->poll_timer.expires = jiffies + HZ; @@ -1233,8 +1233,7 @@ static int yenta_probe(struct pci_dev *dev, const struct pci_device_id *id) if (!socket->cb_irq || request_irq(socket->cb_irq, yenta_interrupt, IRQF_SHARED, "yenta", socket)) { /* No IRQ or request_irq failed. Poll */ socket->cb_irq = 0; /* But zero is a valid IRQ number. 
*/ - setup_timer(&socket->poll_timer, yenta_interrupt_wrapper, - (unsigned long)socket); + timer_setup(&socket->poll_timer, yenta_interrupt_wrapper, 0); mod_timer(&socket->poll_timer, jiffies + HZ); dev_info(&dev->dev, "no PCI IRQ, CardBus support disabled for this socket.\n"); diff --git a/drivers/pinctrl/stm32/pinctrl-stm32.c b/drivers/pinctrl/stm32/pinctrl-stm32.c index 50299ad96659..02b66588cac6 100644 --- a/drivers/pinctrl/stm32/pinctrl-stm32.c +++ b/drivers/pinctrl/stm32/pinctrl-stm32.c @@ -289,13 +289,14 @@ static int stm32_gpio_domain_translate(struct irq_domain *d, return 0; } -static void stm32_gpio_domain_activate(struct irq_domain *d, - struct irq_data *irq_data) +static int stm32_gpio_domain_activate(struct irq_domain *d, + struct irq_data *irq_data, bool early) { struct stm32_gpio_bank *bank = d->host_data; struct stm32_pinctrl *pctl = dev_get_drvdata(bank->gpio_chip.parent); regmap_field_write(pctl->irqmux[irq_data->hwirq], bank->bank_nr); + return 0; } static int stm32_gpio_domain_alloc(struct irq_domain *d, diff --git a/drivers/ras/cec.c b/drivers/ras/cec.c index adbf1a9e089e..ca44e6977cf2 100644 --- a/drivers/ras/cec.c +++ b/drivers/ras/cec.c @@ -169,11 +169,9 @@ static void cec_mod_timer(struct timer_list *t, unsigned long interval) mod_timer(t, round_jiffies(iv)); } -static void cec_timer_fn(unsigned long data) +static void cec_timer_fn(struct timer_list *unused) { - struct ce_array *ca = (struct ce_array *)data; - - do_spring_cleaning(ca); + do_spring_cleaning(&ce_arr); cec_mod_timer(&cec_timer, timer_interval); } @@ -510,7 +508,7 @@ void __init cec_init(void) if (create_debugfs_nodes()) return; - setup_timer(&cec_timer, cec_timer_fn, (unsigned long)&ce_arr); + timer_setup(&cec_timer, cec_timer_fn, 0); cec_mod_timer(&cec_timer, CEC_TIMER_DEFAULT_INTERVAL); pr_info("Correctable Errors collector initialized.\n"); diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index 0fd6195601ba..96cd55f9e3c5 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig @@ -244,7 +244,7 @@ config REGULATOR_DA9210 interface. 
config REGULATOR_DA9211 - tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9214/DA9215 regulator" + tristate "Dialog Semiconductor DA9211/DA9212/DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 regulator" depends on I2C select REGMAP_I2C help diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 376a99b7cf5d..181622b2813d 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -244,6 +244,7 @@ static const struct regulator_desc axp22x_drivevbus_regulator = { .ops = &axp20x_ops_sw, }; +/* DCDC ranges shared with AXP813 */ static const struct regulator_linear_range axp803_dcdc234_ranges[] = { REGULATOR_LINEAR_RANGE(500000, 0x0, 0x46, 10000), REGULATOR_LINEAR_RANGE(1220000, 0x47, 0x4b, 20000), @@ -426,6 +427,69 @@ static const struct regulator_desc axp809_regulators[] = { AXP_DESC_SW(AXP809, SW, "sw", "swin", AXP22X_PWR_OUT_CTRL2, BIT(6)), }; +static const struct regulator_desc axp813_regulators[] = { + AXP_DESC(AXP813, DCDC1, "dcdc1", "vin1", 1600, 3400, 100, + AXP803_DCDC1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL1, BIT(0)), + AXP_DESC_RANGES(AXP813, DCDC2, "dcdc2", "vin2", axp803_dcdc234_ranges, + 76, AXP803_DCDC2_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1, + BIT(1)), + AXP_DESC_RANGES(AXP813, DCDC3, "dcdc3", "vin3", axp803_dcdc234_ranges, + 76, AXP803_DCDC3_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1, + BIT(2)), + AXP_DESC_RANGES(AXP813, DCDC4, "dcdc4", "vin4", axp803_dcdc234_ranges, + 76, AXP803_DCDC4_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1, + BIT(3)), + AXP_DESC_RANGES(AXP813, DCDC5, "dcdc5", "vin5", axp803_dcdc5_ranges, + 68, AXP803_DCDC5_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1, + BIT(4)), + AXP_DESC_RANGES(AXP813, DCDC6, "dcdc6", "vin6", axp803_dcdc6_ranges, + 72, AXP803_DCDC6_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1, + BIT(5)), + AXP_DESC_RANGES(AXP813, DCDC7, "dcdc7", "vin7", axp803_dcdc6_ranges, + 72, AXP813_DCDC7_V_OUT, 0x7f, AXP22X_PWR_OUT_CTRL1, + BIT(6)), + AXP_DESC(AXP813, ALDO1, "aldo1", "aldoin", 700, 3300, 100, + AXP22X_ALDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(5)), + AXP_DESC(AXP813, ALDO2, "aldo2", "aldoin", 700, 3300, 100, + AXP22X_ALDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(6)), + AXP_DESC(AXP813, ALDO3, "aldo3", "aldoin", 700, 3300, 100, + AXP22X_ALDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL3, BIT(7)), + AXP_DESC(AXP813, DLDO1, "dldo1", "dldoin", 700, 3300, 100, + AXP22X_DLDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(3)), + AXP_DESC_RANGES(AXP813, DLDO2, "dldo2", "dldoin", axp803_dldo2_ranges, + 32, AXP22X_DLDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, + BIT(4)), + AXP_DESC(AXP813, DLDO3, "dldo3", "dldoin", 700, 3300, 100, + AXP22X_DLDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(5)), + AXP_DESC(AXP813, DLDO4, "dldo4", "dldoin", 700, 3300, 100, + AXP22X_DLDO4_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(6)), + AXP_DESC(AXP813, ELDO1, "eldo1", "eldoin", 700, 1900, 50, + AXP22X_ELDO1_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(0)), + AXP_DESC(AXP813, ELDO2, "eldo2", "eldoin", 700, 1900, 50, + AXP22X_ELDO2_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(1)), + AXP_DESC(AXP813, ELDO3, "eldo3", "eldoin", 700, 1900, 50, + AXP22X_ELDO3_V_OUT, 0x1f, AXP22X_PWR_OUT_CTRL2, BIT(2)), + /* to do / check ... 
*/ + AXP_DESC(AXP813, FLDO1, "fldo1", "fldoin", 700, 1450, 50, + AXP803_FLDO1_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(2)), + AXP_DESC(AXP813, FLDO2, "fldo2", "fldoin", 700, 1450, 50, + AXP803_FLDO2_V_OUT, 0x0f, AXP22X_PWR_OUT_CTRL3, BIT(3)), + /* + * TODO: FLDO3 = {DCDC5, FLDOIN} / 2 + * + * This means FLDO3 effectively switches supplies at runtime, + * something the regulator subsystem does not support. + */ + AXP_DESC_FIXED(AXP813, RTC_LDO, "rtc-ldo", "ips", 1800), + AXP_DESC_IO(AXP813, LDO_IO0, "ldo-io0", "ips", 700, 3300, 100, + AXP22X_LDO_IO0_V_OUT, 0x1f, AXP20X_GPIO0_CTRL, 0x07, + AXP22X_IO_ENABLED, AXP22X_IO_DISABLED), + AXP_DESC_IO(AXP813, LDO_IO1, "ldo-io1", "ips", 700, 3300, 100, + AXP22X_LDO_IO1_V_OUT, 0x1f, AXP20X_GPIO1_CTRL, 0x07, + AXP22X_IO_ENABLED, AXP22X_IO_DISABLED), + AXP_DESC_SW(AXP813, SW, "sw", "swin", AXP22X_PWR_OUT_CTRL2, BIT(7)), +}; + static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) { struct axp20x_dev *axp20x = dev_get_drvdata(pdev->dev.parent); @@ -441,9 +505,10 @@ static int axp20x_set_dcdc_freq(struct platform_device *pdev, u32 dcdcfreq) step = 75; break; case AXP803_ID: + case AXP813_ID: /* - * AXP803 DCDC work frequency setting has the same range and - * step as AXP22X, but at a different register. + * AXP803/AXP813 DCDC work frequency setting has the same + * range and step as AXP22X, but at a different register. * Fall through to the check below. * (See include/linux/mfd/axp20x.h) */ @@ -561,6 +626,14 @@ static int axp20x_set_dcdc_workmode(struct regulator_dev *rdev, int id, u32 work workmode <<= id - AXP803_DCDC1; break; + case AXP813_ID: + if (id < AXP813_DCDC1 || id > AXP813_DCDC7) + return -EINVAL; + + mask = AXP22X_WORKMODE_DCDCX_MASK(id - AXP813_DCDC1); + workmode <<= id - AXP813_DCDC1; + break; + default: /* should not happen */ WARN_ON(1); @@ -579,11 +652,12 @@ static bool axp20x_is_polyphase_slave(struct axp20x_dev *axp20x, int id) u32 reg = 0; /* - * Currently in our supported AXP variants, only AXP803 and AXP806 - * have polyphase regulators. + * Currently in our supported AXP variants, only AXP803, AXP806, + * and AXP813 have polyphase regulators. */ switch (axp20x->variant) { case AXP803_ID: + case AXP813_ID: regmap_read(axp20x->regmap, AXP803_POLYPHASE_CTRL, ®); switch (id) { @@ -656,6 +730,12 @@ static int axp20x_regulator_probe(struct platform_device *pdev) regulators = axp809_regulators; nregulators = AXP809_REG_ID_MAX; break; + case AXP813_ID: + regulators = axp813_regulators; + nregulators = AXP813_REG_ID_MAX; + drivevbus = of_property_read_bool(pdev->dev.parent->of_node, + "x-powers,drive-vbus-en"); + break; default: dev_err(&pdev->dev, "Unsupported AXP variant: %ld\n", axp20x->variant); @@ -677,6 +757,10 @@ static int axp20x_regulator_probe(struct platform_device *pdev) if (axp20x_is_polyphase_slave(axp20x, i)) continue; + /* Support for AXP813's FLDO3 is not implemented */ + if (axp20x->variant == AXP813_ID && i == AXP813_FLDO3) + continue; + /* * Regulators DC1SW and DC5LDO are connected internally, * so we have to handle their supply names separately. diff --git a/drivers/regulator/da9211-regulator.c b/drivers/regulator/da9211-regulator.c index aa47280efd32..9b8f47617724 100644 --- a/drivers/regulator/da9211-regulator.c +++ b/drivers/regulator/da9211-regulator.c @@ -1,6 +1,6 @@ /* * da9211-regulator.c - Regulator device driver for DA9211/DA9212 - * /DA9213/DA9214/DA9215 + * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 * Copyright (C) 2015 Dialog Semiconductor Ltd. 
* * This library is free software; you can redistribute it and/or @@ -496,8 +496,11 @@ static const struct i2c_device_id da9211_i2c_id[] = { {"da9211", DA9211}, {"da9212", DA9212}, {"da9213", DA9213}, + {"da9223", DA9223}, {"da9214", DA9214}, + {"da9224", DA9224}, {"da9215", DA9215}, + {"da9225", DA9225}, {}, }; MODULE_DEVICE_TABLE(i2c, da9211_i2c_id); @@ -507,8 +510,11 @@ static const struct of_device_id da9211_dt_ids[] = { { .compatible = "dlg,da9211", .data = &da9211_i2c_id[0] }, { .compatible = "dlg,da9212", .data = &da9211_i2c_id[1] }, { .compatible = "dlg,da9213", .data = &da9211_i2c_id[2] }, - { .compatible = "dlg,da9214", .data = &da9211_i2c_id[3] }, - { .compatible = "dlg,da9215", .data = &da9211_i2c_id[4] }, + { .compatible = "dlg,da9223", .data = &da9211_i2c_id[3] }, + { .compatible = "dlg,da9214", .data = &da9211_i2c_id[4] }, + { .compatible = "dlg,da9224", .data = &da9211_i2c_id[5] }, + { .compatible = "dlg,da9215", .data = &da9211_i2c_id[6] }, + { .compatible = "dlg,da9225", .data = &da9211_i2c_id[7] }, {}, }; MODULE_DEVICE_TABLE(of, da9211_dt_ids); @@ -526,5 +532,5 @@ static struct i2c_driver da9211_regulator_driver = { module_i2c_driver(da9211_regulator_driver); MODULE_AUTHOR("James Ban <James.Ban.opensource@diasemi.com>"); -MODULE_DESCRIPTION("DA9211/DA9212/DA9213/DA9214/DA9215 regulator driver"); +MODULE_DESCRIPTION("DA9211/DA9212/DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 regulator driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/regulator/da9211-regulator.h b/drivers/regulator/da9211-regulator.h index b841bbf330cc..2cb32aab4f82 100644 --- a/drivers/regulator/da9211-regulator.h +++ b/drivers/regulator/da9211-regulator.h @@ -1,6 +1,6 @@ /* * da9211-regulator.h - Regulator definitions for DA9211/DA9212 - * /DA9213/DA9214/DA9215 + * /DA9213/DA9223/DA9214/DA9224/DA9215/DA9225 * Copyright (C) 2015 Dialog Semiconductor Ltd. 
* * This program is free software; you can redistribute it and/or diff --git a/drivers/regulator/pbias-regulator.c b/drivers/regulator/pbias-regulator.c index 0cb76ba29e84..8f782d22fdbe 100644 --- a/drivers/regulator/pbias-regulator.c +++ b/drivers/regulator/pbias-regulator.c @@ -34,6 +34,8 @@ struct pbias_reg_info { u32 vmode; unsigned int enable_time; char *name; + const unsigned int *pbias_volt_table; + int n_voltages; }; struct pbias_regulator_data { @@ -49,11 +51,16 @@ struct pbias_of_data { unsigned int offset; }; -static const unsigned int pbias_volt_table[] = { +static const unsigned int pbias_volt_table_3_0V[] = { 1800000, 3000000 }; +static const unsigned int pbias_volt_table_3_3V[] = { + 1800000, + 3300000 +}; + static const struct regulator_ops pbias_regulator_voltage_ops = { .list_voltage = regulator_list_voltage_table, .get_voltage_sel = regulator_get_voltage_sel_regmap, @@ -69,6 +76,8 @@ static const struct pbias_reg_info pbias_mmc_omap2430 = { .vmode = BIT(0), .disable_val = 0, .enable_time = 100, + .pbias_volt_table = pbias_volt_table_3_0V, + .n_voltages = 2, .name = "pbias_mmc_omap2430" }; @@ -77,6 +86,8 @@ static const struct pbias_reg_info pbias_sim_omap3 = { .enable_mask = BIT(9), .vmode = BIT(8), .enable_time = 100, + .pbias_volt_table = pbias_volt_table_3_0V, + .n_voltages = 2, .name = "pbias_sim_omap3" }; @@ -86,6 +97,8 @@ static const struct pbias_reg_info pbias_mmc_omap4 = { .disable_val = BIT(25), .vmode = BIT(21), .enable_time = 100, + .pbias_volt_table = pbias_volt_table_3_0V, + .n_voltages = 2, .name = "pbias_mmc_omap4" }; @@ -95,6 +108,8 @@ static const struct pbias_reg_info pbias_mmc_omap5 = { .disable_val = BIT(25), .vmode = BIT(21), .enable_time = 100, + .pbias_volt_table = pbias_volt_table_3_3V, + .n_voltages = 2, .name = "pbias_mmc_omap5" }; @@ -199,8 +214,8 @@ static int pbias_regulator_probe(struct platform_device *pdev) drvdata[data_idx].desc.owner = THIS_MODULE; drvdata[data_idx].desc.type = REGULATOR_VOLTAGE; drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops; - drvdata[data_idx].desc.volt_table = pbias_volt_table; - drvdata[data_idx].desc.n_voltages = 2; + drvdata[data_idx].desc.volt_table = info->pbias_volt_table; + drvdata[data_idx].desc.n_voltages = info->n_voltages; drvdata[data_idx].desc.enable_time = info->enable_time; drvdata[data_idx].desc.vsel_reg = offset; drvdata[data_idx].desc.vsel_mask = info->vmode; diff --git a/drivers/regulator/qcom_spmi-regulator.c b/drivers/regulator/qcom_spmi-regulator.c index 16c5f84e06a7..0241ada47d04 100644 --- a/drivers/regulator/qcom_spmi-regulator.c +++ b/drivers/regulator/qcom_spmi-regulator.c @@ -593,13 +593,20 @@ static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg, u8 *voltage_sel) { const struct spmi_voltage_range *range, *end; + unsigned offset; range = vreg->set_points->range; end = range + vreg->set_points->count; for (; range < end; range++) { if (selector < range->n_voltages) { - *voltage_sel = selector; + /* + * hardware selectors between set point min and real + * min are invalid so we ignore them + */ + offset = range->set_point_min_uV - range->min_uV; + offset /= range->step_uV; + *voltage_sel = selector + offset; *range_sel = range->range_sel; return 0; } @@ -613,15 +620,35 @@ static int spmi_sw_selector_to_hw(struct spmi_regulator *vreg, static int spmi_hw_selector_to_sw(struct spmi_regulator *vreg, u8 hw_sel, const struct spmi_voltage_range *range) { - int sw_sel = hw_sel; + unsigned sw_sel = 0; + unsigned offset, max_hw_sel; const struct spmi_voltage_range *r = 
vreg->set_points->range; - - while (r != range) { + const struct spmi_voltage_range *end = r + vreg->set_points->count; + + for (; r < end; r++) { + if (r == range && range->n_voltages) { + /* + * hardware selectors between set point min and real + * min and between set point max and real max are + * invalid so we return an error if they're + * programmed into the hardware + */ + offset = range->set_point_min_uV - range->min_uV; + offset /= range->step_uV; + if (hw_sel < offset) + return -EINVAL; + + max_hw_sel = range->set_point_max_uV - range->min_uV; + max_hw_sel /= range->step_uV; + if (hw_sel > max_hw_sel) + return -EINVAL; + + return sw_sel + hw_sel - offset; + } sw_sel += r->n_voltages; - r++; } - return sw_sel; + return -EINVAL; } static const struct spmi_voltage_range * @@ -1619,11 +1646,20 @@ static const struct spmi_regulator_data pm8994_regulators[] = { { } }; +static const struct spmi_regulator_data pmi8994_regulators[] = { + { "s1", 0x1400, "vdd_s1", }, + { "s2", 0x1700, "vdd_s2", }, + { "s3", 0x1a00, "vdd_s3", }, + { "l1", 0x4000, "vdd_l1", }, + { } +}; + static const struct of_device_id qcom_spmi_regulator_match[] = { { .compatible = "qcom,pm8841-regulators", .data = &pm8841_regulators }, { .compatible = "qcom,pm8916-regulators", .data = &pm8916_regulators }, { .compatible = "qcom,pm8941-regulators", .data = &pm8941_regulators }, { .compatible = "qcom,pm8994-regulators", .data = &pm8994_regulators }, + { .compatible = "qcom,pmi8994-regulators", .data = &pmi8994_regulators }, { } }; MODULE_DEVICE_TABLE(of, qcom_spmi_regulator_match); diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c index 9aafbb03482d..bc489958fed7 100644 --- a/drivers/regulator/tps65218-regulator.c +++ b/drivers/regulator/tps65218-regulator.c @@ -154,7 +154,7 @@ static int tps65218_pmic_set_suspend_disable(struct regulator_dev *dev) if (!tps->strobes[rid]) { if (rid == TPS65218_DCDC_3) - tps->info[rid]->strobe = 3; + tps->strobes[rid] = 3; else return -EINVAL; } diff --git a/drivers/rtc/class.c b/drivers/rtc/class.c index 2ed970d61da1..722d683e0b0f 100644 --- a/drivers/rtc/class.c +++ b/drivers/rtc/class.c @@ -161,6 +161,9 @@ static struct rtc_device *rtc_allocate_device(void) device_initialize(&rtc->dev); + /* Drivers can revise this default after allocating the device. */ + rtc->set_offset_nsec = NSEC_PER_SEC / 2; + rtc->irq_freq = 1; rtc->max_user_freq = 64; rtc->dev.class = rtc_class; diff --git a/drivers/rtc/systohc.c b/drivers/rtc/systohc.c index b4a68ffcd06b..0c177647ea6c 100644 --- a/drivers/rtc/systohc.c +++ b/drivers/rtc/systohc.c @@ -10,6 +10,7 @@ /** * rtc_set_ntp_time - Save NTP synchronized time to the RTC * @now: Current time of day + * @target_nsec: pointer for desired now->tv_nsec value * * Replacement for the NTP platform function update_persistent_clock64 * that stores time for later retrieval by rtc_hctosys. @@ -18,30 +19,52 @@ * possible at all, and various other -errno for specific temporary failure * cases. * + * -EPROTO is returned if now.tv_nsec is not close enough to *target_nsec. 
* If temporary failure is indicated the caller should try again 'soon' */ -int rtc_set_ntp_time(struct timespec64 now) +int rtc_set_ntp_time(struct timespec64 now, unsigned long *target_nsec) { struct rtc_device *rtc; struct rtc_time tm; + struct timespec64 to_set; int err = -ENODEV; - - if (now.tv_nsec < (NSEC_PER_SEC >> 1)) - rtc_time64_to_tm(now.tv_sec, &tm); - else - rtc_time64_to_tm(now.tv_sec + 1, &tm); + bool ok; rtc = rtc_class_open(CONFIG_RTC_SYSTOHC_DEVICE); - if (rtc) { - /* rtc_hctosys exclusively uses UTC, so we call set_time here, - * not set_mmss. */ - if (rtc->ops && - (rtc->ops->set_time || - rtc->ops->set_mmss64 || - rtc->ops->set_mmss)) - err = rtc_set_time(rtc, &tm); - rtc_class_close(rtc); + if (!rtc) + goto out_err; + + if (!rtc->ops || (!rtc->ops->set_time && !rtc->ops->set_mmss64 && + !rtc->ops->set_mmss)) + goto out_close; + + /* Compute the value of tv_nsec we require the caller to supply in + * now.tv_nsec. This is the value such that (now + + * set_offset_nsec).tv_nsec == 0. + */ + set_normalized_timespec64(&to_set, 0, -rtc->set_offset_nsec); + *target_nsec = to_set.tv_nsec; + + /* The ntp code must call this with the correct value in tv_nsec, if + * it does not we update target_nsec and return EPROTO to make the ntp + * code try again later. + */ + ok = rtc_tv_nsec_ok(rtc->set_offset_nsec, &to_set, &now); + if (!ok) { + err = -EPROTO; + goto out_close; } + rtc_time64_to_tm(to_set.tv_sec, &tm); + + /* rtc_hctosys exclusively uses UTC, so we call set_time here, not + * set_mmss. + */ + err = rtc_set_time(rtc, &tm); + +out_close: + rtc_class_close(rtc); +out_err: return err; } diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c index 4630782b5456..a7917d473774 100644 --- a/drivers/s390/block/dasd_eer.c +++ b/drivers/s390/block/dasd_eer.c @@ -296,7 +296,7 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device, { struct dasd_ccw_req *temp_cqr; int data_size; - struct timeval tv; + struct timespec64 ts; struct dasd_eer_header header; unsigned long flags; struct eerbuffer *eerb; @@ -310,9 +310,9 @@ static void dasd_eer_write_standard_trigger(struct dasd_device *device, header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ header.trigger = trigger; - do_gettimeofday(&tv); - header.tv_sec = tv.tv_sec; - header.tv_usec = tv.tv_usec; + ktime_get_real_ts64(&ts); + header.tv_sec = ts.tv_sec; + header.tv_usec = ts.tv_nsec / NSEC_PER_USEC; strncpy(header.busid, dev_name(&device->cdev->dev), DASD_EER_BUSID_SIZE); @@ -340,7 +340,7 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device, { int data_size; int snss_rc; - struct timeval tv; + struct timespec64 ts; struct dasd_eer_header header; unsigned long flags; struct eerbuffer *eerb; @@ -353,9 +353,9 @@ static void dasd_eer_write_snss_trigger(struct dasd_device *device, header.total_size = sizeof(header) + data_size + 4; /* "EOR" */ header.trigger = DASD_EER_STATECHANGE; - do_gettimeofday(&tv); - header.tv_sec = tv.tv_sec; - header.tv_usec = tv.tv_usec; + ktime_get_real_ts64(&ts); + header.tv_sec = ts.tv_sec; + header.tv_usec = ts.tv_nsec / NSEC_PER_USEC; strncpy(header.busid, dev_name(&device->cdev->dev), DASD_EER_BUSID_SIZE); diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index e94080a5196f..b095a23bcc0c 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h @@ -96,14 +96,6 @@ do { \ d_data); \ } while(0) -#define DBF_DEV_EXC(d_level, d_device, d_str, d_data...) 
\ -do { \ - debug_sprintf_exception(d_device->debug_area, \ - d_level, \ - d_str "\n", \ - d_data); \ -} while(0) - #define DBF_EVENT(d_level, d_str, d_data...)\ do { \ debug_sprintf_event(dasd_debug_area, \ @@ -122,14 +114,6 @@ do { \ __dev_id.ssid, __dev_id.devno, d_data); \ } while (0) -#define DBF_EXC(d_level, d_str, d_data...)\ -do { \ - debug_sprintf_exception(dasd_debug_area, \ - d_level,\ - d_str "\n", \ - d_data); \ -} while(0) - /* limit size for an errorstring */ #define ERRORLENGTH 30 diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h index aa42c3a2c90a..a05a4297cfae 100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h @@ -56,13 +56,7 @@ extern debug_info_t *scm_debug; static inline void SCM_LOG_HEX(int level, void *data, int length) { - if (!debug_level_enabled(scm_debug, level)) - return; - while (length > 0) { - debug_event(scm_debug, level, data, length); - length -= scm_debug->buf_size; - data += scm_debug->buf_size; - } + debug_event(scm_debug, level, data, length); } static inline void SCM_LOG_STATE(int level, struct scm_device *scmdev) diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index d247f238faf8..7027e61a6931 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c @@ -211,11 +211,8 @@ sclp_console_write(struct console *console, const char *message, /* Setup timer to output current console buffer after 1/10 second */ if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 && !timer_pending(&sclp_con_timer)) { - init_timer(&sclp_con_timer); - sclp_con_timer.function = sclp_console_timeout; - sclp_con_timer.data = 0UL; - sclp_con_timer.expires = jiffies + HZ/10; - add_timer(&sclp_con_timer); + setup_timer(&sclp_con_timer, sclp_console_timeout, 0UL); + mod_timer(&sclp_con_timer, jiffies + HZ / 10); } out: spin_unlock_irqrestore(&sclp_con_lock, flags); diff --git a/drivers/s390/char/sclp_tty.c b/drivers/s390/char/sclp_tty.c index 875628dab419..1cceefdc03e0 100644 --- a/drivers/s390/char/sclp_tty.c +++ b/drivers/s390/char/sclp_tty.c @@ -218,11 +218,8 @@ static int sclp_tty_write_string(const unsigned char *str, int count, int may_fa /* Setup timer to output current console buffer after 1/10 second */ if (sclp_ttybuf && sclp_chars_in_buffer(sclp_ttybuf) && !timer_pending(&sclp_tty_timer)) { - init_timer(&sclp_tty_timer); - sclp_tty_timer.function = sclp_tty_timeout; - sclp_tty_timer.data = 0UL; - sclp_tty_timer.expires = jiffies + HZ/10; - add_timer(&sclp_tty_timer); + setup_timer(&sclp_tty_timer, sclp_tty_timeout, 0UL); + mod_timer(&sclp_tty_timer, jiffies + HZ / 10); } spin_unlock_irqrestore(&sclp_tty_lock, flags); out: diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index 997b25f6e4cc..8bec5f9ea92c 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h @@ -129,6 +129,7 @@ struct tape_request { int options; /* options for execution. */ int retries; /* retry counter for error recovery. */ int rescnt; /* residual count from devstat. */ + struct timer_list timer; /* timer for std_assign_timeout(). */ /* Callback for delivering final status. 
*/ void (*callback)(struct tape_request *, void *); diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c index 91c3c642c76e..e7d23048d3f0 100644 --- a/drivers/s390/char/tape_class.c +++ b/drivers/s390/char/tape_class.c @@ -68,9 +68,8 @@ struct tape_class_device *register_tape_dev( tcd->char_device->owner = fops->owner; tcd->char_device->ops = fops; - tcd->char_device->dev = dev; - rc = cdev_add(tcd->char_device, tcd->char_device->dev, 1); + rc = cdev_add(tcd->char_device, dev, 1); if (rc) goto fail_with_cdev; diff --git a/drivers/s390/char/tape_std.c b/drivers/s390/char/tape_std.c index 7caba0cc8b2a..1f5fab617b67 100644 --- a/drivers/s390/char/tape_std.c +++ b/drivers/s390/char/tape_std.c @@ -33,14 +33,12 @@ * tape_std_assign */ static void -tape_std_assign_timeout(unsigned long data) +tape_std_assign_timeout(struct timer_list *t) { - struct tape_request * request; - struct tape_device * device; + struct tape_request * request = from_timer(request, t, timer); + struct tape_device * device = request->device; int rc; - request = (struct tape_request *) data; - device = request->device; BUG_ON(!device); DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n", @@ -71,16 +69,12 @@ tape_std_assign(struct tape_device *device) * to another host (actually this shouldn't happen but it does). * So we set up a timeout for this call. */ - init_timer_on_stack(&timeout); - timeout.function = tape_std_assign_timeout; - timeout.data = (unsigned long) request; - timeout.expires = jiffies + 2 * HZ; - add_timer(&timeout); + timer_setup(&request->timer, tape_std_assign_timeout, 0); + mod_timer(&request->timer, jiffies + 2 * HZ); rc = tape_do_io_interruptible(device, request); - del_timer_sync(&timeout); - destroy_timer_on_stack(&timeout); + del_timer_sync(&request->timer); if (rc != 0) { DBF_EVENT(3, "%08x: assign failed - device might be busy\n", diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index b19020b9efff..62559dc0169f 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c @@ -812,8 +812,7 @@ static int vmlogrdr_register_cdev(dev_t dev) } vmlogrdr_cdev->owner = THIS_MODULE; vmlogrdr_cdev->ops = &vmlogrdr_fops; - vmlogrdr_cdev->dev = dev; - rc = cdev_add(vmlogrdr_cdev, vmlogrdr_cdev->dev, MAXMINOR); + rc = cdev_add(vmlogrdr_cdev, dev, MAXMINOR); if (!rc) return 0; diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 04aceb694d51..fa90ef05afc0 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c @@ -110,7 +110,7 @@ static struct urdev *urdev_alloc(struct ccw_device *cdev) mutex_init(&urd->io_mutex); init_waitqueue_head(&urd->wait); spin_lock_init(&urd->open_lock); - atomic_set(&urd->ref_count, 1); + refcount_set(&urd->ref_count, 1); urd->cdev = cdev; get_device(&cdev->dev); return urd; @@ -126,7 +126,7 @@ static void urdev_free(struct urdev *urd) static void urdev_get(struct urdev *urd) { - atomic_inc(&urd->ref_count); + refcount_inc(&urd->ref_count); } static struct urdev *urdev_get_from_cdev(struct ccw_device *cdev) @@ -159,7 +159,7 @@ static struct urdev *urdev_get_from_devno(u16 devno) static void urdev_put(struct urdev *urd) { - if (atomic_dec_and_test(&urd->ref_count)) + if (refcount_dec_and_test(&urd->ref_count)) urdev_free(urd); } @@ -892,10 +892,9 @@ static int ur_set_online(struct ccw_device *cdev) } urd->char_device->ops = &ur_fops; - urd->char_device->dev = MKDEV(major, minor); urd->char_device->owner = ur_fops.owner; - rc = cdev_add(urd->char_device, urd->char_device->dev, 1); + rc = 
cdev_add(urd->char_device, MKDEV(major, minor), 1); if (rc) goto fail_free_cdev; if (urd->cdev->id.cu_type == READER_PUNCH_DEVTYPE) { @@ -946,7 +945,7 @@ static int ur_set_offline_force(struct ccw_device *cdev, int force) rc = -EBUSY; goto fail_urdev_put; } - if (!force && (atomic_read(&urd->ref_count) > 2)) { + if (!force && (refcount_read(&urd->ref_count) > 2)) { /* There is still a user of urd (e.g. ur_open) */ TRACE("ur_set_offline: BUSY\n"); rc = -EBUSY; diff --git a/drivers/s390/char/vmur.h b/drivers/s390/char/vmur.h index 67164ba22f11..608b0719ce17 100644 --- a/drivers/s390/char/vmur.h +++ b/drivers/s390/char/vmur.h @@ -12,6 +12,8 @@ #ifndef _VMUR_H_ #define _VMUR_H_ +#include <linux/refcount.h> + #define DEV_CLASS_UR_I 0x20 /* diag210 unit record input device class */ #define DEV_CLASS_UR_O 0x10 /* diag210 unit record output device class */ /* @@ -70,7 +72,7 @@ struct urdev { size_t reclen; /* Record length for *write* CCWs */ int class; /* VM device class */ int io_request_rc; /* return code from I/O request */ - atomic_t ref_count; /* reference counter */ + refcount_t ref_count; /* reference counter */ wait_queue_head_t wait; /* wait queue to serialize open */ int open_flag; /* "urdev is open" flag */ spinlock_t open_lock; /* serialize critical sections */ diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 34b9ad6b3143..e2f7b6e93efd 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c @@ -373,6 +373,12 @@ int ccwgroup_create_dev(struct device *parent, struct ccwgroup_driver *gdrv, rc = -EINVAL; goto error; } + /* Check if the devices are bound to the required ccw driver. */ + if (gdev->count && gdrv && gdrv->ccw_driver && + gdev->cdev[0]->drv != gdrv->ccw_driver) { + rc = -EINVAL; + goto error; + } dev_set_name(&gdev->dev, "%s", dev_name(&gdev->cdev[0]->dev)); gdev->dev.groups = ccwgroup_attr_groups; diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 735052ecd3e5..8e7e19b9e92c 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c @@ -43,11 +43,7 @@ static DEFINE_MUTEX(on_close_mutex); static void CHSC_LOG_HEX(int level, void *data, int length) { - while (length > 0) { - debug_event(chsc_debug_log_id, level, data, length); - length -= chsc_debug_log_id->buf_size; - data += chsc_debug_log_id->buf_size; - } + debug_event(chsc_debug_log_id, level, data, length); } MODULE_AUTHOR("IBM Corporation"); diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h index fa817efcec8f..7bdbe73707c2 100644 --- a/drivers/s390/cio/cio_debug.h +++ b/drivers/s390/cio/cio_debug.h @@ -23,13 +23,7 @@ extern debug_info_t *cio_debug_crw_id; static inline void CIO_HEX_EVENT(int level, void *data, int length) { - if (unlikely(!cio_debug_trace_id)) - return; - while (length > 0) { - debug_event(cio_debug_trace_id, level, data, length); - length -= cio_debug_trace_id->buf_size; - data += cio_debug_trace_id->buf_size; - } + debug_event(cio_debug_trace_id, level, data, length); } #endif diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 220491d27ef4..7d59230e88bb 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c @@ -58,8 +58,9 @@ /* indices for READCMB */ enum cmb_index { + avg_utilization = -1, /* basic and exended format: */ - cmb_ssch_rsch_count, + cmb_ssch_rsch_count = 0, cmb_sample_count, cmb_device_connect_time, cmb_function_pending_time, @@ -215,71 +216,52 @@ struct set_schib_struct { unsigned long address; wait_queue_head_t wait; int ret; - struct kref kref; }; 
-static void cmf_set_schib_release(struct kref *kref) -{ - struct set_schib_struct *set_data; - - set_data = container_of(kref, struct set_schib_struct, kref); - kfree(set_data); -} - #define CMF_PENDING 1 +#define SET_SCHIB_TIMEOUT (10 * HZ) static int set_schib_wait(struct ccw_device *cdev, u32 mme, - int mbfc, unsigned long address) + int mbfc, unsigned long address) { - struct set_schib_struct *set_data; - int ret; + struct set_schib_struct set_data; + int ret = -ENODEV; spin_lock_irq(cdev->ccwlock); - if (!cdev->private->cmb) { - ret = -ENODEV; + if (!cdev->private->cmb) goto out; - } - set_data = kzalloc(sizeof(struct set_schib_struct), GFP_ATOMIC); - if (!set_data) { - ret = -ENOMEM; - goto out; - } - init_waitqueue_head(&set_data->wait); - kref_init(&set_data->kref); - set_data->mme = mme; - set_data->mbfc = mbfc; - set_data->address = address; ret = set_schib(cdev, mme, mbfc, address); if (ret != -EBUSY) - goto out_put; + goto out; - if (cdev->private->state != DEV_STATE_ONLINE) { - /* if the device is not online, don't even try again */ - ret = -EBUSY; - goto out_put; - } + /* if the device is not online, don't even try again */ + if (cdev->private->state != DEV_STATE_ONLINE) + goto out; - cdev->private->state = DEV_STATE_CMFCHANGE; - set_data->ret = CMF_PENDING; - cdev->private->cmb_wait = set_data; + init_waitqueue_head(&set_data.wait); + set_data.mme = mme; + set_data.mbfc = mbfc; + set_data.address = address; + set_data.ret = CMF_PENDING; + cdev->private->state = DEV_STATE_CMFCHANGE; + cdev->private->cmb_wait = &set_data; spin_unlock_irq(cdev->ccwlock); - if (wait_event_interruptible(set_data->wait, - set_data->ret != CMF_PENDING)) { - spin_lock_irq(cdev->ccwlock); - if (set_data->ret == CMF_PENDING) { - set_data->ret = -ERESTARTSYS; + + ret = wait_event_interruptible_timeout(set_data.wait, + set_data.ret != CMF_PENDING, + SET_SCHIB_TIMEOUT); + spin_lock_irq(cdev->ccwlock); + if (ret <= 0) { + if (set_data.ret == CMF_PENDING) { + set_data.ret = (ret == 0) ? -ETIME : ret; if (cdev->private->state == DEV_STATE_CMFCHANGE) cdev->private->state = DEV_STATE_ONLINE; } - spin_unlock_irq(cdev->ccwlock); } - spin_lock_irq(cdev->ccwlock); cdev->private->cmb_wait = NULL; - ret = set_data->ret; -out_put: - kref_put(&set_data->kref, cmf_set_schib_release); + ret = set_data.ret; out: spin_unlock_irq(cdev->ccwlock); return ret; @@ -287,28 +269,21 @@ out: void retry_set_schib(struct ccw_device *cdev) { - struct set_schib_struct *set_data; + struct set_schib_struct *set_data = cdev->private->cmb_wait; - set_data = cdev->private->cmb_wait; - if (!set_data) { - WARN_ON(1); + if (!set_data) return; - } - kref_get(&set_data->kref); + set_data->ret = set_schib(cdev, set_data->mme, set_data->mbfc, set_data->address); wake_up(&set_data->wait); - kref_put(&set_data->kref, cmf_set_schib_release); } static int cmf_copy_block(struct ccw_device *cdev) { - struct subchannel *sch; - void *reference_buf; - void *hw_block; + struct subchannel *sch = to_subchannel(cdev->dev.parent); struct cmb_data *cmb_data; - - sch = to_subchannel(cdev->dev.parent); + void *hw_block; if (cio_update_schib(sch)) return -ENODEV; @@ -323,102 +298,65 @@ static int cmf_copy_block(struct ccw_device *cdev) } cmb_data = cdev->private->cmb; hw_block = cmb_data->hw_block; - if (!memcmp(cmb_data->last_block, hw_block, cmb_data->size)) - /* No need to copy. */ - return 0; - reference_buf = kzalloc(cmb_data->size, GFP_ATOMIC); - if (!reference_buf) - return -ENOMEM; - /* Ensure consistency of block copied from hardware. 
*/ - do { - memcpy(cmb_data->last_block, hw_block, cmb_data->size); - memcpy(reference_buf, hw_block, cmb_data->size); - } while (memcmp(cmb_data->last_block, reference_buf, cmb_data->size)); + memcpy(cmb_data->last_block, hw_block, cmb_data->size); cmb_data->last_update = get_tod_clock(); - kfree(reference_buf); return 0; } struct copy_block_struct { wait_queue_head_t wait; int ret; - struct kref kref; }; -static void cmf_copy_block_release(struct kref *kref) -{ - struct copy_block_struct *copy_block; - - copy_block = container_of(kref, struct copy_block_struct, kref); - kfree(copy_block); -} - static int cmf_cmb_copy_wait(struct ccw_device *cdev) { - struct copy_block_struct *copy_block; - int ret; - unsigned long flags; + struct copy_block_struct copy_block; + int ret = -ENODEV; - spin_lock_irqsave(cdev->ccwlock, flags); - if (!cdev->private->cmb) { - ret = -ENODEV; - goto out; - } - copy_block = kzalloc(sizeof(struct copy_block_struct), GFP_ATOMIC); - if (!copy_block) { - ret = -ENOMEM; + spin_lock_irq(cdev->ccwlock); + if (!cdev->private->cmb) goto out; - } - init_waitqueue_head(©_block->wait); - kref_init(©_block->kref); ret = cmf_copy_block(cdev); if (ret != -EBUSY) - goto out_put; + goto out; - if (cdev->private->state != DEV_STATE_ONLINE) { - ret = -EBUSY; - goto out_put; - } + if (cdev->private->state != DEV_STATE_ONLINE) + goto out; + + init_waitqueue_head(©_block.wait); + copy_block.ret = CMF_PENDING; cdev->private->state = DEV_STATE_CMFUPDATE; - copy_block->ret = CMF_PENDING; - cdev->private->cmb_wait = copy_block; + cdev->private->cmb_wait = ©_block; + spin_unlock_irq(cdev->ccwlock); - spin_unlock_irqrestore(cdev->ccwlock, flags); - if (wait_event_interruptible(copy_block->wait, - copy_block->ret != CMF_PENDING)) { - spin_lock_irqsave(cdev->ccwlock, flags); - if (copy_block->ret == CMF_PENDING) { - copy_block->ret = -ERESTARTSYS; + ret = wait_event_interruptible(copy_block.wait, + copy_block.ret != CMF_PENDING); + spin_lock_irq(cdev->ccwlock); + if (ret) { + if (copy_block.ret == CMF_PENDING) { + copy_block.ret = -ERESTARTSYS; if (cdev->private->state == DEV_STATE_CMFUPDATE) cdev->private->state = DEV_STATE_ONLINE; } - spin_unlock_irqrestore(cdev->ccwlock, flags); } - spin_lock_irqsave(cdev->ccwlock, flags); cdev->private->cmb_wait = NULL; - ret = copy_block->ret; -out_put: - kref_put(©_block->kref, cmf_copy_block_release); + ret = copy_block.ret; out: - spin_unlock_irqrestore(cdev->ccwlock, flags); + spin_unlock_irq(cdev->ccwlock); return ret; } void cmf_retry_copy_block(struct ccw_device *cdev) { - struct copy_block_struct *copy_block; + struct copy_block_struct *copy_block = cdev->private->cmb_wait; - copy_block = cdev->private->cmb_wait; - if (!copy_block) { - WARN_ON(1); + if (!copy_block) return; - } - kref_get(©_block->kref); + copy_block->ret = cmf_copy_block(cdev); wake_up(©_block->wait); - kref_put(©_block->kref, cmf_copy_block_release); } static void cmf_generic_reset(struct ccw_device *cdev) @@ -650,25 +588,44 @@ static int set_cmb(struct ccw_device *cdev, u32 mme) return set_schib_wait(cdev, mme, 0, offset); } +/* calculate utilization in 0.1 percent units */ +static u64 __cmb_utilization(u64 device_connect_time, u64 function_pending_time, + u64 device_disconnect_time, u64 start_time) +{ + u64 utilization, elapsed_time; + + utilization = time_to_nsec(device_connect_time + + function_pending_time + + device_disconnect_time); + + elapsed_time = get_tod_clock() - start_time; + elapsed_time = tod_to_ns(elapsed_time); + elapsed_time /= 1000; + + return elapsed_time ? 
(utilization / elapsed_time) : 0; +} + static u64 read_cmb(struct ccw_device *cdev, int index) { + struct cmb_data *cmb_data; + unsigned long flags; struct cmb *cmb; + u64 ret = 0; u32 val; - int ret; - unsigned long flags; - - ret = cmf_cmb_copy_wait(cdev); - if (ret < 0) - return 0; spin_lock_irqsave(cdev->ccwlock, flags); - if (!cdev->private->cmb) { - ret = 0; + cmb_data = cdev->private->cmb; + if (!cmb_data) goto out; - } - cmb = ((struct cmb_data *)cdev->private->cmb)->last_block; + cmb = cmb_data->hw_block; switch (index) { + case avg_utilization: + ret = __cmb_utilization(cmb->device_connect_time, + cmb->function_pending_time, + cmb->device_disconnect_time, + cdev->private->cmb_start_time); + goto out; case cmb_ssch_rsch_count: ret = cmb->ssch_rsch_count; goto out; @@ -691,7 +648,6 @@ static u64 read_cmb(struct ccw_device *cdev, int index) val = cmb->device_active_only_time; break; default: - ret = 0; goto out; } ret = time_to_avg_nsec(val, cmb->sample_count); @@ -729,8 +685,7 @@ static int readall_cmb(struct ccw_device *cdev, struct cmbdata *data) /* we only know values before device_busy_time */ data->size = offsetof(struct cmbdata, device_busy_time); - /* convert to nanoseconds */ - data->elapsed_time = (time * 1000) >> 12; + data->elapsed_time = tod_to_ns(time); /* copy data to new structure */ data->ssch_rsch_count = cmb->ssch_rsch_count; @@ -904,28 +859,27 @@ static int set_cmbe(struct ccw_device *cdev, u32 mme) return set_schib_wait(cdev, mme, 1, mba); } - static u64 read_cmbe(struct ccw_device *cdev, int index) { - struct cmbe *cmb; struct cmb_data *cmb_data; - u32 val; - int ret; unsigned long flags; - - ret = cmf_cmb_copy_wait(cdev); - if (ret < 0) - return 0; + struct cmbe *cmb; + u64 ret = 0; + u32 val; spin_lock_irqsave(cdev->ccwlock, flags); cmb_data = cdev->private->cmb; - if (!cmb_data) { - ret = 0; + if (!cmb_data) goto out; - } - cmb = cmb_data->last_block; + cmb = cmb_data->hw_block; switch (index) { + case avg_utilization: + ret = __cmb_utilization(cmb->device_connect_time, + cmb->function_pending_time, + cmb->device_disconnect_time, + cdev->private->cmb_start_time); + goto out; case cmb_ssch_rsch_count: ret = cmb->ssch_rsch_count; goto out; @@ -954,7 +908,6 @@ static u64 read_cmbe(struct ccw_device *cdev, int index) val = cmb->initial_command_response_time; break; default: - ret = 0; goto out; } ret = time_to_avg_nsec(val, cmb->sample_count); @@ -991,8 +944,7 @@ static int readall_cmbe(struct ccw_device *cdev, struct cmbdata *data) /* we only know values before device_busy_time */ data->size = offsetof(struct cmbdata, device_busy_time); - /* conver to nanoseconds */ - data->elapsed_time = (time * 1000) >> 12; + data->elapsed_time = tod_to_ns(time); cmb = cmb_data->last_block; /* copy data to new structure */ @@ -1045,19 +997,15 @@ static ssize_t cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr, char *buf) { - struct ccw_device *cdev; - long interval; + struct ccw_device *cdev = to_ccwdev(dev); unsigned long count; - struct cmb_data *cmb_data; + long interval; - cdev = to_ccwdev(dev); count = cmf_read(cdev, cmb_sample_count); spin_lock_irq(cdev->ccwlock); - cmb_data = cdev->private->cmb; if (count) { - interval = cmb_data->last_update - - cdev->private->cmb_start_time; - interval = (interval * 1000) >> 12; + interval = get_tod_clock() - cdev->private->cmb_start_time; + interval = tod_to_ns(interval); interval /= count; } else interval = -1; @@ -1069,27 +1017,9 @@ static ssize_t cmb_show_avg_utilization(struct device *dev, struct 
device_attribute *attr, char *buf) { - struct cmbdata data; - u64 utilization; - unsigned long t, u; - int ret; - - ret = cmf_readall(to_ccwdev(dev), &data); - if (ret == -EAGAIN || ret == -ENODEV) - /* No data (yet/currently) available to use for calculation. */ - return sprintf(buf, "n/a\n"); - else if (ret) - return ret; - - utilization = data.device_connect_time + - data.function_pending_time + - data.device_disconnect_time; - - /* calculate value in 0.1 percent units */ - t = data.elapsed_time / 1000; - u = utilization / t; + unsigned long u = cmf_read(to_ccwdev(dev), avg_utilization); - return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10); + return sprintf(buf, "%02lu.%01lu%%\n", u / 10, u % 10); } #define cmf_attr(name) \ diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c index 0f11f3bcac82..d14795f7110b 100644 --- a/drivers/s390/cio/eadm_sch.c +++ b/drivers/s390/cio/eadm_sch.c @@ -43,13 +43,7 @@ static debug_info_t *eadm_debug; static void EADM_LOG_HEX(int level, void *data, int length) { - if (!debug_level_enabled(eadm_debug, level)) - return; - while (length > 0) { - debug_event(eadm_debug, level, data, length); - length -= eadm_debug->buf_size; - data += eadm_debug->buf_size; - } + debug_event(eadm_debug, level, data, length); } static void orb_init(union orb *orb) diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h index e06496ab0036..f85f5fa7cefc 100644 --- a/drivers/s390/cio/qdio_debug.h +++ b/drivers/s390/cio/qdio_debug.h @@ -34,11 +34,7 @@ extern debug_info_t *qdio_dbf_error; static inline void DBF_HEX(void *addr, int len) { - while (len > 0) { - debug_event(qdio_dbf_setup, DBF_ERR, addr, len); - len -= qdio_dbf_setup->buf_size; - addr += qdio_dbf_setup->buf_size; - } + debug_event(qdio_dbf_setup, DBF_ERR, addr, len); } #define DBF_ERROR(text...) \ @@ -50,11 +46,7 @@ static inline void DBF_HEX(void *addr, int len) static inline void DBF_ERROR_HEX(void *addr, int len) { - while (len > 0) { - debug_event(qdio_dbf_error, DBF_ERR, addr, len); - len -= qdio_dbf_error->buf_size; - addr += qdio_dbf_error->buf_size; - } + debug_event(qdio_dbf_error, DBF_ERR, addr, len); } #define DBF_DEV_EVENT(level, device, text...) 
\ @@ -69,11 +61,7 @@ static inline void DBF_ERROR_HEX(void *addr, int len) static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr, int len, int level) { - while (len > 0) { - debug_event(dev->debug_area, level, addr, len); - len -= dev->debug_area->buf_size; - addr += dev->debug_area->buf_size; - } + debug_event(dev->debug_area, level, addr, len); } int qdio_allocate_dbf(struct qdio_initialize *init_data, diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index a739bdf9630e..0787b587e4b8 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c @@ -57,10 +57,8 @@ static u32 *get_indicator(void) int i; for (i = 0; i < TIQDIO_NR_NONSHARED_IND; i++) - if (!atomic_read(&q_indicators[i].count)) { - atomic_set(&q_indicators[i].count, 1); + if (!atomic_cmpxchg(&q_indicators[i].count, 0, 1)) return &q_indicators[i].ind; - } /* use the shared indicator */ atomic_inc(&q_indicators[TIQDIO_SHARED_IND].count); @@ -69,13 +67,11 @@ static u32 *get_indicator(void) static void put_indicator(u32 *addr) { - int i; + struct indicator_t *ind = container_of(addr, struct indicator_t, ind); if (!addr) return; - i = ((unsigned long)addr - (unsigned long)q_indicators) / - sizeof(struct indicator_t); - atomic_dec(&q_indicators[i].count); + atomic_dec(&ind->count); } void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c index f20b4d66c75f..d9a2fffd034b 100644 --- a/drivers/s390/cio/vfio_ccw_cp.c +++ b/drivers/s390/cio/vfio_ccw_cp.c @@ -106,7 +106,10 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev, { int ret = 0; - if (!len || pa->pa_nr) + if (!len) + return 0; + + if (pa->pa_nr) return -EINVAL; pa->pa_iova = iova; @@ -330,6 +333,8 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx) { struct ccw1 *ccw = chain->ch_ccw + idx; + if (ccw_is_test(ccw) || ccw_is_noop(ccw) || ccw_is_tic(ccw)) + return; if (!ccw->count) return; @@ -502,6 +507,16 @@ static int ccwchain_fetch_direct(struct ccwchain *chain, ccw = chain->ch_ccw + idx; + if (!ccw->count) { + /* + * We just want the translation result of any direct ccw + * to be an IDA ccw, so let's add the IDA flag for it. + * Although the flag will be ignored by firmware. + */ + ccw->flags |= CCW_FLAG_IDA; + return 0; + } + /* * Pin data page(s) in memory. * The number of pages actually is the count of the idaws which will be @@ -542,6 +557,9 @@ static int ccwchain_fetch_idal(struct ccwchain *chain, ccw = chain->ch_ccw + idx; + if (!ccw->count) + return 0; + /* Calculate size of idaws. */ ret = copy_from_iova(cp->mdev, &idaw_iova, ccw->cda, sizeof(idaw_iova)); if (ret) @@ -570,10 +588,6 @@ static int ccwchain_fetch_idal(struct ccwchain *chain, for (i = 0; i < idaw_nr; i++) { idaw_iova = *(idaws + i); - if (IS_ERR_VALUE(idaw_iova)) { - ret = -EFAULT; - goto out_free_idaws; - } ret = pfn_array_alloc_pin(pat->pat_pa + i, cp->mdev, idaw_iova, 1); diff --git a/drivers/s390/crypto/ap_asm.h b/drivers/s390/crypto/ap_asm.h index 6c0474c834d4..16b59ce5e01d 100644 --- a/drivers/s390/crypto/ap_asm.h +++ b/drivers/s390/crypto/ap_asm.h @@ -117,6 +117,49 @@ static inline int ap_qci(void *config) return reg1; } +/* + * union ap_qact_ap_info - used together with the + * ap_aqic() function to provide a convenient way + * to handle the ap info needed by the qact function. 
+ */ +union ap_qact_ap_info { + unsigned long val; + struct { + unsigned int : 3; + unsigned int mode : 3; + unsigned int : 26; + unsigned int cat : 8; + unsigned int : 8; + unsigned char ver[2]; + }; +}; + +/** + * ap_qact(): Query AP combatibility type. + * @qid: The AP queue number + * @apinfo: On input the info about the AP queue. On output the + * alternate AP queue info provided by the qact function + * in GR2 is stored in. + * + * Returns AP queue status. Check response_code field for failures. + */ +static inline struct ap_queue_status ap_qact(ap_qid_t qid, int ifbit, + union ap_qact_ap_info *apinfo) +{ + register unsigned long reg0 asm ("0") = qid | (5UL << 24) + | ((ifbit & 0x01) << 22); + register unsigned long reg1_in asm ("1") = apinfo->val; + register struct ap_queue_status reg1_out asm ("1"); + register unsigned long reg2 asm ("2") = 0; + + asm volatile( + ".long 0xb2af0000" /* PQAP(QACT) */ + : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) + : : "cc"); + apinfo->val = reg2; + return reg1_out; +} + /** * ap_nqap(): Send message to adjunct processor queue. * @qid: The AP queue number diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 5f0be2040272..8b5658b0bec3 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -176,6 +176,18 @@ static int ap_apft_available(void) return test_facility(15); } +/* + * ap_qact_available(): Test if the PQAP(QACT) subfunction is available. + * + * Returns 1 if the QACT subfunction is available. + */ +static inline int ap_qact_available(void) +{ + if (ap_configuration) + return ap_configuration->qact; + return 0; +} + /** * ap_test_queue(): Test adjunct processor queue. * @qid: The AP queue number @@ -988,6 +1000,47 @@ static int ap_select_domain(void) } /* + * This function checks the type and returns either 0 for not + * supported or the highest compatible type value (which may + * include the input type value). + */ +static int ap_get_compatible_type(ap_qid_t qid, int rawtype, unsigned int func) +{ + int comp_type = 0; + + /* < CEX2A is not supported */ + if (rawtype < AP_DEVICE_TYPE_CEX2A) + return 0; + /* up to CEX6 known and fully supported */ + if (rawtype <= AP_DEVICE_TYPE_CEX6) + return rawtype; + /* + * unknown new type > CEX6, check for compatibility + * to the highest known and supported type which is + * currently CEX6 with the help of the QACT function. 
+ */ + if (ap_qact_available()) { + struct ap_queue_status status; + union ap_qact_ap_info apinfo = {0}; + + apinfo.mode = (func >> 26) & 0x07; + apinfo.cat = AP_DEVICE_TYPE_CEX6; + status = ap_qact(qid, 0, &apinfo); + if (status.response_code == AP_RESPONSE_NORMAL + && apinfo.cat >= AP_DEVICE_TYPE_CEX2A + && apinfo.cat <= AP_DEVICE_TYPE_CEX6) + comp_type = apinfo.cat; + } + if (!comp_type) + AP_DBF(DBF_WARN, "queue=%02x.%04x unable to map type %d\n", + AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype); + else if (comp_type != rawtype) + AP_DBF(DBF_INFO, "queue=%02x.%04x map type %d to %d\n", + AP_QID_CARD(qid), AP_QID_QUEUE(qid), rawtype, comp_type); + return comp_type; +} + +/* * helper function to be used with bus_find_dev * matches for the card device with the given id */ @@ -1014,8 +1067,8 @@ static void ap_scan_bus(struct work_struct *unused) struct ap_card *ac; struct device *dev; ap_qid_t qid; - int depth = 0, type = 0; - unsigned int functions = 0; + int comp_type, depth = 0, type = 0; + unsigned int func = 0; int rc, id, dom, borked, domains, defdomdevs = 0; AP_DBF(DBF_DEBUG, "ap_scan_bus running\n"); @@ -1066,12 +1119,12 @@ static void ap_scan_bus(struct work_struct *unused) } continue; } - rc = ap_query_queue(qid, &depth, &type, &functions); + rc = ap_query_queue(qid, &depth, &type, &func); if (dev) { spin_lock_bh(&aq->lock); if (rc == -ENODEV || /* adapter reconfiguration */ - (ac && ac->functions != functions)) + (ac && ac->functions != func)) aq->state = AP_STATE_BORKED; borked = aq->state == AP_STATE_BORKED; spin_unlock_bh(&aq->lock); @@ -1087,11 +1140,14 @@ static void ap_scan_bus(struct work_struct *unused) } if (rc) continue; - /* new queue device needed */ + /* a new queue device is needed, check out comp type */ + comp_type = ap_get_compatible_type(qid, type, func); + if (!comp_type) + continue; + /* maybe a card device needs to be created first */ if (!ac) { - /* but first create the card device */ - ac = ap_card_create(id, depth, - type, functions); + ac = ap_card_create(id, depth, type, + comp_type, func); if (!ac) continue; ac->ap_dev.device.bus = &ap_bus_type; @@ -1109,7 +1165,7 @@ static void ap_scan_bus(struct work_struct *unused) get_device(&ac->ap_dev.device); } /* now create the new queue device */ - aq = ap_queue_create(qid, type); + aq = ap_queue_create(qid, comp_type); if (!aq) continue; aq->card = ac; diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 754cf2223cfb..3a0e19d87e7c 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h @@ -250,8 +250,8 @@ void ap_queue_remove(struct ap_queue *aq); void ap_queue_suspend(struct ap_device *ap_dev); void ap_queue_resume(struct ap_device *ap_dev); -struct ap_card *ap_card_create(int id, int queue_depth, int device_type, - unsigned int device_functions); +struct ap_card *ap_card_create(int id, int queue_depth, int raw_device_type, + int comp_device_type, unsigned int functions); int ap_module_init(void); void ap_module_exit(void); diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c index 8a31c9e95430..97a8cf578116 100644 --- a/drivers/s390/crypto/ap_card.c +++ b/drivers/s390/crypto/ap_card.c @@ -171,22 +171,20 @@ static void ap_card_device_release(struct device *dev) kfree(ac); } -struct ap_card *ap_card_create(int id, int queue_depth, int device_type, - unsigned int functions) +struct ap_card *ap_card_create(int id, int queue_depth, int raw_type, + int comp_type, unsigned int functions) { struct ap_card *ac; ac = kzalloc(sizeof(*ac), GFP_KERNEL); 
if (!ac) return NULL; + INIT_LIST_HEAD(&ac->list); INIT_LIST_HEAD(&ac->queues); ac->ap_dev.device.release = ap_card_device_release; ac->ap_dev.device.type = &ap_card_type; - ac->ap_dev.device_type = device_type; - /* CEX6 toleration: map to CEX5 */ - if (device_type == AP_DEVICE_TYPE_CEX6) - ac->ap_dev.device_type = AP_DEVICE_TYPE_CEX5; - ac->raw_hwtype = device_type; + ac->ap_dev.device_type = comp_type; + ac->raw_hwtype = raw_type; ac->queue_depth = queue_depth; ac->functions = functions; ac->id = id; diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c index 6c8bd8ad6185..a550d40921e7 100644 --- a/drivers/s390/crypto/ap_queue.c +++ b/drivers/s390/crypto/ap_queue.c @@ -627,13 +627,11 @@ struct ap_queue *ap_queue_create(ap_qid_t qid, int device_type) aq->ap_dev.device.release = ap_queue_device_release; aq->ap_dev.device.type = &ap_queue_type; aq->ap_dev.device_type = device_type; - /* CEX6 toleration: map to CEX5 */ - if (device_type == AP_DEVICE_TYPE_CEX6) - aq->ap_dev.device_type = AP_DEVICE_TYPE_CEX5; aq->qid = qid; aq->state = AP_STATE_RESET_START; aq->interrupt = AP_INTR_DISABLED; spin_lock_init(&aq->lock); + INIT_LIST_HEAD(&aq->list); INIT_LIST_HEAD(&aq->pendingq); INIT_LIST_HEAD(&aq->requestq); setup_timer(&aq->timeout, ap_request_timeout, (unsigned long) aq); diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c index f61fa47135a6..8dda5bb34a2f 100644 --- a/drivers/s390/crypto/pkey_api.c +++ b/drivers/s390/crypto/pkey_api.c @@ -125,10 +125,9 @@ static int alloc_and_prep_cprbmem(size_t paramblen, * allocate consecutive memory for request CPRB, request param * block, reply CPRB and reply param block */ - cprbmem = kmalloc(2 * cprbplusparamblen, GFP_KERNEL); + cprbmem = kzalloc(2 * cprbplusparamblen, GFP_KERNEL); if (!cprbmem) return -ENOMEM; - memset(cprbmem, 0, 2 * cprbplusparamblen); preqcblk = (struct CPRBX *) cprbmem; prepcblk = (struct CPRBX *) (cprbmem + cprbplusparamblen); diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h index 6c94efd23eac..73541a798db7 100644 --- a/drivers/s390/crypto/zcrypt_api.h +++ b/drivers/s390/crypto/zcrypt_api.h @@ -76,6 +76,7 @@ struct ica_z90_status { #define ZCRYPT_CEX3A 8 #define ZCRYPT_CEX4 10 #define ZCRYPT_CEX5 11 +#define ZCRYPT_CEX6 12 /** * Large random numbers are pulled in 4096 byte chunks from the crypto cards diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c index 4e91163d70a6..e2eebc775a37 100644 --- a/drivers/s390/crypto/zcrypt_cex4.c +++ b/drivers/s390/crypto/zcrypt_cex4.c @@ -45,6 +45,8 @@ static struct ap_device_id zcrypt_cex4_card_ids[] = { .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, { .dev_type = AP_DEVICE_TYPE_CEX5, .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX6, + .match_flags = AP_DEVICE_ID_MATCH_CARD_TYPE }, { /* end of list */ }, }; @@ -55,6 +57,8 @@ static struct ap_device_id zcrypt_cex4_queue_ids[] = { .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, { .dev_type = AP_DEVICE_TYPE_CEX5, .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, + { .dev_type = AP_DEVICE_TYPE_CEX6, + .match_flags = AP_DEVICE_ID_MATCH_QUEUE_TYPE }, { /* end of list */ }, }; @@ -72,17 +76,25 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) * MEX_1k, MEX_2k, MEX_4k, CRT_1k, CRT_2k, CRT_4k, RNG, SECKEY */ static const int CEX4A_SPEED_IDX[] = { - 5, 6, 59, 20, 115, 581, 0, 0}; + 14, 19, 249, 42, 228, 1458, 0, 0}; static const int CEX5A_SPEED_IDX[] = { - 3, 3, 6, 8, 32, 218, 0, 0}; + 8, 9, 20, 18, 
66, 458, 0, 0}; + static const int CEX6A_SPEED_IDX[] = { + 6, 9, 20, 17, 65, 438, 0, 0}; + static const int CEX4C_SPEED_IDX[] = { - 24, 25, 82, 41, 138, 1111, 79, 8}; + 59, 69, 308, 83, 278, 2204, 209, 40}; static const int CEX5C_SPEED_IDX[] = { - 10, 14, 23, 17, 45, 242, 63, 4}; + 24, 31, 50, 37, 90, 479, 27, 10}; + static const int CEX6C_SPEED_IDX[] = { + 16, 20, 32, 27, 77, 455, 23, 9}; + static const int CEX4P_SPEED_IDX[] = { - 142, 198, 1852, 203, 331, 1563, 0, 8}; + 224, 313, 3560, 359, 605, 2827, 0, 50}; static const int CEX5P_SPEED_IDX[] = { - 49, 67, 131, 52, 85, 287, 0, 4}; + 63, 84, 156, 83, 142, 533, 0, 10}; + static const int CEX6P_SPEED_IDX[] = { + 55, 70, 121, 73, 129, 522, 0, 9}; struct ap_card *ac = to_ap_card(&ap_dev->device); struct zcrypt_card *zc; @@ -99,11 +111,16 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) zc->user_space_type = ZCRYPT_CEX4; memcpy(zc->speed_rating, CEX4A_SPEED_IDX, sizeof(CEX4A_SPEED_IDX)); - } else { + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) { zc->type_string = "CEX5A"; zc->user_space_type = ZCRYPT_CEX5; memcpy(zc->speed_rating, CEX5A_SPEED_IDX, sizeof(CEX5A_SPEED_IDX)); + } else { + zc->type_string = "CEX6A"; + zc->user_space_type = ZCRYPT_CEX6; + memcpy(zc->speed_rating, CEX6A_SPEED_IDX, + sizeof(CEX6A_SPEED_IDX)); } zc->min_mod_size = CEX4A_MIN_MOD_SIZE; if (ap_test_bit(&ac->functions, AP_FUNC_MEX4K) && @@ -125,7 +142,7 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) zc->user_space_type = ZCRYPT_CEX3C; memcpy(zc->speed_rating, CEX4C_SPEED_IDX, sizeof(CEX4C_SPEED_IDX)); - } else { + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) { zc->type_string = "CEX5C"; /* wrong user space type, must be CEX5 * just keep it for cca compatibility @@ -133,6 +150,14 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) zc->user_space_type = ZCRYPT_CEX3C; memcpy(zc->speed_rating, CEX5C_SPEED_IDX, sizeof(CEX5C_SPEED_IDX)); + } else { + zc->type_string = "CEX6C"; + /* wrong user space type, must be CEX6 + * just keep it for cca compatibility + */ + zc->user_space_type = ZCRYPT_CEX3C; + memcpy(zc->speed_rating, CEX6C_SPEED_IDX, + sizeof(CEX6C_SPEED_IDX)); } zc->min_mod_size = CEX4C_MIN_MOD_SIZE; zc->max_mod_size = CEX4C_MAX_MOD_SIZE; @@ -143,11 +168,16 @@ static int zcrypt_cex4_card_probe(struct ap_device *ap_dev) zc->user_space_type = ZCRYPT_CEX4; memcpy(zc->speed_rating, CEX4P_SPEED_IDX, sizeof(CEX4P_SPEED_IDX)); - } else { + } else if (ac->ap_dev.device_type == AP_DEVICE_TYPE_CEX5) { zc->type_string = "CEX5P"; zc->user_space_type = ZCRYPT_CEX5; memcpy(zc->speed_rating, CEX5P_SPEED_IDX, sizeof(CEX5P_SPEED_IDX)); + } else { + zc->type_string = "CEX6P"; + zc->user_space_type = ZCRYPT_CEX6; + memcpy(zc->speed_rating, CEX6P_SPEED_IDX, + sizeof(CEX6P_SPEED_IDX)); } zc->min_mod_size = CEX4C_MIN_MOD_SIZE; zc->max_mod_size = CEX4C_MAX_MOD_SIZE; diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c index 6dd5d7c58dd0..db5bde47dfb0 100644 --- a/drivers/s390/crypto/zcrypt_msgtype50.c +++ b/drivers/s390/crypto/zcrypt_msgtype50.c @@ -240,8 +240,7 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq, mod = meb2->modulus + sizeof(meb2->modulus) - mod_len; exp = meb2->exponent + sizeof(meb2->exponent) - mod_len; inp = meb2->message + sizeof(meb2->message) - mod_len; - } else { - /* mod_len > 256 = 4096 bit RSA Key */ + } else if (mod_len <= 512) { struct type50_meb3_msg *meb3 = ap_msg->message; memset(meb3, 0, sizeof(*meb3)); ap_msg->length = sizeof(*meb3); @@ 
-251,7 +250,8 @@ static int ICAMEX_msg_to_type50MEX_msg(struct zcrypt_queue *zq, mod = meb3->modulus + sizeof(meb3->modulus) - mod_len; exp = meb3->exponent + sizeof(meb3->exponent) - mod_len; inp = meb3->message + sizeof(meb3->message) - mod_len; - } + } else + return -EINVAL; if (copy_from_user(mod, mex->n_modulus, mod_len) || copy_from_user(exp, mex->b_key, mod_len) || diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index afd20cee7ea0..785620d30504 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -474,7 +474,8 @@ static int XCRB_msg_to_type6CPRB_msgX(struct ap_message *ap_msg, *fcode = (msg->hdr.function_code[0] << 8) | msg->hdr.function_code[1]; *dom = (unsigned short *)&msg->cprbx.domain; - if (memcmp(function_code, "US", 2) == 0) + if (memcmp(function_code, "US", 2) == 0 + || memcmp(function_code, "AU", 2) == 0) ap_msg->special = 1; else ap_msg->special = 0; diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 26363e0816fe..be9f17218531 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c @@ -1761,6 +1761,7 @@ static struct ccwgroup_driver ctcm_group_driver = { .owner = THIS_MODULE, .name = CTC_DRIVER_NAME, }, + .ccw_driver = &ctcm_ccw_driver, .setup = ctcm_probe_device, .remove = ctcm_remove_device, .set_online = ctcm_new_device, diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index d01b5c2a7760..e131a03262ad 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -834,13 +834,13 @@ lcs_notify_lancmd_waiters(struct lcs_card *card, struct lcs_cmd *cmd) * Emit buffer of a lan command. */ static void -lcs_lancmd_timeout(unsigned long data) +lcs_lancmd_timeout(struct timer_list *t) { - struct lcs_reply *reply, *list_reply, *r; + struct lcs_reply *reply = from_timer(reply, t, timer); + struct lcs_reply *list_reply, *r; unsigned long flags; LCS_DBF_TEXT(4, trace, "timeout"); - reply = (struct lcs_reply *) data; spin_lock_irqsave(&reply->card->lock, flags); list_for_each_entry_safe(list_reply, r, &reply->card->lancmd_waiters,list) { @@ -864,7 +864,6 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer, { struct lcs_reply *reply; struct lcs_cmd *cmd; - struct timer_list timer; unsigned long flags; int rc; @@ -885,14 +884,10 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer, rc = lcs_ready_buffer(&card->write, buffer); if (rc) return rc; - init_timer_on_stack(&timer); - timer.function = lcs_lancmd_timeout; - timer.data = (unsigned long) reply; - timer.expires = jiffies + HZ*card->lancmd_timeout; - add_timer(&timer); + timer_setup(&reply->timer, lcs_lancmd_timeout, 0); + mod_timer(&reply->timer, jiffies + HZ * card->lancmd_timeout); wait_event(reply->wait_q, reply->received); - del_timer_sync(&timer); - destroy_timer_on_stack(&timer); + del_timer_sync(&reply->timer); LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc); rc = reply->rc; lcs_put_reply(reply); @@ -2396,6 +2391,7 @@ static struct ccwgroup_driver lcs_group_driver = { .owner = THIS_MODULE, .name = "lcs", }, + .ccw_driver = &lcs_ccw_driver, .setup = lcs_probe_device, .remove = lcs_remove_device, .set_online = lcs_new_device, diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h index f94d8f6dd7a8..fbc8b90b1f85 100644 --- a/drivers/s390/net/lcs.h +++ b/drivers/s390/net/lcs.h @@ -276,6 +276,7 @@ struct lcs_reply { void (*callback)(struct lcs_card *, struct lcs_cmd *); wait_queue_head_t wait_q; struct lcs_card *card; + struct timer_list 
timer; int received; int rc; }; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index bae7440abc01..61cf3e9c0acb 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -5875,6 +5875,7 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = { .owner = THIS_MODULE, .name = "qeth", }, + .ccw_driver = &qeth_ccw_driver, .setup = qeth_core_probe_device, .remove = qeth_core_remove_device, .set_online = qeth_core_set_online, diff --git a/drivers/s390/virtio/Makefile b/drivers/s390/virtio/Makefile index df40692a9011..f68af1f317f1 100644 --- a/drivers/s390/virtio/Makefile +++ b/drivers/s390/virtio/Makefile @@ -6,8 +6,4 @@ # it under the terms of the GNU General Public License (version 2 only) # as published by the Free Software Foundation. -s390-virtio-objs := virtio_ccw.o -ifdef CONFIG_S390_GUEST_OLD_TRANSPORT -s390-virtio-objs += kvm_virtio.o -endif -obj-$(CONFIG_S390_GUEST) += $(s390-virtio-objs) +obj-$(CONFIG_S390_GUEST) += virtio_ccw.o diff --git a/drivers/s390/virtio/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c deleted file mode 100644 index a99d09a11f05..000000000000 --- a/drivers/s390/virtio/kvm_virtio.c +++ /dev/null @@ -1,515 +0,0 @@ -/* - * virtio for kvm on s390 - * - * Copyright IBM Corp. 2008 - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License (version 2 only) - * as published by the Free Software Foundation. - * - * Author(s): Christian Borntraeger <borntraeger@de.ibm.com> - */ - -#include <linux/kernel_stat.h> -#include <linux/init.h> -#include <linux/bootmem.h> -#include <linux/err.h> -#include <linux/virtio.h> -#include <linux/virtio_config.h> -#include <linux/slab.h> -#include <linux/virtio_console.h> -#include <linux/interrupt.h> -#include <linux/virtio_ring.h> -#include <linux/export.h> -#include <linux/pfn.h> -#include <asm/io.h> -#include <asm/kvm_para.h> -#include <asm/kvm_virtio.h> -#include <asm/sclp.h> -#include <asm/setup.h> -#include <asm/irq.h> - -#define VIRTIO_SUBCODE_64 0x0D00 - -/* - * The pointer to our (page) of device descriptions. - */ -static void *kvm_devices; -static struct work_struct hotplug_work; - -struct kvm_device { - struct virtio_device vdev; - struct kvm_device_desc *desc; -}; - -#define to_kvmdev(vd) container_of(vd, struct kvm_device, vdev) - -/* - * memory layout: - * - kvm_device_descriptor - * struct kvm_device_desc - * - configuration - * struct kvm_vqconfig - * - feature bits - * - config space - */ -static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc) -{ - return (struct kvm_vqconfig *)(desc + 1); -} - -static u8 *kvm_vq_features(const struct kvm_device_desc *desc) -{ - return (u8 *)(kvm_vq_config(desc) + desc->num_vq); -} - -static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc) -{ - return kvm_vq_features(desc) + desc->feature_len * 2; -} - -/* - * The total size of the config page used by this device (incl. desc) - */ -static unsigned desc_size(const struct kvm_device_desc *desc) -{ - return sizeof(*desc) - + desc->num_vq * sizeof(struct kvm_vqconfig) - + desc->feature_len * 2 - + desc->config_len; -} - -/* This gets the device's feature bits. 
*/ -static u64 kvm_get_features(struct virtio_device *vdev) -{ - unsigned int i; - u32 features = 0; - struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; - u8 *in_features = kvm_vq_features(desc); - - for (i = 0; i < min(desc->feature_len * 8, 32); i++) - if (in_features[i / 8] & (1 << (i % 8))) - features |= (1 << i); - return features; -} - -static int kvm_finalize_features(struct virtio_device *vdev) -{ - unsigned int i, bits; - struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; - /* Second half of bitmap is features we accept. */ - u8 *out_features = kvm_vq_features(desc) + desc->feature_len; - - /* Give virtio_ring a chance to accept features. */ - vring_transport_features(vdev); - - /* Make sure we don't have any features > 32 bits! */ - BUG_ON((u32)vdev->features != vdev->features); - - memset(out_features, 0, desc->feature_len); - bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; - for (i = 0; i < bits; i++) { - if (__virtio_test_bit(vdev, i)) - out_features[i / 8] |= (1 << (i % 8)); - } - - return 0; -} - -/* - * Reading and writing elements in config space - */ -static void kvm_get(struct virtio_device *vdev, unsigned int offset, - void *buf, unsigned len) -{ - struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; - - BUG_ON(offset + len > desc->config_len); - memcpy(buf, kvm_vq_configspace(desc) + offset, len); -} - -static void kvm_set(struct virtio_device *vdev, unsigned int offset, - const void *buf, unsigned len) -{ - struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; - - BUG_ON(offset + len > desc->config_len); - memcpy(kvm_vq_configspace(desc) + offset, buf, len); -} - -/* - * The operations to get and set the status word just access - * the status field of the device descriptor. set_status will also - * make a hypercall to the host, to tell about status changes - */ -static u8 kvm_get_status(struct virtio_device *vdev) -{ - return to_kvmdev(vdev)->desc->status; -} - -static void kvm_set_status(struct virtio_device *vdev, u8 status) -{ - BUG_ON(!status); - to_kvmdev(vdev)->desc->status = status; - kvm_hypercall1(KVM_S390_VIRTIO_SET_STATUS, - (unsigned long) to_kvmdev(vdev)->desc); -} - -/* - * To reset the device, we use the KVM_VIRTIO_RESET hypercall, using the - * descriptor address. The Host will zero the status and all the - * features. - */ -static void kvm_reset(struct virtio_device *vdev) -{ - kvm_hypercall1(KVM_S390_VIRTIO_RESET, - (unsigned long) to_kvmdev(vdev)->desc); -} - -/* - * When the virtio_ring code wants to notify the Host, it calls us here and we - * make a hypercall. We hand the address of the virtqueue so the Host - * knows which virtqueue we're talking about. - */ -static bool kvm_notify(struct virtqueue *vq) -{ - long rc; - struct kvm_vqconfig *config = vq->priv; - - rc = kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, config->address); - if (rc < 0) - return false; - return true; -} - -/* - * This routine finds the first virtqueue described in the configuration of - * this device and sets it up. 
- */ -static struct virtqueue *kvm_find_vq(struct virtio_device *vdev, - unsigned index, - void (*callback)(struct virtqueue *vq), - const char *name, bool ctx) -{ - struct kvm_device *kdev = to_kvmdev(vdev); - struct kvm_vqconfig *config; - struct virtqueue *vq; - int err; - - if (index >= kdev->desc->num_vq) - return ERR_PTR(-ENOENT); - - if (!name) - return NULL; - - config = kvm_vq_config(kdev->desc)+index; - - err = vmem_add_mapping(config->address, - vring_size(config->num, - KVM_S390_VIRTIO_RING_ALIGN)); - if (err) - goto out; - - vq = vring_new_virtqueue(index, config->num, KVM_S390_VIRTIO_RING_ALIGN, - vdev, true, ctx, (void *) config->address, - kvm_notify, callback, name); - if (!vq) { - err = -ENOMEM; - goto unmap; - } - - /* - * register a callback token - * The host will sent this via the external interrupt parameter - */ - config->token = (u64) vq; - - vq->priv = config; - return vq; -unmap: - vmem_remove_mapping(config->address, - vring_size(config->num, - KVM_S390_VIRTIO_RING_ALIGN)); -out: - return ERR_PTR(err); -} - -static void kvm_del_vq(struct virtqueue *vq) -{ - struct kvm_vqconfig *config = vq->priv; - - vring_del_virtqueue(vq); - vmem_remove_mapping(config->address, - vring_size(config->num, - KVM_S390_VIRTIO_RING_ALIGN)); -} - -static void kvm_del_vqs(struct virtio_device *vdev) -{ - struct virtqueue *vq, *n; - - list_for_each_entry_safe(vq, n, &vdev->vqs, list) - kvm_del_vq(vq); -} - -static int kvm_find_vqs(struct virtio_device *vdev, unsigned nvqs, - struct virtqueue *vqs[], - vq_callback_t *callbacks[], - const char * const names[], - const bool *ctx, - struct irq_affinity *desc) -{ - struct kvm_device *kdev = to_kvmdev(vdev); - int i; - - /* We must have this many virtqueues. */ - if (nvqs > kdev->desc->num_vq) - return -ENOENT; - - for (i = 0; i < nvqs; ++i) { - vqs[i] = kvm_find_vq(vdev, i, callbacks[i], names[i], - ctx ? ctx[i] : false); - if (IS_ERR(vqs[i])) - goto error; - } - return 0; - -error: - kvm_del_vqs(vdev); - return PTR_ERR(vqs[i]); -} - -static const char *kvm_bus_name(struct virtio_device *vdev) -{ - return ""; -} - -/* - * The config ops structure as defined by virtio config - */ -static const struct virtio_config_ops kvm_vq_configspace_ops = { - .get_features = kvm_get_features, - .finalize_features = kvm_finalize_features, - .get = kvm_get, - .set = kvm_set, - .get_status = kvm_get_status, - .set_status = kvm_set_status, - .reset = kvm_reset, - .find_vqs = kvm_find_vqs, - .del_vqs = kvm_del_vqs, - .bus_name = kvm_bus_name, -}; - -/* - * The root device for the kvm virtio devices. - * This makes them appear as /sys/devices/kvm_s390/0,1,2 not /sys/devices/0,1,2. - */ -static struct device *kvm_root; - -/* - * adds a new device and register it with virtio - * appropriate drivers are loaded by the device model - */ -static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset) -{ - struct kvm_device *kdev; - - kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); - if (!kdev) { - printk(KERN_EMERG "Cannot allocate kvm dev %u type %u\n", - offset, d->type); - return; - } - - kdev->vdev.dev.parent = kvm_root; - kdev->vdev.id.device = d->type; - kdev->vdev.config = &kvm_vq_configspace_ops; - kdev->desc = d; - - if (register_virtio_device(&kdev->vdev) != 0) { - printk(KERN_ERR "Failed to register kvm device %u type %u\n", - offset, d->type); - kfree(kdev); - } -} - -/* - * scan_devices() simply iterates through the device page. - * The type 0 is reserved to mean "end of devices". 
- */ -static void scan_devices(void) -{ - unsigned int i; - struct kvm_device_desc *d; - - for (i = 0; i < PAGE_SIZE; i += desc_size(d)) { - d = kvm_devices + i; - - if (d->type == 0) - break; - - add_kvm_device(d, i); - } -} - -/* - * match for a kvm device with a specific desc pointer - */ -static int match_desc(struct device *dev, void *data) -{ - struct virtio_device *vdev = dev_to_virtio(dev); - struct kvm_device *kdev = to_kvmdev(vdev); - - return kdev->desc == data; -} - -/* - * hotplug_device tries to find changes in the device page. - */ -static void hotplug_devices(struct work_struct *dummy) -{ - unsigned int i; - struct kvm_device_desc *d; - struct device *dev; - - for (i = 0; i < PAGE_SIZE; i += desc_size(d)) { - d = kvm_devices + i; - - /* end of list */ - if (d->type == 0) - break; - - /* device already exists */ - dev = device_find_child(kvm_root, d, match_desc); - if (dev) { - /* XXX check for hotplug remove */ - put_device(dev); - continue; - } - - /* new device */ - printk(KERN_INFO "Adding new virtio device %p\n", d); - add_kvm_device(d, i); - } -} - -/* - * we emulate the request_irq behaviour on top of s390 extints - */ -static void kvm_extint_handler(struct ext_code ext_code, - unsigned int param32, unsigned long param64) -{ - struct virtqueue *vq; - u32 param; - - if ((ext_code.subcode & 0xff00) != VIRTIO_SUBCODE_64) - return; - inc_irq_stat(IRQEXT_VRT); - - /* The LSB might be overloaded, we have to mask it */ - vq = (struct virtqueue *)(param64 & ~1UL); - - /* We use ext_params to decide what this interrupt means */ - param = param32 & VIRTIO_PARAM_MASK; - - switch (param) { - case VIRTIO_PARAM_CONFIG_CHANGED: - virtio_config_changed(vq->vdev); - break; - case VIRTIO_PARAM_DEV_ADD: - schedule_work(&hotplug_work); - break; - case VIRTIO_PARAM_VRING_INTERRUPT: - default: - vring_interrupt(0, vq); - break; - } -} - -/* - * For s390-virtio, we expect a page above main storage containing - * the virtio configuration. Try to actually load from this area - * in order to figure out if the host provides this page. - */ -static int __init test_devices_support(unsigned long addr) -{ - int ret = -EIO; - - asm volatile( - "0: lura 0,%1\n" - "1: xgr %0,%0\n" - "2:\n" - EX_TABLE(0b,2b) - EX_TABLE(1b,2b) - : "+d" (ret) - : "a" (addr) - : "0", "cc"); - return ret; -} -/* - * Init function for virtio - * devices are in a single page above top of "normal" + standby mem - */ -static int __init kvm_devices_init(void) -{ - int rc; - unsigned long total_memory_size = sclp.rzm * sclp.rnmax; - - if (!MACHINE_IS_KVM) - return -ENODEV; - - if (test_devices_support(total_memory_size) < 0) - return -ENODEV; - - pr_warn("The s390-virtio transport is deprecated. 
Please switch to a modern host providing virtio-ccw.\n"); - - rc = vmem_add_mapping(total_memory_size, PAGE_SIZE); - if (rc) - return rc; - - kvm_devices = (void *) total_memory_size; - - kvm_root = root_device_register("kvm_s390"); - if (IS_ERR(kvm_root)) { - rc = PTR_ERR(kvm_root); - printk(KERN_ERR "Could not register kvm_s390 root device"); - vmem_remove_mapping(total_memory_size, PAGE_SIZE); - return rc; - } - - INIT_WORK(&hotplug_work, hotplug_devices); - - irq_subclass_register(IRQ_SUBCLASS_SERVICE_SIGNAL); - register_external_irq(EXT_IRQ_CP_SERVICE, kvm_extint_handler); - - scan_devices(); - return 0; -} - -/* code for early console output with virtio_console */ -static int early_put_chars(u32 vtermno, const char *buf, int count) -{ - char scratch[17]; - unsigned int len = count; - - if (len > sizeof(scratch) - 1) - len = sizeof(scratch) - 1; - scratch[len] = '\0'; - memcpy(scratch, buf, len); - kvm_hypercall1(KVM_S390_VIRTIO_NOTIFY, __pa(scratch)); - return len; -} - -static int __init s390_virtio_console_init(void) -{ - if (sclp.has_vt220 || sclp.has_linemode) - return -ENODEV; - return virtio_cons_early_init(early_put_chars); -} -console_initcall(s390_virtio_console_init); - - -/* - * We do this after core stuff, but before the drivers. - */ -postcore_initcall(kvm_devices_init); diff --git a/drivers/scsi/aic7xxx/aic79xx.h b/drivers/scsi/aic7xxx/aic79xx.h index d47b527b25dd..31f2bb9d7146 100644 --- a/drivers/scsi/aic7xxx/aic79xx.h +++ b/drivers/scsi/aic7xxx/aic79xx.h @@ -1046,8 +1046,6 @@ typedef enum { typedef uint8_t ahd_mode_state; -typedef void ahd_callback_t (void *); - struct ahd_completion { uint16_t tag; @@ -1122,8 +1120,7 @@ struct ahd_softc { /* * Timer handles for timer driven callbacks. */ - ahd_timer_t reset_timer; - ahd_timer_t stat_timer; + struct timer_list stat_timer; /* * Statistics. diff --git a/drivers/scsi/aic7xxx/aic79xx_core.c b/drivers/scsi/aic7xxx/aic79xx_core.c index 95d8f25cbcca..b560f396ee99 100644 --- a/drivers/scsi/aic7xxx/aic79xx_core.c +++ b/drivers/scsi/aic7xxx/aic79xx_core.c @@ -207,7 +207,7 @@ static void ahd_add_scb_to_free_list(struct ahd_softc *ahd, static u_int ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, u_int prev, u_int next, u_int tid); static void ahd_reset_current_bus(struct ahd_softc *ahd); -static ahd_callback_t ahd_stat_timer; +static void ahd_stat_timer(struct timer_list *t); #ifdef AHD_DUMP_SEQ static void ahd_dumpseq(struct ahd_softc *ahd); #endif @@ -6104,8 +6104,7 @@ ahd_alloc(void *platform_arg, char *name) ahd->bugs = AHD_BUGNONE; ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A; - ahd_timer_init(&ahd->reset_timer); - ahd_timer_init(&ahd->stat_timer); + timer_setup(&ahd->stat_timer, ahd_stat_timer, 0); ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT; ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT; ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT; @@ -6235,8 +6234,7 @@ ahd_shutdown(void *arg) /* * Stop periodic timer callbacks. 
*/ - ahd_timer_stop(&ahd->reset_timer); - ahd_timer_stop(&ahd->stat_timer); + del_timer_sync(&ahd->stat_timer); /* This will reset most registers to 0, but not all */ ahd_reset(ahd, /*reinit*/FALSE); @@ -7039,20 +7037,11 @@ static const char *termstat_strings[] = { }; /***************************** Timer Facilities *******************************/ -#define ahd_timer_init init_timer -#define ahd_timer_stop del_timer_sync -typedef void ahd_linux_callback_t (u_long); - static void -ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg) +ahd_timer_reset(struct timer_list *timer, int usec) { - struct ahd_softc *ahd; - - ahd = (struct ahd_softc *)arg; del_timer(timer); - timer->data = (u_long)arg; timer->expires = jiffies + (usec * HZ)/1000000; - timer->function = (ahd_linux_callback_t*)func; add_timer(timer); } @@ -7279,8 +7268,7 @@ ahd_init(struct ahd_softc *ahd) } init_done: ahd_restart(ahd); - ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US, - ahd_stat_timer, ahd); + ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US); return (0); } @@ -8878,9 +8866,9 @@ ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset) /**************************** Statistics Processing ***************************/ static void -ahd_stat_timer(void *arg) +ahd_stat_timer(struct timer_list *t) { - struct ahd_softc *ahd = arg; + struct ahd_softc *ahd = from_timer(ahd, t, stat_timer); u_long s; int enint_coal; @@ -8907,8 +8895,7 @@ ahd_stat_timer(void *arg) ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1); ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]; ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0; - ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US, - ahd_stat_timer, ahd); + ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US); ahd_unlock(ahd, &s); } diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h index 728193a42e6e..8a8b7ae7aed3 100644 --- a/drivers/scsi/aic7xxx/aic79xx_osm.h +++ b/drivers/scsi/aic7xxx/aic79xx_osm.h @@ -203,9 +203,6 @@ int ahd_dmamap_unload(struct ahd_softc *, bus_dma_tag_t, bus_dmamap_t); */ #define ahd_dmamap_sync(ahd, dma_tag, dmamap, offset, len, op) -/************************** Timer DataStructures ******************************/ -typedef struct timer_list ahd_timer_t; - /********************************** Includes **********************************/ #ifdef CONFIG_AIC79XX_REG_PRETTY_PRINT #define AIC_DEBUG_REGISTERS 1 @@ -214,10 +211,6 @@ typedef struct timer_list ahd_timer_t; #endif #include "aic79xx.h" -/***************************** Timer Facilities *******************************/ -#define ahd_timer_init init_timer -#define ahd_timer_stop del_timer_sync - /***************************** SMP support ************************************/ #include <linux/spinlock.h> diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.c b/drivers/scsi/aic94xx/aic94xx_hwi.c index f2671a8fa7e3..7cbc7213b2b2 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.c +++ b/drivers/scsi/aic94xx/aic94xx_hwi.c @@ -1178,8 +1178,7 @@ static void asd_start_scb_timers(struct list_head *list) struct asd_ascb *ascb; list_for_each_entry(ascb, list, list) { if (!ascb->uldd_timer) { - ascb->timer.data = (unsigned long) ascb; - ascb->timer.function = asd_ascb_timedout; + ascb->timer.function = (TIMER_FUNC_TYPE)asd_ascb_timedout; ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; add_timer(&ascb->timer); } diff --git a/drivers/scsi/aic94xx/aic94xx_hwi.h b/drivers/scsi/aic94xx/aic94xx_hwi.h index 
8c1c28239e93..8f147e720cfd 100644 --- a/drivers/scsi/aic94xx/aic94xx_hwi.h +++ b/drivers/scsi/aic94xx/aic94xx_hwi.h @@ -291,8 +291,7 @@ static inline void asd_init_ascb(struct asd_ha_struct *asd_ha, INIT_LIST_HEAD(&ascb->list); ascb->scb = ascb->dma_scb.vaddr; ascb->ha = asd_ha; - ascb->timer.function = NULL; - init_timer(&ascb->timer); + timer_setup(&ascb->timer, NULL, 0); ascb->tc_index = -1; } @@ -392,7 +391,7 @@ void asd_control_led(struct asd_ha_struct *asd_ha, int phy_id, int op); void asd_turn_led(struct asd_ha_struct *asd_ha, int phy_id, int op); int asd_enable_phys(struct asd_ha_struct *asd_ha, const u8 phy_mask); -void asd_ascb_timedout(unsigned long data); +void asd_ascb_timedout(struct timer_list *t); int asd_chip_hardrst(struct asd_ha_struct *asd_ha); #endif diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c index fdac7c2fef37..22873ce8bbfa 100644 --- a/drivers/scsi/aic94xx/aic94xx_scb.c +++ b/drivers/scsi/aic94xx/aic94xx_scb.c @@ -866,12 +866,12 @@ void asd_build_initiate_link_adm_task(struct asd_ascb *ascb, int phy_id, * Upper layers can implement their own timeout function, say to free * resources they have with this SCB, and then call this one at the * end of their timeout function. To do this, one should initialize - * the ascb->timer.{function, data, expires} prior to calling the post + * the ascb->timer.{function, expires} prior to calling the post * function. The timer is started by the post function. */ -void asd_ascb_timedout(unsigned long data) +void asd_ascb_timedout(struct timer_list *t) { - struct asd_ascb *ascb = (void *) data; + struct asd_ascb *ascb = from_timer(ascb, t, timer); struct asd_seq_data *seq = &ascb->ha->seq; unsigned long flags; diff --git a/drivers/scsi/aic94xx/aic94xx_tmf.c b/drivers/scsi/aic94xx/aic94xx_tmf.c index d4c35df3d4ae..4637119c09d8 100644 --- a/drivers/scsi/aic94xx/aic94xx_tmf.c +++ b/drivers/scsi/aic94xx/aic94xx_tmf.c @@ -35,15 +35,14 @@ static int asd_enqueue_internal(struct asd_ascb *ascb, void (*tasklet_complete)(struct asd_ascb *, struct done_list_struct *), - void (*timed_out)(unsigned long)) + void (*timed_out)(struct timer_list *t)) { int res; ascb->tasklet_complete = tasklet_complete; ascb->uldd_timer = 1; - ascb->timer.data = (unsigned long) ascb; - ascb->timer.function = timed_out; + ascb->timer.function = (TIMER_FUNC_TYPE)timed_out; ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT; add_timer(&ascb->timer); @@ -87,9 +86,9 @@ static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb, asd_ascb_free(ascb); } -static void asd_clear_nexus_timedout(unsigned long data) +static void asd_clear_nexus_timedout(struct timer_list *t) { - struct asd_ascb *ascb = (void *)data; + struct asd_ascb *ascb = from_timer(ascb, t, timer); struct tasklet_completion_status *tcs = ascb->uldd_task; ASD_DPRINTK("%s: here\n", __func__); @@ -261,9 +260,9 @@ static int asd_clear_nexus_index(struct sas_task *task) /* ---------- TMFs ---------- */ -static void asd_tmf_timedout(unsigned long data) +static void asd_tmf_timedout(struct timer_list *t) { - struct asd_ascb *ascb = (void *) data; + struct asd_ascb *ascb = from_timer(ascb, t, timer); struct tasklet_completion_status *tcs = ascb->uldd_task; ASD_DPRINTK("tmf timed out\n"); diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c index b4542e7e2ad5..d8bd6f2c9c83 100644 --- a/drivers/scsi/be2iscsi/be_main.c +++ b/drivers/scsi/be2iscsi/be_main.c @@ -5230,12 +5230,11 @@ static void beiscsi_eqd_update_work(struct work_struct *work) 
msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL)); } -static void beiscsi_hw_tpe_check(unsigned long ptr) +static void beiscsi_hw_tpe_check(struct timer_list *t) { - struct beiscsi_hba *phba; + struct beiscsi_hba *phba = from_timer(phba, t, hw_check); u32 wait; - phba = (struct beiscsi_hba *)ptr; /* if not TPE, do nothing */ if (!beiscsi_detect_tpe(phba)) return; @@ -5248,11 +5247,10 @@ static void beiscsi_hw_tpe_check(unsigned long ptr) msecs_to_jiffies(wait)); } -static void beiscsi_hw_health_check(unsigned long ptr) +static void beiscsi_hw_health_check(struct timer_list *t) { - struct beiscsi_hba *phba; + struct beiscsi_hba *phba = from_timer(phba, t, hw_check); - phba = (struct beiscsi_hba *)ptr; beiscsi_detect_ue(phba); if (beiscsi_detect_ue(phba)) { __beiscsi_log(phba, KERN_ERR, @@ -5264,7 +5262,7 @@ static void beiscsi_hw_health_check(unsigned long ptr) if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state)) return; /* modify this timer to check TPE */ - phba->hw_check.function = beiscsi_hw_tpe_check; + phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_tpe_check; } mod_timer(&phba->hw_check, @@ -5351,7 +5349,7 @@ static int beiscsi_enable_port(struct beiscsi_hba *phba) * Timer function gets modified for TPE detection. * Always reinit to do health check first. */ - phba->hw_check.function = beiscsi_hw_health_check; + phba->hw_check.function = (TIMER_FUNC_TYPE)beiscsi_hw_health_check; mod_timer(&phba->hw_check, jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); return 0; @@ -5708,9 +5706,7 @@ static int beiscsi_dev_probe(struct pci_dev *pcidev, * Start UE detection here. UE before this will cause stall in probe * and eventually fail the probe. */ - init_timer(&phba->hw_check); - phba->hw_check.function = beiscsi_hw_health_check; - phba->hw_check.data = (unsigned long)phba; + timer_setup(&phba->hw_check, beiscsi_hw_health_check, 0); mod_timer(&phba->hw_check, jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL)); beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT, diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 6844ba361616..e6b9de7d41ac 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -823,7 +823,7 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev) skb_queue_head_init(&port->fcoe_pending_queue); port->fcoe_pending_queue_active = 0; - setup_timer(&port->timer, fcoe_queue_timer, (unsigned long) lport); + timer_setup(&port->timer, fcoe_queue_timer, 0); fcoe_link_speed_update(lport); @@ -845,9 +845,9 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev) return 0; } -static void bnx2fc_destroy_timer(unsigned long data) +static void bnx2fc_destroy_timer(struct timer_list *t) { - struct bnx2fc_hba *hba = (struct bnx2fc_hba *)data; + struct bnx2fc_hba *hba = from_timer(hba, t, destroy_timer); printk(KERN_ERR PFX "ERROR:bnx2fc_destroy_timer - " "Destroy compl not received!!\n"); @@ -1946,11 +1946,10 @@ static void bnx2fc_fw_destroy(struct bnx2fc_hba *hba) { if (test_and_clear_bit(BNX2FC_FLAG_FW_INIT_DONE, &hba->flags)) { if (bnx2fc_send_fw_fcoe_destroy_msg(hba) == 0) { - init_timer(&hba->destroy_timer); + timer_setup(&hba->destroy_timer, bnx2fc_destroy_timer, + 0); hba->destroy_timer.expires = BNX2FC_FW_TIMEOUT + jiffies; - hba->destroy_timer.function = bnx2fc_destroy_timer; - hba->destroy_timer.data = (unsigned long)hba; add_timer(&hba->destroy_timer); wait_event_interruptible(hba->destroy_wait, test_bit(BNX2FC_FLAG_DESTROY_CMPL, diff --git 
a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h index 89ef1a1678d1..663a63d4dae4 100644 --- a/drivers/scsi/bnx2i/bnx2i.h +++ b/drivers/scsi/bnx2i/bnx2i.h @@ -858,7 +858,7 @@ extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep); extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep); -extern void bnx2i_ep_ofld_timer(unsigned long data); +extern void bnx2i_ep_ofld_timer(struct timer_list *t); extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list( struct bnx2i_hba *hba, u32 iscsi_cid); extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list( diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c index 42921dbba927..61a93994d5ed 100644 --- a/drivers/scsi/bnx2i/bnx2i_hwi.c +++ b/drivers/scsi/bnx2i/bnx2i_hwi.c @@ -698,9 +698,9 @@ void bnx2i_update_iscsi_conn(struct iscsi_conn *conn) * * routine to handle connection offload/destroy request timeout */ -void bnx2i_ep_ofld_timer(unsigned long data) +void bnx2i_ep_ofld_timer(struct timer_list *t) { - struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data; + struct bnx2i_endpoint *ep = from_timer(ep, t, ofld_timer); if (ep->state == EP_STATE_OFLD_START) { printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n"); diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c index 03c104b47f31..de0a507577ef 100644 --- a/drivers/scsi/bnx2i/bnx2i_iscsi.c +++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c @@ -1611,9 +1611,8 @@ static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn) * this should normally not sleep for a long time so it should * not disrupt the caller. */ + timer_setup(&bnx2i_conn->ep->ofld_timer, bnx2i_ep_ofld_timer, 0); bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies; - bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer; - bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep; add_timer(&bnx2i_conn->ep->ofld_timer); /* update iSCSI context for this conn, wait for CNIC to complete */ wait_event_interruptible(bnx2i_conn->ep->ofld_wait, @@ -1729,10 +1728,8 @@ static int bnx2i_tear_down_conn(struct bnx2i_hba *hba, } ep->state = EP_STATE_CLEANUP_START; - init_timer(&ep->ofld_timer); + timer_setup(&ep->ofld_timer, bnx2i_ep_ofld_timer, 0); ep->ofld_timer.expires = hba->conn_ctx_destroy_tmo + jiffies; - ep->ofld_timer.function = bnx2i_ep_ofld_timer; - ep->ofld_timer.data = (unsigned long) ep; add_timer(&ep->ofld_timer); bnx2i_ep_destroy_list_add(hba, ep); @@ -1835,10 +1832,8 @@ static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost, bnx2i_ep->state = EP_STATE_OFLD_START; bnx2i_ep_ofld_list_add(hba, bnx2i_ep); - init_timer(&bnx2i_ep->ofld_timer); + timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0); bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies; - bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; - bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; add_timer(&bnx2i_ep->ofld_timer); if (bnx2i_send_conn_ofld_req(hba, bnx2i_ep)) { @@ -2054,10 +2049,8 @@ int bnx2i_hw_ep_disconnect(struct bnx2i_endpoint *bnx2i_ep) session = conn->session; } - init_timer(&bnx2i_ep->ofld_timer); + timer_setup(&bnx2i_ep->ofld_timer, bnx2i_ep_ofld_timer, 0); bnx2i_ep->ofld_timer.expires = hba->conn_teardown_tmo + jiffies; - bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer; - bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep; add_timer(&bnx2i_ep->ofld_timer); if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) diff --git a/drivers/scsi/csiostor/csio_hw.c b/drivers/scsi/csiostor/csio_hw.c index 
5be0086142ca..0bd1131b6cc9 100644 --- a/drivers/scsi/csiostor/csio_hw.c +++ b/drivers/scsi/csiostor/csio_hw.c @@ -3347,9 +3347,10 @@ csio_mberr_worker(void *data) * **/ static void -csio_hw_mb_timer(uintptr_t data) +csio_hw_mb_timer(struct timer_list *t) { - struct csio_hw *hw = (struct csio_hw *)data; + struct csio_mbm *mbm = from_timer(mbm, t, timer); + struct csio_hw *hw = mbm->hw; struct csio_mb *mbp = NULL; spin_lock_irq(&hw->lock); @@ -3715,9 +3716,9 @@ csio_mgmt_req_lookup(struct csio_mgmtm *mgmtm, struct csio_ioreq *io_req) * Return - none. */ static void -csio_mgmt_tmo_handler(uintptr_t data) +csio_mgmt_tmo_handler(struct timer_list *t) { - struct csio_mgmtm *mgmtm = (struct csio_mgmtm *) data; + struct csio_mgmtm *mgmtm = from_timer(mgmtm, t, mgmt_timer); struct list_head *tmp; struct csio_ioreq *io_req; @@ -3797,11 +3798,7 @@ csio_mgmtm_cleanup(struct csio_mgmtm *mgmtm) static int csio_mgmtm_init(struct csio_mgmtm *mgmtm, struct csio_hw *hw) { - struct timer_list *timer = &mgmtm->mgmt_timer; - - init_timer(timer); - timer->function = csio_mgmt_tmo_handler; - timer->data = (unsigned long)mgmtm; + timer_setup(&mgmtm->mgmt_timer, csio_mgmt_tmo_handler, 0); INIT_LIST_HEAD(&mgmtm->active_q); INIT_LIST_HEAD(&mgmtm->cbfn_q); diff --git a/drivers/scsi/csiostor/csio_mb.c b/drivers/scsi/csiostor/csio_mb.c index 9451787ca7f2..abcedfbcecda 100644 --- a/drivers/scsi/csiostor/csio_mb.c +++ b/drivers/scsi/csiostor/csio_mb.c @@ -1644,13 +1644,10 @@ csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q) */ int csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw, - void (*timer_fn)(uintptr_t)) + void (*timer_fn)(struct timer_list *)) { - struct timer_list *timer = &mbm->timer; - - init_timer(timer); - timer->function = timer_fn; - timer->data = (unsigned long)hw; + mbm->hw = hw; + timer_setup(&mbm->timer, timer_fn, 0); INIT_LIST_HEAD(&mbm->req_q); INIT_LIST_HEAD(&mbm->cbfn_q); diff --git a/drivers/scsi/csiostor/csio_mb.h b/drivers/scsi/csiostor/csio_mb.h index 1bc82d0bc260..a6823df73015 100644 --- a/drivers/scsi/csiostor/csio_mb.h +++ b/drivers/scsi/csiostor/csio_mb.h @@ -137,6 +137,7 @@ struct csio_mbm { uint32_t a_mbox; /* Async mbox num */ uint32_t intr_idx; /* Interrupt index */ struct timer_list timer; /* Mbox timer */ + struct csio_hw *hw; /* Hardware pointer */ struct list_head req_q; /* Mbox request queue */ struct list_head cbfn_q; /* Mbox completion q */ struct csio_mb *mcurrent; /* Current mailbox */ @@ -252,7 +253,7 @@ void csio_mb_process_portparams_rsp(struct csio_hw *hw, struct csio_mb *mbp, /* MB module functions */ int csio_mbm_init(struct csio_mbm *, struct csio_hw *, - void (*)(uintptr_t)); + void (*)(struct timer_list *)); void csio_mbm_exit(struct csio_mbm *); void csio_mb_intr_enable(struct csio_hw *); void csio_mb_intr_disable(struct csio_hw *); diff --git a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c index 7b09e7ddf35e..babd79361a46 100644 --- a/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c +++ b/drivers/scsi/cxgbi/cxgb3i/cxgb3i.c @@ -545,10 +545,10 @@ static int act_open_rpl_status_to_errno(int status) } } -static void act_open_retry_timer(unsigned long data) +static void act_open_retry_timer(struct timer_list *t) { + struct cxgbi_sock *csk = from_timer(csk, t, retry_timer); struct sk_buff *skb; - struct cxgbi_sock *csk = (struct cxgbi_sock *)data; log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK, "csk 0x%p,%u,0x%lx,%u.\n", @@ -586,8 +586,8 @@ static int do_act_open_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) 
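csio_mbm_init() above gains a hw back-pointer in struct csio_mbm because from_timer() can only recover the structure that embeds the timer, not the csio_hw the old .data field used to carry. A rough sketch of that back-pointer pattern, with hypothetical names:

#include <linux/timer.h>

struct hypo_hw;				/* the object the callback really wants */

struct hypo_mbox_mgr {
	struct timer_list timer;
	struct hypo_hw *hw;		/* back-pointer added for the callback */
};

static void hypo_mbox_timeout(struct timer_list *t)
{
	/* First recover the embedding structure ... */
	struct hypo_mbox_mgr *mgr = from_timer(mgr, t, timer);
	/* ... then follow the stored pointer to the hardware state. */
	struct hypo_hw *hw = mgr->hw;

	(void)hw;			/* handle the timeout against hw */
}

static void hypo_mbox_init(struct hypo_mbox_mgr *mgr, struct hypo_hw *hw)
{
	mgr->hw = hw;			/* must be valid before the timer can fire */
	timer_setup(&mgr->timer, hypo_mbox_timeout, 0);
}

The libsas slow-task changes further down use the same trick whenever the timer's container is not the object the handler ultimately needs.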
cxgbi_sock_get(csk); spin_lock_bh(&csk->lock); if (rpl->status == CPL_ERR_CONN_EXIST && - csk->retry_timer.function != act_open_retry_timer) { - csk->retry_timer.function = act_open_retry_timer; + csk->retry_timer.function != (TIMER_FUNC_TYPE)act_open_retry_timer) { + csk->retry_timer.function = (TIMER_FUNC_TYPE)act_open_retry_timer; mod_timer(&csk->retry_timer, jiffies + HZ / 2); } else cxgbi_sock_fail_act_open(csk, diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c index 1d02cf9fe06c..1bef2724eb78 100644 --- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c +++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c @@ -872,10 +872,10 @@ static int act_open_rpl_status_to_errno(int status) } } -static void csk_act_open_retry_timer(unsigned long data) +static void csk_act_open_retry_timer(struct timer_list *t) { struct sk_buff *skb = NULL; - struct cxgbi_sock *csk = (struct cxgbi_sock *)data; + struct cxgbi_sock *csk = from_timer(csk, t, retry_timer); struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev); void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *, struct l2t_entry *); @@ -963,8 +963,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb) spin_lock_bh(&csk->lock); if (status == CPL_ERR_CONN_EXIST && - csk->retry_timer.function != csk_act_open_retry_timer) { - csk->retry_timer.function = csk_act_open_retry_timer; + csk->retry_timer.function != (TIMER_FUNC_TYPE)csk_act_open_retry_timer) { + csk->retry_timer.function = (TIMER_FUNC_TYPE)csk_act_open_retry_timer; mod_timer(&csk->retry_timer, jiffies + HZ / 2); } else cxgbi_sock_fail_act_open(csk, diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c index 512c8f1ea5b0..a61a152136a3 100644 --- a/drivers/scsi/cxgbi/libcxgbi.c +++ b/drivers/scsi/cxgbi/libcxgbi.c @@ -572,7 +572,7 @@ static struct cxgbi_sock *cxgbi_sock_create(struct cxgbi_device *cdev) kref_init(&csk->refcnt); skb_queue_head_init(&csk->receive_queue); skb_queue_head_init(&csk->write_queue); - setup_timer(&csk->retry_timer, NULL, (unsigned long)csk); + timer_setup(&csk->retry_timer, NULL, 0); rwlock_init(&csk->callback_lock); csk->cdev = cdev; csk->flags = 0; diff --git a/drivers/scsi/dc395x.c b/drivers/scsi/dc395x.c index 5ee7f44cf869..60ef8df42b95 100644 --- a/drivers/scsi/dc395x.c +++ b/drivers/scsi/dc395x.c @@ -395,7 +395,7 @@ static void request_sense(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb, struct ScsiReqBlk *srb); static void set_xfer_rate(struct AdapterCtlBlk *acb, struct DeviceCtlBlk *dcb); -static void waiting_timeout(unsigned long ptr); +static void waiting_timeout(struct timer_list *t); /*--------------------------------------------------------------------------- @@ -857,9 +857,6 @@ static void waiting_set_timer(struct AdapterCtlBlk *acb, unsigned long to) { if (timer_pending(&acb->waiting_timer)) return; - init_timer(&acb->waiting_timer); - acb->waiting_timer.function = waiting_timeout; - acb->waiting_timer.data = (unsigned long) acb; if (time_before(jiffies + to, acb->last_reset - HZ / 2)) acb->waiting_timer.expires = acb->last_reset - HZ / 2 + 1; @@ -936,10 +933,10 @@ static void waiting_process_next(struct AdapterCtlBlk *acb) /* Wake up waiting queue */ -static void waiting_timeout(unsigned long ptr) +static void waiting_timeout(struct timer_list *t) { unsigned long flags; - struct AdapterCtlBlk *acb = (struct AdapterCtlBlk *)ptr; + struct AdapterCtlBlk *acb = from_timer(acb, t, waiting_timer); dprintkdbg(DBG_1, "waiting_timeout: Queue woken up by timer. 
acb=%p\n", acb); DC395x_LOCK_IO(acb->scsi_host, flags); @@ -4366,8 +4363,8 @@ static void adapter_init_params(struct AdapterCtlBlk *acb) INIT_LIST_HEAD(&acb->srb_free_list); /* temp SRB for Q tag used or abort command used */ acb->tmp_srb = &acb->srb; - init_timer(&acb->waiting_timer); - init_timer(&acb->selto_timer); + timer_setup(&acb->waiting_timer, waiting_timeout, 0); + timer_setup(&acb->selto_timer, NULL, 0); acb->srb_count = DC395x_MAX_SRB_CNT; diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c index 85f9a3eba387..5cc09dce4d25 100644 --- a/drivers/scsi/fcoe/fcoe.c +++ b/drivers/scsi/fcoe/fcoe.c @@ -754,7 +754,7 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev) skb_queue_head_init(&port->fcoe_pending_queue); port->fcoe_pending_queue_active = 0; - setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lport); + timer_setup(&port->timer, fcoe_queue_timer, 0); fcoe_link_speed_update(lport); diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c index 375c536cbc68..1ba5f51713a3 100644 --- a/drivers/scsi/fcoe/fcoe_transport.c +++ b/drivers/scsi/fcoe/fcoe_transport.c @@ -455,9 +455,11 @@ EXPORT_SYMBOL_GPL(fcoe_check_wait_queue); * * Calls fcoe_check_wait_queue on timeout */ -void fcoe_queue_timer(ulong lport) +void fcoe_queue_timer(struct timer_list *t) { - fcoe_check_wait_queue((struct fc_lport *)lport, NULL); + struct fcoe_port *port = from_timer(port, t, timer); + + fcoe_check_wait_queue(port->lport, NULL); } EXPORT_SYMBOL_GPL(fcoe_queue_timer); diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index a4473356a9dc..c35f05c4c6bb 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c @@ -3705,7 +3705,7 @@ static void gdth_log_event(gdth_evt_data *dvr, char *buffer) #ifdef GDTH_STATISTICS static u8 gdth_timer_running; -static void gdth_timeout(unsigned long data) +static void gdth_timeout(struct timer_list *unused) { u32 i; Scsi_Cmnd *nscp; @@ -3743,8 +3743,6 @@ static void gdth_timer_init(void) gdth_timer_running = 1; TRACE2(("gdth_detect(): Initializing timer !\n")); gdth_timer.expires = jiffies + HZ; - gdth_timer.data = 0L; - gdth_timer.function = gdth_timeout; add_timer(&gdth_timer); } #else @@ -5165,7 +5163,7 @@ static int __init gdth_init(void) /* initializations */ gdth_polling = TRUE; gdth_clear_events(); - init_timer(&gdth_timer); + timer_setup(&gdth_timer, gdth_timeout, 0); /* As default we do not probe for EISA or ISA controllers */ if (probe_eisa_isa) { diff --git a/drivers/scsi/hisi_sas/hisi_sas.h b/drivers/scsi/hisi_sas/hisi_sas.h index 07f4a4cfbec1..15692ea05ced 100644 --- a/drivers/scsi/hisi_sas/hisi_sas.h +++ b/drivers/scsi/hisi_sas/hisi_sas.h @@ -103,7 +103,6 @@ struct hisi_sas_phy { struct hisi_sas_port *port; struct asd_sas_phy sas_phy; struct sas_identify identify; - struct timer_list timer; struct work_struct phyup_ws; u64 port_id; /* from hw */ u64 dev_sas_addr; diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 16664f2e15fb..37c838be4757 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -627,7 +627,6 @@ static void hisi_sas_phy_init(struct hisi_hba *hisi_hba, int phy_no) phy->hisi_hba = hisi_hba; phy->port = NULL; - init_timer(&phy->timer); sas_phy->enabled = (phy_no < hisi_hba->n_phy) ? 
1 : 0; sas_phy->class = SAS; sas_phy->iproto = SAS_PROTOCOL_ALL; @@ -792,9 +791,10 @@ static void hisi_sas_task_done(struct sas_task *task) complete(&task->slow_task->completion); } -static void hisi_sas_tmf_timedout(unsigned long data) +static void hisi_sas_tmf_timedout(struct timer_list *t) { - struct sas_task *task = (struct sas_task *)data; + struct sas_task_slow *slow = from_timer(slow, t, timer); + struct sas_task *task = slow->task; unsigned long flags; spin_lock_irqsave(&task->task_state_lock, flags); @@ -833,8 +833,7 @@ static int hisi_sas_exec_internal_tmf_task(struct domain_device *device, } task->task_done = hisi_sas_task_done; - task->slow_task->timer.data = (unsigned long) task; - task->slow_task->timer.function = hisi_sas_tmf_timedout; + task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout; task->slow_task->timer.expires = jiffies + TASK_TIMEOUT*HZ; add_timer(&task->slow_task->timer); @@ -1447,8 +1446,7 @@ hisi_sas_internal_task_abort(struct hisi_hba *hisi_hba, task->dev = device; task->task_proto = device->tproto; task->task_done = hisi_sas_task_done; - task->slow_task->timer.data = (unsigned long)task; - task->slow_task->timer.function = hisi_sas_tmf_timedout; + task->slow_task->timer.function = (TIMER_FUNC_TYPE)hisi_sas_tmf_timedout; task->slow_task->timer.expires = jiffies + msecs_to_jiffies(110); add_timer(&task->slow_task->timer); @@ -1877,7 +1875,7 @@ static struct Scsi_Host *hisi_sas_shost_alloc(struct platform_device *pdev, hisi_hba->shost = shost; SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; - init_timer(&hisi_hba->timer); + timer_setup(&hisi_hba->timer, NULL, 0); if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c index 08eca20b0b81..9385554e43a6 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c @@ -807,9 +807,9 @@ static void phy_hard_reset_v1_hw(struct hisi_hba *hisi_hba, int phy_no) start_phy_v1_hw(hisi_hba, phy_no); } -static void start_phys_v1_hw(unsigned long data) +static void start_phys_v1_hw(struct timer_list *t) { - struct hisi_hba *hisi_hba = (struct hisi_hba *)data; + struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer); int i; for (i = 0; i < hisi_hba->n_phy; i++) { @@ -828,7 +828,7 @@ static void phys_init_v1_hw(struct hisi_hba *hisi_hba) hisi_sas_phy_read32(hisi_hba, i, CHL_INT2_MSK); } - setup_timer(timer, start_phys_v1_hw, (unsigned long)hisi_hba); + timer_setup(timer, start_phys_v1_hw, 0); mod_timer(timer, jiffies + HZ); } diff --git a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c index 779af979b6db..b1f097dabd01 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v2_hw.c @@ -728,7 +728,7 @@ enum { #define ERR_ON_RX_PHASE(err_phase) (err_phase == 0x10 || \ err_phase == 0x20 || err_phase == 0x40) -static void link_timeout_disable_link(unsigned long data); +static void link_timeout_disable_link(struct timer_list *t); static u32 hisi_sas_read32(struct hisi_hba *hisi_hba, u32 off) { @@ -1270,9 +1270,9 @@ static void init_reg_v2_hw(struct hisi_hba *hisi_hba) upper_32_bits(hisi_hba->initial_fis_dma)); } -static void link_timeout_enable_link(unsigned long data) +static void link_timeout_enable_link(struct timer_list *t) { - struct hisi_hba *hisi_hba = (struct hisi_hba *)data; + struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer); int i, reg_val; for (i = 0; i < hisi_hba->n_phy; i++) { @@ -1287,13 +1287,13 @@ static 
void link_timeout_enable_link(unsigned long data) } } - hisi_hba->timer.function = link_timeout_disable_link; + hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link; mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(900)); } -static void link_timeout_disable_link(unsigned long data) +static void link_timeout_disable_link(struct timer_list *t) { - struct hisi_hba *hisi_hba = (struct hisi_hba *)data; + struct hisi_hba *hisi_hba = from_timer(hisi_hba, t, timer); int i, reg_val; reg_val = hisi_sas_read32(hisi_hba, PHY_STATE); @@ -1308,14 +1308,13 @@ static void link_timeout_disable_link(unsigned long data) } } - hisi_hba->timer.function = link_timeout_enable_link; + hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_enable_link; mod_timer(&hisi_hba->timer, jiffies + msecs_to_jiffies(100)); } static void set_link_timer_quirk(struct hisi_hba *hisi_hba) { - hisi_hba->timer.data = (unsigned long)hisi_hba; - hisi_hba->timer.function = link_timeout_disable_link; + hisi_hba->timer.function = (TIMER_FUNC_TYPE)link_timeout_disable_link; hisi_hba->timer.expires = jiffies + msecs_to_jiffies(1000); add_timer(&hisi_hba->timer); } @@ -2574,9 +2573,9 @@ static int prep_ata_v2_hw(struct hisi_hba *hisi_hba, return 0; } -static void hisi_sas_internal_abort_quirk_timeout(unsigned long data) +static void hisi_sas_internal_abort_quirk_timeout(struct timer_list *t) { - struct hisi_sas_slot *slot = (struct hisi_sas_slot *)data; + struct hisi_sas_slot *slot = from_timer(slot, t, internal_abort_timer); struct hisi_sas_port *port = slot->port; struct asd_sas_port *asd_sas_port; struct asd_sas_phy *sas_phy; @@ -2619,8 +2618,7 @@ static int prep_abort_v2_hw(struct hisi_hba *hisi_hba, struct timer_list *timer = &slot->internal_abort_timer; /* setup the quirk timer */ - setup_timer(timer, hisi_sas_internal_abort_quirk_timeout, - (unsigned long)slot); + timer_setup(timer, hisi_sas_internal_abort_quirk_timeout, 0); /* Set the timeout to 10ms less than internal abort timeout */ mod_timer(timer, jiffies + msecs_to_jiffies(100)); diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c index 2e5fa9717be8..3f2f0baf2a5e 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c +++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c @@ -1823,7 +1823,7 @@ hisi_sas_shost_alloc_pci(struct pci_dev *pdev) hisi_hba->shost = shost; SHOST_TO_SAS_HA(shost) = &hisi_hba->sha; - init_timer(&hisi_hba->timer); + timer_setup(&hisi_hba->timer, NULL, 0); if (hisi_sas_get_fw_info(hisi_hba) < 0) goto err_out; diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index b491af31a5f8..0d2f7eb3acb6 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -1393,8 +1393,9 @@ static int ibmvfc_map_sg_data(struct scsi_cmnd *scmd, * * Called when an internally generated command times out **/ -static void ibmvfc_timeout(struct ibmvfc_event *evt) +static void ibmvfc_timeout(struct timer_list *t) { + struct ibmvfc_event *evt = from_timer(evt, t, timer); struct ibmvfc_host *vhost = evt->vhost; dev_err(vhost->dev, "Command timed out (%p). 
Resetting connection\n", evt); ibmvfc_reset_host(vhost); @@ -1424,12 +1425,10 @@ static int ibmvfc_send_event(struct ibmvfc_event *evt, BUG(); list_add_tail(&evt->queue, &vhost->sent); - init_timer(&evt->timer); + timer_setup(&evt->timer, ibmvfc_timeout, 0); if (timeout) { - evt->timer.data = (unsigned long) evt; evt->timer.expires = jiffies + (timeout * HZ); - evt->timer.function = (void (*)(unsigned long))ibmvfc_timeout; add_timer(&evt->timer); } @@ -3692,8 +3691,9 @@ static void ibmvfc_tgt_adisc_cancel_done(struct ibmvfc_event *evt) * out, reset the CRQ. When the ADISC comes back as cancelled, * log back into the target. **/ -static void ibmvfc_adisc_timeout(struct ibmvfc_target *tgt) +static void ibmvfc_adisc_timeout(struct timer_list *t) { + struct ibmvfc_target *tgt = from_timer(tgt, t, timer); struct ibmvfc_host *vhost = tgt->vhost; struct ibmvfc_event *evt; struct ibmvfc_tmf *tmf; @@ -3778,9 +3778,7 @@ static void ibmvfc_tgt_adisc(struct ibmvfc_target *tgt) if (timer_pending(&tgt->timer)) mod_timer(&tgt->timer, jiffies + (IBMVFC_ADISC_TIMEOUT * HZ)); else { - tgt->timer.data = (unsigned long) tgt; tgt->timer.expires = jiffies + (IBMVFC_ADISC_TIMEOUT * HZ); - tgt->timer.function = (void (*)(unsigned long))ibmvfc_adisc_timeout; add_timer(&tgt->timer); } @@ -3912,7 +3910,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id) tgt->vhost = vhost; tgt->need_login = 1; tgt->cancel_key = vhost->task_set++; - init_timer(&tgt->timer); + timer_setup(&tgt->timer, ibmvfc_adisc_timeout, 0); kref_init(&tgt->kref); ibmvfc_init_tgt(tgt, ibmvfc_tgt_implicit_logout); spin_lock_irqsave(vhost->host->host_lock, flags); diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c index 7d156b161482..17df76f0be3c 100644 --- a/drivers/scsi/ibmvscsi/ibmvscsi.c +++ b/drivers/scsi/ibmvscsi/ibmvscsi.c @@ -837,8 +837,9 @@ static void ibmvscsi_reset_host(struct ibmvscsi_host_data *hostdata) * * Called when an internally generated command times out */ -static void ibmvscsi_timeout(struct srp_event_struct *evt_struct) +static void ibmvscsi_timeout(struct timer_list *t) { + struct srp_event_struct *evt_struct = from_timer(evt_struct, t, timer); struct ibmvscsi_host_data *hostdata = evt_struct->hostdata; dev_err(hostdata->dev, "Command timed out (%x). 
Resetting connection\n", @@ -927,11 +928,9 @@ static int ibmvscsi_send_srp_event(struct srp_event_struct *evt_struct, */ list_add_tail(&evt_struct->list, &hostdata->sent); - init_timer(&evt_struct->timer); + timer_setup(&evt_struct->timer, ibmvscsi_timeout, 0); if (timeout) { - evt_struct->timer.data = (unsigned long) evt_struct; evt_struct->timer.expires = jiffies + (timeout * HZ); - evt_struct->timer.function = (void (*)(unsigned long))ibmvscsi_timeout; add_timer(&evt_struct->timer); } diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index f838bd73befa..d53429371127 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c @@ -694,7 +694,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd, ipr_cmd->sibling = NULL; ipr_cmd->eh_comp = NULL; ipr_cmd->fast_done = fast_done; - init_timer(&ipr_cmd->timer); + timer_setup(&ipr_cmd->timer, NULL, 0); } /** @@ -990,15 +990,14 @@ static void ipr_send_command(struct ipr_cmnd *ipr_cmd) **/ static void ipr_do_req(struct ipr_cmnd *ipr_cmd, void (*done) (struct ipr_cmnd *), - void (*timeout_func) (struct ipr_cmnd *), u32 timeout) + void (*timeout_func) (struct timer_list *), u32 timeout) { list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); ipr_cmd->done = done; - ipr_cmd->timer.data = (unsigned long) ipr_cmd; ipr_cmd->timer.expires = jiffies + timeout; - ipr_cmd->timer.function = (void (*)(unsigned long))timeout_func; + ipr_cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func; add_timer(&ipr_cmd->timer); @@ -1080,7 +1079,7 @@ static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr, * none **/ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd, - void (*timeout_func) (struct ipr_cmnd *ipr_cmd), + void (*timeout_func) (struct timer_list *), u32 timeout) { struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; @@ -2664,8 +2663,9 @@ static void ipr_process_error(struct ipr_cmnd *ipr_cmd) * Return value: * none **/ -static void ipr_timeout(struct ipr_cmnd *ipr_cmd) +static void ipr_timeout(struct timer_list *t) { + struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); unsigned long lock_flags = 0; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; @@ -2696,8 +2696,9 @@ static void ipr_timeout(struct ipr_cmnd *ipr_cmd) * Return value: * none **/ -static void ipr_oper_timeout(struct ipr_cmnd *ipr_cmd) +static void ipr_oper_timeout(struct timer_list *t) { + struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); unsigned long lock_flags = 0; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; @@ -5449,8 +5450,9 @@ static void ipr_bus_reset_done(struct ipr_cmnd *ipr_cmd) * Return value: * none **/ -static void ipr_abort_timeout(struct ipr_cmnd *ipr_cmd) +static void ipr_abort_timeout(struct timer_list *t) { + struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); struct ipr_cmnd *reset_cmd; struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; struct ipr_cmd_pkt *cmd_pkt; @@ -8271,8 +8273,9 @@ static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd) * Return value: * none **/ -static void ipr_reset_timer_done(struct ipr_cmnd *ipr_cmd) +static void ipr_reset_timer_done(struct timer_list *t) { + struct ipr_cmnd *ipr_cmd = from_timer(ipr_cmd, t, timer); struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; unsigned long lock_flags = 0; @@ -8308,9 +8311,8 @@ static void ipr_reset_start_timer(struct ipr_cmnd *ipr_cmd, list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); ipr_cmd->done = ipr_reset_ioa_job; - ipr_cmd->timer.data = (unsigned long) ipr_cmd; ipr_cmd->timer.expires = jiffies + timeout; - ipr_cmd->timer.function = 
(void (*)(unsigned long))ipr_reset_timer_done; + ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_reset_timer_done; add_timer(&ipr_cmd->timer); } @@ -8394,9 +8396,8 @@ static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd) } } - ipr_cmd->timer.data = (unsigned long) ipr_cmd; ipr_cmd->timer.expires = jiffies + stage_time * HZ; - ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; + ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout; ipr_cmd->done = ipr_reset_ioa_job; add_timer(&ipr_cmd->timer); @@ -8466,9 +8467,8 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd) return IPR_RC_JOB_CONTINUE; } - ipr_cmd->timer.data = (unsigned long) ipr_cmd; ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ); - ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout; + ipr_cmd->timer.function = (TIMER_FUNC_TYPE)ipr_oper_timeout; ipr_cmd->done = ipr_reset_ioa_job; add_timer(&ipr_cmd->timer); list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_pending_q); diff --git a/drivers/scsi/isci/host.c b/drivers/scsi/isci/host.c index 609dafd661d1..13b37cdffa8e 100644 --- a/drivers/scsi/isci/host.c +++ b/drivers/scsi/isci/host.c @@ -958,9 +958,9 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost) return status; } -static void phy_startup_timeout(unsigned long data) +static void phy_startup_timeout(struct timer_list *t) { - struct sci_timer *tmr = (struct sci_timer *)data; + struct sci_timer *tmr = from_timer(tmr, t, timer); struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer); unsigned long flags; enum sci_status status; @@ -1592,9 +1592,9 @@ static const struct sci_base_state sci_controller_state_table[] = { [SCIC_FAILED] = {} }; -static void controller_timeout(unsigned long data) +static void controller_timeout(struct timer_list *t) { - struct sci_timer *tmr = (struct sci_timer *)data; + struct sci_timer *tmr = from_timer(tmr, t, timer); struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer); struct sci_base_state_machine *sm = &ihost->sm; unsigned long flags; @@ -1737,9 +1737,9 @@ static u8 max_spin_up(struct isci_host *ihost) MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT); } -static void power_control_timeout(unsigned long data) +static void power_control_timeout(struct timer_list *t) { - struct sci_timer *tmr = (struct sci_timer *)data; + struct sci_timer *tmr = from_timer(tmr, t, timer); struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer); struct isci_phy *iphy; unsigned long flags; diff --git a/drivers/scsi/isci/isci.h b/drivers/scsi/isci/isci.h index 234ab46fce33..680e30947671 100644 --- a/drivers/scsi/isci/isci.h +++ b/drivers/scsi/isci/isci.h @@ -498,12 +498,10 @@ struct sci_timer { }; static inline -void sci_init_timer(struct sci_timer *tmr, void (*fn)(unsigned long)) +void sci_init_timer(struct sci_timer *tmr, void (*fn)(struct timer_list *t)) { - tmr->timer.function = fn; - tmr->timer.data = (unsigned long) tmr; tmr->cancel = 0; - init_timer(&tmr->timer); + timer_setup(&tmr->timer, fn, 0); } static inline void sci_mod_timer(struct sci_timer *tmr, unsigned long msec) diff --git a/drivers/scsi/isci/phy.c b/drivers/scsi/isci/phy.c index cb87b2ef7c92..1deca8c5a94f 100644 --- a/drivers/scsi/isci/phy.c +++ b/drivers/scsi/isci/phy.c @@ -315,9 +315,9 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy, return SCI_SUCCESS; } -static void phy_sata_timeout(unsigned long data) +static void phy_sata_timeout(struct timer_list *t) { - struct sci_timer *tmr = (struct 
sci_timer *)data; + struct sci_timer *tmr = from_timer(tmr, t, timer); struct isci_phy *iphy = container_of(tmr, typeof(*iphy), sata_timer); struct isci_host *ihost = iphy->owning_port->owning_controller; unsigned long flags; diff --git a/drivers/scsi/isci/port.c b/drivers/scsi/isci/port.c index a4dd5c91508c..1df45f028ea7 100644 --- a/drivers/scsi/isci/port.c +++ b/drivers/scsi/isci/port.c @@ -769,9 +769,9 @@ bool sci_port_link_detected(struct isci_port *iport, struct isci_phy *iphy) return true; } -static void port_timeout(unsigned long data) +static void port_timeout(struct timer_list *t) { - struct sci_timer *tmr = (struct sci_timer *)data; + struct sci_timer *tmr = from_timer(tmr, t, timer); struct isci_port *iport = container_of(tmr, typeof(*iport), timer); struct isci_host *ihost = iport->owning_controller; unsigned long flags; diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c index ac879745ef80..edb7be786c65 100644 --- a/drivers/scsi/isci/port_config.c +++ b/drivers/scsi/isci/port_config.c @@ -319,10 +319,10 @@ sci_mpc_agent_validate_phy_configuration(struct isci_host *ihost, return sci_port_configuration_agent_validate_ports(ihost, port_agent); } -static void mpc_agent_timeout(unsigned long data) +static void mpc_agent_timeout(struct timer_list *t) { u8 index; - struct sci_timer *tmr = (struct sci_timer *)data; + struct sci_timer *tmr = from_timer(tmr, t, timer); struct sci_port_configuration_agent *port_agent; struct isci_host *ihost; unsigned long flags; @@ -654,10 +654,10 @@ static void sci_apc_agent_link_down( } /* configure the phys into ports when the timer fires */ -static void apc_agent_timeout(unsigned long data) +static void apc_agent_timeout(struct timer_list *t) { u32 index; - struct sci_timer *tmr = (struct sci_timer *)data; + struct sci_timer *tmr = from_timer(tmr, t, timer); struct sci_port_configuration_agent *port_agent; struct isci_host *ihost; unsigned long flags; diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c index 772c35a5c49e..1a4e701a8449 100644 --- a/drivers/scsi/libfc/fc_fcp.c +++ b/drivers/scsi/libfc/fc_fcp.c @@ -97,7 +97,7 @@ static void fc_fcp_complete_locked(struct fc_fcp_pkt *); static void fc_tm_done(struct fc_seq *, struct fc_frame *, void *); static void fc_fcp_error(struct fc_fcp_pkt *, struct fc_frame *); static void fc_fcp_recovery(struct fc_fcp_pkt *, u8 code); -static void fc_fcp_timeout(unsigned long); +static void fc_fcp_timeout(struct timer_list *); static void fc_fcp_rec(struct fc_fcp_pkt *); static void fc_fcp_rec_error(struct fc_fcp_pkt *, struct fc_frame *); static void fc_fcp_rec_resp(struct fc_seq *, struct fc_frame *, void *); @@ -155,8 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp) fsp->lp = lport; fsp->xfer_ddp = FC_XID_UNKNOWN; refcount_set(&fsp->ref_cnt, 1); - init_timer(&fsp->timer); - fsp->timer.data = (unsigned long)fsp; + timer_setup(&fsp->timer, NULL, 0); INIT_LIST_HEAD(&fsp->list); spin_lock_init(&fsp->scsi_pkt_lock); } else { @@ -1215,7 +1214,7 @@ static int fc_fcp_cmd_send(struct fc_lport *lport, struct fc_fcp_pkt *fsp, fsp->seq_ptr = seq; fc_fcp_pkt_hold(fsp); /* hold for fc_fcp_pkt_destroy */ - setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); + fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout; if (rpriv->flags & FC_RP_FLAGS_REC_SUPPORTED) fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); @@ -1298,9 +1297,9 @@ static int fc_fcp_pkt_abort(struct fc_fcp_pkt *fsp) * fc_lun_reset_send() - Send LUN reset command * @data: 
The FCP packet that identifies the LUN to be reset */ -static void fc_lun_reset_send(unsigned long data) +static void fc_lun_reset_send(struct timer_list *t) { - struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; + struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer); struct fc_lport *lport = fsp->lp; if (lport->tt.fcp_cmd_send(lport, fsp, fc_tm_done)) { @@ -1308,7 +1307,7 @@ static void fc_lun_reset_send(unsigned long data) return; if (fc_fcp_lock_pkt(fsp)) return; - setup_timer(&fsp->timer, fc_lun_reset_send, (unsigned long)fsp); + fsp->timer.function = (TIMER_FUNC_TYPE)fc_lun_reset_send; fc_fcp_timer_set(fsp, get_fsp_rec_tov(fsp)); fc_fcp_unlock_pkt(fsp); } @@ -1334,7 +1333,7 @@ static int fc_lun_reset(struct fc_lport *lport, struct fc_fcp_pkt *fsp, fsp->wait_for_comp = 1; init_completion(&fsp->tm_done); - fc_lun_reset_send((unsigned long)fsp); + fc_lun_reset_send(&fsp->timer); /* * wait for completion of reset @@ -1431,9 +1430,9 @@ static void fc_fcp_cleanup(struct fc_lport *lport) * received we see if data was received recently. If it has been then we * continue waiting, otherwise, we abort the command. */ -static void fc_fcp_timeout(unsigned long data) +static void fc_fcp_timeout(struct timer_list *t) { - struct fc_fcp_pkt *fsp = (struct fc_fcp_pkt *)data; + struct fc_fcp_pkt *fsp = from_timer(fsp, t, timer); struct fc_rport *rport = fsp->rport; struct fc_rport_libfc_priv *rpriv = rport->dd_data; @@ -1446,7 +1445,7 @@ static void fc_fcp_timeout(unsigned long data) if (fsp->lp->qfull) { FC_FCP_DBG(fsp, "fcp timeout, resetting timer delay %d\n", fsp->timer_delay); - setup_timer(&fsp->timer, fc_fcp_timeout, (unsigned long)fsp); + fsp->timer.function = (TIMER_FUNC_TYPE)fc_fcp_timeout; fc_fcp_timer_set(fsp, fsp->timer_delay); goto unlock; } diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c index f8dc1601efd5..9c50d2d9f27c 100644 --- a/drivers/scsi/libiscsi.c +++ b/drivers/scsi/libiscsi.c @@ -1805,9 +1805,9 @@ int iscsi_target_alloc(struct scsi_target *starget) } EXPORT_SYMBOL_GPL(iscsi_target_alloc); -static void iscsi_tmf_timedout(unsigned long data) +static void iscsi_tmf_timedout(struct timer_list *t) { - struct iscsi_conn *conn = (struct iscsi_conn *)data; + struct iscsi_conn *conn = from_timer(conn, t, tmf_timer); struct iscsi_session *session = conn->session; spin_lock(&session->frwd_lock); @@ -1838,8 +1838,6 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn, } conn->tmfcmd_pdus_cnt++; conn->tmf_timer.expires = timeout * HZ + jiffies; - conn->tmf_timer.function = iscsi_tmf_timedout; - conn->tmf_timer.data = (unsigned long)conn; add_timer(&conn->tmf_timer); ISCSI_DBG_EH(session, "tmf set timeout\n"); @@ -2089,9 +2087,9 @@ done: } EXPORT_SYMBOL_GPL(iscsi_eh_cmd_timed_out); -static void iscsi_check_transport_timeouts(unsigned long data) +static void iscsi_check_transport_timeouts(struct timer_list *t) { - struct iscsi_conn *conn = (struct iscsi_conn *)data; + struct iscsi_conn *conn = from_timer(conn, t, transport_timer); struct iscsi_session *session = conn->session; unsigned long recv_timeout, next_timeout = 0, last_recv; @@ -2913,9 +2911,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, conn->exp_statsn = 0; conn->tmf_state = TMF_INITIAL; - init_timer(&conn->transport_timer); - conn->transport_timer.data = (unsigned long)conn; - conn->transport_timer.function = iscsi_check_transport_timeouts; + timer_setup(&conn->transport_timer, iscsi_check_transport_timeouts, 0); INIT_LIST_HEAD(&conn->mgmtqueue); INIT_LIST_HEAD(&conn->cmdqueue); @@ 
-2939,7 +2935,7 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size, goto login_task_data_alloc_fail; conn->login_task->data = conn->data = data; - init_timer(&conn->tmf_timer); + timer_setup(&conn->tmf_timer, iscsi_tmf_timedout, 0); init_waitqueue_head(&conn->ehwait); return cls_conn; diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index 6b4fd2375178..174e5eff6155 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c @@ -41,9 +41,10 @@ static int sas_disable_routing(struct domain_device *dev, u8 *sas_addr); /* ---------- SMP task management ---------- */ -static void smp_task_timedout(unsigned long _task) +static void smp_task_timedout(struct timer_list *t) { - struct sas_task *task = (void *) _task; + struct sas_task_slow *slow = from_timer(slow, t, timer); + struct sas_task *task = slow->task; unsigned long flags; spin_lock_irqsave(&task->task_state_lock, flags); @@ -91,8 +92,7 @@ static int smp_execute_task_sg(struct domain_device *dev, task->task_done = smp_task_done; - task->slow_task->timer.data = (unsigned long) task; - task->slow_task->timer.function = smp_task_timedout; + task->slow_task->timer.function = (TIMER_FUNC_TYPE)smp_task_timedout; task->slow_task->timer.expires = jiffies + SMP_TIMEOUT*HZ; add_timer(&task->slow_task->timer); diff --git a/drivers/scsi/libsas/sas_init.c b/drivers/scsi/libsas/sas_init.c index 64e9cdda1c3c..681fcb837354 100644 --- a/drivers/scsi/libsas/sas_init.c +++ b/drivers/scsi/libsas/sas_init.c @@ -66,7 +66,8 @@ struct sas_task *sas_alloc_slow_task(gfp_t flags) } task->slow_task = slow; - init_timer(&slow->timer); + slow->task = task; + timer_setup(&slow->timer, NULL, 0); init_completion(&slow->completion); return task; diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c index ea8ad06ff582..91795eb56206 100644 --- a/drivers/scsi/libsas/sas_scsi_host.c +++ b/drivers/scsi/libsas/sas_scsi_host.c @@ -919,7 +919,7 @@ void sas_task_abort(struct sas_task *task) return; if (!del_timer(&slow->timer)) return; - slow->timer.function(slow->timer.data); + slow->timer.function((TIMER_DATA_TYPE)&slow->timer); return; } diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h index 7e300734b345..4e858b38529a 100644 --- a/drivers/scsi/lpfc/lpfc_crtn.h +++ b/drivers/scsi/lpfc/lpfc_crtn.h @@ -113,7 +113,7 @@ void lpfc_disc_list_loopmap(struct lpfc_vport *); void lpfc_disc_start(struct lpfc_vport *); void lpfc_cleanup_discovery_resources(struct lpfc_vport *); void lpfc_cleanup(struct lpfc_vport *); -void lpfc_disc_timeout(unsigned long); +void lpfc_disc_timeout(struct timer_list *); int lpfc_unregister_fcf_prep(struct lpfc_hba *); struct lpfc_nodelist *__lpfc_findnode_rpi(struct lpfc_vport *, uint16_t); @@ -154,7 +154,7 @@ int lpfc_els_rsp_adisc_acc(struct lpfc_vport *, struct lpfc_iocbq *, int lpfc_els_rsp_prli_acc(struct lpfc_vport *, struct lpfc_iocbq *, struct lpfc_nodelist *); void lpfc_cancel_retry_delay_tmo(struct lpfc_vport *, struct lpfc_nodelist *); -void lpfc_els_retry_delay(unsigned long); +void lpfc_els_retry_delay(struct timer_list *); void lpfc_els_retry_delay_handler(struct lpfc_nodelist *); void lpfc_els_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *, struct lpfc_iocbq *); @@ -165,7 +165,7 @@ void lpfc_els_flush_all_cmd(struct lpfc_hba *); void lpfc_els_flush_cmd(struct lpfc_vport *); int lpfc_els_disc_adisc(struct lpfc_vport *); int lpfc_els_disc_plogi(struct lpfc_vport *); -void lpfc_els_timeout(unsigned 
long); +void lpfc_els_timeout(struct timer_list *); void lpfc_els_timeout_handler(struct lpfc_vport *); struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t, uint8_t, struct lpfc_nodelist *, @@ -180,7 +180,7 @@ int lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *iocbq); int lpfc_ns_cmd(struct lpfc_vport *, int, uint8_t, uint32_t); int lpfc_fdmi_cmd(struct lpfc_vport *, struct lpfc_nodelist *, int, uint32_t); void lpfc_fdmi_num_disc_check(struct lpfc_vport *); -void lpfc_delayed_disc_tmo(unsigned long); +void lpfc_delayed_disc_tmo(struct timer_list *); void lpfc_delayed_disc_timeout_handler(struct lpfc_vport *); int lpfc_config_port_prep(struct lpfc_hba *); @@ -279,9 +279,9 @@ void lpfc_mem_free(struct lpfc_hba *); void lpfc_mem_free_all(struct lpfc_hba *); void lpfc_stop_vport_timers(struct lpfc_vport *); -void lpfc_poll_timeout(unsigned long ptr); +void lpfc_poll_timeout(struct timer_list *t); void lpfc_poll_start_timer(struct lpfc_hba *); -void lpfc_poll_eratt(unsigned long); +void lpfc_poll_eratt(struct timer_list *); int lpfc_sli_handle_fast_ring_event(struct lpfc_hba *, struct lpfc_sli_ring *, uint32_t); @@ -351,7 +351,7 @@ int lpfc_sli_abort_taskmgmt(struct lpfc_vport *, struct lpfc_sli_ring *, uint16_t, uint64_t, lpfc_ctx_cmd); -void lpfc_mbox_timeout(unsigned long); +void lpfc_mbox_timeout(struct timer_list *t); void lpfc_mbox_timeout_handler(struct lpfc_hba *); struct lpfc_nodelist *lpfc_findnode_did(struct lpfc_vport *, uint32_t); @@ -445,7 +445,7 @@ extern unsigned int lpfc_fcp_look_ahead; /* Interface exported by fabric iocb scheduler */ void lpfc_fabric_abort_nport(struct lpfc_nodelist *); void lpfc_fabric_abort_hba(struct lpfc_hba *); -void lpfc_fabric_block_timeout(unsigned long); +void lpfc_fabric_block_timeout(struct timer_list *); void lpfc_unblock_fabric_iocbs(struct lpfc_hba *); void lpfc_rampdown_queue_depth(struct lpfc_hba *); void lpfc_ramp_down_queue_handler(struct lpfc_hba *); diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 33417681f5d4..f77673ab4a84 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -2884,9 +2884,9 @@ fdmi_cmd_exit: * the worker thread. **/ void -lpfc_delayed_disc_tmo(unsigned long ptr) +lpfc_delayed_disc_tmo(struct timer_list *t) { - struct lpfc_vport *vport = (struct lpfc_vport *)ptr; + struct lpfc_vport *vport = from_timer(vport, t, delayed_disc_tmo); struct lpfc_hba *phba = vport->phba; uint32_t tmo_posted; unsigned long iflag; diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c index 468a66371de9..0dd6c21433fe 100644 --- a/drivers/scsi/lpfc/lpfc_els.c +++ b/drivers/scsi/lpfc/lpfc_els.c @@ -3131,9 +3131,9 @@ lpfc_cancel_retry_delay_tmo(struct lpfc_vport *vport, struct lpfc_nodelist *nlp) * to the event associated with the ndlp. **/ void -lpfc_els_retry_delay(unsigned long ptr) +lpfc_els_retry_delay(struct timer_list *t) { - struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) ptr; + struct lpfc_nodelist *ndlp = from_timer(ndlp, t, nlp_delayfunc); struct lpfc_vport *vport = ndlp->vport; struct lpfc_hba *phba = vport->phba; unsigned long flags; @@ -7385,9 +7385,9 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb, * lpfc_els_timeout_handler() to work on the posted event WORKER_ELS_TMO. 
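The lpfc prototype changes above convert a driver that embeds many timers in the same few structures; each new callback names its own member in from_timer() (delayed_disc_tmo, nlp_delayfunc, and so on), and that third argument is what selects the correct container offset. A sketch with invented field names:

#include <linux/timer.h>

struct hypo_hba {
	struct timer_list heartbeat_timer;
	struct timer_list poll_timer;
};

static void hypo_heartbeat_timedout(struct timer_list *t)
{
	struct hypo_hba *hba = from_timer(hba, t, heartbeat_timer);

	(void)hba;
}

static void hypo_poll_timedout(struct timer_list *t)
{
	/* Different member, same containing structure. */
	struct hypo_hba *hba = from_timer(hba, t, poll_timer);

	(void)hba;
}

static void hypo_hba_init_timers(struct hypo_hba *hba)
{
	timer_setup(&hba->heartbeat_timer, hypo_heartbeat_timedout, 0);
	timer_setup(&hba->poll_timer, hypo_poll_timedout, 0);
}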
**/ void -lpfc_els_timeout(unsigned long ptr) +lpfc_els_timeout(struct timer_list *t) { - struct lpfc_vport *vport = (struct lpfc_vport *) ptr; + struct lpfc_vport *vport = from_timer(vport, t, els_tmofunc); struct lpfc_hba *phba = vport->phba; uint32_t tmo_posted; unsigned long iflag; @@ -9017,9 +9017,9 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp) * posted event WORKER_FABRIC_BLOCK_TMO. **/ void -lpfc_fabric_block_timeout(unsigned long ptr) +lpfc_fabric_block_timeout(struct timer_list *t) { - struct lpfc_hba *phba = (struct lpfc_hba *) ptr; + struct lpfc_hba *phba = from_timer(phba, t, fabric_block_timer); unsigned long iflags; uint32_t tmo_posted; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index 20808349a80e..8d491084eb5d 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -4370,8 +4370,7 @@ lpfc_initialize_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, { INIT_LIST_HEAD(&ndlp->els_retry_evt.evt_listp); INIT_LIST_HEAD(&ndlp->dev_loss_evt.evt_listp); - setup_timer(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, - (unsigned long)ndlp); + timer_setup(&ndlp->nlp_delayfunc, lpfc_els_retry_delay, 0); ndlp->nlp_DID = did; ndlp->vport = vport; ndlp->phba = vport->phba; @@ -5508,9 +5507,9 @@ lpfc_cleanup_discovery_resources(struct lpfc_vport *vport) */ /*****************************************************************************/ void -lpfc_disc_timeout(unsigned long ptr) +lpfc_disc_timeout(struct timer_list *t) { - struct lpfc_vport *vport = (struct lpfc_vport *) ptr; + struct lpfc_vport *vport = from_timer(vport, t, fc_disctmo); struct lpfc_hba *phba = vport->phba; uint32_t tmo_posted; unsigned long flags = 0; diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 100bc4c8798d..6a1e28ba9258 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -1138,13 +1138,13 @@ lpfc_hba_down_post(struct lpfc_hba *phba) * be cleared by the worker thread after it has taken the event bitmap out. **/ static void -lpfc_hb_timeout(unsigned long ptr) +lpfc_hb_timeout(struct timer_list *t) { struct lpfc_hba *phba; uint32_t tmo_posted; unsigned long iflag; - phba = (struct lpfc_hba *)ptr; + phba = from_timer(phba, t, hb_tmofunc); /* Check for heart beat timeout conditions */ spin_lock_irqsave(&phba->pport->work_port_lock, iflag); @@ -1172,12 +1172,12 @@ lpfc_hb_timeout(unsigned long ptr) * be cleared by the worker thread after it has taken the event bitmap out. 
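timer_setup() only initializes; arming stays a separate step in these conversions. bnx2i earlier fills in .expires and calls add_timer(), while drivers such as cxgbi and hisi_sas use mod_timer(), which sets the expiry and (re)arms in one call. A small sketch of the two equivalent forms, with an assumed handler:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void hypo_timedout(struct timer_list *t)
{
}

static void hypo_arm_with_add_timer(struct timer_list *timer)
{
	timer_setup(timer, hypo_timedout, 0);
	timer->expires = jiffies + 2 * HZ;	/* absolute expiry in jiffies */
	add_timer(timer);
}

static void hypo_arm_with_mod_timer(struct timer_list *timer)
{
	timer_setup(timer, hypo_timedout, 0);
	mod_timer(timer, jiffies + 2 * HZ);	/* sets expiry and arms */
}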
**/ static void -lpfc_rrq_timeout(unsigned long ptr) +lpfc_rrq_timeout(struct timer_list *t) { struct lpfc_hba *phba; unsigned long iflag; - phba = (struct lpfc_hba *)ptr; + phba = from_timer(phba, t, rrq_tmr); spin_lock_irqsave(&phba->pport->work_port_lock, iflag); if (!(phba->pport->load_flag & FC_UNLOADING)) phba->hba_flag |= HBA_RRQ_ACTIVE; @@ -3937,14 +3937,11 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) INIT_LIST_HEAD(&vport->rcv_buffer_list); spin_lock_init(&vport->work_port_lock); - setup_timer(&vport->fc_disctmo, lpfc_disc_timeout, - (unsigned long)vport); + timer_setup(&vport->fc_disctmo, lpfc_disc_timeout, 0); - setup_timer(&vport->els_tmofunc, lpfc_els_timeout, - (unsigned long)vport); + timer_setup(&vport->els_tmofunc, lpfc_els_timeout, 0); - setup_timer(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, - (unsigned long)vport); + timer_setup(&vport->delayed_disc_tmo, lpfc_delayed_disc_tmo, 0); error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) @@ -4210,9 +4207,9 @@ lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *phba) * worker thread context. **/ static void -lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr) +lpfc_sli4_fcf_redisc_wait_tmo(struct timer_list *t) { - struct lpfc_hba *phba = (struct lpfc_hba *)ptr; + struct lpfc_hba *phba = from_timer(phba, t, fcf.redisc_wait); /* Don't send FCF rediscovery event if timer cancelled */ spin_lock_irq(&phba->hbalock); @@ -5624,15 +5621,13 @@ lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba) INIT_LIST_HEAD(&phba->luns); /* MBOX heartbeat timer */ - setup_timer(&psli->mbox_tmo, lpfc_mbox_timeout, (unsigned long)phba); + timer_setup(&psli->mbox_tmo, lpfc_mbox_timeout, 0); /* Fabric block timer */ - setup_timer(&phba->fabric_block_timer, lpfc_fabric_block_timeout, - (unsigned long)phba); + timer_setup(&phba->fabric_block_timer, lpfc_fabric_block_timeout, 0); /* EA polling mode timer */ - setup_timer(&phba->eratt_poll, lpfc_poll_eratt, - (unsigned long)phba); + timer_setup(&phba->eratt_poll, lpfc_poll_eratt, 0); /* Heartbeat timer */ - setup_timer(&phba->hb_tmofunc, lpfc_hb_timeout, (unsigned long)phba); + timer_setup(&phba->hb_tmofunc, lpfc_hb_timeout, 0); return 0; } @@ -5658,8 +5653,7 @@ lpfc_sli_driver_resource_setup(struct lpfc_hba *phba) */ /* FCP polling mode timer */ - setup_timer(&phba->fcp_poll_timer, lpfc_poll_timeout, - (unsigned long)phba); + timer_setup(&phba->fcp_poll_timer, lpfc_poll_timeout, 0); /* Host attention work mask setup */ phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT); @@ -5829,11 +5823,10 @@ lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba) * Initialize timers used by driver */ - setup_timer(&phba->rrq_tmr, lpfc_rrq_timeout, (unsigned long)phba); + timer_setup(&phba->rrq_tmr, lpfc_rrq_timeout, 0); /* FCF rediscover timer */ - setup_timer(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, - (unsigned long)phba); + timer_setup(&phba->fcf.redisc_wait, lpfc_sli4_fcf_redisc_wait_tmo, 0); /* * Control structure for handling external multi-buffer mailbox diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c index 1a6f122bb25d..c0cdaef4db24 100644 --- a/drivers/scsi/lpfc/lpfc_scsi.c +++ b/drivers/scsi/lpfc/lpfc_scsi.c @@ -4501,9 +4501,9 @@ void lpfc_poll_start_timer(struct lpfc_hba * phba) * and FCP Ring interrupt is disable. 
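lpfc_sli4_fcf_redisc_wait_tmo() above recovers the hba with from_timer(phba, t, fcf.redisc_wait); because from_timer() is container_of() underneath, a nested member path works the same as a direct one. A brief sketch assuming invented structures:

#include <linux/timer.h>

struct hypo_fcf_state {
	struct timer_list redisc_timer;
};

struct hypo_port {
	struct hypo_fcf_state fcf;	/* the timer sits one level down */
};

static void hypo_fcf_redisc_timedout(struct timer_list *t)
{
	/* Nested designator: offsetof() resolves fcf.redisc_timer. */
	struct hypo_port *port = from_timer(port, t, fcf.redisc_timer);

	(void)port;
}

static void hypo_port_init(struct hypo_port *port)
{
	timer_setup(&port->fcf.redisc_timer, hypo_fcf_redisc_timedout, 0);
}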
**/ -void lpfc_poll_timeout(unsigned long ptr) +void lpfc_poll_timeout(struct timer_list *t) { - struct lpfc_hba *phba = (struct lpfc_hba *) ptr; + struct lpfc_hba *phba = from_timer(phba, t, fcp_poll_timer); if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) { lpfc_sli_handle_fast_ring_event(phba, diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c index 8b119f87b51d..4edb81073409 100644 --- a/drivers/scsi/lpfc/lpfc_sli.c +++ b/drivers/scsi/lpfc/lpfc_sli.c @@ -3004,13 +3004,13 @@ lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring) * and wake up worker thread to process it. Otherwise, it will set up the * Error Attention polling timer for the next poll. **/ -void lpfc_poll_eratt(unsigned long ptr) +void lpfc_poll_eratt(struct timer_list *t) { struct lpfc_hba *phba; uint32_t eratt = 0; uint64_t sli_intr, cnt; - phba = (struct lpfc_hba *)ptr; + phba = from_timer(phba, t, eratt_poll); /* Here we will also keep track of interrupts per sec of the hba */ sli_intr = phba->sli.slistat.sli_intr; @@ -7167,9 +7167,9 @@ out_free_mbox: * done by the worker thread function lpfc_mbox_timeout_handler. **/ void -lpfc_mbox_timeout(unsigned long ptr) +lpfc_mbox_timeout(struct timer_list *t) { - struct lpfc_hba *phba = (struct lpfc_hba *) ptr; + struct lpfc_hba *phba = from_timer(phba, t, sli.mbox_tmo); unsigned long iflag; uint32_t tmo_posted; diff --git a/drivers/scsi/megaraid/megaraid_ioctl.h b/drivers/scsi/megaraid/megaraid_ioctl.h index 05f6e4ec3453..eedcbde46459 100644 --- a/drivers/scsi/megaraid/megaraid_ioctl.h +++ b/drivers/scsi/megaraid/megaraid_ioctl.h @@ -19,6 +19,7 @@ #include <linux/types.h> #include <linux/semaphore.h> +#include <linux/timer.h> #include "mbox_defs.h" @@ -153,6 +154,11 @@ typedef struct uioc { } __attribute__ ((aligned(1024),packed)) uioc_t; +/* For on-stack uioc timers. */ +struct uioc_timeout { + struct timer_list timer; + uioc_t *uioc; +}; /** * struct mraid_hba_info - information about the controller diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c index ec3c43854978..530358cdcb39 100644 --- a/drivers/scsi/megaraid/megaraid_mbox.c +++ b/drivers/scsi/megaraid/megaraid_mbox.c @@ -3904,19 +3904,19 @@ megaraid_sysfs_get_ldmap_done(uioc_t *uioc) wake_up(&raid_dev->sysfs_wait_q); } - /** * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap - * @data : timed out packet + * @t : timed out timer * * Timeout routine to recover and return to application, in case the adapter * has stopped responding. A timeout of 60 seconds for this command seems like * a good value. 
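The megaraid_mbox hunk here replaces an on-stack struct timer_list with the struct uioc_timeout wrapper introduced above, so the callback can reach the uioc_t through from_timer(); on-stack timers also need the _on_stack variants so the timer debug objects are set up and torn down correctly. A condensed sketch of that flow under invented names:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct hypo_ioctl_packet {
	int timedout;
};

/* Wrapper so a stack timer can carry a pointer to the packet it guards. */
struct hypo_ioctl_timeout {
	struct timer_list timer;
	struct hypo_ioctl_packet *pkt;
};

static void hypo_ioctl_timedout(struct timer_list *t)
{
	struct hypo_ioctl_timeout *timeout = from_timer(timeout, t, timer);

	timeout->pkt->timedout = 1;
}

static void hypo_issue_ioctl(struct hypo_ioctl_packet *pkt)
{
	struct hypo_ioctl_timeout timeout = { };

	timeout.pkt = pkt;
	timer_setup_on_stack(&timeout.timer, hypo_ioctl_timedout, 0);
	timeout.timer.expires = jiffies + 60 * HZ;
	add_timer(&timeout.timer);

	/* ... wait for the firmware reply or the timeout flag ... */

	del_timer_sync(&timeout.timer);
	destroy_timer_on_stack(&timeout.timer);
}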
*/ static void -megaraid_sysfs_get_ldmap_timeout(unsigned long data) +megaraid_sysfs_get_ldmap_timeout(struct timer_list *t) { - uioc_t *uioc = (uioc_t *)data; + struct uioc_timeout *timeout = from_timer(timeout, t, timer); + uioc_t *uioc = timeout->uioc; adapter_t *adapter = (adapter_t *)uioc->buf_vaddr; mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter); @@ -3951,8 +3951,7 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter) mbox64_t *mbox64; mbox_t *mbox; char *raw_mbox; - struct timer_list sysfs_timer; - struct timer_list *timerp; + struct uioc_timeout timeout; caddr_t ldmap; int rval = 0; @@ -3988,14 +3987,12 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter) /* * Setup a timer to recover from a non-responding controller */ - timerp = &sysfs_timer; - init_timer(timerp); - - timerp->function = megaraid_sysfs_get_ldmap_timeout; - timerp->data = (unsigned long)uioc; - timerp->expires = jiffies + 60 * HZ; + timeout.uioc = uioc; + timer_setup_on_stack(&timeout.timer, + megaraid_sysfs_get_ldmap_timeout, 0); - add_timer(timerp); + timeout.timer.expires = jiffies + 60 * HZ; + add_timer(&timeout.timer); /* * Send the command to the firmware @@ -4033,7 +4030,8 @@ megaraid_sysfs_get_ldmap(adapter_t *adapter) } - del_timer_sync(timerp); + del_timer_sync(&timeout.timer); + destroy_timer_on_stack(&timeout.timer); mutex_unlock(&raid_dev->sysfs_mtx); diff --git a/drivers/scsi/megaraid/megaraid_mm.c b/drivers/scsi/megaraid/megaraid_mm.c index 65b6f6ace3a5..bb802b0c12b8 100644 --- a/drivers/scsi/megaraid/megaraid_mm.c +++ b/drivers/scsi/megaraid/megaraid_mm.c @@ -35,7 +35,7 @@ static int kioc_to_mimd(uioc_t *, mimd_t __user *); static int handle_drvrcmd(void __user *, uint8_t, int *); static int lld_ioctl(mraid_mmadp_t *, uioc_t *); static void ioctl_done(uioc_t *); -static void lld_timedout(unsigned long); +static void lld_timedout(struct timer_list *); static void hinfo_to_cinfo(mraid_hba_info_t *, mcontroller_t *); static mraid_mmadp_t *mraid_mm_get_adapter(mimd_t __user *, int *); static uioc_t *mraid_mm_alloc_kioc(mraid_mmadp_t *); @@ -686,8 +686,7 @@ static int lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc) { int rval; - struct timer_list timer; - struct timer_list *tp = NULL; + struct uioc_timeout timeout = { }; kioc->status = -ENODATA; rval = adp->issue_uioc(adp->drvr_data, kioc, IOCTL_ISSUE); @@ -698,14 +697,12 @@ lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc) * Start the timer */ if (adp->timeout > 0) { - tp = &timer; - init_timer(tp); + timeout.uioc = kioc; + timer_setup_on_stack(&timeout.timer, lld_timedout, 0); - tp->function = lld_timedout; - tp->data = (unsigned long)kioc; - tp->expires = jiffies + adp->timeout * HZ; + timeout.timer.expires = jiffies + adp->timeout * HZ; - add_timer(tp); + add_timer(&timeout.timer); } /* @@ -713,8 +710,9 @@ lld_ioctl(mraid_mmadp_t *adp, uioc_t *kioc) * call, the ioctl either completed successfully or timedout. 
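lld_ioctl() above declares struct uioc_timeout timeout = { } and only arms it when adp->timeout is positive; the teardown that follows tests timeout.timer.function, which stays NULL unless timer_setup_on_stack() ran. A tiny sketch of the same check with assumed names:

#include <linux/timer.h>
#include <linux/jiffies.h>

static void hypo_timedout(struct timer_list *t)
{
}

static void hypo_wait_maybe_timed(unsigned int timeout_secs)
{
	struct timer_list timer = { };		/* .function stays NULL if unused */

	if (timeout_secs) {
		timer_setup_on_stack(&timer, hypo_timedout, 0);
		mod_timer(&timer, jiffies + timeout_secs * HZ);
	}

	/* ... wait for completion ... */

	if (timer.function) {			/* was it armed above? */
		del_timer_sync(&timer);
		destroy_timer_on_stack(&timer);
	}
}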
*/ wait_event(wait_q, (kioc->status != -ENODATA)); - if (tp) { - del_timer_sync(tp); + if (timeout.timer.function) { + del_timer_sync(&timeout.timer); + destroy_timer_on_stack(&timeout.timer); } /* @@ -783,12 +781,13 @@ ioctl_done(uioc_t *kioc) /** * lld_timedout - callback from the expired timer - * @ptr : ioctl packet that timed out + * @t : timer that timed out */ static void -lld_timedout(unsigned long ptr) +lld_timedout(struct timer_list *t) { - uioc_t *kioc = (uioc_t *)ptr; + struct uioc_timeout *timeout = from_timer(timeout, t, timer); + uioc_t *kioc = timeout->uioc; kioc->status = -ETIME; kioc->timedout = 1; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index e518dadc8161..a36e18156e49 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -2114,22 +2114,19 @@ static void megasas_complete_cmd_dpc(unsigned long instance_addr) megasas_check_and_restore_queue_depth(instance); } +static void megasas_sriov_heartbeat_handler(struct timer_list *t); + /** - * megasas_start_timer - Initializes a timer object + * megasas_start_timer - Initializes sriov heartbeat timer object * @instance: Adapter soft state - * @timer: timer object to be initialized - * @fn: timer function - * @interval: time interval between timer function call * */ -void megasas_start_timer(struct megasas_instance *instance, - struct timer_list *timer, - void *fn, unsigned long interval) -{ - init_timer(timer); - timer->expires = jiffies + interval; - timer->data = (unsigned long)instance; - timer->function = fn; +void megasas_start_timer(struct megasas_instance *instance) +{ + struct timer_list *timer = &instance->sriov_heartbeat_timer; + + timer_setup(timer, megasas_sriov_heartbeat_handler, 0); + timer->expires = jiffies + MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF; add_timer(timer); } @@ -2515,10 +2512,10 @@ out: } /* Handler for SR-IOV heartbeat */ -void megasas_sriov_heartbeat_handler(unsigned long instance_addr) +static void megasas_sriov_heartbeat_handler(struct timer_list *t) { struct megasas_instance *instance = - (struct megasas_instance *)instance_addr; + from_timer(instance, t, sriov_heartbeat_timer); if (instance->hb_host_mem->HB.fwCounter != instance->hb_host_mem->HB.driverCounter) { @@ -5493,10 +5490,7 @@ static int megasas_init_fw(struct megasas_instance *instance) /* Launch SR-IOV heartbeat timer */ if (instance->requestorId) { if (!megasas_sriov_start_heartbeat(instance, 1)) - megasas_start_timer(instance, - &instance->sriov_heartbeat_timer, - megasas_sriov_heartbeat_handler, - MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); + megasas_start_timer(instance); else instance->skip_heartbeat_timer_del = 1; } @@ -6507,10 +6501,7 @@ megasas_resume(struct pci_dev *pdev) /* Re-launch SR-IOV heartbeat timer */ if (instance->requestorId) { if (!megasas_sriov_start_heartbeat(instance, 0)) - megasas_start_timer(instance, - &instance->sriov_heartbeat_timer, - megasas_sriov_heartbeat_handler, - MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); + megasas_start_timer(instance); else { instance->skip_heartbeat_timer_del = 1; goto fail_init_mfi; diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c index 11bd2e698b84..3c399e7b3fe1 100644 --- a/drivers/scsi/megaraid/megaraid_sas_fusion.c +++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c @@ -85,12 +85,9 @@ int megasas_transition_to_ready(struct megasas_instance *instance, int ocr); void megaraid_sas_kill_hba(struct megasas_instance *instance); extern u32 
megasas_dbg_lvl; -void megasas_sriov_heartbeat_handler(unsigned long instance_addr); int megasas_sriov_start_heartbeat(struct megasas_instance *instance, int initial); -void megasas_start_timer(struct megasas_instance *instance, - struct timer_list *timer, - void *fn, unsigned long interval); +void megasas_start_timer(struct megasas_instance *instance); extern struct megasas_mgmt_info megasas_mgmt_info; extern unsigned int resetwaittime; extern unsigned int dual_qdepth_disable; @@ -4369,10 +4366,7 @@ transition_to_ready: /* Restart SR-IOV heartbeat */ if (instance->requestorId) { if (!megasas_sriov_start_heartbeat(instance, 0)) - megasas_start_timer(instance, - &instance->sriov_heartbeat_timer, - megasas_sriov_heartbeat_handler, - MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); + megasas_start_timer(instance); else instance->skip_heartbeat_timer_del = 1; } @@ -4404,10 +4398,7 @@ fail_kill_adapter: } else { /* For VF: Restart HB timer if we didn't OCR */ if (instance->requestorId) { - megasas_start_timer(instance, - &instance->sriov_heartbeat_timer, - megasas_sriov_heartbeat_handler, - MEGASAS_SRIOV_HEARTBEAT_INTERVAL_VF); + megasas_start_timer(instance); } clear_bit(MEGASAS_FUSION_IN_RESET, &instance->reset_flags); instance->instancet->enable_intr(instance); diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c index 718c88de328b..8c91637cd598 100644 --- a/drivers/scsi/mvsas/mv_init.c +++ b/drivers/scsi/mvsas/mv_init.c @@ -95,7 +95,7 @@ static void mvs_phy_init(struct mvs_info *mvi, int phy_id) phy->mvi = mvi; phy->port = NULL; - init_timer(&phy->timer); + timer_setup(&phy->timer, NULL, 0); sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0; sas_phy->class = SAS; sas_phy->iproto = SAS_PROTOCOL_ALL; @@ -248,7 +248,6 @@ static int mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost) mvi->devices[i].dev_type = SAS_PHY_UNUSED; mvi->devices[i].device_id = i; mvi->devices[i].dev_status = MVS_DEV_NORMAL; - init_timer(&mvi->devices[i].timer); } /* diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c index ee81d10252e0..cff1c37b8d2e 100644 --- a/drivers/scsi/mvsas/mv_sas.c +++ b/drivers/scsi/mvsas/mv_sas.c @@ -1283,9 +1283,10 @@ static void mvs_task_done(struct sas_task *task) complete(&task->slow_task->completion); } -static void mvs_tmf_timedout(unsigned long data) +static void mvs_tmf_timedout(struct timer_list *t) { - struct sas_task *task = (struct sas_task *)data; + struct sas_task_slow *slow = from_timer(slow, t, timer); + struct sas_task *task = slow->task; task->task_state_flags |= SAS_TASK_STATE_ABORTED; complete(&task->slow_task->completion); @@ -1309,8 +1310,7 @@ static int mvs_exec_internal_tmf_task(struct domain_device *dev, memcpy(&task->ssp_task, parameter, para_len); task->task_done = mvs_task_done; - task->slow_task->timer.data = (unsigned long) task; - task->slow_task->timer.function = mvs_tmf_timedout; + task->slow_task->timer.function = (TIMER_FUNC_TYPE)mvs_tmf_timedout; task->slow_task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ; add_timer(&task->slow_task->timer); @@ -1954,9 +1954,9 @@ static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler) return ret; } -static void mvs_sig_time_out(unsigned long tphy) +static void mvs_sig_time_out(struct timer_list *t) { - struct mvs_phy *phy = (struct mvs_phy *)tphy; + struct mvs_phy *phy = from_timer(phy, t, timer); struct mvs_info *mvi = phy->mvi; u8 phy_no; @@ -2020,8 +2020,7 @@ void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events) MVS_CHIP_DISP->write_port_irq_mask(mvi, 
phy_no, tmp | PHYEV_SIG_FIS); if (phy->timer.function == NULL) { - phy->timer.data = (unsigned long)phy; - phy->timer.function = mvs_sig_time_out; + phy->timer.function = (TIMER_FUNC_TYPE)mvs_sig_time_out; phy->timer.expires = jiffies + 5*HZ; add_timer(&phy->timer); } diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h index f9afd4cdd4c4..080676c1c9e5 100644 --- a/drivers/scsi/mvsas/mv_sas.h +++ b/drivers/scsi/mvsas/mv_sas.h @@ -247,7 +247,6 @@ struct mvs_device { enum sas_device_type dev_type; struct mvs_info *mvi_info; struct domain_device *sas_device; - struct timer_list timer; u32 attached_phy; u32 device_id; u32 running_req; diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index ce584c31d36e..7b2f92ae9866 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -656,9 +656,10 @@ void pm8001_task_done(struct sas_task *task) complete(&task->slow_task->completion); } -static void pm8001_tmf_timedout(unsigned long data) +static void pm8001_tmf_timedout(struct timer_list *t) { - struct sas_task *task = (struct sas_task *)data; + struct sas_task_slow *slow = from_timer(slow, t, timer); + struct sas_task *task = slow->task; task->task_state_flags |= SAS_TASK_STATE_ABORTED; complete(&task->slow_task->completion); @@ -694,8 +695,7 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, task->task_proto = dev->tproto; memcpy(&task->ssp_task, parameter, para_len); task->task_done = pm8001_task_done; - task->slow_task->timer.data = (unsigned long)task; - task->slow_task->timer.function = pm8001_tmf_timedout; + task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout; task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT*HZ; add_timer(&task->slow_task->timer); @@ -781,8 +781,7 @@ pm8001_exec_internal_task_abort(struct pm8001_hba_info *pm8001_ha, task->dev = dev; task->task_proto = dev->tproto; task->task_done = pm8001_task_done; - task->slow_task->timer.data = (unsigned long)task; - task->slow_task->timer.function = pm8001_tmf_timedout; + task->slow_task->timer.function = (TIMER_FUNC_TYPE)pm8001_tmf_timedout; task->slow_task->timer.expires = jiffies + PM8001_TASK_TIMEOUT * HZ; add_timer(&task->slow_task->timer); diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index b4d6cd8cd1ad..4f9f115fb6a0 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c @@ -348,7 +348,7 @@ static void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index) cmd->sense_buffer = NULL; cmd->sense_buffer_dma = 0; cmd->dma_handle = 0; - init_timer(&cmd->timer); + timer_setup(&cmd->timer, NULL, 0); } /** @@ -557,8 +557,9 @@ static void pmcraid_reset_type(struct pmcraid_instance *pinstance) static void pmcraid_ioa_reset(struct pmcraid_cmd *); -static void pmcraid_bist_done(struct pmcraid_cmd *cmd) +static void pmcraid_bist_done(struct timer_list *t) { + struct pmcraid_cmd *cmd = from_timer(cmd, t, timer); struct pmcraid_instance *pinstance = cmd->drv_inst; unsigned long lock_flags; int rc; @@ -572,9 +573,6 @@ static void pmcraid_bist_done(struct pmcraid_cmd *cmd) pmcraid_info("BIST not complete, waiting another 2 secs\n"); cmd->timer.expires = jiffies + cmd->time_left; cmd->time_left = 0; - cmd->timer.data = (unsigned long)cmd; - cmd->timer.function = - (void (*)(unsigned long))pmcraid_bist_done; add_timer(&cmd->timer); } else { cmd->time_left = 0; @@ -605,9 +603,8 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd) doorbells, intrs); cmd->time_left = 
msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); - cmd->timer.data = (unsigned long)cmd; cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT); - cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done; + cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_bist_done; add_timer(&cmd->timer); } @@ -617,8 +614,9 @@ static void pmcraid_start_bist(struct pmcraid_cmd *cmd) * Return value * None */ -static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd) +static void pmcraid_reset_alert_done(struct timer_list *t) { + struct pmcraid_cmd *cmd = from_timer(cmd, t, timer); struct pmcraid_instance *pinstance = cmd->drv_inst; u32 status = ioread32(pinstance->ioa_status); unsigned long lock_flags; @@ -637,10 +635,8 @@ static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd) pmcraid_info("critical op is not yet reset waiting again\n"); /* restart timer if some more time is available to wait */ cmd->time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT; - cmd->timer.data = (unsigned long)cmd; cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT; - cmd->timer.function = - (void (*)(unsigned long))pmcraid_reset_alert_done; + cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done; add_timer(&cmd->timer); } } @@ -676,10 +672,8 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd) * bit to be reset. */ cmd->time_left = PMCRAID_RESET_TIMEOUT; - cmd->timer.data = (unsigned long)cmd; cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT; - cmd->timer.function = - (void (*)(unsigned long))pmcraid_reset_alert_done; + cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_reset_alert_done; add_timer(&cmd->timer); iowrite32(DOORBELL_IOA_RESET_ALERT, @@ -704,8 +698,9 @@ static void pmcraid_reset_alert(struct pmcraid_cmd *cmd) * Return value: * None */ -static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd) +static void pmcraid_timeout_handler(struct timer_list *t) { + struct pmcraid_cmd *cmd = from_timer(cmd, t, timer); struct pmcraid_instance *pinstance = cmd->drv_inst; unsigned long lock_flags; @@ -919,7 +914,7 @@ static void pmcraid_send_cmd( struct pmcraid_cmd *cmd, void (*cmd_done) (struct pmcraid_cmd *), unsigned long timeout, - void (*timeout_func) (struct pmcraid_cmd *) + void (*timeout_func) (struct timer_list *) ) { /* initialize done function */ @@ -927,9 +922,8 @@ static void pmcraid_send_cmd( if (timeout_func) { /* setup timeout handler */ - cmd->timer.data = (unsigned long)cmd; cmd->timer.expires = jiffies + timeout; - cmd->timer.function = (void (*)(unsigned long))timeout_func; + cmd->timer.function = (TIMER_FUNC_TYPE)timeout_func; add_timer(&cmd->timer); } @@ -1955,10 +1949,9 @@ static void pmcraid_soft_reset(struct pmcraid_cmd *cmd) * would re-initiate a reset */ cmd->cmd_done = pmcraid_ioa_reset; - cmd->timer.data = (unsigned long)cmd; cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT); - cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler; + cmd->timer.function = (TIMER_FUNC_TYPE)pmcraid_timeout_handler; if (!timer_pending(&cmd->timer)) add_timer(&cmd->timer); diff --git a/drivers/scsi/qla1280.c b/drivers/scsi/qla1280.c index 8a29fb09db14..390775d5c918 100644 --- a/drivers/scsi/qla1280.c +++ b/drivers/scsi/qla1280.c @@ -758,9 +758,9 @@ enum action { }; -static void qla1280_mailbox_timeout(unsigned long __data) +static void qla1280_mailbox_timeout(struct timer_list *t) { - struct scsi_qla_host *ha = (struct scsi_qla_host *)__data; + struct scsi_qla_host *ha = from_timer(ha, t, mailbox_timer); struct device_reg 
__iomem *reg; reg = ha->iobase; @@ -2465,7 +2465,6 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) uint16_t __iomem *mptr; uint16_t data; DECLARE_COMPLETION_ONSTACK(wait); - struct timer_list timer; ENTER("qla1280_mailbox_command"); @@ -2494,18 +2493,15 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb) /* Issue set host interrupt command. */ /* set up a timer just in case we're really jammed */ - init_timer_on_stack(&timer); - timer.expires = jiffies + 20*HZ; - timer.data = (unsigned long)ha; - timer.function = qla1280_mailbox_timeout; - add_timer(&timer); + timer_setup(&ha->mailbox_timer, qla1280_mailbox_timeout, 0); + mod_timer(&ha->mailbox_timer, jiffies + 20 * HZ); spin_unlock_irq(ha->host->host_lock); WRT_REG_WORD(®->host_cmd, HC_SET_HOST_INT); data = qla1280_debounce_register(®->istatus); wait_for_completion(&wait); - del_timer_sync(&timer); + del_timer_sync(&ha->mailbox_timer); spin_lock_irq(ha->host->host_lock); diff --git a/drivers/scsi/qla1280.h b/drivers/scsi/qla1280.h index 834884b9eed5..1522aca2c8c8 100644 --- a/drivers/scsi/qla1280.h +++ b/drivers/scsi/qla1280.h @@ -1055,6 +1055,7 @@ struct scsi_qla_host { struct list_head done_q; /* Done queue */ struct completion *mailbox_wait; + struct timer_list mailbox_timer; volatile struct { uint32_t online:1; /* 0 */ diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f852ca60c49f..3ad375f85b59 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -206,8 +206,8 @@ int qla24xx_async_abort_cmd(srb_t *); */ extern struct scsi_host_template qla2xxx_driver_template; extern struct scsi_transport_template *qla2xxx_transport_vport_template; -extern void qla2x00_timer(scsi_qla_host_t *); -extern void qla2x00_start_timer(scsi_qla_host_t *, void *, unsigned long); +extern void qla2x00_timer(struct timer_list *); +extern void qla2x00_start_timer(scsi_qla_host_t *, unsigned long); extern void qla24xx_deallocate_vp_id(scsi_qla_host_t *); extern int qla24xx_disable_vp (scsi_qla_host_t *); extern int qla24xx_enable_vp (scsi_qla_host_t *); @@ -753,7 +753,7 @@ extern int qla82xx_restart_isp(scsi_qla_host_t *); /* IOCB related functions */ extern int qla82xx_start_scsi(srb_t *); extern void qla2x00_sp_free(void *); -extern void qla2x00_sp_timeout(unsigned long); +extern void qla2x00_sp_timeout(struct timer_list *); extern void qla2x00_bsg_job_done(void *, int); extern void qla2x00_bsg_sp_free(void *); extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index b5b48ddca962..44cf875a484a 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -45,9 +45,9 @@ static void qla24xx_handle_prli_done_event(struct scsi_qla_host *, /* SRB Extensions ---------------------------------------------------------- */ void -qla2x00_sp_timeout(unsigned long __data) +qla2x00_sp_timeout(struct timer_list *t) { - srb_t *sp = (srb_t *)__data; + srb_t *sp = from_timer(sp, t, u.iocb_cmd.timer); struct srb_iocb *iocb; scsi_qla_host_t *vha = sp->vha; struct req_que *req; diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h index 9a2c86eacf44..17d2c20f1f75 100644 --- a/drivers/scsi/qla2xxx/qla_inline.h +++ b/drivers/scsi/qla2xxx/qla_inline.h @@ -269,10 +269,8 @@ qla2x00_rel_sp(srb_t *sp) static inline void qla2x00_init_timer(srb_t *sp, unsigned long tmo) { - init_timer(&sp->u.iocb_cmd.timer); + 
timer_setup(&sp->u.iocb_cmd.timer, qla2x00_sp_timeout, 0); sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ; - sp->u.iocb_cmd.timer.data = (unsigned long)sp; - sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout; add_timer(&sp->u.iocb_cmd.timer); sp->free = qla2x00_sp_free; if (IS_QLAFX00(sp->vha->hw) && (sp->type == SRB_FXIOCB_DCMD)) diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c index c0f8f6c17b79..cbf544dbf883 100644 --- a/drivers/scsi/qla2xxx/qla_mid.c +++ b/drivers/scsi/qla2xxx/qla_mid.c @@ -487,7 +487,7 @@ qla24xx_create_vhost(struct fc_vport *fc_vport) atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); - qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); + qla2x00_start_timer(vha, WATCH_INTERVAL); vha->req = base_vha->req; host->can_queue = base_vha->req->length + 128; diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index dce42a416876..50286cf02eca 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -330,12 +330,10 @@ struct scsi_transport_template *qla2xxx_transport_vport_template = NULL; */ __inline__ void -qla2x00_start_timer(scsi_qla_host_t *vha, void *func, unsigned long interval) +qla2x00_start_timer(scsi_qla_host_t *vha, unsigned long interval) { - init_timer(&vha->timer); + timer_setup(&vha->timer, qla2x00_timer, 0); vha->timer.expires = jiffies + interval * HZ; - vha->timer.data = (unsigned long)vha; - vha->timer.function = (void (*)(unsigned long))func; add_timer(&vha->timer); vha->timer_active = 1; } @@ -3247,7 +3245,7 @@ skip_dpc: base_vha->host->irq = ha->pdev->irq; /* Initialized the timer */ - qla2x00_start_timer(base_vha, qla2x00_timer, WATCH_INTERVAL); + qla2x00_start_timer(base_vha, WATCH_INTERVAL); ql_dbg(ql_dbg_init, base_vha, 0x00ef, "Started qla2x00_timer with " "interval=%d.\n", WATCH_INTERVAL); @@ -5996,8 +5994,9 @@ qla2x00_rst_aen(scsi_qla_host_t *vha) * Context: Interrupt ***************************************************************************/ void -qla2x00_timer(scsi_qla_host_t *vha) +qla2x00_timer(struct timer_list *t) { + scsi_qla_host_t *vha = from_timer(vha, t, timer); unsigned long cpu_flags = 0; int start_dpc = 0; int index; diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c index f05cfc83c9c8..f946bf889015 100644 --- a/drivers/scsi/qla2xxx/qla_target.c +++ b/drivers/scsi/qla2xxx/qla_target.c @@ -996,7 +996,7 @@ static void qlt_free_session_done(struct work_struct *work) if (logout_started) { bool traced = false; - while (!ACCESS_ONCE(sess->logout_completed)) { + while (!READ_ONCE(sess->logout_completed)) { if (!traced) { ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086, "%s: waiting for sess %p logout\n", diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c index 64c6fa563fdb..2b8a8ce2a431 100644 --- a/drivers/scsi/qla4xxx/ql4_os.c +++ b/drivers/scsi/qla4xxx/ql4_os.c @@ -3955,16 +3955,15 @@ exit_session_conn_param: /* * Timer routines */ +static void qla4xxx_timer(struct timer_list *t); -static void qla4xxx_start_timer(struct scsi_qla_host *ha, void *func, +static void qla4xxx_start_timer(struct scsi_qla_host *ha, unsigned long interval) { DEBUG(printk("scsi: %s: Starting timer thread for adapter %d\n", __func__, ha->host->host_no)); - init_timer(&ha->timer); + timer_setup(&ha->timer, qla4xxx_timer, 0); ha->timer.expires = jiffies + interval * HZ; - ha->timer.data = (unsigned long)ha; - ha->timer.function = (void (*)(unsigned long))func; add_timer(&ha->timer); 
ha->timer_active = 1; } @@ -4508,8 +4507,9 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess) * qla4xxx_timer - checks every second for work to do. * @ha: Pointer to host adapter structure. **/ -static void qla4xxx_timer(struct scsi_qla_host *ha) +static void qla4xxx_timer(struct timer_list *t) { + struct scsi_qla_host *ha = from_timer(ha, t, timer); int start_dpc = 0; uint16_t w; @@ -8805,7 +8805,7 @@ skip_retry_init: ha->isp_ops->enable_intrs(ha); /* Start timer thread. */ - qla4xxx_start_timer(ha, qla4xxx_timer, 1); + qla4xxx_start_timer(ha, 1); set_bit(AF_INIT_DONE, &ha->flags); diff --git a/drivers/scsi/smartpqi/smartpqi_init.c b/drivers/scsi/smartpqi/smartpqi_init.c index 83bdbd84eb01..90f6effc32b4 100644 --- a/drivers/scsi/smartpqi/smartpqi_init.c +++ b/drivers/scsi/smartpqi/smartpqi_init.c @@ -2860,11 +2860,12 @@ out: #define PQI_HEARTBEAT_TIMER_INTERVAL (10 * HZ) -static void pqi_heartbeat_timer_handler(unsigned long data) +static void pqi_heartbeat_timer_handler(struct timer_list *t) { int num_interrupts; u32 heartbeat_count; - struct pqi_ctrl_info *ctrl_info = (struct pqi_ctrl_info *)data; + struct pqi_ctrl_info *ctrl_info = from_timer(ctrl_info, t, + heartbeat_timer); pqi_check_ctrl_health(ctrl_info); if (pqi_ctrl_offline(ctrl_info)) @@ -2902,8 +2903,6 @@ static void pqi_start_heartbeat_timer(struct pqi_ctrl_info *ctrl_info) ctrl_info->heartbeat_timer.expires = jiffies + PQI_HEARTBEAT_TIMER_INTERVAL; - ctrl_info->heartbeat_timer.data = (unsigned long)ctrl_info; - ctrl_info->heartbeat_timer.function = pqi_heartbeat_timer_handler; add_timer(&ctrl_info->heartbeat_timer); } @@ -6465,7 +6464,7 @@ static struct pqi_ctrl_info *pqi_alloc_ctrl_info(int numa_node) INIT_DELAYED_WORK(&ctrl_info->rescan_work, pqi_rescan_worker); INIT_DELAYED_WORK(&ctrl_info->update_time_work, pqi_update_time_worker); - init_timer(&ctrl_info->heartbeat_timer); + timer_setup(&ctrl_info->heartbeat_timer, pqi_heartbeat_timer_handler, 0); INIT_WORK(&ctrl_info->ctrl_offline_work, pqi_ctrl_offline_worker); sema_init(&ctrl_info->sync_request_sem, diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index a75f2a2cf780..603783976b81 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -1,10 +1,6 @@ # # SPI driver configuration # -# NOTE: the reason this doesn't show SPI slave support is mostly that -# nobody's needed a slave side API yet. The master-role API is not -# fully appropriate there, so it'd need some thought to do well. -# menuconfig SPI bool "SPI support" depends on HAS_IOMEM @@ -379,7 +375,7 @@ config SPI_FSL_DSPI tristate "Freescale DSPI controller" select REGMAP_MMIO depends on HAS_DMA - depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST + depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || M5441x || COMPILE_TEST help This enables support for the Freescale DSPI controller in master mode. VF610 platform uses the controller. @@ -626,6 +622,13 @@ config SPI_SIRF help SPI driver for CSR SiRFprimaII SoCs +config SPI_SPRD_ADI + tristate "Spreadtrum ADI controller" + depends on ARCH_SPRD || COMPILE_TEST + depends on HWSPINLOCK || (COMPILE_TEST && !HWSPINLOCK) + help + ADI driver based on SPI for Spreadtrum SoCs. 
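Editor's aside, not part of the diff: the megaraid, mvsas, pm8001, pmcraid, qla1280, qla2xxx, qla4xxx and smartpqi hunks above all apply the same core timer-API conversion — the callback now takes a struct timer_list * and recovers its owning object with from_timer(), so the old ->data/->function assignments and the init_timer()/setup_timer() helpers disappear, with a container struct (e.g. uioc_timeout) added where no back-pointer existed. A minimal sketch of that pattern follows, assuming only <linux/timer.h>; struct foo, foo_timeout() and foo_start() are hypothetical names used for illustration, not symbols from this diff:

	struct foo {
		struct timer_list timer;	/* timer embedded in its owning object */
		int pending;
	};

	/* New-style callback: receives the timer, derives the owner via from_timer() */
	static void foo_timeout(struct timer_list *t)
	{
		struct foo *f = from_timer(f, t, timer);

		f->pending = 0;
	}

	static void foo_start(struct foo *f)
	{
		/* replaces setup_timer(&f->timer, foo_timeout, (unsigned long)f) */
		timer_setup(&f->timer, foo_timeout, 0);
		mod_timer(&f->timer, jiffies + HZ);
	}

Drivers above that still assign ->function directly (mvsas, pm8001, pmcraid) cast the new-style handler through TIMER_FUNC_TYPE so the assignment type-checks during the transition.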
+ config SPI_STM32 tristate "STMicroelectronics STM32 SPI controller" depends on ARCH_STM32 || COMPILE_TEST diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index 8e0cda73b324..34c5f2832ddf 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -91,6 +91,7 @@ obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o obj-$(CONFIG_SPI_SIRF) += spi-sirf.o +obj-$(CONFIG_SPI_SPRD_ADI) += spi-sprd-adi.o obj-$(CONFIG_SPI_STM32) += spi-stm32.o obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index 568e1c65aa82..77fe55ce790c 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -213,7 +213,7 @@ static void a3700_spi_mode_set(struct a3700_spi *a3700_spi, } static void a3700_spi_clock_set(struct a3700_spi *a3700_spi, - unsigned int speed_hz, u16 mode) + unsigned int speed_hz) { u32 val; u32 prescale; @@ -231,17 +231,6 @@ static void a3700_spi_clock_set(struct a3700_spi *a3700_spi, val |= A3700_SPI_CLK_CAPT_EDGE; spireg_write(a3700_spi, A3700_SPI_IF_TIME_REG, val); } - - val = spireg_read(a3700_spi, A3700_SPI_IF_CFG_REG); - val &= ~(A3700_SPI_CLK_POL | A3700_SPI_CLK_PHA); - - if (mode & SPI_CPOL) - val |= A3700_SPI_CLK_POL; - - if (mode & SPI_CPHA) - val |= A3700_SPI_CLK_PHA; - - spireg_write(a3700_spi, A3700_SPI_IF_CFG_REG, val); } static void a3700_spi_bytelen_set(struct a3700_spi *a3700_spi, unsigned int len) @@ -423,7 +412,7 @@ static void a3700_spi_transfer_setup(struct spi_device *spi, a3700_spi = spi_master_get_devdata(spi->master); - a3700_spi_clock_set(a3700_spi, xfer->speed_hz, spi->mode); + a3700_spi_clock_set(a3700_spi, xfer->speed_hz); byte_len = xfer->bits_per_word >> 3; @@ -584,6 +573,8 @@ static int a3700_spi_prepare_message(struct spi_master *master, a3700_spi_bytelen_set(a3700_spi, 4); + a3700_spi_mode_set(a3700_spi, spi->mode); + return 0; } diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c index 6ab4c7700228..68cfc351b47f 100644 --- a/drivers/spi/spi-axi-spi-engine.c +++ b/drivers/spi/spi-axi-spi-engine.c @@ -553,7 +553,7 @@ err_put_master: static int spi_engine_remove(struct platform_device *pdev) { - struct spi_master *master = platform_get_drvdata(pdev); + struct spi_master *master = spi_master_get(platform_get_drvdata(pdev)); struct spi_engine *spi_engine = spi_master_get_devdata(master); int irq = platform_get_irq(pdev, 0); @@ -561,6 +561,8 @@ static int spi_engine_remove(struct platform_device *pdev) free_irq(irq, master); + spi_master_put(master); + writel_relaxed(0xff, spi_engine->base + SPI_ENGINE_REG_INT_PENDING); writel_relaxed(0x00, spi_engine->base + SPI_ENGINE_REG_INT_ENABLE); writel_relaxed(0x01, spi_engine->base + SPI_ENGINE_REG_RESET); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index d89127f4a46d..f652f70cb8db 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -32,6 +32,7 @@ #include <linux/regmap.h> #include <linux/sched.h> #include <linux/spi/spi.h> +#include <linux/spi/spi-fsl-dspi.h> #include <linux/spi/spi_bitbang.h> #include <linux/time.h> @@ -151,6 +152,11 @@ static const struct fsl_dspi_devtype_data ls2085a_data = { .max_clock_factor = 8, }; +static const struct fsl_dspi_devtype_data coldfire_data = { + .trans_mode = DSPI_EOQ_MODE, + .max_clock_factor = 8, +}; + struct fsl_dspi_dma { /* Length of transfer in words of DSPI_FIFO_SIZE */ u32 curr_xfer_len; @@ 
-741,6 +747,7 @@ static int dspi_setup(struct spi_device *spi) { struct chip_data *chip; struct fsl_dspi *dspi = spi_master_get_devdata(spi->master); + struct fsl_dspi_platform_data *pdata; u32 cs_sck_delay = 0, sck_cs_delay = 0; unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0; unsigned char pasc = 0, asc = 0, fmsz = 0; @@ -761,11 +768,18 @@ static int dspi_setup(struct spi_device *spi) return -ENOMEM; } - of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay", - &cs_sck_delay); + pdata = dev_get_platdata(&dspi->pdev->dev); - of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay", - &sck_cs_delay); + if (!pdata) { + of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay", + &cs_sck_delay); + + of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay", + &sck_cs_delay); + } else { + cs_sck_delay = pdata->cs_sck_delay; + sck_cs_delay = pdata->sck_cs_delay; + } chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS | SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF; @@ -949,6 +963,7 @@ static int dspi_probe(struct platform_device *pdev) struct fsl_dspi *dspi; struct resource *res; void __iomem *base; + struct fsl_dspi_platform_data *pdata; int ret = 0, cs_num, bus_num; master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi)); @@ -969,25 +984,34 @@ static int dspi_probe(struct platform_device *pdev) master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) | SPI_BPW_MASK(16); - ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num); - if (ret < 0) { - dev_err(&pdev->dev, "can't get spi-num-chipselects\n"); - goto out_master_put; - } - master->num_chipselect = cs_num; + pdata = dev_get_platdata(&pdev->dev); + if (pdata) { + master->num_chipselect = pdata->cs_num; + master->bus_num = pdata->bus_num; - ret = of_property_read_u32(np, "bus-num", &bus_num); - if (ret < 0) { - dev_err(&pdev->dev, "can't get bus-num\n"); - goto out_master_put; - } - master->bus_num = bus_num; + dspi->devtype_data = &coldfire_data; + } else { - dspi->devtype_data = of_device_get_match_data(&pdev->dev); - if (!dspi->devtype_data) { - dev_err(&pdev->dev, "can't get devtype_data\n"); - ret = -EFAULT; - goto out_master_put; + ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num); + if (ret < 0) { + dev_err(&pdev->dev, "can't get spi-num-chipselects\n"); + goto out_master_put; + } + master->num_chipselect = cs_num; + + ret = of_property_read_u32(np, "bus-num", &bus_num); + if (ret < 0) { + dev_err(&pdev->dev, "can't get bus-num\n"); + goto out_master_put; + } + master->bus_num = bus_num; + + dspi->devtype_data = of_device_get_match_data(&pdev->dev); + if (!dspi->devtype_data) { + dev_err(&pdev->dev, "can't get devtype_data\n"); + ret = -EFAULT; + goto out_master_put; + } } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index babb15f07995..79ddefe4180d 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -53,10 +53,13 @@ /* generic defines to abstract from the different register layouts */ #define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ #define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ +#define MXC_INT_RDR BIT(4) /* Receive date threshold interrupt */ /* The maximum bytes that a sdma BD can transfer.*/ #define MAX_SDMA_BD_BYTES (1 << 15) #define MX51_ECSPI_CTRL_MAX_BURST 512 +/* The maximum bytes that IMX53_ECSPI can transfer in slave mode.*/ +#define MX53_MAX_TRANSFER_BYTES 512 enum spi_imx_devtype { IMX1_CSPI, @@ -76,7 +79,9 @@ struct spi_imx_devtype_data { void (*trigger)(struct 
spi_imx_data *); int (*rx_available)(struct spi_imx_data *); void (*reset)(struct spi_imx_data *); + void (*disable)(struct spi_imx_data *); bool has_dmamode; + bool has_slavemode; unsigned int fifo_size; bool dynamic_burst; enum spi_imx_devtype devtype; @@ -108,6 +113,11 @@ struct spi_imx_data { unsigned int dynamic_burst, read_u32; unsigned int word_mask; + /* Slave mode */ + bool slave_mode; + bool slave_aborted; + unsigned int slave_burst; + /* DMA */ bool usedma; u32 wml; @@ -221,6 +231,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi, if (!master->dma_rx) return false; + if (spi_imx->slave_mode) + return false; + bytes_per_word = spi_imx_bytes_per_word(transfer->bits_per_word); if (bytes_per_word != 1 && bytes_per_word != 2 && bytes_per_word != 4) @@ -262,6 +275,7 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi, #define MX51_ECSPI_INT 0x10 #define MX51_ECSPI_INT_TEEN (1 << 0) #define MX51_ECSPI_INT_RREN (1 << 3) +#define MX51_ECSPI_INT_RDREN (1 << 4) #define MX51_ECSPI_DMA 0x14 #define MX51_ECSPI_DMA_TX_WML(wml) ((wml) & 0x3f) @@ -378,6 +392,44 @@ static void spi_imx_buf_tx_swap(struct spi_imx_data *spi_imx) spi_imx_buf_tx_u16(spi_imx); } +static void mx53_ecspi_rx_slave(struct spi_imx_data *spi_imx) +{ + u32 val = be32_to_cpu(readl(spi_imx->base + MXC_CSPIRXDATA)); + + if (spi_imx->rx_buf) { + int n_bytes = spi_imx->slave_burst % sizeof(val); + + if (!n_bytes) + n_bytes = sizeof(val); + + memcpy(spi_imx->rx_buf, + ((u8 *)&val) + sizeof(val) - n_bytes, n_bytes); + + spi_imx->rx_buf += n_bytes; + spi_imx->slave_burst -= n_bytes; + } +} + +static void mx53_ecspi_tx_slave(struct spi_imx_data *spi_imx) +{ + u32 val = 0; + int n_bytes = spi_imx->count % sizeof(val); + + if (!n_bytes) + n_bytes = sizeof(val); + + if (spi_imx->tx_buf) { + memcpy(((u8 *)&val) + sizeof(val) - n_bytes, + spi_imx->tx_buf, n_bytes); + val = cpu_to_be32(val); + spi_imx->tx_buf += n_bytes; + } + + spi_imx->count -= n_bytes; + + writel(val, spi_imx->base + MXC_CSPITXDATA); +} + /* MX51 eCSPI */ static unsigned int mx51_ecspi_clkdiv(struct spi_imx_data *spi_imx, unsigned int fspi, unsigned int *fres) @@ -427,6 +479,9 @@ static void mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable) if (enable & MXC_INT_RR) val |= MX51_ECSPI_INT_RREN; + if (enable & MXC_INT_RDR) + val |= MX51_ECSPI_INT_RDREN; + writel(val, spi_imx->base + MX51_ECSPI_INT); } @@ -439,6 +494,15 @@ static void mx51_ecspi_trigger(struct spi_imx_data *spi_imx) writel(reg, spi_imx->base + MX51_ECSPI_CTRL); } +static void mx51_ecspi_disable(struct spi_imx_data *spi_imx) +{ + u32 ctrl; + + ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL); + ctrl &= ~MX51_ECSPI_CTRL_ENABLE; + writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL); +} + static int mx51_ecspi_config(struct spi_device *spi) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); @@ -446,14 +510,11 @@ static int mx51_ecspi_config(struct spi_device *spi) u32 clk = spi_imx->speed_hz, delay, reg; u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG); - /* - * The hardware seems to have a race condition when changing modes. The - * current assumption is that the selection of the channel arrives - * earlier in the hardware than the mode bits when they are written at - * the same time. - * So set master mode for all channels as we do not support slave mode. 
- */ - ctrl |= MX51_ECSPI_CTRL_MODE_MASK; + /* set Master or Slave mode */ + if (spi_imx->slave_mode) + ctrl &= ~MX51_ECSPI_CTRL_MODE_MASK; + else + ctrl |= MX51_ECSPI_CTRL_MODE_MASK; /* * Enable SPI_RDY handling (falling edge/level triggered). @@ -468,9 +529,22 @@ static int mx51_ecspi_config(struct spi_device *spi) /* set chip select to use */ ctrl |= MX51_ECSPI_CTRL_CS(spi->chip_select); - ctrl |= (spi_imx->bits_per_word - 1) << MX51_ECSPI_CTRL_BL_OFFSET; + if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx)) + ctrl |= (spi_imx->slave_burst * 8 - 1) + << MX51_ECSPI_CTRL_BL_OFFSET; + else + ctrl |= (spi_imx->bits_per_word - 1) + << MX51_ECSPI_CTRL_BL_OFFSET; - cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select); + /* + * eCSPI burst completion by Chip Select signal in Slave mode + * is not functional for imx53 Soc, config SPI burst completed when + * BURST_LENGTH + 1 bits are received + */ + if (spi_imx->slave_mode && is_imx53_ecspi(spi_imx)) + cfg &= ~MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select); + else + cfg |= MX51_ECSPI_CONFIG_SBBCTRL(spi->chip_select); if (spi->mode & SPI_CPHA) cfg |= MX51_ECSPI_CONFIG_SCLKPHA(spi->chip_select); @@ -805,6 +879,7 @@ static struct spi_imx_devtype_data imx1_cspi_devtype_data = { .fifo_size = 8, .has_dmamode = false, .dynamic_burst = false, + .has_slavemode = false, .devtype = IMX1_CSPI, }; @@ -817,6 +892,7 @@ static struct spi_imx_devtype_data imx21_cspi_devtype_data = { .fifo_size = 8, .has_dmamode = false, .dynamic_burst = false, + .has_slavemode = false, .devtype = IMX21_CSPI, }; @@ -830,6 +906,7 @@ static struct spi_imx_devtype_data imx27_cspi_devtype_data = { .fifo_size = 8, .has_dmamode = false, .dynamic_burst = false, + .has_slavemode = false, .devtype = IMX27_CSPI, }; @@ -842,6 +919,7 @@ static struct spi_imx_devtype_data imx31_cspi_devtype_data = { .fifo_size = 8, .has_dmamode = false, .dynamic_burst = false, + .has_slavemode = false, .devtype = IMX31_CSPI, }; @@ -855,6 +933,7 @@ static struct spi_imx_devtype_data imx35_cspi_devtype_data = { .fifo_size = 8, .has_dmamode = true, .dynamic_burst = false, + .has_slavemode = false, .devtype = IMX35_CSPI, }; @@ -867,6 +946,8 @@ static struct spi_imx_devtype_data imx51_ecspi_devtype_data = { .fifo_size = 64, .has_dmamode = true, .dynamic_burst = true, + .has_slavemode = true, + .disable = mx51_ecspi_disable, .devtype = IMX51_ECSPI, }; @@ -878,6 +959,8 @@ static struct spi_imx_devtype_data imx53_ecspi_devtype_data = { .reset = mx51_ecspi_reset, .fifo_size = 64, .has_dmamode = true, + .has_slavemode = true, + .disable = mx51_ecspi_disable, .devtype = IMX53_ECSPI, }; @@ -945,14 +1028,16 @@ static void spi_imx_push(struct spi_imx_data *spi_imx) spi_imx->txfifo++; } - spi_imx->devtype_data->trigger(spi_imx); + if (!spi_imx->slave_mode) + spi_imx->devtype_data->trigger(spi_imx); } static irqreturn_t spi_imx_isr(int irq, void *dev_id) { struct spi_imx_data *spi_imx = dev_id; - while (spi_imx->devtype_data->rx_available(spi_imx)) { + while (spi_imx->txfifo && + spi_imx->devtype_data->rx_available(spi_imx)) { spi_imx->rx(spi_imx); spi_imx->txfifo--; } @@ -1034,7 +1119,7 @@ static int spi_imx_setupxfer(struct spi_device *spi, spi_imx->speed_hz = t->speed_hz; /* Initialize the functions for transfer */ - if (spi_imx->devtype_data->dynamic_burst) { + if (spi_imx->devtype_data->dynamic_burst && !spi_imx->slave_mode) { u32 mask; spi_imx->dynamic_burst = 0; @@ -1078,6 +1163,12 @@ static int spi_imx_setupxfer(struct spi_device *spi, return ret; } + if (is_imx53_ecspi(spi_imx) && spi_imx->slave_mode) { + spi_imx->rx = 
mx53_ecspi_rx_slave; + spi_imx->tx = mx53_ecspi_tx_slave; + spi_imx->slave_burst = t->len; + } + spi_imx->devtype_data->config(spi); return 0; @@ -1262,11 +1353,61 @@ static int spi_imx_pio_transfer(struct spi_device *spi, return transfer->len; } +static int spi_imx_pio_transfer_slave(struct spi_device *spi, + struct spi_transfer *transfer) +{ + struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + int ret = transfer->len; + + if (is_imx53_ecspi(spi_imx) && + transfer->len > MX53_MAX_TRANSFER_BYTES) { + dev_err(&spi->dev, "Transaction too big, max size is %d bytes\n", + MX53_MAX_TRANSFER_BYTES); + return -EMSGSIZE; + } + + spi_imx->tx_buf = transfer->tx_buf; + spi_imx->rx_buf = transfer->rx_buf; + spi_imx->count = transfer->len; + spi_imx->txfifo = 0; + + reinit_completion(&spi_imx->xfer_done); + spi_imx->slave_aborted = false; + + spi_imx_push(spi_imx); + + spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE | MXC_INT_RDR); + + if (wait_for_completion_interruptible(&spi_imx->xfer_done) || + spi_imx->slave_aborted) { + dev_dbg(&spi->dev, "interrupted\n"); + ret = -EINTR; + } + + /* ecspi has a HW issue when works in Slave mode, + * after 64 words writtern to TXFIFO, even TXFIFO becomes empty, + * ECSPI_TXDATA keeps shift out the last word data, + * so we have to disable ECSPI when in slave mode after the + * transfer completes + */ + if (spi_imx->devtype_data->disable) + spi_imx->devtype_data->disable(spi_imx); + + return ret; +} + static int spi_imx_transfer(struct spi_device *spi, struct spi_transfer *transfer) { struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master); + /* flush rxfifo before transfer */ + while (spi_imx->devtype_data->rx_available(spi_imx)) + spi_imx->rx(spi_imx); + + if (spi_imx->slave_mode) + return spi_imx_pio_transfer_slave(spi, transfer); + if (spi_imx->usedma) return spi_imx_dma_transfer(spi_imx, transfer); else @@ -1323,6 +1464,16 @@ spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg) return 0; } +static int spi_imx_slave_abort(struct spi_master *master) +{ + struct spi_imx_data *spi_imx = spi_master_get_devdata(master); + + spi_imx->slave_aborted = true; + complete(&spi_imx->xfer_done); + + return 0; +} + static int spi_imx_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; @@ -1334,13 +1485,23 @@ static int spi_imx_probe(struct platform_device *pdev) struct spi_imx_data *spi_imx; struct resource *res; int i, ret, irq, spi_drctl; + const struct spi_imx_devtype_data *devtype_data = of_id ? of_id->data : + (struct spi_imx_devtype_data *)pdev->id_entry->driver_data; + bool slave_mode; if (!np && !mxc_platform_info) { dev_err(&pdev->dev, "can't get the platform data\n"); return -EINVAL; } - master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data)); + slave_mode = devtype_data->has_slavemode && + of_property_read_bool(np, "spi-slave"); + if (slave_mode) + master = spi_alloc_slave(&pdev->dev, + sizeof(struct spi_imx_data)); + else + master = spi_alloc_master(&pdev->dev, + sizeof(struct spi_imx_data)); if (!master) return -ENOMEM; @@ -1358,20 +1519,29 @@ static int spi_imx_probe(struct platform_device *pdev) spi_imx = spi_master_get_devdata(master); spi_imx->bitbang.master = master; spi_imx->dev = &pdev->dev; + spi_imx->slave_mode = slave_mode; - spi_imx->devtype_data = of_id ? 
of_id->data : - (struct spi_imx_devtype_data *)pdev->id_entry->driver_data; + spi_imx->devtype_data = devtype_data; + /* Get number of chip selects, either platform data or OF */ if (mxc_platform_info) { master->num_chipselect = mxc_platform_info->num_chipselect; - master->cs_gpios = devm_kzalloc(&master->dev, - sizeof(int) * master->num_chipselect, GFP_KERNEL); - if (!master->cs_gpios) - return -ENOMEM; + if (mxc_platform_info->chipselect) { + master->cs_gpios = devm_kzalloc(&master->dev, + sizeof(int) * master->num_chipselect, GFP_KERNEL); + if (!master->cs_gpios) + return -ENOMEM; + + for (i = 0; i < master->num_chipselect; i++) + master->cs_gpios[i] = mxc_platform_info->chipselect[i]; + } + } else { + u32 num_cs; - for (i = 0; i < master->num_chipselect; i++) - master->cs_gpios[i] = mxc_platform_info->chipselect[i]; - } + if (!of_property_read_u32(np, "num-cs", &num_cs)) + master->num_chipselect = num_cs; + /* If not preset, default value of 1 is used */ + } spi_imx->bitbang.chipselect = spi_imx_chipselect; spi_imx->bitbang.setup_transfer = spi_imx_setupxfer; @@ -1380,6 +1550,7 @@ static int spi_imx_probe(struct platform_device *pdev) spi_imx->bitbang.master->cleanup = spi_imx_cleanup; spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message; spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message; + spi_imx->bitbang.master->slave_abort = spi_imx_slave_abort; spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ | SPI_NO_CS; if (is_imx35_cspi(spi_imx) || is_imx51_ecspi(spi_imx) || @@ -1451,37 +1622,38 @@ static int spi_imx_probe(struct platform_device *pdev) spi_imx->devtype_data->intctrl(spi_imx, 0); master->dev.of_node = pdev->dev.of_node; + + /* Request GPIO CS lines, if any */ + if (!spi_imx->slave_mode && master->cs_gpios) { + for (i = 0; i < master->num_chipselect; i++) { + if (!gpio_is_valid(master->cs_gpios[i])) + continue; + + ret = devm_gpio_request(&pdev->dev, + master->cs_gpios[i], + DRIVER_NAME); + if (ret) { + dev_err(&pdev->dev, "Can't get CS GPIO %i\n", + master->cs_gpios[i]); + goto out_spi_bitbang; + } + } + } + ret = spi_bitbang_start(&spi_imx->bitbang); if (ret) { dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); goto out_clk_put; } - if (!master->cs_gpios) { - dev_err(&pdev->dev, "No CS GPIOs available\n"); - ret = -EINVAL; - goto out_clk_put; - } - - for (i = 0; i < master->num_chipselect; i++) { - if (!gpio_is_valid(master->cs_gpios[i])) - continue; - - ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i], - DRIVER_NAME); - if (ret) { - dev_err(&pdev->dev, "Can't get CS GPIO %i\n", - master->cs_gpios[i]); - goto out_clk_put; - } - } - dev_info(&pdev->dev, "probed\n"); clk_disable(spi_imx->clk_ipg); clk_disable(spi_imx->clk_per); return ret; +out_spi_bitbang: + spi_bitbang_stop(&spi_imx->bitbang); out_clk_put: clk_disable_unprepare(spi_imx->clk_ipg); out_put_per: diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c index 5b0e9a3e83f6..3d216b950b41 100644 --- a/drivers/spi/spi-mxs.c +++ b/drivers/spi/spi-mxs.c @@ -44,6 +44,7 @@ #include <linux/completion.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> +#include <linux/pm_runtime.h> #include <linux/module.h> #include <linux/stmp_device.h> #include <linux/spi/spi.h> @@ -442,6 +443,85 @@ static int mxs_spi_transfer_one(struct spi_master *master, return status; } +static int mxs_spi_runtime_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct mxs_spi *spi = spi_master_get_devdata(master); + struct 
mxs_ssp *ssp = &spi->ssp; + int ret; + + clk_disable_unprepare(ssp->clk); + + ret = pinctrl_pm_select_idle_state(dev); + if (ret) { + int ret2 = clk_prepare_enable(ssp->clk); + + if (ret2) + dev_warn(dev, "Failed to reenable clock after failing pinctrl request (pinctrl: %d, clk: %d)\n", + ret, ret2); + } + + return ret; +} + +static int mxs_spi_runtime_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + struct mxs_spi *spi = spi_master_get_devdata(master); + struct mxs_ssp *ssp = &spi->ssp; + int ret; + + ret = pinctrl_pm_select_default_state(dev); + if (ret) + return ret; + + ret = clk_prepare_enable(ssp->clk); + if (ret) + pinctrl_pm_select_idle_state(dev); + + return ret; +} + +static int __maybe_unused mxs_spi_suspend(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + int ret; + + ret = spi_master_suspend(master); + if (ret) + return ret; + + if (!pm_runtime_suspended(dev)) + return mxs_spi_runtime_suspend(dev); + else + return 0; +} + +static int __maybe_unused mxs_spi_resume(struct device *dev) +{ + struct spi_master *master = dev_get_drvdata(dev); + int ret; + + if (!pm_runtime_suspended(dev)) + ret = mxs_spi_runtime_resume(dev); + else + ret = 0; + if (ret) + return ret; + + ret = spi_master_resume(master); + if (ret < 0 && !pm_runtime_suspended(dev)) + mxs_spi_runtime_suspend(dev); + + return ret; +} + +static const struct dev_pm_ops mxs_spi_pm = { + SET_RUNTIME_PM_OPS(mxs_spi_runtime_suspend, + mxs_spi_runtime_resume, NULL) + SET_SYSTEM_SLEEP_PM_OPS(mxs_spi_suspend, mxs_spi_resume) +}; + static const struct of_device_id mxs_spi_dt_ids[] = { { .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, }, { .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, }, @@ -493,12 +573,15 @@ static int mxs_spi_probe(struct platform_device *pdev) if (!master) return -ENOMEM; + platform_set_drvdata(pdev, master); + master->transfer_one_message = mxs_spi_transfer_one; master->bits_per_word_mask = SPI_BPW_MASK(8); master->mode_bits = SPI_CPOL | SPI_CPHA; master->num_chipselect = 3; master->dev.of_node = np; master->flags = SPI_MASTER_HALF_DUPLEX; + master->auto_runtime_pm = true; spi = spi_master_get_devdata(master); ssp = &spi->ssp; @@ -521,28 +604,41 @@ static int mxs_spi_probe(struct platform_device *pdev) goto out_master_free; } - ret = clk_prepare_enable(ssp->clk); - if (ret) - goto out_dma_release; + pm_runtime_enable(ssp->dev); + if (!pm_runtime_enabled(ssp->dev)) { + ret = mxs_spi_runtime_resume(ssp->dev); + if (ret < 0) { + dev_err(ssp->dev, "runtime resume failed\n"); + goto out_dma_release; + } + } + + ret = pm_runtime_get_sync(ssp->dev); + if (ret < 0) { + dev_err(ssp->dev, "runtime_get_sync failed\n"); + goto out_pm_runtime_disable; + } clk_set_rate(ssp->clk, clk_freq); ret = stmp_reset_block(ssp->base); if (ret) - goto out_disable_clk; - - platform_set_drvdata(pdev, master); + goto out_pm_runtime_put; ret = devm_spi_register_master(&pdev->dev, master); if (ret) { dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret); - goto out_disable_clk; + goto out_pm_runtime_put; } + pm_runtime_put(ssp->dev); + return 0; -out_disable_clk: - clk_disable_unprepare(ssp->clk); +out_pm_runtime_put: + pm_runtime_put(ssp->dev); +out_pm_runtime_disable: + pm_runtime_disable(ssp->dev); out_dma_release: dma_release_channel(ssp->dmach); out_master_free: @@ -560,7 +656,10 @@ static int mxs_spi_remove(struct platform_device *pdev) spi = spi_master_get_devdata(master); ssp = &spi->ssp; - clk_disable_unprepare(ssp->clk); + 
pm_runtime_disable(&pdev->dev); + if (!pm_runtime_status_suspended(&pdev->dev)) + mxs_spi_runtime_suspend(&pdev->dev); + dma_release_channel(ssp->dmach); return 0; @@ -572,6 +671,7 @@ static struct platform_driver mxs_spi_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = mxs_spi_dt_ids, + .pm = &mxs_spi_pm, }, }; diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c index 4b6dd73b80da..8974bb340b3a 100644 --- a/drivers/spi/spi-orion.c +++ b/drivers/spi/spi-orion.c @@ -671,7 +671,6 @@ static int orion_spi_probe(struct platform_device *pdev) dev_err(&pdev->dev, "%pOF has no valid 'reg' property (%d)\n", np, status); - status = 0; continue; } diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c index 2a10b3f94ff7..2ce875764ca6 100644 --- a/drivers/spi/spi-rspi.c +++ b/drivers/spi/spi-rspi.c @@ -1221,7 +1221,6 @@ static int rspi_probe(struct platform_device *pdev) struct spi_master *master; struct rspi_data *rspi; int ret; - const struct of_device_id *of_id; const struct rspi_plat_data *rspi_pd; const struct spi_ops *ops; @@ -1229,9 +1228,8 @@ static int rspi_probe(struct platform_device *pdev) if (master == NULL) return -ENOMEM; - of_id = of_match_device(rspi_of_match, &pdev->dev); - if (of_id) { - ops = of_id->data; + ops = of_device_get_match_data(&pdev->dev); + if (ops) { ret = rspi_parse_dt(&pdev->dev, master); if (ret) goto error1; diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c index b392cca8fa4f..de7df20f8712 100644 --- a/drivers/spi/spi-s3c64xx.c +++ b/drivers/spi/spi-s3c64xx.c @@ -752,7 +752,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi) { struct s3c64xx_spi_csinfo *cs = spi->controller_data; struct s3c64xx_spi_driver_data *sdd; - struct s3c64xx_spi_info *sci; int err; sdd = spi_master_get_devdata(spi->master); @@ -788,8 +787,6 @@ static int s3c64xx_spi_setup(struct spi_device *spi) spi_set_ctldata(spi, cs); } - sci = sdd->cntrlr_info; - pm_runtime_get_sync(&sdd->pdev->dev); /* Check if we can provide the requested rate */ diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c index 0eb1e9583485..fcd261f98b9f 100644 --- a/drivers/spi/spi-sh-msiof.c +++ b/drivers/spi/spi-sh-msiof.c @@ -900,7 +900,7 @@ static int sh_msiof_transfer_one(struct spi_master *master, break; copy32 = copy_bswap32; } else if (bits <= 16) { - if (l & 1) + if (l & 3) break; copy32 = copy_wswap32; } else { @@ -1021,6 +1021,8 @@ static const struct sh_msiof_chipdata rcar_gen3_data = { static const struct of_device_id sh_msiof_match[] = { { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data }, + { .compatible = "renesas,msiof-r8a7743", .data = &rcar_gen2_data }, + { .compatible = "renesas,msiof-r8a7745", .data = &rcar_gen2_data }, { .compatible = "renesas,msiof-r8a7790", .data = &rcar_gen2_data }, { .compatible = "renesas,msiof-r8a7791", .data = &rcar_gen2_data }, { .compatible = "renesas,msiof-r8a7792", .data = &rcar_gen2_data }, @@ -1188,12 +1190,10 @@ free_tx_chan: static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p) { struct spi_master *master = p->master; - struct device *dev; if (!master->dma_tx) return; - dev = &p->pdev->dev; dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr, PAGE_SIZE, DMA_FROM_DEVICE); dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr, @@ -1209,15 +1209,13 @@ static int sh_msiof_spi_probe(struct platform_device *pdev) struct resource *r; struct spi_master *master; const struct sh_msiof_chipdata *chipdata; - const struct of_device_id *of_id; struct sh_msiof_spi_info *info; struct 
sh_msiof_spi_priv *p; int i; int ret; - of_id = of_match_device(sh_msiof_match, &pdev->dev); - if (of_id) { - chipdata = of_id->data; + chipdata = of_device_get_match_data(&pdev->dev); + if (chipdata) { info = sh_msiof_spi_parse_dt(&pdev->dev); } else { chipdata = (const void *)pdev->id_entry->driver_data; diff --git a/drivers/spi/spi-sprd-adi.c b/drivers/spi/spi-sprd-adi.c new file mode 100644 index 000000000000..5993bdbf79e4 --- /dev/null +++ b/drivers/spi/spi-sprd-adi.c @@ -0,0 +1,418 @@ +/* + * Copyright (C) 2017 Spreadtrum Communications Inc. + * + * SPDX-License-Identifier: GPL-2.0 + */ + +#include <linux/hwspinlock.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/sizes.h> + +/* Registers definitions for ADI controller */ +#define REG_ADI_CTRL0 0x4 +#define REG_ADI_CHN_PRIL 0x8 +#define REG_ADI_CHN_PRIH 0xc +#define REG_ADI_INT_EN 0x10 +#define REG_ADI_INT_RAW 0x14 +#define REG_ADI_INT_MASK 0x18 +#define REG_ADI_INT_CLR 0x1c +#define REG_ADI_GSSI_CFG0 0x20 +#define REG_ADI_GSSI_CFG1 0x24 +#define REG_ADI_RD_CMD 0x28 +#define REG_ADI_RD_DATA 0x2c +#define REG_ADI_ARM_FIFO_STS 0x30 +#define REG_ADI_STS 0x34 +#define REG_ADI_EVT_FIFO_STS 0x38 +#define REG_ADI_ARM_CMD_STS 0x3c +#define REG_ADI_CHN_EN 0x40 +#define REG_ADI_CHN_ADDR(id) (0x44 + (id - 2) * 4) +#define REG_ADI_CHN_EN1 0x20c + +/* Bits definitions for register REG_ADI_GSSI_CFG0 */ +#define BIT_CLK_ALL_ON BIT(30) + +/* Bits definitions for register REG_ADI_RD_DATA */ +#define BIT_RD_CMD_BUSY BIT(31) +#define RD_ADDR_SHIFT 16 +#define RD_VALUE_MASK GENMASK(15, 0) +#define RD_ADDR_MASK GENMASK(30, 16) + +/* Bits definitions for register REG_ADI_ARM_FIFO_STS */ +#define BIT_FIFO_FULL BIT(11) +#define BIT_FIFO_EMPTY BIT(10) + +/* + * ADI slave devices include RTC, ADC, regulator, charger, thermal and so on. + * The slave devices address offset is always 0x8000 and size is 4K. + */ +#define ADI_SLAVE_ADDR_SIZE SZ_4K +#define ADI_SLAVE_OFFSET 0x8000 + +/* Timeout (ms) for the trylock of hardware spinlocks */ +#define ADI_HWSPINLOCK_TIMEOUT 5000 +/* + * ADI controller has 50 channels including 2 software channels + * and 48 hardware channels. 
+ */ +#define ADI_HW_CHNS 50 + +#define ADI_FIFO_DRAIN_TIMEOUT 1000 +#define ADI_READ_TIMEOUT 2000 +#define REG_ADDR_LOW_MASK GENMASK(11, 0) + +struct sprd_adi { + struct spi_controller *ctlr; + struct device *dev; + void __iomem *base; + struct hwspinlock *hwlock; + unsigned long slave_vbase; + unsigned long slave_pbase; +}; + +static int sprd_adi_check_paddr(struct sprd_adi *sadi, u32 paddr) +{ + if (paddr < sadi->slave_pbase || paddr > + (sadi->slave_pbase + ADI_SLAVE_ADDR_SIZE)) { + dev_err(sadi->dev, + "slave physical address is incorrect, addr = 0x%x\n", + paddr); + return -EINVAL; + } + + return 0; +} + +static unsigned long sprd_adi_to_vaddr(struct sprd_adi *sadi, u32 paddr) +{ + return (paddr - sadi->slave_pbase + sadi->slave_vbase); +} + +static int sprd_adi_drain_fifo(struct sprd_adi *sadi) +{ + u32 timeout = ADI_FIFO_DRAIN_TIMEOUT; + u32 sts; + + do { + sts = readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS); + if (sts & BIT_FIFO_EMPTY) + break; + + cpu_relax(); + } while (--timeout); + + if (timeout == 0) { + dev_err(sadi->dev, "drain write fifo timeout\n"); + return -EBUSY; + } + + return 0; +} + +static int sprd_adi_fifo_is_full(struct sprd_adi *sadi) +{ + return readl_relaxed(sadi->base + REG_ADI_ARM_FIFO_STS) & BIT_FIFO_FULL; +} + +static int sprd_adi_read(struct sprd_adi *sadi, u32 reg_paddr, u32 *read_val) +{ + int read_timeout = ADI_READ_TIMEOUT; + u32 val, rd_addr; + + /* + * Set the physical register address need to read into RD_CMD register, + * then ADI controller will start to transfer automatically. + */ + writel_relaxed(reg_paddr, sadi->base + REG_ADI_RD_CMD); + + /* + * Wait read operation complete, the BIT_RD_CMD_BUSY will be set + * simultaneously when writing read command to register, and the + * BIT_RD_CMD_BUSY will be cleared after the read operation is + * completed. + */ + do { + val = readl_relaxed(sadi->base + REG_ADI_RD_DATA); + if (!(val & BIT_RD_CMD_BUSY)) + break; + + cpu_relax(); + } while (--read_timeout); + + if (read_timeout == 0) { + dev_err(sadi->dev, "ADI read timeout\n"); + return -EBUSY; + } + + /* + * The return value includes data and read register address, from bit 0 + * to bit 15 are data, and from bit 16 to bit 30 are read register + * address. Then we can check the returned register address to validate + * data. + */ + rd_addr = (val & RD_ADDR_MASK ) >> RD_ADDR_SHIFT; + + if (rd_addr != (reg_paddr & REG_ADDR_LOW_MASK)) { + dev_err(sadi->dev, "read error, reg addr = 0x%x, val = 0x%x\n", + reg_paddr, val); + return -EIO; + } + + *read_val = val & RD_VALUE_MASK; + return 0; +} + +static int sprd_adi_write(struct sprd_adi *sadi, unsigned long reg, u32 val) +{ + u32 timeout = ADI_FIFO_DRAIN_TIMEOUT; + int ret; + + ret = sprd_adi_drain_fifo(sadi); + if (ret < 0) + return ret; + + /* + * we should wait for write fifo is empty before writing data to PMIC + * registers. 
+ */ + do { + if (!sprd_adi_fifo_is_full(sadi)) { + writel_relaxed(val, (void __iomem *)reg); + break; + } + + cpu_relax(); + } while (--timeout); + + if (timeout == 0) { + dev_err(sadi->dev, "write fifo is full\n"); + return -EBUSY; + } + + return 0; +} + +static int sprd_adi_transfer_one(struct spi_controller *ctlr, + struct spi_device *spi_dev, + struct spi_transfer *t) +{ + struct sprd_adi *sadi = spi_controller_get_devdata(ctlr); + unsigned long flags, virt_reg; + u32 phy_reg, val; + int ret; + + if (t->rx_buf) { + phy_reg = *(u32 *)t->rx_buf + sadi->slave_pbase; + + ret = sprd_adi_check_paddr(sadi, phy_reg); + if (ret) + return ret; + + ret = hwspin_lock_timeout_irqsave(sadi->hwlock, + ADI_HWSPINLOCK_TIMEOUT, + &flags); + if (ret) { + dev_err(sadi->dev, "get the hw lock failed\n"); + return ret; + } + + ret = sprd_adi_read(sadi, phy_reg, &val); + hwspin_unlock_irqrestore(sadi->hwlock, &flags); + if (ret) + return ret; + + *(u32 *)t->rx_buf = val; + } else if (t->tx_buf) { + u32 *p = (u32 *)t->tx_buf; + + /* + * Get the physical register address need to write and convert + * the physical address to virtual address. Since we need + * virtual register address to write. + */ + phy_reg = *p++ + sadi->slave_pbase; + ret = sprd_adi_check_paddr(sadi, phy_reg); + if (ret) + return ret; + + virt_reg = sprd_adi_to_vaddr(sadi, phy_reg); + val = *p; + + ret = hwspin_lock_timeout_irqsave(sadi->hwlock, + ADI_HWSPINLOCK_TIMEOUT, + &flags); + if (ret) { + dev_err(sadi->dev, "get the hw lock failed\n"); + return ret; + } + + ret = sprd_adi_write(sadi, virt_reg, val); + hwspin_unlock_irqrestore(sadi->hwlock, &flags); + if (ret) + return ret; + } else { + dev_err(sadi->dev, "no buffer for transfer\n"); + return -EINVAL; + } + + return 0; +} + +static void sprd_adi_hw_init(struct sprd_adi *sadi) +{ + struct device_node *np = sadi->dev->of_node; + int i, size, chn_cnt; + const __be32 *list; + u32 tmp; + + /* Address bits select default 12 bits */ + writel_relaxed(0, sadi->base + REG_ADI_CTRL0); + + /* Set all channels as default priority */ + writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIL); + writel_relaxed(0, sadi->base + REG_ADI_CHN_PRIH); + + /* Set clock auto gate mode */ + tmp = readl_relaxed(sadi->base + REG_ADI_GSSI_CFG0); + tmp &= ~BIT_CLK_ALL_ON; + writel_relaxed(tmp, sadi->base + REG_ADI_GSSI_CFG0); + + /* Set hardware channels setting */ + list = of_get_property(np, "sprd,hw-channels", &size); + if (!list || !size) { + dev_info(sadi->dev, "no hw channels setting in node\n"); + return; + } + + chn_cnt = size / 8; + for (i = 0; i < chn_cnt; i++) { + u32 value; + u32 chn_id = be32_to_cpu(*list++); + u32 chn_config = be32_to_cpu(*list++); + + /* Channel 0 and 1 are software channels */ + if (chn_id < 2) + continue; + + writel_relaxed(chn_config, sadi->base + + REG_ADI_CHN_ADDR(chn_id)); + + if (chn_id < 32) { + value = readl_relaxed(sadi->base + REG_ADI_CHN_EN); + value |= BIT(chn_id); + writel_relaxed(value, sadi->base + REG_ADI_CHN_EN); + } else if (chn_id < ADI_HW_CHNS) { + value = readl_relaxed(sadi->base + REG_ADI_CHN_EN1); + value |= BIT(chn_id - 32); + writel_relaxed(value, sadi->base + REG_ADI_CHN_EN1); + } + } +} + +static int sprd_adi_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct spi_controller *ctlr; + struct sprd_adi *sadi; + struct resource *res; + u32 num_chipselect; + int ret; + + if (!np) { + dev_err(&pdev->dev, "can not find the adi bus node\n"); + return -ENODEV; + } + + pdev->id = of_alias_get_id(np, "spi"); + num_chipselect = 
of_get_child_count(np); + + ctlr = spi_alloc_master(&pdev->dev, sizeof(struct sprd_adi)); + if (!ctlr) + return -ENOMEM; + + dev_set_drvdata(&pdev->dev, ctlr); + sadi = spi_controller_get_devdata(ctlr); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + sadi->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(sadi->base)) { + ret = PTR_ERR(sadi->base); + goto put_ctlr; + } + + sadi->slave_vbase = (unsigned long)sadi->base + ADI_SLAVE_OFFSET; + sadi->slave_pbase = res->start + ADI_SLAVE_OFFSET; + sadi->ctlr = ctlr; + sadi->dev = &pdev->dev; + ret = of_hwspin_lock_get_id(np, 0); + if (ret < 0) { + dev_err(&pdev->dev, "can not get the hardware spinlock\n"); + goto put_ctlr; + } + + sadi->hwlock = hwspin_lock_request_specific(ret); + if (!sadi->hwlock) { + ret = -ENXIO; + goto put_ctlr; + } + + sprd_adi_hw_init(sadi); + + ctlr->dev.of_node = pdev->dev.of_node; + ctlr->bus_num = pdev->id; + ctlr->num_chipselect = num_chipselect; + ctlr->flags = SPI_MASTER_HALF_DUPLEX; + ctlr->bits_per_word_mask = 0; + ctlr->transfer_one = sprd_adi_transfer_one; + + ret = devm_spi_register_controller(&pdev->dev, ctlr); + if (ret) { + dev_err(&pdev->dev, "failed to register SPI controller\n"); + goto free_hwlock; + } + + return 0; + +free_hwlock: + hwspin_lock_free(sadi->hwlock); +put_ctlr: + spi_controller_put(ctlr); + return ret; +} + +static int sprd_adi_remove(struct platform_device *pdev) +{ + struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev); + struct sprd_adi *sadi = spi_controller_get_devdata(ctlr); + + hwspin_lock_free(sadi->hwlock); + return 0; +} + +static const struct of_device_id sprd_adi_of_match[] = { + { + .compatible = "sprd,sc9860-adi", + }, + { }, +}; +MODULE_DEVICE_TABLE(of, sprd_adi_of_match); + +static struct platform_driver sprd_adi_driver = { + .driver = { + .name = "sprd-adi", + .of_match_table = sprd_adi_of_match, + }, + .probe = sprd_adi_probe, + .remove = sprd_adi_remove, +}; +module_platform_driver(sprd_adi_driver); + +MODULE_DESCRIPTION("Spreadtrum ADI Controller Driver"); +MODULE_AUTHOR("Baolin Wang <Baolin.Wang@spreadtrum.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c index 44550182a4a3..a76acedd7e2f 100644 --- a/drivers/spi/spi-tegra114.c +++ b/drivers/spi/spi-tegra114.c @@ -50,7 +50,7 @@ #define SPI_IDLE_SDA_PULL_LOW (2 << 18) #define SPI_IDLE_SDA_PULL_HIGH (3 << 18) #define SPI_IDLE_SDA_MASK (3 << 18) -#define SPI_CS_SS_VAL (1 << 20) +#define SPI_CS_SW_VAL (1 << 20) #define SPI_CS_SW_HW (1 << 21) /* SPI_CS_POL_INACTIVE bits are default high */ /* n from 0 to 3 */ @@ -705,9 +705,9 @@ static u32 tegra_spi_setup_transfer_one(struct spi_device *spi, command1 |= SPI_CS_SW_HW; if (spi->mode & SPI_CS_HIGH) - command1 |= SPI_CS_SS_VAL; + command1 |= SPI_CS_SW_VAL; else - command1 &= ~SPI_CS_SS_VAL; + command1 &= ~SPI_CS_SW_VAL; tegra_spi_writel(tspi, 0, SPI_COMMAND2); } else { diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index e8b5a5e21b2e..b33a727a0158 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -2200,7 +2200,7 @@ static void devm_spi_unregister(struct device *dev, void *res) * Context: can sleep * * Register a SPI device as with spi_register_controller() which will - * automatically be unregister + * automatically be unregistered and freed. * * Return: zero on success, else a negative error code. */ @@ -2241,15 +2241,18 @@ static int __unregister(struct device *dev, void *null) * only ones directly touching chip registers. * * This must be called from context that can sleep. 
+ * + * Note that this function also drops a reference to the controller. */ void spi_unregister_controller(struct spi_controller *ctlr) { struct spi_controller *found; + int id = ctlr->bus_num; int dummy; /* First make sure that this controller was ever added */ mutex_lock(&board_lock); - found = idr_find(&spi_master_idr, ctlr->bus_num); + found = idr_find(&spi_master_idr, id); mutex_unlock(&board_lock); if (found != ctlr) { dev_dbg(&ctlr->dev, @@ -2269,7 +2272,7 @@ void spi_unregister_controller(struct spi_controller *ctlr) device_unregister(&ctlr->dev); /* free bus id */ mutex_lock(&board_lock); - idr_remove(&spi_master_idr, ctlr->bus_num); + idr_remove(&spi_master_idr, id); mutex_unlock(&board_lock); } EXPORT_SYMBOL_GPL(spi_unregister_controller); diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c index 56f7be6af1f6..585925bb49a4 100644 --- a/drivers/staging/speakup/main.c +++ b/drivers/staging/speakup/main.c @@ -1165,7 +1165,7 @@ static const int NUM_CTL_LABELS = (MSG_CTL_END - MSG_CTL_START + 1); static void read_all_doc(struct vc_data *vc); static void cursor_done(u_long data); -static DEFINE_TIMER(cursor_timer, cursor_done, 0, 0); +static DEFINE_TIMER(cursor_timer, cursor_done); static void do_handle_shift(struct vc_data *vc, u_char value, char up_flag) { diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c index a1ca68c76579..6ddd3fc3f08d 100644 --- a/drivers/staging/speakup/synth.c +++ b/drivers/staging/speakup/synth.c @@ -158,7 +158,7 @@ static void thread_wake_up(u_long data) wake_up_interruptible_all(&speakup_event); } -static DEFINE_TIMER(thread_timer, thread_wake_up, 0, 0); +static DEFINE_TIMER(thread_timer, thread_wake_up); void synth_start(void) { diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c index af12925a9d2b..de6ff59e3e65 100644 --- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c +++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c @@ -267,7 +267,7 @@ static void update_scan_time(void) last_scanned_shadow[i].time_scan = jiffies; } -static void remove_network_from_shadow(unsigned long arg) +static void remove_network_from_shadow(unsigned long unused) { unsigned long now = jiffies; int i, j; @@ -288,7 +288,6 @@ static void remove_network_from_shadow(unsigned long arg) } if (last_scanned_cnt != 0) { - hAgingTimer.data = arg; mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME)); } } @@ -305,7 +304,6 @@ static int is_network_in_shadow(struct network_info *pstrNetworkInfo, int i; if (last_scanned_cnt == 0) { - hAgingTimer.data = (unsigned long)user_void; mod_timer(&hAgingTimer, jiffies + msecs_to_jiffies(AGING_TIME)); state = -1; } else { diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 5001261f5d69..9e67c7678c86 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -372,6 +372,8 @@ struct iscsi_np *iscsit_add_np( init_completion(&np->np_restart_comp); INIT_LIST_HEAD(&np->np_list); + timer_setup(&np->np_login_timer, iscsi_handle_login_thread_timeout, 0); + ret = iscsi_target_setup_login_socket(np, sockaddr); if (ret != 0) { kfree(np); diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c index 7fe2aa73cff6..718fe9a1b709 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.c +++ b/drivers/target/iscsi/iscsi_target_erl0.c @@ -749,9 +749,9 @@ int iscsit_check_post_dataout( } } -static void 
iscsit_handle_time2retain_timeout(unsigned long data) +void iscsit_handle_time2retain_timeout(struct timer_list *t) { - struct iscsi_session *sess = (struct iscsi_session *) data; + struct iscsi_session *sess = from_timer(sess, t, time2retain_timer); struct iscsi_portal_group *tpg = sess->tpg; struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; @@ -809,14 +809,10 @@ void iscsit_start_time2retain_handler(struct iscsi_session *sess) pr_debug("Starting Time2Retain timer for %u seconds on" " SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid); - init_timer(&sess->time2retain_timer); - sess->time2retain_timer.expires = - (get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ); - sess->time2retain_timer.data = (unsigned long)sess; - sess->time2retain_timer.function = iscsit_handle_time2retain_timeout; sess->time2retain_timer_flags &= ~ISCSI_TF_STOP; sess->time2retain_timer_flags |= ISCSI_TF_RUNNING; - add_timer(&sess->time2retain_timer); + mod_timer(&sess->time2retain_timer, + jiffies + sess->sess_ops->DefaultTime2Retain * HZ); } /* diff --git a/drivers/target/iscsi/iscsi_target_erl0.h b/drivers/target/iscsi/iscsi_target_erl0.h index 3393407bc4e4..883ebf6d36cf 100644 --- a/drivers/target/iscsi/iscsi_target_erl0.h +++ b/drivers/target/iscsi/iscsi_target_erl0.h @@ -12,6 +12,7 @@ extern void iscsit_set_dataout_sequence_values(struct iscsi_cmd *); extern int iscsit_check_pre_dataout(struct iscsi_cmd *, unsigned char *); extern int iscsit_check_post_dataout(struct iscsi_cmd *, unsigned char *, u8); extern void iscsit_start_time2retain_handler(struct iscsi_session *); +extern void iscsit_handle_time2retain_timeout(struct timer_list *t); extern int iscsit_stop_time2retain_timer(struct iscsi_session *); extern void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *); extern void iscsit_cause_connection_reinstatement(struct iscsi_conn *, int); diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index fe9b7f1e44ac..76184094a0cf 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -1148,11 +1148,11 @@ static int iscsit_set_dataout_timeout_values( /* * NOTE: Called from interrupt (timer) context. 
*/ -static void iscsit_handle_dataout_timeout(unsigned long data) +void iscsit_handle_dataout_timeout(struct timer_list *t) { u32 pdu_length = 0, pdu_offset = 0; u32 r2t_length = 0, r2t_offset = 0; - struct iscsi_cmd *cmd = (struct iscsi_cmd *) data; + struct iscsi_cmd *cmd = from_timer(cmd, t, dataout_timer); struct iscsi_conn *conn = cmd->conn; struct iscsi_session *sess = NULL; struct iscsi_node_attrib *na; @@ -1264,13 +1264,9 @@ void iscsit_start_dataout_timer( pr_debug("Starting DataOUT timer for ITT: 0x%08x on" " CID: %hu.\n", cmd->init_task_tag, conn->cid); - init_timer(&cmd->dataout_timer); - cmd->dataout_timer.expires = (get_jiffies_64() + na->dataout_timeout * HZ); - cmd->dataout_timer.data = (unsigned long)cmd; - cmd->dataout_timer.function = iscsit_handle_dataout_timeout; cmd->dataout_timer_flags &= ~ISCSI_TF_STOP; cmd->dataout_timer_flags |= ISCSI_TF_RUNNING; - add_timer(&cmd->dataout_timer); + mod_timer(&cmd->dataout_timer, jiffies + na->dataout_timeout * HZ); } void iscsit_stop_dataout_timer(struct iscsi_cmd *cmd) diff --git a/drivers/target/iscsi/iscsi_target_erl1.h b/drivers/target/iscsi/iscsi_target_erl1.h index 5f66b265b25b..1f6973f87fea 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.h +++ b/drivers/target/iscsi/iscsi_target_erl1.h @@ -30,6 +30,7 @@ extern int iscsit_execute_ooo_cmdsns(struct iscsi_session *); extern int iscsit_execute_cmd(struct iscsi_cmd *, int); extern int iscsit_handle_ooo_cmdsn(struct iscsi_session *, struct iscsi_cmd *, u32); extern void iscsit_remove_ooo_cmdsn(struct iscsi_session *, struct iscsi_ooo_cmdsn *); +extern void iscsit_handle_dataout_timeout(struct timer_list *t); extern void iscsit_mod_dataout_timer(struct iscsi_cmd *); extern void iscsit_start_dataout_timer(struct iscsi_cmd *, struct iscsi_conn *); extern void iscsit_stop_dataout_timer(struct iscsi_cmd *); diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index dc13afbd4c88..64c5a57b92e4 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -333,6 +333,9 @@ static int iscsi_login_zero_tsih_s1( spin_lock_init(&sess->session_usage_lock); spin_lock_init(&sess->ttt_lock); + timer_setup(&sess->time2retain_timer, + iscsit_handle_time2retain_timeout, 0); + idr_preload(GFP_KERNEL); spin_lock_bh(&sess_idr_lock); ret = idr_alloc(&sess_idr, NULL, 0, 0, GFP_NOWAIT); @@ -839,9 +842,9 @@ void iscsi_post_login_handler( iscsit_dec_conn_usage_count(conn); } -static void iscsi_handle_login_thread_timeout(unsigned long data) +void iscsi_handle_login_thread_timeout(struct timer_list *t) { - struct iscsi_np *np = (struct iscsi_np *) data; + struct iscsi_np *np = from_timer(np, t, np_login_timer); spin_lock_bh(&np->np_thread_lock); pr_err("iSCSI Login timeout on Network Portal %pISpc\n", @@ -866,13 +869,9 @@ static void iscsi_start_login_thread_timer(struct iscsi_np *np) * point we do not have access to ISCSI_TPG_ATTRIB(tpg)->login_timeout */ spin_lock_bh(&np->np_thread_lock); - init_timer(&np->np_login_timer); - np->np_login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ); - np->np_login_timer.data = (unsigned long)np; - np->np_login_timer.function = iscsi_handle_login_thread_timeout; np->np_login_timer_flags &= ~ISCSI_TF_STOP; np->np_login_timer_flags |= ISCSI_TF_RUNNING; - add_timer(&np->np_login_timer); + mod_timer(&np->np_login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ); pr_debug("Added timeout timer to iSCSI login request for" " %u seconds.\n", TA_LOGIN_TIMEOUT); @@ -1266,6 +1265,10 @@ 
static int __iscsi_target_login_thread(struct iscsi_np *np) pr_debug("Moving to TARG_CONN_STATE_FREE.\n"); conn->conn_state = TARG_CONN_STATE_FREE; + timer_setup(&conn->nopin_response_timer, + iscsit_handle_nopin_response_timeout, 0); + timer_setup(&conn->nopin_timer, iscsit_handle_nopin_timeout, 0); + if (iscsit_conn_set_transport(conn, np->np_transport) < 0) { kfree(conn); return 1; diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h index c2495e03625c..74ac3abc44a0 100644 --- a/drivers/target/iscsi/iscsi_target_login.h +++ b/drivers/target/iscsi/iscsi_target_login.h @@ -25,5 +25,6 @@ extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8) extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, bool, bool); extern int iscsi_target_login_thread(void *); +extern void iscsi_handle_login_thread_timeout(struct timer_list *t); #endif /*** ISCSI_TARGET_LOGIN_H ***/ diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 7a6751fecd32..b686e2ce9c0e 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -559,9 +559,15 @@ static void iscsi_target_login_drop(struct iscsi_conn *conn, struct iscsi_login iscsi_target_login_sess_out(conn, np, zero_tsih, true); } -static void iscsi_target_login_timeout(unsigned long data) +struct conn_timeout { + struct timer_list timer; + struct iscsi_conn *conn; +}; + +static void iscsi_target_login_timeout(struct timer_list *t) { - struct iscsi_conn *conn = (struct iscsi_conn *)data; + struct conn_timeout *timeout = from_timer(timeout, t, timer); + struct iscsi_conn *conn = timeout->conn; pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n"); @@ -580,7 +586,7 @@ static void iscsi_target_do_login_rx(struct work_struct *work) struct iscsi_np *np = login->np; struct iscsi_portal_group *tpg = conn->tpg; struct iscsi_tpg_np *tpg_np = conn->tpg_np; - struct timer_list login_timer; + struct conn_timeout timeout; int rc, zero_tsih = login->zero_tsih; bool state; @@ -618,15 +624,14 @@ static void iscsi_target_do_login_rx(struct work_struct *work) conn->login_kworker = current; allow_signal(SIGINT); - init_timer(&login_timer); - login_timer.expires = (get_jiffies_64() + TA_LOGIN_TIMEOUT * HZ); - login_timer.data = (unsigned long)conn; - login_timer.function = iscsi_target_login_timeout; - add_timer(&login_timer); - pr_debug("Starting login_timer for %s/%d\n", current->comm, current->pid); + timeout.conn = conn; + timer_setup_on_stack(&timeout.timer, iscsi_target_login_timeout, 0); + mod_timer(&timeout.timer, jiffies + TA_LOGIN_TIMEOUT * HZ); + pr_debug("Starting login timer for %s/%d\n", current->comm, current->pid); rc = conn->conn_transport->iscsit_get_login_rx(conn, login); - del_timer_sync(&login_timer); + del_timer_sync(&timeout.timer); + destroy_timer_on_stack(&timeout.timer); flush_signals(current); conn->login_kworker = NULL; diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index 1e36f83b5961..54f20f184dd6 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -176,6 +176,7 @@ struct iscsi_cmd *iscsit_allocate_cmd(struct iscsi_conn *conn, int state) spin_lock_init(&cmd->istate_lock); spin_lock_init(&cmd->error_lock); spin_lock_init(&cmd->r2t_lock); + timer_setup(&cmd->dataout_timer, iscsit_handle_dataout_timeout, 0); return cmd; } @@ -880,9 +881,9 @@ static int 
iscsit_add_nopin(struct iscsi_conn *conn, int want_response) return 0; } -static void iscsit_handle_nopin_response_timeout(unsigned long data) +void iscsit_handle_nopin_response_timeout(struct timer_list *t) { - struct iscsi_conn *conn = (struct iscsi_conn *) data; + struct iscsi_conn *conn = from_timer(conn, t, nopin_response_timer); iscsit_inc_conn_usage_count(conn); @@ -949,14 +950,10 @@ void iscsit_start_nopin_response_timer(struct iscsi_conn *conn) return; } - init_timer(&conn->nopin_response_timer); - conn->nopin_response_timer.expires = - (get_jiffies_64() + na->nopin_response_timeout * HZ); - conn->nopin_response_timer.data = (unsigned long)conn; - conn->nopin_response_timer.function = iscsit_handle_nopin_response_timeout; conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP; conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING; - add_timer(&conn->nopin_response_timer); + mod_timer(&conn->nopin_response_timer, + jiffies + na->nopin_response_timeout * HZ); pr_debug("Started NOPIN Response Timer on CID: %d to %u" " seconds\n", conn->cid, na->nopin_response_timeout); @@ -980,9 +977,9 @@ void iscsit_stop_nopin_response_timer(struct iscsi_conn *conn) spin_unlock_bh(&conn->nopin_timer_lock); } -static void iscsit_handle_nopin_timeout(unsigned long data) +void iscsit_handle_nopin_timeout(struct timer_list *t) { - struct iscsi_conn *conn = (struct iscsi_conn *) data; + struct iscsi_conn *conn = from_timer(conn, t, nopin_timer); iscsit_inc_conn_usage_count(conn); @@ -1015,13 +1012,9 @@ void __iscsit_start_nopin_timer(struct iscsi_conn *conn) if (conn->nopin_timer_flags & ISCSI_TF_RUNNING) return; - init_timer(&conn->nopin_timer); - conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ); - conn->nopin_timer.data = (unsigned long)conn; - conn->nopin_timer.function = iscsit_handle_nopin_timeout; conn->nopin_timer_flags &= ~ISCSI_TF_STOP; conn->nopin_timer_flags |= ISCSI_TF_RUNNING; - add_timer(&conn->nopin_timer); + mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ); pr_debug("Started NOPIN Timer on CID: %d at %u second" " interval\n", conn->cid, na->nopin_timeout); @@ -1043,13 +1036,9 @@ void iscsit_start_nopin_timer(struct iscsi_conn *conn) return; } - init_timer(&conn->nopin_timer); - conn->nopin_timer.expires = (get_jiffies_64() + na->nopin_timeout * HZ); - conn->nopin_timer.data = (unsigned long)conn; - conn->nopin_timer.function = iscsit_handle_nopin_timeout; conn->nopin_timer_flags &= ~ISCSI_TF_STOP; conn->nopin_timer_flags |= ISCSI_TF_RUNNING; - add_timer(&conn->nopin_timer); + mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ); pr_debug("Started NOPIN Timer on CID: %d at %u second" " interval\n", conn->cid, na->nopin_timeout); diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 5e053d61c0c5..d66dfc212624 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -48,9 +48,11 @@ extern struct iscsi_conn *iscsit_get_conn_from_cid_rcfr(struct iscsi_session *, extern void iscsit_check_conn_usage_count(struct iscsi_conn *); extern void iscsit_dec_conn_usage_count(struct iscsi_conn *); extern void iscsit_inc_conn_usage_count(struct iscsi_conn *); +extern void iscsit_handle_nopin_response_timeout(struct timer_list *t); extern void iscsit_mod_nopin_response_timer(struct iscsi_conn *); extern void iscsit_start_nopin_response_timer(struct iscsi_conn *); extern void iscsit_stop_nopin_response_timer(struct iscsi_conn *); +extern void 
iscsit_handle_nopin_timeout(struct timer_list *t); extern void __iscsit_start_nopin_timer(struct iscsi_conn *); extern void iscsit_start_nopin_timer(struct iscsi_conn *); extern void iscsit_stop_nopin_timer(struct iscsi_conn *); diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 942d094269fb..9469695f5871 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -985,7 +985,7 @@ static unsigned int tcmu_handle_completions(struct tcmu_dev *udev) mb = udev->mb_addr; tcmu_flush_dcache_range(mb, sizeof(*mb)); - while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) { + while (udev->cmdr_last_cleaned != READ_ONCE(mb->cmd_tail)) { struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned; struct tcmu_cmd *cmd; diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c index d272bc4e7fb5..dac8a1a8e4ac 100644 --- a/drivers/tty/cyclades.c +++ b/drivers/tty/cyclades.c @@ -283,7 +283,7 @@ static void cyz_poll(unsigned long); /* The Cyclades-Z polling cycle is defined by this variable */ static long cyz_polling_cycle = CZ_DEF_POLL; -static DEFINE_TIMER(cyz_timerlist, cyz_poll, 0, 0); +static DEFINE_TIMER(cyz_timerlist, cyz_poll); #else /* CONFIG_CYZ_INTR */ static void cyz_rx_restart(unsigned long); diff --git a/drivers/tty/isicom.c b/drivers/tty/isicom.c index 61ecdd6b2fc2..40af32108ff5 100644 --- a/drivers/tty/isicom.c +++ b/drivers/tty/isicom.c @@ -177,7 +177,7 @@ static struct tty_driver *isicom_normal; static void isicom_tx(unsigned long _data); static void isicom_start(struct tty_struct *tty); -static DEFINE_TIMER(tx, isicom_tx, 0, 0); +static DEFINE_TIMER(tx, isicom_tx); /* baud index mappings from linux defns to isi */ diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c index 7f3d4cb0341b..93d37655d928 100644 --- a/drivers/tty/moxa.c +++ b/drivers/tty/moxa.c @@ -428,7 +428,7 @@ static const struct tty_port_operations moxa_port_ops = { }; static struct tty_driver *moxaDriver; -static DEFINE_TIMER(moxaTimer, moxa_poll, 0, 0); +static DEFINE_TIMER(moxaTimer, moxa_poll); /* * HW init diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c index 20d79a6007d5..aa695fda1084 100644 --- a/drivers/tty/rocket.c +++ b/drivers/tty/rocket.c @@ -111,7 +111,7 @@ static struct r_port *rp_table[MAX_RP_PORTS]; /* The main repository of static unsigned int xmit_flags[NUM_BOARDS]; /* Bit significant, indicates port had data to transmit. */ /* eg. Bit 0 indicates port 0 has xmit data, ... 
*/ static atomic_t rp_num_ports_open; /* Number of serial ports open */ -static DEFINE_TIMER(rocket_timer, rp_do_poll, 0, 0); +static DEFINE_TIMER(rocket_timer, rp_do_poll); static unsigned long board1; /* ISA addresses, retrieved from rocketport.conf */ static unsigned long board2; diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c index f4166263bb3a..f974d6340d04 100644 --- a/drivers/tty/vt/keyboard.c +++ b/drivers/tty/vt/keyboard.c @@ -250,7 +250,7 @@ static void kd_nosound(unsigned long ignored) input_handler_for_each_handle(&kbd_handler, &zero, kd_sound_helper); } -static DEFINE_TIMER(kd_mksound_timer, kd_nosound, 0, 0); +static DEFINE_TIMER(kd_mksound_timer, kd_nosound); void kd_mksound(unsigned int hz, unsigned int ticks) { diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c index 2ebaba16f785..602d71630952 100644 --- a/drivers/tty/vt/vt.c +++ b/drivers/tty/vt/vt.c @@ -228,7 +228,7 @@ static int scrollback_delta; */ int (*console_blank_hook)(int); -static DEFINE_TIMER(console_timer, blank_screen_t, 0, 0); +static DEFINE_TIMER(console_timer, blank_screen_t); static int blank_state; static int blank_timer_expired; enum { diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 3e865dbf878c..fbaa2a90d25d 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -483,7 +483,7 @@ static ssize_t wdm_read if (rv < 0) return -ERESTARTSYS; - cntr = ACCESS_ONCE(desc->length); + cntr = READ_ONCE(desc->length); if (cntr == 0) { desc->read = 0; retry: diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index e9326f31db8d..4ae667d8c238 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -150,7 +150,7 @@ static int usbfs_increase_memory_usage(u64 amount) { u64 lim; - lim = ACCESS_ONCE(usbfs_memory_mb); + lim = READ_ONCE(usbfs_memory_mb); lim <<= 20; atomic64_add(amount, &usbfs_memory_usage); diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c index d930bfda4010..58d59c5f8592 100644 --- a/drivers/usb/core/sysfs.c +++ b/drivers/usb/core/sysfs.c @@ -973,7 +973,7 @@ static ssize_t interface_show(struct device *dev, struct device_attribute *attr, char *string; intf = to_usb_interface(dev); - string = ACCESS_ONCE(intf->cur_altsetting->string); + string = READ_ONCE(intf->cur_altsetting->string); if (!string) return 0; return sprintf(buf, "%s\n", string); @@ -989,7 +989,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, intf = to_usb_interface(dev); udev = interface_to_usbdev(intf); - alt = ACCESS_ONCE(intf->cur_altsetting); + alt = READ_ONCE(intf->cur_altsetting); return sprintf(buf, "usb:v%04Xp%04Xd%04Xdc%02Xdsc%02Xdp%02X" "ic%02Xisc%02Xip%02Xin%02X\n", diff --git a/drivers/usb/gadget/udc/gr_udc.c b/drivers/usb/gadget/udc/gr_udc.c index 1f9941145746..0b59fa50aa30 100644 --- a/drivers/usb/gadget/udc/gr_udc.c +++ b/drivers/usb/gadget/udc/gr_udc.c @@ -1261,7 +1261,7 @@ static int gr_handle_in_ep(struct gr_ep *ep) if (!req->last_desc) return 0; - if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN) + if (READ_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN) return 0; /* Not put in hardware buffers yet */ if (gr_read32(&ep->regs->epstat) & (GR_EPSTAT_B1 | GR_EPSTAT_B0)) @@ -1290,7 +1290,7 @@ static int gr_handle_out_ep(struct gr_ep *ep) if (!req->curr_desc) return 0; - ctrl = ACCESS_ONCE(req->curr_desc->ctrl); + ctrl = READ_ONCE(req->curr_desc->ctrl); if (ctrl & GR_DESC_OUT_CTRL_EN) return 0; /* Not received yet */ diff --git a/drivers/usb/host/ohci-hcd.c 
b/drivers/usb/host/ohci-hcd.c index 44924824fa41..c86f89babd57 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -785,7 +785,7 @@ static void io_watchdog_func(unsigned long _ohci) } /* find the last TD processed by the controller. */ - head = hc32_to_cpu(ohci, ACCESS_ONCE(ed->hwHeadP)) & TD_MASK; + head = hc32_to_cpu(ohci, READ_ONCE(ed->hwHeadP)) & TD_MASK; td_start = td; td_next = list_prepare_entry(td, &ed->td_list, td_list); list_for_each_entry_continue(td_next, &ed->td_list, td_list) { diff --git a/drivers/usb/host/uhci-hcd.h b/drivers/usb/host/uhci-hcd.h index d97f0d9b3ce6..f1cc47292a59 100644 --- a/drivers/usb/host/uhci-hcd.h +++ b/drivers/usb/host/uhci-hcd.h @@ -187,7 +187,7 @@ struct uhci_qh { * We need a special accessor for the element pointer because it is * subject to asynchronous updates by the controller. */ -#define qh_element(qh) ACCESS_ONCE((qh)->element) +#define qh_element(qh) READ_ONCE((qh)->element) #define LINK_TO_QH(uhci, qh) (UHCI_PTR_QH((uhci)) | \ cpu_to_hc32((uhci), (qh)->dma_handle)) @@ -275,7 +275,7 @@ struct uhci_td { * subject to asynchronous updates by the controller. */ #define td_status(uhci, td) hc32_to_cpu((uhci), \ - ACCESS_ONCE((td)->status)) + READ_ONCE((td)->status)) #define LINK_TO_TD(uhci, td) (cpu_to_hc32((uhci), (td)->dma_handle)) diff --git a/drivers/usb/misc/usbtest.c b/drivers/usb/misc/usbtest.c index b3fc602b2e24..5ad74750e8a4 100644 --- a/drivers/usb/misc/usbtest.c +++ b/drivers/usb/misc/usbtest.c @@ -576,11 +576,16 @@ alloc_sglist(int nents, int max, int vary, struct usbtest_dev *dev, int pipe) return sg; } -static void sg_timeout(unsigned long _req) +struct sg_timeout { + struct timer_list timer; + struct usb_sg_request *req; +}; + +static void sg_timeout(struct timer_list *t) { - struct usb_sg_request *req = (struct usb_sg_request *) _req; + struct sg_timeout *timeout = from_timer(timeout, t, timer); - usb_sg_cancel(req); + usb_sg_cancel(timeout->req); } static int perform_sglist( @@ -594,9 +599,11 @@ static int perform_sglist( { struct usb_device *udev = testdev_to_usbdev(tdev); int retval = 0; - struct timer_list sg_timer; + struct sg_timeout timeout = { + .req = req, + }; - setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req); + timer_setup_on_stack(&timeout.timer, sg_timeout, 0); while (retval == 0 && iterations-- > 0) { retval = usb_sg_init(req, udev, pipe, @@ -607,13 +614,14 @@ static int perform_sglist( if (retval) break; - mod_timer(&sg_timer, jiffies + + mod_timer(&timeout.timer, jiffies + msecs_to_jiffies(SIMPLE_IO_TIMEOUT)); usb_sg_wait(req); - if (!del_timer_sync(&sg_timer)) + if (!del_timer_sync(&timeout.timer)) retval = -ETIMEDOUT; else retval = req->status; + destroy_timer_on_stack(&timeout.timer); /* FIXME check resulting data pattern */ diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c index f5a86f651f38..2bc3705a99bd 100644 --- a/drivers/vfio/vfio.c +++ b/drivers/vfio/vfio.c @@ -665,7 +665,7 @@ static int vfio_dev_viable(struct device *dev, void *data) { struct vfio_group *group = data; struct vfio_device *device; - struct device_driver *drv = ACCESS_ONCE(dev->driver); + struct device_driver *drv = READ_ONCE(dev->driver); struct vfio_unbound_dev *unbound; int ret = -EINVAL; diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 046f6d280af5..35e929f132e8 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -929,7 +929,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq) continue; } - tpg = ACCESS_ONCE(vs_tpg[*target]); + tpg = 
READ_ONCE(vs_tpg[*target]); if (unlikely(!tpg)) { /* Target does not exist, fail the request */ vhost_scsi_send_bad_target(vs, vq, head, out); diff --git a/drivers/watchdog/alim7101_wdt.c b/drivers/watchdog/alim7101_wdt.c index 665e0e7dfe1e..18e896eeca62 100644 --- a/drivers/watchdog/alim7101_wdt.c +++ b/drivers/watchdog/alim7101_wdt.c @@ -71,7 +71,7 @@ MODULE_PARM_DESC(use_gpio, "Use the gpio watchdog (required by old cobalt boards)."); static void wdt_timer_ping(unsigned long); -static DEFINE_TIMER(timer, wdt_timer_ping, 0, 1); +static DEFINE_TIMER(timer, wdt_timer_ping); static unsigned long next_heartbeat; static unsigned long wdt_is_open; static char wdt_expect_close; @@ -87,7 +87,7 @@ MODULE_PARM_DESC(nowayout, * Whack the dog */ -static void wdt_timer_ping(unsigned long data) +static void wdt_timer_ping(unsigned long unused) { /* If we got a heartbeat pulse within the WDT_US_INTERVAL * we agree to ping the WDT diff --git a/drivers/watchdog/cpwd.c b/drivers/watchdog/cpwd.c index 3d43775548e5..aee0b25cf10d 100644 --- a/drivers/watchdog/cpwd.c +++ b/drivers/watchdog/cpwd.c @@ -230,9 +230,9 @@ static void cpwd_resetbrokentimer(struct cpwd *p, int index) * interrupts within the PLD so me must continually * reset the timers ad infinitum. */ -static void cpwd_brokentimer(unsigned long data) +static void cpwd_brokentimer(struct timer_list *unused) { - struct cpwd *p = (struct cpwd *) data; + struct cpwd *p = cpwd_device; int id, tripped = 0; /* kill a running timer instance, in case we @@ -275,7 +275,7 @@ static void cpwd_stoptimer(struct cpwd *p, int index) if (p->broken) { p->devs[index].runstatus |= WD_STAT_BSTOP; - cpwd_brokentimer((unsigned long) p); + cpwd_brokentimer(NULL); } } } @@ -608,7 +608,7 @@ static int cpwd_probe(struct platform_device *op) } if (p->broken) { - setup_timer(&cpwd_timer, cpwd_brokentimer, (unsigned long)p); + timer_setup(&cpwd_timer, cpwd_brokentimer, 0); cpwd_timer.expires = WD_BTIMEOUT; pr_info("PLD defect workaround enabled for model %s\n", diff --git a/drivers/watchdog/lpc18xx_wdt.c b/drivers/watchdog/lpc18xx_wdt.c index 3b8bb59adf02..b4221f43cd94 100644 --- a/drivers/watchdog/lpc18xx_wdt.c +++ b/drivers/watchdog/lpc18xx_wdt.c @@ -78,10 +78,10 @@ static int lpc18xx_wdt_feed(struct watchdog_device *wdt_dev) return 0; } -static void lpc18xx_wdt_timer_feed(unsigned long data) +static void lpc18xx_wdt_timer_feed(struct timer_list *t) { - struct watchdog_device *wdt_dev = (struct watchdog_device *)data; - struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev); + struct lpc18xx_wdt_dev *lpc18xx_wdt = from_timer(lpc18xx_wdt, t, timer); + struct watchdog_device *wdt_dev = &lpc18xx_wdt->wdt_dev; lpc18xx_wdt_feed(wdt_dev); @@ -96,7 +96,9 @@ static void lpc18xx_wdt_timer_feed(unsigned long data) */ static int lpc18xx_wdt_stop(struct watchdog_device *wdt_dev) { - lpc18xx_wdt_timer_feed((unsigned long)wdt_dev); + struct lpc18xx_wdt_dev *lpc18xx_wdt = watchdog_get_drvdata(wdt_dev); + + lpc18xx_wdt_timer_feed(&lpc18xx_wdt->timer); return 0; } @@ -267,8 +269,7 @@ static int lpc18xx_wdt_probe(struct platform_device *pdev) __lpc18xx_wdt_set_timeout(lpc18xx_wdt); - setup_timer(&lpc18xx_wdt->timer, lpc18xx_wdt_timer_feed, - (unsigned long)&lpc18xx_wdt->wdt_dev); + timer_setup(&lpc18xx_wdt->timer, lpc18xx_wdt_timer_feed, 0); watchdog_set_nowayout(&lpc18xx_wdt->wdt_dev, nowayout); watchdog_set_restart_priority(&lpc18xx_wdt->wdt_dev, 128); diff --git a/drivers/watchdog/machzwd.c b/drivers/watchdog/machzwd.c index 9826b59ef734..8a616a57bb90 100644 --- 
a/drivers/watchdog/machzwd.c +++ b/drivers/watchdog/machzwd.c @@ -127,7 +127,7 @@ static int zf_action = GEN_RESET; static unsigned long zf_is_open; static char zf_expect_close; static DEFINE_SPINLOCK(zf_port_lock); -static DEFINE_TIMER(zf_timer, zf_ping, 0, 0); +static DEFINE_TIMER(zf_timer, zf_ping); static unsigned long next_heartbeat; diff --git a/drivers/watchdog/mixcomwd.c b/drivers/watchdog/mixcomwd.c index be86ea359eee..c9e38096ea91 100644 --- a/drivers/watchdog/mixcomwd.c +++ b/drivers/watchdog/mixcomwd.c @@ -105,7 +105,7 @@ static unsigned long mixcomwd_opened; /* long req'd for setbit --RR */ static int watchdog_port; static int mixcomwd_timer_alive; -static DEFINE_TIMER(mixcomwd_timer, mixcomwd_timerfun, 0, 0); +static DEFINE_TIMER(mixcomwd_timer, mixcomwd_timerfun); static char expect_close; static bool nowayout = WATCHDOG_NOWAYOUT; diff --git a/drivers/watchdog/sbc60xxwdt.c b/drivers/watchdog/sbc60xxwdt.c index 2eef58a0cf05..8d589939bc84 100644 --- a/drivers/watchdog/sbc60xxwdt.c +++ b/drivers/watchdog/sbc60xxwdt.c @@ -113,7 +113,7 @@ MODULE_PARM_DESC(nowayout, __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); static void wdt_timer_ping(unsigned long); -static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0); +static DEFINE_TIMER(timer, wdt_timer_ping); static unsigned long next_heartbeat; static unsigned long wdt_is_open; static char wdt_expect_close; diff --git a/drivers/watchdog/sc520_wdt.c b/drivers/watchdog/sc520_wdt.c index 1cfd3f6a13d5..3e9bbaa37bf4 100644 --- a/drivers/watchdog/sc520_wdt.c +++ b/drivers/watchdog/sc520_wdt.c @@ -124,7 +124,7 @@ MODULE_PARM_DESC(nowayout, static __u16 __iomem *wdtmrctl; static void wdt_timer_ping(unsigned long); -static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0); +static DEFINE_TIMER(timer, wdt_timer_ping); static unsigned long next_heartbeat; static unsigned long wdt_is_open; static char wdt_expect_close; diff --git a/drivers/watchdog/via_wdt.c b/drivers/watchdog/via_wdt.c index 5f9cbc37520d..ad3c3be13b40 100644 --- a/drivers/watchdog/via_wdt.c +++ b/drivers/watchdog/via_wdt.c @@ -68,7 +68,7 @@ static struct resource wdt_res; static void __iomem *wdt_mem; static unsigned int mmio; static void wdt_timer_tick(unsigned long data); -static DEFINE_TIMER(timer, wdt_timer_tick, 0, 0); +static DEFINE_TIMER(timer, wdt_timer_tick); /* The timer that pings the watchdog */ static unsigned long next_heartbeat; /* the next_heartbeat for the timer */ diff --git a/drivers/watchdog/w83877f_wdt.c b/drivers/watchdog/w83877f_wdt.c index f0483c75ed32..ba6b680af100 100644 --- a/drivers/watchdog/w83877f_wdt.c +++ b/drivers/watchdog/w83877f_wdt.c @@ -98,7 +98,7 @@ MODULE_PARM_DESC(nowayout, __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); static void wdt_timer_ping(unsigned long); -static DEFINE_TIMER(timer, wdt_timer_ping, 0, 0); +static DEFINE_TIMER(timer, wdt_timer_ping); static unsigned long next_heartbeat; static unsigned long wdt_is_open; static char wdt_expect_close; diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index 2c6a9114d332..a8721d718186 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -305,7 +305,7 @@ struct deferred_entry { }; static LIST_HEAD(deferred_list); static void gnttab_handle_deferred(unsigned long); -static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0); +static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred); static void gnttab_handle_deferred(unsigned long unused) { |
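Nearly every timer hunk in this section is the same mechanical conversion to the new timer API: the callback now receives a struct timer_list * instead of an unsigned long cookie, the owning object is recovered with from_timer() rather than a cast, and init_timer()/setup_timer() plus the .data and .function assignments collapse into timer_setup() (or the two-argument DEFINE_TIMER for static timers). A minimal sketch of that pattern, using made-up names (my_dev, my_poll) purely as illustration and not taken from any of the drivers above:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct my_dev {
	struct timer_list poll_timer;	/* timer embedded in the owning object */
	/* ... other driver state ... */
};

/* New-style callback: the timer itself is passed in, not an opaque cookie. */
static void my_poll(struct timer_list *t)
{
	struct my_dev *dev = from_timer(dev, t, poll_timer);

	/* ... periodic work ..., then re-arm */
	mod_timer(&dev->poll_timer, jiffies + HZ);
}

static void my_dev_start(struct my_dev *dev)
{
	/* Replaces setup_timer(&dev->poll_timer, my_poll, (unsigned long)dev); */
	timer_setup(&dev->poll_timer, my_poll, 0);
	mod_timer(&dev->poll_timer, jiffies + HZ);
}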